| id | content |
|---|---|
408958
|
import os
from setuptools import setup
from setuptools import find_packages
import glob
libs = ['../build/libios_runtime.so', '../build/libtrt_runtime.so']
existing_libs = []
for libpath in libs:
if os.path.exists(libpath):
existing_libs.append(libpath)
assert any('libios_runtime.so' in lib for lib in existing_libs), "IOS runtime library not found."
setup(name='ios',
version='0.1.dev0',
description='IOS: An Inter-Operator Scheduler for CNN Acceleration',
zip_safe=False,
packages=find_packages(),
url='https://github.com/mit-han-lab/inter-operator-scheduler',
include_package_data=True,
install_requires=[
"numpy",
"pydot",
"tqdm"
],
data_files=[('ios', existing_libs),
('ios', [*glob.glob('ios/models/randwire_graphs/generated/*'), 'ios/models/randwire_graphs/ws.py'])]
)
|
409016
|
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
class SubSampler(BaseEstimator):
def __init__(self, ratio=.3, random_state=None):
self.ratio = ratio
self.random_state = random_state
self.random_state_ = None
def transform_pipe(self, X, y=None):
# Awkward situation: random_state_ is set at transform time :)
if self.random_state_ is None:
self.random_state_ = check_random_state(self.random_state)
n_samples, _ = X.shape
random_choice = self.random_state_.random_sample(n_samples)
random_choice = random_choice < self.ratio
X_out = X[random_choice]
y_out = None
if y is not None:
y_out = y[random_choice]
return X_out, y_out
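# --- Usage sketch (not part of the original module; the toy arrays are illustrative) ---
# transform_pipe keeps roughly `ratio` of the rows of X (and the matching rows of y),
# seeding random_state_ lazily on the first call.
if __name__ == '__main__':
    import numpy as np
    X = np.arange(20).reshape(10, 2)
    y = np.arange(10)
    sampler = SubSampler(ratio=0.5, random_state=0)
    X_sub, y_sub = sampler.transform_pipe(X, y)
    print(X_sub.shape[0], y_sub.shape[0])  # roughly half of the 10 rows are kept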
|
409024
|
import torch
import torchvision
from torch import nn as nn
from utils import util
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, wgangp, and hinge.
            target_real_label (float) - - label for a real image
            target_fake_label (float) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.register_buffer('zero_tensor', torch.tensor(0.))
self.zero_tensor.requires_grad_(False)
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode == 'wgangp':
self.loss = None
elif gan_mode == 'hinge':
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def get_zero_tensor(self, prediction):
return self.zero_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real, for_discriminator=True):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - tpyically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
if isinstance(prediction, list):
losses = []
for p in prediction:
target_tensor = self.get_target_tensor(p, target_is_real)
losses.append(self.loss(p, target_tensor))
return sum(losses)
else:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
elif self.gan_mode == 'hinge':
if isinstance(prediction, list):
loss = 0
for pred_i in prediction:
if isinstance(pred_i, list):
pred_i = pred_i[-1]
loss_tensor = self(pred_i, target_is_real, for_discriminator)
bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0)
new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1)
loss += new_loss
return loss / len(prediction)
else:
if for_discriminator:
if target_is_real:
minval = torch.min(prediction - 1, self.get_zero_tensor(prediction))
loss = -torch.mean(minval)
else:
minval = torch.min(-prediction - 1, self.get_zero_tensor(prediction))
loss = -torch.mean(minval)
else:
assert target_is_real
loss = -torch.mean(prediction)
else:
raise NotImplementedError('gan mode %s not implemented' % self.gan_mode)
return loss
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]). \
contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), -1) # flat the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
class VGG19(torch.nn.Module):
def __init__(self, requires_grad=False):
super().__init__()
vgg_pretrained_features = torchvision.models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
class VGGLoss(nn.Module):
def __init__(self):
super(VGGLoss, self).__init__()
self.vgg = VGG19()
self.vgg.eval()
util.set_requires_grad(self.vgg, False)
self.criterion = nn.L1Loss()
self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
def forward(self, x, y):
# x_vgg, y_vgg = self.vgg(x), self.vgg(y)
# loss = 0
loss = 0
x_vgg = self.vgg(x)
with torch.no_grad():
y_vgg = self.vgg(y)
for i in range(len(x_vgg)):
loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
return loss
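# --- Usage sketch (not part of the original file; shapes and values are illustrative) ---
# GANLoss builds the real/fake target tensor internally, so callers only pass the raw
# discriminator output plus a flag saying whether the ground truth is "real".
if __name__ == '__main__':
    criterion = GANLoss(gan_mode='lsgan')
    pred_fake = torch.randn(4, 1, 30, 30)        # e.g. a PatchGAN discriminator map
    d_loss_fake = criterion(pred_fake, False)    # discriminator: fakes should score as fake
    g_loss = criterion(pred_fake, True)          # generator: wants fakes to score as real
    print(d_loss_fake.item(), g_loss.item())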
|
409080
|
import collections
import os
import unicodedata
from typing import List
import numpy as np
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTransform(object):
def __init__(
self,
vocab_file,
do_lower_case=True,
do_basic_tokenize=True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
strip_accents=None,
max_length=128,
task="text",
**kwargs
):
self.do_basic_tokenize = do_basic_tokenize
self.unk_token = unk_token
self.sep_token = sep_token
self.pad_token = pad_token
self.cls_token = cls_token
self.mask_token = mask_token
self.max_length = max_length
self.task = task
self.added_tokens_encoder = {}
self.added_tokens_decoder = {}
self.unique_no_split_tokens: List[str] = []
super().__init__(**kwargs)
if not os.path.isfile(vocab_file):
raise ValueError(
f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
self.all_special_tokens = ['[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]']
self.is_train = True
def set_train(self):
self.is_train = True
def set_eval(self):
self.is_train = False
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
# If the token is part of the never_split set
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def convert_tokens_to_ids(self, tokens):
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id(token))
return ids
def string_to_ids(self, text, max_length=None):
tokens = self._tokenize(text)
if max_length is None:
tokens = [self.cls_token] + tokens + [self.sep_token]
attention_mask = [1] * len(tokens)
else:
if not isinstance(max_length, int):
raise ValueError(f"{max_length} is not int.")
else:
tokens_length = len(tokens)
if tokens_length >= (max_length - 2):
tokens = [self.cls_token] + tokens[:max_length - 2] + [self.sep_token]
attention_mask = [1] * len(tokens)
else:
attention_mask = [1] * (len(tokens) + 2) + [0] * (max_length - tokens_length - 2)
tokens = [self.cls_token] + tokens + [self.sep_token] + [self.pad_token] * (
max_length - tokens_length - 2)
ids = self.convert_tokens_to_ids(tokens)
return {"inputs": np.array(ids), "attention_mask": np.array(attention_mask)}
def process_token(self, text, label, max_length=None):
ids = []
labels = []
for token, l in zip(text, label):
token = self._tokenize(token)
id = self.convert_tokens_to_ids(token)
l = [l] * len(id)
ids += id
labels += l
if max_length is None:
ids = self.convert_tokens_to_ids([self.cls_token]) + ids + self.convert_tokens_to_ids([self.sep_token])
attention_mask = [1] * len(ids)
labels = [-100] + labels + [-100]
else:
if not isinstance(max_length, int):
raise ValueError(f"{max_length} is not int.")
else:
ids_length = len(ids)
if ids_length >= (max_length - 2):
ids = self.convert_tokens_to_ids([self.cls_token]) + \
ids[:max_length - 2] + self.convert_tokens_to_ids([self.sep_token])
attention_mask = [1] * len(ids)
labels = [-100] + labels[:max_length - 2] + [-100]
else:
attention_mask = [1] * (len(ids) + 2) + [0] * (max_length - ids_length - 2)
ids = self.convert_tokens_to_ids([self.cls_token]) + \
ids + self.convert_tokens_to_ids([self.sep_token]) + self.convert_tokens_to_ids(
[self.pad_token]) * (max_length - ids_length - 2)
labels = [-100] + labels + [-100] * (max_length - ids_length - 1)
return {"inputs": np.array(ids), "attention_mask": np.array(attention_mask),
"token_type_ids": np.zeros_like(np.array(ids))}, {"labels": np.array(labels)}
def __call__(self, text, label, max_length=None):
if self.task == "token":
return self.process_token(text, label, max_length if max_length else self.max_length)
max_length = max_length if max_length else self.max_length
inputs = self.string_to_ids(text, max_length=max_length)
inputs["token_type_ids"] = np.zeros_like(inputs["inputs"])
return inputs, {"labels": label}
class BasicTokenizer(object):
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
Args:
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to lowercase the input when tokenizing.
never_split (:obj:`Iterable`, `optional`):
Collection of tokens which will never be split during tokenization. Only has an effect when
:obj:`do_basic_tokenize=True`
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this `issue
<https://github.com/huggingface/transformers/issues/328>`__).
strip_accents: (:obj:`bool`, `optional`):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for :obj:`lowercase` (as in the original BERT).
"""
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = set(never_split)
self.tokenize_chinese_chars = tokenize_chinese_chars
self.strip_accents = strip_accents
def tokenize(self, text, never_split=None):
"""
Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see
WordPieceTokenizer.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes. Now implemented directly at the base class level (see
:func:`PreTrainedTokenizer.tokenize`) List of token not to split.
"""
# union() returns a new set by concatenating the two sets.
never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if token not in never_split:
if self.do_lower_case:
token = token.lower()
if self.strip_accents is not False:
token = self._run_strip_accents(token)
elif self.strip_accents:
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token, never_split))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""
Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
tokenization using the given vocabulary.
        For example, :obj:`input = "unaffable"` will return as output :obj:`["un", "##aff", "##able"]`.
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_control(char):
"""Checks whether `char` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def _is_whitespace(char):
"""Checks whether `char` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
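# --- Usage sketch (not part of the original file; the tiny vocab is made up) ---
# Illustrates the greedy longest-match-first algorithm in WordpieceTokenizer.tokenize:
# with the pieces below in the vocab, "unaffable" splits into ["un", "##aff", "##able"],
# while an out-of-vocabulary word collapses to the unknown token.
if __name__ == '__main__':
    toy_vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3, "runs": 4}
    wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")
    print(wordpiece.tokenize("unaffable runs"))  # ['un', '##aff', '##able', 'runs']
    print(wordpiece.tokenize("zyzzyva"))         # ['[UNK]']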
|
409083
|
import numpy as np
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def initialize(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
|
409154
|
import gzip
import numpy as np
def lazy_file_reader(path):
with gzip.open(path, 'rb') as fin:
for line in fin:
yield [int(i) for i in line.decode().strip().split()]
class lazy_file_iterator():
def __init__(self, path):
self.fin = gzip.open(path, "rb")
self.buf_size = 100
self.cache = []
self.pointer = 0
def reset(self):
self.fin.seek(0)
    def __next__(self):
        if len(self.cache) <= 0:
            for _ in range(self.buf_size):
                line = self.fin.readline()
                if not line:  # EOF: readline on a binary file returns b'', so wrap around
                    self.reset()
                    line = self.fin.readline()
                self.cache.append(line)
        return [int(i) for i in self.cache.pop().decode().strip().split()]
def __iter__(self):
return self
def __len__(self):
return len(self.cache)
# No need to be lazy any more
class words_sampler_iterator():
def __init__(self, probs, buffer_size, batch_size, uniform_sample):
self.probs = probs
self.set_size = len(probs)
if uniform_sample:
self.probs = np.ones(self.set_size) * 1. / self.set_size
self.batch_size = batch_size
self.cache = np.arange(buffer_size)
def retrieve_cache(self):
return self.cache
def __next__(self):
return np.random.choice(self.set_size, self.batch_size, replace=True, p=self.probs)
def __iter__(self):
return self
def __len__(self):
return len(self.cache)
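# --- Usage sketch (not part of the original module; the probabilities are made up) ---
# words_sampler_iterator draws `batch_size` word ids per step according to `probs`
# (or uniformly when uniform_sample=True).
if __name__ == '__main__':
    probs = np.array([0.5, 0.3, 0.2])
    sampler = words_sampler_iterator(probs, buffer_size=10, batch_size=4, uniform_sample=False)
    print(next(sampler))  # e.g. array([0, 0, 1, 2])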
|
409175
|
import FWCore.ParameterSet.Config as cms
# Hodoscope Reconstruction
from RecoTBCalo.EcalTBHodoscopeReconstructor.ecal2004TBHodoscopeReconstructor_cfi import *
# TDC Reconstruction
from RecoTBCalo.EcalTBTDCReconstructor.ecal2004TBTDCReconstructor_cfi import *
# uncalibrated rechit producer
from RecoTBCalo.EcalTBRecProducers.ecal2004TBWeightUncalibRecHit_cfi import *
localReco2004_rawData = cms.Sequence(ecal2004TBHodoscopeReconstructor*ecal2004TBTDCReconstructor*ecal2004TBWeightUncalibRecHit)
|
409181
|
import os
import click
import subprocess
import docker
import sys
from anchore.configuration import AnchoreConfiguration
from anchore.version import version as anchore_version
import analyzer
import query
import audit
import system
import logs
import toolbox
import login
import feeds
import policybundle
import anchore.anchore_image_db, anchore.anchore_utils
from anchore.util import contexts
from .common import init_output_format, anchore_print_err, extended_help_option
main_extended_help="""
Anchore is a tool for analyzing, querying, and curating container
images to deliver the transparency, predictability, and control
necessary to use containers in production. Anchore is composed of a
toolset that runs locally on a host as well as a web service that
monitors the container ecosystem and provides inputs to the local
toolset.
The tool has capabilities to populate a registry, run analysis,
explore/query the analysis results (including custom queries and
checks), and run policy-based gate-functions against container images
to help an ops team decide if a container should go into the CI/CD
pipeline and on to production based on attributes of the Dockerfile,
built image, or both.
After installation, the first command run should be: 'anchore feeds
list' to initialize the system and load feed data.
Feeds are different types of data that the anchore web service makes
available for certain container image queries and scans. At any point
in time, you can use the 'anchore feeds' commands to sync the latest
data from the anchore web service, subscribe and/or unsubscribe from
anchore feeds, or get a list of available feeds. Except during feeds
operations, no network connectivity is required by anchore to run
analysis and query images.
Configuration Files:
Anchore configuration files are installed automatically if not already present; anchore looks for them, in order, at:
\b
$HOME/.anchore/conf
/etc/anchore
A default install will copy the files to $HOME/.anchore/conf/, but they may be manually put in /etc/anchore for a system global config.
* config.yaml - main configuration file. Used to override default values. Anchore will search for it in
$HOME/.anchore/conf, then in /etc/anchore. If not found in either place, a new one is initialized and copied to $HOME/.anchore
* anchore_gate.policy - The global gate policy file (see anchore-gate(1) for more help on gates).
High-level example flows:
Initialize the system and sync the by-default subscribed feed 'vulnerabilities':
\b
anchore feeds list
anchore feeds sync
Analyze an image
docker pull nginx:latest
anchore analyze --image nginx:latest --imagetype base
Generate a summary report on all analyzed images
anchore audit report
Check gate output for nginx:latest:
anchore gate --image nginx:latest
"""
@click.group()
@click.option('--verbose', is_flag=True, help='Enable verbose output to stderr.')
@click.option('--debug', is_flag=True, help='Developer debug output to stderr.')
@click.option('--quiet', is_flag=True, help='Only errors to stderr, no status messages.')
@click.option('--json', is_flag=True, help='Output formatted json to stdout.')
@click.option('--plain', is_flag=True, help='Output formatted scriptable text to stdout.')
@click.option('--html', is_flag=True, help='Output formatted HTML table to stdout.')
@click.option('--config-override', help='Override an anchore configuration option (can be used multiple times).', metavar='<config_opt>=<config_value>', multiple=True)
@click.version_option(version=anchore_version)
@click.pass_context
@extended_help_option(extended_help=main_extended_help)
def main_entry(ctx, verbose, debug, quiet, json, plain, html, config_override):
"""
Anchore is a tool to analyze, query, and curate container images. The options at this top level
control stdout and stderr verbosity and format.
After installation, the first command run should be: 'anchore feeds
list' to initialize the system and load feed data.
High-level example flows:
    Initialize the system and sync the by-default subscribed feed 'vulnerabilities':
\b
anchore feeds list
anchore feeds sync
Analyze an image
docker pull nginx:latest
anchore analyze --image nginx:latest --imagetype base
Generate a summary report on all analyzed images
anchore audit report
Check gate output for nginx:latest:
anchore gate --image nginx:latest
"""
# Load the config into the context object
logfile = None
debug_logfile = None
try:
try:
config_overrides = {}
if config_override:
for el in config_override:
try:
(key, val) = el.split('=')
if not key or not val:
raise Exception("could not split by '='")
config_overrides[key] = val
except:
click.echo("Error: specified --config_override param cannot be parsed (should be <config_opt>=<value>): " + str(el))
exit(1)
args = {'verbose': verbose, 'debug': debug, 'json': json, 'plain': plain, 'html': html, 'quiet': quiet, 'config_overrides':config_overrides}
anchore_conf = AnchoreConfiguration(cliargs=args)
except Exception as err:
click.echo("Error setting up/reading Anchore configuration", err=True)
click.echo("Info: "+str(err), err=True)
import traceback
traceback.print_exc()
sys.exit(1)
try:
logfile = anchore_conf.data['log_file'] if 'log_file' in anchore_conf.data else None
debug_logfile = anchore_conf.data['debug_log_file'] if 'debug_log_file' in anchore_conf.data else None
        except Exception as e:
click.echo(str(e))
ctx.obj = anchore_conf
except:
if ctx.invoked_subcommand != 'system':
click.echo('Expected, but did not find configuration file at %s' % os.path.join(AnchoreConfiguration.DEFAULT_CONFIG_FILE), err=True)
exit(1)
try:
init_output_format(json, plain, debug, verbose, quiet, log_filepath=logfile, debug_log_filepath=debug_logfile)
    except Exception as e:
click.echo('Error initializing logging: %s' % str(e))
exit(2)
if not anchore_pre_flight_check(ctx):
anchore_print_err("Error running pre-flight checks")
exit(1)
try:
if not anchore.anchore_utils.anchore_common_context_setup(ctx.obj):
anchore_print_err("Error setting up common data based on configuration")
exit(1)
except ValueError as err:
print "ERROR: " + str(err)
exit(1)
main_entry.add_command(system.system)
main_entry.add_command(query.query)
main_entry.add_command(audit.audit)
main_entry.add_command(analyzer.analyze)
main_entry.add_command(analyzer.gate)
main_entry.add_command(toolbox.toolbox)
main_entry.add_command(login.login)
main_entry.add_command(login.logout)
main_entry.add_command(login.whoami)
main_entry.add_command(feeds.feeds)
main_entry.add_command(policybundle.policybundle)
def anchore_pre_flight_check(ctx):
# helper checks
try:
subcommand = ctx.invoked_subcommand
config = ctx.obj.data
except:
return(False)
if subcommand in ['explore', 'gate', 'analyze']:
# check for some shellouts for analyzers
try:
cmd = ['dpkg-query', '--version']
sout = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except Exception as err:
anchore_print_err("Anchore requires dpkg libs and commands")
return(False)
try:
from rpmUtils.miscutils import splitFilename
cmd = ['rpm', '--version']
sout = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except Exception as err:
anchore_print_err("Anchore requires yum/rpm libs and commands")
return(False)
if subcommand in ['explore', 'gate', 'analyze', 'toolbox']:
# check DB readiness
try:
db = anchore.anchore_image_db.load(driver=config['anchore_db_driver'], config=config)
except Exception as err:
anchore_print_err("Could not set up connection to Anchore DB")
return(False)
return(True)
|
409207
|
from email.utils import formatdate
import json
from testing.http_util import ChunkedResponse, JsonResponse
from ldclient.interfaces import EventProcessor, FeatureRequester, FeatureStore, UpdateProcessor
def make_items_map(items = []):
ret = {}
for item in items:
ret[item['key']] = item
return ret
def make_put_event(flags = [], segments = []):
data = { "data": { "flags": make_items_map(flags), "segments": make_items_map(segments) } }
return 'event:put\ndata: %s\n\n' % json.dumps(data)
def make_patch_event(kind, item):
path = '%s%s' % (kind.stream_api_path, item['key'])
data = { "path": path, "data": item }
return 'event:patch\ndata: %s\n\n' % json.dumps(data)
def make_delete_event(kind, key, version):
path = '%s%s' % (kind.stream_api_path, key)
data = { "path": path, "version": version }
return 'event:delete\ndata: %s\n\n' % json.dumps(data)
def stream_content(event = None):
stream = ChunkedResponse({ 'Content-Type': 'text/event-stream' })
if event:
stream.push(event)
return stream
def poll_content(flags = [], segments = []):
data = { "flags": make_items_map(flags), "segments": make_items_map(segments) }
return JsonResponse(data)
class MockEventProcessor(EventProcessor):
def __init__(self, *_):
self._running = False
self._events = []
def stop(self):
self._running = False
def start(self):
self._running = True
def is_alive(self):
return self._running
def send_event(self, event):
self._events.append(event)
def flush(self):
pass
class MockFeatureRequester(FeatureRequester):
def __init__(self):
self.all_data = {}
self.exception = None
self.request_count = 0
def get_all_data(self):
self.request_count += 1
if self.exception is not None:
raise self.exception
return self.all_data
class MockResponse:
def __init__(self, status, headers):
self._status = status
self._headers = headers
@property
def status(self):
return self._status
def getheader(self, name):
return self._headers.get(name.lower())
class MockHttp:
def __init__(self):
self._recorded_requests = []
self._request_data = None
self._request_headers = None
self._response_func = None
self._response_status = 200
self._server_time = None
def request(self, method, uri, headers, timeout, body, retries):
self._recorded_requests.append((headers, body))
resp_hdr = dict()
if self._server_time is not None:
resp_hdr['date'] = formatdate(self._server_time / 1000, localtime=False, usegmt=True)
if self._response_func is not None:
return self._response_func()
return MockResponse(self._response_status, resp_hdr)
def clear(self):
pass
@property
def request_data(self):
if len(self._recorded_requests) != 0:
return self._recorded_requests[-1][1]
@property
def request_headers(self):
if len(self._recorded_requests) != 0:
return self._recorded_requests[-1][0]
@property
def recorded_requests(self):
return self._recorded_requests
def set_response_status(self, status):
self._response_status = status
def set_response_func(self, response_func):
self._response_func = response_func
def set_server_time(self, timestamp):
self._server_time = timestamp
def reset(self):
self._recorded_requests = []
class MockUpdateProcessor(UpdateProcessor):
def __init__(self, config, store, ready):
ready.set()
def start(self):
pass
def stop(self):
pass
def is_alive(self):
return True
def initialized(self):
return True
class CapturingFeatureStore(FeatureStore):
def init(self, all_data):
self.data = all_data
def get(self, kind, key, callback=lambda x: x):
pass
def all(self, kind, callback=lambda x: x):
pass
def delete(self, kind, key, version):
pass
def upsert(self, kind, item):
pass
@property
def initialized(self):
return True
@property
def received_data(self):
return self.data
|
409229
|
from argparse import ArgumentParser
import sys
from toncli.modules.tests.tests import TestsRunner
from toncli.modules.utils.system.argparse_fix import argv_fix
class RunTestsCommand():
def __init__(self, string_kwargs, parser: ArgumentParser):
_, kwargs = argv_fix(sys.argv, string_kwargs)
args = parser.parse_args(['run_tests', *kwargs])
test_runner = TestsRunner()
test_runner.run(args.contracts.split() if args.contracts else None,
verbose=args.verbose,
output_results=args.output_results)
|
409234
|
import os
import argparse
from itertools import cycle
import torch
from sklearn.manifold import TSNE
from torch.autograd import Variable
import matplotlib.pyplot as plt
from alternate_data_loader import MNIST_Paired
from torch.utils.data import DataLoader
from utils import transform_config
from networks import Encoder, Decoder
from utils import group_wise_reparameterize, accumulate_group_evidence, reparameterize
parser = argparse.ArgumentParser()
# add arguments
parser.add_argument('--cuda', type=bool, default=False, help="run the following code on a GPU")
parser.add_argument('--accumulate_evidence', type=str, default=False, help="accumulate class evidence before producing swapped images")
parser.add_argument('--batch_size', type=int, default=64, help="batch size for training")
parser.add_argument('--image_size', type=int, default=28, help="height and width of the image")
parser.add_argument('--num_test_samples', type=int, default=10000, help="number of test samples")
parser.add_argument('--style_dim', type=int, default=10, help="dimension of varying factor latent space")
parser.add_argument('--class_dim', type=int, default=10, help="dimension of common factor latent space")
# paths to save models
parser.add_argument('--encoder_save', type=str, default='encoder_0.1_var_reparam', help="model save for encoder")
parser.add_argument('--decoder_save', type=str, default='decoder_0.1_var_reparam', help="model save for decoder")
FLAGS = parser.parse_args()
if __name__ == '__main__':
"""
model definitions
"""
encoder = Encoder(style_dim=FLAGS.style_dim, class_dim=FLAGS.class_dim)
decoder = Decoder(style_dim=FLAGS.style_dim, class_dim=FLAGS.class_dim)
encoder.load_state_dict(
torch.load(os.path.join('checkpoints', FLAGS.encoder_save), map_location=lambda storage, loc: storage))
decoder.load_state_dict(
torch.load(os.path.join('checkpoints', FLAGS.decoder_save), map_location=lambda storage, loc: storage))
"""
variable definition
"""
z_space = torch.FloatTensor(1, FLAGS.style_dim)
'''
test
'''
# load data set and create data loader instance
print('Loading MNIST paired dataset...')
paired_mnist = MNIST_Paired(root='mnist', download=True, train=False, transform=transform_config)
loader = cycle(DataLoader(paired_mnist, batch_size=FLAGS.num_test_samples, shuffle=True, num_workers=0, drop_last=True))
image_batch, _, labels_batch = next(loader)
style_mu, style_logvar, class_mu, class_logvar = encoder(Variable(image_batch))
style_latent_embeddings = reparameterize(training=True, mu=style_mu, logvar=style_logvar)
if FLAGS.accumulate_evidence:
grouped_mu, grouped_logvar = accumulate_group_evidence(
class_mu.data, class_logvar.data, labels_batch, FLAGS.cuda
)
class_latent_embeddings = group_wise_reparameterize(
training=True, mu=grouped_mu, logvar=grouped_logvar, labels_batch=labels_batch, cuda=FLAGS.cuda
)
else:
class_latent_embeddings = reparameterize(training=True, mu=class_mu, logvar=class_logvar)
# perform t-SNE embedding
vis_data = TSNE(n_components=2, verbose=1, perplexity=30.0, n_iter=1000).fit_transform(class_latent_embeddings.data.numpy())
# plot the result
vis_x = vis_data[:, 0]
vis_y = vis_data[:, 1]
fig, ax = plt.subplots(1)
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.scatter(vis_x, vis_y, marker='.', c=labels_batch.numpy(), cmap=plt.cm.get_cmap("jet", 10))
plt.axis('off')
plt.colorbar(ticks=range(10))
plt.clim(-0.5, 9.5)
plt.show()
|
409242
|
from smol_evm.constants import MAX_UINT256
from smol_evm.context import ExecutionContext
from smol_evm.opcodes import DUP1, DUP2, POP
from smol_evm.stack import Stack, StackOverflow, StackUnderflow, InvalidStackItem
from shared import with_stack_contents
import pytest
@pytest.fixture
def stack() -> Stack:
return Stack()
@pytest.fixture
def context() -> ExecutionContext:
return ExecutionContext()
def test_underflow_pop(stack):
with pytest.raises(StackUnderflow):
stack.pop()
def test_underflow_empty_peek(stack):
with pytest.raises(StackUnderflow):
stack.peek(0)
def test_underflow_non_empty_peek(stack):
stack.push(1)
stack.push(2)
with pytest.raises(StackUnderflow):
stack.peek(2)
def test_happy_peek(stack):
stack.push(1)
stack.push(2)
assert stack.peek(0) == 2
assert stack.peek(1) == 1
def test_overflow():
stack = Stack(max_depth=1)
stack.push(0)
with pytest.raises(StackOverflow):
stack.push(1)
def test_push_pop(stack):
stack.push(1)
stack.push(2)
assert stack.pop() == 2
assert stack.pop() == 1
def test_push_uint256_max(stack):
stack.push(MAX_UINT256)
assert stack.pop() == MAX_UINT256
def test_invalid_value_negative(stack):
with pytest.raises(InvalidStackItem) as excinfo:
stack.push(-1)
assert excinfo.value.args[0]['item'] == -1
def test_invalid_value_too_big(stack):
with pytest.raises(InvalidStackItem) as excinfo:
stack.push(MAX_UINT256 + 1)
assert excinfo.value.args[0]['item'] == MAX_UINT256 + 1
def test_swap1(stack):
for x in [1, 2, 3]:
stack.push(x)
stack.swap(1)
assert stack.pop() == 2
assert stack.pop() == 3
assert stack.pop() == 1
def test_swap2(stack):
for x in [1, 2, 3]:
stack.push(x)
stack.swap(2)
assert stack.pop() == 1
assert stack.pop() == 2
assert stack.pop() == 3
def test_swap_underflow(stack):
with pytest.raises(StackUnderflow):
stack.swap(1)
def test_peek(stack):
for x in [1, 2, 3]:
stack.push(x)
assert stack.peek(0) == 3
assert stack.peek(1) == 2
assert stack.peek(2) == 1
def test_peek_underflow(stack):
with pytest.raises(StackUnderflow):
stack.peek(1)
def test_dup1(context):
DUP1.execute(with_stack_contents(context, [1, 2, 3]))
assert context.stack.pop() == 3
assert context.stack.pop() == 3
assert context.stack.pop() == 2
def test_dup2(context):
DUP2.execute(with_stack_contents(context, [1, 2, 3]))
assert context.stack.pop() == 2
assert context.stack.pop() == 3
assert context.stack.pop() == 2
def test_pop_instruction(context):
POP.execute(with_stack_contents(context, [1, 2, 3]))
assert context.stack.pop() == 2
assert context.stack.pop() == 1
|
409271
|
from datetime import datetime, timedelta
from pytz import utc
from timely_beliefs import BeliefsDataFrame, BeliefSource, Sensor, TimedBelief
def sixteen_probabilistic_beliefs() -> BeliefsDataFrame:
"""Nice BeliefsDataFrame to show.
For a single sensor, it contains 4 events, for each of which 2 beliefs by 2 sources each, described by 2 or 3
probabilistic values, depending on the source.
Note that the event resolution of the sensor is 15 minutes.
"""
n_events = 4
n_beliefs = 2
n_sources = 2
true_value = 100
example_sensor = Sensor(event_resolution=timedelta(minutes=15), name="Sensor 1")
example_source_a = BeliefSource(name="Source A")
example_source_b = BeliefSource(name="Source B")
sources = [example_source_a, example_source_b]
cps = [0.1587, 0.5, 0.8413, 0.5, 1]
# Build up a BeliefsDataFrame with various events, beliefs, sources and probabilistic accuracy (for a single sensor)
beliefs = [
TimedBelief(
source=sources[s],
sensor=example_sensor,
event_value=int(
1
* (e + 1)
* (
true_value + (10 ** (n_beliefs - b - 1)) * (cps[p] - 0.5) / 0.3413
if s % 2 == 0
else true_value * (p - 3)
)
),
belief_time=datetime(2000, 1, 1, tzinfo=utc) + timedelta(hours=b),
event_start=datetime(2000, 1, 3, 9, tzinfo=utc) + timedelta(hours=e),
cumulative_probability=cps[p],
)
for e in range(n_events) # 4 events
for b in range(n_beliefs) # 2 beliefs
for s in range(n_sources) # 2 sources
for p in range(
3 * (s % 2), 2 * (s % 2) + 3
) # alternating 3 and 2 cumulative probabilities
]
return BeliefsDataFrame(sensor=example_sensor, beliefs=beliefs)
|
409295
|
import copy
import cupy
import chainer
import chainer.functions as F
from chainer.backends import cuda
from chainer.training.extensions import Evaluator
from chainer.dataset import convert
from chainer import Reporter
from chainer import reporter as reporter_module
from src.functions.evaluation import dice_coefficient, mean_dice_coefficient
from src.functions.loss.mixed_dice_loss import dice_loss_plus_cross_entropy
class CPCSegEvaluator(Evaluator):
def __init__(
self,
config,
iterator,
target,
converter=convert.concat_examples,
device=None,
):
super().__init__(iterator, target,
converter, device,
None, None)
self.nested_label = config['nested_label']
self.seg_lossfun = eval(config['seg_lossfun'])
self.rec_loss_weight = config['vaeseg_rec_loss_weight']
self.kl_loss_weight = config['vaeseg_kl_loss_weight']
self.grid_size = config['grid_size']
self.base_channels = config['base_channels']
self.cpc_loss_weight = config['cpc_vaeseg_cpc_loss_weight']
self.cpc_pattern = config['cpc_pattern']
self.is_brats = config['is_brats']
self.dataset = config['dataset_name']
self.nb_labels = config['nb_labels']
self.crop_size = eval(config['crop_size'])
def compute_loss(self, y, t):
if self.nested_label:
loss = 0.
b, c, h, w, d = t.shape
for i in range(c):
loss += self.seg_lossfun(y[:, 2*i:2*(i+1), ...], t[:, i, ...])
else:
loss = self.seg_lossfun(y, t)
return loss
def compute_accuracy(self, y, t):
if self.nested_label:
b, c, h, w, d = t.shape
y = F.reshape(y, (b, 2, h*c, w, d))
t = F.reshape(t, (b, h*c, w, d))
return F.accuracy(y, t)
def compute_dice_coef(self, y, t):
if self.nested_label:
dice = mean_dice_coefficient(dice_coefficient(y[:, 0:2, ...], t[:, 0, ...]))
for i in range(1, t.shape[1]):
dices = dice_coefficient(y[:, 2*i:2*(i+1), ...], t[:, i, ...])
dice = F.concat((dice, mean_dice_coefficient(dices)), axis=0)
else:
dice = dice_coefficient(y, t, is_brats=self.is_brats)
return dice
def evaluate(self):
summary = reporter_module.DictSummary()
iterator = self._iterators['main']
enc = self._targets['enc']
dec = self._targets['dec']
reporter = Reporter()
observer = object()
reporter.add_observer(self.default_name, observer)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
for batch in it:
x, t = self.converter(batch, self.device)
with chainer.no_backprop_mode(), chainer.using_config('train', False):
if self.dataset == 'msd_bound':
                    # patch-wise (sliding-window) evaluation used for the msd_bound dataset
h, w, d = x.shape[2:]
hc, wc, dc = self.crop_size
if self.nested_label:
y = cupy.zeros((1, 2*(self.nb_labels-1), h, w, d), dtype='float32')
else:
y = cupy.zeros((1, self.nb_labels, h, w, d), dtype='float32')
hker = hc # kernel size
hs = int(0.5*hker) # stride
wker = wc
wc = int(0.5*wker)
dker = dc # kernel size for depth
for i in range(2):
for j in range(2):
for k in range(2):
xx = x[:, :, -i*hker:min(hker*(i+1), h),
-j*wker:min(wker*(j+1), w), -k*dker:min(dker*(k+1), d)]
hs = enc(xx)
yy = dec(hs)
y[:, :, -i*hker:min(hker*(i+1), h),
-j*wker:min(wker*(j+1), w),
-k*dker:min(dker*(k+1), d)] += yy.data
else:
hs = enc(x)
y = dec(hs)
seg_loss = self.compute_loss(y, t)
accuracy = self.compute_accuracy(y, t)
dice = self.compute_dice_coef(y, t)
mean_dice = mean_dice_coefficient(dice)
observation = {}
with reporter.scope(observation):
reporter.report({
'loss/seg': seg_loss,
'acc': accuracy,
'mean_dc': mean_dice
}, observer)
xp = cuda.get_array_module(y)
for i in range(len(dice)):
if not xp.isnan(dice.data[i]):
reporter.report({'dc_{}'.format(i): dice[i]}, observer)
summary.add(observation)
return summary.compute_mean()
|
409298
|
from unittest.mock import patch
from nose.tools import assert_equal, assert_in
from pyecharts import options as opts
from pyecharts.charts import TreeMap
example_data = [
{"value": 40, "name": "我是A"},
{
"value": 180,
"name": "我是B",
"children": [
{
"value": 76,
"name": "我是B.children",
"children": [
{"value": 12, "name": "我是B.children.a"},
{"value": 28, "name": "我是B.children.b"},
{"value": 20, "name": "我是B.children.c"},
{"value": 16, "name": "我是B.children.d"},
],
}
],
},
]
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_treemap_base(fake_writer):
c = TreeMap().add("演示数据", example_data)
c.render()
_, content = fake_writer.call_args[0]
assert_equal(c.theme, "white")
assert_equal(c.renderer, "canvas")
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_treemap_options(fake_writer):
c = TreeMap().add("演示数据", example_data, width="90%", height="100%", roam=False)
c.render()
_, content = fake_writer.call_args[0]
assert_in("width", content)
assert_in("height", content)
assert_in("roam", content)
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_treemap_levels_options(fake_writer):
c = TreeMap().add(
"演示数据",
example_data,
width="90%",
height="100%",
roam=False,
levels=opts.TreeMapLevelsOpts(),
)
c.render()
_, content = fake_writer.call_args[0]
assert_in("levels", content)
|
409333
|
import sys
from .client import LNDClient
from .common import get_macaroon, get_cert
MAJOR = sys.version_info[0]
MINOR = sys.version_info[1]
# only import the async client for python 3.6+
if MAJOR == 3 and MINOR >= 6:
from lndgrpc.aio.async_client import AsyncLNDClient
|
409347
|
from .metrics import get_metric_func, prc_auc, bce, rmse, bounded_mse, bounded_mae, \
bounded_rmse, accuracy, f1_metric, mcc_metric, sid_metric, wasserstein_metric
from .loss_functions import get_loss_func, bounded_mse_loss, \
mcc_class_loss, mcc_multiclass_loss, sid_loss, wasserstein_loss
from .cross_validate import chemprop_train, cross_validate, TRAIN_LOGGER_NAME
from .evaluate import evaluate, evaluate_predictions
from .make_predictions import chemprop_predict, make_predictions, load_model, set_features, load_data, predict_and_save
from .molecule_fingerprint import chemprop_fingerprint, model_fingerprint
from .predict import predict
from .run_training import run_training
from .train import train
__all__ = [
'chemprop_train',
'cross_validate',
'TRAIN_LOGGER_NAME',
'evaluate',
'evaluate_predictions',
'chemprop_predict',
'chemprop_fingerprint',
'make_predictions',
'load_model',
'set_features',
'load_data',
'predict_and_save',
'predict',
'run_training',
'train',
'get_metric_func',
'prc_auc',
'bce',
'rmse',
'bounded_mse',
'bounded_mae',
'bounded_rmse',
'accuracy',
'f1_metric',
'mcc_metric',
'sid_metric',
'wasserstein_metric',
'get_loss_func',
'bounded_mse_loss',
'mcc_class_loss',
'mcc_multiclass_loss',
'sid_loss',
'wasserstein_loss'
]
|
409376
|
class ThresholdStrategy:
"""
Base class for spot finder threshold strategies.
"""
def __init__(self, **kwargs):
"""
Initialise with key word arguments.
"""
pass
def __call__(self, image):
"""
Threshold the image.
"""
raise RuntimeError("Overload Me!")
class DispersionThresholdStrategy(ThresholdStrategy):
"""
A class implementing a 'gain' threshold.
"""
def __init__(self, **kwargs):
"""
Set the threshold algorithm up
"""
# Initialise the base class
ThresholdStrategy.__init__(self, **kwargs)
# Get the parameters
self._kernel_size = kwargs.get("kernel_size", (3, 3))
self._gain = kwargs.get("gain")
self._n_sigma_b = kwargs.get("n_sigma_b", 6)
self._n_sigma_s = kwargs.get("n_sigma_s", 3)
self._min_count = kwargs.get("min_count", 2)
self._threshold = kwargs.get("global_threshold", 0)
# Save the constant gain
self._gain_map = None
# Create a buffer
self.algorithm = {}
def __call__(self, image, mask):
"""
Call the thresholding function
:param image: The image to process
:param mask: The mask to use
:return: The thresholded image
"""
from dials.algorithms.image import threshold
from dials.array_family import flex
# Initialise the algorithm
try:
algorithm = self.algorithm[image.all()]
except Exception:
algorithm = threshold.DispersionThreshold(
image.all(),
self._kernel_size,
self._n_sigma_b,
self._n_sigma_s,
self._threshold,
self._min_count,
)
self.algorithm[image.all()] = algorithm
# Set the gain
if self._gain is not None:
assert self._gain > 0
self._gain_map = flex.double(image.accessor(), self._gain)
self._gain = None
# Compute the threshold
result = flex.bool(flex.grid(image.all()))
if self._gain_map:
algorithm(image, mask, self._gain_map, result)
else:
algorithm(image, mask, result)
# Return the result
return result
class DispersionExtendedThresholdStrategy(ThresholdStrategy):
"""
A class implementing a 'gain' threshold.
"""
def __init__(self, **kwargs):
"""
Set the threshold algorithm up
"""
# Initialise the base class
ThresholdStrategy.__init__(self, **kwargs)
# Get the parameters
self._kernel_size = kwargs.get("kernel_size", (3, 3))
self._gain = kwargs.get("gain")
self._n_sigma_b = kwargs.get("n_sigma_b", 6)
self._n_sigma_s = kwargs.get("n_sigma_s", 3)
self._min_count = kwargs.get("min_count", 2)
self._threshold = kwargs.get("global_threshold", 0)
# Save the constant gain
self._gain_map = None
# Create a buffer
self.algorithm = {}
def __call__(self, image, mask):
"""
Call the thresholding function
:param image: The image to process
:param mask: The mask to use
:return: The thresholded image
"""
from dials.algorithms.image import threshold
from dials.array_family import flex
# Initialise the algorithm
try:
algorithm = self.algorithm[image.all()]
except Exception:
algorithm = threshold.DispersionExtendedThreshold(
image.all(),
self._kernel_size,
self._n_sigma_b,
self._n_sigma_s,
self._threshold,
self._min_count,
)
self.algorithm[image.all()] = algorithm
# Set the gain
if self._gain is not None:
assert self._gain > 0
self._gain_map = flex.double(image.accessor(), self._gain)
self._gain = None
# Compute the threshold
result = flex.bool(flex.grid(image.all()))
if self._gain_map:
algorithm(image, mask, self._gain_map, result)
else:
algorithm(image, mask, result)
# Return the result
return result
|
409377
|
import os
import load as io
class HicoConstants(io.JsonSerializableClass):
def __init__(
self,
clean_dir=os.path.join(os.getcwd(),'data_symlinks/hico_clean'),
#proc_dir=os.path.join(os.getcwd(),'data_symlinks/hico_processed')):
proc_dir=os.path.join(os.getcwd(),'hico_processed/')):
self.clean_dir = clean_dir
self.proc_dir = proc_dir
# Clean constants
self.anno_bbox_mat = os.path.join(self.clean_dir,'anno_bbox.mat')
self.anno_mat = os.path.join(self.clean_dir,'anno.mat')
self.hico_list_hoi_txt = os.path.join(
self.clean_dir,
'hico_list_hoi.txt')
self.hico_list_obj_txt = os.path.join(
self.clean_dir,
'hico_list_obj.txt')
self.hico_list_vb_txt = os.path.join(
self.clean_dir,
'hico_list_vb.txt')
self.images_dir = os.path.join(self.clean_dir,'images')
# Processed constants
self.anno_list_json = os.path.join(self.proc_dir,'anno_list.json')
self.hoi_list_json = os.path.join(self.proc_dir,'hoi_list.json')
self.object_list_json = os.path.join(self.proc_dir,'object_list.json')
self.verb_list_json = os.path.join(self.proc_dir,'verb_list.json')
# Need to run split_ids.py
self.split_ids_json = os.path.join(self.proc_dir,'split_ids.json')
# Need to run hoi_cls_count.py
self.hoi_cls_count_json = os.path.join(self.proc_dir,'hoi_cls_count.json')
self.bin_to_hoi_ids_json = os.path.join(self.proc_dir,'bin_to_hoi_ids.json')
self.faster_rcnn_boxes = os.path.join(self.proc_dir,'faster_rcnn_boxes')
|
409452
|
class Environment:
def __init__(self, width=1000, height=1000):
self.width = width
self.height = height
self.agent = None
self.doors = []
def add_door(self, door):
self.doors.append(door)
def get_env_size(self):
size = (self.height, self.width)
return size
|
409466
|
from __future__ import print_function
import digdag
def generate():
with open("result.csv", "w") as f:
f.write("ok")
def check_generated():
with open("result.csv", "r") as f:
data = f.read()
if len(data) < 2:
raise Exception("Output data is too small")
print("ok.")
class Generator(object):
def run(self):
with open("result.csv", "w") as f:
f.write("ok")
digdag.env.store({"path": "result.csv"})
def check(self, path):
print("checking "+path)
with open(path, "r") as f:
data = f.read()
if len(data) < 2:
raise Exception("Output data is too small")
print("ok.")
|
409516
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CTLSTMCell(nn.Module):
def __init__(self, hidden_dim, beta=1.0, device=None):
super(CTLSTMCell, self).__init__()
device = device or 'cpu'
self.device = torch.device(device)
self.hidden_dim = hidden_dim
self.linear = nn.Linear(hidden_dim * 2, hidden_dim * 7, bias=True)
self.beta = beta
def forward(
self, rnn_input,
hidden_t_i_minus, cell_t_i_minus, cell_bar_im1):
dim_of_hidden = rnn_input.dim() - 1
input_i = torch.cat((rnn_input, hidden_t_i_minus), dim=dim_of_hidden)
output_i = self.linear(input_i)
gate_input, \
gate_forget, gate_output, gate_pre_c, \
gate_input_bar, gate_forget_bar, gate_decay = output_i.chunk(
7, dim_of_hidden)
gate_input = torch.sigmoid(gate_input)
gate_forget = torch.sigmoid(gate_forget)
gate_output = torch.sigmoid(gate_output)
gate_pre_c = torch.tanh(gate_pre_c)
gate_input_bar = torch.sigmoid(gate_input_bar)
gate_forget_bar = torch.sigmoid(gate_forget_bar)
gate_decay = F.softplus(gate_decay, beta=self.beta)
cell_i = gate_forget * cell_t_i_minus + gate_input * gate_pre_c
cell_bar_i = gate_forget_bar * cell_bar_im1 + gate_input_bar * gate_pre_c
return cell_i, cell_bar_i, gate_decay, gate_output
def decay(self, cell_i, cell_bar_i, gate_decay, gate_output, dtime):
# no need to consider extra_dim_particle here
# cuz this function is applicable to any # of dims
if dtime.dim() < cell_i.dim():
dtime = dtime.unsqueeze(cell_i.dim()-1).expand_as(cell_i)
cell_t_ip1_minus = cell_bar_i + (cell_i - cell_bar_i) * torch.exp(
-gate_decay * dtime)
hidden_t_ip1_minus = gate_output * torch.tanh(cell_t_ip1_minus)
return cell_t_ip1_minus, hidden_t_ip1_minus
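# --- Usage sketch (not part of the original module; batch size and dimensions are made up) ---
# One forward step produces the updated cell states, the decay rate, and the output gate;
# decay() then evolves the cell towards cell_bar over an elapsed time dtime, yielding the
# hidden state just before the next event (the continuous-time LSTM used in neural
# Hawkes process models).
if __name__ == '__main__':
    batch, hidden_dim = 2, 8
    cell = CTLSTMCell(hidden_dim)
    x = torch.randn(batch, hidden_dim)
    h = torch.zeros(batch, hidden_dim)
    c = torch.zeros(batch, hidden_dim)
    c_bar = torch.zeros(batch, hidden_dim)
    c_i, c_bar_i, decay_rate, out_gate = cell(x, h, c, c_bar)
    dtime = torch.rand(batch)
    c_next, h_next = cell.decay(c_i, c_bar_i, decay_rate, out_gate, dtime)
    print(h_next.shape)  # torch.Size([2, 8])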
|
409531
|
from autoload import load_config
from tests.clazz.testmodule import TestModule
@load_config()
class CustomModuleB1(TestModule):
pass
|
409556
|
import ibmsecurity.utilities.tools
import logging
from ibmsecurity.utilities import tools
logger = logging.getLogger(__name__)
module_uri = "/isam/felb/configuration/services"
requires_modules = None
requires_version = None
requires_model = "Appliance"
def add(isamAppliance, service_name, address, active, port, weight, secure, ssllabel, check_mode=False, force=False):
"""
Creating a server
"""
check_exist, warnings = _check_exist(isamAppliance, service_name, address, port)
if force is True or check_exist is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_post("Creating a server",
"{0}/{1}/servers".format(module_uri, service_name),
{
"active": active,
"address": address,
"port": port,
"weight": weight,
"secure": secure,
"ssllabel": ssllabel
},
requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model)
else:
return isamAppliance.create_return_object(warnings=warnings)
def delete(isamAppliance, service_name, address, port, check_mode=False, force=False):
"""
    Deletes a server from the specified service
"""
check_exist, warnings = _check_exist(isamAppliance, service_name, address, port)
if force is True or check_exist is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
id = address + ":" + str(port)
return isamAppliance.invoke_delete("Deleting a server",
"{0}/{1}/servers/{2}".format(module_uri, service_name, id),
requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model)
else:
return isamAppliance.create_return_object(warnings=warnings)
def get(isamAppliance, service_name, address, port, check_mode=False, force=False):
"""
    Retrieves a server from the specified service
"""
id = address + ":" + str(port)
return (isamAppliance.invoke_get("Retrieving a server", "{0}/{1}/servers/{2}".format(module_uri, service_name, id),
requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model))
def get_all(isamAppliance, service_name, check_mode=False, force=False):
"""
Retrieves a list of servers under a specified service
"""
return isamAppliance.invoke_get("Retrieving servers for a service",
"{0}/{1}/servers".format(module_uri, service_name),
requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model)
def update(isamAppliance, service_name, address, active, port, weight, secure=False, ssllabel=None, new_address=None, new_port=None, check_mode=False, force=False):
"""
Updating server
"""
id = address + ":" + str(port)
json_data = {'active': active, 'secure': secure, 'ssllabel': ssllabel, 'weight': weight}
if new_address is not None:
json_data['address'] = new_address
else:
json_data['address'] = address
if new_port is not None:
json_data['port'] = new_port
else:
json_data['port'] = port
change_required, warnings = _check_update(isamAppliance, service_name, address, port, json_data)
if force is True or change_required is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put("Updating a server",
"{0}/{1}/servers/{2}".format(module_uri, service_name, id),
json_data,
requires_modules=requires_modules,
requires_version=requires_version,
requires_model = requires_model)
else:
return isamAppliance.create_return_object(warnings=warnings)
def _check_update(isamAppliance, service_name, address, port, json_data):
"""
    idempotency test - determine whether an update to the existing server is required
"""
ret_obj = get(isamAppliance, service_name, address, port)
warnings = ret_obj['warnings']
ret_data = ret_obj['data']
if 'id' in ret_data:
del ret_data['id']
else:
return False, warnings
sorted_ret_data = tools.json_sort(ret_data)
sorted_json_data = tools.json_sort(json_data)
logger.debug("Sorted Existing Data:{0}".format(sorted_ret_data))
logger.debug("Sorted Desired Data:{0}".format(sorted_json_data))
if sorted_ret_data != sorted_json_data:
return True, warnings
else:
return False, warnings
def _check_exist(isamAppliance, service_name, address, port):
"""
    idempotency test - check whether the server already exists under the given service
"""
id = address + ":" + str(port)
ret_obj = get_all(isamAppliance, service_name)
warnings = ret_obj['warnings']
for obj in ret_obj['data']:
if obj['id'] == id:
return True, warnings
return False, warnings
def compare(isamAppliance1, service_name1, isamAppliance2, service_name2):
"""
Compare cluster configuration between two appliances
"""
ret_obj1 = get_all(isamAppliance1, service_name1)
ret_obj2 = get_all(isamAppliance2, service_name2)
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[])
|
409574
|
import importlib
from io import BytesIO, StringIO
import logging
import os
from os.path import abspath, basename, dirname, join
from django.conf import settings
from django.core.files.base import File
import pyimagediet as diet
logger = logging.getLogger('image_diet')
THIS_DIR = abspath(dirname(__file__))
DEFAULT_STORAGE = 'django.core.files.storage.FileSystemStorage'
STORAGE_MODULE, STORAGE_CLASSNAME = getattr(
settings, 'DIET_STORAGE', DEFAULT_STORAGE).rsplit('.', 1)
storage_module = importlib.import_module(STORAGE_MODULE)
STORAGE_CLASS = getattr(storage_module, STORAGE_CLASSNAME)
CUSTOM_CONFIG = getattr(settings, 'DIET_CONFIG', '')
def get_configuration():
default_config = join(THIS_DIR, 'default.yml')
config = diet.read_yaml_configuration(default_config)
diet.update_configuration(config,
diet.read_yaml_configuration(CUSTOM_CONFIG))
return config
class DietMixin(object):
def __init__(self, *args, **kwargs):
self.config = get_configuration()
self.temp_dir = self.config.get('tempdir', '/tmp')
super(DietMixin, self).__init__(*args, **kwargs)
def save_to_temp(self, fullname, content):
name = basename(fullname)
path = join(self.temp_dir, name)
        mode = 'wb' if isinstance(content, bytes) else 'wt'
with open(path, mode) as f:
f.write(content)
return path
def _save(self, name, content):
file_content = content.read()
tmppath = ""
f = None
try:
tmppath = self.save_to_temp(name, file_content)
changed = diet.diet(tmppath, self.config)
if changed: # pragma: no branch
# If changed, then tmppath points to compressed contents.
with open(tmppath, 'rb') as f:
file_content = f.read() # pragma: no branch
except (diet.DietException) as e:
logger.error(e.msg)
raise
except (OSError, IOError) as e:
msg = 'Cannot save to temp dir ({0})'.format(str(e))
logger.error(msg)
raise
        finally:
            # Always clean up after ourselves; the temp file may be missing if
            # save_to_temp() itself failed before creating it.
            if tmppath and os.path.exists(tmppath):
                os.remove(tmppath)
try:
f = File(BytesIO(file_content))
# TypeError is for catching different handling of text in Python3
except TypeError: # pragma: no branch
f = File(StringIO(file_content))
        return super(DietMixin, self)._save(name, f)
class DietStorage(DietMixin, STORAGE_CLASS):
pass
|
409576
|
from isign_base_test import IsignBaseTest
import logging
log = logging.getLogger(__name__)
class TestSubBundles(IsignBaseTest):
def test_matching_provisioning_profiles(self):
""" TODO - Given an app with sub-bundles, test that provisioning profiles are matched to the correct bundles """
# Get an app with sub-bundles, like the WatchKit app
# In arguments to isign.resign, use multiple provisioning profiles which cannot be applied to all sub-bundles
# Check that the app has the right pprofs in the right places
# On MacOS, test that the app verifies correctly
pass
def test_matching_entitlements(self):
""" TODO - Given an app with sub-bundles, test that entitlements are replaced in the correct bundles """
# Get an app with sub-bundles, like the WatchKit app
# In arguments to isign.resign, use multiple entitlements files
# Check that entitlements are updated in the right places
# On MacOS, check that the app verifies correctly
pass
|
409612
|
import time
from contextlib import contextmanager
from random import random
from typing import Generator, Optional
import redis
class UnableToGetLock(Exception):
pass
class Redis(object):
UnableToGetLock = UnableToGetLock
def __init__(self, app=None):
self.redis = None
if app:
self.init_app(app)
def init_app(self, app):
self.redis = redis.from_url(app.config["REDIS_URL"])
self.logger = app.logger
def __getattr__(self, name):
return getattr(self.redis, name)
@contextmanager
def lock(
self,
lock_key: str,
timeout: float = 3.0,
expire: Optional[float] = None,
nowait: bool = False,
) -> Generator[None, None, None]:
"""
Context manager for using a redis lock with the given key
Arguments:
lock_key (string): key to lock
timeout (float): how long (in seconds) to try locking.
An exception is raised if the lock can't be acquired.
expire (float): how long (in seconds) we can hold lock before it is
automatically released
nowait (bool): if True, don't block if can't acquire the lock
(will instead raise an exception)
"""
conn = self.redis
if expire is None:
expire = timeout
delay = 0.01 + random() / 10
lock = conn.lock(lock_key, timeout=expire, sleep=delay)
self.logger.info("Acquiring lock on %s", lock_key)
acquired = lock.acquire(blocking=not nowait, blocking_timeout=timeout)
start = time.time()
if not acquired:
raise self.UnableToGetLock("Unable to fetch lock on %s" % (lock_key,))
self.logger.info("Successfully acquired lock on %s", lock_key)
try:
yield
finally:
self.logger.info("Releasing lock on %s", lock_key)
try:
lock.release()
except Exception:
self.logger.exception(
"Error releasing lock on %s, acquired around %.2f s ago",
lock_key,
time.time() - start,
)
def incr(self, key: str):
self.redis.incr(key)
def decr(self, key: str):
self.redis.decr(key)
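# --- Usage sketch (added illustration, not part of the original module) ---
# Assumes a Flask-style app exposing config["REDIS_URL"] and .logger, plus a
# reachable Redis server; `app` and the work inside the block are placeholders.
def _lock_usage_example(app):
    client = Redis(app)
    try:
        with client.lock("reports:rebuild", timeout=5.0, expire=30.0, nowait=True):
            pass  # do the work that must not run concurrently
    except Redis.UnableToGetLock:
        app.logger.warning("Another worker holds the lock; skipping this run.")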
|
409664
|
from cleo.helpers import option
from poetry.core.masonry.builder import Builder
from poetry.core.version.helpers import format_python_constraint
from .env_command import EnvCommand
from ...managed_project import ManagedProject
class BuildCommand(EnvCommand):
name = "build"
description = "Builds a package, as a tarball and a wheel by default."
options = [
option("format", "f", "Limit the format to either sdist or wheel.", flag=False),
option("keep-python-bounds", "k", "don't tighten bounds to python version requirements based on dependencies",
flag=True)
]
loggers = [
"poetry.core.masonry.builders.builder",
"poetry.core.masonry.builders.sdist",
"poetry.core.masonry.builders.wheel",
]
def handle(self) -> None:
fmt = "all"
if self.option("format"):
fmt = self.option("format")
for poetry in self.poetry.projects_graph():
self._build(fmt, poetry)
def _build(self, fmt: str, poetry: ManagedProject):
if poetry.env is None:
return
package = poetry.package
self.line(
"Building <c1>{}</c1> (<c2>{}</c2>)".format(
package.pretty_name, package.version
)
)
env = poetry.env
if not self.option("keep-python-bounds"):
from poetry.puzzle import Solver
from poetry.repositories import Repository
self._io.write_line("Tightening bounds to python version requirements based on dependencies")
solver = Solver(poetry, Repository(), Repository())
bounds = solver.solve().calculate_interpreter_bounds(package.python_constraint)
bounds_constraint_str = format_python_constraint(bounds)
poetry.package.python_versions = bounds_constraint_str
poetry.pyproject.data["tool"]["poetry"]["dependencies"]["python"] = bounds_constraint_str
self._io.write_line(f"Will require python version: {bounds_constraint_str}")
builder = Builder(poetry)
builder.build(fmt, executable=env.python)
|
409684
|
from pyffs.automaton_generation.utils import generate_all_bit_vectors_under_n
def test_generate_all_bit_vectors_under_0():
generated = generate_all_bit_vectors_under_n(0)
expected = [()]
assert expected == generated
def test_generate_all_bit_vectors_under_1():
generated = generate_all_bit_vectors_under_n(1)
expected = [(), (0,), (1,)]
assert expected == generated
def test_generate_all_bit_vectors_under_3():
generated = generate_all_bit_vectors_under_n(3)
expected = [
(),
(0,),
(1,),
(0, 0),
(0, 1),
(1, 0),
(1, 1),
(0, 0, 0),
(0, 0, 1),
(0, 1, 0),
(0, 1, 1),
(1, 0, 0),
(1, 0, 1),
(1, 1, 0),
(1, 1, 1),
]
assert expected == generated
|
409701
|
import collections
import enum
import filecmp
import itertools
import shutil
import sys
import click
from .. import configs
from .common import (
check_installation, get_active_names, get_version,
set_active_versions, version_command,
)
class Overwrite(enum.Enum):
yes = 'yes'
no = 'no'
smart = 'smart'
def safe_publish(target, *, overwrite, comparer, writer, quiet):
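    # "yes": always write; "no": never write; "smart": write only when the
    # target is missing or its content differs from the source.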
should_write = (overwrite == Overwrite.yes or (
overwrite == Overwrite.smart and not (target.exists() and comparer())
))
if not should_write:
return False
if not quiet:
click.echo(' {}'.format(target.name))
try:
writer()
except OSError as e:
click.echo('WARNING: Failed to copy {}.\n{}: {}'.format(
target.name, type(e).__name__, e,
), err=True)
return False
return True
def publish_file(source, target, *, overwrite, quiet):
def comp():
return filecmp.cmp(str(source), str(target))
def copy():
shutil.copy2(str(source), str(target))
return safe_publish(
target, quiet=quiet, overwrite=overwrite, comparer=comp, writer=copy,
)
def publish_shim(source, target, *, relink, overwrite, quiet):
"""Write a shim.
A shim is an pre-compiled executable, with extra data appended to the end
of it. The extra data contain what command(s) the shim should attempt to
execute when launched. Arguments are seperated by NULL characters, and
commands (if there are multiple) are seperated by line feeds. Two extra
line feeds signify the end of the command sequence.
The extra data are encoded with UTF-8, and written *backwards* into the
executable. This makes it easier to read data out.
"""
cmds = [[str(source.resolve(strict=True))]]
if relink:
cmds.append([
sys.executable, '-m', 'pythonup',
'link', '--all', '--overwrite=smart', '--no-user-friendly',
])
data = bytes(reversed(
('\n'.join('\0'.join(args) for args in cmds) + '\n\n').encode('utf-8')
))
def comp():
return target.read_bytes().endswith(data)
def write():
target.write_bytes(configs.get_shim_path().read_bytes() + data)
return safe_publish(
target, quiet=quiet, overwrite=overwrite, comparer=comp, writer=write,
)
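# --- Illustration (added; not part of the original module) ---
# A minimal sketch of reading the command list back out of a shim written by
# publish_shim(), following the reversed, NULL/newline-delimited layout
# described in its docstring. `shim_path` is assumed to be a pathlib.Path.
def read_shim_commands(shim_path):
    reversed_bytes = bytes(reversed(shim_path.read_bytes()))
    payload = reversed_bytes.split(b'\n\n', 1)[0].decode('utf-8')
    return [line.split('\0') for line in payload.split('\n') if line]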
def safe_unlink(p):
if not p.exists():
return
try:
p.unlink()
except OSError as e:
click.echo('Failed to remove {} ({})'.format(p, e), err=True)
def collect_version_scripts(versions):
names = set()
scripts = []
shims = []
for version in versions:
version_scripts_dir = version.get_installation().scripts_dir
if not version_scripts_dir.is_dir():
continue
for path in version_scripts_dir.iterdir():
blacklisted_stems = {
# Encourage people to always use qualified commands.
'easy_install', 'pip',
# Fully qualified pip is already populated on installation.
'pip{}'.format(version.arch_free_name),
}
shimmed_stems = {
# Major version names, e.g. "pip3".
'pip{}'.format(version.version_info[0]),
# Fully-qualified easy_install.
'easy_install-{}'.format(version.arch_free_name),
}
if path.name in names or path.stem in blacklisted_stems:
continue
names.add(path.name)
if path.stem in shimmed_stems:
shims.append(path)
else:
scripts.append(path)
return scripts, shims
def activate(versions, *, overwrite=Overwrite.yes,
allow_empty=False, quiet=False):
if not allow_empty and not versions:
click.echo('No active versions.', err=True)
click.get_current_context().exit(1)
source_scripts, shimmed_scripts = collect_version_scripts(versions)
scripts_dir = configs.get_scripts_dir_path()
using_scripts = set()
    # TODO: Distinguish between `use` and automatic hook after shimmed pip
    # execution. The latter should only write scripts that actually changed, or
    # at least should only log those writes (and overwrite others silently).
if source_scripts or shimmed_scripts or versions:
if not quiet:
click.echo('Publishing scripts....')
for source in source_scripts:
target = scripts_dir.joinpath(source.name)
if not source.is_file():
continue
using_scripts.add(target)
publish_file(source, target, overwrite=overwrite, quiet=quiet)
for source in shimmed_scripts:
target = scripts_dir.joinpath(source.name)
if target in using_scripts:
continue
using_scripts.add(target)
publish_shim(
source, target, relink=True, overwrite=overwrite, quiet=quiet,
)
for version in versions:
target = version.python_major_command
if target in using_scripts:
continue
using_scripts.add(target)
publish_shim(
version.get_installation().python, target,
relink=False, overwrite=overwrite, quiet=quiet,
)
set_active_versions(versions)
stale_scripts = set(scripts_dir.iterdir()) - using_scripts
if stale_scripts:
if not quiet:
click.echo('Cleaning stale scripts...')
for script in stale_scripts:
if not quiet:
click.echo(' {}'.format(script.name))
safe_unlink(script)
def link_commands(version):
installation = version.get_installation()
for path in version.python_commands:
click.echo('Publishing {}'.format(path.name))
publish_shim(
installation.python, path,
relink=False, overwrite=Overwrite.yes, quiet=True,
)
for path in version.pip_commands:
click.echo('Publishing {}'.format(path.name))
publish_shim(
installation.pip, path,
relink=True, overwrite=Overwrite.yes, quiet=True,
)
def unlink_commands(version):
for p in itertools.chain(version.python_commands, version.pip_commands):
click.echo('Unlinking {}'.format(p.name))
safe_unlink(p)
def update_active_versions(*, remove=frozenset()):
current_active_names = set(get_active_names())
active_names = [n for n in current_active_names]
for version in remove:
try:
active_names.remove(version.name)
except ValueError:
continue
click.echo('Deactivating {}'.format(version))
if len(current_active_names) != len(active_names):
activate([get_version(n) for n in active_names], allow_empty=True)
@version_command(plural=True)
def use(ctx, versions, add):
if add is None and not versions:
# Bare "use": Display active versions.
names = get_active_names()
if names:
click.echo(' '.join(names))
else:
click.echo('Not using any versions.', err=True)
return
for version in versions:
check_installation(version)
active_versions = [
get_version(name)
for name in get_active_names()
]
if add:
active_names = set(v.name for v in active_versions)
new_versions = []
for v in versions:
if v.name in active_names:
click.echo('Already using {}.'.format(v), err=True)
else:
new_versions.append(v)
versions = active_versions + new_versions
    # Remove duplicate inputs (keep the first appearance).
versions = list(collections.OrderedDict(
(version.name, version) for version in versions
).values())
if active_versions == versions:
click.echo('No version changes.', err=True)
return
if versions:
click.echo('Using: {}'.format(', '.join(v.name for v in versions)))
else:
click.echo('Not using any versions.')
activate(versions, allow_empty=(not add))
def link(ctx, command, link_all, overwrite, user_friendly):
if not link_all and not command: # This mistake is more common.
click.echo(ctx.get_usage(), color=ctx.color)
click.echo('\nError: Missing argument "command".', color=ctx.color)
ctx.exit(1)
if link_all and command:
click.echo('--all cannot be used with a command.', err=True)
ctx.exit(1)
active_names = get_active_names()
if not active_names:
if user_friendly:
message = (
'Not using any versions.\n'
'HINT: Use "pythonup use" to use a version first.'
)
click.echo(message, err=True)
ctx.exit(1)
if link_all:
activate(
[get_version(n) for n in active_names],
overwrite=overwrite, allow_empty=True,
)
return
command_name = command # Better variable names.
command = None
for version_name in active_names:
version = get_version(version_name)
try:
command = version.get_installation().find_script(command_name)
except FileNotFoundError:
continue
break
if command is None:
click.echo('Command "{}" not found. Looked in {}: {}'.format(
command_name,
'version' if len(active_names) == 1 else 'versions',
', '.join(active_names),
), err=True)
ctx.exit(1)
target_name = command.name
target = configs.get_scripts_dir_path().joinpath(target_name)
# This can be done in publish_file, but we provide a better error message.
if overwrite != Overwrite.yes and target.exists():
if filecmp.cmp(str(command), str(target)):
return # If the two files are identical, we're good anyway.
        click.echo('{} exists. Use --overwrite=yes to overwrite.'.format(target_name), err=True)
ctx.exit(1)
ok = publish_file(command, target, overwrite=Overwrite.yes, quiet=True)
if ok:
click.echo('Linked {} from {}'.format(target_name, version))
|
409734
|
import _init_paths
import argparse
import os
import sys
import time
import pprint
import cPickle
import cv2
import h5py
import numpy as np
import caffe
from vrd.test import test_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
from fast_rcnn.nms_wrapper import nms
from datasets.factory import get_imdb
from utils.timer import Timer
from utils.blob import im_list_to_blob
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
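        # Choose, for each RoI, the pyramid level whose scaled area is closest
        # to 224 x 224 (the target RoI area used by this implementation).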
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data': None, 'rois': None}
blobs['data'], im_scale_factors = _get_image_blob(im)
if not cfg.TEST.HAS_RPN:
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def im_detect(net, im, boxes=None):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals or None (for RPN)
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, im_scales = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
blobs['rois'] = blobs['rois'][index, :]
boxes = boxes[index, :]
if cfg.TEST.HAS_RPN:
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
if cfg.TEST.HAS_RPN:
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
else:
net.blobs['rois'].reshape(*(blobs['rois'].shape))
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
if cfg.TEST.HAS_RPN:
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
else:
forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
if cfg.TEST.HAS_RPN:
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
# if cfg.TEST.BBOX_REG:
if True:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
fc7 = net.blobs['fc7'].data
return net.blobs['cls_score'].data[:, :], scores, fc7, pred_boxes
def save_result(split):
caffe.set_mode_gpu()
caffe.set_device(0)
m = h5py.File('data/sg_vrd_meta.h5', 'r', 'core')
net = caffe.Net('models/vrd/vgg16/faster_rcnn_end2end/test.prototxt',
'models/vr_obj_model.caffemodel',
caffe.TEST)
net.name = 'sgvrd'
imdb = get_imdb('sg_vrd_2016_%s'%split)
imdb.competition_mode(0)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
h5path = 'output/sg_vrd_2016_%s.hdf5'%split
# if os.path.exists(h5path):
# os.remove(h5path)
h5f = h5py.File(h5path)
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
root = 'data/sg_vrd_2016/Data/sg_%s_images/'%split
_t = {'im_detect': Timer(), 'misc': Timer()}
cnt = 0
thresh = .15
for path, subdirs, files in os.walk(root):
for name in files:
cnt += 1
im_idx = name.split('.')[0]
fpath = os.path.join(path, name)
im = cv2.imread(fpath)
            if im is None:
                print fpath
                continue
box_proposals = None
_t['im_detect'].tic()
score_raw, scores, fc7, boxes = im_detect(net, im, box_proposals)
_t['im_detect'].toc()
# scores = score_raw
res_locations = []
res_visuals = []
res_classemes = []
res_cls_confs = []
boxes_tosort = []
_t['misc'].tic()
for j in xrange(1, 101):
inds = np.where(scores[:, j] > 0.01)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
if len(cls_scores) <= 0:
boxes_tosort.append(cls_dets)
continue
res_loc = cls_boxes
res_vis = fc7[inds]
res_classeme = scores[inds]
res_cls_conf = np.column_stack((np.zeros(cls_scores.shape[0]) + j, cls_scores))
keep = nms(cls_dets, .2, force_cpu=True) # nms threshold
cls_dets = cls_dets[keep, :]
res_loc = res_loc[keep]
res_vis = res_vis[keep]
res_classeme = res_classeme[keep]
res_cls_conf = res_cls_conf[keep]
res_classemes.extend(res_classeme)
res_visuals.extend(res_vis)
res_locations.extend(res_loc)
res_cls_confs.extend(res_cls_conf)
boxes_tosort.append(cls_dets)
# filter based on confidence
inds = np.where(np.array(res_cls_confs)[:, 1] > thresh)[0]
res_classemes = np.array(res_classemes)[inds]
res_visuals = np.array(res_visuals)[inds]
res_locations = np.array(res_locations)[inds]
res_cls_confs = np.array(res_cls_confs)[inds]
h5f.create_dataset(im_idx + '/classemes', dtype='float16', data=res_classemes.astype(np.float16))
h5f.create_dataset(im_idx + '/visuals', dtype='float16', data=res_visuals.astype(np.float16))
h5f.create_dataset(im_idx + '/locations', dtype='short', data=res_locations.astype(np.short))
h5f.create_dataset(im_idx + '/cls_confs', dtype='float16', data=res_cls_confs.astype(np.float16))
_t['misc'].toc()
print 'im_detect: {:d} {:.3f}s {:.3f}s' \
.format(cnt, _t['im_detect'].average_time,
_t['misc'].average_time)
save_result('test')
save_result('train')
|
409739
|
import re
sampletext="""
interface fa0/1
switchport mode trunk
no shut
interface fa0/0
no shut
interface fa1/0
switchport mode trunk
no shut
interface fa2/0
shut
interface fa2/1
switchport mode trunk
no shut
interface te3/1
switchport mode trunk
shut
"""
sampletext=sampletext.split("interface")
# Check for interfaces that are in trunk mode
for chunk in sampletext:
    if "mode trunk" in chunk:
        intname = re.search(r"(fa|te)\d+/\d+", chunk)
        print("Trunk enabled on " + intname.group(0))
|
409765
|
from .method_selector import *
from .o3d_aliases import *
from .run_icp import *
from .scan2mesh import *
from .scan2mesh_icp import *
|
409775
|
import datetime
import time, json
from pathlib import Path
from bson import json_util,ObjectId
import jimi
# audit Class
class _audit(jimi.db._document):
_dbCollection = jimi.db.db["audit"]
def add(self, eventSource, eventType, data):
auditData = { "time" : time.time(), "systemID" : systemSettings["systemID"], "source" : eventSource, "type" : eventType, "data" : data }
try:
if auditSettings["db"]["enabled"]:
self._dbCollection.insert_one(jimi.helpers.unicodeEscapeDict(auditData))
except KeyError:
self._dbCollection.insert_one(jimi.helpers.unicodeEscapeDict(auditData))
try:
if auditSettings["file"]["enabled"]:
filename = "{0}{1}{2}.txt".format(datetime.date.today().day,datetime.date.today().month,datetime.date.today().year)
logFile = Path("{0}/{1}".format(auditSettings["file"]["logdir"],filename))
with open(logFile, "a") as logFile:
logLine = "{0}\r\n".format(json.loads(json_util.dumps(auditData))).replace(": True",": true").replace(": False",": false").replace(": None",": null")
logFile.write(logLine)
except KeyError:
pass
auditSettings = jimi.settings.getSetting("audit",None)
systemSettings = jimi.config["system"]
|
409800
|
import pytest
from django.contrib.auth.models import User
from faker import Faker
from news.models import News
@pytest.fixture
def user_factory():
fake = Faker(locale=['en'])
name = fake.unique.name()
user = User.objects.create_user(username=name,
email=f'{name.replace(" ", "_")}<EMAIL>',
password='<PASSWORD>')
user.save()
return user
@pytest.fixture
def news_factory():
fake = Faker(locale=['en'])
title = fake.name()
preview_content = fake.name()
content = f'{title}\n{preview_content}'
news = News(title=title,
preview_content=preview_content,
content=content)
news.save()
return news
|
409870
|
from plaza_routing.integration.routing_engine_service import RoutingEngine
from plaza_routing.integration.routing_strategy.graphhopper_strategy import GraphHopperRoutingStrategy
def get_walking_route(start: tuple, destination: tuple) -> dict:
""" returns the walking route for a start and destination based on a routing strategy """
routing_engine = RoutingEngine(GraphHopperRoutingStrategy())
return routing_engine.route(start, destination)
|
409906
|
import os
import sys
sys.path.append('../../common')
from env_indigo import *
import collections
indigo = Indigo()
indigo.setOption("ignore-noncritical-query-features", "true")
print("***** Aromaticity models *****")
def executeOperation (m, func, msg):
try:
func(m)
print(msg + m.smiles())
except IndigoException as e:
print(msg + getIndigoExceptionText(e))
def arom (m):
m.aromatize()
def dearom (m):
m.dearomatize()
def noneFunc (m):
pass
groups = [
("molecules/basic-generic.sdf", indigo.iterateSDFile),
("molecules/basic-generic.smi", indigo.iterateSmilesFile),
]
for file, method in groups:
print(file)
for idx, m in enumerate(method(joinPathPy(file, __file__))):
print(idx)
for model in ["basic", "generic"]:
print(model)
indigo.setOption("aromaticity-model", model)
try:
m1 = indigo.loadMolecule(m.rawData())
m2 = indigo.loadMolecule(m.rawData())
executeOperation(m1, noneFunc, " Original: ")
executeOperation(m1, arom, " Arom: ")
executeOperation(m2, dearom, " Dearom: ")
executeOperation(m1, dearom, " Arom->Dearom: ")
executeOperation(m2, arom, " Dearom->Arom: ")
except IndigoException as e:
print(" %s" % (getIndigoExceptionText(e)))
print("**** Dearomatization ***")
indigo.setOption("aromaticity-model", "generic")
total_count = 0
bad_mols = collections.defaultdict(list)
for m2 in indigo.iterateSDFile(joinPathPy("molecules/mols-to-dearom.sdf", __file__)):
indigo.setOption("aromaticity-model", "basic")
sm = m2.smiles()
print(sm)
m2.aromatize()
sm2 = m2.smiles()
if sm2 != sm:
print(" aromatized: " + sm2)
indigo.setOption("aromaticity-model", "generic")
for value in [True, False]:
indigo.setOption("dearomatize-verification", value)
m = indigo.loadMolecule(m2.rawData())
try:
m.dearomatize()
sys.stdout.write("%d -> %s" % (value, m.smiles()))
except IndigoException as e:
print(" %s" % (getIndigoExceptionText(e)))
        # check if the structure still has aromatic bonds
arom = any(b.bondOrder() == 4 for b in m.iterateBonds())
if arom:
bad_mols[value].append(m2.smiles())
sys.stdout.write(" <- Aromatic")
sys.stdout.write("\n")
m.aromatize()
if m.smiles() != m2.smiles():
print(" arom -> " + m.smiles())
total_count += 1
print("Number of molecules that cannot be dearomatized out of %d:" % (total_count))
print(" without verification: %d" % (len(bad_mols[False])))
print(" with verification: %d" % (len(bad_mols[True])))
|
409912
|
import contextlib
@contextlib.contextmanager
def temp_setattr(ob, attr, new_value):
"""Temporarily set an attribute on an object for the duration of the
context manager."""
replaced = False
old_value = None
if hasattr(ob, attr):
try:
if attr in ob.__dict__:
replaced = True
except AttributeError:
if attr in ob.__slots__:
replaced = True
if replaced:
old_value = getattr(ob, attr)
setattr(ob, attr, new_value)
    try:
        yield replaced, old_value
    finally:
        # Restore the original state even if the with-block raises.
        if not replaced:
            delattr(ob, attr)
        else:
            setattr(ob, attr, old_value)
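# --- Usage sketch (added illustration, not part of the original module) ---
# Temporarily overrides an attribute for the duration of the with-block and
# restores the previous state on exit; the Config class below is a placeholder.
if __name__ == '__main__':
    class Config:
        debug = False
    cfg = Config()
    with temp_setattr(cfg, 'debug', True) as (replaced, old_value):
        assert cfg.debug is True
    assert cfg.debug is False   # the temporary instance attribute was removed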
|
409938
|
import io
from pyasn1.codec.ber import decoder
from pyasn1.type import univ, namedtype
class HashedType(univ.Sequence):
componentType = namedtype.NamedTypes(namedtype.NamedType('oid', univ.ObjectIdentifier()),
namedtype.NamedType('null', univ.Null())
)
class InnerSignatures(univ.Sequence):
componentType = namedtype.NamedTypes(namedtype.NamedType('first_sign_part', univ.Integer()),
namedtype.NamedType('second_sign_part', univ.Integer())
)
class Signature(univ.Sequence):
componentType = namedtype.NamedTypes(namedtype.NamedType('hashed', HashedType()),
namedtype.NamedType('sign', univ.OctetString())
)
class VirgilSignExtractor:
@classmethod
def extract_sign(cls, signature_data):
"""
Extract signature bytes from virgil crypto pyasn1 structure
Args:
signature_data: signature bytes
        Returns:
            bytearray containing the two decoded signature integers concatenated (32 bytes each)
        """
asn_one_signature_no_compress = decoder.decode(
signature_data,
asn1Spec=Signature()
)
asn_one_signature_no_compress = decoder.decode(
asn_one_signature_no_compress[0]['sign'],
asn1Spec=InnerSignatures()
)
return cls.__long_to_bytes(int(asn_one_signature_no_compress[0]['first_sign_part'])) + \
cls.__long_to_bytes(int(asn_one_signature_no_compress[0]['second_sign_part']))
@classmethod
def __long_to_bytes(cls, val, endianness='big'):
byte_buffer = io.BytesIO()
byte_buffer.write(int(val).to_bytes(32, byteorder=endianness, signed=False))
return bytearray(byte_buffer.getvalue())
|
410006
|
from .service import TenantService
from .midleware import tenant_middleware_factory, tenant_handler
__all__ = (
'TenantService',
'tenant_middleware_factory',
'tenant_handler',
)
|
410034
|
import pytest
from wootrade import Client
from wootrade import ThreadedWebsocketManager
import os
API = os.getenv("API")
SECRET = os.getenv("SECRET")
APPLICATION_ID = os.getenv("APPLICATION_ID")
def test_create_TWM():
wsm = ThreadedWebsocketManager(API, SECRET, APPLICATION_ID, False)
|
410084
|
import os
import mock
import pytest
import bridgy.inventory
from bridgy.inventory import InventorySet, Instance
from bridgy.inventory.aws import AwsInventory
from bridgy.config import Config
def get_aws_inventory(name):
test_dir = os.path.dirname(os.path.abspath(__file__))
cache_dir = os.path.join(test_dir, 'aws_stubs')
aws_obj = AwsInventory(name=name, cache_dir=cache_dir, access_key_id='access_key_id',
secret_access_key='secret_access_key', session_token='session_token',
region='region')
return aws_obj
def test_inventory_set(mocker):
test_dir = os.path.dirname(os.path.abspath(__file__))
cache_dir = os.path.join(test_dir, 'aws_stubs')
aws_obj = get_aws_inventory(name='aws')
inventorySet = InventorySet()
inventorySet.add(aws_obj)
inventorySet.add(aws_obj)
print(aws_obj.instances())
all_instances = inventorySet.instances()
aws_instances = [
Instance(name=u'test-forms', address=u'devbox', aliases=(u'devbox', u'ip-172-31-8-185.us-west-2.compute.internal', u'i-e54cbaeb'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'devlab-forms', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-138.us-west-2.compute.internal', u'i-f7d726f9'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-account-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-139.us-west-2.compute.internal', u'i-f4d726fa'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'devlab-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-142.us-west-2.compute.internal', u'i-f5d726fb'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'devlab-game-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-140.us-west-2.compute.internal', u'i-f2d726fc'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-game-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-141.us-west-2.compute.internal', u'i-f3d726fd'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-2-38.us-west-2.compute.internal', u'i-0f500447384e95942'), source='aws (aws)', container_id=None, type='VM'),
Instance(name=u'test-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-2-39.us-west-2.compute.internal', u'i-0f500447384e95943'), source='aws (aws)', container_id=None, type='VM')
]
expected_instances = aws_instances + aws_instances
assert len(all_instances) == len(expected_instances)
assert set(all_instances) == set(expected_instances)
def test_inventory_set_filter_sources(mocker):
test_dir = os.path.dirname(os.path.abspath(__file__))
cache_dir = os.path.join(test_dir, 'aws_stubs')
inventorySet = InventorySet()
inventorySet.add(get_aws_inventory(name='aws'))
inventorySet.add(get_aws_inventory(name='awesome'))
print(inventorySet.instances())
all_instances = inventorySet.instances(filter_sources='awesome')
# aws_instances = [
# Instance(name='test-forms', address='devbox', aliases=('devbox', 'ip-172-31-8-185.us-west-2.compute.internal', 'i-e54cbaeb'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='devlab-forms', address='devbox', aliases=('devbox', 'ip-172-31-0-138.us-west-2.compute.internal', 'i-f7d726f9'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-account-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-139.us-west-2.compute.internal', 'i-f4d726fa'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='devlab-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-0-142.us-west-2.compute.internal', 'i-f5d726fb'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='devlab-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-140.us-west-2.compute.internal', 'i-f2d726fc'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-141.us-west-2.compute.internal', 'i-f3d726fd'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-38.us-west-2.compute.internal', 'i-0f500447384e95942'), source='aws (aws)', container_id=None, type='VM'),
# Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-39.us-west-2.compute.internal', 'i-0f500447384e95943'), source='aws (aws)', container_id=None, type='VM')
# ]
awesome_instances = [
Instance(name='test-forms', address='devbox', aliases=('devbox', 'ip-172-31-8-185.us-west-2.compute.internal', 'i-e54cbaeb'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='devlab-forms', address='devbox', aliases=('devbox', 'ip-172-31-0-138.us-west-2.compute.internal', 'i-f7d726f9'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-account-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-139.us-west-2.compute.internal', 'i-f4d726fa'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='devlab-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-0-142.us-west-2.compute.internal', 'i-f5d726fb'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='devlab-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-140.us-west-2.compute.internal', 'i-f2d726fc'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-141.us-west-2.compute.internal', 'i-f3d726fd'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-38.us-west-2.compute.internal', 'i-0f500447384e95942'), source='awesome (aws)', container_id=None, type='VM'),
Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-39.us-west-2.compute.internal', 'i-0f500447384e95943'), source='awesome (aws)', container_id=None, type='VM')
]
assert len(all_instances) == len(awesome_instances)
assert set(all_instances) == set(awesome_instances)
all_instances = inventorySet.instances(filter_sources='bogus')
assert len(all_instances) == 0
|
410100
|
import functools
import operator
import os
import os.path
import sys
import numpy as np
import scipy.special
import pytest
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
np.random.seed(20200909)
_num_samples = 15
_sequence_length = 9
_input_size = 5
_num_layers = 2
_sample_size = _sequence_length*_input_size + _num_layers*_input_size
_samples = np.random.uniform(low=-1, high=1, size=(_num_samples,_sample_size))
_samples = _samples.astype(np.float32)
# Sample access functions
def get_sample(index):
return _samples[index,:]
def num_samples():
return _num_samples
def sample_dims():
return (_sample_size,)
# ==============================================
# NumPy implementation
# ==============================================
def numpy_gru(x, h, w):
# Cast inputs to float64
def to_float64_list(a):
return [a_
if a_.dtype is np.float64
else a_.astype(np.float64)
for a_ in a]
x = to_float64_list(x)
h = to_float64_list(h)
w = to_float64_list(w)
# Dimensions
sequence_length = len(x)
input_size = x[0].size
num_layers = len(h)
hidden_size = h[0].size
assert len(w) == 4*num_layers, 'incorrect number of weights'
# Unroll GRU
for i in range(num_layers):
for j in range(sequence_length):
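            # GRU cell equations implemented below (gate order: reset, update, new):
            #   r = sigmoid(W_ir x + b_ir + W_hr h + b_hr)
            #   z = sigmoid(W_iz x + b_iz + W_hz h + b_hz)
            #   n = tanh(W_in x + b_in + r * (W_hn h + b_hn))
            #   h' = (1 - z) * n + z * h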
ih = np.matmul(w[4*i], x[j]) + w[4*i+2]
hh = np.matmul(w[4*i+1], h[i]) + w[4*i+3]
r = scipy.special.expit(ih[:hidden_size] + hh[:hidden_size])
z = scipy.special.expit(ih[hidden_size:2*hidden_size] + hh[hidden_size:2*hidden_size])
n = np.tanh(ih[2*hidden_size:] + r*hh[2*hidden_size:])
h[i] = (1-z)*n + z*h[i]
x[j] = h[i]
return np.stack(x)
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Skip test on non-GPU systems
# Note: Test requires cuDNN (on GPU) or oneDNN (on CPU).
### @todo Assume LBANN has been built with oneDNN?
if not tools.gpus_per_node(lbann):
message = f'{os.path.basename(__file__)} requires cuDNN or oneDNN'
print('Skip - ' + message)
pytest.skip(message)
mini_batch_size = num_samples() // 2
trainer = lbann.Trainer(mini_batch_size)
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.SGD()
return trainer, model, data_reader, optimizer
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Input data
# Note: Sum with a weights layer so that gradient checking will
# verify that error signals are correct.
x_weights = lbann.Weights(initializer=lbann.ConstantInitializer(value=0.0),
name='input')
    h_weights = lbann.Weights(initializer=lbann.ConstantInitializer(value=0.0),
                              name='initial_hidden')
input_ = lbann.Input(data_field='samples')
input_slice = lbann.Slice(
input_,
slice_points=tools.str_list([0, _sequence_length*_input_size, _sample_size]),
)
x = lbann.Reshape(input_slice, dims=tools.str_list([_sequence_length,_input_size]))
x = lbann.Sum(x, lbann.WeightsLayer(weights=x_weights, hint_layer=x))
h = lbann.Reshape(input_slice, dims=tools.str_list([_num_layers,_input_size]),)
h = lbann.Sum(h, lbann.WeightsLayer(weights=h_weights, hint_layer=h))
x_lbann = x
h_lbann = h
# Objects for LBANN model
obj = []
metrics = []
callbacks = []
# ------------------------------------------
# Multi-layer, unidirectional GRU
# ------------------------------------------
# Note: input_size=hidden_size due to a limitation in oneDNN
# Weights
rnn_weights_numpy = []
for i in range(_num_layers):
ih_matrix = np.random.uniform(
low=-1,
high=1,
size=(3*_input_size,_input_size),
)
hh_matrix = np.random.uniform(
low=-1,
high=1,
size=(3*_input_size,_input_size),
)
ih_bias = np.random.uniform(low=-1, high=1, size=(3*_input_size,))
hh_bias = np.random.uniform(low=-1, high=1, size=(3*_input_size,))
rnn_weights_numpy.extend([ih_matrix, hh_matrix, ih_bias, hh_bias])
rnn_weights_numpy = [w.astype(np.float32) for w in rnn_weights_numpy]
rnn_weights_lbann = [
lbann.Weights(
initializer=lbann.ValueInitializer(
values=tools.str_list(np.nditer(w, order='F'))))
for w in rnn_weights_numpy
]
# LBANN implementation
x = x_lbann
h = h_lbann
y = lbann.GRU(
x,
h,
hidden_size=_input_size,
num_layers=_num_layers,
weights=rnn_weights_lbann,
)
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name='Multi-layer, unidirectional'))
# NumPy implementation
vals = []
for i in range(num_samples()):
input_ = get_sample(i).astype(np.float64)
x = input_[:_sequence_length*_input_size].reshape((_sequence_length,_input_size))
h = input_[_sequence_length*_input_size:].reshape((_num_layers,_input_size))
y = numpy_gru(x, h, rnn_weights_numpy)
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Single-layer, unidirectional GRU
# ------------------------------------------
# Weights
rnn_weights_numpy = []
hidden_size = 7
ih_matrix = np.random.uniform(
low=-1,
high=1,
size=(3*hidden_size,_input_size),
)
hh_matrix = np.random.uniform(
low=-1,
high=1,
size=(3*hidden_size,hidden_size),
)
ih_bias = np.random.uniform(low=-1, high=1, size=(3*hidden_size,))
hh_bias = np.random.uniform(low=-1, high=1, size=(3*hidden_size,))
rnn_weights_numpy.extend([ih_matrix, hh_matrix, ih_bias, hh_bias])
rnn_weights_numpy = [w.astype(np.float32) for w in rnn_weights_numpy]
rnn_weights_lbann = [
lbann.Weights(
initializer=lbann.ValueInitializer(
values=tools.str_list(np.nditer(w, order='F'))))
for w in rnn_weights_numpy
]
# LBANN implementation
x = x_lbann
h = h_lbann
h = lbann.Reshape(
lbann.Slice(
lbann.Reshape(h, dims='-1'),
slice_points=tools.str_list([0, hidden_size]),
),
dims='1 -1',
)
y = lbann.GRU(
x,
h,
hidden_size=hidden_size,
num_layers=1,
weights=rnn_weights_lbann,
)
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name='Single-layer, unidirectional'))
# NumPy implementation
vals = []
for i in range(num_samples()):
input_ = get_sample(i).astype(np.float64)
x = input_[:_sequence_length*_input_size].reshape((_sequence_length,_input_size))
h = input_[_sequence_length*_input_size:].reshape((_num_layers,_input_size))
h = h.flatten()[:hidden_size].reshape((1,hidden_size))
y = numpy_gru(x, h, rnn_weights_numpy)
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Gradient checking
# ------------------------------------------
callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))
# ------------------------------------------
# Construct model
# ------------------------------------------
num_epochs = 0
return lbann.Model(num_epochs,
layers=lbann.traverse_layer_graph(x_lbann),
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
def construct_data_reader(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train'
)
])
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'test'
)
])
return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment, __file__):
globals()[_test_func.__name__] = _test_func
|
410156
|
# ipywidgets AppLayout; the Button widgets referenced below are assumed to be
# created earlier in the notebook.
from ipywidgets import AppLayout
AppLayout(header=header_button,
          left_sidebar=None,
          center=center_button,
          right_sidebar=None,
          footer=footer_button)
|
410159
|
from functools import partial
from unittest import TestCase
import os
import os.path as osp
import numpy as np
from datumaro.components.annotation import (
AnnotationType, Bbox, Caption, Cuboid3d, Label, LabelCategories, Mask,
MaskCategories, Points, PointsCategories, Polygon, PolyLine,
)
from datumaro.components.extractor import DatasetItem
from datumaro.components.project import Dataset
from datumaro.plugins.datumaro_format.converter import DatumaroConverter
from datumaro.plugins.datumaro_format.extractor import DatumaroImporter
from datumaro.util.image import Image
from datumaro.util.mask_tools import generate_colormap
from datumaro.util.test_utils import (
Dimensions, TestDir, compare_datasets_strict, test_save_and_load,
)
from .requirements import Requirements, mark_requirement
class DatumaroConverterTest(TestCase):
def _test_save_and_load(self, source_dataset, converter, test_dir,
target_dataset=None, importer_args=None,
compare=compare_datasets_strict, **kwargs):
return test_save_and_load(self, source_dataset, converter, test_dir,
importer='datumaro',
target_dataset=target_dataset, importer_args=importer_args,
compare=compare, **kwargs)
@property
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_dataset(self):
label_categories = LabelCategories(attributes={'a', 'b', 'score'})
for i in range(5):
label_categories.add('cat' + str(i), attributes={'x', 'y'})
mask_categories = MaskCategories(
generate_colormap(len(label_categories.items)))
points_categories = PointsCategories()
for index, _ in enumerate(label_categories.items):
points_categories.add(index, ['cat1', 'cat2'], joints=[[0, 1]])
return Dataset.from_iterable([
DatasetItem(id=100, subset='train', image=np.ones((10, 6, 3)),
annotations=[
Caption('hello', id=1),
Caption('world', id=2, group=5),
Label(2, id=3, attributes={
'x': 1,
'y': '2',
}),
Bbox(1, 2, 3, 4, label=4, id=4, z_order=1, attributes={
'score': 1.0,
}),
Bbox(5, 6, 7, 8, id=5, group=5, attributes={
'a': 1.5,
'b': 'text',
}),
Points([1, 2, 2, 0, 1, 1], label=0, id=5, z_order=4,
attributes={ 'x': 1, 'y': '2', }),
Mask(label=3, id=5, z_order=2, image=np.ones((2, 3)),
attributes={ 'x': 1, 'y': '2', }),
]),
DatasetItem(id=21, subset='train',
annotations=[
Caption('test'),
Label(2),
Bbox(1, 2, 3, 4, label=5, id=42, group=42)
]),
DatasetItem(id=2, subset='val',
annotations=[
PolyLine([1, 2, 3, 4, 5, 6, 7, 8], id=11, z_order=1),
Polygon([1, 2, 3, 4, 5, 6, 7, 8], id=12, z_order=4),
]),
DatasetItem(id=1, subset='test',
annotations=[
Cuboid3d([1.0, 2.0, 3.0], [2.0, 2.0, 4.0], [1.0, 3.0, 4.0],
id=6, label=0, attributes={'occluded': True}, group=6
)
]),
DatasetItem(id=42, subset='test',
attributes={'a1': 5, 'a2': '42'}),
DatasetItem(id=42),
DatasetItem(id=43, image=Image(path='1/b/c.qq', size=(2, 4))),
], categories={
AnnotationType.label: label_categories,
AnnotationType.mask: mask_categories,
AnnotationType.points: points_categories,
})
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load(self):
with TestDir() as test_dir:
self._test_save_and_load(self.test_dataset,
partial(DatumaroConverter.convert, save_images=True), test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_no_save_images(self):
with TestDir() as test_dir:
self._test_save_and_load(self.test_dataset,
partial(DatumaroConverter.convert, save_images=True), test_dir,
compare=None, require_images=False)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect(self):
with TestDir() as test_dir:
DatumaroConverter.convert(self.test_dataset, save_dir=test_dir)
self.assertTrue(DatumaroImporter.detect(test_dir))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_relative_paths(self):
test_dataset = Dataset.from_iterable([
DatasetItem(id='1', image=np.ones((4, 2, 3))),
DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3))),
DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3))),
])
with TestDir() as test_dir:
self._test_save_and_load(test_dataset,
partial(DatumaroConverter.convert, save_images=True), test_dir)
@mark_requirement(Requirements.DATUM_231)
def test_can_save_dataset_with_cjk_categories(self):
expected = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Bbox(0, 1, 2, 2,
label=0, group=1, id=1,
attributes={ 'is_crowd': False }),
], attributes={'id': 1}),
DatasetItem(id=2, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Bbox(1, 0, 2, 2, label=1, group=2, id=2,
attributes={ 'is_crowd': False }),
], attributes={'id': 2}),
DatasetItem(id=3, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Bbox(0, 1, 2, 2, label=2, group=3, id=3,
attributes={ 'is_crowd': False }),
], attributes={'id': 3}),
],
categories=[
"고양이", "ネコ", "猫"
]
)
with TestDir() as test_dir:
self._test_save_and_load(expected,
partial(DatumaroConverter.convert, save_images=True), test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
test_dataset = Dataset.from_iterable([
DatasetItem(id='кириллица с пробелом', image=np.ones((4, 2, 3))),
])
with TestDir() as test_dir:
self._test_save_and_load(test_dataset,
partial(DatumaroConverter.convert, save_images=True),
test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_image_with_arbitrary_extension(self):
expected = Dataset.from_iterable([
DatasetItem(id='q/1', image=Image(path='q/1.JPEG',
data=np.zeros((4, 3, 3))), attributes={'frame': 1}),
DatasetItem(id='a/b/c/2', image=Image(path='a/b/c/2.bmp',
data=np.zeros((3, 4, 3))), attributes={'frame': 2}),
])
with TestDir() as test_dir:
self._test_save_and_load(expected,
partial(DatumaroConverter.convert, save_images=True),
test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_inplace_save_writes_only_updated_data_with_direct_changes(self):
expected = Dataset.from_iterable([
DatasetItem(1, subset='a'),
DatasetItem(2, subset='a', image=np.ones((3, 2, 3))),
DatasetItem(2, subset='b'),
])
with TestDir() as path:
# generate initial dataset
dataset = Dataset.from_iterable([
# modified subset
DatasetItem(1, subset='a'),
# unmodified subset
DatasetItem(2, subset='b'),
# removed subset
DatasetItem(3, subset='c', image=np.ones((2, 2, 3))),
])
dataset.save(path, save_images=True)
dataset.put(DatasetItem(2, subset='a', image=np.ones((3, 2, 3))))
dataset.remove(3, 'c')
dataset.save(save_images=True)
self.assertEqual({'a.json', 'b.json'},
set(os.listdir(osp.join(path, 'annotations'))))
self.assertEqual({'2.jpg'},
set(os.listdir(osp.join(path, 'images', 'a'))))
compare_datasets_strict(self, expected, Dataset.load(path))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_inplace_save_writes_only_updated_data_with_transforms(self):
with TestDir() as path:
expected = Dataset.from_iterable([
DatasetItem(2, subset='test'),
DatasetItem(3, subset='train', image=np.ones((2, 2, 3))),
DatasetItem(4, subset='train', image=np.ones((2, 3, 3))),
DatasetItem(5, subset='test',
point_cloud=osp.join(path, 'point_clouds', 'test', '5.pcd'),
related_images=[
Image(data=np.ones((3, 4, 3)),
path=osp.join(path, 'test', '5', 'image_0.jpg')),
osp.join(path, 'test', '5', 'a', '5.png'),
]
),
])
dataset = Dataset.from_iterable([
DatasetItem(1, subset='a'),
DatasetItem(2, subset='b'),
DatasetItem(3, subset='c', image=np.ones((2, 2, 3))),
DatasetItem(4, subset='d', image=np.ones((2, 3, 3))),
DatasetItem(5, subset='e', point_cloud='5.pcd',
related_images=[
np.ones((3, 4, 3)),
'a/5.png',
]
),
])
dataset.save(path, save_images=True)
dataset.filter('/item[id >= 2]')
dataset.transform('random_split', (('train', 0.5), ('test', 0.5)),
seed=42)
dataset.save(save_images=True)
self.assertEqual(
{'images', 'annotations', 'point_clouds', 'related_images'},
set(os.listdir(path)))
self.assertEqual({'train.json', 'test.json'},
set(os.listdir(osp.join(path, 'annotations'))))
self.assertEqual({'3.jpg', '4.jpg'},
set(os.listdir(osp.join(path, 'images', 'train'))))
self.assertEqual({'train', 'c', 'd'},
set(os.listdir(osp.join(path, 'images'))))
self.assertEqual(set(),
set(os.listdir(osp.join(path, 'images', 'c'))))
self.assertEqual(set(),
set(os.listdir(osp.join(path, 'images', 'd'))))
self.assertEqual({'image_0.jpg'},
set(os.listdir(osp.join(path, 'related_images', 'test', '5'))))
compare_datasets_strict(self, expected, Dataset.load(path))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_pointcloud(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='test', point_cloud='1.pcd',
related_images= [
Image(data=np.ones((5, 5, 3)), path='1/a.jpg'),
Image(data=np.ones((5, 4, 3)), path='1/b.jpg'),
Image(size=(5, 3), path='1/c.jpg'),
'1/d.jpg',
],
annotations=[
Cuboid3d([2, 2, 2], [1, 1, 1], [3, 3, 1],
id=1, group=1, label=0, attributes={'x': True}
)
]),
], categories=['label'])
with TestDir() as test_dir:
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='test',
point_cloud=osp.join(test_dir, 'point_clouds',
'test', '1.pcd'),
related_images= [
Image(data=np.ones((5, 5, 3)), path=osp.join(
test_dir, 'related_images', 'test',
'1', 'image_0.jpg')),
Image(data=np.ones((5, 4, 3)), path=osp.join(
test_dir, 'related_images', 'test',
'1', 'image_1.jpg')),
Image(size=(5, 3), path=osp.join(
test_dir, 'related_images', 'test',
'1', 'image_2.jpg')),
osp.join(test_dir, 'related_images', 'test',
'1', 'image_3.jpg'),
],
annotations=[
Cuboid3d([2, 2, 2], [1, 1, 1], [3, 3, 1],
id=1, group=1, label=0, attributes={'x': True}
)
]),
], categories=['label'])
self._test_save_and_load(source_dataset,
partial(DatumaroConverter.convert, save_images=True), test_dir,
target_dataset, compare=None, dimension=Dimensions.dim_3d)
|
410165
|
import numpy as np
from EOSMixture import EOSMixture
from MixtureRules.ClassicMixtureRule import ClassicMixtureRule, ClassicBMixture
from MixtureRules.MixtureRulesInterface import (
BiBehavior,
ThetaiBehavior,
DeltaMixtureRuleBehavior,
EpsilonMixtureRuleBehavior,
MixtureRuleBehavior,
)
from constants import R_IG
class biAdachi1985(BiBehavior):
def getBi(self, i: int, T: float, substances) -> float:
w = substances[i].omega
omega_b = 0.08779 + w * (-0.02181 + w * (-0.06708 + 0.10617 * w))
return omega_b / (substances[i].Pc / (R_IG * substances[i].Tc))
class thetaiAdachi1985(ThetaiBehavior):
def a(self, i: int, T: float, substances):
w = substances[i].omega
omega_ac = 0.43711 + w * (0.02366 + w * (0.10538 + w * 0.10164))
return omega_ac * np.power(R_IG * substances[i].Tc, 2) / substances[i].Pc
def m(self, i: int, T: float, substances):
w = substances[i].omega
return 0.44060 + w * (1.7039 + w * (-1.728 + w * 0.9929))
def alpha(self, i: int, T: float, substances):
_m = self.m(i, T, substances)
return np.power(1.0 + _m * (1.0 - np.sqrt(T / substances[i].Tc)), 2)
def getThetai(self, i: int, T: float, substances) -> float:
return self.alpha(i, T, substances) * self.a(i, T, substances)
class ciAdachi1985(BiBehavior):
def getBi(self, i: int, T: float, substances) -> float:
w = substances[i].omega
omega_c = 0.0506 + w * (0.04184 + w * (0.16413 - w * 0.03975))
return omega_c / (substances[i].Pc / (R_IG * substances[i].Tc))
class cmAdachi1985:
def __init__(self):
self.cmBehavior = ClassicBMixture()
self.ciBehavior = ciAdachi1985()
def cm(self, y, T: float, substances) -> float:
return self.cmBehavior.bm(y, T, self.ciBehavior, substances)
def diffCm(self, i: int, y, T: float, substances) -> float:
return self.ciBehavior.getBi(i, T, substances)
class deltaMixAdachi1985(DeltaMixtureRuleBehavior):
def __init__(self):
self.cm = cmAdachi1985()
def deltam(
self, y, T: float, bib: BiBehavior, bmb: MixtureRuleBehavior, substances
) -> float:
return 2.0 * self.cm.cm(y, T, substances)
def diffDeltam(
self, i: int, y, T: float, bib: BiBehavior, bmb: MixtureRuleBehavior, substances
) -> float:
return 2.0 * self.cm.diffCm(i, y, T, substances)
class epsilonMixAdachi1985(EpsilonMixtureRuleBehavior):
def __init__(self):
self.cm = cmAdachi1985()
def epsilonm(
self, y, T: float, bib: BiBehavior, bmb: MixtureRuleBehavior, substances
) -> float:
return -(self.cm.cm(y, T, substances)) ** 2
def diffEpsilonm(
self, i: int, y, T: float, bib: BiBehavior, bmb: MixtureRuleBehavior, substances
) -> float:
return -2.0 * self.cm.cm(y, T, substances) * self.cm.diffCm(i, y, T, substances)
class Adachi1985(EOSMixture):
def __init__(self, _subs, _k):
super().__init__(_subs, _k)
self.eosname = "Adachi, et al. (1985)"
self.mixRuleBehavior = ClassicMixtureRule()
self.biBehavior = biAdachi1985()
self.thetaiBehavior = thetaiAdachi1985()
self.deltaMixBehavior = deltaMixAdachi1985()
self.epsilonMixBehavior = epsilonMixAdachi1985()
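# Hedged usage sketch (added; not part of the original module). Substance objects are
# assumed to expose Tc, Pc and omega, as used by the behaviors above; everything else
# follows the EOSMixture constructor signature seen in Adachi1985.__init__.
#
#   subs = [methane, ethane]          # hypothetical substance objects with Tc, Pc, omega
#   k = [[0.0, 0.0], [0.0, 0.0]]      # binary interaction parameters
#   eos = Adachi1985(subs, k)         # wires the Adachi-specific b_i, theta_i, delta_m, epsilon_m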
|
410170
|
from django.views.generic import TemplateView, RedirectView
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.http import Http404
from django.shortcuts import render
admin.autodiscover()
def justatemplate(path):
def view(request):
context = {
"show_header": True,
"data": {"data": "is fake"},
}
return render(request, path, context)
return view
def share(request):
valid_panes = ['people', 'housing', 'fun', 'environment', 'history']
pane = request.GET.get('p', 'people')
if pane not in valid_panes:
raise Http404
pane_url = "/api/%s/" % pane
cll = request.GET.get('cll')
if cll:
pane_url = "%s?cll=%s" % (pane_url, cll)
context = {"pane_url": pane_url}
return render(request, "share.html", context)
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^404/$', justatemplate('404.html')),
url(r'^500/$', justatemplate('500.html')),
url(r'^api/', include('sitegeist.api.urls')),
url(r'^dev/$', TemplateView.as_view(template_name='dev.html')),
url(r'^geo/', include('boundaries.urls')),
url(r'^$', TemplateView.as_view(template_name='index.html')),
url(r'^about/$', justatemplate('sitegeist/panes/methodology.html')),
url(r'^share/$', share),
url(r'^android/$', RedirectView.as_view(url='https://play.google.com/store/apps/details?id=com.sunlightfoundation.sitegeist.android')),
url(r'^ios/$', RedirectView.as_view(url='https://itunes.apple.com/us/app/sitegeist/id582687408?ls=1&mt=8')),
url(r'^kickstarter/$', RedirectView.as_view(url='https://www.kickstarter.com/projects/sunlightfoundation/save-sitegeist')),
)
|
410183
|
import errno
import os
from alembic.command import downgrade
from alembic.command import upgrade
from alembic.config import Config
from sqlalchemy import create_engine
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
BASE = declarative_base()
_PACKAGE_DIR = os.path.abspath(os.path.dirname(__file__))
class Database(object):
def __init__(self):
self._url = None
self._initialized = False
self._engine = None
self._session = None
self._external_db = False
self._alembic_config = None
self._sqlite_db_file_path = None
def init_db(self):
db_dir = self._sqlite_default_db_dir()
try:
os.makedirs(db_dir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(db_dir):
pass
else:
raise
print('DB file path:', db_dir)
def setup(self, url=None, echo=False):
if url is None:
db_dir = self._sqlite_default_db_dir()
db_path = os.path.join(db_dir, 'chainerui.db')
self._sqlite_db_file_path = db_path
url = 'sqlite:///' + db_path
else:
self._external_db = True
if url.startswith('sqlite'):
connect_args = {'check_same_thread': False}
else:
connect_args = {}
self._url = url
try:
engine = create_engine(
url,
connect_args=connect_args,
echo=echo
)
except ImportError as e:
print(e, ', Please install the driver to support the external DB')
return False
self._engine = engine
if not self._check():
return False
self._session = scoped_session(
sessionmaker(autocommit=False, autoflush=False, bind=engine)
)
self._initialized = True
self._setup_alembic_config(url)
return True
def _setup_alembic_config(self, url):
ini_path = os.path.join(_PACKAGE_DIR, 'alembic.ini')
config = Config(ini_path)
config.set_main_option(
'script_location', os.path.join(_PACKAGE_DIR, 'migration'))
config.set_main_option('url', url)
self._alembic_config = config
def _check(self):
if not self._external_db:
if not os.path.isdir(self._sqlite_default_db_dir()):
print('DB is not initialized, please run '
'\'chainerui db create\' command first')
return False
try:
connection = self._engine.connect()
connection.close()
except OperationalError as e:
print(e)
return False
return True
def upgrade(self):
upgrade(self.alembic_config, 'head')
def downgrade(self):
downgrade(self.alembic_config, 'base')
def remove_db(self):
if not self._external_db and self._sqlite_db_file_path is not None:
if os.path.exists(self._sqlite_db_file_path):
os.remove(self._sqlite_db_file_path)
self.__init__() # initialize all attribute
def _sqlite_default_db_dir(self):
root = os.path.abspath(
os.path.expanduser(os.getenv('CHAINERUI_ROOT', '~/.chainerui')))
return os.path.join(root, 'db')
@property
def url(self):
if not self._initialized:
raise ValueError('not setup DB URL')
return self._url
@property
def engine(self):
if not self._initialized:
raise ValueError('not setup database engine')
return self._engine
@property
def session(self):
if not self._initialized:
raise ValueError('not setup database session')
return self._session
@property
def alembic_config(self):
if not self._initialized:
raise ValueError('not setup migration configuration')
return self._alembic_config
db = Database()
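# Hedged usage sketch (added; not part of the original module). The module-level `db`
# singleton is configured once and then accessed through its properties:
#
#   if db.setup():           # defaults to sqlite at ~/.chainerui/db/chainerui.db when url is None
#       db.upgrade()         # apply alembic migrations up to 'head'
#       session = db.session # scoped SQLAlchemy session bound to the engine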
|
410190
|
import factory
from ipaddr import IPv4Network
from pycroft.model.net import VLAN, Subnet
from tests.factories.base import BaseFactory
class VLANFactory(BaseFactory):
class Meta:
model = VLAN
name = factory.Sequence(lambda n: f"vlan{n+1}")
vid = factory.Sequence(lambda n: n+1)
class Params:
create_subnet = factory.Trait(
subnets=factory.RelatedFactoryList('tests.factories.net.SubnetFactory', 'vlan', size=1)
)
class SubnetFactory(BaseFactory):
class Meta:
model = Subnet
address = factory.Sequence(
lambda n: IPv4Network((f"141.{n // 255}.{n % 255}.0", 24))
)
vlan = factory.SubFactory(VLANFactory)
|
410213
|
import sys
import os
import argparse
#Set up parser and top level args
parser = argparse.ArgumentParser(description='ASCENT: Automated Simulations to Characterize Electrical Nerve Thresholds')
# parser.add_argument('-s','--silent',action='store_true', help = 'silence printing')
parser.add_argument('-v','--verbose',action='store_true', help = 'verbose printing')
#add subparsers
subparsers = parser.add_subparsers(help = 'which script to run', dest='script')
pipeline_parser = subparsers.add_parser('pipeline', help = 'main ASCENT pipeline')
install_parser = subparsers.add_parser('install', help = 'install ASCENT')
env_parser = subparsers.add_parser('env_setup', help = 'Set ASCENT environment variables')
cs_parser = subparsers.add_parser('clean_samples', help = 'Remove all files except those specified from Sample directories')
nsims_parser = subparsers.add_parser('import_n_sims', help = 'Move NEURON outputs into ASCENT directories for analysis')
mmg_parser = subparsers.add_parser('mock_morphology_generator', help = 'Generate mock morphology for an ASCENT run')
ts_parser = subparsers.add_parser('tidy_samples', help = 'Remove specified files from Sample directories')
#add subparser arguments
# pipeline_parser.add_argument('-w','--wait',dest='wait_time', help = 'wait the specified amount of time (hours) for an available COMSOL license')
pipeline_parser.add_argument('run_indices', nargs = '+', help = 'Space separated indices to run the pipeline over')
ts_parser.add_argument('sample_indices', nargs = '+',type=int, help = 'Space separated sample indices to tidy')
nsims_parser.add_argument('run_indices', nargs = '+',type=int, help = 'Space separated run indices to import')
cs_parser.add_argument('sample_indices', nargs = '+',type=int, help = 'Space separated sample indices to clean')
mmg_parser.add_argument('mock_sample_index',type=int, help = 'Mock Sample Index to generate')
install_parser.add_argument('--no-conda',action='store_true', help = 'Skip conda portion of installation')
def parse():
#parse arguments
args = parser.parse_args()
if args.script is None:
parser.print_help()
sys.exit()
return args
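# Hedged sketch of a typical entry point (added; the dispatching code that consumes
# `args.script` lives elsewhere in ASCENT and is not assumed here):
#
#   if __name__ == '__main__':
#       args = parse()
#       # args.script selects the subcommand, e.g. 'pipeline' with args.run_indices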
|
410215
|
class Result:
def __init__(self, result: bool, error = None, item = None, list = None, comment = None):
self.result = result
self.error = error
self.item = item
self.list = list if list is not None else []  # avoid a shared mutable default across instances
self.comment = comment
|
410219
|
from django.views.generic.edit import FormMixin
from braces.views import AjaxResponseMixin, JSONResponseMixin
from django.db.models.query_utils import Q
from django.views.generic import ListView, DetailView, View
from realestate.listing.forms import ListingContactForm
from realestate.listing.models import Listing, Agent
from rest_framework.reverse import reverse_lazy
from sorl.thumbnail.shortcuts import get_thumbnail
from constance import config
class ListingList(ListView):
template_name = 'listing/results.html'
model = Listing
queryset = Listing.objects.active()
paginate_by = config.PROPERTIES_PER_PAGE
class ListingForSaleList(ListView):
template_name = 'listing/results.html'
model = Listing
paginate_by = config.PROPERTIES_PER_PAGE
def get_queryset(self):
ordering = self.kwargs.get('order_by', 'pk')
return Listing.objects.sale().order_by(ordering)
def get_context_data(self, **kwargs):
ctx = super(ListingForSaleList, self).get_context_data(**kwargs)
ctx['sort'] = self.kwargs.get('order_by', 'pk')
return ctx
class ListingForRentList(ListView):
template_name = 'listing/results.html'
model = Listing
paginate_by = config.PROPERTIES_PER_PAGE
def get_queryset(self):
ordering = self.kwargs.get('order_by', 'pk')
return Listing.objects.rent().order_by(ordering)
def get_context_data(self, **kwargs):
ctx = super(ListingForRentList, self).get_context_data(**kwargs)
ctx['sort'] = self.kwargs.get('order_by', 'pk')
return ctx
class ListingView(FormMixin, DetailView):
template_name = 'listing/listing.html'
model = Listing
form_class = ListingContactForm
success_url = reverse_lazy('thank-you')
def get_queryset(self):
if self.request.user.is_staff:
return Listing.objects.all()
return Listing.objects.active()
def get_context_data(self, **kwargs):
context = super(ListingView, self).get_context_data(**kwargs)
form_class = self.get_form_class()
context['form'] = self.get_form(form_class)
return context
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
form.send_contact_form(self.object)
return super(ListingView, self).form_valid(form)
class MapView(JSONResponseMixin, AjaxResponseMixin, View):
def get_ajax(self, request, *args, **kwargs):
from realestate.listing.templatetags.extra_functions import currency
listings = []
for listing in Listing.objects.active():
lat, lng = listing.coords.split(',')
try:
im = get_thumbnail(listing.main_image.imagen, '135x90', crop='center', quality=99).url
except (ValueError, AttributeError):
im = ''
listings.append({
'id': listing.id,
'url': listing.get_absolute_url(),
'street': '%s' % listing.get_address(),
'title': listing.title,
'lat': lat,
'lng': lng,
'price': currency(listing.price),
'img': im,
})
return self.render_json_response({'listings': listings, })
class AgentList(ListView):
model = Agent
context_object_name = 'agents'
template_name = 'listing/agents.html'
def get_queryset(self):
return Agent.objects.active()
class AgentListing(ListView):
model = Listing
template_name = 'listing/agent-listings.html'
paginate_by = config.PROPERTIES_PER_PAGE
context_object_name = 'results'
def get_queryset(self):
return Listing.objects.active(agent=Agent.objects.get(id=self.kwargs.get('agent'))).order_by('-id')
|
410224
|
import torch
import torch.nn as nn
def get_normalization(m_config, conditional=True):
norm = m_config.normalization
if conditional:
if norm == 'NoneNorm':
return ConditionalNoneNorm2d
elif norm == 'InstanceNorm++':
return ConditionalInstanceNorm2dPlus
elif norm == 'InstanceNorm':
return ConditionalInstanceNorm2d
elif norm == 'BatchNorm':
return ConditionalBatchNorm2d
elif norm == 'VarianceNorm':
return ConditionalVarianceNorm2d
else:
raise NotImplementedError("{} does not exist!".format(norm))
else:
if norm == 'BatchNorm':
return nn.BatchNorm2d
elif norm == 'InstanceNorm':
return nn.InstanceNorm2d
elif norm == 'InstanceNorm++':
return InstanceNorm2dPlus
elif norm == 'VarianceNorm':
return VarianceNorm2d
elif norm == 'NoneNorm':
return NoneNorm2d
elif norm is None:
return None
else:
raise NotImplementedError("{} does not exist!".format(norm))
class ConditionalBatchNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.bn = nn.BatchNorm2d(num_features, affine=False)
if self.bias:
self.embed = nn.Embedding(num_classes, num_features * 2)
self.embed.weight.data[:, :num_features].uniform_()  # Initialise scale uniformly in [0, 1)
self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.uniform_()
def forward(self, x, y):
out = self.bn(x)
if self.bias:
gamma, beta = self.embed(y).chunk(2, dim=1)
out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
else:
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * out
return out
class ConditionalInstanceNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
if bias:
self.embed = nn.Embedding(num_classes, num_features * 2)
self.embed.weight.data[:, :num_features].uniform_()  # Initialise scale uniformly in [0, 1)
self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.uniform_()
def forward(self, x, y):
h = self.instance_norm(x)
if self.bias:
gamma, beta = self.embed(y).chunk(2, dim=-1)
out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
else:
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * h
return out
class ConditionalVarianceNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.normal_(1, 0.02)
def forward(self, x, y):
f_vars = torch.var(x, dim=(2, 3), keepdim=True)
h = x / torch.sqrt(f_vars + 1e-5)
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * h
return out
class VarianceNorm2d(nn.Module):
def __init__(self, num_features, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.alpha = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
def forward(self, x):
f_vars = torch.var(x, dim=(2, 3), keepdim=True)
h = x / torch.sqrt(f_vars + 1e-5)
out = self.alpha.view(-1, self.num_features, 1, 1) * h
return out
class ConditionalNoneNorm2d(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
if bias:
self.embed = nn.Embedding(num_classes, num_features * 2)
self.embed.weight.data[:, :num_features].uniform_()  # Initialise scale uniformly in [0, 1)
self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, num_features)
self.embed.weight.data.uniform_()
def forward(self, x, y):
if self.bias:
gamma, beta = self.embed(y).chunk(2, dim=-1)
out = gamma.view(-1, self.num_features, 1, 1) * x + beta.view(-1, self.num_features, 1, 1)
else:
gamma = self.embed(y)
out = gamma.view(-1, self.num_features, 1, 1) * x
return out
# noinspection PyUnusedLocal
class NoneNorm2d(nn.Module):
def __init__(self, num_features, bias=True):
super().__init__()
@staticmethod
def forward(x):
return x
class InstanceNorm2dPlus(nn.Module):
def __init__(self, num_features, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
self.alpha = nn.Parameter(torch.zeros(num_features))
self.gamma = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
self.gamma.data.normal_(1, 0.02)
if bias:
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
means = torch.mean(x, dim=(2, 3))
m = torch.mean(means, dim=-1, keepdim=True)
v = torch.var(means, dim=-1, keepdim=True)
means = (means - m) / (torch.sqrt(v + 1e-5))
h = self.instance_norm(x)
if self.bias:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1) * h + self.beta.view(-1, self.num_features, 1, 1)
else:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1) * h
return out
class ConditionalInstanceNorm2dPlus(nn.Module):
def __init__(self, num_features, num_classes, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
if bias:
self.embed = nn.Embedding(num_classes, num_features * 3)
self.embed.weight.data[:, :2 * num_features].normal_(1, 0.02) # Initialise scale at N(1, 0.02)
self.embed.weight.data[:, 2 * num_features:].zero_() # Initialise bias at 0
else:
self.embed = nn.Embedding(num_classes, 2 * num_features)
self.embed.weight.data.normal_(1, 0.02)
def forward(self, x, y):
means = torch.mean(x, dim=(2, 3))
m = torch.mean(means, dim=-1, keepdim=True)
v = torch.var(means, dim=-1, keepdim=True)
means = (means - m) / (torch.sqrt(v + 1e-5))
h = self.instance_norm(x)
if self.bias:
gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
h = h + means[..., None, None] * alpha[..., None, None]
out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
else:
gamma, alpha = self.embed(y).chunk(2, dim=-1)
h = h + means[..., None, None] * alpha[..., None, None]
out = gamma.view(-1, self.num_features, 1, 1) * h
return out
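# Hedged usage sketch (added; `_Cfg` is a hypothetical config object, not part of this file):
#
#   class _Cfg: normalization = 'InstanceNorm++'
#   norm_cls = get_normalization(_Cfg(), conditional=True)   # -> ConditionalInstanceNorm2dPlus
#   norm = norm_cls(num_features=64, num_classes=10)
#   out = norm(torch.randn(8, 64, 32, 32), torch.randint(0, 10, (8,)))  # class-conditional normalization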
|
410246
|
import sublime
import os
class DlvConst(object):
def __init__(self, window):
self.__window = window
self.__settings_file_name = "GoDebug.sublime-settings"
self.__bkpt_settings_file_name = "GoDebug.breakpoint-settings"
self.__watch_settings_file_name = "GoDebug.watch-settings"
self.__project_settings_prefix = "godebug"
self.__panel_group_suffix = "group"
self.__open_at_start_suffix = "open_at_start"
self.__close_at_stop_suffix = "close_at_stop"
self.__title_suffix = "title"
self.__breakpoint_suffix = "breakpoints"
self.__watch_suffix = "watches"
self.__project_exec_suffix = "executables"
self.__project_exec_settings = {}
self.__project_exec_name = None
self.__view_switch = {
self.STACKTRACE_VIEW:
{
self.__panel_group_suffix: self.__get_stacktrace_group,
self.__open_at_start_suffix: self.__get_stacktrace_open_at_start,
self.__close_at_stop_suffix: self.__get_stacktrace_close_at_stop,
self.__title_suffix: self.__get_stacktrace_title
},
self.GOROUTINE_VIEW:
{
self.__panel_group_suffix: self.__get_goroutine_group,
self.__open_at_start_suffix: self.__get_goroutine_open_at_start,
self.__close_at_stop_suffix: self.__get_goroutine_close_at_stop,
self.__title_suffix: self.__get_goroutine_title
},
self.VARIABLE_VIEW:
{
self.__panel_group_suffix: self.__get_variable_group,
self.__open_at_start_suffix: self.__get_variable_open_at_start,
self.__close_at_stop_suffix: self.__get_variable_close_at_stop,
self.__title_suffix: self.__get_variable_title
},
self.WATCH_VIEW:
{
self.__panel_group_suffix: self.__get_watch_group,
self.__open_at_start_suffix: self.__get_watch_open_at_start,
self.__close_at_stop_suffix: self.__get_watch_close_at_stop,
self.__title_suffix: self.__get_watch_title
},
self.SESSION_VIEW:
{
self.__panel_group_suffix: self.__get_session_group,
self.__open_at_start_suffix: self.__get_session_open_at_start,
self.__close_at_stop_suffix: self.__get_session_close_at_stop,
self.__title_suffix: self.__get_session_title
},
self.CONSOLE_VIEW:
{
self.__panel_group_suffix: self.__get_console_group,
self.__open_at_start_suffix: self.__get_console_open_at_start,
self.__close_at_stop_suffix: self.__get_console_close_at_stop,
self.__title_suffix: self.__get_console_title
},
self.BREAKPOINT_VIEW:
{
self.__panel_group_suffix: self.__get_breakpoint_group,
self.__open_at_start_suffix: self.__get_breakpoint_open_at_start,
self.__close_at_stop_suffix: self.__get_breakpoint_close_at_stop,
self.__title_suffix: self.__get_breakpoint_title
},
}
def __get_settings(self, key, default):
if default is None:
raise Exception("Default key %s value cannot be None" % key)
if key in self.__project_exec_settings:
return self.__project_exec_settings[key]
view = self.__window.active_view()
if view is not None:
settings = view.settings()
if settings.has("%s_%s" % (self.__project_settings_prefix, key)):
return settings.get("%s_%s" % (self.__project_settings_prefix, key))
value = sublime.load_settings(self.__settings_file_name).get(key, default)
if value is None:
value = default
return value
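# Settings resolution order implemented by __get_settings above (summary added for clarity):
#   1. the currently selected project executable settings block,
#   2. a "godebug_<key>" entry in the active view's project settings,
#   3. the "<key>" entry in GoDebug.sublime-settings,
#   4. the hard-coded default supplied by the caller.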
def get_view_setting(self, code, key):
return (self.__view_switch[code])[key]()
def set_project_executable(self, name):
view = self.__window.active_view()
if view is not None:
settings = view.settings()
exec_choices = settings.get("%s_%s" % (self.__project_settings_prefix, self.__project_exec_suffix))
if exec_choices is None or type(exec_choices) != dict or name not in exec_choices:
raise Exception("Project executable settings %s not found" % name)
self.__project_exec_settings = exec_choices[name]
self.__project_exec_name = name
def is_project_executable(self):
return self.__project_exec_name is not None
def get_project_executable_name(self):
return self.__project_exec_name
def clear_project_executable(self):
self.__project_exec_settings = {}
self.__project_exec_name = None
def get_project_executables(self):
settings = self.__window.active_view().settings()
exec_choices = settings.get("%s_%s" % (self.__project_settings_prefix, self.__project_exec_suffix))
if exec_choices is not None and type(exec_choices) == dict:
return list(exec_choices)
return None
def __load_project_settings(self, base_name):
settings = sublime.load_settings(base_name)
key = os.path.dirname(self.__window.project_file_name())
return settings.get(key) if settings.has(key) else []
def __save_project_settings(self, base_name, values):
settings = sublime.load_settings(base_name)
key = os.path.dirname(self.__window.project_file_name())
settings.set(key,values)
sublime.save_settings(base_name)
def load_breakpoints(self):
return self.__load_project_settings(self.__bkpt_settings_file_name)
def save_breakpoints(self, bkpts):
self.__save_project_settings(self.__bkpt_settings_file_name, bkpts)
def load_watches(self):
return self.__load_project_settings(self.__watch_settings_file_name)
def save_watches(self, watches):
self.__save_project_settings(self.__watch_settings_file_name, watches)
@property
def STATE_COMMAND(self):
return 'state'
@property
def STACKTRACE_COMMAND(self):
return 'stacktrace'
@property
def GOROUTINE_COMMAND(self):
return 'goroutine'
@property
def VARIABLE_COMMAND(self):
return 'variable'
@property
def WATCH_COMMAND(self):
return 'watch'
@property
def CREATE_BREAKPOINT_COMMAND(self):
return 'createbreakpoint'
@property
def CLEAR_BREAKPOINT_COMMAND(self):
return 'clearbreakpoint'
@property
def BREAKPOINT_COMMAND(self):
return 'listbreakpoints'
@property
def CONTINUE_COMMAND(self):
return 'continue'
@property
def NEXT_COMMAND(self):
return 'next'
@property
def CANCEL_NEXT_COMMAND(self):
return 'cancelnext'
@property
def STEP_COMMAND(self):
return 'step'
@property
def STEPOUT_COMMAND(self):
return 'stepOut'
@property
def RESTART_COMMAND(self):
return 'restart'
@property
def RUNTIME_COMMANDS(self):
return [self.CONTINUE_COMMAND, self.NEXT_COMMAND, self.STEP_COMMAND, self.STEPOUT_COMMAND]
@property
def PANEL_GROUP(self):
return self.__panel_group_suffix
@property
def OPEN_AT_START(self):
return self.__open_at_start_suffix
@property
def CLOSE_AT_STOP(self):
return self.__close_at_stop_suffix
@property
def TITLE(self):
return self.__title_suffix
@property
def STDOUT(self):
return 'stdout'
@property
def DEFAULT_BINARY(self):
return 'dlv'
@property
def DEFAULT_HOST(self):
return 'localhost'
@property
def DEFAULT_PORT(self):
return 3456
@property
def DEFAULT_TIMEOUT(self):
return 10
@property
def BUFFER(self):
return 4096
@property
def DEBUG_MODE(self):
return 'debug'
@property
def TEST_MODE(self):
return 'test'
@property
def REMOTE_MODE(self):
return 'remote'
@property
def DLV_REGION(self):
return 'dlv.suspend_pos'
# The mode used to run the Delve server; "remote" means no headless dlv instance has to be started
# "debug" | "test" | "remote"
@property
def MODE(self):
return self.__get_settings('mode', self.DEBUG_MODE)
# The binary location of the Delve server
@property
def BINARY(self):
return self.__get_settings("binary", self.DEFAULT_BINARY)
# The host of the Delve server
@property
def HOST(self):
return self.__get_settings('host', self.DEFAULT_HOST)
# The port of the Delve server
@property
def PORT(self):
return self.__get_settings('port', self.DEFAULT_PORT)
# If set, the Delve server runs in logging mode. Used in "debug" or "test" mode
@property
def LOG(self):
return self.__get_settings('log', False)
# Arguments for run the program. (OPTIONAL)
@property
def ARGS(self):
return self.__get_settings('args', '')
# The current working directory that Delve starts from.
# Defaults to the project directory. Used in "debug" or "test" mode. (OPTIONAL)
@property
def CWD(self):
return self.__get_settings('cwd', '')
# Timeout for larger operations performed over the socket in a background thread, in seconds; must be above zero
@property
def TIMEOUT(self):
value = self.__get_settings('timeout', self.DEFAULT_TIMEOUT)
if value <= 0:
value = self.DEFAULT_TIMEOUT
return value
# Save breakpoints to the settings file before debugging starts; restore them when the project is loaded
@property
def SAVE_BREAKPOINT(self):
return self.__get_settings('save_breakpoints', True)
# Save watches to the settings file before debugging starts; restore them when the project is loaded
@property
def SAVE_WATCH(self):
return self.__get_settings('save_watches', True)
# Whether to log the raw data read from and written to the Delve session and the inferior program
@property
def DEBUG(self):
return self.__get_settings('debug', False)
# File to optionally write all the raw data read from and written to the Delve session and the inferior program.
# Must be set to 'stdout' or a file name. If the file name is given without a full path, it is saved in the project directory
@property
def DEBUG_FILE(self):
return self.__get_settings('debug_file', self.STDOUT)
# Default Delve panel layout
@property
def PANEL_LAYOUT(self):
return self.__get_settings('panel_layout',
{
"cols": [0.0, 0.33, 0.66, 1.0],
"rows": [0.0, 0.75, 1.0],
"cells":
[
[0, 0, 3, 1],
[0, 1, 1, 2],
[1, 1, 2, 2],
[2, 1, 3, 2]
]
}
)
# View name
@property
def STACKTRACE_VIEW(self):
return self.STACKTRACE_COMMAND
# View group in Delve panel
def __get_stacktrace_group(self):
return self.__get_settings("%s_%s" % (self.STACKTRACE_VIEW, self.__panel_group_suffix), 2)
# Open view when debugging starts
def __get_stacktrace_open_at_start(self):
return self.__get_settings("%s_%s" % (self.STACKTRACE_VIEW, self.__open_at_start_suffix), True)
# Close view when debugging stops
def __get_stacktrace_close_at_stop(self):
return self.__get_settings("%s_%s" % (self.STACKTRACE_VIEW, self.__close_at_stop_suffix), True)
# View title
def __get_stacktrace_title(self):
return self.__get_settings("%s_%s" % (self.STACKTRACE_VIEW, self.__title_suffix), 'Delve Stacktrace')
# View name
@property
def GOROUTINE_VIEW(self):
return self.GOROUTINE_COMMAND
# View group in Delve panel
def __get_goroutine_group(self):
return self.__get_settings("%s_%s" % (self.GOROUTINE_VIEW, self.__panel_group_suffix), 3)
# Open view when debugging starts
def __get_goroutine_open_at_start(self):
return self.__get_settings("%s_%s" % (self.GOROUTINE_VIEW, self.__open_at_start_suffix), True)
# Close view when debugging stops
def __get_goroutine_close_at_stop(self):
return self.__get_settings("%s_%s" % (self.GOROUTINE_VIEW, self.__close_at_stop_suffix), True)
# View title
def __get_goroutine_title(self):
return self.__get_settings("%s_%s" % (self.GOROUTINE_VIEW, self.__title_suffix), 'Delve Gorounites')
# View name
@property
def VARIABLE_VIEW(self):
return self.VARIABLE_COMMAND
# View group in Delve panel
def __get_variable_group(self):
return self.__get_settings("%s_%s" % (self.VARIABLE_VIEW, self.__panel_group_suffix), 1)
# Open view when debugging starts
def __get_variable_open_at_start(self):
return self.__get_settings("%s_%s" % (self.VARIABLE_VIEW, self.__open_at_start_suffix), True)
# Close view when debugging stops
def __get_variable_close_at_stop(self):
return self.__get_settings("%s_%s" % (self.VARIABLE_VIEW, self.__close_at_stop_suffix), True)
# View title
def __get_variable_title(self):
return self.__get_settings("%s_%s" % (self.VARIABLE_VIEW, self.__title_suffix), 'Delve Variables')
# View name
@property
def WATCH_VIEW(self):
return self.WATCH_COMMAND
# View group in Delve panel
def __get_watch_group(self):
return self.__get_settings("%s_%s" % (self.WATCH_VIEW, self.__panel_group_suffix), 2)
# Open view when debugging starts
def __get_watch_open_at_start(self):
return self.__get_settings("%s_%s" % (self.WATCH_VIEW, self.__open_at_start_suffix), True)
# Close view when debugging stops
def __get_watch_close_at_stop(self):
return self.__get_settings("%s_%s" % (self.WATCH_VIEW, self.__close_at_stop_suffix), True)
# View title
def __get_watch_title(self):
return self.__get_settings("%s_%s" % (self.WATCH_VIEW, self.__title_suffix), 'Delve Watches')
# View name
@property
def SESSION_VIEW(self):
return "session"
# View group in Delve panel
def __get_session_group(self):
return self.__get_settings("%s_%s" % (self.SESSION_VIEW, self.__panel_group_suffix), 1)
# Open view when debugging starts
def __get_session_open_at_start(self):
return True
# Close view when debugging stops
def __get_session_close_at_stop(self):
return True
# View title
def __get_session_title(self):
return self.__get_settings("%s_%s" % (self.SESSION_VIEW, self.__title_suffix), 'Delve Session')
# View name
@property
def CONSOLE_VIEW(self):
return "console"
# View group in Delve panel
def __get_console_group(self):
return self.__get_settings("%s_%s" % (self.CONSOLE_VIEW, self.__panel_group_suffix), 1)
# Open view when debugging starts
def __get_console_open_at_start(self):
return self.__get_settings("%s_%s" % (self.CONSOLE_VIEW, self.__open_at_start_suffix), True)
# Close view when debugging stops
def __get_console_close_at_stop(self):
return self.__get_settings("%s_%s" % (self.CONSOLE_VIEW, self.__close_at_stop_suffix), True)
# View title
def __get_console_title(self):
return self.__get_settings("%s_%s" % (self.CONSOLE_VIEW, self.__title_suffix), 'Delve Console')
# View name
@property
def BREAKPOINT_VIEW(self):
return "breakpoints"
# View group in Delve panel
def __get_breakpoint_group(self):
return self.__get_settings("%s_%s" % (self.BREAKPOINT_VIEW, self.__panel_group_suffix), 3)
# Open view when debugging starts
def __get_breakpoint_open_at_start(self):
return self.__get_settings("%s_%s" % (self.BREAKPOINT_VIEW, self.__open_at_start_suffix), True)
# Close view when debugging stops
def __get_breakpoint_close_at_stop(self):
return self.__get_settings("%s_%s" % (self.BREAKPOINT_VIEW, self.__close_at_stop_suffix), True)
# View title
def __get_breakpoint_title(self):
return self.__get_settings("%s_%s" % (self.BREAKPOINT_VIEW, self.__title_suffix), 'Delve Breakpoints')
|
410252
|
from unittest import TestCase
import phi
class TestCIInstallation(TestCase):
def test_detect_tf_torch_jax(self):
backends = phi.detect_backends()
names = [b.name for b in backends]
self.assertIn('PyTorch', names)
self.assertIn('Jax', names)
self.assertIn('TensorFlow', names)
def test_verify(self):
phi.verify()
|
410270
|
import json
from .wkutils import WebkitObject, Command
def evaluate(expression, objectGroup=None, returnByValue=None):
params = {}
params['expression'] = expression
if(objectGroup):
params['objectGroup'] = objectGroup
if(returnByValue):
params['returnByValue'] = returnByValue
command = Command('Runtime.evaluate', params)
return command
def evaluate_parser(result):
data = RemoteObject(result['result'])
return data
def getProperties(objectId, ownProperties=False):
params = {}
params['objectId'] = str(objectId)
params['ownProperties'] = ownProperties
command = Command('Runtime.getProperties', params)
return command
def getProperties_parser(result):
data = []
for propertyDescriptor in result['result']:
data.append(PropertyDescriptor(propertyDescriptor))
return data
class RemoteObject(WebkitObject):
def __init__(self, value):
self.set(value, 'className')
self.set(value, 'description')
self.set_class(value, 'objectId', RemoteObjectId)
self.set(value, 'subtype')
self.set(value, 'type')
self.set(value, 'value')
def __str__(self):
if self.type == 'boolean':
return str(self.value)
if self.type == 'string':
return str(self.value)
if self.type == 'undefined':
return 'undefined'
if self.type == 'number':
return str(self.value)
if self.type == 'object':
if not self.objectId:
return 'null'
else:
if self.className:
return self.className
if self.description:
return self.description
return '{ ... }'
if self.type == 'function':
return self.description.split('\n')[0]
class PropertyDescriptor(WebkitObject):
def __init__(self, _value):
self.set(_value, 'configurable')
self.set(_value, 'enumerable')
#self.set_class(_value, 'get', RemoteObject)
#self.set_class(_value, 'set', RemoteObject)
self.set(_value, 'name')
self.set_class(_value, 'value', RemoteObject)
self.set(_value, 'wasThrown')
self.set(_value, 'writable')
def __str__(self):
return self.name
class RemoteObjectId(WebkitObject):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __call__(self):
return self.value
def dumps(self):
objid = json.loads(self.value)
return "Object_%d_%d" % (objid['injectedScriptId'], objid['id'])
def loads(self, text):
parts = text.split('_')
self.value = '{"injectedScriptId":%s,"id":%s}' % (parts[1], parts[2])
return self.value
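# Hedged round-trip example for the objectId encoding above (values are illustrative only):
#
#   oid = RemoteObjectId('{"injectedScriptId":1,"id":42}')
#   oid.dumps()               # -> 'Object_1_42'
#   oid.loads('Object_1_42')  # -> '{"injectedScriptId":1,"id":42}'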
|
410302
|
from pathlib import Path
import aiosqlite
import pytest
from proj.server import try_make_db
@pytest.fixture
def db_path(tmp_path: Path) -> Path:
path = tmp_path / "test_sqlite.db"
try_make_db(path)
return path
@pytest.fixture
async def db(db_path: Path) -> aiosqlite.Connection:
conn = await aiosqlite.connect(db_path)
conn.row_factory = aiosqlite.Row
yield conn
await conn.close()
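# Hedged example of a test consuming the fixtures above (added; assumes an asyncio-enabled
# pytest setup such as pytest-asyncio, and makes no assumption about the schema created
# by try_make_db):
#
#   async def test_connection_is_usable(db: aiosqlite.Connection) -> None:
#       async with db.execute("SELECT 1") as cursor:
#           row = await cursor.fetchone()
#       assert row[0] == 1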
|
410348
|
from __future__ import absolute_import, print_function, division
import unittest
import numpy
import theano
from theano.tests import unittest_tools as utt
# Skip tests if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda_ndarray
from theano.misc.pycuda_init import pycuda_available
from theano.sandbox.cuda.cula import cula_available
from theano.sandbox.cuda import cula
if not cuda_ndarray.cuda_available:
raise SkipTest('Optional package cuda not available')
if not pycuda_available:
raise SkipTest('Optional package pycuda not available')
if not cula_available:
raise SkipTest('Optional package scikits.cuda.cula not available')
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
class TestCula(unittest.TestCase):
def run_gpu_solve(self, A_val, x_val):
b_val = numpy.dot(A_val, x_val)
A = theano.tensor.matrix("A", dtype="float32")
b = theano.tensor.matrix("b", dtype="float32")
solver = cula.gpu_solve(A, b)
fn = theano.function([A, b], [solver])
res = fn(A_val, b_val)
x_res = numpy.array(res[0])
utt.assert_allclose(x_res, x_val)
def test_diag_solve(self):
numpy.random.seed(1)
A_val = numpy.asarray([[2, 0, 0], [0, 1, 0], [0, 0, 1]],
dtype="float32")
x_val = numpy.random.uniform(-0.4, 0.4, (A_val.shape[1],
1)).astype("float32")
self.run_gpu_solve(A_val, x_val)
def test_sym_solve(self):
numpy.random.seed(1)
A_val = numpy.random.uniform(-0.4, 0.4, (5, 5)).astype("float32")
A_sym = (A_val + A_val.T) / 2.0
x_val = numpy.random.uniform(-0.4, 0.4, (A_val.shape[1],
1)).astype("float32")
self.run_gpu_solve(A_sym, x_val)
def test_orth_solve(self):
numpy.random.seed(1)
A_val = numpy.random.uniform(-0.4, 0.4, (5, 5)).astype("float32")
A_orth = numpy.linalg.svd(A_val)[0]
x_val = numpy.random.uniform(-0.4, 0.4, (A_orth.shape[1],
1)).astype("float32")
self.run_gpu_solve(A_orth, x_val)
def test_uni_rand_solve(self):
numpy.random.seed(1)
A_val = numpy.random.uniform(-0.4, 0.4, (5, 5)).astype("float32")
x_val = numpy.random.uniform(-0.4, 0.4,
(A_val.shape[1], 4)).astype("float32")
self.run_gpu_solve(A_val, x_val)
|
410351
|
import sys
import os
import time
import threading
import torch
from torch.autograd import Variable
import torch.utils.data
from lr_scheduler import *
import cv2
import numpy
from AverageMeter import *
from loss_function import *
import datasets
import balancedsampler
import networks
from my_args import args
import copy
import random
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
META_ALGORITHM = args.meta # [MAML, Reptile]
TRAIN_ITER_CUT = 1e6 if args.train_iter==-1 else args.train_iter
VAL_ITER_CUT = 1e6 if args.val_iter==-1 else args.val_iter
def crop(im, maxH=640, maxW=1280): # crop images if too big (causes out-of-memory error)
# im.size() : NCHW
H, W = im.size(2), im.size(3)
return im[:, :, :min(H, maxH), :min(W, maxW)].clone()
def train():
torch.manual_seed(args.seed)
model = networks.__dict__[args.netName](channel=args.channels,
filter_size = args.filter_size ,
timestep=args.time_step,
training=True)
original_model = networks.__dict__[args.netName](channel=args.channels,
filter_size = args.filter_size ,
timestep=args.time_step,
training=True)
if args.use_cuda:
print("Turn the model into CUDA")
model = model.cuda()
original_model = original_model.cuda()
if args.SAVED_MODEL is not None:
args.SAVED_MODEL ='./model_weights/'+ args.SAVED_MODEL + "/best" + ".pth"
print("Fine tuning on " + args.SAVED_MODEL)
if not args.use_cuda:
pretrained_dict = torch.load(args.SAVED_MODEL, map_location=lambda storage, loc: storage)
# model.load_state_dict(torch.load(args.SAVED_MODEL, map_location=lambda storage, loc: storage))
else:
pretrained_dict = torch.load(args.SAVED_MODEL)
# model.load_state_dict(torch.load(args.SAVED_MODEL))
#print([k for k,v in pretrained_dict.items()])
model_dict = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
model.load_state_dict(model_dict)
# For comparison in meta training
original_model.load_state_dict(model_dict)
pretrained_dict = None
if type(args.datasetName) == list:
train_sets, test_sets = [],[]
for ii, jj in zip(args.datasetName, args.datasetPath):
tr_s, te_s = datasets.__dict__[ii](jj, split = args.dataset_split,single = args.single_output, task = args.task)
train_sets.append(tr_s)
test_sets.append(te_s)
train_set = torch.utils.data.ConcatDataset(train_sets)
test_set = torch.utils.data.ConcatDataset(test_sets)
else:
train_set, test_set = datasets.__dict__[args.datasetName](args.datasetPath)
train_loader = torch.utils.data.DataLoader(
train_set, batch_size = args.batch_size,
sampler=balancedsampler.RandomBalancedSampler(train_set, int(len(train_set) / args.batch_size )),
num_workers= args.workers, pin_memory=True if args.use_cuda else False)
val_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=True if args.use_cuda else False)
print('{} samples found, {} train samples and {} test samples '.format(len(test_set)+len(train_set),
len(train_set),
len(test_set)))
# if not args.lr == 0:
print("train the interpolation net")
'''optimizer = torch.optim.Adamax([
#{'params': model.initScaleNets_filter.parameters(), 'lr': args.filter_lr_coe * args.lr},
#{'params': model.initScaleNets_filter1.parameters(), 'lr': args.filter_lr_coe * args.lr},
#{'params': model.initScaleNets_filter2.parameters(), 'lr': args.filter_lr_coe * args.lr},
#{'params': model.ctxNet.parameters(), 'lr': args.ctx_lr_coe * args.lr},
#{'params': model.flownets.parameters(), 'lr': args.flow_lr_coe * args.lr},
#{'params': model.depthNet.parameters(), 'lr': args.depth_lr_coe * args.lr},
{'params': model.rectifyNet.parameters(), 'lr': args.rectify_lr}
],
#lr=args.lr, momentum=0, weight_decay=args.weight_decay)
lr=args.lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=args.weight_decay)'''
optimizer = torch.optim.Adamax(model.rectifyNet.parameters(), lr=args.outer_lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=args.weight_decay)
# Fix weights for early layers
for param in model.initScaleNets_filter.parameters():
param.requires_grad = False
for param in model.initScaleNets_filter1.parameters():
param.requires_grad = False
for param in model.initScaleNets_filter2.parameters():
param.requires_grad = False
for param in model.ctxNet.parameters():
param.requires_grad = False
for param in model.flownets.parameters():
param.requires_grad = False
for param in model.depthNet.parameters():
param.requires_grad = False
scheduler = ReduceLROnPlateau(optimizer, 'min',factor=args.factor, patience=args.patience,verbose=True)
print("*********Start Training********")
print("LR is: "+ str(float(optimizer.param_groups[0]['lr'])))
print("EPOCH is: "+ str(int(len(train_set) / args.batch_size )))
print("Num of EPOCH is: "+ str(args.numEpoch))
def count_network_parameters(model):
parameters = filter(lambda p: p.requires_grad, model.parameters())
N = sum([numpy.prod(p.size()) for p in parameters])
return N
print("Num. of model parameters is :" + str(count_network_parameters(model)))
if hasattr(model,'flownets'):
print("Num. of flow model parameters is :" +
str(count_network_parameters(model.flownets)))
if hasattr(model,'initScaleNets_occlusion'):
print("Num. of initScaleNets_occlusion model parameters is :" +
str(count_network_parameters(model.initScaleNets_occlusion) +
count_network_parameters(model.initScaleNets_occlusion1) +
count_network_parameters(model.initScaleNets_occlusion2)))
if hasattr(model,'initScaleNets_filter'):
print("Num. of initScaleNets_filter model parameters is :" +
str(count_network_parameters(model.initScaleNets_filter) +
count_network_parameters(model.initScaleNets_filter1) +
count_network_parameters(model.initScaleNets_filter2)))
if hasattr(model, 'ctxNet'):
print("Num. of ctxNet model parameters is :" +
str(count_network_parameters(model.ctxNet)))
if hasattr(model, 'depthNet'):
print("Num. of depthNet model parameters is :" +
str(count_network_parameters(model.depthNet)))
if hasattr(model,'rectifyNet'):
print("Num. of rectifyNet model parameters is :" +
str(count_network_parameters(model.rectifyNet)))
training_losses = AverageMeter()
#original_training_losses = AverageMeter()
batch_time = AverageMeter()
auxiliary_data = []
saved_total_loss = 10e10
saved_total_PSNR = -1
ikk = 0
for kk in optimizer.param_groups:
if kk['lr'] > 0:
ikk = kk
break
for t in range(args.numEpoch):
print("The id of this in-training network is " + str(args.uid))
print(args)
print("Learning rate for this epoch: %s" % str(round(float(ikk['lr']),7)))
#Turn into training mode
model = model.train()
#for i, (X0_half,X1_half, y_half) in enumerate(train_loader):
_t = time.time()
for i, images in enumerate(train_loader):
if i >= min(TRAIN_ITER_CUT, int(len(train_set) / args.batch_size )):
#(0 if t == 0 else EPOCH):#
break
if args.use_cuda:
images = [im.cuda() for im in images]
images = [Variable(im, requires_grad=False) for im in images]
# For VimeoTriplet
#X0, y, X1 = images[0], images[1], images[2]
# For VimeoSepTuplet
X0, y, X1 = images[2], images[3], images[4]
outerstepsize = args.outer_lr
k = args.num_inner_update # inner loop update iteration
inner_optimizer = torch.optim.Adamax(model.rectifyNet.parameters(),
lr=args.inner_lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=args.weight_decay)
if META_ALGORITHM == "Reptile":
# Reptile setting
weights_before = copy.deepcopy(model.state_dict())
for _k in range(k):
indices = [ [0, 2, 4], [2, 4, 6], [2, 3, 4], [0, 1, 2], [4, 5, 6] ]
total_loss = 0
for ind in indices:
meta_X0, meta_y, meta_X1 = images[ind[0]].clone(), images[ind[1]].clone(), images[ind[2]].clone()
diffs, offsets, filters, occlusions = model(torch.stack((meta_X0, meta_y, meta_X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [meta_X0, meta_X1], epsilon=args.epsilon)
_total_loss = sum(x*y if x > 0 else 0 for x,y in zip(args.alpha, pixel_loss))
total_loss = total_loss + _total_loss
# total *= 2 / len(indices)
inner_optimizer.zero_grad()
total_loss.backward()
inner_optimizer.step()
# Reptile update
weights_after = model.state_dict()
model.load_state_dict({name :
weights_before[name] + (weights_after[name] - weights_before[name]) * outerstepsize
for name in weights_before})
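# The interpolation above is the Reptile outer update: w <- w_before + outerstepsize * (w_after - w_before),
# i.e. move the pre-adaptation weights towards the adapted weights; for a small outer step this
# approximates a gradient step on the meta-objective (comment added for clarity).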
with torch.no_grad():
diffs, offsets, filters, occlusions = model(torch.stack((X0, y, X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [X0, X1], epsilon=args.epsilon)
total_loss = sum(x*y if x > 0 else 0 for x,y in zip(args.alpha, pixel_loss))
training_losses.update(total_loss.item(), args.batch_size)
elif META_ALGORITHM == "MAML":
#weights_before = copy.deepcopy(model.state_dict())
base_model = copy.deepcopy(model)
#fast_weights = list(filter(lambda p: p.requires_grad, model.parameters()))
for _k in range(k):
indices = [ [0, 2, 4], [2, 4, 6] ]
support_loss = 0
for ind in indices:
meta_X0, meta_y, meta_X1 = images[ind[0]].clone(), images[ind[1]].clone(), images[ind[2]].clone()
diffs, offsets, filters, occlusions = model(torch.stack((meta_X0, meta_y, meta_X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [meta_X0, meta_X1], epsilon=args.epsilon)
_total_loss = sum(x*y if x > 0 else 0 for x,y in zip(args.alpha, pixel_loss))
support_loss = support_loss + _total_loss
#grad = torch.autograd.grad(loss, fast_weights)
#fast_weights = list(map(lambda p: p[1] - args.lr * p[0], zip(grad, fast_weights)))
inner_optimizer.zero_grad()
support_loss.backward() # create_graph=True
inner_optimizer.step()
# Forward on query set
diffs, offsets, filters, occlusions = model(torch.stack((X0, y, X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [X0, X1], epsilon=args.epsilon)
total_loss = sum(x*y if x > 0 else 0 for x,y in zip(args.alpha, pixel_loss))
training_losses.update(total_loss.item(), args.batch_size)
# copy parameters back so as to connect the computational graph
for param, base_param in zip(model.rectifyNet.parameters(), base_model.rectifyNet.parameters()):
param.data = base_param.data
filtered_params = filter(lambda p: p.requires_grad, model.parameters())
optimizer.zero_grad()
grads = torch.autograd.grad(total_loss, list(filtered_params)) # backward on weights_before: FO-MAML
j = 0
#print('[before update]')
#print(list(model.parameters())[45][-1])
for _i, param in enumerate(model.parameters()):
if param.requires_grad:
#param = param - outerstepsize * grads[j]
param.grad = grads[j]
j += 1
optimizer.step()
#print('[after optim.step]')
#print(list(model.parameters())[45][-1])
batch_time.update(time.time() - _t)
_t = time.time()
if i % 100 == 0: #max(1, int(int(len(train_set) / args.batch_size )/500.0)) == 0:
print("Ep[%s][%05d/%d] Time: %.2f Pix: %s TV: %s Sym: %s Total: %s Avg. Loss: %s" % (
str(t), i, int(len(train_set)) // args.batch_size,
batch_time.avg,
str([round(x.item(),5) for x in pixel_loss]),
str([round(x.item(),4) for x in offset_loss]),
str([round(x.item(), 4) for x in sym_loss]),
str([round(x.item(),5) for x in [total_loss]]),
str([round(training_losses.avg, 5)]) ))
batch_time.reset()
if t == 1:
# delete the pre-validation weights for a cleaner workspace
if os.path.exists(args.save_path + "/epoch" + str(0) +".pth" ):
os.remove(args.save_path + "/epoch" + str(0) +".pth")
if os.path.exists(args.save_path + "/epoch" + str(t-1) +".pth"):
os.remove(args.save_path + "/epoch" + str(t-1) +".pth")
torch.save(model.state_dict(), args.save_path + "/epoch" + str(t) +".pth")
# print("\t\t**************Start Validation*****************")
#Turn into evaluation mode
val_total_losses = AverageMeter()
val_total_pixel_loss = AverageMeter()
val_total_PSNR_loss = AverageMeter()
val_total_tv_loss = AverageMeter()
val_total_pws_loss = AverageMeter()
val_total_sym_loss = AverageMeter()
for i, (images, imgpaths) in enumerate(tqdm(val_loader)):
#if i < 50: #i < 11 or (i > 14 and i < 50):
# continue
if i >= min(VAL_ITER_CUT, int(len(test_set)/ args.batch_size)):
break
if args.use_cuda:
images = [im.cuda() for im in images]
#X0, y, X1 = images[0], images[1], images[2]
#X0, y, X1 = images[2], images[3], images[4]
# define optimizer to update the inner loop
inner_optimizer = torch.optim.Adamax(model.rectifyNet.parameters(),
lr=args.inner_lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=args.weight_decay)
# Reptile testing - save base model weights
weights_base = copy.deepcopy(model.state_dict())
k = args.num_inner_update # 2
model.train()
for _k in range(k):
indices = [ [0, 2, 4], [2, 4, 6] ]
ind = indices[_k % 2]
meta_X0, meta_y, meta_X1 = crop(images[ind[0]]), crop(images[ind[1]]), crop(images[ind[2]])
diffs, offsets, filters, occlusions, _ = model(torch.stack((meta_X0, meta_y, meta_X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [meta_X0, meta_X1], epsilon=args.epsilon)
total_loss = sum(x*y if x > 0 else 0 for x,y in zip(args.alpha, pixel_loss))
inner_optimizer.zero_grad()
total_loss.backward()
inner_optimizer.step()
# Actual target validation performance
with torch.no_grad():
if args.datasetName == 'Vimeo_90K_sep':
X0, y, X1 = images[2], images[3], images[4]
#diffs, offsets,filters,occlusions = model(torch.stack((X0,y,X1),dim = 0))
diffs, offsets,filters,occlusions, output = model(torch.stack((X0,y,X1),dim = 0))
pixel_loss, offset_loss,sym_loss = part_loss(diffs, offsets, occlusions, [X0,X1],epsilon=args.epsilon)
val_total_loss = sum(x * y for x, y in zip(args.alpha, pixel_loss))
per_sample_pix_error = torch.mean(torch.mean(torch.mean(diffs[args.save_which] ** 2, dim=1), dim=1), dim=1)
per_sample_pix_error = per_sample_pix_error.data # extract tensor
psnr_loss = torch.mean(20 * torch.log(1.0/torch.sqrt(per_sample_pix_error)))/torch.log(torch.Tensor([10]))
val_total_losses.update(val_total_loss.item(),args.batch_size)
val_total_pixel_loss.update(pixel_loss[args.save_which].item(), args.batch_size)
val_total_tv_loss.update(offset_loss[0].item(), args.batch_size)
val_total_sym_loss.update(sym_loss[0].item(), args.batch_size)
val_total_PSNR_loss.update(psnr_loss[0],args.batch_size)
else: # HD_dataset testing
for j in range(len(images) // 2):
mH, mW = 720, 1280
X0, y, X1 = crop(images[2*j], maxH=mH, maxW=mW), crop(images[2*j+1], maxH=mH, maxW=mW), crop(images[2*j+2], maxH=mH, maxW=mW)
diffs, offsets,filters,occlusions , output = model(torch.stack((X0,y,X1),dim = 0))
pixel_loss, offset_loss,sym_loss = part_loss(diffs, offsets, occlusions, [X0,X1],epsilon=args.epsilon)
val_total_loss = sum(x * y for x, y in zip(args.alpha, pixel_loss))
per_sample_pix_error = torch.mean(torch.mean(torch.mean(diffs[args.save_which] ** 2, dim=1), dim=1), dim=1)
per_sample_pix_error = per_sample_pix_error.data # extract tensor
psnr_loss = torch.mean(20 * torch.log(1.0/torch.sqrt(per_sample_pix_error)))/torch.log(torch.Tensor([10]))
val_total_losses.update(val_total_loss.item(),args.batch_size)
val_total_pixel_loss.update(pixel_loss[args.save_which].item(), args.batch_size)
val_total_tv_loss.update(offset_loss[0].item(), args.batch_size)
val_total_sym_loss.update(sym_loss[0].item(), args.batch_size)
val_total_PSNR_loss.update(psnr_loss[0],args.batch_size)
# Reset model to its base weights
model.load_state_dict(weights_base)
#del weights_base, inner_optimizer, meta_X0, meta_y, meta_X1, X0, y, X1, pixel_loss, offset_loss, sym_loss, total_loss, val_total_loss, diffs, offsets, filters, occlusions
VIZ = False
exp_name = 'meta_test'
if VIZ:
for b in range(images[0].size(0)):
imgpath = imgpaths[0][b]
savepath = os.path.join('checkpoint', exp_name, 'vimeoSeptuplet', imgpath.split('/')[-3], imgpath.split('/')[-2])
if not os.path.exists(savepath):
os.makedirs(savepath)
img_pred = (output[b].data.permute(1, 2, 0).clamp_(0, 1).cpu().numpy()[..., ::-1] * 255).astype(numpy.uint8)
cv2.imwrite(os.path.join(savepath, 'im2_pred.png'), img_pred)
''' # Original validation (not meta)
with torch.no_grad():
if args.use_cuda:
images = [im.cuda() for im in images]
#X0, y, X1 = images[0], images[1], images[2]
X0, y, X1 = images[2], images[3], images[4]
#diffs, offsets,filters,occlusions = model(torch.stack((X0,y,X1),dim = 0))
pixel_loss, offset_loss,sym_loss = part_loss(diffs, offsets, occlusions, [X0,X1],epsilon=args.epsilon)
val_total_loss = sum(x * y for x, y in zip(args.alpha, pixel_loss))
per_sample_pix_error = torch.mean(torch.mean(torch.mean(diffs[args.save_which] ** 2,
dim=1),dim=1),dim=1)
per_sample_pix_error = per_sample_pix_error.data # extract tensor
psnr_loss = torch.mean(20 * torch.log(1.0/torch.sqrt(per_sample_pix_error)))/torch.log(torch.Tensor([10]))
#
val_total_losses.update(val_total_loss.item(),args.batch_size)
val_total_pixel_loss.update(pixel_loss[args.save_which].item(), args.batch_size)
val_total_tv_loss.update(offset_loss[0].item(), args.batch_size)
val_total_sym_loss.update(sym_loss[0].item(), args.batch_size)
val_total_PSNR_loss.update(psnr_loss[0],args.batch_size)
print(".",end='',flush=True)
'''
print("\nEpoch " + str(int(t)) +
"\tlearning rate: " + str(float(ikk['lr'])) +
"\tAvg Training Loss: " + str(round(training_losses.avg,5)) +
"\tValidate Loss: " + str([round(float(val_total_losses.avg), 5)]) +
"\tValidate PSNR: " + str([round(float(val_total_PSNR_loss.avg), 5)]) +
"\tPixel Loss: " + str([round(float(val_total_pixel_loss.avg), 5)]) +
"\tTV Loss: " + str([round(float(val_total_tv_loss.avg), 4)]) +
"\tPWS Loss: " + str([round(float(val_total_pws_loss.avg), 4)]) +
"\tSym Loss: " + str([round(float(val_total_sym_loss.avg), 4)])
)
auxiliary_data.append([t, float(ikk['lr']),
training_losses.avg, val_total_losses.avg, val_total_pixel_loss.avg,
val_total_tv_loss.avg,val_total_pws_loss.avg,val_total_sym_loss.avg])
numpy.savetxt(args.log, numpy.array(auxiliary_data), fmt='%.8f', delimiter=',')
training_losses.reset()
#original_training_losses.reset()
print("\t\tFinished an epoch, Check and Save the model weights")
# check the validation loss instead of the training loss
if saved_total_loss >= val_total_losses.avg:
saved_total_loss = val_total_losses.avg
torch.save(model.state_dict(), args.save_path + "/best"+".pth")
print("\t\tBest Weights updated for decreased validation loss\n")
else:
print("\t\tWeights Not updated for undecreased validation loss\n")
#schdule the learning rate
scheduler.step(val_total_losses.avg)
print("*********Finish Training********")
if __name__ == '__main__':
sys.setrecursionlimit(100000)  # avoid a 0xC00000FD (stack overflow) exception from the recursive detach of gradients
threading.stack_size(200000000)  # run training in a thread with a larger stack for the same reason
thread = threading.Thread(target=train)
thread.start()
thread.join()
exit(0)
|
410352
|
import bisect as bs
#import time #test
# the list needs to be sorted first!
if __name__ == '__main__':
s = raw_input("What do yo wan to search? ")
#s = "hey"
#st = time.time()
l = []
with open('words.txt', 'r') as fin:
for line in fin:
word = line.strip()
l.append(word)
print bs.bisect(l, s) # insertion point; equals the 1-based position when the word is present
#bs.bisect(l, s)
#et = time.time() - st
#print et
|
410389
|
import json
import uuid
import dataclasses
import urllib.request
from typing import Union
from .base import DependencyDB
from ..base import Dependency, DependencyExtra, URL, License
@dataclasses.dataclass
class PyPiDependency(DependencyExtra):
name: str
url: URL
license: License
class PyPiDB(DependencyDB):
name: str = "pypi"
uuid: "uuid.UUID" = uuid.UUID("3bdad2ca-3224-45d7-9035-a1f66e318baf")
def lookup(self, dependency: Dependency) -> Dependency:
# Grab the package data from PyPi
with urllib.request.urlopen(
f"https://pypi.org/pypi/{dependency.name}/json"
) as resp:
package_json = json.load(resp)
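# override the dependency's url/license with the PyPI metadata and attach a PyPi-specific record under "extra"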
return Dependency.mkoverride(
dependency,
url=package_json["info"]["project_urls"]["Homepage"],
license=package_json["info"]["license"],
extra={
self.name: PyPiDependency(
uuid=None,
euuid=uuid.uuid5(self.uuid, dependency.name),
name=dependency.name,
url=f"https://pypi.org/pypi/{dependency.name}",
license=package_json["info"]["license"],
)
},
)
def extra(self, dependency: Dependency) -> Union[None, Dependency]:
raise NotImplementedError
# TODO Implement 404 catch and return None
url = f"https://pypi.org/pypi/{dependency.name}/json"
with urllib.request.urlopen(url) as resp:
package_json = json.load(resp)
return PyPiDependency(
uuid=None,
euuid=uuid.uuid5(self.uuid, dependency.name),
name=dependency.name,
url=f"https://pypi.org/pypi/{dependency.name}",
license=package_json["info"]["license"],
)
@classmethod
def applicable(cls, config: str) -> bool:
return bool(config == cls.name)
|
410394
|
import FWCore.ParameterSet.Config as cms
CSCFakeGainsConditions = cms.ESSource("CSCFakeGainsConditions")
CSCFakePedestalsConditions = cms.ESSource("CSCFakePedestalsConditions")
CSCFakeNoiseMatrixConditions = cms.ESSource("CSCFakeNoiseMatrixConditions")
CSCFakeCrosstalkConditions = cms.ESSource("CSCFakeCrosstalkConditions")
|
410428
|
import logging
class ActivityLog:
__instance = None
def __init__(
self,
log_name="activitiy",
log_path="activitiy.log",
log_format="%(asctime)s [%(levelname)s]: %(message)s",
log_level=logging.INFO,
):
if ActivityLog.__instance is not None:
raise Exception("This class is a singleton!")
self.logger = logging.getLogger(log_name)
self.logger.setLevel(log_level)
logger_file_handler = logging.FileHandler(log_path)
logger_file_handler.setLevel(log_level)
logger_file_handler.setFormatter(logging.Formatter(log_format))
self.logger.addHandler(logger_file_handler)
ActivityLog.__instance = self
@staticmethod
def getInstance():
""" Static access method """
if ActivityLog.__instance is None:
ActivityLog()
return ActivityLog.__instance
|
410472
|
import unittest
from flask import Flask, request, Response
from werkzeug.exceptions import NotFound
from flask_jsontools import jsonapi, FlaskJsonClient, JsonResponse, make_json_response
class TestJsonApi(unittest.TestCase):
def setUp(self):
# Database
users = [
{'id': 1, 'name': 'a'},
{'id': 2, 'name': 'b'},
{'id': 3, 'name': 'c'},
]
# Init app
self.app = app = Flask(__name__)
self.app.debug = self.app.testing = True
self.app.test_client_class = FlaskJsonClient
# Views
@app.route('/user', methods=['GET'])
@jsonapi
def list_users():
# Just list users
return users
@app.route('/user/<int:id>', methods=['GET'])
@jsonapi
def get_user(id):
# Return a user, or http not found
# Use list_users()
try:
return {'user': list_users()[id-1]}
except IndexError:
raise NotFound('User #{} not found'.format(id))
@app.route('/user/<int:id>', methods=['PATCH'])
@jsonapi
def patch_user(id):
# Try custom http codes
if id == 1:
return {'error': 'Denied'}, 403
# Try PATCH method
req = request.get_json()
users[id-1] = req['user']
return users[id-1]
@app.route('/user/<int:id>', methods=['DELETE'])
def delete_user(id):
# Try returning JsonResponse
if id == 1:
return JsonResponse({'error': 'Denied'}, 403)
# Try DELETE method
del users[id-1]
return make_json_response(True)
def testList(self):
""" Test GET /user: returning json objects """
with self.app.test_client() as c:
rv = c.get('/user')
self.assertEqual(rv.status_code, 200)
self.assertIsInstance(rv, JsonResponse)
self.assertEqual(rv.get_json(), [ {'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}, {'id': 3, 'name': 'c'} ])
def testGet(self):
""" Test GET /user/<id>: HTTP Errors """
with self.app.test_client() as c:
# JSON user
rv = c.get('/user/1')
self.assertEqual(rv.status_code, 200)
self.assertIsInstance(rv, JsonResponse)
self.assertEqual(rv.get_json(), {'user': {'id': 1, 'name': 'a'} })
# Text HTTP
rv = c.get('/user/99')
self.assertEqual(rv.status_code, 404)
self.assertIsInstance(rv, Response)
self.assertIn('User #99 not found', str(rv.get_data()))
def testUpdate(self):
""" Test PATCH /user/<id>: custom error codes, exceptions """
with self.app.test_client() as c:
# JSON error
rv = c.patch('/user/1')
self.assertEqual(rv.status_code, 403)
self.assertIsInstance(rv, JsonResponse)
self.assertEqual(rv.get_json(), {'error': 'Denied'})
# JSON user
rv = c.patch('/user/2', {'user': {'id': 2, 'name': 'bbb'}})
self.assertEqual(rv.status_code, 200)
self.assertIsInstance(rv, JsonResponse)
self.assertEqual(rv.get_json(), {'id': 2, 'name': 'bbb'})
# IndexError
self.assertRaises(IndexError, c.patch, '/user/99', {'user': {}})
def testDelete(self):
""" Test DELETE /user/<id>: using JsonResponse """
with self.app.test_client() as c:
# JsonResponse
rv = c.delete('/user/1')
self.assertEqual(rv.status_code, 403)
self.assertIsInstance(rv, JsonResponse)
self.assertEqual(rv.get_json(), {'error': 'Denied'})
# make_json_response
rv = c.delete('/user/2')
self.assertEqual(rv.status_code, 200)
self.assertIsInstance(rv, JsonResponse)
self.assertEqual(rv.get_json(), True)
|
410500
|
import unittest, time, sys, random, math, getpass
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_util, h2o_print as h2p
def write_syn_dataset(csvPathname, rowCount, colCount, SEED, choices):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
naCnt = [0 for j in range(colCount)]
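# count the '0' entries written to each column; the test expects h2o to parse them as NAs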
for i in range(rowCount):
rowData = []
for j in range(colCount):
ri = r1.choice(choices)  # use the seeded Random instance so SEED is honored
if ri=='0' or ri==' 0':
naCnt[j] += 1
rowData.append(ri)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
return naCnt
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_summary2_NY0(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
choicesList = [
('N', 'Y', '0'),
('n', 'y', '0'),
('F', 'T', '0'),
('f', 't', '0'),
(' N', ' Y', ' 0'),
(' n', ' y', ' 0'),
(' F', ' T', ' 0'),
(' f', ' t', ' 0'),
]
# white space is stripped
expectedList = [
('N', 'Y', '0'),
('n', 'y', '0'),
('F', 'T', '0'),
('f', 't', '0'),
('N', 'Y', '0'),
('n', 'y', '0'),
('F', 'T', '0'),
('f', 't', '0'),
]
tryList = [
# (rowCount, colCount, hex_key, choices, expected)
(100, 200, 'x.hex', choicesList[4], expectedList[4]),
(100, 200, 'x.hex', choicesList[5], expectedList[5]),
(100, 200, 'x.hex', choicesList[6], expectedList[6]),
(100, 200, 'x.hex', choicesList[7], expectedList[7]),
(100, 200, 'x.hex', choicesList[3], expectedList[3]),
(1000, 200, 'x.hex', choicesList[2], expectedList[2]),
(10000, 200, 'x.hex', choicesList[1], expectedList[1]),
(100000, 200, 'x.hex', choicesList[0], expectedList[0]),
]
timeoutSecs = 10
trial = 1
n = h2o.nodes[0]
lenNodes = len(h2o.nodes)
x = 0
timeoutSecs = 60
for (rowCount, colCount, hex_key, choices, expected) in tryList:
# max error = half the bin size?
SEEDPERFILE = random.randint(0, sys.maxint)
x += 1
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
csvPathnameFull = h2i.find_folder_and_filename(None, csvPathname, returnFullPath=True)
print "Creating random", csvPathname
expectedNaCnt = write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE, choices)
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=10, doSummary=False)
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
numRows = inspect["numRows"]
numCols = inspect["numCols"]
summaryResult = h2o_cmd.runSummary(key=hex_key, noPrint=False, numRows=numRows, numCols=numCols)
h2o.verboseprint("summaryResult:", h2o.dump_json(summaryResult))
# check the summary of every column
for i in range(colCount):
column = summaryResult['summaries'][i]
colname = column['colname']
coltype = column['type']
nacnt = column['nacnt']
self.assertEqual(nacnt, expectedNaCnt[i], "Column %s Expected %s. nacnt %s incorrect" % (i, expectedNaCnt[i], nacnt))
stats = column['stats']
stattype= stats['type']
self.assertEqual(stattype, 'Enum')
# FIX! we should compare mean and sd to expected?
cardinality = stats['cardinality']
hstart = column['hstart']
hstep = column['hstep']
hbrk = column['hbrk']
self.assertEqual(hbrk, [expected[0], expected[1]])
hcnt = column['hcnt']
hcntTotal = hcnt[0] + hcnt[1]
self.assertEqual(hcntTotal, rowCount - expectedNaCnt[i])
self.assertEqual(rowCount, numRows,
msg="numRows %s should be %s" % (numRows, rowCount))
trial += 1
h2i.delete_keys_at_all_nodes()
if __name__ == '__main__':
h2o.unit_main()
|
410507
|
from netforce import migration
from netforce.model import get_model
from netforce.database import get_connection
class Migration(migration.Migration):
_name="account.multico"
_version="1.111.0"
def migrate(self):
db=get_connection()
print("accounts...")
db.execute("update account_account set company_id=1 where company_id is null")
print("journal entries...")
db.execute("update account_move set company_id=1 where company_id is null")
print("invoices...")
db.execute("update account_invoice set company_id=1 where company_id is null")
print("payments...")
db.execute("update account_payment set company_id=1 where company_id is null")
print("transfers...")
db.execute("update account_transfer set company_id=1 where company_id is null")
print("statements...")
db.execute("update account_statement set company_id=1 where company_id is null")
Migration.register()
|
410535
|
def lonely_integer(m):
answer = 0
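# XOR all values: paired numbers cancel, leaving the integer that appears only once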
for x in m:
answer = answer ^ x
return answer
a = int(input())
b = map(int, input().strip().split(" "))
print(lonely_integer(b))
|
410567
|
from __future__ import print_function
from memory_profiler import profile
import sys
from beem.steem import Steem
from beem.account import Account
from beem.blockchain import Blockchain
from beem.instance import set_shared_steem_instance, clear_cache
from beem.storage import configStorage as config
from beemapi.graphenerpc import GrapheneRPC
import logging
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
@profile
def profiling(node, name_list, shared_instance=True, clear_acc_cache=False, clear_all_cache=True):
print("shared_instance %d clear_acc_cache %d clear_all_cache %d" %
(shared_instance, clear_acc_cache, clear_all_cache))
if not shared_instance:
stm = Steem(node=node)
print(str(stm))
else:
stm = None
acc_dict = {}
for name in name_list:
acc = Account(name, steem_instance=stm)
acc_dict[name] = acc
if clear_acc_cache:
acc.clear_cache()
acc_dict = {}
if clear_all_cache:
clear_cache()
if not shared_instance:
del stm.rpc
if __name__ == "__main__":
stm = Steem()
print("Shared instance: " + str(stm))
set_shared_steem_instance(stm)
b = Blockchain()
account_list = []
for a in b.get_all_accounts(limit=500):
account_list.append(a)
shared_instance = False
clear_acc_cache = False
clear_all_cache = False
node = "https://api.steemit.com"
n = 3
for i in range(1, n + 1):
print("%d of %d" % (i, n))
profiling(node, account_list, shared_instance=shared_instance, clear_acc_cache=clear_acc_cache, clear_all_cache=clear_all_cache)
|
410584
|
class LogHolder():
def __init__(self, dirpath, name):
self.dirpath = dirpath
self.name = name
self.reset(suffix='train')
def write_to_file(self):
with open(self.dirpath + f'{self.name}-metrics-{self.suffix}.txt', 'a') as file:
for i in self.metric_holder:
file.write(str(i) + ' ')
with open(self.dirpath + f'{self.name}-loss-{self.suffix}.txt', 'a') as file:
for i in self.loss_holder:
file.write(str(i) + ' ')
def init_files(self, dirpath, name):
metric_file = open(dirpath + f'{name}-metrics-{self.suffix}.txt', 'w')
loss_file = open(dirpath + f'{name}-loss-{self.suffix}.txt', 'w')
metric_file.close()
loss_file.close()
return None
def write_metric(self, val):
if isinstance(val, list):
self.metric_holder.extend(val)
else:
self.metric_holder.append(val)
def write_loss(self, val):
self.loss_holder.append(val)
def reset(self, suffix):
self.metric_holder = []
self.loss_holder = []
self.suffix = suffix
self.init_files(self.dirpath, self.name)
|
410634
|
from .usage import *
ConsumerControl = Usage("consumer.ConsumerControl", 0xc0001, CA)
NumericKeyPad = Usage("consumer.NumericKeyPad", 0xc0002, NAry)
ProgrammableButtons = Usage("consumer.ProgrammableButtons", 0xc0003, NAry)
Microphone = Usage("consumer.Microphone", 0xc0004, CA)
Headphone = Usage("consumer.Headphone", 0xc0005, CA)
GraphicEqualizer = Usage("consumer.GraphicEqualizer", 0xc0006, CA)
Plus10 = Usage("consumer.Plus10", 0xc0020, OSC)
Plus100 = Usage("consumer.Plus100", 0xc0021, OSC)
AmPm = Usage("consumer.AmPm", 0xc0022, OSC)
Power = Usage("consumer.Power", 0xc0030, OOC)
Reset = Usage("consumer.Reset", 0xc0031, OSC)
Sleep = Usage("consumer.Sleep", 0xc0032, OSC)
SleepAfter = Usage("consumer.SleepAfter", 0xc0033, OSC)
SleepMode = Usage("consumer.SleepMode", 0xc0034, RTC)
Illumination = Usage("consumer.Illumination", 0xc0035, OOC)
FunctionButtons = Usage("consumer.FunctionButtons", 0xc0036, NAry)
Menu = Usage("consumer.Menu", 0xc0040, OOC)
MenuPick = Usage("consumer.MenuPick", 0xc0041, OSC)
MenuUp = Usage("consumer.MenuUp", 0xc0042, OSC)
MenuDown = Usage("consumer.MenuDown", 0xc0043, OSC)
MenuLeft = Usage("consumer.MenuLeft", 0xc0044, OSC)
MenuRight = Usage("consumer.MenuRight", 0xc0045, OSC)
MenuEscape = Usage("consumer.MenuEscape", 0xc0046, OSC)
MenuValueIncrease = Usage("consumer.MenuValueIncrease", 0xc0047, OSC)
MenuValueDecrease = Usage("consumer.MenuValueDecrease", 0xc0048, OSC)
DataOnScreen = Usage("consumer.DataOnScreen", 0xc0060, OOC)
ClosedCaption = Usage("consumer.ClosedCaption", 0xc0061, OOC)
ClosedCaptionSelect = Usage("consumer.ClosedCaptionSelect", 0xc0062, OSC)
VcrTv = Usage("consumer.VcrTv", 0xc0063, OOC)
BroadcastMode = Usage("consumer.BroadcastMode", 0xc0064, OSC)
Snapshot = Usage("consumer.Snapshot", 0xc0065, OSC)
Still = Usage("consumer.Still", 0xc0066, OSC)
# HUTRR 35
PipToggle = Usage("consumer.PipToggle", 0xc0067, OSC)
PipSwap = Usage("consumer.PipSwap", 0xc0068, OSC)
# HUTRR36
Red = Usage("consumer.Red", 0xc0069, MC)
Green = Usage("consumer.Green", 0xc006a, MC)
Blue = Usage("consumer.Blue", 0xc006b, MC)
Yellow = Usage("consumer.Yellow", 0xc006c, MC)
# HUTRR37
Aspect = Usage("consumer.Aspect", 0xc006d, OSC)
# HUTRR38
ThreeDMode = Usage("consumer.ThreeDMode", 0xc006e, OSC)
# HUTRR41
DisplayBrightnessIncrement = Usage("consumer.DisplayBrightnessIncrement", 0xc006F, RTC)
DisplayBrightnessDecrement = Usage("consumer.DisplayBrightnessDecrement", 0xc0070, RTC)
DisplayBrightness = Usage("consumer.DisplayBrightness", 0xc0071, LC)
DisplayBacklightToggle = Usage("consumer.DisplayBacklightToggle", 0xc0072, OOC)
DisplaySetBrightnesstoMinimum = Usage("consumer.DisplaySetBrightnesstoMinimum", 0xc0073, OSC)
DisplaySetBrightnesstoMaximum = Usage("consumer.DisplaySetBrightnesstoMaximum", 0xc0074, OSC)
DisplaySetAutoBrightness = Usage("consumer.DisplaySetAutoBrightness", 0xc0075, OOC)
Selection = Usage("consumer.Selection", 0xc0080, NAry)
AssignSelection = Usage("consumer.AssignSelection", 0xc0081, OSC)
ModeStep = Usage("consumer.ModeStep", 0xc0082, OSC)
RecallLast = Usage("consumer.RecallLast", 0xc0083, OSC)
EnterChannel = Usage("consumer.EnterChannel", 0xc0084, OSC)
OrderMovie = Usage("consumer.OrderMovie", 0xc0085, OSC)
Channel = Usage("consumer.Channel", 0xc0086, LC)
MediaSelection = Usage("consumer.MediaSelection", 0xc0087, NAry)
MediaSelectComputer = Usage("consumer.MediaSelectComputer", 0xc0088, Sel)
MediaSelectTv = Usage("consumer.MediaSelectTv", 0xc0089, Sel)
MediaSelectWww = Usage("consumer.MediaSelectWww", 0xc008A, Sel)
MediaSelectDvd = Usage("consumer.MediaSelectDvd", 0xc008B, Sel)
MediaSelectTelephone = Usage("consumer.MediaSelectTelephone", 0xc008C, Sel)
MediaSelectProgramGuide = Usage("consumer.MediaSelectProgramGuide", 0xc008D, Sel)
MediaSelectVideoPhone = Usage("consumer.MediaSelectVideoPhone", 0xc008E, Sel)
MediaSelectGames = Usage("consumer.MediaSelectGames", 0xc008F, Sel)
MediaSelectMessages = Usage("consumer.MediaSelectMessages", 0xc0090, Sel)
MediaSelectCd = Usage("consumer.MediaSelectCd", 0xc0091, Sel)
MediaSelectVcr = Usage("consumer.MediaSelectVcr", 0xc0092, Sel)
MediaSelectTuner = Usage("consumer.MediaSelectTuner", 0xc0093, Sel)
Quit = Usage("consumer.Quit", 0xc0094, OSC)
Help = Usage("consumer.Help", 0xc0095, OOC)
MediaSelectTape = Usage("consumer.MediaSelectTape", 0xc0096, Sel)
MediaSelectCable = Usage("consumer.MediaSelectCable", 0xc0097, Sel)
MediaSelectSatellite = Usage("consumer.MediaSelectSatellite", 0xc0098, Sel)
MediaSelectSecurity = Usage("consumer.MediaSelectSecurity", 0xc0099, Sel)
MediaSelectHome = Usage("consumer.MediaSelectHome", 0xc009A, Sel)
MediaSelectCall = Usage("consumer.MediaSelectCall", 0xc009B, Sel)
ChannelIncrement = Usage("consumer.ChannelIncrement", 0xc009C, OSC)
ChannelDecrement = Usage("consumer.ChannelDecrement", 0xc009D, OSC)
MediaSelectSap = Usage("consumer.MediaSelectSap", 0xc009E, Sel)
VcrPlus = Usage("consumer.VcrPlus", 0xc00A0, OSC)
Once = Usage("consumer.Once", 0xc00A1, OSC)
Daily = Usage("consumer.Daily", 0xc00A2, OSC)
Weekly = Usage("consumer.Weekly", 0xc00A3, OSC)
Monthly = Usage("consumer.Monthly", 0xc00A4, OSC)
Play = Usage("consumer.Play", 0xc00B0, OOC)
Pause = Usage("consumer.Pause", 0xc00B1, OOC)
Record = Usage("consumer.Record", 0xc00B2, OOC)
FastForward = Usage("consumer.FastForward", 0xc00B3, OOC)
Rewind = Usage("consumer.Rewind", 0xc00B4, OOC)
ScanNextTrack = Usage("consumer.ScanNextTrack", 0xc00B5, OSC)
ScanPreviousTrack = Usage("consumer.ScanPreviousTrack", 0xc00B6, OSC)
Stop = Usage("consumer.Stop", 0xc00B7, OSC)
Eject = Usage("consumer.Eject", 0xc00B8, OSC)
RandomPlay = Usage("consumer.RandomPlay", 0xc00B9, OOC)
SelectDisc = Usage("consumer.SelectDisc", 0xc00BA, NAry)
EnterDisc = Usage("consumer.EnterDisc", 0xc00BB, MC)
Repeat = Usage("consumer.Repeat", 0xc00BC, OSC)
Tracking = Usage("consumer.Tracking", 0xc00BD, LC)
TrackNormal = Usage("consumer.TrackNormal", 0xc00BE, OSC)
SlowTracking = Usage("consumer.SlowTracking", 0xc00BF, LC)
FrameForward = Usage("consumer.FrameForward", 0xc00C0, RTC)
FrameBack = Usage("consumer.FrameBack", 0xc00C1, RTC)
Mark = Usage("consumer.Mark", 0xc00C2, OSC)
ClearMark = Usage("consumer.ClearMark", 0xc00C3, OSC)
RepeatFromMark = Usage("consumer.RepeatFromMark", 0xc00C4, OOC)
ReturnToMark = Usage("consumer.ReturnToMark", 0xc00C5, OSC)
SearchMarkForward = Usage("consumer.SearchMarkForward", 0xc00C6, OSC)
SearchMarkBackwards = Usage("consumer.SearchMarkBackwards", 0xc00C7, OSC)
CounterReset = Usage("consumer.CounterReset", 0xc00C8, OSC)
ShowCounter = Usage("consumer.ShowCounter", 0xc00C9, OSC)
TrackingIncrement = Usage("consumer.TrackingIncrement", 0xc00CA, RTC)
TrackingDecrement = Usage("consumer.TrackingDecrement", 0xc00CB, RTC)
StopEject = Usage("consumer.StopEject", 0xc00CC, OSC)
PlayPause = Usage("consumer.PlayPause", 0xc00CD, OSC)
PlaySkip = Usage("consumer.PlaySkip", 0xc00CE, OSC)
# HUTRR45
VoiceCommand = Usage("consumer.VoiceCommand", 0xc00cf, OSC)
Volume = Usage("consumer.Volume", 0xc00E0, LC)
Balance = Usage("consumer.Balance", 0xc00E1, LC)
Mute = Usage("consumer.Mute", 0xc00E2, OOC)
Bass = Usage("consumer.Bass", 0xc00E3, LC)
Treble = Usage("consumer.Treble", 0xc00E4, LC)
BassBoost = Usage("consumer.BassBoost", 0xc00E5, OOC)
SurroundMode = Usage("consumer.SurroundMode", 0xc00E6, OSC)
Loudness = Usage("consumer.Loudness", 0xc00E7, OOC)
Mpx = Usage("consumer.Mpx", 0xc00E8, OOC)
VolumeIncrement = Usage("consumer.VolumeIncrement", 0xc00E9, RTC)
VolumeDecrement = Usage("consumer.VolumeDecrement", 0xc00EA, RTC)
SpeedSelect = Usage("consumer.SpeedSelect", 0xc00F0, OSC)
PlaybackSpeed = Usage("consumer.PlaybackSpeed", 0xc00F1, NAry)
StandardPlay = Usage("consumer.StandardPlay", 0xc00F2, Sel)
LongPlay = Usage("consumer.LongPlay", 0xc00F3, Sel)
ExtendedPlay = Usage("consumer.ExtendedPlay", 0xc00F4, Sel)
Slow = Usage("consumer.Slow", 0xc00F5, OSC)
FanEnable = Usage("consumer.FanEnable", 0xc0100, OOC)
FanSpeed = Usage("consumer.FanSpeed", 0xc0101, LC)
LightEnable = Usage("consumer.LightEnable", 0xc0102, OOC)
LightIlluminationLevel = Usage("consumer.LightIlluminationLevel", 0xc0103, LC)
ClimateControlEnable = Usage("consumer.ClimateControlEnable", 0xc0104, OOC)
RoomTemperature = Usage("consumer.RoomTemperature", 0xc0105, LC)
SecurityEnable = Usage("consumer.SecurityEnable", 0xc0106, OOC)
FireAlarm = Usage("consumer.FireAlarm", 0xc0107, OSC)
PoliceAlarm = Usage("consumer.PoliceAlarm", 0xc0108, OSC)
Proximity = Usage("consumer.Proximity", 0xc0109, LC)
Motion = Usage("consumer.Motion", 0xc010A, OSC)
DuressAlarm = Usage("consumer.DuressAlarm", 0xc010B, OSC)
HoldupAlarm = Usage("consumer.HoldupAlarm", 0xc010C, OSC)
MedicalAlarm = Usage("consumer.MedicalAlarm", 0xc010D, OSC)
BalanceRight = Usage("consumer.BalanceRight", 0xc0150, RTC)
BalanceLeft = Usage("consumer.BalanceLeft", 0xc0151, RTC)
BassIncrement = Usage("consumer.BassIncrement", 0xc0152, RTC)
BassDecrement = Usage("consumer.BassDecrement", 0xc0153, RTC)
TrebleIncrement = Usage("consumer.TrebleIncrement", 0xc0154, RTC)
TrebleDecrement = Usage("consumer.TrebleDecrement", 0xc0155, RTC)
SpeakerSystem = Usage("consumer.SpeakerSystem", 0xc0160, CL)
ChannelLeft = Usage("consumer.ChannelLeft", 0xc0161, CL)
ChannelRight = Usage("consumer.ChannelRight", 0xc0162, CL)
ChannelCenter = Usage("consumer.ChannelCenter", 0xc0163, CL)
ChannelFront = Usage("consumer.ChannelFront", 0xc0164, CL)
ChannelCenterFront = Usage("consumer.ChannelCenterFront", 0xc0165, CL)
ChannelSide = Usage("consumer.ChannelSide", 0xc0166, CL)
ChannelSurround = Usage("consumer.ChannelSurround", 0xc0167, CL)
ChannelLowFrequencyEnhancement = Usage("consumer.ChannelLowFrequencyEnhancement", 0xc0168, CL)
ChannelTop = Usage("consumer.ChannelTop", 0xc0169, CL)
ChannelUnknown = Usage("consumer.ChannelUnknown", 0xc016A, CL)
SubChannel = Usage("consumer.SubChannel", 0xc0170, LC)
SubChannelIncrement = Usage("consumer.SubChannelIncrement", 0xc0171, OSC)
SubChannelDecrement = Usage("consumer.SubChannelDecrement", 0xc0172, OSC)
AlternateAudioIncrement = Usage("consumer.AlternateAudioIncrement", 0xc0173, OSC)
AlternateAudioDecrement = Usage("consumer.AlternateAudioDecrement", 0xc0174, OSC)
ApplicationLaunchButtons = Usage("consumer.ApplicationLaunchButtons", 0xc0180, NAry)
AlLaunchButtonConfigurationTool = Usage("consumer.AlLaunchButtonConfigurationTool", 0xc0181, Sel)
AlProgrammableButtonConfiguration = Usage("consumer.AlProgrammableButtonConfiguration", 0xc0182, Sel)
AlConsumerControlConfiguration = Usage("consumer.AlConsumerControlConfiguration", 0xc0183, Sel)
AlWordProcessor = Usage("consumer.AlWordProcessor", 0xc0184, Sel)
AlTextEditor = Usage("consumer.AlTextEditor", 0xc0185, Sel)
AlSpreadsheet = Usage("consumer.AlSpreadsheet", 0xc0186, Sel)
AlGraphicsEditor = Usage("consumer.AlGraphicsEditor", 0xc0187, Sel)
AlPresentationApp = Usage("consumer.AlPresentationApp", 0xc0188, Sel)
AlDatabaseApp = Usage("consumer.AlDatabaseApp", 0xc0189, Sel)
AlEmailReader = Usage("consumer.AlEmailReader", 0xc018A, Sel)
AlNewsreader = Usage("consumer.AlNewsreader", 0xc018B, Sel)
AlVoicemail = Usage("consumer.AlVoicemail", 0xc018C, Sel)
AlContactsAddressBook = Usage("consumer.AlContactsAddressBook", 0xc018D, Sel)
AlCalendarSchedule = Usage("consumer.AlCalendarSchedule", 0xc018E, Sel)
AlTaskProjectManager = Usage("consumer.AlTaskProjectManager", 0xc018F, Sel)
AlLogJournalTimecard = Usage("consumer.AlLogJournalTimecard", 0xc0190, Sel)
AlCheckbookFinance = Usage("consumer.AlCheckbookFinance", 0xc0191, Sel)
AlCalculator = Usage("consumer.AlCalculator", 0xc0192, Sel)
AlAVCapturePlayback = Usage("consumer.AlAVCapturePlayback", 0xc0193, Sel)
AlLocalMachineBrowser = Usage("consumer.AlLocalMachineBrowser", 0xc0194, Sel)
AlLanWanBrowser = Usage("consumer.AlLanWanBrowser", 0xc0195, Sel)
AlInternetBrowser = Usage("consumer.AlInternetBrowser", 0xc0196, Sel)
AlRemoteNetworkingIspConnect = Usage("consumer.AlRemoteNetworkingIspConnect", 0xc0197, Sel)
AlNetworkConference = Usage("consumer.AlNetworkConference", 0xc0198, Sel)
AlNetworkChat = Usage("consumer.AlNetworkChat", 0xc0199, Sel)
AlTelephonyDialer = Usage("consumer.AlTelephonyDialer", 0xc019A, Sel)
AlLogon = Usage("consumer.AlLogon", 0xc019B, Sel)
AlLogoff = Usage("consumer.AlLogoff", 0xc019C, Sel)
AlLogonLogoff = Usage("consumer.AlLogonLogoff", 0xc019D, Sel)
AlTerminalLockScreensaver = Usage("consumer.AlTerminalLockScreensaver", 0xc019E, Sel)
AlControlPanel = Usage("consumer.AlControlPanel", 0xc019F, Sel)
AlCommandLineProcessorRun = Usage("consumer.AlCommandLineProcessorRun", 0xc01A0, Sel)
AlProcessTaskManager = Usage("consumer.AlProcessTaskManager", 0xc01A1, Sel)
AlSelectTaskApplication = Usage("consumer.AlSelectTaskApplication", 0xc01A2, Sel)
AlNextTaskApplication = Usage("consumer.AlNextTaskApplication", 0xc01A3, Sel)
AlPreviousTaskApplication = Usage("consumer.AlPreviousTaskApplication", 0xc01A4, Sel)
AlPreemptiveHaltTaskApplication = Usage("consumer.AlPreemptiveHaltTaskApplication", 0xc01A5, Sel)
AlIntegratedHelpCenter = Usage("consumer.AlIntegratedHelpCenter", 0xc01A6, Sel)
AlDocuments = Usage("consumer.AlDocuments", 0xc01A7, Sel)
AlThesaurus = Usage("consumer.AlThesaurus", 0xc01A8, Sel)
AlDictionary = Usage("consumer.AlDictionary", 0xc01A9, Sel)
AlDesktop = Usage("consumer.AlDesktop", 0xc01AA, Sel)
AlSpellCheck = Usage("consumer.AlSpellCheck", 0xc01AB, Sel)
AlGrammarCheck = Usage("consumer.AlGrammarCheck", 0xc01AC, Sel)
AlWirelessStatus = Usage("consumer.AlWirelessStatus", 0xc01AD, Sel)
AlKeyboardLayout = Usage("consumer.AlKeyboardLayout", 0xc01AE, Sel)
AlVirusProtection = Usage("consumer.AlVirusProtection", 0xc01AF, Sel)
AlEncryption = Usage("consumer.AlEncryption", 0xc01B0, Sel)
AlScreenSaver = Usage("consumer.AlScreenSaver", 0xc01B1, Sel)
AlAlarms = Usage("consumer.AlAlarms", 0xc01B2, Sel)
AlClock = Usage("consumer.AlClock", 0xc01B3, Sel)
AlFileBrowser = Usage("consumer.AlFileBrowser", 0xc01B4, Sel)
AlPowerStatus = Usage("consumer.AlPowerStatus", 0xc01B5, Sel)
AlImageBrowser = Usage("consumer.AlImageBrowser", 0xc01B6, Sel)
AlAudioBrowser = Usage("consumer.AlAudioBrowser", 0xc01B7, Sel)
AlMovieBrowser = Usage("consumer.AlMovieBrowser", 0xc01B8, Sel)
AlDigitalRightsManager = Usage("consumer.AlDigitalRightsManager", 0xc01B9, Sel)
AlDigitalWallet = Usage("consumer.AlDigitalWallet", 0xc01BA, Sel)
AlInstantMessaging = Usage("consumer.AlInstantMessaging", 0xc01BC, Sel)
AlOemFeaturesTipsTutorialBrowser = Usage("consumer.AlOemFeaturesTipsTutorialBrowser", 0xc01BD, Sel)
AlOemHelp = Usage("consumer.AlOemHelp", 0xc01BE, Sel)
AlOnlineCommunity = Usage("consumer.AlOnlineCommunity", 0xc01BF, Sel)
AlEntertainmentContentBrowser = Usage("consumer.AlEntertainmentContentBrowser", 0xc01C0, Sel)
AlOnlineShoppingBrowser = Usage("consumer.AlOnlineShoppingBrowser", 0xc01C1, Sel)
AlSmartcardInformationHelp = Usage("consumer.AlSmartcardInformationHelp", 0xc01C2, Sel)
AlMarketMonitorFinanceBrowser = Usage("consumer.AlMarketMonitorFinanceBrowser", 0xc01C3, Sel)
AlCustomizedCorporateNewsBrowser = Usage("consumer.AlCustomizedCorporateNewsBrowser", 0xc01C4, Sel)
AlOnlineActivityBrowser = Usage("consumer.AlOnlineActivityBrowser", 0xc01C5, Sel)
AlResearchSearchBrowser = Usage("consumer.AlResearchSearchBrowser", 0xc01C6, Sel)
AlAudioPlayer = Usage("consumer.AlAudioPlayer", 0xc01C7, Sel)
GenericGuiApplicationControls = Usage("consumer.GenericGuiApplicationControls", 0xc0200, NAry)
AcNew = Usage("consumer.AcNew", 0xc0201, Sel)
AcOpen = Usage("consumer.AcOpen", 0xc0202, Sel)
AcClose = Usage("consumer.AcClose", 0xc0203, Sel)
AcExit = Usage("consumer.AcExit", 0xc0204, Sel)
AcMaximize = Usage("consumer.AcMaximize", 0xc0205, Sel)
AcMinimize = Usage("consumer.AcMinimize", 0xc0206, Sel)
AcSave = Usage("consumer.AcSave", 0xc0207, Sel)
AcPrint = Usage("consumer.AcPrint", 0xc0208, Sel)
AcProperties = Usage("consumer.AcProperties", 0xc0209, Sel)
AcUndo = Usage("consumer.AcUndo", 0xc021A, Sel)
AcCopy = Usage("consumer.AcCopy", 0xc021B, Sel)
AcCut = Usage("consumer.AcCut", 0xc021C, Sel)
AcPaste = Usage("consumer.AcPaste", 0xc021D, Sel)
AcSelectAll = Usage("consumer.AcSelectAll", 0xc021E, Sel)
AcFind = Usage("consumer.AcFind", 0xc021F, Sel)
AcFindAndReplace = Usage("consumer.AcFindAndReplace", 0xc0220, Sel)
AcSearch = Usage("consumer.AcSearch", 0xc0221, Sel)
AcGoTo = Usage("consumer.AcGoTo", 0xc0222, Sel)
AcHome = Usage("consumer.AcHome", 0xc0223, Sel)
AcBack = Usage("consumer.AcBack", 0xc0224, Sel)
AcForward = Usage("consumer.AcForward", 0xc0225, Sel)
AcStop = Usage("consumer.AcStop", 0xc0226, Sel)
AcRefresh = Usage("consumer.AcRefresh", 0xc0227, Sel)
AcPreviousLink = Usage("consumer.AcPreviousLink", 0xc0228, Sel)
AcNextLink = Usage("consumer.AcNextLink", 0xc0229, Sel)
AcBookmarks = Usage("consumer.AcBookmarks", 0xc022A, Sel)
AcHistory = Usage("consumer.AcHistory", 0xc022B, Sel)
AcSubscriptions = Usage("consumer.AcSubscriptions", 0xc022C, Sel)
AcZoomIn = Usage("consumer.AcZoomIn", 0xc022D, Sel)
AcZoomOut = Usage("consumer.AcZoomOut", 0xc022E, Sel)
AcZoom = Usage("consumer.AcZoom", 0xc022F, LC)
AcFullScreenView = Usage("consumer.AcFullScreenView", 0xc0230, Sel)
AcNormalView = Usage("consumer.AcNormalView", 0xc0231, Sel)
AcViewToggle = Usage("consumer.AcViewToggle", 0xc0232, Sel)
AcScrollUp = Usage("consumer.AcScrollUp", 0xc0233, Sel)
AcScrollDown = Usage("consumer.AcScrollDown", 0xc0234, Sel)
AcScroll = Usage("consumer.AcScroll", 0xc0235, LC)
AcPanLeft = Usage("consumer.AcPanLeft", 0xc0236, Sel)
AcPanRight = Usage("consumer.AcPanRight", 0xc0237, Sel)
AcPan = Usage("consumer.AcPan", 0xc0238, LC)
AcNewWindow = Usage("consumer.AcNewWindow", 0xc0239, Sel)
AcTileHorizontally = Usage("consumer.AcTileHorizontally", 0xc023A, Sel)
AcTileVertically = Usage("consumer.AcTileVertically", 0xc023B, Sel)
AcFormat = Usage("consumer.AcFormat", 0xc023C, Sel)
AcEdit = Usage("consumer.AcEdit", 0xc023D, Sel)
AcBold = Usage("consumer.AcBold", 0xc023E, Sel)
AcItalics = Usage("consumer.AcItalics", 0xc023F, Sel)
AcUnderline = Usage("consumer.AcUnderline", 0xc0240, Sel)
AcStrikethrough = Usage("consumer.AcStrikethrough", 0xc0241, Sel)
AcSubscript = Usage("consumer.AcSubscript", 0xc0242, Sel)
AcSuperscript = Usage("consumer.AcSuperscript", 0xc0243, Sel)
AcAllCaps = Usage("consumer.AcAllCaps", 0xc0244, Sel)
AcRotate = Usage("consumer.AcRotate", 0xc0245, Sel)
AcResize = Usage("consumer.AcResize", 0xc0246, Sel)
AcFlipHorizontal = Usage("consumer.AcFlipHorizontal", 0xc0247, Sel)
AcFlipVertical = Usage("consumer.AcFlipVertical", 0xc0248, Sel)
AcMirrorHorizontal = Usage("consumer.AcMirrorHorizontal", 0xc0249, Sel)
AcMirrorVertical = Usage("consumer.AcMirrorVertical", 0xc024A, Sel)
AcFontSelect = Usage("consumer.AcFontSelect", 0xc024B, Sel)
AcFontColor = Usage("consumer.AcFontColor", 0xc024C, Sel)
AcFontSize = Usage("consumer.AcFontSize", 0xc024D, Sel)
AcJustifyLeft = Usage("consumer.AcJustifyLeft", 0xc024E, Sel)
AcJustifyCenterH = Usage("consumer.AcJustifyCenterH", 0xc024F, Sel)
AcJustifyRight = Usage("consumer.AcJustifyRight", 0xc0250, Sel)
AcJustifyBlockH = Usage("consumer.AcJustifyBlockH", 0xc0251, Sel)
AcJustifyTop = Usage("consumer.AcJustifyTop", 0xc0252, Sel)
AcJustifyCenterV = Usage("consumer.AcJustifyCenterV", 0xc0253, Sel)
AcJustifyBottom = Usage("consumer.AcJustifyBottom", 0xc0254, Sel)
AcJustifyBlockV = Usage("consumer.AcJustifyBlockV", 0xc0255, Sel)
AcIndentDecrease = Usage("consumer.AcIndentDecrease", 0xc0256, Sel)
AcIndentIncrease = Usage("consumer.AcIndentIncrease", 0xc0257, Sel)
AcNumberedList = Usage("consumer.AcNumberedList", 0xc0258, Sel)
AcRestartNumbering = Usage("consumer.AcRestartNumbering", 0xc0259, Sel)
AcBulletedList = Usage("consumer.AcBulletedList", 0xc025A, Sel)
AcPromote = Usage("consumer.AcPromote", 0xc025B, Sel)
AcDemote = Usage("consumer.AcDemote", 0xc025C, Sel)
AcYes = Usage("consumer.AcYes", 0xc025D, Sel)
AcNo = Usage("consumer.AcNo", 0xc025E, Sel)
AcCancel = Usage("consumer.AcCancel", 0xc025F, Sel)
AcCatalog = Usage("consumer.AcCatalog", 0xc0260, Sel)
AcBuyCheckout = Usage("consumer.AcBuyCheckout", 0xc0261, Sel)
AcAddToCart = Usage("consumer.AcAddToCart", 0xc0262, Sel)
AcExpand = Usage("consumer.AcExpand", 0xc0263, Sel)
AcExpandAll = Usage("consumer.AcExpandAll", 0xc0264, Sel)
AcCollapse = Usage("consumer.AcCollapse", 0xc0265, Sel)
AcCollapseAll = Usage("consumer.AcCollapseAll", 0xc0266, Sel)
AcPrintPreview = Usage("consumer.AcPrintPreview", 0xc0267, Sel)
AcPasteSpecial = Usage("consumer.AcPasteSpecial", 0xc0268, Sel)
AcInsertMode = Usage("consumer.AcInsertMode", 0xc0269, Sel)
AcDelete = Usage("consumer.AcDelete", 0xc026A, Sel)
AcLock = Usage("consumer.AcLock", 0xc026B, Sel)
AcUnlock = Usage("consumer.AcUnlock", 0xc026C, Sel)
AcProtect = Usage("consumer.AcProtect", 0xc026D, Sel)
AcUnprotect = Usage("consumer.AcUnprotect", 0xc026E, Sel)
AcAttachComment = Usage("consumer.AcAttachComment", 0xc026F, Sel)
AcDeleteComment = Usage("consumer.AcDeleteComment", 0xc0270, Sel)
AcViewComment = Usage("consumer.AcViewComment", 0xc0271, Sel)
AcSelectWord = Usage("consumer.AcSelectWord", 0xc0272, Sel)
AcSelectSentence = Usage("consumer.AcSelectSentence", 0xc0273, Sel)
AcSelectParagraph = Usage("consumer.AcSelectParagraph", 0xc0274, Sel)
AcSelectColumn = Usage("consumer.AcSelectColumn", 0xc0275, Sel)
AcSelectRow = Usage("consumer.AcSelectRow", 0xc0276, Sel)
AcSelectTable = Usage("consumer.AcSelectTable", 0xc0277, Sel)
AcSelectObject = Usage("consumer.AcSelectObject", 0xc0278, Sel)
AcRedoRepeat = Usage("consumer.AcRedoRepeat", 0xc0279, Sel)
AcSort = Usage("consumer.AcSort", 0xc027A, Sel)
AcSortAscending = Usage("consumer.AcSortAscending", 0xc027B, Sel)
AcSortDescending = Usage("consumer.AcSortDescending", 0xc027C, Sel)
AcFilter = Usage("consumer.AcFilter", 0xc027D, Sel)
AcSetClock = Usage("consumer.AcSetClock", 0xc027E, Sel)
AcViewClock = Usage("consumer.AcViewClock", 0xc027F, Sel)
AcSelectTimeZone = Usage("consumer.AcSelectTimeZone", 0xc0280, Sel)
AcEditTimeZones = Usage("consumer.AcEditTimeZones", 0xc0281, Sel)
AcSetAlarm = Usage("consumer.AcSetAlarm", 0xc0282, Sel)
AcClearAlarm = Usage("consumer.AcClearAlarm", 0xc0283, Sel)
AcSnoozeAlarm = Usage("consumer.AcSnoozeAlarm", 0xc0284, Sel)
AcResetAlarm = Usage("consumer.AcResetAlarm", 0xc0285, Sel)
AcSynchronize = Usage("consumer.AcSynchronize", 0xc0286, Sel)
AcSendReceive = Usage("consumer.AcSendReceive", 0xc0287, Sel)
AcSendTo = Usage("consumer.AcSendTo", 0xc0288, Sel)
AcReply = Usage("consumer.AcReply", 0xc0289, Sel)
AcReplyAll = Usage("consumer.AcReplyAll", 0xc028A, Sel)
AcForwardMsg = Usage("consumer.AcForwardMsg", 0xc028B, Sel)
AcSend = Usage("consumer.AcSend", 0xc028C, Sel)
AcAttachFile = Usage("consumer.AcAttachFile", 0xc028D, Sel)
AcUpload = Usage("consumer.AcUpload", 0xc028E, Sel)
AcDownload = Usage("consumer.AcDownload", 0xc028F, Sel)
AcSetBorders = Usage("consumer.AcSetBorders", 0xc0290, Sel)
AcInsertRow = Usage("consumer.AcInsertRow", 0xc0291, Sel)
AcInsertColumn = Usage("consumer.AcInsertColumn", 0xc0292, Sel)
AcInsertFile = Usage("consumer.AcInsertFile", 0xc0293, Sel)
AcInsertPicture = Usage("consumer.AcInsertPicture", 0xc0294, Sel)
AcInsertObject = Usage("consumer.AcInsertObject", 0xc0295, Sel)
AcInsertSymbol = Usage("consumer.AcInsertSymbol", 0xc0296, Sel)
AcSaveAndClose = Usage("consumer.AcSaveAndClose", 0xc0297, Sel)
AcRename = Usage("consumer.AcRename", 0xc0298, Sel)
AcMerge = Usage("consumer.AcMerge", 0xc0299, Sel)
AcSplit = Usage("consumer.AcSplit", 0xc029A, Sel)
AcDisributeHorizontally = Usage("consumer.AcDisributeHorizontally", 0xc029B, Sel)
AcDistributeVertically = Usage("consumer.AcDistributeVertically", 0xc029C, Sel)
# HUTRR42c
ExtendedKeyboardAttributesCollection = Usage("consumer.ExtendedKeyboardAttributesCollection", 0xc02C0, CL)
KeyboardFormFactor = Usage("consumer.KeyboardFormFactor", 0xc02C1, SV)
KeyboardKeyType = Usage("consumer.KeyboardKeyType", 0xc02C2, SV)
KeyboardPhysicalLayout = Usage("consumer.KeyboardPhysicalLayout", 0xc02C3, SV)
VendorSpecificKeyboardPhysicalLayout = Usage("consumer.VendorSpecificKeyboardPhysicalLayout", 0xc02C4, SV)
KeyboardIETFLanguageTagIndex = Usage("consumer.KeyboardIETFLanguageTagIndex", 0xc02C5, SV)
ImplementedKeyboardInputAssistControls = Usage("consumer.ImplementedKeyboardInputAssistControls", 0xc02C6, SV)
KeyboardInputAssistPrevious = Usage("consumer.KeyboardInputAssistPrevious", 0xc02C7, Sel)
KeyboardInputAssistNext = Usage("consumer.KeyboardInputAssistNext", 0xc02C8, Sel)
KeyboardInputAssistPreviousGroup = Usage("consumer.KeyboardInputAssistPreviousGroup", 0xc02C9, Sel)
KeyboardInputAssistNextGroup = Usage("consumer.KeyboardInputAssistNextGroup", 0xc02CA, Sel)
KeyboardInputAssistAccept = Usage("consumer.KeyboardInputAssistAccept", 0xc02CB, Sel)
KeyboardInputAssistCancel = Usage("consumer.KeyboardInputAssistCancel", 0xc02CC, Sel)
|
410636
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
import keras
from keras.models import Model
from keras.layers import Dense, Input, Dropout
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from utils.resBlock import res_block
from utils.data_reader import read_h5_data
from utils.writeANNProperties import writeANNProperties
from utils.customObjects import coeff_r2, SGDRScheduler
# define the labels
labels = []
with open('GRI_13', 'r') as f:
species = f.readlines()
for line in species:
# remove linebreak which is the last character of the string
current_place = line[:-1]
# add item to the list
labels.append(current_place)
# append other fields: heatrelease, T, PVs
# labels.append('heatRelease')
labels.append('T')
labels.append('PVs')
# labels.remove('H')
# labels.remove('CH2O')
# labels.remove('HO2')
# labels.append('H')
# tabulate psi, mu, alpha
# labels.append('psi')
# labels.append('mu')
# labels.append('alpha')
# labels.remove('AR')
# labels.remove('N2')
input_features = ['f', 'zeta', 'pv']
# define the type of scaler: MinMax or Standard
# read in the data
X, y, df, in_scaler, out_scaler = read_h5_data('./data/tables_of_fgm_psi_n2fix.h5',
input_features=input_features,
labels=labels,
i_scaler='no', o_scaler='cbrt_std')
# write the OpenFOAM ANNProperties file
scaler = 'Standard'
if hasattr(out_scaler.std, 'mean_'):
writeANNProperties(in_scaler,out_scaler,scaler,labels)
# split into train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.01)
# %%
print('set up ANN')
# ANN parameters
dim_input = X_train.shape[1]
dim_label = y_train.shape[1]
n_neuron = 100
branches = 3
scale = 3
batch_norm = False
# This returns a tensor
inputs = Input(shape=(dim_input,), name='input_1')
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(n_neuron, activation='relu')(inputs)
# with fewer than 2 res_blocks there will be variance
x = res_block(x, scale, n_neuron, stage=1, block='a', bn=batch_norm, branches=branches)
x = res_block(x, scale, n_neuron, stage=1, block='b', bn=batch_norm, branches=branches)
# x = res_block(x, scale, n_neuron, stage=1, block='c', bn=batch_norm, branches=branches)
x = Dense(100, activation='relu')(x)
# x = Dropout(0.1)(x)
predictions = Dense(dim_label, activation='linear', name='output_1')(x)
model = Model(inputs=inputs, outputs=predictions)
model.summary()
# %%
vsplit = 0.1
batch_size = 1024 * 8
# checkpoint (save the best model based on validation loss)
filepath = "./tmp/weights.best.cntk.hdf5"
checkpoint = ModelCheckpoint(filepath,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min',
period=10)
epoch_size = X_train.shape[0]
a = 0
base = 2
clc = 2
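# total epochs = base*(clc^0 + ... + clc^7), i.e. 8 SGDR warm-restart cycles whose lengths start at base and grow by mult_factor=clc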
for i in range(8):
a += base * clc ** (i)
epochs, c_len = a, base
schedule = SGDRScheduler(min_lr=1e-6, max_lr=1e-4,
steps_per_epoch=np.ceil(epoch_size / batch_size),
cycle_length=c_len, lr_decay=0.8, mult_factor=clc)
callbacks_list = [checkpoint,schedule]
# callbacks_list = [checkpoint]
loss_type = 'mse'
for i in range(1):
# fit the model
model.compile(loss=loss_type,
optimizer='adam',
metrics=[coeff_r2])
model.load_weights("./tmp/weights.best.cntk.hdf5")
history = model.fit(
X_train, y_train,
epochs=epochs,
batch_size=batch_size,
validation_split=vsplit,
verbose=2,
callbacks=callbacks_list,
shuffle=True)
# loss
fig = plt.figure()
plt.semilogy(history.history['loss'])
if vsplit:
plt.semilogy(history.history['val_loss'])
plt.title(loss_type)
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
model.save('./tmp/gri13_cbrt.h5')
#%%
n_res = 501
sp='PVs'
for i in range(11):
# pv_level = 0.98+i*0.002
pv_level = i /10
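# sweep f from 0 to 1 at zeta=0 and a fixed pv level, then compare the ANN prediction with the table value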
f_1 = np.linspace(0, 1, n_res)
z_1 = np.zeros(n_res)
pv_1 = np.ones(n_res) * pv_level
case_1 = np.vstack((f_1, z_1, pv_1))
# case_1 = np.vstack((pv_1,z_1,f_1))
case_1 = case_1.T
out = out_scaler.inverse_transform(model.predict(in_scaler.transform(case_1)))
out = pd.DataFrame(out, columns=labels)
table_val=df[(df.pv==pv_level) & (df.zeta==0)][sp]
fig = plt.figure()
plt.xlim([0,0.2])
plt.plot(f_1,out[sp],'k')
plt.plot(f_1,table_val,'rd')
plt.title(pv_level)
plt.show()
#%%
n_res = 501
for sp in labels:
f_level = 0.04
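# sweep pv from 0 to 1 at zeta=0 and fixed f, then compare the ANN prediction with the table value for each label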
f_1 = np.ones(n_res) * f_level
z_1 = np.zeros(n_res)
pv_1 = np.linspace(0,1,n_res)
case_1 = np.vstack((f_1, z_1, pv_1))
# case_1 = np.vstack((pv_1,z_1,f_1))
case_1 = case_1.T
out = out_scaler.inverse_transform(model.predict(in_scaler.transform(case_1)))
out = pd.DataFrame(out, columns=labels)
table_val=df[(df.f==f_level) & (df.zeta==0)][sp]
fig = plt.figure()
plt.plot(pv_1,out[sp],'k')
plt.plot(pv_1,table_val,'rd',ms=1)
plt.title(sp+':'+str(f_level)+'_max_'+str(df[sp].max()))
plt.show()
#%%
# sp='CH4'#NH3,H2
# f_level = 0.044
# f_1 = np.ones(n_res) * f_level
# z_1 = np.zeros(n_res)
# pv_1 = np.linspace(0,1,n_res)
# case_1 = np.vstack((f_1, z_1, pv_1))
# # case_1 = np.vstack((pv_1,z_1,f_1))
#
# case_1 = case_1.T
# out = out_scaler.inverse_transform(model.predict(in_scaler.transform(case_1)))
# out = pd.DataFrame(out, columns=labels)
# table_val=df[(df.f==f_level) & (df.zeta==0)][sp]
#
# fig = plt.figure()
# plt.plot(pv_1,np.cbrt(out[sp]),'k')
# plt.plot(pv_1,np.cbrt(table_val),'rd',ms=1)
# plt.title(sp+':'+str(f_level))
# plt.show()
|
410645
|
import os, sys, subprocess
#apt-get install gnuplot transfig
passed = {}
has_osv = os.path.isdir(os.path.expanduser('~/osv'))
def runbench_cython(path, name):
data = open(os.path.join(path, name), 'rb').read()
open('/tmp/cython_module.pyx', 'wb').write(data)
subprocess.check_call([
'cython',
'/tmp/cython_module.pyx'
])
script = [
'import pyximport, time',
'pyximport.install()',
'import cython_module',
'T = time.clock()',
'cython_module.run_benchmark()',
'print time.clock()-T'
]
open('/tmp/run-cython.py', 'wb').write('\n'.join(script))
proc = subprocess.Popen(
['python', 'run-cython.py',], stdout=subprocess.PIPE,
cwd='/tmp'
)
proc.wait()
data = proc.stdout.read()
for line in data.splitlines():
try:
T = float(line.strip())
except ValueError:
print line
return T
def runbench_rs(path, name, strip=False):
url = os.path.join(path, name)
if os.path.isfile(url.replace('.py', '-rs.py')):
url = url.replace('.py', '-rs.py')
if strip:
subprocess.check_call([
'pythia',
'--convert2python=/tmp/input.rapyd',
url
])
else:
open('/tmp/input.rapyd','wb').write(open(url,'rb').read())
data = open('/tmp/input.rapyd', 'rb').read()
data = data.replace('from runtime import *', '')
data = data.replace('from time import clock', 'JS("var clock = function(){return (new Date()).getTime()/1000;}")')
data = 'def list(a): return a\n' + data
data = 'import stdlib\n' + data
open('/tmp/input.rapyd', 'wb').write(data)
tmpjs = '/tmp/rapyd-output.js'
subprocess.check_call(['nodejs', '/usr/local/bin/rapydscript', '/tmp/input.rapyd', '--bare', '--beautify', '--output', tmpjs])
#rapydata = open(tmpjs,'rb').read(),
proc = subprocess.Popen(['nodejs', tmpjs], stdout=subprocess.PIPE)
proc.wait()
T = proc.stdout.read().splitlines()[0] ## extra lines could contain compiler warnings, etc.
return str(float(T.strip()))
def runbench_py(path, name, interp='python3', cores=None):
data = open(os.path.join(path, name), 'rb').read()
data = data.replace('from runtime import *', '')
data = data.replace('with oo:', '')
lines = []
for ln in data.splitlines():
if ln.strip().startswith('v8->('):
continue
elif ln.startswith('THREADS=') and cores:
ln = 'THREADS=%s' %cores
lines.append(ln)
open('/tmp/input.py', 'wb').write('\n'.join(lines))
subprocess.check_call([
'pythia',
'--convert2python=/tmp/output.py',
'/tmp/input.py'
])
proc = subprocess.Popen(
[interp, '/tmp/output.py',], stdout=subprocess.PIPE
)
proc.wait()
data = proc.stdout.read()
for line in data.splitlines():
try:
T = float(line.strip())
except ValueError:
print line
return T
def runbench(path, name, backend='javascript', pgo=False, cores=None, osv=False):
srcpath = os.path.join(path, name)
if cores:
data = open(os.path.join(path, name), 'rb').read()
srcpath = '/tmp/_benchmark.py'
lines = []
for ln in data.splitlines():
if ln.startswith('THREADS='):
ln = 'THREADS=%s' %cores
lines.append(ln)
open(srcpath, 'wb').write('\n'.join(lines))
cmd = [
'pythia',
'--'+backend,
'--v8-natives',
'--release',
srcpath
]
if pgo:
cmd.append('--gcc-pgo')
if osv:
cmd.append('--osv')
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
proc.wait()
T = None
data = proc.stdout.read()
for line in data.splitlines():
try:
T = float(line.strip())
print 'bench time: ' + line
print T
except ValueError:
print line
assert T is not None
if backend=='javascript':
js = name[:-2] + 'js'
passed[ name ] = open('/tmp/'+js).read().split('/*end-builtins*/')[-1]
return T
BENCHES = [
'fannkuch.py',
#'thread_shared_vector.py',
]
[
'thread_collision.py',
'pystone.py',
'recursive_fib.py',
'operator_overloading.py',
'add.py',
'float.py',
'copy_list.py',
#'richards.py',
#'nbody.py',
#'operator_overloading_functor.py',
#'operator_overloading_nonfunctor.py',
]
TYPED = [
'thread_shared_vector.py',
'thread_collision.py',
'recursive_fib.py',
'fannkuch.py',
'float.py',
'pystone.py',
'richards.py',
'operator_overloading.py',
'add.py',
'copy_list.py',
]
VsPython = {
'pypy' : [],
'pypy-stm': [],
'javascript':[],
'c++' : [],
'c++stack' : [],
'go' : [],
}
for name in BENCHES:
print name
times = {}
#try:
# times['rapyd'] = runbench_rs('./bench', name)
#except:
# pass
pypystm = False
if os.path.isfile( os.path.expanduser('~/pypy-stm-2.5.1-linux64/bin/pypy-stm') ):
pypystm = os.path.expanduser('~/pypy-stm-2.5.1-linux64/bin/pypy-stm')
if '--cython' in sys.argv:
cyname = name.replace('.py','-cython.pyx')
if os.path.isfile('./bench/'+cyname):
times['cython'] = runbench_cython('./bench', cyname)
else:
print 'can not find cython version:', cyname
if not '--skip-python' in sys.argv:
times['python'] = runbench_py('./bench', name)
times['pypy'] = runbench_py('./bench', name, interp='pypy')
if name.startswith('thread_'):
if not '--skip-python' in sys.argv:
times['python(single)'] = runbench_py('./bench', name, cores=1)
times['pypy(single)'] = runbench_py('./bench', name, interp='pypy', cores=1)
if pypystm:
times['pypy-stm'] = runbench_py('./bench', name, interp=pypystm)
if name.startswith('thread_'):
times['pypy-stm(single)'] = runbench_py('./bench', name, interp=pypystm, cores=1)
times['pypy-stm(custom)'] = runbench_py('./bench', name.replace('.py','-pypy.py'), interp=pypystm)
if not name.startswith('thread_'):
times['javascript'] = runbench('./bench', name, 'javascript')
if name in TYPED:
nametyped = name.replace('.py','-typed.py')
#times['rust'] = runbench('./bench', nametyped, 'rust')
gotyped = name.replace('.py','-typed-go.py')
if os.path.isfile('./bench/'+gotyped):
times['go'] = runbench('./bench', gotyped, 'go')
elif '--go' in sys.argv:
times['go'] = runbench('./bench', nametyped, 'go')
times['c++'] = runbench('./bench', nametyped, 'c++')
if os.path.isfile('rusthon-c++-build.gcda'):
print 'removing old .gcda (PGO dump)'
os.unlink('rusthon-c++-build.gcda')
if name.startswith('thread_'):
times['c++(single)'] = runbench('./bench', nametyped, 'c++', cores=1)
nametyped = name.replace('.py','-typed-stack.py')
if os.path.isfile('./bench/'+nametyped):
times['c++stack'] = runbench('./bench', nametyped, 'c++')
## only faster with if/else branches?
#times['c++PGO'] = runbench('./bench', nametyped, 'c++', pgo=True)
#if not os.path.isfile('rusthon-c++-build.gcda'):
# raise RuntimeError('failed to compile PGO optimized binary')
print times
perf_header = [
'font=Helvetica',
'fontsz=12',
'=color_per_datum',
'yformat=%g',
]
if name=='pystone.py':
perf_header.append('ylabel=Pystones')
else:
perf_header.append('ylabel=seconds')
perf = []
if 'python' in times:
perf.append('Python3 %s' % times['python'])
if 'python(single)' in times:
perf.append('Python3(single) %s' % times['python(single)'])
if 'pypy(single)' in times:
perf.append('PyPy(multi) %s' % times['pypy'])
perf.append('PyPy(single) %s' % times['pypy(single)'])
if 'pypy(custom)' in times:
perf.append('PyPy(custom) %s' % times['pypy(custom)'])
elif 'pypy' in times:
perf.append('PyPy %s' % times['pypy'])
if 'pypy-stm' in times:
if 'pypy-stm(single)' in times:
perf.append('PyPy-STM(single) %s' % times['pypy-stm(single)'])
perf.append('PyPy-STM(multi) %s' % times['pypy-stm'])
if 'pypy-stm(custom)' in times:
perf.append('PyPy-STM(custom) %s' % times['pypy-stm(custom)'])
else:
perf.append('PyPy-STM %s' % times['pypy-stm'])
if 'javascript' in times:
perf.append('Pythia->JS %s' % times['javascript'])
if 'rapyd' in times:
perf.append('RapydScript %s' % times['rapyd'])
if 'cython' in times:
perf.append('Cython %s' % times['cython'])
if 'c++' in times:
if 'thread' in name:
if 'c++(single)' in times:
perf.append('Pythia->C++STM(single) %s' % times['c++(single)'])
perf.append('Pythia->C++STM(multi) %s' % times['c++'])
else:
perf.append('Pythia->C++STM %s' % times['c++'])
else:
perf.append('Pythia->C++ %s' % times['c++'])
if 'c++stack' in times:
perf.append('Pythia->C++STACK %s' % times['c++stack'])
if 'c++PGO' in times:
perf.append('Pythia->C++PGO %s' % times['c++PGO'])
if 'go' in times:
perf.append('Pythia->Go %s' % times['go'])
perf_path = '/tmp/%s.perf' %name
open( perf_path, 'wb' ).write( '\n'.join( perf_header+perf ).encode('utf-8') )
os.system( './bargraph.pl -eps %s > /tmp/%s.eps' %(perf_path,name))
subprocess.check_call([
'convert',
'-density', '400',
'/tmp/%s.eps' % name,
'-resize', '1400x1600',
'-transparent', 'white',
'./bench/%s.png' % name
])
if 'python' in times:
for tag in times:
if tag=='python':
continue
if name=='pystone.py':
score = times[tag] / times['python']
else:
score = times['python'] / times[tag]
print '%s: %s times faster than python' %(tag, score)
if tag not in VsPython: VsPython[tag] = []
VsPython[tag].append(score)
elif 'pypy' in times:
for tag in times:
if tag.startswith('pypy'):
continue
if name=='pystone.py':
score = times[tag] / times['pypy']
else:
score = times['pypy'] / times[tag]
print '%s: %s times faster than pypy' %(tag, score)
if tag not in VsPython: VsPython[tag] = []
VsPython[tag].append(score)
print VsPython
if len(BENCHES) > 4:
Titles = {
'python' : 'Python3 %s',
'pypy' : 'PyPy %s',
'pypy-stm' : 'PyPy-STM %s',
'javascript' : 'Pythia->JS %s',
'c++' : 'Pythia->C++ %s',
'c++stack' : 'Pythia->C++STACK %s',
'go' : 'Pythia->GO %s',
}
perf = [
'font=Helvetica',
'fontsz=12',
'=color_per_datum',
'yformat=%g',
'ylabel=speed'
]
for key in VsPython:
if VsPython[key] and key in Titles and len(VsPython[key]) >= 4:
scores = VsPython[key]
avg = sum(scores) / len(scores)
perf.append( Titles[key] % avg )
name = 'speed-vs-python'
perf_path = '/tmp/%s.perf' %name
open( perf_path, 'wb' ).write( '\n'.join( perf ).encode('utf-8') )
os.system( './bargraph.pl -eps %s > /tmp/%s.eps' %(perf_path,name))
subprocess.check_call([
'convert',
'-density', '400',
'/tmp/%s.eps' % name,
'-resize', '1400x1600',
'-transparent', 'white',
'./bench/%s.png' % name
])
|
410660
|
from flexx.util.testing import run_tests_if_main, raises, skip
import re
from flexx.util.logging import logger, capture_log, set_log_level
def test_debug():
logger.debug('test')
def test_info():
logger.info('test')
def test_warning():
logger.warning('test')
def test_set_log_level():
with raises(ValueError):
set_log_level('notaloglevel')
with raises(TypeError):
set_log_level([])
def test_capture():
with capture_log('info') as log:
logger.warning('AA')
logger.info('BB')
msg1 = log[0]
msg2 = log[1]
assert 'flexx' in msg1
assert 'AA' in msg1
assert '[W ' in msg1
assert 'flexx' in msg2
assert 'BB' in msg2
assert '[I' in msg2
def test_match():
# Match based on string
with capture_log('info', 'foo') as log:
logger.info('AA foo')
logger.info('BB bar') # no foo
logger.debug('CC foo') # too high level
logger.info('DD fXo') # no foo
assert len(log) == 1
assert 'AA' in log[0]
# Match based on regexp
with capture_log('info', re.compile('f.o')) as log:
logger.info('AA foo')
logger.info('BB bar') # no foo
logger.debug('CC foo') # too high level
logger.info('DD fXo')
assert len(log) == 2
assert 'AA' in log[0]
assert 'DD' in log[1]
# No match
with capture_log('info', '') as log:
logger.info('AA foo')
logger.info('BB bar')
logger.debug('CC foo') # too high level
logger.info('DD fXo')
assert len(log) == 3
def test_debug_does_more():
def caller_func_bla():
logger.debug('AA foo')
logger.info('BB bar')
with capture_log('debug') as log:
caller_func_bla()
assert len(log) == 2
assert 'caller_func_bla' in log[0]
assert 'caller_func_bla' in log[1]
run_tests_if_main()
|
410675
|
from .resource_enum import ResourceEnum
class ConditionEnum(ResourceEnum):
E = "Excellent"
G = "Good"
F = "Fair"
P = "Poor"
|
410731
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.sea_slugs import sea_slugs
def test_sea_slugs():
"""Test module sea_slugs.py by downloading
sea_slugs.csv and testing shape of
extracted data has 36 rows and 2 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = sea_slugs(test_path)
try:
assert x_train.shape == (36, 2)
except:
shutil.rmtree(test_path)
    raise
|
410739
|
from typing import List
import json
import re
from instagram_api import response
from instagram_api.exceptions import InternalException, SettingsException
from .base import CollectionBase
__all__ = ['Account']
class Account(CollectionBase):
def get_current_user(self) -> response.UserInfoResponse:
return self._ig.request('accounts/current_user/').add_params(
edit=True,
).get_response(response.UserInfoResponse)
def set_biography(self, biography: str) -> response.UserInfoResponse:
"""
Edit your biography.
You are able to add `@mentions` and `#hashtags` to your biography, but
be aware that Instagram disallows certain web URLs and shorteners.
Also keep in mind that anyone can read your biography (even if your account is private).
WARNING: Remember to also call `edit_profile()` after using this
function, so that you act like the real app!
:param biography: str
Biography text. Use "" for nothing.
:raise: AssertionError
:return: response.UserInfoResponse
"""
assert isinstance(biography, str) and len(biography) <= 150, (
'Please provide a 0 to 150 character string as biography.'
)
return self._ig.request('accounts/set_biography/').add_posts(
raw_text=biography,
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
device_id=self._ig.device_id,
_csrftoken=self._ig.client.get_token(),
).get_response(response.UserInfoResponse)
def set_gender(self, gender: str = '') -> response.UserInfoResponse:
g = gender.lower()
if g == 'male':
gender_id = 1
elif g == 'female':
gender_id = 2
elif not g:
gender_id = 3
else:
gender_id = 4
return self._ig.request('accounts/set_gender/').set_signed_post(False).add_posts(
gender=gender_id,
_csrftoken=self._ig.client.get_token(),
_uuid=self._ig.uuid,
custom_gender=gender if gender_id == 4 else '',
).get_response(response.UserInfoResponse)
def edit_profile(self,
url: str,
phone: str,
name: str,
biography: str,
email: str,
gender: str,
new_username: str = None) -> response.UserInfoResponse:
user_response = self.get_current_user()
current_user = user_response.user
if not current_user:
raise InternalException('Unable to find current account username while preparing profile edit.')
old_username = current_user.username
username = new_username if isinstance(new_username, str) and len(new_username) > 0 else old_username
return self._ig.request('accounts/edit_profile/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
external_url=url,
phone_number=phone,
username=username,
first_name=name,
biography=biography,
email=email,
gender=gender,
device_id=self._ig.device_id,
)
def change_profile_picture(self, filename: str) -> response.UserInfoResponse:
...
def remove_profile_picture(self) -> response.UserInfoResponse:
return self._ig.request('accounts/remove_profile_picture/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
).get_response(response.UserInfoResponse)
def set_public(self) -> response.UserInfoResponse:
return self._ig.request('accounts/set_public/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
).get_response(response.UserInfoResponse)
def set_private(self) -> response.UserInfoResponse:
return self._ig.request('accounts/set_private/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
).get_response(response.UserInfoResponse)
def switch_to_business_profile(self) -> response.SwitchBusinessProfileResponse:
return self._ig.request(
'business_conversion/get_business_convert_social_context/'
).get_response(response.SwitchBusinessProfileResponse)
def switch_to_personal_profile(self) -> response.SwitchPersonalProfileResponse:
return self._ig.request('accounts/convert_to_personal/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
).get_response(response.SwitchPersonalProfileResponse)
def set_business_info(self,
phone_number: str,
email: str,
category_id: str) -> response.CreateBusinessInfoResponse:
return self._ig.request('accounts/create_business_info/').add_posts(
set_public='true',
entry_point='setting',
public_phone_contact=json.dumps({
'public_phone_number': phone_number,
'business_contact_method': 'CALL',
}),
public_email=email,
category_id=category_id,
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
).get_response(response.CreateBusinessInfoResponse)
def check_username(self, username: str) -> response.CheckUsernameResponse:
return self._ig.request('users/check_username/').add_posts(
_uuid=self._ig.uuid,
username=username,
_csrftoken=self._ig.client.get_token(),
_uid=self._ig.account_id,
).get_response(response.CheckUsernameResponse)
def get_comment_filter(self) -> response.CommentFilterResponse:
return self._ig.request('accounts/get_comment_filter/').get_response(response.CommentFilterResponse)
def set_comment_filter(self, config_value: int) -> response.CommentFilterSetResponse:
return self._ig.request('accounts/set_comment_filter/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
config_value=config_value,
).get_response(response.CommentFilterSetResponse)
def get_comment_category_filter_disabled(self) -> response.CommentCategoryFilterResponse:
return self._ig.request(
'accounts/get_comment_category_filter_disabled/'
).get_response(response.CommentCategoryFilterResponse)
def get_comment_filter_keywords(self) -> response.CommentFilterKeywordsResponse:
return self._ig.request(
'accounts/get_comment_filter_keywords/'
).get_response(response.CommentFilterKeywordsResponse)
def set_comment_filter_keywords(self, keywords: List[str]) -> response.CommentFilterSetResponse:
return self._ig.request(
'accounts/set_comment_filter_keywords/'
).add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
keywords=keywords,
).get_response(response.CommentFilterSetResponse)
def change_password(self, old_password: str, new_password: str) -> response.ChangePasswordResponse:
return self._ig.request('accounts/change_password/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
            old_password=old_password,
            new_password1=new_password,
            new_password2=new_password,
).get_response(response.ChangePasswordResponse)
def get_security_info(self) -> response.AccountSecurityInfoResponse:
return self._ig.request('accounts/account_security_info/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
).get_response(response.AccountSecurityInfoResponse)
@staticmethod
def _clean_phone_number(phone_number: str) -> str:
return '+' + re.sub(r'[^0-9]', '', phone_number)
def send_two_factor_enable_sms(self, phone_number: str) -> response.SendTwoFactorEnableSMSResponse:
cleaned_number = self._clean_phone_number(phone_number)
return self._ig.request('accounts/send_two_factor_enable_sms/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
device_id=self._ig.device_id,
phone_number=cleaned_number,
).get_response(response.SendTwoFactorEnableSMSResponse)
def enable_two_factor_sms(self,
phone_number: str,
verification_code: str) -> response.EnableTwoFactorSMSResponse:
cleaned_number = self._clean_phone_number(phone_number)
return self._ig.request('accounts/enable_sms_two_factor/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
device_id=self._ig.device_id,
phone_number=cleaned_number,
verification_code=verification_code,
).get_response(response.EnableTwoFactorSMSResponse)
def disable_two_factor_sms(self) -> response.DisableTwoFactorSMSResponse:
return self._ig.request('accounts/disable_sms_two_factor/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
).get_response(response.DisableTwoFactorSMSResponse)
def _save_presence_status(self, disabled: bool):
try:
self._ig.settings.set('presence_disabled', '1' if disabled else '0')
except SettingsException:
pass
def get_presence_status(self) -> response.PresenceStatusResponse:
result = self._ig.request(
'accounts/get_presence_disabled/'
).set_signed_get(True).get_response(response.PresenceStatusResponse)
self._save_presence_status(result.disabled)
return result
def enable_presence(self) -> response.GenericResponse:
result = self._ig.request('accounts/set_presence_disabled/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
disabled='0',
_csrftoken=self._ig.client.get_token(),
).get_response(response.GenericResponse)
self._save_presence_status(False)
return result
def disable_presence(self) -> response.GenericResponse:
result = self._ig.request('accounts/set_presence_disabled/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
disabled='1',
_csrftoken=self._ig.client.get_token(),
).get_response(response.GenericResponse)
self._save_presence_status(True)
return result
def send_confirm_email(self) -> response.SendConfirmEmailResponse:
return self._ig.request('accounts/send_confirm_email/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_send_source='edit_profile',
_csrftoken=self._ig.client.get_token(),
).get_response(response.SendConfirmEmailResponse)
def send_sms_code(self, phone_number: str) -> response.SendSMSCodeResponse:
cleaned_number = self._clean_phone_number(phone_number)
return self._ig.request('accounts/send_sms_code/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
_csrftoken=self._ig.client.get_token(),
phone_number=cleaned_number,
)
def verify_sms_code(self, phone_number: str, verification_code: str) -> response.VerifySMSCodeResponse:
cleaned_number = self._clean_phone_number(phone_number)
return self._ig.request('accounts/verify_sms_code/').add_posts(
_uuid=self._ig.uuid,
_uid=self._ig.account_id,
phone_number=cleaned_number,
verification_code=verification_code,
_csrftoken=self._ig.client.get_token(),
).get_response(response.VerifySMSCodeResponse)
def set_contact_point_prefill(self, usage: str) -> response.GenericResponse:
return self._ig.request('accounts/contact_point_prefill/').set_needs_auth(False).add_posts(
phone_id=self._ig.phone_id,
_csrftoken=self._ig.client.get_token(),
usage=usage,
).get_response(response.GenericResponse)
def get_badge_notifications(self) -> response.BadgeNotificationsResponse:
return self._ig.request('notifications/badge/').set_signed_post(False).add_posts(
_uuid=self._ig.uuid,
_csrftoken=self._ig.client.get_token(),
user_ids=self._ig.account_id,
phone_id=self._ig.phone_id,
).get_response(response.BadgeNotificationsResponse)
def get_process_contact_point_signals(self) -> response.GenericResponse:
return self._ig.request('accounts/process_contact_point_signals/').add_posts(
google_tokens='[]',
phone_id=self._ig.phone_id,
_uid=self._ig.account_id,
_uuid=self._ig.uuid,
device_id=self._ig.device_id,
_csrftoken=self._ig.client.get_token(),
).get_response(response.GenericResponse)
|
410771
|
import numpy as np
import scipy.linalg
import theano
from theano.tensor import as_tensor_variable
import theano.tests.unittest_tools
from theano.gof import Op, Apply
class MatrixSquareRoot(Op):
def make_node(self, x):
x = as_tensor_variable(x)
assert x.ndim == 2
return Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
x, = inputs
z, = outputs
z[0] = scipy.linalg.sqrtm(x).real
def grad(self, inputs, g_outputs):
x, = inputs
gz, = g_outputs
return [MatrixSquareRootGrad()(self(x), gz)]
def infer_shape(self, node, shapes):
return shapes
sqrtm = MatrixSquareRoot()
class MatrixSquareRootGrad(Op):
def make_node(self, sqrtx, gz):
sqrtx = as_tensor_variable(sqrtx)
gz = as_tensor_variable(gz)
assert sqrtx.ndim == 2
assert gz.ndim == 2
return Apply(self, [sqrtx, gz], [sqrtx.type()])
def perform(self, node, inputs, outputs):
sqrtx, gz = inputs
z, = outputs
z[0] = scipy.linalg.solve_sylvester(sqrtx, sqrtx, gz)
def infer_shape(self, node, shapes):
return [shapes[0]]
def main():
x = theano.tensor.matrix()
sum_x = sqrtm(x).sum()
sum_x_fn = theano.function([x], sum_x)
n = 50
L = np.random.uniform(-1, 1, size=(n, n + 500)) * .1
cov = L.dot(L.T) + np.eye(n) * .5
print sum_x_fn(cov)
grad = theano.grad(sum_x, x)
grad_fn = theano.function([x], grad)
print grad_fn(cov)
def reg_cov_mat(x, a, b, c):
return (a * np.exp(-b * (x[:, np.newaxis] - x)**2) +
c * np.eye(x.shape[0]))
for i in xrange(10):
cov = reg_cov_mat(np.random.uniform(0, 1, size=n), 1, 8, .1)
theano.tests.unittest_tools.verify_grad(sqrtm, [cov])
if __name__ == '__main__':
main()
|
410779
|
import math
import random
import time
#from multiprocessing.dummy import Pool
from multiprocessing import Pool
def y_is_in_circle(x, y):
"""Test if x,y coordinate lives within the radius of the unit circle"""
return x * x + y * y <= 1.0
def estimate_nbr_points_in_circle(nbr_samples):
nbr_in_circle = 0
for n in xrange(nbr_samples):
x = random.uniform(0.0, 1.0)
y = random.uniform(0.0, 1.0)
if y_is_in_circle(x, y):
nbr_in_circle += 1
return nbr_in_circle
pool = Pool()
nbr_samples = int(1e7)
nbr_parallel_blocks = 4
map_inputs = [nbr_samples] * nbr_parallel_blocks
t1 = time.time()
results = pool.map(estimate_nbr_points_in_circle, map_inputs)
# pool.close()
print results
print "Took {}s".format(time.time() - t1)
nbr_in_circle = sum(results)
combined_nbr_samples = sum(map_inputs)
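# Each sample is uniform in the unit square, so the fraction landing inside the
# quarter circle estimates pi/4; multiplying by 4 below recovers the estimate of pi.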
pi_estimate = float(nbr_in_circle) / combined_nbr_samples * 4
print "Estimated pi", pi_estimate
print "Pi", math.pi
|
410809
|
import torch
import torch.nn.functional
from torch.nn.utils.rnn import PackedSequence
import padertorch as pt
__all__ = [
'softmax_cross_entropy',
]
IGNORE_INDEX = -1
def softmax_cross_entropy(x, t):
"""Allow inputs to be of type `PackedSequence`.
In my understanding, all dimensions but the last should be treated as
independent dimensions. Therefore, I argue for x.size() == (..., K) where
t.size() == (...). Similarly, for sequences x.size() == (T, B, ..., K) and
t.size() == (T, B, ...).
Check the test case for typical usage.
Params:
x: `Tensor` or `PackedSequence` holding a multidimensional array whose
elements indicate unnormalized log probabilities (logits).
t: Same object type as `x`. Holds integers of ground truth labels.
Returns:
>>> x = torch.randn(100, 3)
>>> t = torch.randint(0, 3, size=(100,), dtype=torch.long)
>>> softmax_cross_entropy(x, t).size()
torch.Size([])
"""
if isinstance(x, torch.Tensor) and isinstance(t, torch.Tensor):
pass
elif isinstance(x, PackedSequence) and isinstance(t, PackedSequence):
# Data is already organized such that no padding is necessary.
x, t = x.data, t.data
else:
raise ValueError(f'Incompatible types: {type(x)}, {type(t)}')
assert x.size()[:-1] == t.size(), f'{x.size()}, {t.size()}'
# remember torch.nn.CrossentropyLoss already includes softmax
loss_fn = torch.nn.CrossEntropyLoss(ignore_index=IGNORE_INDEX)
return loss_fn(pt.ops.move_axis(x, -1, 1), t)
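# Minimal sketch of the PackedSequence path described in the docstring (synthetic
# data; not part of the original module). Logits and targets are packed with the
# same lengths, so their `.data` tensors line up and no padding enters the loss.
if __name__ == '__main__':
    from torch.nn.utils.rnn import pack_sequence
    lengths = [5, 3, 2]
    logits = pack_sequence([torch.randn(t, 4) for t in lengths])   # K=4 classes
    targets = pack_sequence([torch.randint(0, 4, (t,)) for t in lengths])
    print(softmax_cross_entropy(logits, targets))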
|
410815
|
import asyncio
from typing import Generic, Iterable, Tuple, TypeVar
from agraffe.types import ASGIApp, Message, Scope
Req = TypeVar('Req')
Res = TypeVar('Res')
class HttpCycleBase(Generic[Req, Res]):
def __init__(self, request: Req):
self.request = request
self.status_code = 200
self.headers: Iterable[Tuple[str, str]] = ()
self.body = b''
def __call__(self, app: ASGIApp) -> None:
loop = asyncio.get_event_loop()
instance = self.run(app)
task = loop.create_task(instance)
loop.run_until_complete(task)
@property
def response(self) -> Res:
raise NotImplementedError
async def send(self, message: Message) -> None:
if message['type'] == 'http.response.start':
self.status_code = message['status']
self.headers = tuple(
(key.decode('latin-1'), value.decode('latin-1'))
for (key, value) in message['headers']
)
elif message['type'] == 'http.response.body':
self.body = message['body']
return None
async def run(self, app: ASGIApp) -> None:
await app(self.scope, self.receive, self.send)
async def receive(self) -> Message:
raise NotImplementedError
@property
def scope(self) -> Scope:
raise NotImplementedError
|
410817
|
import base64
import re
import urllib
import zlib
from urllib.request import Request
import defusedxml.ElementTree as ET
from retrying import retry
from src.util.logging import log
'''
Original from: https://github.com/aggixx/PoBPreviewBot/blob/master/util.py
&& https://github.com/aggixx/PoBPreviewBot/blob/master/pastebin.py
'''
def fetch_paste_key(content):
"""
Fetches the last paste key in a message.
:param content: message.content
:return: paste key to retrieve pastebin content
"""
if 'raw' in content:
content = content.replace('raw/', '')
regex = r"pastebin.com\/(\S*)"
results = re.findall(regex, content)
return results
def decode_base64_and_inflate(b64string):
try:
decoded_data = base64.b64decode(b64string)
return zlib.decompress(decoded_data)
except zlib.error as err:
log.error("ZLib Error in paste: err={}. Data={}".format(err, b64string))
except ValueError as err:
log.error("Value Error in paste: err={}".format(err))
def decode_to_xml(enc, encoding='windows-1252'):
enc = enc.replace("-", "+").replace("_", "/")
xml_str = decode_base64_and_inflate(enc)
log.debug("XML={}".format(xml_str))
xml = None
try:
xml = ET.fromstring(xml_str.decode(encoding))
except TypeError as err:
log.debug("Could not parse the pastebin as xml msg={}".format(err))
return xml
def urllib_error_retry(attempt_number, ms_since_first_attempt):
delay = 1 * (2 ** (attempt_number - 1))
log.error("An error occurred during get_url_data(). Sleeping for {:.0f}s before retrying...".format(delay))
return delay * 1000
@retry(wait_exponential_multiplier=1000,
stop_max_attempt_number=2,
wait_func=urllib_error_retry)
def get_raw_data(url):
q = Request(url)
q.add_header('Cache-Control', 'max-age=0')
try:
url = urllib.request.urlopen(q)
except urllib.error.HTTPError as e:
return None
content = url.read().decode('utf-8')
if "Possible Spam Detected" in content:
raise CaptchaError("Pastebin marked this as possible spam. Please reupload and clear captchas before retrying.")
return content # read and encode as utf-8
def get_as_xml(paste_key):
raw_url = 'https://pastebin.com/raw/' + paste_key
log.debug("Retrieved from raw_url={}".format(raw_url))
data = get_raw_data(raw_url)
return data
class CaptchaError(Exception):
def __init__(self, message):
self.message = message
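# Minimal round-trip sketch with synthetic data (no real pastebin involved): a
# Path of Building export is URL-safe base64 of zlib-compressed XML, so a
# hand-built payload should decode back through decode_to_xml().
if __name__ == '__main__':
    sample = base64.urlsafe_b64encode(zlib.compress(b'<PathOfBuilding/>')).decode()
    print(decode_to_xml(sample))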
|
410821
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pandas as pd
import torch
import kvt
def compute_metrics(predicts, labels):
N, H, W = predicts.shape
predicts = predicts.reshape((-1, H*W))
labels = labels.reshape((-1, H*W))
sum_p = np.sum(predicts, axis=1)
sum_l = np.sum(labels, axis=1)
intersection = np.sum(np.logical_and(predicts, labels), axis=1)
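    # Per-sample Dice coefficient: twice the intersection over the sum of the two
    # mask areas; the epsilon in the division below keeps the ratio finite when
    # both masks are empty.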
numer = 2*intersection
denom = sum_p + sum_l
dice = numer / (denom + 1e-6)
empty_indices = np.where(sum_l <= 0)[0]
non_empty_indices = np.where(sum_l > 0)[0]
if len(non_empty_indices) == 0:
non_empty_mean_dice = 0.0
else:
non_empty_dice = dice[non_empty_indices]
non_empty_mean_dice = float(np.mean(non_empty_dice))
all_non_empty_index = np.where(numer > 0)[0]
all_empty_index = np.where(denom == 0)[0]
dice[all_empty_index] = 1
mean_dice = float(np.mean(dice))
cls_accuracy = (len(all_non_empty_index) + len(all_empty_index)) / N
correct_indices = np.where((sum_p > 0) == (sum_l > 0))[0]
incorrect_indices = np.where((sum_p > 0) != (sum_l > 0))[0]
tp = len(np.where(sum_l[correct_indices] > 0)[0])
tn = len(np.where(sum_l[correct_indices] == 0)[0])
fp = len(np.where(sum_l[incorrect_indices] == 0)[0])
fn = len(np.where(sum_l[incorrect_indices] > 0)[0])
precision = tp / (tp + fp + 1e-10)
recall = tp / (tp + fn + 1e-10)
tnr = tn / (tn + fp + 1e-10)
fpr = fp / (fp + tn + 1e-10)
return {'score': mean_dice,
'mean_dice': mean_dice,
'mean_dice_non_empty': non_empty_mean_dice,
'cls_acc': cls_accuracy,
'precision': precision,
'recall': recall,
'tnr': tnr,
'fpr': fpr}
@kvt.HOOKS.register
class CloudMetricHook:
def __call__(self, outputs, labels, data, is_train, split):
probabilities = outputs['probabilities']
labels = outputs['labels']
is_train = False
if isinstance(probabilities, torch.Tensor):
assert isinstance(probabilities, torch.Tensor)
assert isinstance(labels, torch.Tensor)
labels = labels.detach().cpu().numpy()
probabilities = probabilities.detach().cpu().numpy()
if 'cls_probabilities' in outputs:
cls_probabilities = outputs['cls_probabilities']
if isinstance(cls_probabilities, torch.Tensor):
cls_probabilities = cls_probabilities.detach().cpu().numpy()
else:
cls_probabilities = None
assert probabilities.shape == labels.shape
cls_thres = np.array([0.7,0.7,0.7,0.7])
thres = np.array([0.4,0.4,0.4,0.4])
predictions = (probabilities > thres[None,:,None,None]).astype(int)
if cls_probabilities is not None:
cls_predictions = (cls_probabilities > cls_thres).astype(int)
predictions = predictions * cls_predictions[:,:,None,None]
B,C,H,W = predictions.shape
return compute_metrics(predictions.reshape(-1,H,W), labels.reshape(-1,H,W))
|
410835
|
class InvalidRating(ValueError): pass
class AuthRequired(TypeError): pass
class CannotChangeVote(Exception): pass
class CannotDeleteVote(Exception): pass
class IPLimitReached(Exception): pass
|
410898
|
class TransmissionData(object,IDisposable):
"""
A class representing information on all external file references
in a document.
TransmissionData(other: TransmissionData)
"""
def Dispose(self):
""" Dispose(self: TransmissionData) """
pass
@staticmethod
def DocumentIsNotTransmitted(filePath):
"""
DocumentIsNotTransmitted(filePath: ModelPath) -> bool
Determines whether the document at a given file location
is not transmitted.
filePath: The path to the document whose transmitted state will be checked.
Returns: False if the document is a transmitted file,true otherwise.
"""
pass
def GetAllExternalFileReferenceIds(self):
"""
GetAllExternalFileReferenceIds(self: TransmissionData) -> ICollection[ElementId]
Gets the ids of all ExternalFileReferences.
Returns: The ids of all ExternalFileReferences.
"""
pass
def GetDesiredReferenceData(self,elemId):
"""
GetDesiredReferenceData(self: TransmissionData,elemId: ElementId) -> ExternalFileReference
Gets the ExternalFileReference representing path
and load status
information to be used the next time
this TransmissionData's document is
loaded.
elemId: The ElementId of the Element which the external file
reference is a
component of.
Returns: An ExternalFileReference containing the requested
path and load status
information for an external file
"""
pass
def GetLastSavedReferenceData(self,elemId):
"""
GetLastSavedReferenceData(self: TransmissionData,elemId: ElementId) -> ExternalFileReference
Gets the ExternalFileReference representing path
and load status
information concerning the most
recent time this TransmissionData's
document was opened.
elemId: The ElementId of the Element which the external file
reference is a
component of.
Returns: An ExternalFileReference containing the previous
path and load status
information for an external file
"""
pass
@staticmethod
def IsDocumentTransmitted(filePath):
"""
IsDocumentTransmitted(filePath: ModelPath) -> bool
Determines whether the document at a given file location
is transmitted.
filePath: The path to the document whose transmitted state will be checked.
Returns: True if the document is a transmitted file,false otherwise.
"""
pass
@staticmethod
def ReadTransmissionData(path):
"""
ReadTransmissionData(path: ModelPath) -> TransmissionData
Reads the TransmissionData associated with the
file at the given location.
path: A ModelPath indicating the file Revit should read
the TransmissionData of.
If this ModelPath is a file path,it must be an absolute path.
Returns: The TransmissionData containing external file
information for the file at
the given location.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: TransmissionData,disposing: bool) """
pass
def SetDesiredReferenceData(self,elemId,path,pathType,shouldLoad):
"""
SetDesiredReferenceData(self: TransmissionData,elemId: ElementId,path: ModelPath,pathType: PathType,shouldLoad: bool)
Sets the ExternalFileReference information which
Revit should use the next
time it opens the document
which this TransmissionData belongs to.
elemId: The id of the element associated with this reference.
path: A ModelPath indicating the location to load the external
file reference
from.
pathType: A PathType value indicating what type of path the ModelPath is.
shouldLoad: True if the external file should be loaded the next time Revit
opens the
document. False if it should be unloaded.
"""
pass
@staticmethod
def WriteTransmissionData(path,data):
"""
WriteTransmissionData(path: ModelPath,data: TransmissionData)
Writes the given TransmissionData into the Revit file at the
given location.
path: A ModelPath indicating the file Revit should write
the TransmissionData of.
This ModelPath must be a file path and an absolute path.
data: The TransmissionData to be written into the document.
Note that Revit will
not check that the ElementIds in
the TransmissionData correspond to real
Elements.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,other):
""" __new__(cls: type,other: TransmissionData) """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsTransmitted=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Determines whether this file has been transmitted or not.
Get: IsTransmitted(self: TransmissionData) -> bool
Set: IsTransmitted(self: TransmissionData)=value
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: TransmissionData) -> bool
"""
UserData=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""A string which users can store notes in.
Get: UserData(self: TransmissionData) -> str
Set: UserData(self: TransmissionData)=value
"""
Version=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The format version for TransmissionData
Get: Version(self: TransmissionData) -> int
"""
|
410914
|
import sys
from oic.oauth2 import PBase
from oic.utils.webfinger import OIC_ISSUER
from oic.utils.webfinger import WebFinger
__author__ = 'roland'
wf = WebFinger(OIC_ISSUER)
wf.httpd = PBase()
print (wf.discovery_query(sys.argv[1]))
|
410974
|
from flask import Blueprint
from .load import load
blueprint = Blueprint('students', __name__, cli_group=None) # type:ignore
blueprint.cli.command('load')(load) # type: ignore
|
411006
|
import numpy as np
from smartsim import Client
def create_data(seed, size):
np.random.seed(seed)
x = np.random.uniform(-15.0, 15.0, size=size)
return x
if __name__ == "__main__":
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument("--cluster", default=False, action="store_true")
args = argparser.parse_args()
client = None
if args.cluster:
client = Client(cluster=True)
else:
client = Client(cluster=False)
for i in range(0, 5):
data = create_data(i, 100)
client.put_array_nd_float64(str(i), data)
for d in data:
print(d)
|
411049
|
from threadedsubscriber import ThreadedSubscriber
import redis
import socket
class Reporter:
def __init__(self, host, port):
self.host = host
self.port = port
self.name = socket.getfqdn()
self.client = redis.StrictRedis(host=self.host, port=self.port, db=0)
self.channels = ["sensors.data", "members.add"]
def find_members(self):
members = self.client.hgetall("members")
live = []
for member in members:
if(self.client.get(member+".live")):
live.append(member)
return live
def on_message(self, channel, message):
print("Channel: "+channel + " - " + message )
# print(channel, message)
def set_on_sensor_data(self, cb):
self.on_sensor_data_cb = cb
def subscribe(self):
self.subscriber = ThreadedSubscriber(self.client, self.channels, self.on_sensor_data_cb)
self.subscriber.run()
def set_key(self, key, value):
self.client.set(key, value)
def get_key(self, key):
return self.client.get(key)
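# Minimal wiring sketch (placeholder host/port; assumes a reachable Redis server
# and that ThreadedSubscriber delivers messages to the callback as cb(channel, message)):
if __name__ == '__main__':
    reporter = Reporter('localhost', 6379)
    reporter.set_on_sensor_data(reporter.on_message)
    reporter.subscribe()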
|
411052
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
l1demonstage1 = DQMEDAnalyzer('L1TDEMON',
HistFolder = cms.untracked.string('L1TEMU'),
HistFile = cms.untracked.string('l1demon.root'),
disableROOToutput = cms.untracked.bool(True),
DataEmulCompareSource = cms.InputTag("l1compareforstage1"),
VerboseFlag = cms.untracked.int32(0),
RunInFilterFarm = cms.untracked.bool(False),
COMPARE_COLLS = cms.untracked.vuint32(
0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0
# ETP,HTP,RCT,GCT,DTP,DTF,CTP,CTF,RPC,LTC,GMT,GT
)
)
|
411087
|
import os.path as op
from skbold.core import MvpBetween
from skbold import testdata_path, roidata_path
import os
import pytest
import numpy as np
cmd = 'cp -r %s/run1.feat %s/mock_subjects/sub00%i'
_ = [os.system(cmd % (testdata_path, testdata_path, i+1)) for i in range(9)
if not op.isdir(op.join(testdata_path, 'mock_subjects',
'sub00%i' % (i + 1), 'run1.feat'))]
dpath = op.join(testdata_path, 'mock_subjects', 'sub*', 'run1.feat', 'stats')
bmask = op.join(roidata_path, 'other', 'GrayMatter_prob.nii.gz')
slist = ['sub001', 'sub002', 'sub003', 'sub004']
@pytest.mark.parametrize("source",
[{'Contrast1': {'path': dpath + '/cope1.nii.gz'}},
{'Contrast1': {'path': dpath + '/cope1.nii.gz'},
'Contrast2': {'path': dpath + '/cope2.nii.gz'}}])
@pytest.mark.parametrize("mask", [bmask, None])
@pytest.mark.parametrize("subject_list", [None, slist])
def test_mvp_between_create(source, mask, subject_list):
source = dict()
source['Contrast1'] = {'path': op.join(testdata_path, 'mock_subjects',
'sub*', 'run1.feat', 'stats',
'cope1.nii.gz')}
mvp = MvpBetween(source=source, subject_idf='sub???', mask=mask)
mvp.create()
@pytest.fixture
def mvp1c():
mask = op.join(roidata_path, 'other', 'GrayMatter_prob.nii.gz')
source = dict()
source['Contrast1'] = {'path': op.join(testdata_path, 'mock_subjects',
'sub*', 'run1.feat', 'stats',
'cope1.nii.gz')}
mvp = MvpBetween(source=source, subject_idf='sub???', mask=mask)
mvp.create()
return mvp
@pytest.fixture
def mvp2c():
mask = op.join(roidata_path, 'other', 'GrayMatter_prob.nii.gz')
source = dict()
source['Contrast1'] = {'path': op.join(testdata_path, 'mock_subjects',
'sub*', 'run1.feat', 'stats',
'cope1.nii.gz')}
source['Contrast2'] = {'path': op.join(testdata_path, 'mock_subjects',
'sub*', 'run1.feat', 'stats',
'cope2.nii.gz')}
mvp = MvpBetween(source=source, subject_idf='sub???', mask=mask)
mvp.create()
return mvp
def test_mvp_between_add_y(mvp1c):
fpath = op.join(testdata_path, 'sample_behav.tsv')
mvp1c.add_y(fpath, col_name='var_categorical', index_col=0, remove=999)
assert(len(mvp1c.y) == 7)
assert(mvp1c.common_subjects == ['sub001', 'sub002', 'sub004',
'sub005', 'sub006', 'sub007', 'sub009'])
assert(len(mvp1c.common_subjects) == mvp1c.X.shape[0] == mvp1c.y.size)
mvp1c.add_y(fpath, col_name='var_categorical', index_col=0, remove=999,
ensure_balanced=True)
assert(mvp1c.y.mean() == 0.5)
@pytest.mark.parametrize("mvp", [mvp1c(), mvp2c()])
def test_mvp_between_write_4D(mvp):
fpath = op.join(testdata_path, 'sample_behav.tsv')
mvp.add_y(fpath, col_name='var_categorical', index_col=0, remove=999)
mvp.write_4D(testdata_path)
for data_name in mvp.data_name:
assert(op.isfile(op.join(testdata_path, '%s.nii.gz' % data_name)))
os.remove(op.join(testdata_path, '%s.nii.gz' % data_name))
os.remove(op.join(testdata_path, 'y_4D_nifti.txt'))
@pytest.mark.parametrize("mvp", [mvp1c(), mvp2c()])
def test_mvp_between_split(mvp):
fpath = op.join(testdata_path, 'sample_behav.tsv')
mvp.split(fpath, col_name='group', target='train')
@pytest.mark.parametrize("params", [{'type': 'percentile',
'high': 60, 'low': 40},
{'type': 'constant', 'cutoff': 100},
{'type': 'median'},
{'type': 'zscore', 'std': 0.25}])
def test_mvp_between_binarize_y(mvp1c, params):
fpath = op.join(testdata_path, 'sample_behav.tsv')
mvp1c.add_y(fpath, col_name='var_continuous', index_col=0)
mvp1c.binarize_y(params, ensure_balanced=True, save_path=testdata_path)
assert((np.unique(mvp1c.y) == [0, 1]).all())
assert(op.isfile(op.join(testdata_path, 'binarize_params.pkl')))
def test_mvp_between_apply_binarization_params(mvp1c):
fpath = op.join(testdata_path, 'sample_behav.tsv')
mvp1c.add_y(fpath, col_name='var_continuous', index_col=0)
mvp1c.apply_binarization_params(op.join(testdata_path,
'binarize_params.pkl'))
os.remove(op.join(testdata_path, 'binarize_params.pkl'))
def test_mvp_between_update_sample(mvp1c):
fpath = op.join(testdata_path, 'sample_behav.tsv')
mvp1c.add_y(fpath, col_name='var_categorical', index_col=0,
remove=999)
idx = np.array([True, True, True, False, True, True, False])
mvp1c.update_sample(idx)
assert(len(mvp1c.y) == 5)
assert(len(mvp1c.y) == mvp1c.X.shape[0] == len(mvp1c.common_subjects))
assert(mvp1c.common_subjects == ['sub001', 'sub002', 'sub004',
'sub006', 'sub007'])
|
411096
|
import open3d as o3d
import torch
import h5py
import numpy as np
import torch.utils.data as data
import torchvision.transforms as transforms
import os
import random
#from utils import *
def read_points(filename, dataset):
if dataset == 'suncg' or dataset == 'fusion':
pcd = o3d.read_point_cloud(filename)
coord = torch.from_numpy(np.array(pcd.points)).float()
color = torch.from_numpy(np.array(pcd.colors)).float()
return coord, color
elif dataset == 'shapenet':
hash_tab = {
'all': {
'name': 'Test',
'label': 100,
'emd1': 0.0,
'emd2': 0.0,
'emd3': 0.0,
'cd1': 0.0,
'cd2': 0.0,
'cd3': 0.0,
'cnt': 0
},
'04530566': {
'name': 'Watercraft',
'label': 1,
'emd1': 0.0,
'emd2': 0.0,
'emd3': 0.0,
'cd1': 0.0,
'cd2': 0.0,
'cd3': 0.0,
'cnt': 0
},
'02933112': {
'name': 'Cabinet',
'label': 2,
'emd1': 0.0,
'emd2': 0.0,
'emd3': 0.0,
'cd1': 0.0,
'cd2': 0.0,
'cd3': 0.0,
'cnt': 0
},
'04379243': {
'name': 'Table',
'label': 3,
'emd1': 0.0,
'emd2': 0.0,
'emd3': 0.0,
'cd1': 0.0,
'cd2': 0.0,
'cd3': 0.0,
'cnt': 0
},
'02691156': {
'name': 'Airplane',
'label': 4,
'emd1': 0.0,
'emd2': 0.0,
'emd3': 0.0,
'cd1': 0.0,
'cd2': 0.0,
'cd3': 0.0,
'cnt': 0
},
'02958343': {
'name': 'Car',
'label': 5,
'emd1': 0.0,
'emd2': 0.0,
'emd3': 0.0,
'cd1': 0.0,
'cd2': 0.0,
'cd3': 0.0,
'cnt': 0
},
'03001627': {
'name': 'Chair',
'label': 6,
'emd1': 0.0,
'emd2': 0.0,
'emd3': 0.0,
'cd1': 0.0,
'cd2': 0.0,
'cd3': 0.0,
'cnt': 0
},
'04256520': {
'name': 'Couch',
'label': 7,
'emd1': 0.0,
'emd2': 0.0,
'emd3': 0.0,
'cd1': 0.0,
'cd2': 0.0,
'cd3': 0.0,
'cnt': 0
},
'03636649': {
'name': 'Lamp',
'label': 8,
'emd1': 0.0,
'emd2': 0.0,
'emd3': 0.0,
'cd1': 0.0,
'cd2': 0.0,
'cd3': 0.0,
'cnt': 0
}
}
fh5 = h5py.File(filename, 'r')
label = float(hash_tab[filename.split("/")[-2]]['label'])
coord = torch.from_numpy(np.array(fh5['data'])).float()
color = torch.from_numpy(
np.ones_like(np.array(fh5['data'])) / 11 * label).float()
return coord, color
def resample_pcd(pcd, n):
"""Drop or duplicate points so that pcd has exactly n points"""
idx = np.random.permutation(pcd.shape[0])
if idx.shape[0] < n:
idx = np.concatenate(
[idx, np.random.randint(pcd.shape[0], size=n - pcd.shape[0])])
return pcd[idx[:n]], idx[:n]
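# Note: the permutation keeps at most the first n original points; when fewer than
# n are available, the index list is topped up by sampling with replacement, so the
# returned cloud always has exactly n points (the chosen indices are returned too).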
class ShapeNet(data.Dataset):
def __init__(self, train=True, npoints=2048, dataset_name='shapenet'):
self.dataset = dataset_name
if train:
if self.dataset == 'suncg':
self.list_path = './data/train_suncg.list'
elif self.dataset == 'fusion':
self.list_path = './data/train_fusion.list'
elif self.dataset == 'shapenet':
self.list_path = './data/train_shapenet.list'
else:
if self.dataset == 'suncg':
self.list_path = './data/valid_suncg.list'
elif self.dataset == 'fusion':
self.list_path = './data/test_fusion.list'
elif self.dataset == 'shapenet':
self.list_path = './data/valid_shapenet.list'
self.npoints = npoints
self.train = train
with open(os.path.join(self.list_path)) as file:
self.model_list = [line.strip().replace('/', '/') for line in file]
random.shuffle(self.model_list)
self.len = len(self.model_list)
def __getitem__(self, index):
model_id = self.model_list[index]
scan_id = index
if self.train:
if self.dataset == 'suncg':
part, part_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/SUNCG_Yida/train/pcd_partial/",
'%s.pcd' % model_id), self.dataset)
comp, comp_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/SUNCG_Yida/train/pcd_complete/",
'%s.pcd' % model_id), self.dataset)
if self.dataset == 'fusion':
part, part_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/050_200/train/pcd_partial/",
'%s.pcd' % model_id), self.dataset)
comp, comp_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/050_200/train/pcd_complete/",
'%s.pcd' % model_id), self.dataset)
elif self.dataset == 'shapenet':
part, part_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/shapenet/train/partial/",
'%s.h5' % model_id), self.dataset)
comp, comp_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/shapenet16384/train/gt/",
'%s.h5' % model_id), self.dataset)
else:
if self.dataset == 'suncg':
part, part_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/SUNCG_Yida/test/pcd_partial/",
                        '%s.pcd' % model_id), self.dataset)
comp, comp_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/SUNCG_Yida/test/pcd_complete/",
'%s.pcd' % model_id), self.dataset)
elif self.dataset == 'fusion':
part, part_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/050_200/test/pcd_partial/",
                        '%s.pcd' % model_id), self.dataset)
comp, comp_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/050_200/test/pcd_complete/",
'%s.pcd' % model_id), self.dataset)
elif self.dataset == 'shapenet':
part, part_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/shapenet/val/partial/",
'%s.h5' % model_id), self.dataset)
comp, comp_color = read_points(
os.path.join(
"/media/wangyida/HDD/database/shapenet16384/val/gt/",
'%s.h5' % model_id), self.dataset)
part_sampled, idx_sampled = resample_pcd(part, self.npoints)
part_seg = np.round(part_color[idx_sampled] * 11)
comp_sampled, idx_sampled = resample_pcd(comp, self.npoints * 8)
comp_seg = np.round(comp_color[idx_sampled] * 11)
"""
comp_seg = []
for i in range (1, 12):
import ipdb; ipdb.set_trace()
comp_seg.append(resample_pcd(comp_sampled[comp_color == i], 512))
"""
return model_id, part_sampled, comp_sampled, part_seg, comp_seg
def __len__(self):
return self.len
|
411184
|
from utils import tprint, plt
import numpy as np
from scipy.stats import rankdata
import sys
from gaussian_process import SparseGPRegressor
from hybrid import HybridMLPEnsembleGP
from process_davis2011kinase import process, visualize_heatmap
from train_davis2011kinase import train
def acquisition_rank(y_pred, var_pred, beta=1.):
return rankdata(y_pred) + (beta * rankdata(-var_pred))
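# Worked example: with y_pred = [1, 3, 2] and var_pred = [0.5, 0.1, 0.2],
# rankdata(y_pred) = [1, 3, 2] and rankdata(-var_pred) = [1, 3, 2], so beta=1
# gives scores [2, 6, 4]; the candidate with the highest prediction and the
# lowest variance comes out on top under this rank-based scheme.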
def acquisition_ucb(y_pred, var_pred, beta=1.):
return y_pred - (beta * var_pred)
def acquisition_scatter(y_unk_pred, var_unk_pred, acquisition, regress_type):
y_unk_pred = y_unk_pred[:]
y_unk_pred[y_unk_pred > 10000] = 10000
plt.figure()
plt.scatter(y_unk_pred, var_unk_pred, alpha=0.5, c=-acquisition,
cmap='hot')
plt.title(regress_type.title())
plt.xlabel('Predicted score')
plt.ylabel('Variance')
plt.savefig('figures/acquisition_unknown_{}.png'
.format(regress_type), dpi=200)
plt.close()
def debug_selection(regress_type='gp'):
y_unk_pred = np.loadtxt('target/ypred_unknown_regressors{}.txt'
.format(regress_type))
var_unk_pred = np.loadtxt('target/variance_unknown_regressors{}.txt'
.format(regress_type))
for beta in [ 'rank', 100000, 500000, 1000000, ]:
if beta == 'rank':
acquisition = acquisition_rank(y_unk_pred, var_unk_pred)
else:
acquisition = acquisition_ucb(y_unk_pred, var_unk_pred, beta=beta)
acquisition_scatter(y_unk_pred, var_unk_pred, acquisition, regress_type)
for beta in range(1, 11):
acquisition = acquisition_rank(y_unk_pred, var_unk_pred, beta=beta)
        print('beta: {}, Kd: {}'.format(beta, y_unk_pred[np.argmax(acquisition)]))
exit()
def select_candidates(point=False, **kwargs):
y_unk_pred = kwargs['y_unk_pred']
var_unk_pred = kwargs['var_unk_pred']
X_unk = kwargs['X_unk']
y_unk = kwargs['y_unk']
idx_unk = kwargs['idx_unk']
n_candidates = kwargs['n_candidates']
chems = kwargs['chems']
prots = kwargs['prots']
if 'beta' in kwargs:
beta = kwargs['beta']
else:
beta = 20.
if point:
tprint('Exploiting (using point prediction only)...')
acquisition = acquisition_rank(y_unk_pred, var_unk_pred, beta=0.)
else:
tprint('Exploiting...')
acquisition = acquisition_rank(y_unk_pred, var_unk_pred, beta=beta)
max_acqs = np.argsort(-acquisition)[:n_candidates]
for max_acq in max_acqs:
i, j = idx_unk[max_acq]
chem = chems[i]
prot = prots[j]
if y_unk is None:
tprint('\tAcquire {} {} <--> {} with predicted Kd value {:.3f}'
' and uncertainty {:.3f}'
.format((i, j), chem, prot, y_unk_pred[max_acq],
var_unk_pred[max_acq]**2))
else:
tprint('\tAcquire {} {} <--> {} with real Kd value {}'
.format((i, j), chem, prot, y_unk[max_acq]))
return list(max_acqs)
def select_candidates_per_quadrant(explore=False, **kwargs):
y_unk_pred = kwargs['y_unk_pred']
var_unk_pred = kwargs['var_unk_pred']
X_unk = kwargs['X_unk']
y_unk = kwargs['y_unk']
idx_unk = kwargs['idx_unk']
n_candidates = kwargs['n_candidates']
chems = kwargs['chems']
prots = kwargs['prots']
acquisition = acquisition_rank(y_unk_pred, var_unk_pred)
acquired = []
quad_names = [ 'side', 'repurpose', 'novel' ]
orig_idx = np.array(list(range(X_unk.shape[0])))
for quad_name in quad_names:
if explore:
tprint('Exploring quadrant {}'.format(quad_name))
else:
tprint('Considering quadrant {}'.format(quad_name))
quad = [ i for i, idx in enumerate(idx_unk)
if idx in set(kwargs['idx_' + quad_name]) ]
y_unk_quad = y_unk_pred[quad]
var_unk_quad = var_unk_pred[quad]
idx_unk_quad = [ idx for i, idx in enumerate(idx_unk)
if idx in set(kwargs['idx_' + quad_name]) ]
if explore:
max_acqs = sorted(set([
np.argmax(acquisition_rank(y_unk_quad, var_unk_quad, cand))
for cand in range(1, n_candidates + 1)
]))
else:
max_acqs = np.argsort(-acquisition[quad])[:n_candidates]
for max_acq in max_acqs:
i, j = idx_unk_quad[max_acq]
chem = chems[i]
prot = prots[j]
if y_unk is None:
tprint('\tAcquire {} {} <--> {} with predicted Kd value {}'
.format((i, j), chem, prot, y_unk_quad[max_acq]))
else:
tprint('\tAcquire {} {} <--> {} with real Kd value {}'
.format((i, j), chem, prot, y_unk[quad][max_acq]))
acquired += list(orig_idx[quad][max_acqs])
return acquired
def select_candidates_per_protein(**kwargs):
y_unk_pred = kwargs['y_unk_pred']
var_unk_pred = kwargs['var_unk_pred']
X_unk = kwargs['X_unk']
y_unk = kwargs['y_unk']
idx_unk = kwargs['idx_unk']
chems = kwargs['chems']
prots = kwargs['prots']
n_candidates = kwargs['n_candidates']
acquisition = acquisition_rank(y_unk_pred, var_unk_pred)
acquired = []
orig_idx = np.array(list(range(X_unk.shape[0])))
for prot_idx, prot in enumerate(prots):
involves_prot = [ j == prot_idx for i, j in idx_unk ]
idx_unk_prot = [ (i, j) for i, j in idx_unk if j == prot_idx ]
max_acqs = np.argsort(-acquisition[involves_prot])[:n_candidates]
tprint('Protein {}'.format(prot))
for max_acq in max_acqs:
i, j = idx_unk_prot[max_acq]
chem = chems[i]
prot = prots[j]
if y_unk is None:
tprint('\tAcquire {} {} <--> {} with predicted Kd value {:.3f}'
' and uncertainty {:.3f}'
.format((i, j), chem, prot, y_unk_pred[involves_prot][max_acq],
var_unk_pred[involves_prot][max_acq]**2))
else:
tprint('\tAcquire {} {} <--> {} with real Kd value {}'
.format((i, j), chem, prot, y_unk[involves_prot][max_acq]))
acquired.append(orig_idx[involves_prot][max_acq])
return acquired
def select_candidates_per_partition(**kwargs):
y_unk_pred = kwargs['y_unk_pred']
var_unk_pred = kwargs['var_unk_pred']
X_unk = kwargs['X_unk']
y_unk = kwargs['y_unk']
idx_unk = kwargs['idx_unk']
n_partitions = kwargs['n_candidates']
chems = kwargs['chems']
prots = kwargs['prots']
chem2feature = kwargs['chem2feature']
acquisition = acquisition_rank(y_unk_pred, var_unk_pred)
if 'partition' in kwargs:
partition = kwargs['partition']
else:
# Partition unknown space using k-means on chemicals.
from sklearn.cluster import KMeans
labels = KMeans(
n_clusters=n_partitions,
init='k-means++',
n_init=3,
random_state=10,
).fit_predict(np.array([
chem2feature[chem] for chem in chems
]))
partition = []
for p in range(n_partitions):
partition.append([
idx for idx, (i, j) in enumerate(idx_unk)
if labels[i] == p
])
orig2new_idx = { i: i for i in range(X_unk.shape[0]) }
for pi in range(len(partition)):
if len(partition[pi]) == 0:
tprint('Partition {} is empty'.format(pi))
continue
partition_pi = set(list(partition[pi]))
idx_unk_part = [ idx for i, idx in enumerate(idx_unk)
if i in partition_pi ]
max_acq = np.argmax(acquisition[partition[pi]])
i, j = idx_unk_part[max_acq]
chem = chems[i]
prot = prots[j]
tprint('Partition {}'.format(pi))
if y_unk is None:
tprint('\tAcquire {} {} <--> {} with predicted Kd value {:.3f}'
' and uncertainty {:.3f}'
.format((i, j), chem, prot, y_unk_pred[partition[pi]][max_acq],
var_unk_pred[partition[pi]][max_acq]**2))
else:
tprint('\tAcquire {} {} <--> {} with real Kd value {}'
.format((i, j), chem, prot, y_unk[partition[pi]][max_acq]))
orig_max_acq = partition[pi][max_acq]
for i in orig2new_idx:
if i == orig_max_acq:
orig2new_idx[i] = None
elif orig2new_idx[i] is None:
pass
elif i > orig_max_acq:
orig2new_idx[i] -= 1
# Acquire one point per partition.
acquired = sorted([ i for i in orig2new_idx if orig2new_idx[i] is None ])
# Make sure new partition indices match new unknown dataset.
for pi in range(len(partition)):
partition[pi] = np.array([
orig2new_idx[p] for p in partition[pi]
if orig2new_idx[p] is not None
])
kwargs['partition'] = partition
return acquired, kwargs
def acquire(**kwargs):
if 'scheme' in kwargs:
scheme = kwargs['scheme']
else:
scheme = 'exploit'
if 'n_candidates' in kwargs:
n_candidates = kwargs['n_candidates']
else:
kwargs['n_candidates'] = 1
if scheme == 'exploit':
acquired = select_candidates(**kwargs)
elif scheme == 'pointexploit':
acquired = select_candidates(point=True, **kwargs)
elif scheme == 'explore':
acquired = select_candidates(explore=True, **kwargs)
elif scheme == 'quad':
acquired = select_candidates_per_quadrant(**kwargs)
elif scheme == 'quadexplore':
acquired = select_candidates_per_quadrant(explore=True, **kwargs)
elif scheme == 'perprot':
acquired = select_candidates_per_protein(**kwargs)
elif scheme == 'partition':
acquired, kwargs = select_candidates_per_partition(**kwargs)
return acquired, kwargs
def iterate(**kwargs):
prots = kwargs['prots']
X_obs = kwargs['X_obs']
y_obs = kwargs['y_obs']
idx_obs = kwargs['idx_obs']
X_unk = kwargs['X_unk']
y_unk = kwargs['y_unk']
idx_unk = kwargs['idx_unk']
regressor = kwargs['regressor']
regress_type = kwargs['regress_type']
if regress_type == 'cmf':
kwargs['y_unk_pred'] = regressor.predict(idx_unk)
else:
kwargs['y_unk_pred'] = regressor.predict(X_unk)
kwargs['var_unk_pred'] = regressor.uncertainties_
acquired, kwargs = acquire(**kwargs)
# Reset observations.
X_acquired = X_unk[acquired]
y_acquired = y_unk[acquired]
X_obs = np.vstack((X_obs, X_acquired))
y_obs = np.hstack((y_obs, y_acquired))
[ idx_obs.append(idx_unk[a]) for a in acquired ]
# Reset unknowns.
unacquired = [ i for i in range(X_unk.shape[0]) if i not in set(acquired) ]
X_unk = X_unk[unacquired]
y_unk = y_unk[unacquired]
idx_unk = [ idx for i, idx in enumerate(idx_unk) if i not in set(acquired) ]
kwargs['X_obs'] = X_obs
kwargs['y_obs'] = y_obs
kwargs['idx_obs'] = idx_obs
kwargs['X_unk'] = X_unk
kwargs['y_unk'] = y_unk
kwargs['idx_unk'] = idx_unk
return kwargs
if __name__ == '__main__':
#debug_selection('hybrid')
param_dict = process()
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('regress_type', help='model to use')
parser.add_argument('scheme', help='acquisition strategy')
parser.add_argument('n_candidates', type=int, help='number to acquire')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--beta', type=float, default=1,
help='explore/exploit tradeoff parameter')
args = parser.parse_args()
param_dict['regress_type'] = args.regress_type
param_dict['scheme'] = args.scheme
param_dict['n_candidates'] = args.n_candidates
param_dict['seed'] = args.seed
param_dict['beta'] = args.beta
n_iter = 1
for i in range(n_iter):
tprint('Iteration {}'.format(i))
param_dict = train(**param_dict)
param_dict = iterate(**param_dict)
|
411187
|
from pybricks.media.ev3dev import Font, Image, ImageFile
from pybricks.parameters import Color
from typing import Union
class Screen:
"""
A stub class to represent the screen member of the EV3Brick class.
Attributes:
height (int): The height of the screen in pixels.
width (int): The width of the screen in pixels.
"""
def __init__(self):
self.width = 178 # type: int
self.height = 128 # type: int
def clear(self):
"""
Clears the screen. All pixels on the screen will be set to Color.WHITE.
"""
...
def draw_text(self, x: int, y: int, text: str, text_color: Color = Color.BLACK, background_color: Color = None):
"""
Draws text on the screen.
The most recent font set using set_font() will be used or Font.DEFAULT if no font has been set yet.
Args:
x (int): The x-axis value where the left side of the text will start.
y (int): The y-axis value where the top of the text will start.
text (str): The text to draw.
text_color (Color): The color used for drawing the text.
background_color (Color): The color used to fill the rectangle behind the text or None for transparent background.
"""
...
def print(self, *args, sep: str = "", end: str = "\n"):
"""
Prints a line of text on the screen.
This method works like the builtin print() function, but it writes on the screen instead.
        You can set the font using set_font(). If no font has been set, Font.DEFAULT will be used. The text is always printed using black text with a white background.
Unlike the builtin print(), the text does not wrap if it is too wide to fit on the screen. It just gets cut off. But if the text would go off of the bottom of the screen, the entire image is scrolled up and the text is printed in the new blank area at the bottom of the screen.
Args:
args (object): Zero or more objects to print.
sep (str): Separator that will be placed between each object that is printed.
end (str): End of line that will be printed after the last object.
"""
...
def set_font(self, font: Font):
"""
Sets the font used for writing on the screen.
The font is used for both draw_text() and print().
Args:
font (Font): The font to use.
"""
...
def load_image(self, source: Union[str, Image, ImageFile]):
"""
Clears this image, then draws the source image centered in the screen.
Args:
source (ImageFile, Image, or str): The source Image. If the argument is a string (or ImageFile), then the source image is loaded from file.
"""
...
def draw_image(self, x: int, y: int, source: Union[str, Image, ImageFile], transparent: Color = None):
"""
Draws the source image on the screen.
Args:
x (int): The x-axis value where the left side of the image will start.
y (int): The y-axis value where the top of the image will start.
source (ImageFile, Image, str): The source Image. If the argument is a string (or ImageFile), then the source image is loaded from file.
transparent (Color): The color of image to treat as transparent or None for no transparency.
"""
...
def draw_pixel(self, x: int, y: int, color: Color = Color.BLACK):
"""
Draws a single pixel on the screen.
Args:
x (int): The x coordinate of the pixel.
y (int): The y coordinate of the pixel.
color (Color): The color of the pixel.
"""
...
def draw_line(self, x1: int, y1: int, x2: int, y2: int, width: int = 1, color: Color = Color.BLACK):
"""
Draws a line on the screen.
Args:
x1 (int): The x coordinate of the starting point of the line.
y1 (int): The y coordinate of the starting point of the line.
x2 (int): The x coordinate of the ending point of the line.
y2 (int): The y coordinate of the ending point of the line.
width (int): The width of the line in pixels.
color (Color): The color of the line.
"""
...
def draw_box(self, x1: int, y1: int, x2: int, y2: int, r: int = 0, fill: bool = False, color: Color = Color.BLACK):
"""
Draws a box on the screen.
Args:
x1 (int): The x coordinate of the left side of the box.
y1 (int): The y coordinate of the top of the box.
x2 (int): The x coordinate of the right side of the box.
y2 (int): The y coordinate of the bottom of the box.
r (int): The radius of the corners of the box.
fill (bool): If True, the box will be filled with color, otherwise only the outline of the box will be drawn.
color (Color): The color of the box.
"""
...
def draw_circle(self, x: int, y: int, r: int, fill: bool = False, color: Color = Color.BLACK):
"""
Draws a circle on the screen.
Args:
x (int): The x coordinate of the center of the circle.
y (int): The y coordinate of the center of the circle.
r (int): The radius of the circle.
fill (bool): If True, the circle will be filled with color, otherwise only the circumference will be drawn.
color (Color): The color of the circle.
"""
...
def save(self, filename: str):
"""
Saves the screen as a .png file.
Args:
filename (str): The path to the file to be saved.
Raises:
TypeError: filename is not a string
OSError: There was a problem saving the file.
"""
...
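# Minimal usage sketch against the stub (on hardware the screen is reached via
# EV3Brick().screen rather than constructed directly; the stub methods are no-ops):
if __name__ == '__main__':
    screen = Screen()
    screen.clear()
    screen.draw_text(10, 10, "Hello")
    screen.draw_circle(screen.width // 2, screen.height // 2, 20, fill=True)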
|
411203
|
from orion.evaluation.common import _accuracy, _f1_score, _precision, _recall, _weighted_segment
def _point_partition(expected, observed, start=None, end=None):
expected = set(expected)
observed = set(observed)
edge_start = min(expected.union(observed))
if start is not None:
edge_start = start
edge_end = max(expected.union(observed))
if end is not None:
edge_end = end
length = int(edge_end) - int(edge_start) + 1
expected_parts = [0] * length
observed_parts = [0] * length
for edge in expected:
expected_parts[edge - edge_start] = 1
for edge in observed:
observed_parts[edge - edge_start] = 1
return expected_parts, observed_parts, None
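# Worked example: with expected = {3, 5}, observed = {5, 6} and no explicit
# start/end, the shared range is 3..6, so the aligned binary vectors are
# expected_parts = [1, 0, 1, 0] and observed_parts = [0, 0, 1, 1].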
def point_confusion_matrix(expected, observed, data=None, start=None, end=None):
"""Compute the confusion matrix between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
tuple:
number of true negative, false positive, false negative, true positive.
"""
def _ws(x, y, z, w):
return _weighted_segment(x, y, _point_partition, z, w)
if data is not None:
start = data['timestamp'].min()
end = data['timestamp'].max()
if not isinstance(expected, list):
expected = list(expected['timestamp'])
if not isinstance(observed, list):
observed = list(observed['timestamp'])
return _ws(expected, observed, start, end)
def point_accuracy(expected, observed, data=None, start=None, end=None):
"""Compute an accuracy score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
Accuracy score between the ground truth and detected anomalies.
"""
return _accuracy(expected, observed, data, start, end, cm=point_confusion_matrix)
def point_precision(expected, observed, data=None, start=None, end=None):
"""Compute an precision score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
Precision score between the ground truth and detected anomalies.
"""
return _precision(expected, observed, data, start, end, cm=point_confusion_matrix)
def point_recall(expected, observed, data=None, start=None, end=None):
"""Compute an recall score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
Recall score between the ground truth and detected anomalies.
"""
return _recall(expected, observed, data, start, end, cm=point_confusion_matrix)
def point_f1_score(expected, observed, data=None, start=None, end=None):
"""Compute an f1 score between the ground truth and the detected anomalies.
Args:
expected (DataFrame or list of timestamps):
Ground truth passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
observed (DataFrame or list of timestamps):
Detected anomalies passed as a ``pandas.DataFrame`` or list containing
one column: timestamp.
data (DataFrame):
Original data, passed as a ``pandas.DataFrame`` containing timestamp.
Used to extract start and end.
start (int):
Minimum timestamp of the original data.
end (int):
Maximum timestamp of the original data.
Returns:
float:
F1 score between the ground truth and detected anomalies.
"""
return _f1_score(expected, observed, data, start, end, cm=point_confusion_matrix)
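# A minimal usage sketch (assumes the `orion` package imported above is installed and
# this module is run directly); the timestamp lists below are made up for illustration.
if __name__ == '__main__':
    expected_anomalies = [10, 11, 12, 30]   # hypothetical ground-truth anomalous timestamps
    detected_anomalies = [11, 12, 13]       # hypothetical detected anomalous timestamps
    print('precision:', point_precision(expected_anomalies, detected_anomalies, start=0, end=50))
    print('recall:', point_recall(expected_anomalies, detected_anomalies, start=0, end=50))
    print('f1:', point_f1_score(expected_anomalies, detected_anomalies, start=0, end=50))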
|
411204
|
from django.test import TestCase
from robber import expect
from data.factories import AllegationFactory, OfficerFactory, OfficerAllegationFactory
from social_graph.queries.geographic_data_query import GeographyCrsDataQuery, GeographyTrrsDataQuery
from trr.factories import TRRFactory
class GeographyCrsDataQueryTestCase(TestCase):
def test_data(self):
officer_1 = OfficerFactory(id=1)
officer_2 = OfficerFactory(id=2)
officer_3 = OfficerFactory(id=3)
officer_4 = OfficerFactory(id=4)
officers = [officer_1, officer_2, officer_3, officer_4]
allegation_1 = AllegationFactory(crid='123')
allegation_2 = AllegationFactory(crid='456')
allegation_3 = AllegationFactory(crid='789')
AllegationFactory(crid='987')
OfficerAllegationFactory(
officer=officer_1,
allegation=allegation_1
)
OfficerAllegationFactory(
officer=officer_1,
allegation=allegation_2
)
OfficerAllegationFactory(
officer=officer_2,
allegation=allegation_2
)
expected_data = [allegation_1.crid, allegation_2.crid, allegation_3.crid]
results = [item.crid for item in list(GeographyCrsDataQuery([allegation_3.crid], officers).data())]
expect(results).to.eq(expected_data)
class GeographyTrrsDataQueryTestCase(TestCase):
def test_data(self):
officer_1 = OfficerFactory(id=1)
officer_2 = OfficerFactory(id=2)
officer_3 = OfficerFactory(id=3)
officer_4 = OfficerFactory(id=4)
officer_5 = OfficerFactory(id=5)
officers = [officer_1, officer_2, officer_3, officer_4]
trr_1 = TRRFactory(id=1, officer=officer_3)
trr_2 = TRRFactory(id=2, officer=officer_4)
trr_3 = TRRFactory(id=3, officer=officer_4)
TRRFactory(id=4, officer=officer_5)
expected_data = [trr_1.id, trr_2.id, trr_3.id]
results = [item.id for item in list(GeographyTrrsDataQuery([trr_3.id], officers).data())]
expect(results).to.eq(expected_data)
|
411216
|
import archon.feeds.cryptocompare as cryptocompare
e = "Kucoin"
h = cryptocompare.get_hist("LTC","BTC",e)
|
411242
|
from unittest import TestCase
from unittest.mock import patch, ANY
import responses
import azkaban_cli.azkaban
from azkaban_cli.exceptions import FetchScheduleError, SessionError
class AzkabanFetchScheduleTest(TestCase):
def setUp(self):
"""
Creates an Azkaban instance and set a logged session for all fetch schedule tests
"""
self.azk = azkaban_cli.azkaban.Azkaban()
self.host = 'http://azkaban-mock.com'
self.user = 'username'
self.session_id = 'aebe406b-d5e6-4056-add6-bf41091e42c6'
self.azk.set_logged_session(self.host, self.user, self.session_id)
self.project_id = 123
self.flow = 'FlowTest'
@responses.activate
def test_fetch_schedule(self):
"""
Test fetch schedule method from Azkaban class
"""
responses.add(
responses.GET,
self.host + "/schedule",
json={
'schedule': {
'scheduleId': '456'
}
},
status=200
)
self.azk.fetch_schedule(self.project_id, self.flow)
@patch('azkaban_cli.azkaban.api.fetch_schedule_request')
def test_fetch_schedule_request_called(self, mock_fetch_schedule_request):
"""
Test if fetch schedule method from Azkaban class is calling fetch schedule request with expected arguments
"""
self.azk.fetch_schedule(self.project_id, self.flow)
mock_fetch_schedule_request.assert_called_with(ANY, self.host, self.session_id, self.project_id, self.flow)
@responses.activate
    def test_project_doesnt_exist_fetch_schedule(self):
"""
        Test if fetch schedule method from Azkaban class raises FetchScheduleError if the request returns an error because the project does not exist
"""
responses.add(
responses.GET,
self.host + "/schedule",
json={},
status=200
)
with self.assertRaises(FetchScheduleError):
self.azk.fetch_schedule(self.project_id, self.flow)
@responses.activate
    def test_flow_doesnt_exist_fetch_schedule(self):
"""
        Test if fetch schedule method from Azkaban class raises FetchScheduleError if the request returns an error because the flow does not exist
"""
responses.add(
responses.GET,
self.host + "/schedule",
json={},
status=200
)
with self.assertRaises(FetchScheduleError):
self.azk.fetch_schedule(self.project_id, self.flow)
@responses.activate
def test_error_session_expired_fetch_schedule(self):
"""
        Test if fetch schedule method from Azkaban class raises SessionError if the request returns an error caused by an expired session
"""
responses.add(responses.GET, self.host + "/schedule", json={"error": "session"}, status=200)
with self.assertRaises(SessionError):
self.azk.fetch_schedule(self.project_id, self.flow)
|
411268
|
class Page(object):
def __init__(self, url: str, path: str, links: list, pars: list, title: str, typo: str):
self.url = url
self.path = path
self.links = links
self.pars = pars
self.title = title
self.typo= typo
def set_url(self,url):
self.url = url
def set_path(self,path):
self.path = path
def add_link(self,link):
self.links.append(link)
def set_title(self,title):
self.title = title
def add_par(self,par):
self.pars.append(par)
def set_typo(self,ty):
self.typo=ty
    def del_link(self,link):
        try:
            self.links.remove(link)
        except ValueError:
            return f"Could not remove {link}\n does it exist?"
        return f"{link} was removed successfully"
    def del_par(self,par):
        try:
            self.pars.remove(par)
        except ValueError:
            return f"Could not remove {par}\n nonexistent paragraph?"
        return f"{par} was removed successfully"
def get_url(self):
return self.url
def get_path(self):
return self.path
    def get_links(self):
        return self.links
def get_title(self):
return self.title
def get_pars(self):
return self.pars
def get_typo(self):
return self.typo
def __str__(self):
r=f"type: {self.typo}"
r+=f"\npath: {self.path}"
r+=f"\nurl: {self.url}"
r+=f"\ntitle: {self.title}"
r+=f"\npars: {self.pars}"
r+=f"\nlinks: {self.links}"
return r
class Website(Page):
def __init__(self, domain:str, subdomains: list, pages: list):
self.domain=domain
self.subdomains=subdomains
self.pages=pages
def rename_domain(self, dom):
self.domain=dom
def add_subdomain(self, sub):
self.subdomains.append(sub)
def add_page(self, page):
self.pages.append(page)
    def del_sub(self,subs):
        try:
            self.subdomains.remove(subs)
        except ValueError:
            return f"{subs} could not be removed\n does it exist?"
        return f"{subs} has been removed"
    def del_page(self,page):
        try:
            self.pages.remove(page)
        except ValueError:
            return f"{page} could not be removed\n does it exist?"
        return f"{page} has been removed"
def get_domain(self):
return self.domain
def get_subdomains(self):
return self.subdomains
    def get_pages(self):
        r=""
        for i in self.pages:
            r+="\n"
            r+=f"{i}"
            r+="#########"
        return r
def __str__(self):
r=f"Domain:{self.domain}"
r+=f"\nsubDomains:{self.subdomains}"
r+="\nPAGINAS:\n"
for i in self.pages:
r+="############\n"
r+=f"{i}"
r+="\n"
return r
def buscador(web: Website , page: Page):
if page in web.pages:
return "Pagina Encontrada."
else:
return "Error 404"
def main():
##PRUEBA DE FUNCIONALIDAD DE LA CLASE "PAGE"##
links=["www.facebook.com","www.twitter.com"]
pars=["<p>Hola</p>",
"<p>me</p>",
"<p>llamo así</p>"]
pagina1=Page("github.com/emiliobg1997","/index.html",links,pars, "My repo","html")
links=["google.com","youtube.com"]
pars=["<p>Hola</p>",
"<p>me</p>",
"<p>llamo asa</p>"]
pagina2=Page("github.com/lufergamo2502","/index.html",links,pars, "his repo","html")
links=["xda-devs.com","linkedin.com"]
pars=["<p>Hola</p>",
"<p>me</p>",
"<p>llamo ase</p>"]
pagina3=Page("github.com/asanchez","/index.html",links,pars, "her repo","html")
##PRUEBA DE FUNCIONALIDAD DE LA CLASE "websites"
domain="github.com"
subdomains=["subgit","sub2"]
pags=[pagina1,pagina2,pagina3]
web=Website(domain,subdomains,pags)
print(web)
    ## FUNCTIONALITY TEST OF THE METHODS
print(buscador(web,pagina1))
if __name__=="__main__":
main()
|
411322
|
from tensorflow import keras as tf
import cv2
import numpy as np
from collections import Counter
from sklearn.preprocessing import LabelEncoder
import os
class Dataset:
def __init__(self, arch, path_of_dataset=None):
self.n_classes = 0
self.bad_data = []
self.X = []
self.Y = []
self.classes = []
self.maxOccuringShape = None
if path_of_dataset is not None:
self.path_of_dataset = path_of_dataset
self.populate_dataset()
self.one_hot_encoding()
self.getMaxOccuringShape()
self.normalize()
if arch == "dense":
self.flatten()
self.convertToArray()
def populate_dataset(self):
for directory in os.listdir(self.path_of_dataset):
if directory.startswith('.'):
continue
self.classes.append(directory)
self.n_classes += 1
for img in os.listdir(os.path.join(self.path_of_dataset, directory)):
if img.startswith('.') or img.startswith('_'):
continue
try:
self.X.append(cv2.imread(os.path.join(self.path_of_dataset, directory, img)))
self.Y.append(directory)
except:
pass
print('Classes: ', self.classes)
def one_hot_encoding(self):
encoder = LabelEncoder()
self.Y = encoder.fit_transform(self.Y)
self.Y = tf.utils.to_categorical(self.Y)
def getMaxOccuringShape(self):
shapes = []
for i in range(len(self.X)):
self.X[i] = np.array(self.X[i])
if len(self.X[i].shape) > 1:
shapes.append(self.X[i].shape)
self.maxOccuringShape = Counter(shapes).most_common()
print('Shape: ', self.maxOccuringShape[0][0])
def normalize(self):
for i in range(len(self.X)):
try:
self.X[i] = cv2.resize(self.X[i], self.maxOccuringShape[0][0][:2], self.X[i])
self.X[i] = cv2.normalize(self.X[i], self.X[i], 0, 1, cv2.NORM_MINMAX, cv2.CV_32F)
except:
self.bad_data.append(i)
def flatten(self):
for i in range(len(self.X)):
self.X[i] = np.array(self.X[i]).ravel()
    def convertToArray(self):
        # drop samples that failed to normalize; walk indices in reverse so they stay valid
        for i in sorted(self.bad_data, reverse=True):
            del self.X[i]
            self.Y = np.delete(self.Y, i, axis=0)
        self.X = np.asarray(self.X, dtype=np.float32)
        self.Y = np.array(self.Y)
class DenseNet:
def __init__(self, use_pretrained_model, path_of_dataset=None, neurons_per_layer=None, activations=None,
model_path=None, epochs=None):
self.model_path = model_path
self.model = tf.models.Model()
self.use_pretrained_model = use_pretrained_model
if use_pretrained_model:
self.dataset = Dataset(arch='dense')
items = list(os.listdir(model_path))
if 'nnio.l.cfg' not in items:
print('Err: Not a valid model path, Configuration missing')
return
with open(os.path.join(model_path, 'nnio.l.cfg'), 'r') as f:
config = f.readlines()
self.dataset.n_classes = int(config[1].replace('\n', ''))
self.dataset.maxOccuringShape = config[2].replace('\n', '').replace('[', '').replace(']', '').replace(
'(', '').replace(')', '').split(',')[:2]
self.dataset.maxOccuringShape = [int(i.replace(' ', '')) for i in self.dataset.maxOccuringShape]
self.dataset.classes = config[0].replace('\n', '')
print(
'Model initialized with:\n{}\n{}\n{}'.format(self.dataset.n_classes, self.dataset.maxOccuringShape,
self.dataset.classes))
else:
assert path_of_dataset is not None and neurons_per_layer is not None and activations is not None and model_path is not None and epochs is not None, "Err: Required args not passed for object initialization"
self.path_of_dataset = path_of_dataset
self.neurons_per_layer = neurons_per_layer
self.activations = activations
self.epochs = epochs
self.dataset = Dataset('dense', path_of_dataset)
self.DenseNet()
self.fit()
def DenseNet(self):
self.model = tf.models.Sequential()
self.model.add(tf.Input([np.prod(self.dataset.maxOccuringShape[0][0])]))
for i in range(len(self.neurons_per_layer)):
self.model.add(tf.layers.Dense(self.neurons_per_layer[i], activation=self.activations[i]))
self.model.add(tf.layers.Dense(self.dataset.n_classes, activation='softmax'))
self.model.compile(optimizer='adam', loss='categorical_crossentropy')
def summary(self):
self.model.summary()
def fit(self):
self.model.fit(self.dataset.X, self.dataset.Y, epochs=self.epochs)
self.model.save(self.model_path)
with open(os.path.join(self.model_path, 'nnio.l.cfg'), 'w') as f:
f.write(str(self.dataset.classes) + '\n')
f.write(str(self.dataset.n_classes) + '\n')
f.write(str(self.dataset.maxOccuringShape) + '\n')
def predict(self, x):
img = cv2.imread(x)
if self.use_pretrained_model:
self.model = tf.models.load_model(self.model_path)
img = cv2.resize(img, tuple(self.dataset.maxOccuringShape), img)
else:
img = cv2.resize(img, self.dataset.maxOccuringShape[0][0][:2], img)
cv2.normalize(img, img, 0, 1, cv2.NORM_MINMAX, cv2.CV_32F)
img = np.array(img)
img = img.ravel()
img = np.expand_dims(img, 0)
print("Prediction: ", np.array(self.model.predict(img)).argmax())
class ConvNet:
def __init__(self, use_pretrained_model, path_of_dataset=None, filters_per_layer=None, activations=None,
model_path=None, epochs=None):
self.model_path = model_path
self.model = tf.models.Model()
self.use_pretrained_model = use_pretrained_model
if use_pretrained_model:
self.dataset = Dataset(arch='conv')
items = list(os.listdir(model_path))
if 'nnio.l.cfg' not in items:
print('Err: Not a valid model path, Configuration missing')
return
with open(os.path.join(model_path, 'nnio.l.cfg'), 'r') as f:
config = f.readlines()
self.dataset.n_classes = int(config[1].replace('\n', ''))
self.dataset.maxOccuringShape = config[2].replace('\n', '').replace('[', '').replace(']', '').replace(
'(', '').replace(')', '').split(',')[:2]
self.dataset.maxOccuringShape = [int(i.replace(' ', '')) for i in self.dataset.maxOccuringShape]
self.dataset.classes = config[0].replace('\n', '')
print(
'Model initialized with:\n{}\n{}\n{}'.format(self.dataset.n_classes, self.dataset.maxOccuringShape,
self.dataset.classes))
else:
assert path_of_dataset is not None and filters_per_layer is not None and activations is not None and model_path is not None and epochs is not None, "Err: Required args not passed for object initialization"
self.path_of_dataset = path_of_dataset
self.filters_per_layer = filters_per_layer
self.activations = activations
self.epochs = epochs
self.dataset = Dataset('conv', path_of_dataset)
self.ConvNet()
self.summary()
self.fit()
def ConvNet(self):
self.model = tf.models.Sequential()
self.model.add(tf.Input((self.dataset.maxOccuringShape[0][0])))
for i in range(len(self.filters_per_layer)):
self.model.add(tf.layers.Conv2D(self.filters_per_layer[i], kernel_size=(3, 3), activation=self.activations[i]))
self.model.add(tf.layers.Flatten())
self.model.add(tf.layers.Dense(self.dataset.n_classes, activation='softmax'))
self.model.compile(optimizer='adam', loss='categorical_crossentropy')
def summary(self):
self.model.summary()
def fit(self):
self.model.fit(self.dataset.X, self.dataset.Y, epochs=self.epochs)
self.model.save(self.model_path)
with open(os.path.join(self.model_path, 'nnio.l.cfg'), 'w') as f:
f.write(str(self.dataset.classes) + '\n')
f.write(str(self.dataset.n_classes) + '\n')
f.write(str(self.dataset.maxOccuringShape) + '\n')
def predict(self, x):
img = cv2.imread(x)
if self.use_pretrained_model:
self.model = tf.models.load_model(self.model_path)
img = cv2.resize(img, tuple(self.dataset.maxOccuringShape), img)
else:
img = cv2.resize(img, self.dataset.maxOccuringShape[0][0][:2], img)
cv2.normalize(img, img, 0, 1, cv2.NORM_MINMAX, cv2.CV_32F)
img = np.array(img)
img = np.expand_dims(img, 0)
print("Prediction: ", np.array(self.model.predict(img)).argmax())
|
411331
|
import re
import json
import time
# This code was inspired by Jay2K1's Hangouts parser. You can see the
# blogpost for the original at:
# http://blog.jay2k1.com/2014/11/10/how-to-export-and-backup-your-google-hangouts-chat-history/
# He also runs a webservice for parsing Google Hangouts JSON files at:
# http://hangoutparser.jay2k1.com/
def replaceSmileys(string):
# replaces UTF-8 graphical emoticons by their ASCII equivalents
# list of emoji codes taken from https://aprescott.com/posts/hangouts-emoji
patterns = [
        u'\U0001F41D', # -<@% -> honeybee
        u'\U0001F435', # :(|) -> monkey face
        u'\U0001F437', # :(:) -> pig face
        u'\U0001F473', # (]:{ -> man with turban
        u'\U0001F494', # <\3 </3 -> broken heart
        u'\U0001F49C', # <3 -> purple heart
        u'\U0001F4A9', # ~@~ -> pile of poo
        u'\U0001F600', # :D :-D -> grinning face
        u'\U0001F601', # ^_^ -> grinning face with smiling eyes
        u'\U0001F602', # XD
        u'\U0001F603', # :) :-) =) -> smiling face with open mouth
        u'\U0001F604', # =D -> smiling face with open mouth and smiling eyes
        u'\U0001F605', # ^_^;; -> smiling face with open mouth and cold sweat
        u'\U0001F607', # O:) O:-) O=) -> smiling face with halo
        u'\U0001F608', # }:) }:-) }=) -> smiling face with horns
        u'\U0001F609', # ;) ;-) -> winking face
        u'\U0001F60E', # B) B-) -> smiling face with sunglasses
        u'\U0001F610', # :-| :| =| -> neutral face
        u'\U0001F611', # -_- -> expressionless face
        u'\U0001F613', # o_o; -> face with cold sweat
        u'\U0001F614', # u_u -> pensive face
        u'\U0001F615', # :\ :/ :-\ :-/ =\ =/ -> confused face
        u'\U0001F616', # :S :-S :s :-s -> confounded face
        u'\U0001F617', # :* :-* -> kissing face
        u'\U0001F618', # ;* ;-* -> face throwing a kiss
        u'\U0001F61B', # :P :-P =P :p :-p =p -> face with stuck-out tongue
        u'\U0001F61C', # ;P ;-P ;p ;-p -> face with stuck-out tongue and winking eye
        u'\U0001F61E', # :( :-( =( -> disappointed face
        u'\U0001F621', # >.< >:( >:-( >=( -> pouting face
        u'\U0001F622', # T_T :'( ;_; ='( -> crying face
        u'\U0001F623', # >_< -> persevering face
        u'\U0001F626', # D: -> frowning face with open mouth
        u'\U0001F62E', # o.o :o :-o =o -> face with open mouth
        u'\U0001F632', # O.O :O :-O =O -> astonished face
        u'\U0001F634', # -_-Zzz -> sleeping face
        u'\U0001F635', # x_x X-O X-o X( X-( -> dizzy face
        u'\U0001F638', # :X) :3 (=^..^=) (=^.^=) =^_^= -> grinning cat face with smiling eyes
        u'\U0001F64C' # Dunno, but it needs to be replaced for ASCII
]
replacements = [
'-<@%',
':(|)',
':(:)',
'(]:{',
'</3',
'<3',
'~@~',
':D',
'^_^',
'XD',
':)',
'=D',
'^_^;;',
'O:)',
'}:)',
';)',
'B-)',
':|',
'-_-',
'o_o;',
'u_u',
':/',
':S',
':*',
';*',
':P',
';P',
':(',
'>.<',
":'(",
'>_<',
'D:',
':o',
':O',
'-_-Zzz',
'x_x',
':3',
'_'
]
for index in range(len(patterns)):
string = re.sub(patterns[index], replacements[index], string)
return string
def hangoutsToArray(json_input, timestamp_format):
# set the desired timestamp format here
# the default is '%Y-%m-%d %H:%M:%S' which is YYYY-MM-DD HH:mm:ss.
#timestamp_format = '%Y-%m-%d %H:%M:%S'
# decode JSON
decoded = json.loads(json_input)
# extract useful part
rawconvos = decoded['conversations']
#print "%r" % rawconvos
retval = []
# loop through conversations
for i in range(len(rawconvos)):
#print "i is %d" % i
#print "attempting in_conv: %s" % rawconvos[i]['conversation_state']['conversation']
# first, get metadata
retval.append({})
convo = rawconvos[i]
#print "%r" % convo
in_conv = rawconvos[i]['conversation']['conversation']
in_event = rawconvos[i]['events']
pdata = in_conv['participant_data']
retval[i]['type'] = in_conv['type']
retval[i]['msgcount'] = len(in_event)
retval[i]['name'] = in_conv['name'] if 'name' in in_conv.keys() else ""
# conversation participants
for j in range(len(pdata)):
id = pdata[j]['id']['chat_id']
# use "unknown_<chat_id>" as name if they don't have a fallback_name
name = pdata[j]['fallback_name'] if 'fallback_name' in pdata[j].keys() else "unknown_%s" % id
if not 'members' in retval[i].keys():
retval[i]['members'] = {}
retval[i]['members'][id] = name
# loop through messages/events
messages = []
for k in range(len(in_event)):
messages.append({})
messages[k]['timestamp'] = in_event[k]['timestamp']
messages[k]['datetime'] = time.strftime(timestamp_format,time.localtime(int(messages[k]['timestamp'][0:10])))
messages[k]['sender_id'] = in_event[k]['sender_id']['chat_id']
            messages[k]['sender'] = retval[i]['members'][messages[k]['sender_id']] if messages[k]['sender_id'] in retval[i]['members'].keys() else "unknown_%s" % messages[k]['sender_id']
messages[k]['event_type'] = in_event[k]['event_type']
if messages[k]['event_type'] == 'RENAME_CONVERSATION':
newname = in_event[k]['conversation_rename']['new_name']
oldname = in_event[k]['conversation_rename']['old_name']
messages[k]['message'] = "changed conversation name %s%s" % \
(("from '%s'" % oldname) if oldname else "",
("to '%s'" % newname) if newname else "")
elif messages[k]['event_type'] == 'HANGOUT_EVENT':
if in_event[k]['hangout_event']['event_type'] == 'START_HANGOUT':
messages[k]['message'] = 'started a video chat'
elif in_event[k]['hangout_event']['event_type'] == 'END_HANGOUT':
messages[k]['message'] = 'ended a video chat'
else:
messages[k]['message'] = in_event[k]['hangout_event']['event_type']
elif messages[k]['event_type'] == 'REGULAR_CHAT_MESSAGE':
messages[k]['message'] = ""
msg = ""
msghtml = ""
# join message segments together
if 'segment' in in_event[k]['chat_message']['message_content'].keys():
for event in in_event[k]['chat_message']['message_content']['segment']:
if not 'text' in event.keys():
continue
if event['type'] == 'TEXT':
msg += event['text']
msghtml += re.sub("\n", "<br>", event['text'])
elif event['type'] == 'LINK':
msg += event['text']
msghtml += '<a href="%s" target="_blank">%s</a>' % (event['link_data']['link_target'], event['text'])
elif event['type'] == 'LINE_BREAK':
msg += event['text']
msghtml += re.sub("\n", "<br>", event['text'])
# handle attachments
elif 'attachment' in in_event[k]['chat_message']['message_content'].keys():
# loop through attachments
for att in in_event[k]['chat_message']['message_content']['attachment']:
# echo "<pre>";print_r($att);echo "</pre>";
if att['embed_item']['type'][0] == 'PLUS_PHOTO':
imgurl = att['embed_item']['plus_photo']['url']
msg += imgurl
msghtml += '<a href="%s" target="_blank"><img src="%s" alt="attached image" style="max-width:%s"></a>' % (imgurl, imgurl, "100%")
# replace unicode emoticon characters by smileys
messages[k]['message'] = replaceSmileys(msg)
if msg != msghtml:
messages[k]['message_html'] = replaceSmileys(msghtml)
elif messages[k]['event_type'] == 'ADD_USER':
newuserid = in_event[k]['membership_change']['participant_id'][0]['chat_id']
newusername = retval[i]['members'][newuserid] if newuserid in retval[i]['members'].keys() else 'unknown_%s' % newuserid
messages[k]['message'] = "added user '%s' to conversation" % newusername
elif messages[k]['event_type'] == 'REMOVE_USER':
newuserid = in_event[k]['membership_change']['participant_id'][0]['chat_id']
newusername = retval[i]['members'][newuserid] if newuserid in retval[i]['members'].keys() else 'unknown_%s' % newuserid
messages[k]['message'] = "removed user '%s' from conversation" % newusername
elif messages[k]['event_type'] == 'SMS':
messages[k]['message'] = ""
# join message segments together
if 'segment' in in_event[k]['chat_message']['message_content'].keys():
for l in range(len(in_event[k]['chat_message']['message_content']['segment'])):
if not 'text' in in_event[k]['chat_message']['message_content']['segment'][l].keys():
continue
messages[k]['message'] += in_event[k]['chat_message']['message_content']['segment'][l]['text']
# replace unicode emoticon characters by smileys
messages[k]['message'] = replaceSmileys(messages[k]['message'])
elif messages[k]['event_type'] == 'OTR_MODIFICATION':
messages[k]['message'] = 'unknown OTR_MODIFICATION'
elif messages[k]['event_type'] == 'VOICEMAIL':
messages[k]['message'] = "new voicemail:\n"
# join message segments together
if 'segment' in in_event[k]['chat_message']['message_content'].keys():
for l in range(len(in_event[k]['chat_message']['message_content']['segment'])):
if not 'text' in in_event[k]['chat_message']['message_content']['segment'][l].keys():
continue
messages[k]['message'] += in_event[k]['chat_message']['message_content']['segment'][l]['text']
# replace unicode emoticon characters by smileys
messages[k]['message'] = replaceSmileys(messages[k]['message'])
# sort messages by timestamp because for some reason they're cluttered
        messages.sort(key=lambda m: int(m['timestamp']))
# add the messages array to the conversation array
retval[i]['messages'] = messages
return retval
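# A minimal usage sketch (the export file name below is hypothetical): parse a Google
# Takeout "Hangouts.json" dump and print one line per message.
if __name__ == '__main__':
    with open('Hangouts.json', 'r', encoding='utf-8') as f:
        conversations = hangoutsToArray(f.read(), '%Y-%m-%d %H:%M:%S')
    for conv in conversations:
        for msg in conv['messages']:
            print('[%s] %s: %s' % (msg['datetime'], msg['sender'], msg.get('message', '')))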
|