code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# Generated by Django 3.1 on 2020-09-01 21:58
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0003_auto_20200830_2005'),
]
operations = [
migrations.RenameModel(
old_name='Profiles',
new_name='Profile',
),
]
| [
"django.db.migrations.RenameModel",
"django.db.migrations.swappable_dependency"
] | [((184, 241), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (215, 241), False, 'from django.db import migrations\n'), ((323, 386), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""Profiles"""', 'new_name': '"""Profile"""'}), "(old_name='Profiles', new_name='Profile')\n", (345, 386), False, 'from django.db import migrations\n')] |
from argparse import ArgumentParser
from diary.database import connect
from diary.presenter import display_entries
from diary.utils import custom_date
from diary.generator import generate_command
import logging
import re
import os
__version__ = '2.2.0'
try:
# Strip non- word or dash characters from device name
DEVICE_NAME = re.sub(r'[^\w-]', '', os.uname().nodename)
except:
DEVICE_NAME = 'unknown'
# SETUP MAIN PARSER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
parser = ArgumentParser(
description='A program for writing and viewing a personal diary')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
parser.add_argument('-b', '--base', default=os.path.expandvars('$HOME/.diary'),
help='path to base folder (defaults to `~/.diary`)')
subparsers = parser.add_subparsers(title='subcommands')
# EDIT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def edit_command(conn, search_terms, entry_id, editor, message, **kwargs):
if entry_id:
entry = conn.find_by_id(entry_id) if entry_id else conn.most_recent_entry()
else:
entry = conn.search_entries(*search_terms, descending=True).__next__()
if entry:
if message is not None:
entry.text = message
else:
entry.command_line_edit(editor)
else:
print('No entry to edit')
subparser = subparsers.add_parser('edit',
description='Open Vim to edit the most recent entry',
help='edit the most recent entry or a specified entry')
subparser.add_argument('search_terms', nargs='*',
help='any number of regular expressions to search for')
subparser.add_argument('--entry-id',
help='entry id of the form "$timestamp-$device_name"')
subparser.add_argument('-e', '--editor', default='vim',
help='editor to write the entry with (defaults to `vim`)')
subparser.add_argument('-m', '--message',
help='directly set the text of the entry to MESSAGE')
subparser.set_defaults(func=edit_command)
# NEW ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def new_command(conn, date, editor, message, device_name, **kwargs):
entry = conn.new_entry(date, device_name)
if message is not None:
entry.text = message
else:
entry.command_line_edit(editor)
subparser = subparsers.add_parser('new',
description='Open Vim to edit a new entry',
help='create a new entry')
subparser.add_argument('-d', '--date', type=custom_date,
help='date of the new entry (defaults to now)')
subparser.add_argument('-e', '--editor', default='vim',
help='editor to write the entry with (defaults to `vim`)')
subparser.add_argument('-m', '--message',
help='directly set the text of the entry to MESSAGE')
subparser.add_argument('--device-name', default=DEVICE_NAME,
help='name of the device the entry was created on ' +
'(defaults to `{}`)'.format(DEVICE_NAME))
subparser.set_defaults(func=new_command)
# SEARCH ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def search_command(conn, search_terms, descending, after, before, pipe_to, **kwargs):
entries = conn.search_entries(*search_terms, descending=descending,
min_date=after, max_date=before)
display_entries(entries, pipe_to, search_terms)
subparser = subparsers.add_parser('search', aliases=['list'],
description='Display entries containing all of the given search terms',
help='display all entries, with optional search filter')
subparser.add_argument('search_terms', nargs='*',
help='any number of regular expressions to search for')
subparser.add_argument('--pipe-to', metavar='COMMAND', default='less -R',
help='pipe output to the given command')
#TODO this is shared with wordcount script below, abstract it
subparser.add_argument('--before', type=custom_date, metavar='DATE',
help='only show entries occurring before DATE')
subparser.add_argument('--after', type=custom_date, metavar='DATE',
help='only show entries occurring after DATE')
sort_order = subparser.add_mutually_exclusive_group()
sort_order.add_argument('--asc', action='store_false', dest='descending',
help='sort in ascending date order')
sort_order.add_argument('--desc', action='store_true', dest='descending',
help='sort in descending date order')
subparser.set_defaults(func=search_command, descending=True)
# WORDCOUNT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def wordcount_command(conn, group_by, descending, after, before, **kwargs):
if group_by is None: group_by = 'Total'
wordcounts = {}
entry_counts = {}
for entry in conn.get_entries(descending=descending, min_date=after,
max_date=before):
group = entry.date.strftime(group_by)
if group not in wordcounts:
wordcounts[group], entry_counts[group] = 0, 0
wordcounts[group] += entry.wordcount
entry_counts[group] += 1
results = [ (group, wordcounts[group], entry_counts[group])
for group in sorted(wordcounts.keys()) ]
if len(results)>1:
results.append( ('Total', sum(wordcounts.values()),
sum(entry_counts.values())) )
if len(results)==0:
results.append( ('Total', 0, 0) )
max_lengths = {'len_group': max(len(str(result[0])) for result in results),
'len_wc': len(str(results[-1][1])),
'len_ec': len(str(results[-1][2]))}
fmt_str = '{0:>{len_group}}: {1:>{len_wc}} words, {2:>{len_ec}} entries'
for result in results:
print(fmt_str.format(*result, **max_lengths))
subparser = subparsers.add_parser('wordcount', aliases=['wc'],
description='Pretty print aggregated wordcount totals',
help='print wordcount statistics')
group_by = subparser.add_mutually_exclusive_group()
group_by.add_argument('-y', '--year', action='store_const', const='%Y',
dest='group_by', help='group by year')
group_by.add_argument('-m', '--month', action='store_const', const='%Y-%m',
dest='group_by', help='group by month')
group_by.add_argument('-d', '--day', action='store_const', const='%Y-%m-%d',
dest='group_by', help='group by day')
group_by.add_argument('-w', '--weekday', action='store_const', const='%u %a',
dest='group_by', help='group by weekday')
group_by.add_argument('-g', '--group-by', metavar='DATE_FORMAT',
dest='group_by', help='format entry dates with DATE_FORMAT and combine '
'wordcount totals for all entries which have the '
'same formatted date, e.g. "%%Y-%%m-%%d"')
subparser.add_argument('--before', type=custom_date, metavar='DATE',
help='only show entries occurring before DATE')
subparser.add_argument('--after', type=custom_date, metavar='DATE',
help='only show entries occurring after DATE')
sort_order = subparser.add_mutually_exclusive_group()
sort_order.add_argument('--asc', action='store_false', dest='descending',
help='sort in ascending date order')
sort_order.add_argument('--desc', action='store_true', dest='descending',
help='sort in descending date order')
subparser.set_defaults(func=wordcount_command)
# GENERATE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# generate_command is imported at the top
subparser = subparsers.add_parser('generate',
description='Create a HTML representation of your diary',
help='generate HTML diary')
subparser.add_argument('-o', '--out',
help='directory to place HTML (defaults to {your_base_dir}/html)')
subparser.add_argument('-c', '--clean', action='store_true',
help='remove out dir before generating')
subparser.set_defaults(func=generate_command)
# PROCESS ARGS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def process_args(arg_list=None):
args = parser.parse_args(arg_list)
if hasattr(args, 'func'):
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s - %(levelname)s - %(message)s',
filename=os.path.join(args.base, '{}.log'.format(DEVICE_NAME)))
conn = connect(args.base)
args.func(conn, **vars(args))
else:
parser.print_usage()
if __name__ == '__main__':
process_args()
| [
"argparse.ArgumentParser",
"os.path.expandvars",
"diary.presenter.display_entries",
"diary.database.connect",
"os.uname"
] | [((507, 592), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""A program for writing and viewing a personal diary"""'}), "(description='A program for writing and viewing a personal diary'\n )\n", (521, 592), False, 'from argparse import ArgumentParser\n'), ((3336, 3383), 'diary.presenter.display_entries', 'display_entries', (['entries', 'pipe_to', 'search_terms'], {}), '(entries, pipe_to, search_terms)\n', (3351, 3383), False, 'from diary.presenter import display_entries\n'), ((738, 772), 'os.path.expandvars', 'os.path.expandvars', (['"""$HOME/.diary"""'], {}), "('$HOME/.diary')\n", (756, 772), False, 'import os\n'), ((8285, 8303), 'diary.database.connect', 'connect', (['args.base'], {}), '(args.base)\n', (8292, 8303), False, 'from diary.database import connect\n'), ((358, 368), 'os.uname', 'os.uname', ([], {}), '()\n', (366, 368), False, 'import os\n')] |
from copy import deepcopy
from logics.utils.parsers import parser_utils
from logics.classes.exceptions import NotWellFormed
from logics.classes.predicate import PredicateFormula
from logics.utils.parsers.standard_parser import StandardParser
class PredicateParser(StandardParser):
"""Parser for predicate languages.
Extends ``StandardParser``. Has two additional parameters to specify infix predicates and functions.
Also includes some changes in the format of the valid input:
* Atomics must be given in format ``"R(a, b, c)"`` for prefix predicates, or ``"a = b"`` for infix predicates
* Infix predicate formuale must come without outer parentheses, e.g. ``"(a = b)"`` is not well formed
* Outermost parentheses in infix function terms can be ommited, e.g. both ``"0+(0+0)"`` and ``"(0+(0+0))"`` are ok
* Infix predicates and function symbols CANNOT be given in prefix notation
* Quantified formulae come in format ∀x (A) or ∀x ∈ T (A) - Always add parentheses to the quantified formula
Parameters
----------
language: logics.classes.propositional.Language or logics.classes.propositional.InfiniteLanguage
Instance of Language or InfiniteLanguage
parse_replacement_dict: dict, optional
Dictionary of the form ({string: string, ...}). See below for an explanation
unparse_replacement_dict: dict, optional
Same as the above parameter
infix_cts: list of str, optional
The list of constants that will be written in infix notation
infix_pred: list of str, optional
The list of predicates that will be written in infix notation
infix_func: list of str, optional
The list of function symbols that will be written in infix notation
comma_separator: str, optional
Character (preferrably of len 1) used to separate the premises or separate the conclusions within an inference
inference_separator: str, optional
Character (preferrably of len 1) used to separate between the premises and conclusions in an inference
derivation_step_separator: str, optional
Character (preferrably of len 1) used to separate the components of a derivation step
Examples
--------
>>> from logics.instances.predicate.languages import real_number_arithmetic_language
>>> from logics.utils.parsers.predicate_parser import PredicateParser
>>> replacement_dict = {
... '¬': '~', 'not ': '~',
... '&': '∧', ' and ': '∧', # notice the spaces before and after 'and'
... 'v': '∨', ' or ': '∨',
... ' then ': '→', '-->': '→', 'if ': '', # 'if p then q' it will convert to 'p then q'
... ' iff ': '↔', '<->': '↔',
... 'forall ': '∀', 'exists ': '∃', ' in ': '∈'
... }
>>> real_number_arithmetic_parser = PredicateParser(language=real_number_arithmetic_language,
... parse_replacement_dict=replacement_dict,
... infix_cts=['∧', '∨', '→', '↔'],
... infix_pred=['=', '<', '>'], infix_func=['+', '*', '**'])
>>> real_number_arithmetic_parser.parse("0.5 + 0.5 = 1")
['=', ('+', '0.5', '0.5'), '1']
>>> f = real_number_arithmetic_parser.parse("1 + 1 = 2 or exists x (x + 1 = 2)")
>>> f
['∨', ['=', ('+', '1', '1'), '2'], ['∃', 'x', ['=', ('+', 'x', '1'), '2']]]
>>> type(f)
<class 'logics.classes.predicate.formula.PredicateFormula'>
>>> real_number_arithmetic_parser.unparse(f)
'1 + 1 = 2 ∨ ∃x (x + 1 = 2)'
>>> # Infix predicates and function symbols cannot be given in prefix notation
>>> real_number_arithmetic_parser.parse("=(+(1,1),2)")
Traceback (most recent call last):
...
IndexError: string index out of range
Examples with a predefined parser for a language with prefix predicates and function symbols (see below for
more predefined instances):
>>> from logics.utils.parsers.predicate_parser import classical_predicate_parser
>>> classical_predicate_parser.parse("R(a, b) or P(f(a))")
['∨', ['R', 'a', 'b'], ['P', ('f', 'a')]]
>>> classical_predicate_parser.parse("forall x in f(a) (if ~P(x) then P(x))")
['∀', 'x', '∈', ('f', 'a'), ['→', ['~', ['P', 'x']], ['P', 'x']]]
"""
def __init__(self, language, parse_replacement_dict, unparse_replacement_dict=None, infix_cts=None, infix_pred=None,
infix_func=None, comma_separator=',', inference_separator='/', derivation_step_separator=';'):
if infix_pred is None:
infix_pred = list()
if infix_func is None:
infix_func = list()
self.infix_pred = infix_pred
self.infix_func = infix_func
super().__init__(language=language, parse_replacement_dict=parse_replacement_dict,
unparse_replacement_dict=unparse_replacement_dict,
infix_cts=infix_cts, comma_separator=comma_separator, inference_separator=inference_separator,
derivation_step_separator=derivation_step_separator)
# ------------------------------------------------------------------------------------------------------------------
# PARSE FORMULA METHODS
def _is_atomic(self, string):
"""To identify if a string as an atomic formula, check that it does not contain constants and quantifiers"""
for quant in self.language.quantifiers:
if quant in string:
return False
for ctt in self.language.constants():
if ctt in string:
return False
return True
def _parse_atomic(self, string):
# First check if it is a sentential constant
if self.language.is_sentential_constant_string(string):
return PredicateFormula([string])
# Check for an infix predicate
# There can only be one, so this will suffice, no need to call parser_utils.get_main_constant
infix_predicate = False
for pred in self.infix_pred:
if pred in string:
infix_predicate = True
pred_index = string.index(pred)
break
if infix_predicate:
# Infix predicate formulae are always binary
return PredicateFormula([pred, self.parse_term(string[:pred_index], replace=False),
self.parse_term(string[pred_index+len(pred):], replace=False)])
# Non-infix predicate
for pred in self.language.predicates() | set(self.language.predicate_variables):
if string[:len(pred) + 1] == pred + '(':
arity = self.language.arity(pred)
unparsed_terms = parser_utils.separate_arguments(string[len(pred):], ',')
if len(unparsed_terms) != arity:
raise NotWellFormed(f'Incorrect arity for predicate {pred} in atomic {string}')
parsed_arguments = [self.parse_term(term, replace=False) for term in unparsed_terms]
return PredicateFormula([pred] + parsed_arguments)
# If you did not return thus far, string is not a wff
raise NotWellFormed(f'String {string} is not a valid atomic formula')
def parse_term(self, string, replace=True):
"""Parses an individual term
If `replace` is ``True``, will apply the `parse_replacement_dict` to the string before parsing the term.
Otherwise, it will not.
Examples
--------
>>> from logics.utils.parsers.predicate_parser import realnumber_arithmetic_parser
>>> realnumber_arithmetic_parser.parse_term("1+1")
('+', '1', '1')
>>> realnumber_arithmetic_parser.parse_term("1+(1+2)")
('+', '1', ('+', '1', '2'))
>>> realnumber_arithmetic_parser.parse_term("(1+(1+2))")
('+', '1', ('+', '1', '2'))
"""
# If a valid individual variable or constant, return it as it came
if replace:
string = self._prepare_to_parse(string)
if self.language._is_valid_individual_constant_or_variable(string):
return string
# Search for an infix operator
# First try adding external parentheses (in order to avoid giving external ones)
infix_term = self._parse_infix_term(f'({string})')
if infix_term is not None:
return infix_term
# Then without adding external parentheses
infix_term = self._parse_infix_term(string)
if infix_term is not None:
return infix_term
# If it did not find infix operators, must be a prefix one
for func_symbol in self.language.function_symbols:
if string[:len(func_symbol) + 1] == func_symbol + '(':
arity = self.language.arity(func_symbol)
unparsed_arguments = parser_utils.separate_arguments(string[len(func_symbol):], ',')
if len(unparsed_arguments) != arity:
raise NotWellFormed(f'Incorrect arity for function symbol {func_symbol} in term {string}')
parsed_arguments = tuple(self.parse_term(term, replace=False) for term in unparsed_arguments)
return (func_symbol,) + parsed_arguments
# If you did not return thus far, string is not a term
raise NotWellFormed(f'String {string} is not a valid term')
def _parse_infix_term(self, string):
# If not between parentheses, its something of the form 's(0+0)' and not '(0+0)'
if string[0] != '(' or string[-1] != ')':
return None
infix_function, index = parser_utils.get_main_constant(string, self.infix_func)
if infix_function is not None:
return (infix_function, self.parse_term(string[1:index], replace=False),
self.parse_term(string[index + len(infix_function):-1], replace=False))
return None
def _parse_molecular(self, string, Formula=PredicateFormula):
"""Here we need only add the quantifier case and call super"""
for quantifier in self.language.quantifiers:
# The string begins with the quantifier
if string[:len(quantifier)] == quantifier:
current_index = len(quantifier) # The current index is the position after the quantifier
# Get the variable
variable = None
for char_index in range(current_index, len(string)):
if self.language._is_valid_variable(string[len(quantifier):char_index+1]):
variable = string[len(quantifier):char_index+1]
current_index = char_index + 1 # The current index is the position after the variable
else:
break
if variable is None:
raise NotWellFormed(f'Incorrect variable specification in quantified formula {string}')
# See if the quantifier is bounded and parse the bound
bounded = False
formula_opening_parenthesis_index = parser_utils.get_last_opening_parenthesis(string)
if formula_opening_parenthesis_index is None:
raise NotWellFormed(f'Quantified formula in {string} must come between parentheses')
if string[current_index] == '∈':
bounded = True
current_index += 1
unparsed_term = string[current_index:formula_opening_parenthesis_index]
parsed_term = self.parse_term(unparsed_term, replace=False)
# Lastly, parse the formula
unparsed_formula = string[formula_opening_parenthesis_index+1:-1]
parsed_formula = self.parse(unparsed_formula)
if not bounded:
return PredicateFormula([quantifier, variable, parsed_formula])
else:
return PredicateFormula([quantifier, variable, '∈', parsed_term, parsed_formula])
return super()._parse_molecular(string, PredicateFormula)
# ------------------------------------------------------------------------------------------------------------------
# UNPARSE FORMULA METHODS
def _unparse_term(self, term, add_parentheses=False):
# Atomic term
if not isinstance(term, tuple):
return term
# Molecular term (function symbol with arguments)
# Prefix function symbol
if term[0] not in self.infix_func:
unparsed_term = term[0] + '('
for arg in term[1:]:
unparsed_term += self._unparse_term(arg) + ', '
return unparsed_term[:-2] + ')'
# Infix (and thus binary) function symbol
else:
if not add_parentheses:
return f'{self._unparse_term(term[1], True)} {term[0]} {self._unparse_term(term[2], True)}'
else:
# Infix terms inside other infix terms must come between parentheses
return f'({self._unparse_term(term[1], True)} {term[0]} {self._unparse_term(term[2], True)})'
def _unparse_atomic(self, formula):
# Prefix predicate symbol
if formula[0] not in self.infix_pred:
unparsed_formula = formula[0] + '('
for arg in formula[1:]:
unparsed_formula += self._unparse_term(arg) + ', '
return unparsed_formula[:-2] + ')'
# Infix (and thus binary) predicate symbol
return f'{self._unparse_term(formula[1])} {formula[0]} {self._unparse_term(formula[2])}'
def _unparse_molecular(self, formula, remove_external_parentheses):
# Quantified formula
if formula.main_symbol in self.language.quantifiers:
# Bounded
if formula[2] == '∈':
return f'{formula[0]}{formula[1]} ∈ {self._unparse_term(formula[3])} ({self._unparse_formula(formula[4], remove_external_parentheses=True)})'
# Unbounded
return f'{formula[0]}{formula[1]} ({self._unparse_formula(formula[2], remove_external_parentheses=True)})'
# Non-quantified formula
return super()._unparse_molecular(formula, remove_external_parentheses)
# ----------------------------------------------------------------------------------------------------------------------
# Parser for arithmetic truth, does Godel coding of things inside Tr predicate
# For example, Tr(⌜x=x⌝) will be parsed as PredicateFormula(['Tr', '514951']).
class ArithmeticTruthParser(PredicateParser):
"""Parser for arithmetic truth
Subclasses PredicateParser, but does Godel coding of things inside Tr predicate
Parameters
----------
godel_encoding_function: callable
The function with which you wish to encode sentences inside Tr predicates
godel_decoding_function: callable
The function with which you wish to decode sentences inside Tr predicates
everything_else_in_PredicateParser
Everything else present in the parent PredicateParser class
Examples
--------
>>> from logics.instances.predicate.languages import arithmetic_truth_language
>>> from logics.utils.parsers.parser_utils import godel_encode, godel_decode
>>> from logics.utils.parsers.predicate_parser import ArithmeticTruthParser
>>> replacement_dict = {
... '¬': '~', 'not ': '~',
... '&': '∧', ' and ': '∧', # notice the spaces before and after 'and'
... 'v': '∨', ' or ': '∨',
... ' then ': '→', '-->': '→', 'if ': '', # 'if p then q' it will convert to 'p then q'
... ' iff ': '↔', '<->': '↔',
... 'forall ': '∀', 'exists ': '∃', ' in ': '∈'
... }
>>> replacement_dict.update({
... '⌜': 'quote(',
... '⌝': ')'
... })
>>> arithmetic_truth_parser = ArithmeticTruthParser(godel_encoding_function=godel_encode,
... godel_decoding_function=godel_decode,
... language=arithmetic_truth_language,
... parse_replacement_dict=replacement_dict,
... infix_cts=['∧', '∨', '→', '↔'],
... infix_pred=['=', '<', '>'], infix_func=['+', '*', '**'])
>>> arithmetic_truth_parser.parse('0=0+0')
['=', '0', ('+', '0', '0')]
>>> arithmetic_truth_parser.parse('Tr(⌜0=0+0⌝)')
['Tr', '04908990']
>>> arithmetic_truth_parser.parse('Tr(⌜Tr(⌜0=0⌝)⌝)')
['Tr', '4999919899999190490199199']
>>> arithmetic_truth_parser.parse('λ iff ~Tr(⌜λ⌝)')
['↔', ['λ'], ['~', ['Tr', '79999']]]
"""
def __init__(self, godel_encoding_function, godel_decoding_function, *args, **kwargs):
# These are two functions that take a string (an UNPARSED formula) and return another string (its code)
self.godel_encode = godel_encoding_function
self.godel_decode = godel_decoding_function
super().__init__(*args, **kwargs)
def _prepare_to_parse(self, string):
"""Replaces quote(sentence) for code_of_sentence"""
string = super()._prepare_to_parse(string)
string = self._remove_quotations(string)
return string
def _remove_quotations(self, string):
# Search for the first apparition of quote and encode the content
while 'quote(' in string:
opening_parenthesis_index = string.index('quote(') + 5 # index of the opening parenthesis
# Get where the closing parenthesis is
closing_parenthesis_index = parser_utils.get_closing_parenthesis(string[opening_parenthesis_index:]) \
+ opening_parenthesis_index
string_to_encode = string[opening_parenthesis_index+1:closing_parenthesis_index]
codified_string = self.godel_encode(string_to_encode)
string = string[:string.index('quote(')] + codified_string + string[closing_parenthesis_index+1:]
return string
def _parse_atomic(self, string):
"""Since codes are numerals like 514951 and not s(s(...)) we need to provide a special clause for the truth pred
otherwise Tr(514951) will raise NotWellFormed
"""
if string[:3] == 'Tr(':
arity = 1
unparsed_terms = parser_utils.separate_arguments(string[2:], ',')
if len(unparsed_terms) != arity:
raise NotWellFormed(f'Incorrect arity for predicate Tr in atomic {string}')
code = unparsed_terms[0]
try:
int(code)
except ValueError:
raise NotWellFormed(f'String {string} must have a numeral as the argument of Tr')
# Do not parse the term, just return the numeral
return PredicateFormula(['Tr', code])
return super()._parse_atomic(string)
# ----------------------------------------------------------------------------------------------------------------------
# INSTANCES
from logics.instances.predicate.languages import classical_function_language, \
arithmetic_language, real_number_arithmetic_language, arithmetic_truth_language
from logics.utils.parsers.standard_parser import classical_parse_replacement_dict
predicate_replacement_dict = deepcopy(classical_parse_replacement_dict)
predicate_replacement_dict.update({
' in ': '∈',
'forall ': '∀',
'exists ': '∃'
})
classical_predicate_parser = PredicateParser(language=classical_function_language,
parse_replacement_dict=predicate_replacement_dict,
infix_cts=['∧', '∨', '→', '↔'])
arithmetic_parser = PredicateParser(language=arithmetic_language,
parse_replacement_dict=predicate_replacement_dict,
infix_cts=['∧', '∨', '→', '↔'],
infix_pred=['=', '<', '>'], infix_func=['+', '*', '**'])
realnumber_arithmetic_parser = PredicateParser(language=real_number_arithmetic_language,
parse_replacement_dict=predicate_replacement_dict,
infix_cts=['∧', '∨', '→', '↔'],
infix_pred=['=', '<', '>'], infix_func=['+', '-', '*', '**', '/', '//'])
truth_predicate_replacement_dict = deepcopy(classical_parse_replacement_dict)
truth_predicate_replacement_dict.update({
'⌜': 'quote(',
'⌝': ')'
})
arithmetic_truth_parser = ArithmeticTruthParser(godel_encoding_function=parser_utils.godel_encode,
godel_decoding_function=parser_utils.godel_decode,
language=arithmetic_truth_language,
parse_replacement_dict=truth_predicate_replacement_dict,
infix_cts=['∧', '∨', '→', '↔'],
infix_pred=['=', '<', '>'], infix_func=['+', '*', '**'])
| [
"logics.utils.parsers.parser_utils.get_closing_parenthesis",
"logics.classes.exceptions.NotWellFormed",
"logics.classes.predicate.PredicateFormula",
"logics.utils.parsers.parser_utils.get_main_constant",
"logics.utils.parsers.parser_utils.separate_arguments",
"logics.utils.parsers.parser_utils.get_last_op... | [((19659, 19701), 'copy.deepcopy', 'deepcopy', (['classical_parse_replacement_dict'], {}), '(classical_parse_replacement_dict)\n', (19667, 19701), False, 'from copy import deepcopy\n'), ((20793, 20835), 'copy.deepcopy', 'deepcopy', (['classical_parse_replacement_dict'], {}), '(classical_parse_replacement_dict)\n', (20801, 20835), False, 'from copy import deepcopy\n'), ((7334, 7397), 'logics.classes.exceptions.NotWellFormed', 'NotWellFormed', (['f"""String {string} is not a valid atomic formula"""'], {}), "(f'String {string} is not a valid atomic formula')\n", (7347, 7397), False, 'from logics.classes.exceptions import NotWellFormed\n'), ((9483, 9536), 'logics.classes.exceptions.NotWellFormed', 'NotWellFormed', (['f"""String {string} is not a valid term"""'], {}), "(f'String {string} is not a valid term')\n", (9496, 9536), False, 'from logics.classes.exceptions import NotWellFormed\n'), ((9774, 9829), 'logics.utils.parsers.parser_utils.get_main_constant', 'parser_utils.get_main_constant', (['string', 'self.infix_func'], {}), '(string, self.infix_func)\n', (9804, 9829), False, 'from logics.utils.parsers import parser_utils\n'), ((5967, 5993), 'logics.classes.predicate.PredicateFormula', 'PredicateFormula', (['[string]'], {}), '([string])\n', (5983, 5993), False, 'from logics.classes.predicate import PredicateFormula\n'), ((18694, 18742), 'logics.utils.parsers.parser_utils.separate_arguments', 'parser_utils.separate_arguments', (['string[2:]', '""","""'], {}), "(string[2:], ',')\n", (18725, 18742), False, 'from logics.utils.parsers import parser_utils\n'), ((19169, 19199), 'logics.classes.predicate.PredicateFormula', 'PredicateFormula', (["['Tr', code]"], {}), "(['Tr', code])\n", (19185, 19199), False, 'from logics.classes.predicate import PredicateFormula\n'), ((7213, 7256), 'logics.classes.predicate.PredicateFormula', 'PredicateFormula', (['([pred] + parsed_arguments)'], {}), '([pred] + parsed_arguments)\n', (7229, 7256), False, 'from logics.classes.predicate import PredicateFormula\n'), ((11242, 11291), 'logics.utils.parsers.parser_utils.get_last_opening_parenthesis', 'parser_utils.get_last_opening_parenthesis', (['string'], {}), '(string)\n', (11283, 11291), False, 'from logics.utils.parsers import parser_utils\n'), ((17951, 18023), 'logics.utils.parsers.parser_utils.get_closing_parenthesis', 'parser_utils.get_closing_parenthesis', (['string[opening_parenthesis_index:]'], {}), '(string[opening_parenthesis_index:])\n', (17987, 18023), False, 'from logics.utils.parsers import parser_utils\n'), ((18810, 18879), 'logics.classes.exceptions.NotWellFormed', 'NotWellFormed', (['f"""Incorrect arity for predicate Tr in atomic {string}"""'], {}), "(f'Incorrect arity for predicate Tr in atomic {string}')\n", (18823, 18879), False, 'from logics.classes.exceptions import NotWellFormed\n'), ((7015, 7088), 'logics.classes.exceptions.NotWellFormed', 'NotWellFormed', (['f"""Incorrect arity for predicate {pred} in atomic {string}"""'], {}), "(f'Incorrect arity for predicate {pred} in atomic {string}')\n", (7028, 7088), False, 'from logics.classes.exceptions import NotWellFormed\n'), ((9153, 9242), 'logics.classes.exceptions.NotWellFormed', 'NotWellFormed', (['f"""Incorrect arity for function symbol {func_symbol} in term {string}"""'], {}), "(\n f'Incorrect arity for function symbol {func_symbol} in term {string}')\n", (9166, 9242), False, 'from logics.classes.exceptions import NotWellFormed\n'), ((11004, 11090), 'logics.classes.exceptions.NotWellFormed', 'NotWellFormed', (['f"""Incorrect variable specification in quantified formula {string}"""'], {}), "(\n f'Incorrect variable specification in quantified formula {string}')\n", (11017, 11090), False, 'from logics.classes.exceptions import NotWellFormed\n'), ((11380, 11458), 'logics.classes.exceptions.NotWellFormed', 'NotWellFormed', (['f"""Quantified formula in {string} must come between parentheses"""'], {}), "(f'Quantified formula in {string} must come between parentheses')\n", (11393, 11458), False, 'from logics.classes.exceptions import NotWellFormed\n'), ((12003, 12059), 'logics.classes.predicate.PredicateFormula', 'PredicateFormula', (['[quantifier, variable, parsed_formula]'], {}), '([quantifier, variable, parsed_formula])\n', (12019, 12059), False, 'from logics.classes.predicate import PredicateFormula\n'), ((12109, 12183), 'logics.classes.predicate.PredicateFormula', 'PredicateFormula', (["[quantifier, variable, '∈', parsed_term, parsed_formula]"], {}), "([quantifier, variable, '∈', parsed_term, parsed_formula])\n", (12125, 12183), False, 'from logics.classes.predicate import PredicateFormula\n'), ((19013, 19088), 'logics.classes.exceptions.NotWellFormed', 'NotWellFormed', (['f"""String {string} must have a numeral as the argument of Tr"""'], {}), "(f'String {string} must have a numeral as the argument of Tr')\n", (19026, 19088), False, 'from logics.classes.exceptions import NotWellFormed\n')] |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import, print_function
import itertools
from functools import partial
from twisted.internet.defer import inlineCallbacks
from twisted.internet.interfaces import IStreamClientEndpoint
from twisted.internet.endpoints import UNIXClientEndpoint
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.error import ReactorNotRunning
try:
_TLS = True
from twisted.internet.endpoints import SSL4ClientEndpoint
from twisted.internet.ssl import optionsForClientTLS, CertificateOptions
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from OpenSSL import SSL
except ImportError as e:
_TLS = False
if 'OpenSSL' not in str(e):
raise
import txaio
from autobahn.twisted.websocket import WampWebSocketClientFactory
from autobahn.twisted.rawsocket import WampRawSocketClientFactory
from autobahn.wamp import component
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
__all__ = ('Component')
def _is_ssl_error(e):
"""
Internal helper.
This is so we can just return False if we didn't import any
TLS/SSL libraries. Otherwise, returns True if this is an
OpenSSL.SSL.Error
"""
if _TLS:
return isinstance(e, SSL.Error)
return False
def _unique_list(seq):
"""
Return a list with unique elements from sequence, preserving order.
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def _create_transport_serializer(serializer_id):
if serializer_id in [u'msgpack', u'mgspack.batched']:
# try MsgPack WAMP serializer
try:
from autobahn.wamp.serializer import MsgPackSerializer
except ImportError:
pass
else:
if serializer_id == u'mgspack.batched':
return MsgPackSerializer(batched=True)
else:
return MsgPackSerializer()
if serializer_id in [u'json', u'json.batched']:
# try JSON WAMP serializer
try:
from autobahn.wamp.serializer import JsonSerializer
except ImportError:
pass
else:
if serializer_id == u'json.batched':
return JsonSerializer(batched=True)
else:
return JsonSerializer()
raise RuntimeError('could not create serializer for "{}"'.format(serializer_id))
def _create_transport_serializers(transport):
"""
Create a list of serializers to use with a WAMP protocol factory.
"""
serializers = []
for serializer_id in transport.serializers:
if serializer_id == u'msgpack':
# try MsgPack WAMP serializer
try:
from autobahn.wamp.serializer import MsgPackSerializer
except ImportError:
pass
else:
serializers.append(MsgPackSerializer(batched=True))
serializers.append(MsgPackSerializer())
elif serializer_id == u'json':
# try JSON WAMP serializer
try:
from autobahn.wamp.serializer import JsonSerializer
except ImportError:
pass
else:
serializers.append(JsonSerializer(batched=True))
serializers.append(JsonSerializer())
else:
raise RuntimeError(
"Unknown serializer '{}'".format(serializer_id)
)
return serializers
def _create_transport_factory(reactor, transport, session_factory):
"""
Create a WAMP-over-XXX transport factory.
"""
if transport.type == 'websocket':
# FIXME: forward WebSocket options
serializers = _create_transport_serializers(transport)
return WampWebSocketClientFactory(session_factory, url=transport.url, serializers=serializers)
elif transport.type == 'rawsocket':
# FIXME: forward RawSocket options
serializer = _create_transport_serializer(transport.serializer)
return WampRawSocketClientFactory(session_factory, serializer=serializer)
else:
assert(False), 'should not arrive here'
def _create_transport_endpoint(reactor, endpoint_config):
"""
Create a Twisted client endpoint for a WAMP-over-XXX transport.
"""
if IStreamClientEndpoint.providedBy(endpoint_config):
endpoint = IStreamClientEndpoint(endpoint_config)
else:
# create a connecting TCP socket
if endpoint_config['type'] == 'tcp':
version = int(endpoint_config.get('version', 4))
host = str(endpoint_config['host'])
port = int(endpoint_config['port'])
timeout = int(endpoint_config.get('timeout', 10)) # in seconds
tls = endpoint_config.get('tls', None)
# create a TLS enabled connecting TCP socket
if tls:
if not _TLS:
raise RuntimeError('TLS configured in transport, but TLS support is not installed (eg OpenSSL?)')
# FIXME: create TLS context from configuration
if IOpenSSLClientConnectionCreator.providedBy(tls):
# eg created from twisted.internet.ssl.optionsForClientTLS()
context = IOpenSSLClientConnectionCreator(tls)
elif isinstance(tls, CertificateOptions):
context = tls
elif tls is True:
context = optionsForClientTLS(host)
else:
raise RuntimeError('unknown type {} for "tls" configuration in transport'.format(type(tls)))
if version == 4:
endpoint = SSL4ClientEndpoint(reactor, host, port, context, timeout=timeout)
elif version == 6:
# there is no SSL6ClientEndpoint!
raise RuntimeError('TLS on IPv6 not implemented')
else:
assert(False), 'should not arrive here'
# create a non-TLS connecting TCP socket
else:
if version == 4:
endpoint = TCP4ClientEndpoint(reactor, host, port, timeout=timeout)
elif version == 6:
try:
from twisted.internet.endpoints import TCP6ClientEndpoint
except ImportError:
raise RuntimeError('IPv6 is not supported (please upgrade Twisted)')
endpoint = TCP6ClientEndpoint(reactor, host, port, timeout=timeout)
else:
assert(False), 'should not arrive here'
# create a connecting Unix domain socket
elif endpoint_config['type'] == 'unix':
path = endpoint_config['path']
timeout = int(endpoint_config.get('timeout', 10)) # in seconds
endpoint = UNIXClientEndpoint(reactor, path, timeout=timeout)
else:
assert(False), 'should not arrive here'
return endpoint
class Component(component.Component):
"""
A component establishes a transport and attached a session
to a realm using the transport for communication.
The transports a component tries to use can be configured,
as well as the auto-reconnect strategy.
"""
log = txaio.make_logger()
session_factory = ApplicationSession
"""
The factory of the session we will instantiate.
"""
def _check_native_endpoint(self, endpoint):
if IStreamClientEndpoint.providedBy(endpoint):
pass
elif isinstance(endpoint, dict):
if 'tls' in endpoint:
tls = endpoint['tls']
if isinstance(tls, (dict, bool)):
pass
elif IOpenSSLClientConnectionCreator.providedBy(tls):
pass
elif isinstance(tls, CertificateOptions):
pass
else:
raise ValueError(
"'tls' configuration must be a dict, CertificateOptions or"
" IOpenSSLClientConnectionCreator provider"
)
else:
raise ValueError(
"'endpoint' configuration must be a dict or IStreamClientEndpoint"
" provider"
)
def _connect_transport(self, reactor, transport, session_factory):
"""
Create and connect a WAMP-over-XXX transport.
"""
transport_factory = _create_transport_factory(reactor, transport, session_factory)
transport_endpoint = _create_transport_endpoint(reactor, transport.endpoint)
return transport_endpoint.connect(transport_factory)
# XXX think: is it okay to use inlineCallbacks (in this
# twisted-only file) even though we're using txaio?
@inlineCallbacks
def start(self, reactor=None):
"""
This starts the Component, which means it will start connecting
(and re-connecting) to its configured transports. A Component
runs until it is "done", which means one of:
- There was a "main" function defined, and it completed successfully;
- Something called ``.leave()`` on our session, and we left successfully;
- ``.stop()`` was called, and completed successfully;
- none of our transports were able to connect successfully (failure);
:returns: a Deferred that fires (with ``None``) when we are
"done" or with a Failure if something went wrong.
"""
if reactor is None:
self.log.warn("Using default reactor")
from twisted.internet import reactor
yield self.fire('start', reactor, self)
# transports to try again and again ..
transport_gen = itertools.cycle(self._transports)
reconnect = True
self.log.debug('Entering re-connect loop')
while reconnect:
# cycle through all transports forever ..
transport = next(transport_gen)
# only actually try to connect using the transport,
# if the transport hasn't reached max. connect count
if transport.can_reconnect():
delay = transport.next_delay()
self.log.debug(
'trying transport {transport_idx} using connect delay {transport_delay}',
transport_idx=transport.idx,
transport_delay=delay,
)
yield sleep(delay)
try:
transport.connect_attempts += 1
yield self._connect_once(reactor, transport)
transport.connect_sucesses += 1
except Exception as e:
transport.connect_failures += 1
f = txaio.create_failure()
self.log.error(u'component failed: {error}', error=txaio.failure_message(f))
self.log.debug(u'{tb}', tb=txaio.failure_format_traceback(f))
# If this is a "fatal error" that will never work,
# we bail out now
if isinstance(e, ApplicationError):
if e.error in [u'wamp.error.no_such_realm']:
reconnect = False
self.log.error(u"Fatal error, not reconnecting")
raise
# self.log.error(u"{error}: {message}", error=e.error, message=e.message)
elif _is_ssl_error(e):
# Quoting pyOpenSSL docs: "Whenever
# [SSL.Error] is raised directly, it has a
# list of error messages from the OpenSSL
# error queue, where each item is a tuple
# (lib, function, reason). Here lib, function
# and reason are all strings, describing where
# and what the problem is. See err(3) for more
# information."
for (lib, fn, reason) in e.args[0]:
self.log.error(u"TLS failure: {reason}", reason=reason)
self.log.error(u"Marking this transport as failed")
transport.failed()
else:
f = txaio.create_failure()
self.log.error(
u'Connection failed: {error}',
error=txaio.failure_message(f),
)
# some types of errors should probably have
# stacktraces logged immediately at error
# level, e.g. SyntaxError?
self.log.debug(u'{tb}', tb=txaio.failure_format_traceback(f))
raise
else:
reconnect = False
else:
# check if there is any transport left we can use
# to connect
if not self._can_reconnect():
self.log.info("No remaining transports to try")
reconnect = False
def _run(reactor, components):
if isinstance(components, Component):
components = [components]
if type(components) != list:
raise ValueError(
'"components" must be a list of Component objects - encountered'
' {0}'.format(type(components))
)
for c in components:
if not isinstance(c, Component):
raise ValueError(
'"components" must be a list of Component objects - encountered'
'item of type {0}'.format(type(c))
)
log = txaio.make_logger()
def component_success(c, arg):
log.debug("Component {c} successfully completed: {arg}", c=c, arg=arg)
return arg
def component_failure(f):
log.error("Component error: {msg}", msg=txaio.failure_message(f))
log.debug("Component error: {tb}", tb=txaio.failure_format_traceback(f))
return None
# all components are started in parallel
dl = []
for c in components:
# a component can be of type MAIN or SETUP
d = c.start(reactor)
txaio.add_callbacks(d, partial(component_success, c), component_failure)
dl.append(d)
d = txaio.gather(dl, consume_exceptions=False)
def all_done(arg):
log.debug("All components ended; stopping reactor")
try:
reactor.stop()
except ReactorNotRunning:
pass
txaio.add_callbacks(d, all_done, all_done)
return d
def run(components):
# only for Twisted > 12
from twisted.internet.task import react
react(_run, [components])
| [
"autobahn.twisted.util.sleep",
"twisted.internet.endpoints.SSL4ClientEndpoint",
"twisted.internet.task.react",
"twisted.internet.interfaces.IStreamClientEndpoint.providedBy",
"twisted.internet.endpoints.UNIXClientEndpoint",
"autobahn.twisted.rawsocket.WampRawSocketClientFactory",
"twisted.internet.inter... | [((5685, 5734), 'twisted.internet.interfaces.IStreamClientEndpoint.providedBy', 'IStreamClientEndpoint.providedBy', (['endpoint_config'], {}), '(endpoint_config)\n', (5717, 5734), False, 'from twisted.internet.interfaces import IStreamClientEndpoint\n'), ((8681, 8700), 'txaio.make_logger', 'txaio.make_logger', ([], {}), '()\n', (8698, 8700), False, 'import txaio\n'), ((15142, 15161), 'txaio.make_logger', 'txaio.make_logger', ([], {}), '()\n', (15159, 15161), False, 'import txaio\n'), ((15775, 15817), 'txaio.gather', 'txaio.gather', (['dl'], {'consume_exceptions': '(False)'}), '(dl, consume_exceptions=False)\n', (15787, 15817), False, 'import txaio\n'), ((15998, 16040), 'txaio.add_callbacks', 'txaio.add_callbacks', (['d', 'all_done', 'all_done'], {}), '(d, all_done, all_done)\n', (16017, 16040), False, 'import txaio\n'), ((16154, 16179), 'twisted.internet.task.react', 'react', (['_run', '[components]'], {}), '(_run, [components])\n', (16159, 16179), False, 'from twisted.internet.task import react\n'), ((5149, 5241), 'autobahn.twisted.websocket.WampWebSocketClientFactory', 'WampWebSocketClientFactory', (['session_factory'], {'url': 'transport.url', 'serializers': 'serializers'}), '(session_factory, url=transport.url, serializers=\n serializers)\n', (5175, 5241), False, 'from autobahn.twisted.websocket import WampWebSocketClientFactory\n'), ((5755, 5793), 'twisted.internet.interfaces.IStreamClientEndpoint', 'IStreamClientEndpoint', (['endpoint_config'], {}), '(endpoint_config)\n', (5776, 5793), False, 'from twisted.internet.interfaces import IStreamClientEndpoint\n'), ((8871, 8913), 'twisted.internet.interfaces.IStreamClientEndpoint.providedBy', 'IStreamClientEndpoint.providedBy', (['endpoint'], {}), '(endpoint)\n', (8903, 8913), False, 'from twisted.internet.interfaces import IStreamClientEndpoint\n'), ((11161, 11194), 'itertools.cycle', 'itertools.cycle', (['self._transports'], {}), '(self._transports)\n', (11176, 11194), False, 'import itertools\n'), ((5408, 5474), 'autobahn.twisted.rawsocket.WampRawSocketClientFactory', 'WampRawSocketClientFactory', (['session_factory'], {'serializer': 'serializer'}), '(session_factory, serializer=serializer)\n', (5434, 5474), False, 'from autobahn.twisted.rawsocket import WampRawSocketClientFactory\n'), ((15696, 15725), 'functools.partial', 'partial', (['component_success', 'c'], {}), '(component_success, c)\n', (15703, 15725), False, 'from functools import partial\n'), ((15927, 15941), 'twisted.internet.reactor.stop', 'reactor.stop', ([], {}), '()\n', (15939, 15941), False, 'from twisted.internet import reactor\n'), ((3226, 3257), 'autobahn.wamp.serializer.MsgPackSerializer', 'MsgPackSerializer', ([], {'batched': '(True)'}), '(batched=True)\n', (3243, 3257), False, 'from autobahn.wamp.serializer import MsgPackSerializer\n'), ((3299, 3318), 'autobahn.wamp.serializer.MsgPackSerializer', 'MsgPackSerializer', ([], {}), '()\n', (3316, 3318), False, 'from autobahn.wamp.serializer import MsgPackSerializer\n'), ((3615, 3643), 'autobahn.wamp.serializer.JsonSerializer', 'JsonSerializer', ([], {'batched': '(True)'}), '(batched=True)\n', (3629, 3643), False, 'from autobahn.wamp.serializer import JsonSerializer\n'), ((3685, 3701), 'autobahn.wamp.serializer.JsonSerializer', 'JsonSerializer', ([], {}), '()\n', (3699, 3701), False, 'from autobahn.wamp.serializer import JsonSerializer\n'), ((6483, 6530), 'twisted.internet.interfaces.IOpenSSLClientConnectionCreator.providedBy', 'IOpenSSLClientConnectionCreator.providedBy', (['tls'], {}), '(tls)\n', (6525, 6530), False, 'from twisted.internet.interfaces import IOpenSSLClientConnectionCreator\n'), ((8250, 8300), 'twisted.internet.endpoints.UNIXClientEndpoint', 'UNIXClientEndpoint', (['reactor', 'path'], {'timeout': 'timeout'}), '(reactor, path, timeout=timeout)\n', (8268, 8300), False, 'from twisted.internet.endpoints import UNIXClientEndpoint\n'), ((15375, 15399), 'txaio.failure_message', 'txaio.failure_message', (['f'], {}), '(f)\n', (15396, 15399), False, 'import txaio\n'), ((15447, 15480), 'txaio.failure_format_traceback', 'txaio.failure_format_traceback', (['f'], {}), '(f)\n', (15477, 15480), False, 'import txaio\n'), ((4267, 4298), 'autobahn.wamp.serializer.MsgPackSerializer', 'MsgPackSerializer', ([], {'batched': '(True)'}), '(batched=True)\n', (4284, 4298), False, 'from autobahn.wamp.serializer import MsgPackSerializer\n'), ((4335, 4354), 'autobahn.wamp.serializer.MsgPackSerializer', 'MsgPackSerializer', ([], {}), '()\n', (4352, 4354), False, 'from autobahn.wamp.serializer import MsgPackSerializer\n'), ((6643, 6679), 'twisted.internet.interfaces.IOpenSSLClientConnectionCreator', 'IOpenSSLClientConnectionCreator', (['tls'], {}), '(tls)\n', (6674, 6679), False, 'from twisted.internet.interfaces import IOpenSSLClientConnectionCreator\n'), ((7065, 7130), 'twisted.internet.endpoints.SSL4ClientEndpoint', 'SSL4ClientEndpoint', (['reactor', 'host', 'port', 'context'], {'timeout': 'timeout'}), '(reactor, host, port, context, timeout=timeout)\n', (7083, 7130), False, 'from twisted.internet.endpoints import SSL4ClientEndpoint\n'), ((7508, 7564), 'twisted.internet.endpoints.TCP4ClientEndpoint', 'TCP4ClientEndpoint', (['reactor', 'host', 'port'], {'timeout': 'timeout'}), '(reactor, host, port, timeout=timeout)\n', (7526, 7564), False, 'from twisted.internet.endpoints import TCP4ClientEndpoint\n'), ((11874, 11886), 'autobahn.twisted.util.sleep', 'sleep', (['delay'], {}), '(delay)\n', (11879, 11886), False, 'from autobahn.twisted.util import sleep\n'), ((4626, 4654), 'autobahn.wamp.serializer.JsonSerializer', 'JsonSerializer', ([], {'batched': '(True)'}), '(batched=True)\n', (4640, 4654), False, 'from autobahn.wamp.serializer import JsonSerializer\n'), ((4691, 4707), 'autobahn.wamp.serializer.JsonSerializer', 'JsonSerializer', ([], {}), '()\n', (4705, 4707), False, 'from autobahn.wamp.serializer import JsonSerializer\n'), ((7871, 7927), 'twisted.internet.endpoints.TCP6ClientEndpoint', 'TCP6ClientEndpoint', (['reactor', 'host', 'port'], {'timeout': 'timeout'}), '(reactor, host, port, timeout=timeout)\n', (7889, 7927), False, 'from twisted.internet.endpoints import TCP6ClientEndpoint\n'), ((9141, 9188), 'twisted.internet.interfaces.IOpenSSLClientConnectionCreator.providedBy', 'IOpenSSLClientConnectionCreator.providedBy', (['tls'], {}), '(tls)\n', (9183, 9188), False, 'from twisted.internet.interfaces import IOpenSSLClientConnectionCreator\n'), ((12192, 12214), 'txaio.create_failure', 'txaio.create_failure', ([], {}), '()\n', (12212, 12214), False, 'import txaio\n'), ((6838, 6863), 'twisted.internet.ssl.optionsForClientTLS', 'optionsForClientTLS', (['host'], {}), '(host)\n', (6857, 6863), False, 'from twisted.internet.ssl import optionsForClientTLS, CertificateOptions\n'), ((12286, 12310), 'txaio.failure_message', 'txaio.failure_message', (['f'], {}), '(f)\n', (12307, 12310), False, 'import txaio\n'), ((12359, 12392), 'txaio.failure_format_traceback', 'txaio.failure_format_traceback', (['f'], {}), '(f)\n', (12389, 12392), False, 'import txaio\n'), ((13754, 13776), 'txaio.create_failure', 'txaio.create_failure', ([], {}), '()\n', (13774, 13776), False, 'import txaio\n'), ((13910, 13934), 'txaio.failure_message', 'txaio.failure_message', (['f'], {}), '(f)\n', (13931, 13934), False, 'import txaio\n'), ((14198, 14231), 'txaio.failure_format_traceback', 'txaio.failure_format_traceback', (['f'], {}), '(f)\n', (14228, 14231), False, 'import txaio\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-09 17:24
from __future__ import unicode_literals
import Curriculums.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Curriculums', '0015_remove_flyers_nombrearchivo'),
]
operations = [
migrations.AddField(
model_name='diplomas',
name='diploma',
field=models.ImageField(blank=True, null=True, upload_to=Curriculums.models.generate_path_flyer),
),
migrations.AlterField(
model_name='diplomas',
name='link',
field=models.CharField(default='www.tecnologiasdedicadas.com/Curriculum/{{ user.pk }}', max_length=256),
),
migrations.AlterField(
model_name='flyers',
name='link',
field=models.CharField(default='www.tecnologiasdedicadas.com/Curriculum/{{ user.pk }}', max_length=256),
),
]
| [
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((439, 534), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': 'Curriculums.models.generate_path_flyer'}), '(blank=True, null=True, upload_to=Curriculums.models.\n generate_path_flyer)\n', (456, 534), False, 'from django.db import migrations, models\n'), ((651, 753), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""www.tecnologiasdedicadas.com/Curriculum/{{ user.pk }}"""', 'max_length': '(256)'}), "(default=\n 'www.tecnologiasdedicadas.com/Curriculum/{{ user.pk }}', max_length=256)\n", (667, 753), False, 'from django.db import migrations, models\n'), ((868, 970), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""www.tecnologiasdedicadas.com/Curriculum/{{ user.pk }}"""', 'max_length': '(256)'}), "(default=\n 'www.tecnologiasdedicadas.com/Curriculum/{{ user.pk }}', max_length=256)\n", (884, 970), False, 'from django.db import migrations, models\n')] |
#! /usr/bin/env python3
import nba, os
for netdev in nba.get_netdevices():
print(netdev)
for coproc in nba.get_coprocessors():
print(coproc)
node_cpus = nba.get_cpu_node_mapping()
for node_id, cpus in enumerate(node_cpus):
print('Cores in NUMA node {0}: [{1}]'.format(node_id, ', '.join(map(str, cpus))))
# The values read by the framework are:
# - system_params
# - io_threads
# - comp_threads
# - coproc_threads
# - queues
# - thread_connections
system_params = {
'IO_BATCH_SIZE': int(os.environ.get('NBA_IO_BATCH_SIZE', 32)),
'COMP_BATCH_SIZE': int(os.environ.get('NBA_COMP_BATCH_SIZE', 32)),
'COMP_PPDEPTH': int(os.environ.get('NBA_COMP_PPDEPTH', 16)),
'COPROC_PPDEPTH': int(os.environ.get('NBA_COPROC_PPDEPTH', 64)),
}
print("IO batch size: {0[IO_BATCH_SIZE]}, computation batch size: {0[COMP_BATCH_SIZE]}".format(system_params))
print("Computation pipeline depth: {0[COMP_PPDEPTH]}".format(system_params))
print("Coprocessor pipeline depth: {0[COPROC_PPDEPTH]}".format(system_params))
print("# logical cores: {0}, # physical cores {1} (hyperthreading {2})".format(
nba.num_logical_cores, nba.num_physical_cores,
"enabled" if nba.ht_enabled else "disabled"
))
_ht_diff = nba.num_physical_cores if nba.ht_enabled else 0
# The following objects are not "real" -- just namedtuple instances.
# They only store metdata w/o actual side-effects such as creation of threads.
no_port = int(os.environ.get('NBA_SINGLE_CPU_MULTI_PORT', 1))
print ("using " + str(no_port) + " ports for 1 cpu")
attached_rxqs_temp = []
for i in range(no_port):
attached_rxqs_temp.append((i, 0))
io_threads = [
# core_id, list of (port_id, rxq_idx), mode
nba.IOThread(core_id=node_cpus[0][0], attached_rxqs=attached_rxqs_temp, mode='normal'),
]
comp_threads = [
# core_id
nba.CompThread(core_id=node_cpus[0][0] + _ht_diff),
]
coproc_threads = [
# core_id, device_id
nba.CoprocThread(core_id=node_cpus[0][7] + _ht_diff, device_id=0),
]
comp_input_queues = [
# node_id, template
nba.Queue(node_id=0, template='swrx'),
]
coproc_input_queues = [
# node_id, template
nba.Queue(node_id=0, template='taskin'),
]
coproc_completion_queues = [
# node_id, template
nba.Queue(node_id=0, template='taskout'),
]
queues = comp_input_queues + coproc_input_queues + coproc_completion_queues
thread_connections = [
# from-thread, to-thread, queue-instance
(io_threads[0], comp_threads[0], comp_input_queues[0]),
(comp_threads[0], coproc_threads[0], coproc_input_queues[0]),
(coproc_threads[0], comp_threads[0], coproc_completion_queues[0]),
]
# cpu_ratio is only used in weighted random LBs and ignored in other ones.
# Sangwook: It would be better to write 'cpu_ratio' only when it is needed,
# but it seems Python wrapper doesn't allow it..
LB_mode = str(os.environ.get('NBA_LOADBALANCER_MODE', 'CPUOnlyLB'))
LB_cpu_ratio = float(os.environ.get('NBA_LOADBALANCER_CPU_RATIO', 1.0))
load_balancer = nba.LoadBalancer(mode=LB_mode, cpu_ratio=LB_cpu_ratio)
| [
"nba.Queue",
"nba.get_coprocessors",
"nba.get_netdevices",
"nba.CompThread",
"nba.CoprocThread",
"os.environ.get",
"nba.IOThread",
"nba.LoadBalancer",
"nba.get_cpu_node_mapping"
] | [((54, 74), 'nba.get_netdevices', 'nba.get_netdevices', ([], {}), '()\n', (72, 74), False, 'import nba, os\n'), ((108, 130), 'nba.get_coprocessors', 'nba.get_coprocessors', ([], {}), '()\n', (128, 130), False, 'import nba, os\n'), ((162, 188), 'nba.get_cpu_node_mapping', 'nba.get_cpu_node_mapping', ([], {}), '()\n', (186, 188), False, 'import nba, os\n'), ((2987, 3041), 'nba.LoadBalancer', 'nba.LoadBalancer', ([], {'mode': 'LB_mode', 'cpu_ratio': 'LB_cpu_ratio'}), '(mode=LB_mode, cpu_ratio=LB_cpu_ratio)\n', (3003, 3041), False, 'import nba, os\n'), ((1428, 1474), 'os.environ.get', 'os.environ.get', (['"""NBA_SINGLE_CPU_MULTI_PORT"""', '(1)'], {}), "('NBA_SINGLE_CPU_MULTI_PORT', 1)\n", (1442, 1474), False, 'import nba, os\n'), ((1686, 1776), 'nba.IOThread', 'nba.IOThread', ([], {'core_id': 'node_cpus[0][0]', 'attached_rxqs': 'attached_rxqs_temp', 'mode': '"""normal"""'}), "(core_id=node_cpus[0][0], attached_rxqs=attached_rxqs_temp,\n mode='normal')\n", (1698, 1776), False, 'import nba, os\n'), ((1811, 1861), 'nba.CompThread', 'nba.CompThread', ([], {'core_id': '(node_cpus[0][0] + _ht_diff)'}), '(core_id=node_cpus[0][0] + _ht_diff)\n', (1825, 1861), False, 'import nba, os\n'), ((1914, 1979), 'nba.CoprocThread', 'nba.CoprocThread', ([], {'core_id': '(node_cpus[0][7] + _ht_diff)', 'device_id': '(0)'}), '(core_id=node_cpus[0][7] + _ht_diff, device_id=0)\n', (1930, 1979), False, 'import nba, os\n'), ((2034, 2071), 'nba.Queue', 'nba.Queue', ([], {'node_id': '(0)', 'template': '"""swrx"""'}), "(node_id=0, template='swrx')\n", (2043, 2071), False, 'import nba, os\n'), ((2128, 2167), 'nba.Queue', 'nba.Queue', ([], {'node_id': '(0)', 'template': '"""taskin"""'}), "(node_id=0, template='taskin')\n", (2137, 2167), False, 'import nba, os\n'), ((2229, 2269), 'nba.Queue', 'nba.Queue', ([], {'node_id': '(0)', 'template': '"""taskout"""'}), "(node_id=0, template='taskout')\n", (2238, 2269), False, 'import nba, os\n'), ((2845, 2897), 'os.environ.get', 'os.environ.get', (['"""NBA_LOADBALANCER_MODE"""', '"""CPUOnlyLB"""'], {}), "('NBA_LOADBALANCER_MODE', 'CPUOnlyLB')\n", (2859, 2897), False, 'import nba, os\n'), ((2920, 2969), 'os.environ.get', 'os.environ.get', (['"""NBA_LOADBALANCER_CPU_RATIO"""', '(1.0)'], {}), "('NBA_LOADBALANCER_CPU_RATIO', 1.0)\n", (2934, 2969), False, 'import nba, os\n'), ((506, 545), 'os.environ.get', 'os.environ.get', (['"""NBA_IO_BATCH_SIZE"""', '(32)'], {}), "('NBA_IO_BATCH_SIZE', 32)\n", (520, 545), False, 'import nba, os\n'), ((575, 616), 'os.environ.get', 'os.environ.get', (['"""NBA_COMP_BATCH_SIZE"""', '(32)'], {}), "('NBA_COMP_BATCH_SIZE', 32)\n", (589, 616), False, 'import nba, os\n'), ((643, 681), 'os.environ.get', 'os.environ.get', (['"""NBA_COMP_PPDEPTH"""', '(16)'], {}), "('NBA_COMP_PPDEPTH', 16)\n", (657, 681), False, 'import nba, os\n'), ((710, 750), 'os.environ.get', 'os.environ.get', (['"""NBA_COPROC_PPDEPTH"""', '(64)'], {}), "('NBA_COPROC_PPDEPTH', 64)\n", (724, 750), False, 'import nba, os\n')] |
#import conf.bootstrap as config
#import conf.datakey as datakey
from .hashicorp_base import ConnBase
import consul
import os
import json
from ..utils.io import convert_yaml
from ..utils.logger import Logger
class ConsulCon(ConnBase):
"""Class to construct the dict properties for the app from Consul and Vault
"""
exception_key = ['path']
exception_dict = {}
cons = None
def __init__(self, params = None, exception_dict = None):
"""Constructor inisiating all properties
"""
ConnBase.__init__(self)
# if exception dict is known
if exception_dict:
self.exception_dict = exception_dict
# construct the consul and vault params
consul_params = self.get_configs_dict(self._content['consul'], self.exception_key) if not params else params
# construct the consul
self.cons = consul.Consul(**consul_params)
def get_kv(self, type = 'json'):
"""run config constructor return dict all configs
Keyword arguments :
type -- The type of the value text format
"""
type_enum = {
'json' : lambda x: json.loads(x.decode('utf-8')) if x else '',
'yaml' : lambda x: convert_yaml(x) if x else ''
}
temp = self.cons.kv.get(self.exception_dict['path'])[1]['Value']
result = type_enum[type](temp)
return result
| [
"consul.Consul"
] | [((890, 920), 'consul.Consul', 'consul.Consul', ([], {}), '(**consul_params)\n', (903, 920), False, 'import consul\n')] |
import tclab
import time
import numpy as np
import sys
import first_principles_model as fp
def doublet_test(data_file='step_test.csv', show_plot=True):
'''doublet test the system and save data to given file path'''
import Adafruit_DHT # Only importable on the Pi itself
tc1 = tclab.TCLab()
tc1.LED(100)
# Bogus data row added to make concatenation work, never goes anywhere
data = [1, 1, 1, 1, 1, 1, 1, 1]
csv_file_header = 'time,control output,box humidity,box temp,outside humidity,outside temp,heater 1 temp,heater 2 temp,P,I,D,SP,Err'
start_time = time.time()
u = 0
tc1.Q1(u)
tc1.Q2(u)
current_time = 0
while current_time < 1200:
try:
# read temp, humidity and time
humid_in, temp_in = Adafruit_DHT.read_retry(
11, 4, retries=5, delay_seconds=1)
humid_out, temp_out = Adafruit_DHT.read_retry(
11, 17, retries=5, delay_seconds=1)
current_time = time.time() - start_time
if humid_in is None:
# Rejects failed readings
continue
if humid_in > 100:
# Corrupted data, so ignore it
continue
if current_time > 60:
u = 100
if current_time > 800:
u = 50
tc1.Q1(u)
tc1.Q2(u)
# print current values
print('time: {:.1f}, u: {}, h_in: {}, t_in: {}, h1: {}, h2: {}, h_out: {}, t_out: {}'
.format(current_time, u, humid_in, temp_in, tc1.T1, tc1.T2, humid_out, temp_out))
data = np.vstack([data, [current_time, u, humid_in,
temp_in, humid_out, temp_out, tc1.T1, tc1.T2]])
np.savetxt(data_file, data[1:],
delimiter=',', header=csv_file_header)
except KeyboardInterrupt:
print('Exiting...')
tc1.LED(0)
return
except ValueError as error:
# Handles cases when the heater overheats
print(error)
def run_controller(run_time, PID_parameters, show_plot=True):
'''
Run the main loop
run_time total run time in minutes
show_plot whether to show the dynamic plot of the system
'''
Kc, tau_I, tau_D = PID_parameters
import Adafruit_DHT # Only importable on the Pi itself
tc1 = tclab.TCLab()
tc1.LED(100)
# Bogus data row added to make concatenation work, never goes anywhere
data = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
csv_file_header = 'time,control output,box humidity,box temp,outside humidity,outside temp,heater 1 temp,heater 2 temp,P,I,D,SP,Err'
start_time = time.time()
u = 0
Qss = 0 # 0% heater to start
err = np.zeros(run_time*60)
sp = np.ones(run_time*60)*25
# Set up the set point
sp[10:300] = 303.15 - 273.15 # 30 degrees C
sp[300:550] = 298.15 - 273.15 # 25 degrees C
sp[550:800] = 310.15 - 273.15 # 37 degrees C
sp[800:3000] = 307.15 - 273.15 # 34 degrees C
sp[3000:] = 300.15 - 273.15 # 27 degrees C
integral_err_sum = 0
u_max = 100
u_min = 0
prev_temp = 0
prev_time = start_time
i = 0
tc1.Q1(u)
tc1.Q2(u)
while True:
try:
# read temp, humidity and time
humid_in, temp_in = Adafruit_DHT.read_retry(
11, 4, retries=5, delay_seconds=1)
humid_out, temp_out = Adafruit_DHT.read_retry(
11, 17, retries=5, delay_seconds=1)
current_time = time.time() - start_time
dtime = current_time - prev_time
if (humid_in is None) or (humid_out is None):
# Rejects failed readings
continue
if humid_in > 100:
# Corrupted data, so ignore it
continue
# PID controller to determine u
print("i", i)
err[i] = sp[i] - temp_in
if i > 10:
integral_err_sum = integral_err_sum + err[i] * dtime
print("error", err[i])
ddt = temp_in - prev_temp
P = Kc * err[i]
I = Kc/tau_I * integral_err_sum
D = - Kc * tau_D * ddt
prev_temp = temp_in
u = (Qss + P + I + D) * 100
if i > 10:
if u > u_max:
u = u_max
integral_err_sum = integral_err_sum - err[i] * dtime
if u < u_min:
u = u_min
integral_err_sum = integral_err_sum - err[i] * dtime
i += 1
prev_time = current_time
# Set the heater outputs
tc1.Q1(u)
tc1.Q2(u)
# print current values
print('time: {:.1f}, u: {}, h_in: {}, t_in: {}, h1: {}, h2: {}, h_out: {}, t_out: {}, P: {:.2f}, I: {:.2f}, D: {:.2f}'
.format(current_time, u, humid_in, temp_in, tc1.T1, tc1.T2, humid_out, temp_out, P, I, D, sp[i], err))
data = np.vstack([data, [current_time, u, humid_in,
temp_in, humid_out, temp_out, tc1.T1, tc1.T2, P, I, D, sp[i], err[i]]])
np.savetxt('data.csv', data[1:],
delimiter=',', header=csv_file_header)
if current_time > run_time*60:
print('Run finished. Exiting...')
tc1.LED(0)
return
except KeyboardInterrupt:
print('Exiting...')
tc1.LED(0)
return
except ValueError as error:
# Handles cases when the heater overheats
print(error)
| [
"tclab.TCLab",
"numpy.ones",
"Adafruit_DHT.read_retry",
"numpy.zeros",
"numpy.vstack",
"numpy.savetxt",
"time.time"
] | [((291, 304), 'tclab.TCLab', 'tclab.TCLab', ([], {}), '()\n', (302, 304), False, 'import tclab\n'), ((588, 599), 'time.time', 'time.time', ([], {}), '()\n', (597, 599), False, 'import time\n'), ((2418, 2431), 'tclab.TCLab', 'tclab.TCLab', ([], {}), '()\n', (2429, 2431), False, 'import tclab\n'), ((2730, 2741), 'time.time', 'time.time', ([], {}), '()\n', (2739, 2741), False, 'import time\n'), ((2797, 2820), 'numpy.zeros', 'np.zeros', (['(run_time * 60)'], {}), '(run_time * 60)\n', (2805, 2820), True, 'import numpy as np\n'), ((2828, 2850), 'numpy.ones', 'np.ones', (['(run_time * 60)'], {}), '(run_time * 60)\n', (2835, 2850), True, 'import numpy as np\n'), ((779, 837), 'Adafruit_DHT.read_retry', 'Adafruit_DHT.read_retry', (['(11)', '(4)'], {'retries': '(5)', 'delay_seconds': '(1)'}), '(11, 4, retries=5, delay_seconds=1)\n', (802, 837), False, 'import Adafruit_DHT\n'), ((889, 948), 'Adafruit_DHT.read_retry', 'Adafruit_DHT.read_retry', (['(11)', '(17)'], {'retries': '(5)', 'delay_seconds': '(1)'}), '(11, 17, retries=5, delay_seconds=1)\n', (912, 948), False, 'import Adafruit_DHT\n'), ((1641, 1737), 'numpy.vstack', 'np.vstack', (['[data, [current_time, u, humid_in, temp_in, humid_out, temp_out, tc1.T1,\n tc1.T2]]'], {}), '([data, [current_time, u, humid_in, temp_in, humid_out, temp_out,\n tc1.T1, tc1.T2]])\n', (1650, 1737), True, 'import numpy as np\n'), ((1786, 1856), 'numpy.savetxt', 'np.savetxt', (['data_file', 'data[1:]'], {'delimiter': '""","""', 'header': 'csv_file_header'}), "(data_file, data[1:], delimiter=',', header=csv_file_header)\n", (1796, 1856), True, 'import numpy as np\n'), ((3372, 3430), 'Adafruit_DHT.read_retry', 'Adafruit_DHT.read_retry', (['(11)', '(4)'], {'retries': '(5)', 'delay_seconds': '(1)'}), '(11, 4, retries=5, delay_seconds=1)\n', (3395, 3430), False, 'import Adafruit_DHT\n'), ((3482, 3541), 'Adafruit_DHT.read_retry', 'Adafruit_DHT.read_retry', (['(11)', '(17)'], {'retries': '(5)', 'delay_seconds': '(1)'}), '(11, 17, retries=5, delay_seconds=1)\n', (3505, 3541), False, 'import Adafruit_DHT\n'), ((5082, 5202), 'numpy.vstack', 'np.vstack', (['[data, [current_time, u, humid_in, temp_in, humid_out, temp_out, tc1.T1,\n tc1.T2, P, I, D, sp[i], err[i]]]'], {}), '([data, [current_time, u, humid_in, temp_in, humid_out, temp_out,\n tc1.T1, tc1.T2, P, I, D, sp[i], err[i]]])\n', (5091, 5202), True, 'import numpy as np\n'), ((5251, 5322), 'numpy.savetxt', 'np.savetxt', (['"""data.csv"""', 'data[1:]'], {'delimiter': '""","""', 'header': 'csv_file_header'}), "('data.csv', data[1:], delimiter=',', header=csv_file_header)\n", (5261, 5322), True, 'import numpy as np\n'), ((993, 1004), 'time.time', 'time.time', ([], {}), '()\n', (1002, 1004), False, 'import time\n'), ((3586, 3597), 'time.time', 'time.time', ([], {}), '()\n', (3595, 3597), False, 'import time\n')] |
import os
from requests.models import HTTPError
from server.client.client_utils import ClientUtils
import json
CLIENT_DIRECTORY = "./"
CLIENT_KEYWORD = "client"
class Client:
def __init__(self, args):
# If vID exists, read it
if os.path.isfile('vID'):
with open('vID') as f:
self.vid = f.read()
self.utils = ClientUtils(args.csv)
self.handle_client(args)
# Determines what action the client wants to do
def handle_client(self, args):
try:
if args.register:
self.register()
elif args.submit:
self.submit()
elif args.subparse is not None:
if args.subparse.lower() == 'stats' or args.subparse.lower() == 's':
if args.runs_for_group_run != -1:
self.utils.get_team_runs_for_group_run(self.vid, args.runs_for_group_run)
elif args.runs_for_submission != -1:
self.utils.get_runs_for_submission(self.vid, args.runs_for_submission)
elif args.get_submissions:
self.utils.get_submissions(self.vid)
elif args.get_group_runs:
self.utils.get_group_runs(self.vid)
elif args.get_code_for_submission != -1:
self.utils.get_code_from_submission(self.vid, args.get_code_for_submission)
elif args.get_errors_for_submission != -1:
self.utils.get_errors_for_submission(self.vid, args.get_errors_for_submission)
else:
self.get_submission_stats()
elif args.subparse.lower() == 'get_seed' or args.subparse.lower() == 'gs':
self.utils.get_seed_for_run(self.vid, args.run_id)
elif args.subparse.lower() == 'leaderboard' or args.subparse.lower() == "l":
if args.over_time:
self.utils.get_team_score_over_time(self.vid)
else:
self.utils.get_leaderboard(args.include_alumni, args.group_id)
else:
print("The server command needs more information. Try 'python launcher.pyz s -h' for help")
except HTTPError as e:
print(f"Error: {json.loads(e.response._content)['error']}")
def register(self):
# Check if vID already exists and cancel out
if os.path.isfile('vID'):
print('You have already registered.')
return
# Ask for teamname
teamname = input("Enter your teamname: ")
if teamname == '':
print("Teamname can't be empty.")
return
unis = self.utils.get_unis()
print("Select a university (id)")
self.utils.to_table(unis)
uni_id = int(input())
if uni_id not in map(lambda x: x['uni_id'], unis):
print("Not a valid uni id")
return
team_types = self.utils.get_team_types()
print("Select a team type (id)")
self.utils.to_table(team_types)
team_type = int(input())
if team_type not in map(lambda x: x['team_type_id'], team_types):
print("Not a valid team type")
return
response = self.utils.register(
{"type": team_type, "uni": uni_id, "name": teamname})
if not response.ok:
print('Teamname contains illegal characters or is already taken.')
return
# Receive uuid
# vID = await self.reader.read(BUFFER_SIZE)
# vID = vID.decode()
v_id = response.content
if v_id == '':
print('Something broke.')
return
# Put uuid into file for verification (vID)
with open('vID', 'w+') as f:
f.write(v_id.decode('UTF-8'))
print("Registration successful.")
print(
"You have been given an ID file in your Byte-le folder. Don't move or lose it!")
print("You can give a copy to your teammates so they can submit and view stats.")
def submit(self):
if not self.verify():
print('You need to register first.')
return
# Check and verify client file
file = None
for filename in os.listdir(CLIENT_DIRECTORY):
if CLIENT_KEYWORD.upper() not in filename.upper():
# Filters out files that do not contain CLIENT_KEYWORD in their filename
continue
if os.path.isdir(os.path.join(CLIENT_DIRECTORY, filename)):
# Skips folders
continue
user_check = input(f'Submitting {filename}, is this ok? (y/n): ')
if 'y' in user_check.lower():
file = filename
break
else:
file = input(
'Could not find file: please manually type file name: ')
if not os.path.isfile(CLIENT_DIRECTORY + file):
print('File not found.')
return
# Send client file
print('Submitting file.')
with open(CLIENT_DIRECTORY + file) as fl:
fil = "".join(fl.readlines())
self.utils.submit_file(fil, self.vid)
print('File sent successfully.')
def get_submission_stats(self):
res = self.utils.get_submission_stats(self.vid)
print("Current Submission stats for submission {0} in group run {1}".format(res["sub_id"], res["run_group_id"]))
print(f"Your submission has been run {len(res['data'])} out of {res['runs_per_client']} times")
self.utils.to_table(res["data"])
def verify(self):
# Check vID for uuid
if not os.path.isfile('vID'):
print("Cannot find vID, please register first.")
return False
return True
| [
"json.loads",
"os.listdir",
"os.path.join",
"os.path.isfile",
"server.client.client_utils.ClientUtils"
] | [((253, 274), 'os.path.isfile', 'os.path.isfile', (['"""vID"""'], {}), "('vID')\n", (267, 274), False, 'import os\n'), ((369, 390), 'server.client.client_utils.ClientUtils', 'ClientUtils', (['args.csv'], {}), '(args.csv)\n', (380, 390), False, 'from server.client.client_utils import ClientUtils\n'), ((2495, 2516), 'os.path.isfile', 'os.path.isfile', (['"""vID"""'], {}), "('vID')\n", (2509, 2516), False, 'import os\n'), ((4356, 4384), 'os.listdir', 'os.listdir', (['CLIENT_DIRECTORY'], {}), '(CLIENT_DIRECTORY)\n', (4366, 4384), False, 'import os\n'), ((4997, 5036), 'os.path.isfile', 'os.path.isfile', (['(CLIENT_DIRECTORY + file)'], {}), '(CLIENT_DIRECTORY + file)\n', (5011, 5036), False, 'import os\n'), ((5766, 5787), 'os.path.isfile', 'os.path.isfile', (['"""vID"""'], {}), "('vID')\n", (5780, 5787), False, 'import os\n'), ((4593, 4633), 'os.path.join', 'os.path.join', (['CLIENT_DIRECTORY', 'filename'], {}), '(CLIENT_DIRECTORY, filename)\n', (4605, 4633), False, 'import os\n'), ((2362, 2393), 'json.loads', 'json.loads', (['e.response._content'], {}), '(e.response._content)\n', (2372, 2393), False, 'import json\n')] |
# This program kills instances of Chrome that may have been left over
# by crashes of the main script
import os
import signal
import subprocess
def kill_chrome_instances():
p = subprocess.Popen(['ps', '-A'], stdout = subprocess.PIPE)
out, err = p.communicate()
if err == None:
for line in out.splitlines():
if "chrome" in line or "chromedriver" in line:
pid = pid = int(line.split(None, 1)[0])
os.kill(pid, signal.SIGKILL)
kill_chrome_instances() | [
"subprocess.Popen",
"os.kill"
] | [((183, 237), 'subprocess.Popen', 'subprocess.Popen', (["['ps', '-A']"], {'stdout': 'subprocess.PIPE'}), "(['ps', '-A'], stdout=subprocess.PIPE)\n", (199, 237), False, 'import subprocess\n'), ((461, 489), 'os.kill', 'os.kill', (['pid', 'signal.SIGKILL'], {}), '(pid, signal.SIGKILL)\n', (468, 489), False, 'import os\n')] |
#
# Generated with ExternalWrappingTypeBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.namedobject import NamedObjectBlueprint
class ExternalWrappingTypeBlueprint(NamedObjectBlueprint):
""""""
def __init__(self, name="ExternalWrappingType", package_path="sima/riflex", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(Attribute("mass","number","Mass per unit length",default=0.0))
self.attributes.append(Attribute("buoyancy","number","Buoyancy volume/length",default=0.0))
self.attributes.append(Attribute("gyrationRadius","number","Radius of gyration around x-axis",default=0.0))
self.attributes.append(Attribute("coveredFraction","number","Fraction of the segment that is covered. 0 < FRAC < 1.0.",default=0.0))
self.attributes.append(Attribute("wrappingItemLength","number","Length of wrapping item. Only used for graphics.",default=1.0))
self.attributes.append(Attribute("tangentialDrag","number","Drag force coefficient in tangential direction",default=0.0))
self.attributes.append(Attribute("normalDrag","number","Drag force coefficient in normal direction",default=0.0))
self.attributes.append(Attribute("tangentialAddedMass","number","Added mass per length in tangential direction",default=0.0))
self.attributes.append(Attribute("normalAddedMass","number","Added mass per length in normal direction",default=0.0))
self.attributes.append(Attribute("tangentialLinearDrag","number","Linear drag force coefficients in tangential direction",default=0.0))
self.attributes.append(Attribute("normalLinearDrag","number","Linear drag force coefficients in tangential direction",default=0.0)) | [
"dmt.dimension.Dimension",
"dmt.attribute.Attribute"
] | [((580, 623), 'dmt.attribute.Attribute', 'Attribute', (['"""name"""', '"""string"""', '""""""'], {'default': '""""""'}), "('name', 'string', '', default='')\n", (589, 623), False, 'from dmt.attribute import Attribute\n'), ((653, 703), 'dmt.attribute.Attribute', 'Attribute', (['"""description"""', '"""string"""', '""""""'], {'default': '""""""'}), "('description', 'string', '', default='')\n", (662, 703), False, 'from dmt.attribute import Attribute\n'), ((733, 775), 'dmt.attribute.Attribute', 'Attribute', (['"""_id"""', '"""string"""', '""""""'], {'default': '""""""'}), "('_id', 'string', '', default='')\n", (742, 775), False, 'from dmt.attribute import Attribute\n'), ((927, 991), 'dmt.attribute.Attribute', 'Attribute', (['"""mass"""', '"""number"""', '"""Mass per unit length"""'], {'default': '(0.0)'}), "('mass', 'number', 'Mass per unit length', default=0.0)\n", (936, 991), False, 'from dmt.attribute import Attribute\n'), ((1021, 1091), 'dmt.attribute.Attribute', 'Attribute', (['"""buoyancy"""', '"""number"""', '"""Buoyancy volume/length"""'], {'default': '(0.0)'}), "('buoyancy', 'number', 'Buoyancy volume/length', default=0.0)\n", (1030, 1091), False, 'from dmt.attribute import Attribute\n'), ((1121, 1211), 'dmt.attribute.Attribute', 'Attribute', (['"""gyrationRadius"""', '"""number"""', '"""Radius of gyration around x-axis"""'], {'default': '(0.0)'}), "('gyrationRadius', 'number', 'Radius of gyration around x-axis',\n default=0.0)\n", (1130, 1211), False, 'from dmt.attribute import Attribute\n'), ((1237, 1352), 'dmt.attribute.Attribute', 'Attribute', (['"""coveredFraction"""', '"""number"""', '"""Fraction of the segment that is covered. 0 < FRAC < 1.0."""'], {'default': '(0.0)'}), "('coveredFraction', 'number',\n 'Fraction of the segment that is covered. 0 < FRAC < 1.0.', default=0.0)\n", (1246, 1352), False, 'from dmt.attribute import Attribute\n'), ((1378, 1488), 'dmt.attribute.Attribute', 'Attribute', (['"""wrappingItemLength"""', '"""number"""', '"""Length of wrapping item. Only used for graphics."""'], {'default': '(1.0)'}), "('wrappingItemLength', 'number',\n 'Length of wrapping item. Only used for graphics.', default=1.0)\n", (1387, 1488), False, 'from dmt.attribute import Attribute\n'), ((1514, 1618), 'dmt.attribute.Attribute', 'Attribute', (['"""tangentialDrag"""', '"""number"""', '"""Drag force coefficient in tangential direction"""'], {'default': '(0.0)'}), "('tangentialDrag', 'number',\n 'Drag force coefficient in tangential direction', default=0.0)\n", (1523, 1618), False, 'from dmt.attribute import Attribute\n'), ((1644, 1740), 'dmt.attribute.Attribute', 'Attribute', (['"""normalDrag"""', '"""number"""', '"""Drag force coefficient in normal direction"""'], {'default': '(0.0)'}), "('normalDrag', 'number',\n 'Drag force coefficient in normal direction', default=0.0)\n", (1653, 1740), False, 'from dmt.attribute import Attribute\n'), ((1766, 1874), 'dmt.attribute.Attribute', 'Attribute', (['"""tangentialAddedMass"""', '"""number"""', '"""Added mass per length in tangential direction"""'], {'default': '(0.0)'}), "('tangentialAddedMass', 'number',\n 'Added mass per length in tangential direction', default=0.0)\n", (1775, 1874), False, 'from dmt.attribute import Attribute\n'), ((1900, 2000), 'dmt.attribute.Attribute', 'Attribute', (['"""normalAddedMass"""', '"""number"""', '"""Added mass per length in normal direction"""'], {'default': '(0.0)'}), "('normalAddedMass', 'number',\n 'Added mass per length in normal direction', default=0.0)\n", (1909, 2000), False, 'from dmt.attribute import Attribute\n'), ((2026, 2144), 'dmt.attribute.Attribute', 'Attribute', (['"""tangentialLinearDrag"""', '"""number"""', '"""Linear drag force coefficients in tangential direction"""'], {'default': '(0.0)'}), "('tangentialLinearDrag', 'number',\n 'Linear drag force coefficients in tangential direction', default=0.0)\n", (2035, 2144), False, 'from dmt.attribute import Attribute\n'), ((2170, 2284), 'dmt.attribute.Attribute', 'Attribute', (['"""normalLinearDrag"""', '"""number"""', '"""Linear drag force coefficients in tangential direction"""'], {'default': '(0.0)'}), "('normalLinearDrag', 'number',\n 'Linear drag force coefficients in tangential direction', default=0.0)\n", (2179, 2284), False, 'from dmt.attribute import Attribute\n'), ((879, 893), 'dmt.dimension.Dimension', 'Dimension', (['"""*"""'], {}), "('*')\n", (888, 893), False, 'from dmt.dimension import Dimension\n')] |
import torpy.http.requests
from torrent_crawler.sources.tmdb import Tmdb
from torrent_crawler.trackers.rutor import Rutor
class TorrentCrawler:
def search(self, text: str):
with torpy.http.requests.tor_requests_session() as session:
result = []
for source in self.sources:
result.extend(source.search(session, text))
return result
sources = [Tmdb()]
trackers = [Rutor()]
| [
"torrent_crawler.sources.tmdb.Tmdb",
"torrent_crawler.trackers.rutor.Rutor"
] | [((414, 420), 'torrent_crawler.sources.tmdb.Tmdb', 'Tmdb', ([], {}), '()\n', (418, 420), False, 'from torrent_crawler.sources.tmdb import Tmdb\n'), ((438, 445), 'torrent_crawler.trackers.rutor.Rutor', 'Rutor', ([], {}), '()\n', (443, 445), False, 'from torrent_crawler.trackers.rutor import Rutor\n')] |
# coding: utf-8
"""
cifrum API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class AdjustedValuesApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def adjusted_close_values(self, registration_number, currency, start_date, end_date, period_frequency, interpolation_type, **kwargs): # noqa: E501
"""Returns adjusted close values of a mutual fund by registrationNumber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.adjusted_close_values(registration_number, currency, start_date, end_date, period_frequency, interpolation_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str registration_number: (required)
:param str currency: (required)
:param str start_date: (required)
:param str end_date: (required)
:param str period_frequency: (required)
:param str interpolation_type: (required)
:return: ModelsRawValues
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.adjusted_close_values_with_http_info(registration_number, currency, start_date, end_date, period_frequency, interpolation_type, **kwargs) # noqa: E501
else:
(data) = self.adjusted_close_values_with_http_info(registration_number, currency, start_date, end_date, period_frequency, interpolation_type, **kwargs) # noqa: E501
return data
def adjusted_close_values_with_http_info(self, registration_number, currency, start_date, end_date, period_frequency, interpolation_type, **kwargs): # noqa: E501
"""Returns adjusted close values of a mutual fund by registrationNumber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.adjusted_close_values_with_http_info(registration_number, currency, start_date, end_date, period_frequency, interpolation_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str registration_number: (required)
:param str currency: (required)
:param str start_date: (required)
:param str end_date: (required)
:param str period_frequency: (required)
:param str interpolation_type: (required)
:return: ModelsRawValues
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registration_number', 'currency', 'start_date', 'end_date', 'period_frequency', 'interpolation_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method adjusted_close_values" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registration_number' is set
if ('registration_number' not in params or
params['registration_number'] is None):
raise ValueError("Missing the required parameter `registration_number` when calling `adjusted_close_values`") # noqa: E501
# verify the required parameter 'currency' is set
if ('currency' not in params or
params['currency'] is None):
raise ValueError("Missing the required parameter `currency` when calling `adjusted_close_values`") # noqa: E501
# verify the required parameter 'start_date' is set
if ('start_date' not in params or
params['start_date'] is None):
raise ValueError("Missing the required parameter `start_date` when calling `adjusted_close_values`") # noqa: E501
# verify the required parameter 'end_date' is set
if ('end_date' not in params or
params['end_date'] is None):
raise ValueError("Missing the required parameter `end_date` when calling `adjusted_close_values`") # noqa: E501
# verify the required parameter 'period_frequency' is set
if ('period_frequency' not in params or
params['period_frequency'] is None):
raise ValueError("Missing the required parameter `period_frequency` when calling `adjusted_close_values`") # noqa: E501
# verify the required parameter 'interpolation_type' is set
if ('interpolation_type' not in params or
params['interpolation_type'] is None):
raise ValueError("Missing the required parameter `interpolation_type` when calling `adjusted_close_values`") # noqa: E501
collection_formats = {}
path_params = {}
if 'registration_number' in params:
path_params['registrationNumber'] = params['registration_number'] # noqa: E501
query_params = []
if 'currency' in params:
query_params.append(('currency', params['currency'])) # noqa: E501
if 'start_date' in params:
query_params.append(('startDate', params['start_date'])) # noqa: E501
if 'end_date' in params:
query_params.append(('endDate', params['end_date'])) # noqa: E501
if 'period_frequency' in params:
query_params.append(('periodFrequency', params['period_frequency'])) # noqa: E501
if 'interpolation_type' in params:
query_params.append(('interpolationType', params['interpolation_type'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/adjusted-values/mut-ru/{registrationNumber}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ModelsRawValues', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"swagger_client.api_client.ApiClient",
"six.iteritems"
] | [((3730, 3761), 'six.iteritems', 'six.iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (3743, 3761), False, 'import six\n'), ((755, 766), 'swagger_client.api_client.ApiClient', 'ApiClient', ([], {}), '()\n', (764, 766), False, 'from swagger_client.api_client import ApiClient\n')] |
import os
import io
import discord
import time
import matplotlib.font_manager
from tle import constants
from matplotlib import pyplot as plt
from matplotlib import rcParams
fontprop = matplotlib.font_manager.FontProperties(
fname=constants.NOTO_SANS_CJK_REGULAR_FONT_PATH
)
# String wrapper to avoid the underscore behavior in legends
#
# In legends, matplotlib ignores labels that begin with _
# https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.legend
# However, this check is only done for actual string objects.
class StrWrap:
def __init__(self, s):
self.string = s
def __str__(self):
return self.string
def get_current_figure_as_file():
filename = os.path.join(constants.TEMP_DIR, f"tempplot_{time.time()}.png")
plt.savefig(
filename,
facecolor=plt.gca().get_facecolor(),
bbox_inches="tight",
pad_inches=0.25,
)
with open(filename, "rb") as file:
discord_file = discord.File(
io.BytesIO(file.read()), filename="plot.png"
)
os.remove(filename)
return discord_file
def plot_rating_bg(ranks):
ymin, ymax = plt.gca().get_ylim()
bgcolor = plt.gca().get_facecolor()
for rank in ranks:
plt.axhspan(
rank.low,
rank.high,
facecolor=rank.color_graph,
alpha=0.8,
edgecolor=bgcolor,
linewidth=0.5,
)
locs, labels = plt.xticks()
for loc in locs:
plt.axvline(loc, color=bgcolor, linewidth=0.5)
plt.ylim(ymin, ymax)
| [
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axhspan",
"matplotlib.pyplot.ylim",
"time.time",
"matplotlib.pyplot.axvline",
"os.remove"
] | [((1056, 1075), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (1065, 1075), False, 'import os\n'), ((1447, 1459), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (1457, 1459), True, 'from matplotlib import pyplot as plt\n'), ((1540, 1560), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (1548, 1560), True, 'from matplotlib import pyplot as plt\n'), ((1238, 1347), 'matplotlib.pyplot.axhspan', 'plt.axhspan', (['rank.low', 'rank.high'], {'facecolor': 'rank.color_graph', 'alpha': '(0.8)', 'edgecolor': 'bgcolor', 'linewidth': '(0.5)'}), '(rank.low, rank.high, facecolor=rank.color_graph, alpha=0.8,\n edgecolor=bgcolor, linewidth=0.5)\n', (1249, 1347), True, 'from matplotlib import pyplot as plt\n'), ((1489, 1535), 'matplotlib.pyplot.axvline', 'plt.axvline', (['loc'], {'color': 'bgcolor', 'linewidth': '(0.5)'}), '(loc, color=bgcolor, linewidth=0.5)\n', (1500, 1535), True, 'from matplotlib import pyplot as plt\n'), ((1146, 1155), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1153, 1155), True, 'from matplotlib import pyplot as plt\n'), ((1181, 1190), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1188, 1190), True, 'from matplotlib import pyplot as plt\n'), ((748, 759), 'time.time', 'time.time', ([], {}), '()\n', (757, 759), False, 'import time\n'), ((820, 829), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (827, 829), True, 'from matplotlib import pyplot as plt\n')] |
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QTimer, Qt
from PyQt5.QtGui import QIcon, QPalette, QColor
s=m=h=0
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.setWindowTitle('Timer --Arvind')
Form.setWindowIcon(QIcon('Images/Timer.png'))
Form.resize(360, 240)
Form.setMinimumSize(QtCore.QSize(360, 240))
Form.setMaximumSize(QtCore.QSize(360, 240))
self.textBrowser = QtWidgets.QTextBrowser(Form)
self.textBrowser.setGeometry(QtCore.QRect(10, 190, 341, 41))
self.textBrowser.setObjectName("textBrowser")
self.lcdNumber = QtWidgets.QLCDNumber(Form)
self.lcdNumber.setGeometry(QtCore.QRect(10, 10, 341, 141))
self.lcdNumber.setObjectName("lcdNumber")
time = "{:02}:{:02}:{:02}".format(0,0,0)
self.lcdNumber.setDigitCount(len(time))
self.lcdNumber.display(time)
self.widget = QtWidgets.QWidget(Form)
self.widget.setGeometry(QtCore.QRect(10, 160, 341, 25))
self.widget.setObjectName("widget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButtonStart = QtWidgets.QPushButton(self.widget)
self.pushButtonStart.setObjectName("pushButtonStart")
self.horizontalLayout.addWidget(self.pushButtonStart)
self.pushButtonPause = QtWidgets.QPushButton(self.widget)
self.pushButtonPause.setObjectName("pushButtonPause")
self.horizontalLayout.addWidget(self.pushButtonPause)
self.pushButtonLap = QtWidgets.QPushButton(self.widget)
self.pushButtonLap.setObjectName("pushButtonLap")
self.horizontalLayout.addWidget(self.pushButtonLap)
self.pushButtonReset = QtWidgets.QPushButton(self.widget)
self.pushButtonReset.setObjectName("pushButtonReset")
self.horizontalLayout.addWidget(self.pushButtonReset)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
self.Timer = QTimer()
self.Timer.timeout.connect(self.LCD)
self.pushButtonStart.clicked.connect(self.Start)
self.pushButtonPause.clicked.connect(self.Pause)
self.pushButtonLap.clicked.connect(self.Lap)
self.pushButtonReset.clicked.connect(self.Reset)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Timer --Arvind"))
self.pushButtonStart.setText(_translate("Form", "Start"))
self.pushButtonPause.setText(_translate("Form", "Pause"))
self.pushButtonLap.setText(_translate("Form", "Lap"))
self.pushButtonReset.setText(_translate("Form", "Reset"))
def Start(self):
global s, m, h
self.Timer.start(1000)
def Pause(self):
self.Timer.stop()
def Reset(self):
global s, m, h
self.Timer.stop()
s = m= h= 0
time = "{:02}:{:02}:{:02}".format(h, m, s)
self.lcdNumber.setDigitCount(len(time))
self.lcdNumber.display(time)
self.textBrowser.setText('')
def Lap(self):
global s, m, h
if self.Timer.isActive():
self.textBrowser.append('The Lap is : {}'.format(str(self.time)))
else:
self.textBrowser.setText('')
def LCD(self):
global s, m, h
if s < 59:
s += 1
else:
if m < 59:
s = 0
m += 1
elif m == 59 and h < 24:
h += 1
m = 0
s = 0
else:
self.Timer.stop()
self.time = "{:02}:{:02}:{:02}".format(h, m, s)
self.lcdNumber.setDigitCount(len(self.time))
self.lcdNumber.display(self.time)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
app.setStyle('Fusion')
palette = QPalette()
palette.setColor(QPalette.Window, QColor(83, 83, 83))
palette.setColor(QPalette.WindowText, Qt.white)
palette.setColor(QPalette.Base, QColor(25, 25, 25))
palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
palette.setColor(QPalette.ToolTipBase, Qt.white)
palette.setColor(QPalette.ToolTipText, Qt.white)
palette.setColor(QPalette.Text, Qt.white)
palette.setColor(QPalette.Button, QColor(53, 53, 53))
palette.setColor(QPalette.ButtonText, Qt.white)
palette.setColor(QPalette.BrightText, Qt.red)
palette.setColor(QPalette.Link, QColor(42, 130, 218))
palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
palette.setColor(QPalette.HighlightedText, Qt.gray)
app.setPalette(palette)
app.setStyleSheet("QToolTip { color: #ffffff; background-color: #2a82da; border: 1px solid white; }")
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QTextBrowser",
"PyQt5.QtGui.QPalette",
"PyQt5.QtGui.QIcon",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtCore.QTimer",
"PyQt5.QtGui.QColor",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidget... | [((3980, 4012), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (4002, 4012), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4055, 4065), 'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (4063, 4065), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor\n'), ((4934, 4953), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (4951, 4953), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((485, 513), 'PyQt5.QtWidgets.QTextBrowser', 'QtWidgets.QTextBrowser', (['Form'], {}), '(Form)\n', (507, 513), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((662, 688), 'PyQt5.QtWidgets.QLCDNumber', 'QtWidgets.QLCDNumber', (['Form'], {}), '(Form)\n', (682, 688), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((964, 987), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['Form'], {}), '(Form)\n', (981, 987), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1128, 1162), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self.widget'], {}), '(self.widget)\n', (1149, 1162), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1319, 1353), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.widget'], {}), '(self.widget)\n', (1340, 1353), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1509, 1543), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.widget'], {}), '(self.widget)\n', (1530, 1543), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1697, 1731), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.widget'], {}), '(self.widget)\n', (1718, 1731), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1881, 1915), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.widget'], {}), '(self.widget)\n', (1902, 1915), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2082, 2125), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Form'], {}), '(Form)\n', (2119, 2125), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2148, 2156), 'PyQt5.QtCore.QTimer', 'QTimer', ([], {}), '()\n', (2154, 2156), False, 'from PyQt5.QtCore import QTimer, Qt\n'), ((4104, 4122), 'PyQt5.QtGui.QColor', 'QColor', (['(83)', '(83)', '(83)'], {}), '(83, 83, 83)\n', (4110, 4122), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor\n'), ((4212, 4230), 'PyQt5.QtGui.QColor', 'QColor', (['(25)', '(25)', '(25)'], {}), '(25, 25, 25)\n', (4218, 4230), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor\n'), ((4277, 4295), 'PyQt5.QtGui.QColor', 'QColor', (['(53)', '(53)', '(53)'], {}), '(53, 53, 53)\n', (4283, 4295), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor\n'), ((4487, 4505), 'PyQt5.QtGui.QColor', 'QColor', (['(53)', '(53)', '(53)'], {}), '(53, 53, 53)\n', (4493, 4505), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor\n'), ((4645, 4665), 'PyQt5.QtGui.QColor', 'QColor', (['(42)', '(130)', '(218)'], {}), '(42, 130, 218)\n', (4651, 4665), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor\n'), ((4708, 4728), 'PyQt5.QtGui.QColor', 'QColor', (['(42)', '(130)', '(218)'], {}), '(42, 130, 218)\n', (4714, 4728), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor\n'), ((297, 322), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""Images/Timer.png"""'], {}), "('Images/Timer.png')\n", (302, 322), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor\n'), ((382, 404), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(360)', '(240)'], {}), '(360, 240)\n', (394, 404), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((434, 456), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(360)', '(240)'], {}), '(360, 240)\n', (446, 456), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((551, 581), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(190)', '(341)', '(41)'], {}), '(10, 190, 341, 41)\n', (563, 581), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((724, 754), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(10)', '(341)', '(141)'], {}), '(10, 10, 341, 141)\n', (736, 754), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1020, 1050), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(160)', '(341)', '(25)'], {}), '(10, 160, 341, 25)\n', (1032, 1050), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
import hashlib
import os
import pickle
from zoltpy.quantile_io import json_io_dict_from_quantile_csv_file
from zoltpy import util
from zoltpy.connection import ZoltarConnection
from zoltpy.covid19 import COVID_TARGETS, covid19_row_validator, validate_quantile_csv_file
import glob
import json
import sys
UPDATE = False
if len(sys.argv) >1:
if sys.argv[1].lower() == 'update':
print('Only updating')
UPDATE = True
# util function to get filename from the path
def get_filename_from_path(path):
print(path, path.split(os.path.sep)[-1])
return path.split(os.path.sep)[-1]
g_db = None
def get_db():
global g_db
if g_db is None:
g_db = json.load(open('code/zoltar_scripts/validated_file_db.json'))
return g_db
def dump_db():
global g_db
with open('code/zoltar_scripts/validated_file_db.json', 'w') as fw:
json.dump(g_db, fw, indent=4)
list_of_model_directories = os.listdir('./data-processed/')
for directory in list_of_model_directories:
if "." in directory:
continue
# Get all forecasts in the directory of this model
path = './data-processed/'+directory+'/'
forecasts = glob.glob(path + "*.csv")
for forecast in forecasts:
with open(forecast, "rb") as f:
# Get the current hash of a processed file
checksum = hashlib.md5(f.read()).hexdigest()
db = get_db()
# Validate covid19 file
if UPDATE and db.get(get_filename_from_path(forecast), None) == checksum:
continue
errors_from_validation = validate_quantile_csv_file(forecast)
# Upload forecast
if "no errors" == errors_from_validation:
# Check this hash against the previous version of hash
if db.get(get_filename_from_path(forecast), None) != checksum:
db[get_filename_from_path(forecast)] = checksum
else:
print(errors_from_validation)
print('Dumping db')
dump_db() | [
"zoltpy.covid19.validate_quantile_csv_file",
"json.dump",
"os.listdir",
"glob.glob"
] | [((928, 959), 'os.listdir', 'os.listdir', (['"""./data-processed/"""'], {}), "('./data-processed/')\n", (938, 959), False, 'import os\n'), ((1162, 1187), 'glob.glob', 'glob.glob', (["(path + '*.csv')"], {}), "(path + '*.csv')\n", (1171, 1187), False, 'import glob\n'), ((869, 898), 'json.dump', 'json.dump', (['g_db', 'fw'], {'indent': '(4)'}), '(g_db, fw, indent=4)\n', (878, 898), False, 'import json\n'), ((1567, 1603), 'zoltpy.covid19.validate_quantile_csv_file', 'validate_quantile_csv_file', (['forecast'], {}), '(forecast)\n', (1593, 1603), False, 'from zoltpy.covid19 import COVID_TARGETS, covid19_row_validator, validate_quantile_csv_file\n')] |
def test_simple():
import pyglobal
import settings
assert pyglobal.get('abc') == 123
assert pyglobal.get('qwerty') == ''
assert pyglobal.get('Hello') == 'World!'
assert pyglobal.get('SECRET_KEY') == '!!!CHANGE!!!'
def test_hack():
import pyglobal
pyglobal.set('SECRET_KEY', '*******')
# Check if library can access the page global variable
def get_glob(*args, **kwargs):
global GLOBAL_SETTING
try:
len(GLOBAL_SETTING)
raise AssertionError('Should not be able to access this object!')
except (AttributeError, NameError):
pass
pyglobal.get = get_glob
pyglobal.get('SECRET_KEY', None)
# User can still manually grab the variable even though it is not defined in __all__.
pyglobal.GLOBAL_SETTING
try:
len(pyglobal.GLOBAL_SETTING)
raise AssertionError('Global Settings should not have a length')
except (TypeError, AttributeError):
pass
try:
for k in pyglobal.GLOBAL_SETTING:
pass
raise AssertionError('Global Settings should not be iterable')
except (TypeError, AttributeError):
pass
def run_memory():
# Check your memory usage. It should not go up continuously.
import pyglobal
while True:
pyglobal.default('default', 'oi')
pyglobal.set('SECRET_KEY', "Hello World!")
pyglobal.set('Other', {'a': 1, "b": 2})
pyglobal.set('SECRET_KEY', "Hello World!", scope='MyScope')
pyglobal.get('SECRET_KEY')
pyglobal.get('SECRET_KEY', scope='MyScope')
if __name__ == '__main__':
import sys
test_simple()
test_hack()
# sys.argv.append('--run_memory')
if '--run_memory' in sys.argv:
run_memory()
print('All pyglobal tests finished successfully!')
| [
"pyglobal.set",
"pyglobal.default",
"pyglobal.get"
] | [((285, 322), 'pyglobal.set', 'pyglobal.set', (['"""SECRET_KEY"""', '"""*******"""'], {}), "('SECRET_KEY', '*******')\n", (297, 322), False, 'import pyglobal\n'), ((665, 697), 'pyglobal.get', 'pyglobal.get', (['"""SECRET_KEY"""', 'None'], {}), "('SECRET_KEY', None)\n", (677, 697), False, 'import pyglobal\n'), ((72, 91), 'pyglobal.get', 'pyglobal.get', (['"""abc"""'], {}), "('abc')\n", (84, 91), False, 'import pyglobal\n'), ((110, 132), 'pyglobal.get', 'pyglobal.get', (['"""qwerty"""'], {}), "('qwerty')\n", (122, 132), False, 'import pyglobal\n'), ((150, 171), 'pyglobal.get', 'pyglobal.get', (['"""Hello"""'], {}), "('Hello')\n", (162, 171), False, 'import pyglobal\n'), ((196, 222), 'pyglobal.get', 'pyglobal.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (208, 222), False, 'import pyglobal\n'), ((1311, 1344), 'pyglobal.default', 'pyglobal.default', (['"""default"""', '"""oi"""'], {}), "('default', 'oi')\n", (1327, 1344), False, 'import pyglobal\n'), ((1353, 1395), 'pyglobal.set', 'pyglobal.set', (['"""SECRET_KEY"""', '"""Hello World!"""'], {}), "('SECRET_KEY', 'Hello World!')\n", (1365, 1395), False, 'import pyglobal\n'), ((1404, 1443), 'pyglobal.set', 'pyglobal.set', (['"""Other"""', "{'a': 1, 'b': 2}"], {}), "('Other', {'a': 1, 'b': 2})\n", (1416, 1443), False, 'import pyglobal\n'), ((1452, 1511), 'pyglobal.set', 'pyglobal.set', (['"""SECRET_KEY"""', '"""Hello World!"""'], {'scope': '"""MyScope"""'}), "('SECRET_KEY', 'Hello World!', scope='MyScope')\n", (1464, 1511), False, 'import pyglobal\n'), ((1520, 1546), 'pyglobal.get', 'pyglobal.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (1532, 1546), False, 'import pyglobal\n'), ((1555, 1598), 'pyglobal.get', 'pyglobal.get', (['"""SECRET_KEY"""'], {'scope': '"""MyScope"""'}), "('SECRET_KEY', scope='MyScope')\n", (1567, 1598), False, 'import pyglobal\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
####################
def ld_to_dl(ld):
dl = {}
for i, d in enumerate(ld):
for key in d.keys():
value = d[key]
if i == 0:
dl[key] = [value]
else:
dl[key].append(value)
return dl
####################
results = np.load('results.npy', allow_pickle=True)
results = ld_to_dl(results)
df = pd.DataFrame.from_dict(results)
print (df.columns)
####################
# example:
# y_mean[skip][cards][alloc][profile][rpr_alloc][layer]
'''
block = df[ df['alloc'] == 'block' ][ df['rpr_alloc'] == 'centroids' ]
print (block)
block = df.query('(alloc == "block") & (rpr_alloc == "centroids")')
print (block)
'''
####################
x = df.query('(alloc == "block") & (rpr_alloc == "centroids") & (profile == 1)')
mac_per_cycle = x['nmac'] / x['cycle']
print (mac_per_cycle)
####################
| [
"numpy.load",
"pandas.DataFrame.from_dict"
] | [((374, 415), 'numpy.load', 'np.load', (['"""results.npy"""'], {'allow_pickle': '(True)'}), "('results.npy', allow_pickle=True)\n", (381, 415), True, 'import numpy as np\n'), ((449, 480), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['results'], {}), '(results)\n', (471, 480), True, 'import pandas as pd\n')] |
# Copyright (c) 2013, omar and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from typing import Dict
import frappe
# import frappe
def execute(filters=None):
columns = get_columns()
data = get_data(filters)
return columns, data
def get_columns():
return [{
"fieldname": "bank",
"fieldtype": "Data",
"label": "البنوك",
},
{
"fieldname": "snd_total_amount",
"fieldtype": "Data",
"label": "اجمالي الحركات الصادرة",
},
{
"fieldname": "snd_count",
"fieldtype": "Data",
"label": "عدد الحركات الصادرة",
},
{
"fieldname": "snd_total_fees",
"fieldtype": "Data",
"label": "اجمالي العمولات الصادرة",
},
{
"fieldname": "snd_total_amount_fees",
"fieldtype": "Data",
"label": "اجمالي الصادر",
},
{
"fieldname": "rcv_total_amount",
"fieldtype": "Data",
"label": "اجمالي الحركات الواردة",
},
{
"fieldname": "rcv_count",
"fieldtype": "Data",
"label": "عدد الحركات الواردة",
},
{
"fieldname": "rcv_total_fees",
"fieldtype": "Data",
"label": "اجمالي العمولات الواردة",
},
{
"fieldname": "rcv_total_amount_fees",
"fieldtype": "Data",
"label": "اجمالي الوارد",
}
]
def get_data(filters=None):
data = []
data_obj = dict()
from_date, to_date, currency = filters.get('from'), filters.get('to'), filters.get('currency')
currency_code = frappe.db.get_value("Bank Currency", currency, ["currency_code"])
snd_data = frappe.db.sql("""
SELECT c.system_code as bank, sum(amount) as total_amount,
sum(receiver_bank_fee + sender_bank_fee + swift_fee) as total_fees, count(receiver_bank) as count,
(sum(amount) + sum(receiver_bank_fee + sender_bank_fee + swift_fee)) as total_amount_fees
FROM `tabBank Payment Order` as p
INNER JOIN `tabBank Company` as c ON receiver_bank=c.name
WHERE transaction_state_sequence='Post' AND (p.creation BETWEEN %s AND %s ) AND currency=%s
GROUP BY receiver_bank
""", (from_date, to_date, currency))
rcv_data = frappe.db.sql("""
SELECT req_bank_id as bank, sum(req_bank_intr_bk_sttlm_amt) as total_amount,
sum( IFNULL(retail_fees, "0")+ IFNULL(interchange_fees, "0") + IFNULL(switch_fees, "0") ) as total_fees,
count(req_bank_id) as count,
(sum(req_bank_intr_bk_sttlm_amt) + sum(retail_fees + interchange_fees + switch_fees)) as total_amount_fees
FROM `tabBank Payment Received` as p
WHERE (p.creation BETWEEN %s AND %s ) AND (status_recieved_flg=1 OR (psh_sts_rcv_flg=1 AND psh_sts_rcv_txt='ACSC')) AND req_bank_intr_bk_sttlm_amt_ccy=%s
GROUP BY req_bank_id
""", (from_date, to_date, currency_code))
for d in snd_data:
obj = {
"bank": d[0],
"snd_total_amount": d[1],
"snd_total_fees": d[2],
"snd_count": d[3],
"snd_total_amount_fees": float(d[1]) + float(d[2]),
"rcv_total_amount": 0,
"rcv_total_fees": 0,
"rcv_count": 0,
"rcv_total_amount_fees": 0
}
data_obj[d[0]] = obj
for d in rcv_data:
if d[0] in data_obj.keys():
obj = {
"rcv_total_amount": d[1],
"rcv_total_fees": d[2],
"rcv_count": d[3],
"rcv_total_amount_fees": float(d[1]) + float(d[2])
}
data_obj[d[0]].update(obj)
else:
obj = {
"bank":d[0],
"snd_total_amount":0,
"snd_total_fees": 0,
"snd_count": 0,
"snd_total_amount_fees":0,
"rcv_total_amount": d[1],
"rcv_total_fees": d[2],
"rcv_count": d[3],
"rcv_total_amount_fees": float(d[1]) + float(d[2])
}
data_obj[d[0]] = obj
for d in data_obj.values():
data.append(d)
return data | [
"frappe.db.sql",
"frappe.db.get_value"
] | [((1410, 1475), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Bank Currency"""', 'currency', "['currency_code']"], {}), "('Bank Currency', currency, ['currency_code'])\n", (1429, 1475), False, 'import frappe\n'), ((1489, 2084), 'frappe.db.sql', 'frappe.db.sql', (['"""\n SELECT c.system_code as bank, sum(amount) as total_amount, \n sum(receiver_bank_fee + sender_bank_fee + swift_fee) as total_fees, count(receiver_bank) as count,\n (sum(amount) + sum(receiver_bank_fee + sender_bank_fee + swift_fee)) as total_amount_fees\n FROM `tabBank Payment Order` as p\n INNER JOIN `tabBank Company` as c ON receiver_bank=c.name\n WHERE transaction_state_sequence=\'Post\' AND (p.creation BETWEEN %s AND %s ) AND currency=%s\n GROUP BY receiver_bank\n """', '(from_date, to_date, currency)'], {}), '(\n """\n SELECT c.system_code as bank, sum(amount) as total_amount, \n sum(receiver_bank_fee + sender_bank_fee + swift_fee) as total_fees, count(receiver_bank) as count,\n (sum(amount) + sum(receiver_bank_fee + sender_bank_fee + swift_fee)) as total_amount_fees\n FROM `tabBank Payment Order` as p\n INNER JOIN `tabBank Company` as c ON receiver_bank=c.name\n WHERE transaction_state_sequence=\'Post\' AND (p.creation BETWEEN %s AND %s ) AND currency=%s\n GROUP BY receiver_bank\n """\n , (from_date, to_date, currency))\n', (1502, 2084), False, 'import frappe\n'), ((2088, 2768), 'frappe.db.sql', 'frappe.db.sql', (['"""\n SELECT req_bank_id as bank, sum(req_bank_intr_bk_sttlm_amt) as total_amount, \n sum( IFNULL(retail_fees, "0")+ IFNULL(interchange_fees, "0") + IFNULL(switch_fees, "0") ) as total_fees, \n count(req_bank_id) as count, \n (sum(req_bank_intr_bk_sttlm_amt) + sum(retail_fees + interchange_fees + switch_fees)) as total_amount_fees\n FROM `tabBank Payment Received` as p\n WHERE (p.creation BETWEEN %s AND %s ) AND (status_recieved_flg=1 OR (psh_sts_rcv_flg=1 AND psh_sts_rcv_txt=\'ACSC\')) AND req_bank_intr_bk_sttlm_amt_ccy=%s\n GROUP BY req_bank_id\n """', '(from_date, to_date, currency_code)'], {}), '(\n """\n SELECT req_bank_id as bank, sum(req_bank_intr_bk_sttlm_amt) as total_amount, \n sum( IFNULL(retail_fees, "0")+ IFNULL(interchange_fees, "0") + IFNULL(switch_fees, "0") ) as total_fees, \n count(req_bank_id) as count, \n (sum(req_bank_intr_bk_sttlm_amt) + sum(retail_fees + interchange_fees + switch_fees)) as total_amount_fees\n FROM `tabBank Payment Received` as p\n WHERE (p.creation BETWEEN %s AND %s ) AND (status_recieved_flg=1 OR (psh_sts_rcv_flg=1 AND psh_sts_rcv_txt=\'ACSC\')) AND req_bank_intr_bk_sttlm_amt_ccy=%s\n GROUP BY req_bank_id\n """\n , (from_date, to_date, currency_code))\n', (2101, 2768), False, 'import frappe\n')] |
# -*- coding: utf-8 -*-
"""
Test module imports
===================
"""
import sys
def test_module_imports():
try:
import ahrs
except:
sys.exit("[ERROR] Package AHRS not found. Go to root directory of package and type:\n\n\tpip install .\n")
try:
import numpy, scipy, matplotlib
except ModuleNotFoundError:
sys.exit("[ERROR] You don't have the required packages. Try reinstalling the package.")
| [
"sys.exit"
] | [((162, 278), 'sys.exit', 'sys.exit', (['"""[ERROR] Package AHRS not found. Go to root directory of package and type:\n\n\tpip install .\n"""'], {}), '(\n """[ERROR] Package AHRS not found. Go to root directory of package and type:\n\n\tpip install .\n"""\n )\n', (170, 278), False, 'import sys\n'), ((358, 455), 'sys.exit', 'sys.exit', (['"""[ERROR] You don\'t have the required packages. Try reinstalling the package."""'], {}), '(\n "[ERROR] You don\'t have the required packages. Try reinstalling the package."\n )\n', (366, 455), False, 'import sys\n')] |
import json
from unittest import mock
from unittest.mock import patch, call, sentinel, mock_open, Mock
import pytest
from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, \
parse_cmd_line_args
MOCK_JSON = '''
{
"barbells": "1*350", "dumbbells": "2*120", "sizes": [
{"weight": 5, "thickness": 30, "quantity": 6},
{"weight": 11.5, "thickness": 40, "quantity": 2}
]
}
'''
@patch("builtins.open", mock_open(read_data=MOCK_JSON))
@patch("truffshuff.GymStock", autospec=True)
def test_read_inventory_complete(patched_gym_stock):
barbells, dumbbells, weight_dict = read_inventory(sentinel.path)
inventory = json.loads(MOCK_JSON)
barbells_spec = inventory.get("barbells", "0")
barbells_spec = list(map(int, barbells_spec.split("*")))
dumbbells_spec = inventory.get("dumbbells", "0")
dumbbells_spec = list(map(int, dumbbells_spec.split("*")))
assert barbells == barbells_spec[0]
assert dumbbells == dumbbells_spec[0]
patched_gym_stock.check_bar_capacities.assert_called_once_with(barbells_spec, dumbbells_spec)
@patch("truffshuff.read_inventory", return_value=[1, 2, {sentinel.plate: 2}])
def test_accept_inventory_file_complete(patched_read_inventory):
gym_stock = accept_inventory_file(["-i", sentinel.path])
patched_read_inventory.assert_called_once_with(sentinel.path)
assert gym_stock.weight_dict == {sentinel.plate: 2}
@patch("truffshuff.read_inventory", return_value=[1, 2, {sentinel.plate: 0}])
@patch("truffshuff.GymStock", autospec=True)
def test_accept_inventory_file_wout_weights(mock_gymstock, patched_read_inventory):
gym_stock = accept_inventory_file(["-i", sentinel.path])
patched_read_inventory.assert_called_once_with(sentinel.path)
mock_gymstock.assert_called_once_with(1, 2)
mock_gymstock.return_value.elicit_weights.assert_called_once_with([sentinel.plate])
@patch("truffshuff.read_inventory", return_value=(0, 0, {sentinel.plate: 2}))
@patch("truffshuff.elicit_bars", return_value=mock.create_autospec(GymStock))
def test_accept_inventory_file_wout_bars(patched_elicit_bars, patched_read_inventory):
gym_stock = accept_inventory_file(["-i", sentinel.path])
patched_read_inventory.assert_called_once_with(sentinel.path)
patched_elicit_bars.assert_called_once_with()
assert gym_stock.weight_dict == {sentinel.plate: 2}
@patch("truffshuff.parse_cmd_line_args", return_value=Mock(spec=GymStock))
@patch("truffshuff.accept_inventory_file", return_value=None)
def test_parse_args(patched_accept_inventory, patched_parse_cmd_line):
parse_args(sentinel.arg_list)
patched_accept_inventory.assert_called_once_with(sentinel.arg_list)
patched_parse_cmd_line.assert_called_once_with(sentinel.arg_list)
patched_parse_cmd_line.return_value.balance_plates.assert_called_once_with()
@patch("builtins.input", side_effect=["1", "2"])
@patch("truffshuff.GymStock", autospec=True, STD_BARBELL_CAPACITY=sentinel.barbell_cap,
STD_DUMBBELL_CAPACITY=sentinel.dumbbell_cap)
def test_parse_cmd_line_args_interactive(mock_gym_stock, patched_input):
parse_cmd_line_args([])
mock_gym_stock.assert_called_once_with(1, 2)
patched_input.assert_has_calls([
call("How many barbells (append '*thread_length' to change from {}mm)? ".
format(mock_gym_stock.STD_BARBELL_THREAD_LEN)),
call("How many dumbbells (append '*thread_length' to change from {}mm)? ".
format(mock_gym_stock.STD_DUMBBELL_THREAD_LEN))
])
mock_gym_stock.return_value.elicit_weights.assert_called_once_with(DEFAULT_PLATES)
@patch("truffshuff.GymStock", autospec=True)
def test_parse_cmd_line_args_semi_interactive(mock_gym_stock):
parse_cmd_line_args(["1", "2"])
mock_gym_stock.assert_called_once_with("1", "2")
mock_gym_stock.return_value.elicit_weights.assert_called_once_with(DEFAULT_PLATES)
@patch("truffshuff.show_usage")
def test_parse_cmd_line_args_help(patched_show_usage):
parse_cmd_line_args(["--help"])
patched_show_usage.assert_called_once_with()
@patch("truffshuff.GymStock", autospec=True)
def test_parse_cmd_line_args_std_cmdline(mock_gym_stock):
weight_qtys = ["0", "4", "2"]
parse_cmd_line_args(["1", "2"] + weight_qtys)
mock_gym_stock.assert_called_once_with("1", "2")
mock_gym_stock.return_value.set_weights_quantities.assert_called_once_with(weight_qtys, DEFAULT_PLATES)
@patch("truffshuff.GymStock", autospec=True)
def test_parse_cmd_line_args_std_cmdline_fails(mock_gym_stock):
weight_qtys = ["0", "doh!", "2"]
mock_gym_stock.return_value.set_weights_quantities.side_effect = ValueError
with pytest.raises(SystemExit) as e_info:
parse_cmd_line_args(["1", "2"] + weight_qtys)
mock_gym_stock.assert_called_once_with("1", "2")
mock_gym_stock.return_value.set_weights_quantities.assert_called_once_with(weight_qtys, DEFAULT_PLATES)
@patch("truffshuff.GymStock", autospec=True)
def test_parse_cmd_line_args_mixed_cmdline_fails(mock_gym_stock):
weight_qtys = ["0", "2.5*12*4", "2"]
with pytest.raises(SystemExit) as e_info:
parse_cmd_line_args(["1", "2"] + weight_qtys)
mock_gym_stock.assert_called_once_with("1", "2")
mock_gym_stock.return_value.set_weights_quantities.assert_not_called()
@patch("truffshuff.GymStock", autospec=True)
def test_parse_cmd_line_args_custom_cmdline(mock_gym_stock):
weight_qtys = ["3*15*4", "6*25*4", "12*35*2"]
parse_cmd_line_args(["1", "2"] + weight_qtys)
mock_gym_stock.assert_called_once_with("1", "2")
mock_gym_stock.return_value.set_custom_weights.assert_called_once_with(weight_qtys)
@patch("truffshuff.GymStock", autospec=True)
def test_parse_cmd_line_args_custom_cmdline_fails(mock_gym_stock):
weight_qtys = ["3*15*4", "f**"]
mock_gym_stock.return_value.set_custom_weights.side_effect = ValueError
with pytest.raises(SystemExit) as e_info:
parse_cmd_line_args(["1", "2"] + weight_qtys)
mock_gym_stock.assert_called_once_with("1", "2")
mock_gym_stock.return_value.set_custom_weights.assert_called_once_with(weight_qtys)
@patch("builtins.input", side_effect=["2", "2000", "000", "0", "xA", "1", "4*120"])
def test_input_bar_specifier(patched_input):
assert [2] == input_bar_specifier("xyz", 32)
assert [2000] == input_bar_specifier("xyz", 32)
assert [0] == input_bar_specifier("xyz", 32)
assert [0] == input_bar_specifier("xyz", 32)
assert [1] == input_bar_specifier("xyz", 32)
assert [4, 120] == input_bar_specifier("xyz", 32)
| [
"json.loads",
"unittest.mock.Mock",
"truffshuff.parse_args",
"unittest.mock.create_autospec",
"unittest.mock.mock_open",
"truffshuff.input_bar_specifier",
"truffshuff.parse_cmd_line_args",
"pytest.raises",
"truffshuff.accept_inventory_file",
"unittest.mock.patch",
"truffshuff.read_inventory"
] | [((526, 569), 'unittest.mock.patch', 'patch', (['"""truffshuff.GymStock"""'], {'autospec': '(True)'}), "('truffshuff.GymStock', autospec=True)\n", (531, 569), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((1141, 1217), 'unittest.mock.patch', 'patch', (['"""truffshuff.read_inventory"""'], {'return_value': '[1, 2, {sentinel.plate: 2}]'}), "('truffshuff.read_inventory', return_value=[1, 2, {sentinel.plate: 2}])\n", (1146, 1217), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((1469, 1545), 'unittest.mock.patch', 'patch', (['"""truffshuff.read_inventory"""'], {'return_value': '[1, 2, {sentinel.plate: 0}]'}), "('truffshuff.read_inventory', return_value=[1, 2, {sentinel.plate: 0}])\n", (1474, 1545), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((1547, 1590), 'unittest.mock.patch', 'patch', (['"""truffshuff.GymStock"""'], {'autospec': '(True)'}), "('truffshuff.GymStock', autospec=True)\n", (1552, 1590), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((1941, 2017), 'unittest.mock.patch', 'patch', (['"""truffshuff.read_inventory"""'], {'return_value': '(0, 0, {sentinel.plate: 2})'}), "('truffshuff.read_inventory', return_value=(0, 0, {sentinel.plate: 2}))\n", (1946, 2017), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((2494, 2554), 'unittest.mock.patch', 'patch', (['"""truffshuff.accept_inventory_file"""'], {'return_value': 'None'}), "('truffshuff.accept_inventory_file', return_value=None)\n", (2499, 2554), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((2886, 2933), 'unittest.mock.patch', 'patch', (['"""builtins.input"""'], {'side_effect': "['1', '2']"}), "('builtins.input', side_effect=['1', '2'])\n", (2891, 2933), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((2935, 3071), 'unittest.mock.patch', 'patch', (['"""truffshuff.GymStock"""'], {'autospec': '(True)', 'STD_BARBELL_CAPACITY': 'sentinel.barbell_cap', 'STD_DUMBBELL_CAPACITY': 'sentinel.dumbbell_cap'}), "('truffshuff.GymStock', autospec=True, STD_BARBELL_CAPACITY=sentinel.\n barbell_cap, STD_DUMBBELL_CAPACITY=sentinel.dumbbell_cap)\n", (2940, 3071), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((3645, 3688), 'unittest.mock.patch', 'patch', (['"""truffshuff.GymStock"""'], {'autospec': '(True)'}), "('truffshuff.GymStock', autospec=True)\n", (3650, 3688), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((3931, 3961), 'unittest.mock.patch', 'patch', (['"""truffshuff.show_usage"""'], {}), "('truffshuff.show_usage')\n", (3936, 3961), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((4105, 4148), 'unittest.mock.patch', 'patch', (['"""truffshuff.GymStock"""'], {'autospec': '(True)'}), "('truffshuff.GymStock', autospec=True)\n", (4110, 4148), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((4455, 4498), 'unittest.mock.patch', 'patch', (['"""truffshuff.GymStock"""'], {'autospec': '(True)'}), "('truffshuff.GymStock', autospec=True)\n", (4460, 4498), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((4944, 4987), 'unittest.mock.patch', 'patch', (['"""truffshuff.GymStock"""'], {'autospec': '(True)'}), "('truffshuff.GymStock', autospec=True)\n", (4949, 4987), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((5326, 5369), 'unittest.mock.patch', 'patch', (['"""truffshuff.GymStock"""'], {'autospec': '(True)'}), "('truffshuff.GymStock', autospec=True)\n", (5331, 5369), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((5675, 5718), 'unittest.mock.patch', 'patch', (['"""truffshuff.GymStock"""'], {'autospec': '(True)'}), "('truffshuff.GymStock', autospec=True)\n", (5680, 5718), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((6142, 6228), 'unittest.mock.patch', 'patch', (['"""builtins.input"""'], {'side_effect': "['2', '2000', '000', '0', 'xA', '1', '4*120']"}), "('builtins.input', side_effect=['2', '2000', '000', '0', 'xA', '1',\n '4*120'])\n", (6147, 6228), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((662, 691), 'truffshuff.read_inventory', 'read_inventory', (['sentinel.path'], {}), '(sentinel.path)\n', (676, 691), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((708, 729), 'json.loads', 'json.loads', (['MOCK_JSON'], {}), '(MOCK_JSON)\n', (718, 729), False, 'import json\n'), ((493, 523), 'unittest.mock.mock_open', 'mock_open', ([], {'read_data': 'MOCK_JSON'}), '(read_data=MOCK_JSON)\n', (502, 523), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((1299, 1343), 'truffshuff.accept_inventory_file', 'accept_inventory_file', (["['-i', sentinel.path]"], {}), "(['-i', sentinel.path])\n", (1320, 1343), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((1691, 1735), 'truffshuff.accept_inventory_file', 'accept_inventory_file', (["['-i', sentinel.path]"], {}), "(['-i', sentinel.path])\n", (1712, 1735), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((2199, 2243), 'truffshuff.accept_inventory_file', 'accept_inventory_file', (["['-i', sentinel.path]"], {}), "(['-i', sentinel.path])\n", (2220, 2243), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((2630, 2659), 'truffshuff.parse_args', 'parse_args', (['sentinel.arg_list'], {}), '(sentinel.arg_list)\n', (2640, 2659), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((3151, 3174), 'truffshuff.parse_cmd_line_args', 'parse_cmd_line_args', (['[]'], {}), '([])\n', (3170, 3174), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((3756, 3787), 'truffshuff.parse_cmd_line_args', 'parse_cmd_line_args', (["['1', '2']"], {}), "(['1', '2'])\n", (3775, 3787), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((4021, 4052), 'truffshuff.parse_cmd_line_args', 'parse_cmd_line_args', (["['--help']"], {}), "(['--help'])\n", (4040, 4052), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((4245, 4290), 'truffshuff.parse_cmd_line_args', 'parse_cmd_line_args', (["(['1', '2'] + weight_qtys)"], {}), "(['1', '2'] + weight_qtys)\n", (4264, 4290), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((5485, 5530), 'truffshuff.parse_cmd_line_args', 'parse_cmd_line_args', (["(['1', '2'] + weight_qtys)"], {}), "(['1', '2'] + weight_qtys)\n", (5504, 5530), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((2064, 2094), 'unittest.mock.create_autospec', 'mock.create_autospec', (['GymStock'], {}), '(GymStock)\n', (2084, 2094), False, 'from unittest import mock\n'), ((2472, 2491), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'GymStock'}), '(spec=GymStock)\n', (2476, 2491), False, 'from unittest.mock import patch, call, sentinel, mock_open, Mock\n'), ((4689, 4714), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (4702, 4714), False, 'import pytest\n'), ((4734, 4779), 'truffshuff.parse_cmd_line_args', 'parse_cmd_line_args', (["(['1', '2'] + weight_qtys)"], {}), "(['1', '2'] + weight_qtys)\n", (4753, 4779), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((5104, 5129), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (5117, 5129), False, 'import pytest\n'), ((5149, 5194), 'truffshuff.parse_cmd_line_args', 'parse_cmd_line_args', (["(['1', '2'] + weight_qtys)"], {}), "(['1', '2'] + weight_qtys)\n", (5168, 5194), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((5907, 5932), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (5920, 5932), False, 'import pytest\n'), ((5952, 5997), 'truffshuff.parse_cmd_line_args', 'parse_cmd_line_args', (["(['1', '2'] + weight_qtys)"], {}), "(['1', '2'] + weight_qtys)\n", (5971, 5997), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((6288, 6318), 'truffshuff.input_bar_specifier', 'input_bar_specifier', (['"""xyz"""', '(32)'], {}), "('xyz', 32)\n", (6307, 6318), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((6340, 6370), 'truffshuff.input_bar_specifier', 'input_bar_specifier', (['"""xyz"""', '(32)'], {}), "('xyz', 32)\n", (6359, 6370), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((6389, 6419), 'truffshuff.input_bar_specifier', 'input_bar_specifier', (['"""xyz"""', '(32)'], {}), "('xyz', 32)\n", (6408, 6419), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((6438, 6468), 'truffshuff.input_bar_specifier', 'input_bar_specifier', (['"""xyz"""', '(32)'], {}), "('xyz', 32)\n", (6457, 6468), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((6487, 6517), 'truffshuff.input_bar_specifier', 'input_bar_specifier', (['"""xyz"""', '(32)'], {}), "('xyz', 32)\n", (6506, 6517), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n'), ((6541, 6571), 'truffshuff.input_bar_specifier', 'input_bar_specifier', (['"""xyz"""', '(32)'], {}), "('xyz', 32)\n", (6560, 6571), False, 'from truffshuff import parse_args, input_bar_specifier, DEFAULT_PLATES, accept_inventory_file, read_inventory, GymStock, parse_cmd_line_args\n')] |
import MetaTrader5 as _mt5
from collections import namedtuple
from typing import Callable
from typing import Iterable
from typing import Tuple
from typing import Union
from typing import Any
from typing import Optional
from typing import Type
# custom namedtuples
CopyRate = namedtuple("CopyRate", "time, open, high, low, close, tick_volume, spread, real_volume")
CopyTick = namedtuple("CopyTick", "time, bid, ask, last, volume, time_msc, flags, volume_real")
# MT5 namedtuple objects for typing
Tick = _mt5.Tick
AccountInfo = _mt5.AccountInfo
SymbolInfo = _mt5.SymbolInfo
TerminalInfo = _mt5.TerminalInfo
OrderCheckResult = _mt5.OrderCheckResult
OrderSendResult = _mt5.OrderSendResult
TradeOrder = _mt5.TradeOrder
TradeDeal = _mt5.TradeDeal
TradeRequest = _mt5.TradeRequest
TradePosition = _mt5.TradePosition
| [
"collections.namedtuple"
] | [((277, 369), 'collections.namedtuple', 'namedtuple', (['"""CopyRate"""', '"""time, open, high, low, close, tick_volume, spread, real_volume"""'], {}), "('CopyRate',\n 'time, open, high, low, close, tick_volume, spread, real_volume')\n", (287, 369), False, 'from collections import namedtuple\n'), ((377, 465), 'collections.namedtuple', 'namedtuple', (['"""CopyTick"""', '"""time, bid, ask, last, volume, time_msc, flags, volume_real"""'], {}), "('CopyTick',\n 'time, bid, ask, last, volume, time_msc, flags, volume_real')\n", (387, 465), False, 'from collections import namedtuple\n')] |
import datetime
import os
import pickle
from decimal import Decimal
from decimal import ROUND_HALF_UP
from functools import total_ordering
import utils
from enums import OrderMode
from error import StopValueError
from submodule.Xu3.utils import getLogger
# total_ordering: 使得我可以只定義 __eq__ 和 __gt__ 就可進行完整的比較
# https://python3-cookbook.readthedocs.io/zh_CN/latest/c08/p24_making_classes_support_comparison_operations.html
@total_ordering
class Order:
"""
TODO: 思考 Order 是否需要 stock_id
revenue: 交易收入(基本上都會是正的)
cost: 交易成本(含買賣成本及手續費等,皆為正數)
income: 交易收入 - 交易成本(可正可負)
"""
def __init__(self, guid, time, price: Decimal, stop_value: Decimal, volumn: int = 1,
discount: Decimal = Decimal("1"), is_etf=False, order_mode=OrderMode.Long):
# 全域唯一識別碼
self.guid = guid
# 買入價格
self.price = Decimal("0")
# 買入張數
self.bought_volumn = 0
# 可交易的數量
self.volumn = 0
# 已賣出張數
self.sold_volumn = 0
# 是否已全部賣光?
self.sold_out = False
# (首次)購買時間
self.buy_time = None
# (最終)售出時間
self.sell_time = None
# 券商手續費折扣
self.discount = discount
# 是否為 ETF
self.is_etf = is_etf
# 購買成本(因股票價格不同而不同) = 股票購買成本 + 券商手續費
self.buy_cost = Decimal("0")
# 出售成本(因股票價格不同而不同) = 券商手續費 + 政府之證交稅
self.sell_cost = Decimal("0")
# 營業額
self.revenue = Decimal("0")
# 報酬率 return_rate = 1.XX or 2.XX
self.return_rate = Decimal("0")
# order_mode 在策略階段就決定,進而決定停損價位
self.order_mode = order_mode
# 停損/停利 價格(在 Order 形成的同時就應存在)
self.stop_value = Decimal("0")
# 紀錄 stop_value 歷程(可用於計算每個 order 平均調整多少次、平均調整金額為多少,用於預測最終價格)
self.stop_value_moving = []
# 首次購買
self.buy(time=time, price=price, volumn=volumn, stop_value=stop_value)
# 用於事後追加本應為同一請求的 Order
def __add__(self, other):
if other.guid == self.guid:
self.buy(time=other.time, price=other.price, volumn=other.volumn, stop_value=other.stop_value)
# 用於事後追加本應為同一請求的 Order
def __sub__(self, other):
if other.guid == self.guid:
self.sell(sell_time=other.sell_time, sell_price=other.sell_price, volumn=other.sell_volumn, is_trial=False)
def __repr__(self):
return f"Order(guid: {self.guid}, time: {self.buy_time}, price: {self.price}, stop_value: {self.stop_value}" \
f"\nbought_volumn: {self.bought_volumn}, sold_volumn: {self.sold_volumn}, " \
f"revenue: {self.revenue}, buy_cost: {self.buy_cost}, sell_cost: {self.sell_cost})"
__str__ = __repr__
def toString(self, time: datetime = None, price: Decimal = None):
description = f"Order(time: {self.buy_time}, price: {self.price}, stop_value: {self.stop_value})"
description += f"\nguid: {self.guid}"
description += f"\nbought_volumn: {self.bought_volumn}, sold_volumn: {self.sold_volumn}, " \
f"buy_cost: {self.buy_cost}, sell_cost: {self.sell_cost}"
if time is not None:
_, _, (_, revenue, buy_cost, sell_cost) = self.sell(sell_time=time,
sell_price=price,
volumn=None,
is_trial=True)
income = revenue - buy_cost - sell_cost
description += f"\nrevenue: {revenue}, income: {income}"
return description
# region total_ordering: 使得我可以只定義 __eq__ 和 __gt__ 就可進行完整的比較
# https://python3-cookbook.readthedocs.io/zh_CN/latest/c08/p24_making_classes_support_comparison_operations.html
def __eq__(self, other):
return (self.stop_value == other.stop_value and
self.bought_volumn == other.volumn and
self.buy_time == other.buy_time)
def __gt__(self, other):
# __gt__: 一般排序後會被放在後面
# OrderMode.Long: stop_value 越小越後面,越大越前面 -> gt = True
# OrderMode.Short: stop_value 越大越後面,越小越前面 -> gt = False
gt = self.order_mode == OrderMode.Long
if self.stop_value < other.stop_value:
return gt
elif self.stop_value > other.stop_value:
return not gt
else:
# 數量越大越後面
if self.bought_volumn > other.volumn:
return True
elif self.bought_volumn < other.volumn:
return False
else:
# 時間越晚越後面
if self.buy_time > other.buy_time:
return True
elif self.buy_time < other.buy_time:
return False
else:
# self.price == other.price and self.volumn == other.volumn and self.time == other.time
return False
# endregion
# 考慮可能無法一次購買到指定數量的情形,可追加數量(並更新 價格 和 stop_value 等數據)
def buy(self, time: datetime.datetime, price: Decimal, volumn: int, stop_value: Decimal):
total_volumn = Decimal(str(self.bought_volumn + volumn))
# 更新買入價格(根據先後購買量進行價格的加權)
origin_weight = self.bought_volumn / total_volumn
append_weight = volumn / total_volumn
self.price = (self.price * origin_weight + price * append_weight).quantize(Decimal('.00'), ROUND_HALF_UP)
# 追加買入張數
self.bought_volumn = int(total_volumn)
# 追加可交易數量
self.volumn += int(volumn)
# 若 self.time 為 None 才初始化,後續追加的時間不應覆蓋,才能正確計算總歷時
if self.buy_time is None:
self.buy_time = time
# 追加購買成本(因股票價格不同而不同) = 股票購買成本 + 券商手續費
self.buy_cost += self.getBuyCost(price, volumn, self.discount)
# 更新 stop_value
self.stop_value = stop_value
# 考慮可能會分批賣出,營業額、成本等數值會累加上去
def sell(self, sell_time: datetime.datetime, sell_price: Decimal = None, volumn: int = None, is_trial=False):
if self.bought_volumn == self.sold_volumn:
print(f"此 Order 已完成交易\n{self}")
self.sold_out = True
return
if self.sell_time is None:
self.sell_time = sell_time
# 若沒有給 sell_price 的數值,則以 stop_value 作為售價來計算
if sell_price is None:
sell_price = self.stop_value
if volumn is None:
# 尚未賣出的部分,若之前沒有部分賣出,則賣出全部
volumn = self.volumn
sold_volumn = self.sold_volumn + volumn
# 判斷是否為當沖
is_day_trading = sell_time.date() == self.buy_time.date()
# 營業額
revenue = self.revenue + sell_price * volumn * 1000
# 總成本(購買成本 + 售出成本): 部分賣出時,購買成本根據賣出比例計算
buy_cost = self.buy_cost * (Decimal(str(volumn)) / self.bought_volumn)
# 剩餘 buy_cost = self.buy_cost * (float(self.volumn) / self.bought_volumn)
# 出售成本(因股票價格不同而不同) = 券商手續費 + 政府的證交稅(考慮到可能有部分賣出的情形而設計)
sell_cost = self.sell_cost + self.getSellCost(sell_price,
is_etf=self.is_etf,
discount=self.discount,
is_day_trading=is_day_trading,
volumn=volumn)
# 並非試算模式
if not is_trial:
# 追加已售出數量
self.sold_volumn = sold_volumn
# 減少可交易數量
self.volumn -= volumn
# 是否已全部賣光?
self.sold_out = self.sold_volumn == self.bought_volumn
# 更新營業額
self.revenue = revenue
# 更新售出成本
self.sell_cost = sell_cost
return self.guid, (self.buy_time, self.price, sold_volumn), (sell_price, revenue, buy_cost, sell_cost)
def modifyStopValue(self, stop_value: Decimal, is_force=False):
# 強制模式(不考慮做多還是做空)
if is_force:
# 新舊 stop_value 變化量
delta_value = stop_value - self.stop_value
return self.modifyStopValueDelta(delta_value)
else:
# 做多: stop_value 應越來越高
if self.order_mode == OrderMode.Long:
if self.stop_value < stop_value:
self.stop_value_moving.append(stop_value - self.stop_value)
self.stop_value = stop_value
return stop_value
else:
return Decimal("0")
# TODO: 未來若是操作到會有負值的商品,例如負油價,返回值等可能需要做項對應的修改,目前假設價格都是正的
# 做空: stop_value 應越來越低
elif self.order_mode == OrderMode.Short:
if self.stop_value > stop_value:
self.stop_value_moving.append(self.stop_value - stop_value)
self.stop_value = stop_value
return stop_value
else:
return 0
else:
raise StopValueError(self.order_mode, self.stop_value, stop_value)
# 預設就是強制模式,在特殊情況下對 stop_value 進行調整
def modifyStopValueDelta(self, delta_value: Decimal):
# 紀錄 stop_value 變化
self.stop_value_moving.append(delta_value)
# 更新 stop_value
self.stop_value += delta_value
return self.stop_value
def getStopValue(self):
return self.stop_value
""" 買股票的交易成本
https://www.cmoney.tw/learn/course/cmoney/topic/152
"""
@staticmethod
def getBuyCost(price: Decimal, volumn: int = 1, discount: Decimal = Decimal("1")) -> Decimal:
return price * volumn * 1000 + utils.alphaCost(price, discount, volumn=volumn)
@staticmethod
def getSellCost(sell_price: Decimal, volumn=1, is_etf=False, is_day_trading=False,
discount: Decimal = Decimal("1")) -> Decimal:
return (utils.alphaCost(price=sell_price, discount=discount, volumn=volumn) +
utils.betaCost(price=sell_price, is_etf=is_etf, is_day_trading=is_day_trading, volumn=volumn))
class OrderList:
def __init__(self, stock_id: str,
logger_dir="order_list", logger_name=datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")):
self.logger_dir = logger_dir
self.logger_name = logger_name
self.extra = {"className": self.__class__.__name__}
self.logger = getLogger(logger_name=self.logger_name,
to_file=True,
time_file=False,
file_dir=self.logger_dir,
instance=True)
self.orders = dict(sold_out=[], un_sold_out=[])
# 股票代碼
self.stock_id = stock_id
def __repr__(self):
return self.toString(value=None)
__str__ = __repr__
def __iter__(self):
for order in self.orders:
yield order
def add(self, order: Order):
self.orders["un_sold_out"].append(order)
self.orders["un_sold_out"].sort()
self.save()
def getOrder(self, guid, has_sold_out=False):
"""
根據 order 的 guid,取得 order
:param guid: order 的 全域唯一識別碼
:param has_sold_out: 若知道該 Order 是否已被售出,可調整尋找順序以加速找到(若給錯還是會去另一邊尋找)
:return:
"""
if has_sold_out:
# 優先尋找已被售出的 Order
keys = ["sold_out", "un_sold_out"]
else:
# 優先尋找尚未被售出的 Order
keys = ["un_sold_out", "sold_out"]
for key in keys:
orders = self.orders[key]
for order in orders:
if order.guid == guid:
return order
return None
def getOrders(self, has_sold_out=False):
if has_sold_out:
orders = self.orders["sold_out"]
else:
orders = self.orders["un_sold_out"]
return orders
def sort(self):
for orders in self.orders.values():
# orders 排序的定義是根據 Order 定義的大小來排序的
orders.sort()
def modifyStopValue(self, price, is_force=False):
"""
對未售出的 Order 進行 stop_value 的調整
:param price: 根據此價格計算新的 stop_value
:param is_force: 一般情況下,停損價只漲不跌,若 is_force = True,可以強制修改
:return:
"""
is_modified = False
for order in self.orders["un_sold_out"]:
origin_stop_value = order.stop_value
return_code = order.modifyStopValue(price, is_force=is_force)
if return_code == 0:
if order.order_mode == OrderMode.Long:
self.logger.debug(f"({self.stock_id}) 做多: stop_value 應越來越高, "
f"self.stop_value: {origin_stop_value}, stop_value: {price}", extra=self.extra)
elif order.order_mode == OrderMode.Short:
self.logger.debug(f"({self.stock_id}) 做空: stop_value 應越來越低, "
f"self.stop_value: {origin_stop_value}, stop_value: {price}", extra=self.extra)
else:
# 若之後想要呈現從多少上移 stop_value 到多少,可以讓 setStopValue 的 return_code
# 回傳原始和新數值之間的價差,order 本身可以取得新 stop_value,搭配價差可算出原始 stop_value
self.logger.info("({}) Update stop_value {:.2f} -> {:.2f}".format(
self.stock_id, origin_stop_value, price), extra=self.extra)
is_modified = True
# orders 當中只要有一筆成功被調整,就會返回 True
return is_modified
# 預設就是強制模式,在特殊情況下對 stop_value 進行調整
def modifyStopValueDelta(self, delta_value: Decimal):
for order in self.orders["un_sold_out"]:
origin_stop_value = order.stop_value
new_stop_value = order.modifyStopValueDelta(delta_value=delta_value)
self.logger.info(f"({self.stock_id}) {origin_stop_value} -> {new_stop_value}", extra=self.extra)
def sell(self, sell_time: datetime.datetime, sell_price: Decimal = None, guid: str = "", sell_volumn: int = 0,
is_trial: bool = False):
trade_records = []
# 優先從未售出的部分尋找
order = self.getOrder(guid=guid, has_sold_out=False)
if order is None:
self.logger.error(f"No Order({guid})", extra=self.extra)
elif order.sold_out:
self.logger.error(f"Order({guid}) has been sold out.", extra=self.extra)
else:
self.logger.info(f"Sell Order({guid})", extra=self.extra)
# 在外部判斷可以成交才會進入此處,因此無須再檢查價格相關資訊
# sell(self, sell_time: datetime.datetime, sell_price: float, volumn: int = None, is_trial: bool)
(guid,
(buy_time, buy_price, buy_volumn),
(sell_price, revenue, buy_cost, sell_cost)) = order.sell(sell_time=sell_time,
sell_price=sell_price,
volumn=sell_volumn,
is_trial=is_trial)
# 將 stop_value 變化幅度返回,並由 History 來紀錄
# 紀錄 stop_value 平均調整次數,搭配平均調整金額,可預測最終價格
trade_record = [guid, buy_time, buy_price, buy_volumn, sell_time, sell_price, sell_volumn,
revenue, buy_cost, sell_cost, order.stop_value_moving]
trade_records.append(trade_record)
# 該 Order 所買入的都賣出
if order.sold_out:
# 由於已完全售出,因此由 un_sold_out 移到 sold_out 管理
self.orders["sold_out"].append(order)
self.orders["un_sold_out"].remove(order)
self.save()
return trade_records
def clear(self, sell_time: datetime.datetime, sell_price: Decimal = None, is_trial: bool = False):
"""
試算出清結果,由於是試算,因此沒有實際對庫存做增減,只計算價值
TODO: 目前沒有考慮價格與數量,之後若有實際要使用,需考慮進去
無視價格高低,全部賣出
:param sell_time:
:param sell_price:
:param is_trial:
:return:
"""
trade_records = []
for order in self.orders["un_sold_out"]:
# order.sell 只負責計算當前狀態賣出的結果,是否是自己要的價格需要自己判斷
(guid,
(buy_time, buy_price, buy_volumn),
(sell_price, revenue, buy_cost, sell_cost)) = order.sell(sell_time=sell_time,
sell_price=sell_price,
is_trial=is_trial)
# 印出售出的 order
self.logger.info(f"({self.stock_id})\n{order}", extra=self.extra)
# 將 stop_value 變化幅度返回,並由 History 來紀錄
# order.volumn: 該 order 所擁有的可交易的數量
trade_record = [guid, buy_time, buy_price, buy_volumn, sell_time, sell_price, order.volumn,
revenue, buy_cost, sell_cost, order.stop_value_moving]
trade_records.append(trade_record)
if not is_trial:
self.orders["sold_out"] += self.orders["un_sold_out"]
self.orders["sold_out"].sort()
self.orders["un_sold_out"] = []
# 不考慮是否為試算模式,皆返回模擬交易後的結果
return trade_records
def getOrderNumber(self):
return len(self.orders["un_sold_out"])
def setLoggerLevel(self, level):
self.logger.setLevel(level)
def toString(self, value: float = None, time: datetime = None, price: Decimal = None,
exclude_sold_out: bool = True):
# TODO: value is None -> value = order.stop_value
description = f"===== OrderList({self.stock_id}) ====="
cost = Decimal("0")
n_order = 0
for order in self.orders["un_sold_out"]:
description += f"\n{order.toString(time, price)}"
cost += order.buy_cost
n_order += 1
# 若不排除已售出的 Order
if not exclude_sold_out:
description += "\n<<<<< 已售出 >>>>>"
for order in self.orders["sold_out"]:
description += f"\n{order.toString(time, price)}"
cost += order.buy_cost
n_order += 1
if value is None:
description += f"\n== 共有 {n_order} 個 order, 共花費 {cost} 元 =="
else:
description += f"\n== 共有 {n_order} 個 order, 共花費 {cost} 元, 價值 {value} 元 =="
return description
def getSellRequests(self, date_time: datetime.datetime):
# 交易請求: (date_time, price, volumn)
sell_requests = []
orders = self.orders["un_sold_out"]
for order in orders:
if not order.sold_out:
# order.volumn: 該 order 剩餘可交易數量
sell_requests.append([order.guid, date_time, order.stop_value, order.volumn])
return sell_requests
def save(self, file_name=None):
if file_name is None:
file_name = self.stock_id
with open(f"data/order_list/{file_name}.pickle", "wb") as f:
pickle.dump(self, f)
def load(self, file_name=None):
if file_name is None:
file_name = self.stock_id
path = f"data/order_list/{file_name}.pickle"
if os.path.exists(path):
with open(path, "rb") as f:
order_list = pickle.load(f)
self.orders = order_list.orders
del order_list
if __name__ == "__main__":
import utils.globals_variable as gv
from data import StockCategory
gv.initialize()
trade_records = """0056,2020-06-05,2020-06-09,28.78,28.80,1,28807,55,-62
2892,2020-06-10,2020-06-11,23.35,23.0,1,23372,89,-461
6005,2020-06-05,2020-06-11,10.15,10.30,1,10170,50,80
5880,2020-06-16,2020-06-17,20.80,20.80,1,20820,82,-102
2888,2020-06-15,2020-06-17,8.56,8.63,1,8580,45,5
6005,2020-07-07,2020-07-08,11.05,10.90,1,11070,52,-222
2823,2020-07-07,2020-07-10,22.90,22.40,1,22920,87,-607
2888,2020-07-07,2020-07-10,8.87,8.72,1,8890,46,-216
3048,2021-02-26,2021-03-16,24.05,32,1,24070,116,7814
1712,2021-03-29,2021-04-14,22.6,21.75,1,22620,85,-955
1310,2021-03-24,2021-04-21,19.05,21.65,1,19070.00,84,2496.00
2012,2021-03-23,2021-04-23,19.95,25.10,1,19970.00,95,5232.00
2012,2021-03-23,2021-04-23,0,0,0,0,0,590
2419,2021-04-28,2021-05-03,25.55,23.55,1,25570.00,90,-2110.00
3049,2021-03-29,2021-05-03,11.70,13.50,1,11720.0,60,1720.00
2329,2021-04-08,2021-05-04,17.40,17.45,1,17420.0,72,-42.00
2442,2021-03-23,2021-05-04,10.80,11.20,1,10820.0,53,327.00
5519,2021-04-07,2021-05-04,19.10,22.25,1,19120.0,86,3044.00
1417,2021-04-12,2021-05-04,12.55,13.90,1,12570.00,61,1269.00
2527,2021-03-18,2021-05-05,21.40,22.90,1,21420.0,88,1392.00
1732,2021-04-29,2021-05-05,28.50,28.40,1,28526.00,105,-225.00
1712,2021-03-29,2021-05-10,0,0,0,0,0,1290
2855,2021-03-22,2021-05-12,20.90,27.40,1,20920.0,102,6378.00
2880,2021-04-13,2021-05-12,18.80,17.80,1,18820.0,73,-1093.00
2892,2021-04-14,2021-05-12,22.30,20.70,1,22320.0,82,-1702.00
2890,2021-04-15,2021-05-12,12.85,12.60,1,12870.00,57,-327.00
2887,2021-04-15,2021-05-17,13.50,13.65,1,13520.0,60,70.00
6165,2021-05-25,2021-06-02,32.80,30.60,1,32820.00,111,-2331.00
3535,2021-05-25,2021-06-07,16.65,16.50,1,16670.00,69,-239.00
6205,2021-06-02,2021-06-07,31.60,29.65,1,31620.00,108,-2078.00
1108,2021-06-03,2021-06-07,14.50,13.65,1,14520.00,60,-930.00
4960,2021-05-31,2021-06-09,12.55,11.80,1,12570.00,55,-825.00
2390,2021-06-08,2021-06-15,25.00,29.75,1,25020.00,109,4621.00
8213,2021-06-11,2021-06-29,50.60,47.95,1,50647.00,163,-2835.00
1732,2021-06-28,2021-06-30,36.85,34.80,1,36884.00,124,-2194.00
1417,2021-06-25,2021-07-06,15.90,15.85,1,15920.00,67,-137.00
2885,2021-06-28,2021-07-12,26.55,25.60,1,26570.00,96,-1066.00
2390,2021-07-07,2021-07-13,26.80,25.40,1,26820.00,96,-1516.00
8478,2021-06-21,2021-07-13,57.60,68.60,1,57624.00,234,10742.00
6172,2021-07-05,2021-07-13,40.60,39.50,1,40620.00,138,-1258.00
2392,2021-05-27,2021-07-20,40.25,42.45,1,40270.00,146,1584.00
8213,2021-06-11,2021-07-27,0,0,0,0,0,3490.0
2885,2021-06-28,2021-07-12,0,0,0,0,0,1190.0
00639,2021-05-21,2021-07-27,17.25,16.45,1,17270.00,36,-856.00
00739,2021-04-26,2021-07-28,28.09,26.70,1,28110.00,46,-1456.00
4942,2021-05-31,2021-07-28,42.80,50.40,1,42820.00,171,7408.00
8103,2021-07-02,2021-07-28,43.65,45.60,1,43690.00,156,1774.00
8478,2021-07-23,2021-07-28,69.90,67.70,1,69965.00,232,-2462.00
2390,2021-07-30,2021-08-10,27.35,26.30,1,27370.00,98,-1168.00
00757,2021-06-11,2021-08-10,46.21,49.47,1,46230.00,70,3170.00
00668,2021-07-05,2021-08-10,35.09,35.27,1,35110.00,55,105.00
00762,2021-07-05,2021-08-10,41.54,42.03,1,41560.00,62,408.00
00646,2021-07-08,2021-08-10,37.33,37.80,1,37350.00,57,393.00
2855,2021-07-01,2021-08-12,26.95,26.10,1,26975.00,102,-977.00
1417,2021-07-26,2021-08-12,15.75,14.65,1,15770.00,63,-1183.00
1732,2021-07-30,2021-08-12,30.00,28.50,1,30028.00,111,-1639.00
2885,2021-08-05,2021-08-12,25.90,24.80,1,25924.00,97,-1221.00
2597,2021-08-13,2021-08-16,149.50,147.0,1,149564.00,503,-3067.00
6172,2021-07-22,2021-08-18,42.00,41.75,1,42020.00,145,-415.00
2392,2021-05-27,2021-07-20,0,0,0,0,0,2490.00
2545,2021-05-31,2021-08-25,40.95,37.45,1,40970.00,132,-3652.00
3003,2021-08-23,2021-08-27,93.50,95.60,1,93540.00,327,1733.00
4989,2021-08-26,2021-09-06,44.90,42.05,1,44920.0,146,-3016.00
3711,2021-08-25,2021-09-07,123.00,119.50,1,123052.00,409,-3961.00
3048,2021-08-25,2021-09-07,35.85,31.55,1,35870.00,114,-4434.00
3037,2021-08-27,2021-09-07,146.00,144.50,1,146062.00,494,-2056.00"""
trs = trade_records.split("\n")
for tr in trs:
stock_id, buy_time, sell_time, buy_price, sell_price, vol, buy_cost, sell_cost, revenue = tr.split(",")
is_etf = StockCategory.isEtf(stock_id=stock_id)
bc = Order.getBuyCost(price=Decimal(buy_price), discount=gv.e_capital_discount)
sc = Order.getSellCost(sell_price=Decimal(sell_price), discount=gv.e_capital_discount, is_etf=is_etf)
if bc != Decimal(buy_cost) or sc != Decimal(sell_cost):
print(f"bc: {bc}, sc: {sc}, is_etf: {is_etf}\n{tr}")
| [
"os.path.exists",
"pickle.dump",
"data.StockCategory.isEtf",
"utils.betaCost",
"utils.globals_variable.initialize",
"pickle.load",
"error.StopValueError",
"datetime.datetime.now",
"utils.alphaCost",
"submodule.Xu3.utils.getLogger",
"decimal.Decimal"
] | [((19141, 19156), 'utils.globals_variable.initialize', 'gv.initialize', ([], {}), '()\n', (19154, 19156), True, 'import utils.globals_variable as gv\n'), ((719, 731), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (726, 731), False, 'from decimal import Decimal\n'), ((855, 867), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (862, 867), False, 'from decimal import Decimal\n'), ((1321, 1333), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (1328, 1333), False, 'from decimal import Decimal\n'), ((1404, 1416), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (1411, 1416), False, 'from decimal import Decimal\n'), ((1455, 1467), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (1462, 1467), False, 'from decimal import Decimal\n'), ((1537, 1549), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (1544, 1549), False, 'from decimal import Decimal\n'), ((1692, 1704), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (1699, 1704), False, 'from decimal import Decimal\n'), ((9412, 9424), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (9419, 9424), False, 'from decimal import Decimal\n'), ((9671, 9683), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (9678, 9683), False, 'from decimal import Decimal\n'), ((10219, 10334), 'submodule.Xu3.utils.getLogger', 'getLogger', ([], {'logger_name': 'self.logger_name', 'to_file': '(True)', 'time_file': '(False)', 'file_dir': 'self.logger_dir', 'instance': '(True)'}), '(logger_name=self.logger_name, to_file=True, time_file=False,\n file_dir=self.logger_dir, instance=True)\n', (10228, 10334), False, 'from submodule.Xu3.utils import getLogger\n'), ((17331, 17343), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (17338, 17343), False, 'from decimal import Decimal\n'), ((18847, 18867), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (18861, 18867), False, 'import os\n'), ((23275, 23313), 'data.StockCategory.isEtf', 'StockCategory.isEtf', ([], {'stock_id': 'stock_id'}), '(stock_id=stock_id)\n', (23294, 23313), False, 'from data import StockCategory\n'), ((5358, 5372), 'decimal.Decimal', 'Decimal', (['""".00"""'], {}), "('.00')\n", (5365, 5372), False, 'from decimal import Decimal\n'), ((9477, 9524), 'utils.alphaCost', 'utils.alphaCost', (['price', 'discount'], {'volumn': 'volumn'}), '(price, discount, volumn=volumn)\n', (9492, 9524), False, 'import utils\n'), ((9713, 9780), 'utils.alphaCost', 'utils.alphaCost', ([], {'price': 'sell_price', 'discount': 'discount', 'volumn': 'volumn'}), '(price=sell_price, discount=discount, volumn=volumn)\n', (9728, 9780), False, 'import utils\n'), ((9799, 9897), 'utils.betaCost', 'utils.betaCost', ([], {'price': 'sell_price', 'is_etf': 'is_etf', 'is_day_trading': 'is_day_trading', 'volumn': 'volumn'}), '(price=sell_price, is_etf=is_etf, is_day_trading=\n is_day_trading, volumn=volumn)\n', (9813, 9897), False, 'import utils\n'), ((18655, 18675), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (18666, 18675), False, 'import pickle\n'), ((10005, 10028), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10026, 10028), False, 'import datetime\n'), ((18938, 18952), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (18949, 18952), False, 'import pickle\n'), ((23350, 23368), 'decimal.Decimal', 'Decimal', (['buy_price'], {}), '(buy_price)\n', (23357, 23368), False, 'from decimal import Decimal\n'), ((23444, 23463), 'decimal.Decimal', 'Decimal', (['sell_price'], {}), '(sell_price)\n', (23451, 23463), False, 'from decimal import Decimal\n'), ((23530, 23547), 'decimal.Decimal', 'Decimal', (['buy_cost'], {}), '(buy_cost)\n', (23537, 23547), False, 'from decimal import Decimal\n'), ((23557, 23575), 'decimal.Decimal', 'Decimal', (['sell_cost'], {}), '(sell_cost)\n', (23564, 23575), False, 'from decimal import Decimal\n'), ((8369, 8381), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (8376, 8381), False, 'from decimal import Decimal\n'), ((8846, 8906), 'error.StopValueError', 'StopValueError', (['self.order_mode', 'self.stop_value', 'stop_value'], {}), '(self.order_mode, self.stop_value, stop_value)\n', (8860, 8906), False, 'from error import StopValueError\n')] |
import torch
from torch.nn.functional import leaky_relu
from rational.torch import Rational
import numpy as np
t = torch.tensor([-2., -1, 0., 1., 2.])
expected_res = np.array(leaky_relu(t))
inp = torch.from_numpy(np.array(t)).reshape(-1)
cuda_inp = torch.tensor(np.array(t), dtype=torch.float, device="cuda").reshape(-1)
rationalA_lrelu_gpu = Rational(version='A', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalB_lrelu_gpu = Rational(version='B', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalC_lrelu_gpu = Rational(version='C', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalD_lrelu_gpu = Rational(version='D', cuda=True, trainable=False)(cuda_inp).clone().detach().cpu().numpy()
# Tests on GPU
def test_rationalA_gpu_lrelu():
assert np.all(np.isclose(rationalA_lrelu_gpu, expected_res, atol=5e-02))
def test_rationalB_gpu_lrelu():
assert np.all(np.isclose(rationalB_lrelu_gpu, expected_res, atol=5e-02))
def test_rationalC_gpu_lrelu():
assert np.all(np.isclose(rationalC_lrelu_gpu, expected_res, atol=5e-02))
def test_rationalD_gpu_lrelu():
assert np.all(np.isclose(rationalD_lrelu_gpu, expected_res, atol=5e-02))
| [
"torch.nn.functional.leaky_relu",
"numpy.isclose",
"torch.tensor",
"numpy.array",
"rational.torch.Rational"
] | [((117, 156), 'torch.tensor', 'torch.tensor', (['[-2.0, -1, 0.0, 1.0, 2.0]'], {}), '([-2.0, -1, 0.0, 1.0, 2.0])\n', (129, 156), False, 'import torch\n'), ((177, 190), 'torch.nn.functional.leaky_relu', 'leaky_relu', (['t'], {}), '(t)\n', (187, 190), False, 'from torch.nn.functional import leaky_relu\n'), ((793, 849), 'numpy.isclose', 'np.isclose', (['rationalA_lrelu_gpu', 'expected_res'], {'atol': '(0.05)'}), '(rationalA_lrelu_gpu, expected_res, atol=0.05)\n', (803, 849), True, 'import numpy as np\n'), ((904, 960), 'numpy.isclose', 'np.isclose', (['rationalB_lrelu_gpu', 'expected_res'], {'atol': '(0.05)'}), '(rationalB_lrelu_gpu, expected_res, atol=0.05)\n', (914, 960), True, 'import numpy as np\n'), ((1015, 1071), 'numpy.isclose', 'np.isclose', (['rationalC_lrelu_gpu', 'expected_res'], {'atol': '(0.05)'}), '(rationalC_lrelu_gpu, expected_res, atol=0.05)\n', (1025, 1071), True, 'import numpy as np\n'), ((1126, 1182), 'numpy.isclose', 'np.isclose', (['rationalD_lrelu_gpu', 'expected_res'], {'atol': '(0.05)'}), '(rationalD_lrelu_gpu, expected_res, atol=0.05)\n', (1136, 1182), True, 'import numpy as np\n'), ((215, 226), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (223, 226), True, 'import numpy as np\n'), ((264, 275), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (272, 275), True, 'import numpy as np\n'), ((347, 379), 'rational.torch.Rational', 'Rational', ([], {'version': '"""A"""', 'cuda': '(True)'}), "(version='A', cuda=True)\n", (355, 379), False, 'from rational.torch import Rational\n'), ((443, 475), 'rational.torch.Rational', 'Rational', ([], {'version': '"""B"""', 'cuda': '(True)'}), "(version='B', cuda=True)\n", (451, 475), False, 'from rational.torch import Rational\n'), ((539, 571), 'rational.torch.Rational', 'Rational', ([], {'version': '"""C"""', 'cuda': '(True)'}), "(version='C', cuda=True)\n", (547, 571), False, 'from rational.torch import Rational\n'), ((635, 684), 'rational.torch.Rational', 'Rational', ([], {'version': '"""D"""', 'cuda': '(True)', 'trainable': '(False)'}), "(version='D', cuda=True, trainable=False)\n", (643, 684), False, 'from rational.torch import Rational\n')] |
# Copyright Contributors to the Testing Farm project.
# SPDX-License-Identifier: Apache-2.0
import os
from datetime import datetime
import gluetool
from gluetool import Failure
from gluetool import GlueCommandError
from gluetool import GlueError
from gluetool.utils import Command
from typing import AnyStr, List, Optional, Dict, Any, cast # noqa
class UploadResults(gluetool.Module):
"""
This module is for uploading test results in linux-system-roles BaseOS CI use-case.
It does not provide generic functionality for gluetool-module.
It is used at the end of the citool pipeline.
It uses entries in ``test_schedule`` as a source of artifacts.
It provides ``PR_TESTING_ARTIFACTS_URL`` as the target of uploaded results on the web.
"""
name = 'upload-results'
description = 'Upload result using scp'
supported_dryrun_level = gluetool.glue.DryRunLevels.DRY
options = {
'artifact-src-filenames': {
'help': 'The filenames of source artifacts we want to upload',
'metavar': 'path',
'type': str
},
'artifact-dest-file-postfix': {
'help': 'The postfix in the end of the uploaded test results filename.',
'metavar': 'path',
'type': str
},
'artifact-target-dir-name': {
'help': 'The name of a directory for artifacts in `target-dir`',
'metavar': 'path',
'type': str
},
'artifact-target-subdirs': {
'help': 'The subdirectories in `target-dir`/`artifact_target-dir-name` where to upload results. Optional',
'metavar': 'path',
'type': str
},
'key-path': {
'help': 'the path to the key which will be used to upload',
'metavar': 'path',
'type': str
},
'upload-to-public': {
'help': 'Uploads results to public space if set',
'action': 'store_true'
},
'user': {
'help': 'The user which will be used by scp to log in target host',
'metavar': 'USER',
'type': str
},
'domain': {
'help': 'The domain to which results will be uploaded',
'metavar': 'URL',
'type': str
},
'download-domain': {
'help': 'The domain from which results will be downloaded',
'metavar': 'DOWNLOADURL',
'type': str,
},
'target-url': {
'help': 'The URL to which results will be uploaded',
'metavar': 'URL',
'type': str
},
'target-dir': {
'help': 'The directory in target host where artifacts will be uploaded',
'metavar': 'PATH',
'type': str
}
}
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
super(UploadResults, self).__init__(*args, **kwargs)
self.full_target_url = None # type: Optional[str]
def _get_pull_request_info(self):
# type: () -> str
"""
It generates a string from pull request information.
:rtype: str
:returns: Formated pull request info.
"""
task = self.shared('primary_task')
return "{}-{}-{}".format(task.repo, task.pull_number, task.commit_sha[0:7])
def _get_artifact_dir_name(self):
# type: () -> str
"""
It generates a name for the results folder.
:rtype: str
:returns: The name of the folder where the results will be uploaded
"""
compose = self.shared('compose')
if isinstance(compose, List):
compose = compose[0]
artifact_folder_name = self.option('artifact-target-dir-name').format(
self._get_pull_request_info(),
compose,
datetime.now().strftime('%Y%m%d-%H%M%S')
)
return cast(str, artifact_folder_name)
def _create_subdir_for_artifacts(self, destination_sub_path, user_and_domain):
# type: (str, str) -> Optional[str]
"""
This will create a folder for the results on the target file hosting.
:param str destination_sub_path: Main destination path in filesystem for results.
:param str user_and_domain: User login to the server.
"""
target_subdirectory = self.option('artifact-target-subdirs')
if target_subdirectory:
destination_sub_path = "{}/{}".format(destination_sub_path, target_subdirectory)
target_dir = self.option('target-dir')
cmd_init_remote_dir = [
'ssh', '-i', self.option('key-path'),
user_and_domain,
"mkdir -p {}".format(os.path.join(target_dir, destination_sub_path))
]
try:
Command(cmd_init_remote_dir).run()
return destination_sub_path
except GlueCommandError as exc:
assert exc.output.stderr is not None
raise GlueError('Creating remote folder failed: {} cmd: {}'.format(exc, cmd_init_remote_dir))
return None
def _get_files_to_upload(self):
# type: () -> List[Dict[str, str]]
"""
Get the results to be uploaded to the server.
:returns: The source paths to the test results and destination filenames.
"""
schedule = self.shared('test_schedule')
dest_file_postfix = self.option('artifact-dest-file-postfix')
files = []
for entry in schedule:
dest_filename = "{}-{}{}".format(
os.path.splitext(
entry.playbook_filepath.split('/')[-1]
)[0],
entry.result,
dest_file_postfix
)
files.append({
'src-file-path': os.path.join(entry.work_dirpath, self.option('artifact-src-filenames')),
'dest-filename': dest_filename
})
return files
def _upload_results(self, destination_path, user_and_domain, results_files):
# type: (str, str, List[Dict[str, str]]) -> None
"""
It uploads the artifacts to the server.
:param str destination_path: Where to upload results. Example: ``/data/logs/result1/``
:param str user_and_domain: User login to the server. Example: ``<EMAIL>``
:param dict results_files: Full paths to the source artifacts and destination filenames.
"""
for results_file in results_files:
cmd_upload = ['scp', '-i', cast(str, self.option('key-path'))] # type: Optional[List[str]]
assert cmd_upload is not None
cmd_upload.append(results_file['src-file-path'])
cmd_upload.append('{}:{}'.format(
user_and_domain,
os.path.join(destination_path, results_file['dest-filename'])
))
try:
Command(cmd_upload).run()
cmd_upload = None
except GlueCommandError as exc:
assert exc.output.stderr is not None
raise GlueError('Uploading results failed: {} cmd: {}'.format(exc, cmd_upload))
@property
def _full_target_url(self):
# type: () -> Optional[str]
return self.full_target_url
@property
def eval_context(self):
# type: () -> Dict[str, Optional[str]]
__content__ = { # noqa
'PR_TESTING_ARTIFACTS_URL': """
The URL with results of testing
"""
}
return {
'PR_TESTING_ARTIFACTS_URL': self._full_target_url
}
def destroy(self, failure=None):
# type: (Optional[Failure]) -> None
"""
It creates a directory for results in destination and then it uploads test results.
At the end ``PR_TESTING_ARTIFACTS_URL`` contains the URL with the uploaded results.
:param gluetool.glue.Failure failure: if set, carries information about failure that made
``gluetool`` to destroy the whole session. Modules might want to take actions based
on provided information, e.g. send different notifications.
"""
self.require_shared('test_schedule', 'compose', 'primary_task')
if not self.shared('test_schedule'):
# Probably cloning failed
self.warn('Nothing to upload')
return
if not self.option('upload-to-public'):
return
domain = self.option('domain')
user = self.option('user')
user_and_domain = "{}@{}".format(user, domain)
destination_sub_path = self._get_artifact_dir_name()
subdir = self._create_subdir_for_artifacts(destination_sub_path, user_and_domain)
assert subdir is not None
destination_sub_path = subdir
target_url = self.option('target-url')
self.destination_url = os.path.join(target_url, destination_sub_path)
target_dir = self.option('target-dir')
self.destination_dir = os.path.join(target_dir, destination_sub_path)
# Return artifacts URL
download_domain = self.option('download-domain') or domain
self.full_target_url = "https://{}/{}".format(download_domain, self.destination_url)
files = self._get_files_to_upload()
self._upload_results(self.destination_dir, user_and_domain, files)
| [
"datetime.datetime.now",
"gluetool.utils.Command",
"os.path.join",
"typing.cast"
] | [((3933, 3964), 'typing.cast', 'cast', (['str', 'artifact_folder_name'], {}), '(str, artifact_folder_name)\n', (3937, 3964), False, 'from typing import AnyStr, List, Optional, Dict, Any, cast\n'), ((8964, 9010), 'os.path.join', 'os.path.join', (['target_url', 'destination_sub_path'], {}), '(target_url, destination_sub_path)\n', (8976, 9010), False, 'import os\n'), ((9090, 9136), 'os.path.join', 'os.path.join', (['target_dir', 'destination_sub_path'], {}), '(target_dir, destination_sub_path)\n', (9102, 9136), False, 'import os\n'), ((3867, 3881), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3879, 3881), False, 'from datetime import datetime\n'), ((4753, 4799), 'os.path.join', 'os.path.join', (['target_dir', 'destination_sub_path'], {}), '(target_dir, destination_sub_path)\n', (4765, 4799), False, 'import os\n'), ((6856, 6917), 'os.path.join', 'os.path.join', (['destination_path', "results_file['dest-filename']"], {}), "(destination_path, results_file['dest-filename'])\n", (6868, 6917), False, 'import os\n'), ((4848, 4876), 'gluetool.utils.Command', 'Command', (['cmd_init_remote_dir'], {}), '(cmd_init_remote_dir)\n', (4855, 4876), False, 'from gluetool.utils import Command\n'), ((6967, 6986), 'gluetool.utils.Command', 'Command', (['cmd_upload'], {}), '(cmd_upload)\n', (6974, 6986), False, 'from gluetool.utils import Command\n')] |
import random as random_lib
import copy
from opsbro.evaluater import export_evaluater_function
FUNCTION_GROUP = 'random'
@export_evaluater_function(function_group=FUNCTION_GROUP)
def random():
"""**random()** -> Returns a random float between 0 and 1
<code>
Example:
random()
Returns:
0.6988342144113194
</code>
"""
return random_lib.random()
@export_evaluater_function(function_group=FUNCTION_GROUP)
def randomint_between(int_start, int_end):
"""**randomint_between(int_start, int_end)** -> Returns a random int between the start and the end
<code>
Example:
randomint_between(1, 100)
Returns:
69
</code>
"""
return random_lib.randint(int_start, int_end)
@export_evaluater_function(function_group=FUNCTION_GROUP)
def shuffle(list):
"""**shuffle(list)** -> Return a copy of the list suffle randomly
<code>
Example:
suffle([ 1, 2, 3, 4 ])
Returns:
[ 3, 1, 4, 2 ]
</code>
"""
# NOTE random.shuffle is in place
n_list = copy.copy(list)
random_lib.shuffle(n_list)
return n_list
| [
"random.shuffle",
"opsbro.evaluater.export_evaluater_function",
"copy.copy",
"random.random",
"random.randint"
] | [((126, 182), 'opsbro.evaluater.export_evaluater_function', 'export_evaluater_function', ([], {'function_group': 'FUNCTION_GROUP'}), '(function_group=FUNCTION_GROUP)\n', (151, 182), False, 'from opsbro.evaluater import export_evaluater_function\n'), ((392, 448), 'opsbro.evaluater.export_evaluater_function', 'export_evaluater_function', ([], {'function_group': 'FUNCTION_GROUP'}), '(function_group=FUNCTION_GROUP)\n', (417, 448), False, 'from opsbro.evaluater import export_evaluater_function\n'), ((748, 804), 'opsbro.evaluater.export_evaluater_function', 'export_evaluater_function', ([], {'function_group': 'FUNCTION_GROUP'}), '(function_group=FUNCTION_GROUP)\n', (773, 804), False, 'from opsbro.evaluater import export_evaluater_function\n'), ((369, 388), 'random.random', 'random_lib.random', ([], {}), '()\n', (386, 388), True, 'import random as random_lib\n'), ((706, 744), 'random.randint', 'random_lib.randint', (['int_start', 'int_end'], {}), '(int_start, int_end)\n', (724, 744), True, 'import random as random_lib\n'), ((1054, 1069), 'copy.copy', 'copy.copy', (['list'], {}), '(list)\n', (1063, 1069), False, 'import copy\n'), ((1074, 1100), 'random.shuffle', 'random_lib.shuffle', (['n_list'], {}), '(n_list)\n', (1092, 1100), True, 'import random as random_lib\n')] |
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import songs.routing
application = ProtocolTypeRouter({
'websocket': AuthMiddlewareStack(URLRouter(songs.routing.websocket_urlpatterns))
}) | [
"channels.routing.URLRouter"
] | [((199, 245), 'channels.routing.URLRouter', 'URLRouter', (['songs.routing.websocket_urlpatterns'], {}), '(songs.routing.websocket_urlpatterns)\n', (208, 245), False, 'from channels.routing import ProtocolTypeRouter, URLRouter\n')] |
import locale
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
import tornado.httpserver
import tornado.ioloop
import tornado.web
import os
import tornado.options
import json
import ipaddress
import functools
import subprocess
import user_agents
from collections import namedtuple
import models
import dispatch
import endpoints
import api_endpoints
import enums
import starlight
import analytics
import webutil
from starlight import private_data_path
def early_init():
os.chdir(os.path.dirname(os.path.realpath(__file__)))
if not os.environ.get("DISABLE_HTTPS_ENFORCEMENT", "") and not os.environ.get("DEV", ""):
# production mode: force https usage due to local storage issues
# also we don't want the NSA knowing you play chinese cartoon games
def _swizzle_RequestHandler_prepare(self):
if self.request.protocol != "https":
self.redirect(
"https://{0}{1}".format(self.request.host, self.request.uri))
tornado.web.RequestHandler.prepare = _swizzle_RequestHandler_prepare
if os.environ.get("BEHIND_CLOUDFLARE") == "1":
cloudflare_ranges = []
with open("cloudflare.txt", "r") as cf:
for line in cf:
cloudflare_ranges.append(ipaddress.ip_network(line.strip()))
_super_RequestHandler_prepare2 = tornado.web.RequestHandler.prepare
def _swizzle_RequestHandler_prepare2(self):
for net in cloudflare_ranges:
if ipaddress.ip_address(self.request.remote_ip) in net:
if "CF-Connecting-IP" in self.request.headers:
self.request.remote_ip = self.request.headers[
"CF-Connecting-IP"]
break
_super_RequestHandler_prepare2(self)
tornado.web.RequestHandler.prepare = _swizzle_RequestHandler_prepare2
_super_RequestHandler_prepare3 = tornado.web.RequestHandler.prepare
def _swizzle_RequestHandler_prepare3(self):
self.request.is_low_bandwidth = 0
if "User-Agent" in self.request.headers:
ua = user_agents.parse(self.request.headers["User-Agent"])
if ua.is_mobile or ua.is_tablet:
self.request.is_low_bandwidth = 1
_super_RequestHandler_prepare3(self)
tornado.web.RequestHandler.prepare = _swizzle_RequestHandler_prepare3
def main():
starlight.init()
early_init()
in_dev_mode = os.environ.get("DEV")
image_server = os.environ.get("IMAGE_HOST", "")
tornado.options.parse_command_line()
application = tornado.web.Application(dispatch.ROUTES,
template_path="webui",
static_path="static",
image_host=image_server,
debug=in_dev_mode,
is_dev=in_dev_mode,
tle=models.TranslationEngine(starlight),
enums=enums,
starlight=starlight,
tlable=webutil.tlable,
webutil=webutil,
analytics=analytics.Analytics(),
# Change every etag when the server restarts, in case we change what the output looks like.
instance_random=os.urandom(8))
http_server = tornado.httpserver.HTTPServer(application, xheaders=1)
addr = os.environ.get("ADDRESS", "0.0.0.0")
port = int(os.environ.get("PORT", 5000))
http_server.listen(port, addr)
print("Current APP_VER:", os.environ.get("VC_APP_VER",
"1.9.1 (warning: Truth updates will fail in the future if an accurate VC_APP_VER "
"is not set. Export VC_APP_VER to suppress this warning.)"))
print("Ready.")
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
| [
"locale.setlocale",
"os.urandom",
"models.TranslationEngine",
"os.environ.get",
"os.path.realpath",
"starlight.init",
"ipaddress.ip_address",
"user_agents.parse",
"analytics.Analytics"
] | [((14, 60), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '"""en_US.UTF-8"""'], {}), "(locale.LC_ALL, 'en_US.UTF-8')\n", (30, 60), False, 'import locale\n'), ((2398, 2414), 'starlight.init', 'starlight.init', ([], {}), '()\n', (2412, 2414), False, 'import starlight\n'), ((2450, 2471), 'os.environ.get', 'os.environ.get', (['"""DEV"""'], {}), "('DEV')\n", (2464, 2471), False, 'import os\n'), ((2491, 2523), 'os.environ.get', 'os.environ.get', (['"""IMAGE_HOST"""', '""""""'], {}), "('IMAGE_HOST', '')\n", (2505, 2523), False, 'import os\n'), ((3194, 3230), 'os.environ.get', 'os.environ.get', (['"""ADDRESS"""', '"""0.0.0.0"""'], {}), "('ADDRESS', '0.0.0.0')\n", (3208, 3230), False, 'import os\n'), ((1070, 1105), 'os.environ.get', 'os.environ.get', (['"""BEHIND_CLOUDFLARE"""'], {}), "('BEHIND_CLOUDFLARE')\n", (1084, 1105), False, 'import os\n'), ((3246, 3274), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '(5000)'], {}), "('PORT', 5000)\n", (3260, 3274), False, 'import os\n'), ((3342, 3519), 'os.environ.get', 'os.environ.get', (['"""VC_APP_VER"""', '"""1.9.1 (warning: Truth updates will fail in the future if an accurate VC_APP_VER is not set. Export VC_APP_VER to suppress this warning.)"""'], {}), "('VC_APP_VER',\n '1.9.1 (warning: Truth updates will fail in the future if an accurate VC_APP_VER is not set. Export VC_APP_VER to suppress this warning.)'\n )\n", (3356, 3519), False, 'import os\n'), ((499, 525), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (515, 525), False, 'import os\n'), ((540, 587), 'os.environ.get', 'os.environ.get', (['"""DISABLE_HTTPS_ENFORCEMENT"""', '""""""'], {}), "('DISABLE_HTTPS_ENFORCEMENT', '')\n", (554, 587), False, 'import os\n'), ((596, 621), 'os.environ.get', 'os.environ.get', (['"""DEV"""', '""""""'], {}), "('DEV', '')\n", (610, 621), False, 'import os\n'), ((2112, 2165), 'user_agents.parse', 'user_agents.parse', (["self.request.headers['User-Agent']"], {}), "(self.request.headers['User-Agent'])\n", (2129, 2165), False, 'import user_agents\n'), ((2786, 2821), 'models.TranslationEngine', 'models.TranslationEngine', (['starlight'], {}), '(starlight)\n', (2810, 2821), False, 'import models\n'), ((2947, 2968), 'analytics.Analytics', 'analytics.Analytics', ([], {}), '()\n', (2966, 2968), False, 'import analytics\n'), ((3094, 3107), 'os.urandom', 'os.urandom', (['(8)'], {}), '(8)\n', (3104, 3107), False, 'import os\n'), ((1490, 1534), 'ipaddress.ip_address', 'ipaddress.ip_address', (['self.request.remote_ip'], {}), '(self.request.remote_ip)\n', (1510, 1534), False, 'import ipaddress\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmarket.endpoint import endpoint_data
class DescribeCommoditiesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Market', '2015-11-01', 'DescribeCommodities','yunmarket')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_CommodityGmtModifiedTo(self):
return self.get_query_params().get('CommodityGmtModifiedTo')
def set_CommodityGmtModifiedTo(self,CommodityGmtModifiedTo):
self.add_query_param('CommodityGmtModifiedTo',CommodityGmtModifiedTo)
def get_CommodityGmtModifiedFrom(self):
return self.get_query_params().get('CommodityGmtModifiedFrom')
def set_CommodityGmtModifiedFrom(self,CommodityGmtModifiedFrom):
self.add_query_param('CommodityGmtModifiedFrom',CommodityGmtModifiedFrom)
def get_CommodityId(self):
return self.get_query_params().get('CommodityId')
def set_CommodityId(self,CommodityId):
self.add_query_param('CommodityId',CommodityId)
def get_CommodityGmtPublishFrom(self):
return self.get_query_params().get('CommodityGmtPublishFrom')
def set_CommodityGmtPublishFrom(self,CommodityGmtPublishFrom):
self.add_query_param('CommodityGmtPublishFrom',CommodityGmtPublishFrom)
def get_CommodityStatuses(self):
return self.get_query_params().get('CommodityStatuses')
def set_CommodityStatuses(self,CommodityStatuses):
self.add_query_param('CommodityStatuses',CommodityStatuses)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_CommodityGmtCreatedFrom(self):
return self.get_query_params().get('CommodityGmtCreatedFrom')
def set_CommodityGmtCreatedFrom(self,CommodityGmtCreatedFrom):
self.add_query_param('CommodityGmtCreatedFrom',CommodityGmtCreatedFrom)
def get_CommodityIds(self):
return self.get_query_params().get('CommodityIds')
def set_CommodityIds(self,CommodityIds):
self.add_query_param('CommodityIds',CommodityIds)
def get_CommodityGmtCreatedTo(self):
return self.get_query_params().get('CommodityGmtCreatedTo')
def set_CommodityGmtCreatedTo(self,CommodityGmtCreatedTo):
self.add_query_param('CommodityGmtCreatedTo',CommodityGmtCreatedTo)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_CommodityGmtPublishTo(self):
return self.get_query_params().get('CommodityGmtPublishTo')
def set_CommodityGmtPublishTo(self,CommodityGmtPublishTo):
self.add_query_param('CommodityGmtPublishTo',CommodityGmtPublishTo)
def get_CommodityAuditStatuses(self):
return self.get_query_params().get('CommodityAuditStatuses')
def set_CommodityAuditStatuses(self,CommodityAuditStatuses):
self.add_query_param('CommodityAuditStatuses',CommodityAuditStatuses)
def get_Properties(self):
return self.get_query_params().get('Properties')
def set_Properties(self,Properties):
self.add_query_param('Properties',Properties)
def get_CommodityCategoryIds(self):
return self.get_query_params().get('CommodityCategoryIds')
def set_CommodityCategoryIds(self,CommodityCategoryIds):
self.add_query_param('CommodityCategoryIds',CommodityCategoryIds) | [
"aliyunsdkmarket.endpoint.endpoint_data.getEndpointMap",
"aliyunsdkmarket.endpoint.endpoint_data.getEndpointRegional",
"aliyunsdkcore.request.RpcRequest.__init__"
] | [((959, 1048), 'aliyunsdkcore.request.RpcRequest.__init__', 'RpcRequest.__init__', (['self', '"""Market"""', '"""2015-11-01"""', '"""DescribeCommodities"""', '"""yunmarket"""'], {}), "(self, 'Market', '2015-11-01', 'DescribeCommodities',\n 'yunmarket')\n", (978, 1048), False, 'from aliyunsdkcore.request import RpcRequest\n'), ((1113, 1143), 'aliyunsdkmarket.endpoint.endpoint_data.getEndpointMap', 'endpoint_data.getEndpointMap', ([], {}), '()\n', (1141, 1143), False, 'from aliyunsdkmarket.endpoint import endpoint_data\n'), ((1224, 1259), 'aliyunsdkmarket.endpoint.endpoint_data.getEndpointRegional', 'endpoint_data.getEndpointRegional', ([], {}), '()\n', (1257, 1259), False, 'from aliyunsdkmarket.endpoint import endpoint_data\n')] |
# Starter code for Kaggle - Don't Overfit! II dataset.
#
# Objective: make predictions on a dataset after only having trained a model on ~10% of it. Don't overfit.
#
# By <NAME>
import os
import re
import scipy as sp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
# Display the data.
def display_data(dataframe):
# print(dataframe.info())
print(dataframe.describe())
# print(dataframe.corr())
# Test all column data for normality using Shapiro-Wilk and K^2 tests.
alpha = 0.05
SW_results = []
K2_results = []
normality_pass = []
for col in dataframe.columns:
stat, p = sp.stats.shapiro(dataframe[col])
SW_results.append(p)
temp = 0
if p>=alpha:
temp = 1
stat, p = sp.stats.normaltest(dataframe[col])
K2_results.append(p)
if p>=alpha:
normality_pass.append(temp * 1)
else:
normality_pass.append(0)
# Plot SW test p-values.
temp_df = pd.DataFrame(SW_results, index=range(len(dataframe.columns)), columns=['SW_results'])
temp_df.hist(color='green', bins=len(dataframe.columns), figsize=(8, 4))
plt.show()
# Plot K^2 test p-values.
temp_df = pd.DataFrame(K2_results, index=range(len(dataframe.columns)), columns=['K2_results'])
temp_df.hist(color='blue', bins=len(dataframe.columns), figsize=(8, 4))
plt.show()
# Plot pass/fail of both tests for each
temp_df = pd.DataFrame(normality_pass, index=range(len(dataframe.columns)), columns=['NormalityPassFail'])
temp_df.hist(color='red', bins=2, figsize=(8, 4))
plt.show()
# Clean the data.
def clean_data(dataframe):
# Remove 'target' if present.
target_present = False
if 'target' in dataframe:
target_present = True
target = dataframe.pop('target')
# Perform any cleaning.
pass
# Add polynomial features.
'''id = dataframe.pop('id')
poly = PolynomialFeatures(2)
temp = poly.fit_transform(dataframe)
poly_header = poly.get_feature_names(dataframe.columns)
dataframe = pd.DataFrame(data=temp, index=dataframe.index, columns=poly_header)
dataframe = pd.concat([id, dataframe], axis=1)'''
# Perform feature scaling.
scaler = StandardScaler()
id = dataframe.pop('id')
dataframe[dataframe.columns] = scaler.fit_transform(dataframe[dataframe.columns])
dataframe = pd.concat([id, dataframe], axis=1)
# Merge target if present in original data.
if target_present:
dataframe = pd.concat([target, dataframe], axis=1)
return dataframe
# Main execution thread.
if __name__=='__main__':
# Read all data.
top_folder = '.'
df = pd.read_csv(os.path.join('.', 'train.csv'))
training_output = df.pop('target') # Remove output variable
training_ids = list(df['id']) # Get ids for training set.
df_final = pd.read_csv(os.path.join('.', 'test.csv'))
df_all = pd.concat([df, df_final], axis=0, sort=False)
# Display data.
# display_data(df_all)
# Clean data.
df_all = clean_data(df_all)
df = df_all[df_all['id'].isin(training_ids)]
df_ids = df.pop('id')
df_final = df_all[~df_all['id'].isin(training_ids)]
df_final_ids = df_final.pop('id')
# Fit model.
params = {
#'gamma':[1e-6],
'kernel':['rbf', 'linear'],
# 'C':[1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3]
}
grid = GridSearchCV(SVC(gamma='scale', C=1e-2), params, cv=10)
grid.fit(df, training_output)
print('Best SVM parameter values:')
print(grid.best_params_)
print('Best prediction score: ' + str(round(grid.best_score_, 3)))
print()
predictions = grid.predict(df_final)
# Save predictions to output file.
temp = pd.DataFrame(predictions, columns=['target'])
temp.insert(0, 'id', df_final_ids)
temp['target'] = temp['target'].astype('int')
temp.to_csv('prediction.csv', header=list(temp), index=False)
print('\nData saved.\n')
| [
"sklearn.svm.SVC",
"matplotlib.pyplot.show",
"os.path.join",
"sklearn.preprocessing.StandardScaler",
"scipy.stats.normaltest",
"pandas.DataFrame",
"pandas.concat",
"scipy.stats.shapiro"
] | [((1435, 1445), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1443, 1445), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1666), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1664, 1666), True, 'import matplotlib.pyplot as plt\n'), ((1881, 1891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1889, 1891), True, 'import matplotlib.pyplot as plt\n'), ((2537, 2553), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2551, 2553), False, 'from sklearn.preprocessing import PolynomialFeatures, StandardScaler\n'), ((2685, 2719), 'pandas.concat', 'pd.concat', (['[id, dataframe]'], {'axis': '(1)'}), '([id, dataframe], axis=1)\n', (2694, 2719), True, 'import pandas as pd\n'), ((3230, 3275), 'pandas.concat', 'pd.concat', (['[df, df_final]'], {'axis': '(0)', 'sort': '(False)'}), '([df, df_final], axis=0, sort=False)\n', (3239, 3275), True, 'import pandas as pd\n'), ((4058, 4103), 'pandas.DataFrame', 'pd.DataFrame', (['predictions'], {'columns': "['target']"}), "(predictions, columns=['target'])\n", (4070, 4103), True, 'import pandas as pd\n'), ((887, 919), 'scipy.stats.shapiro', 'sp.stats.shapiro', (['dataframe[col]'], {}), '(dataframe[col])\n', (903, 919), True, 'import scipy as sp\n'), ((1026, 1061), 'scipy.stats.normaltest', 'sp.stats.normaltest', (['dataframe[col]'], {}), '(dataframe[col])\n', (1045, 1061), True, 'import scipy as sp\n'), ((2816, 2854), 'pandas.concat', 'pd.concat', (['[target, dataframe]'], {'axis': '(1)'}), '([target, dataframe], axis=1)\n', (2825, 2854), True, 'import pandas as pd\n'), ((3001, 3031), 'os.path.join', 'os.path.join', (['"""."""', '"""train.csv"""'], {}), "('.', 'train.csv')\n", (3013, 3031), False, 'import os\n'), ((3186, 3215), 'os.path.join', 'os.path.join', (['"""."""', '"""test.csv"""'], {}), "('.', 'test.csv')\n", (3198, 3215), False, 'import os\n'), ((3733, 3759), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""scale"""', 'C': '(0.01)'}), "(gamma='scale', C=0.01)\n", (3736, 3759), False, 'from sklearn.svm import SVC\n')] |
import keyword
key = "elif"
s="vinit"
if keyword.iskeyword(key):
print(key," is keyword")
else:
print(key," is not keyword")
if keyword.iskeyword(s):
print(s," is keyword")
else:
print(s," is not keyword")
#This method is use to print the set of keywords present in python
print(keyword.kwlist) | [
"keyword.iskeyword"
] | [((42, 64), 'keyword.iskeyword', 'keyword.iskeyword', (['key'], {}), '(key)\n', (59, 64), False, 'import keyword\n'), ((131, 151), 'keyword.iskeyword', 'keyword.iskeyword', (['s'], {}), '(s)\n', (148, 151), False, 'import keyword\n')] |
from boxes import Actor, Room, EndActor
class Level(object):
"""Instantiates all the rooms and actors for the game.
Must have an EndActor in a reachable Room, with a proper and acquirable item_trigger to win the game
After Rooms have been instantiated, 'doors' must be created by using room.add_destination(other_room)
Attributes:
start: The Room where the Player object is placed at the start of the game
rooms: List of Rooms included in this level
"""
def __init__(self):
boss = Actor(dialog='Hello frined!',
name="A weird guy that I don't know. He looks friendly.",
description="Talk to the friendly weird guy I don't know",
item='Botato',
item_dialog='Take this botato, use is wisely!\n\nReceived 1 Holy Botato!',
done_dialog='You already have my everything, grasshopper.')
altar = EndActor(dialog='It lacks a little something...',
name='An altar, in dire need of something to be put on it and worshipped',
description='Behold the altar',
item_trigger='Botato',
item_dialog='I place the holy relic into the altar, and I know that my Mission is fulfilled...')
self.start = Room(description='This room is empty', content=[],
door_description='An empty and starting place')
self.alt_room = Room(description="A pitch black room with a shining white altar in the center", content=[altar],
door_description='A place with a strange aura')
self.end = Room(description='This normal looking room has a weird guy standing against the wall', content=[boss],
door_description='The light at the end of the tunnel')
self.start.add_destination(self.alt_room)
self.alt_room.add_destination(self.start)
self.alt_room.add_destination(self.end)
self.end.add_destination(self.alt_room)
self.rooms = [self.start, self.alt_room, self.end]
| [
"boxes.EndActor",
"boxes.Room",
"boxes.Actor"
] | [((549, 870), 'boxes.Actor', 'Actor', ([], {'dialog': '"""Hello frined!"""', 'name': '"""A weird guy that I don\'t know. He looks friendly."""', 'description': '"""Talk to the friendly weird guy I don\'t know"""', 'item': '"""Botato"""', 'item_dialog': '"""Take this botato, use is wisely!\n\nReceived 1 Holy Botato!"""', 'done_dialog': '"""You already have my everything, grasshopper."""'}), '(dialog=\'Hello frined!\', name=\n "A weird guy that I don\'t know. He looks friendly.", description=\n "Talk to the friendly weird guy I don\'t know", item=\'Botato\',\n item_dialog=\n """Take this botato, use is wisely!\n\nReceived 1 Holy Botato!""",\n done_dialog=\'You already have my everything, grasshopper.\')\n', (554, 870), False, 'from boxes import Actor, Room, EndActor\n'), ((967, 1262), 'boxes.EndActor', 'EndActor', ([], {'dialog': '"""It lacks a little something..."""', 'name': '"""An altar, in dire need of something to be put on it and worshipped"""', 'description': '"""Behold the altar"""', 'item_trigger': '"""Botato"""', 'item_dialog': '"""I place the holy relic into the altar, and I know that my Mission is fulfilled..."""'}), "(dialog='It lacks a little something...', name=\n 'An altar, in dire need of something to be put on it and worshipped',\n description='Behold the altar', item_trigger='Botato', item_dialog=\n 'I place the holy relic into the altar, and I know that my Mission is fulfilled...'\n )\n", (975, 1262), False, 'from boxes import Actor, Room, EndActor\n'), ((1366, 1469), 'boxes.Room', 'Room', ([], {'description': '"""This room is empty"""', 'content': '[]', 'door_description': '"""An empty and starting place"""'}), "(description='This room is empty', content=[], door_description=\n 'An empty and starting place')\n", (1370, 1469), False, 'from boxes import Actor, Room, EndActor\n'), ((1510, 1664), 'boxes.Room', 'Room', ([], {'description': '"""A pitch black room with a shining white altar in the center"""', 'content': '[altar]', 'door_description': '"""A place with a strange aura"""'}), "(description=\n 'A pitch black room with a shining white altar in the center', content=\n [altar], door_description='A place with a strange aura')\n", (1514, 1664), False, 'from boxes import Actor, Room, EndActor\n'), ((1698, 1864), 'boxes.Room', 'Room', ([], {'description': '"""This normal looking room has a weird guy standing against the wall"""', 'content': '[boss]', 'door_description': '"""The light at the end of the tunnel"""'}), "(description=\n 'This normal looking room has a weird guy standing against the wall',\n content=[boss], door_description='The light at the end of the tunnel')\n", (1702, 1864), False, 'from boxes import Actor, Room, EndActor\n')] |
#!/usr/bin/env python
import sys
sys.path.append("..")
from game.base.signal import Signal
def test_signal():
s = Signal()
hello = s.connect(lambda: print("hello ", end=""), weak=False)
s.connect(lambda: print("world"), weak=False)
assert len(s) == 2
s() # 'hello world'
assert s.disconnect(hello)
s() # 'world'
assert len(s) == 1
s.clear()
assert len(s) == 0
def test_signal_queue():
# queued connection
s = Signal()
s.blocked += 1
a = s.connect(lambda: print("queued"), weak=False)
assert len(s.queued) == 1
s() # nothing
s.blocked -= 1
for slot in s.queued:
slot()
s.queued = []
s() # "queued"
# queued disconnection
s.blocked += 1
a.disconnect()
assert len(s) == 1 # still attached
assert len(s.queued) == 1
s.blocked -= 1
for q in s.queued:
q()
s.queued = []
assert len(s) == 0
def test_signal_weak():
s = Signal()
w = s.connect(lambda: print("test"))
del w
assert len(s) == 0
s()
assert len(s) == 0
s = Signal()
w = s.connect(lambda: print("test"))
del s # slot outlives signal?
assert w.sig() is None # it works
del w
def test_signal_once():
s = Signal()
w = s.once(lambda: print("test"))
assert len(s.slots) == 1
s()
# assert len(s.slots) == 0
| [
"sys.path.append",
"game.base.signal.Signal"
] | [((34, 55), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (49, 55), False, 'import sys\n'), ((123, 131), 'game.base.signal.Signal', 'Signal', ([], {}), '()\n', (129, 131), False, 'from game.base.signal import Signal\n'), ((469, 477), 'game.base.signal.Signal', 'Signal', ([], {}), '()\n', (475, 477), False, 'from game.base.signal import Signal\n'), ((966, 974), 'game.base.signal.Signal', 'Signal', ([], {}), '()\n', (972, 974), False, 'from game.base.signal import Signal\n'), ((1089, 1097), 'game.base.signal.Signal', 'Signal', ([], {}), '()\n', (1095, 1097), False, 'from game.base.signal import Signal\n'), ((1258, 1266), 'game.base.signal.Signal', 'Signal', ([], {}), '()\n', (1264, 1266), False, 'from game.base.signal import Signal\n')] |
import requests
import json
from requests import Response
from datetime import datetime
class LoggingResult:
'''
A logging result class. Works out whether the log has been successful, and
if not, contains the response from the logging request.
Note: I've designed this this way because there's a myriad of things that
*can* go wrong. Rather than trying to address as many as possible when writing
this in a deadline, I've designed it to get out of the way of diagnosing the
cause of the problem.
I know this isn't good design. It means that actual code using this library
would get littered with code dealing with all of the possible issues that can
arise from the logging, instead of the library handling it and returning useful
errors when it can't resolve the issue itself!
In future, if I was spending more
time on this project, I'd get rid of this class and instead return an integer
success code. The success code would be the code returned by the server, and
before returning errors (non-200 codes), it would see whether it could resolve
any of those issues itself.
My suspicion is that, in practice, any actual errors which would arise would
come from either something like the recent AWS outage in which case, cache
the log messages in memory and retry periodically), or bad configuration
on the part of the programmer (which we can't help anyway).
'''
def __init__(self,
success: bool,
response: Response):
self.response = response
self.success = success
class Logger:
'''
The Client-side library to the Doist technical task logging server.
General usage: logger = Logger(__name__, apikey=APIKEY_HERE).
Parameters let you set:
server: the domain/ip the logging server is running on (string)
port: the port to log to (int)
apikey: the apikey to use for authentication (string)
ssl: whether to use SSL (boolean)
'''
def __init__(self,
origin: str,
server: str = 'localhost',
port: int = 5000,
apikey: str = None,
ssl: bool = True):
self.origin = origin
self.server = server
self.port = port
self.apikey = apikey
self.protocol = 'http'
if ssl:
self.protocol += 's'
def log(self,
message: str,
log_level: str = 'debug',
**kwargs):
'''
Submit a log message.
Log levels are optional, and default to `debug`.
'''
# Construct the base url in the log method, not the init method, so that
# if the api key/server/etc is changed due to a previous error, updated
# connection details are acknowledged.
base_url = self.protocol + '://' + self.server + ':' + str(self.port) + '/'
if self.apikey is not None:
base_url += '?key='+self.apikey
timestamp = datetime.now().isoformat()
log = {'message': message,
'timestamp': timestamp,
'log_level': log_level,
'origin': self.origin}
# Don't let supplementary details override the log details
[kwargs.pop(key, None) for key in log.keys()]
# Extend the log with the supplementary details
for key, value in kwargs.items():
log[key] = value
response = requests.post(base_url,
data=json.dumps(log),
timeout=10)
return LoggingResult(response.status_code == 200, response)
| [
"datetime.datetime.now",
"json.dumps"
] | [((3041, 3055), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3053, 3055), False, 'from datetime import datetime\n'), ((3550, 3565), 'json.dumps', 'json.dumps', (['log'], {}), '(log)\n', (3560, 3565), False, 'import json\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=====================
SBPy Vega Core Module
=====================
"""
import os
import astropy.units as u
from astropy.utils.state import ScienceState
from ..core import SpectralStandard
from . import sources
__doctest_requires__ = {'Sun': 'synphot'}
__all__ = [
'Vega',
'default_vega'
]
class Vega(SpectralStandard):
"""Vega spectrum.
Parameters
----------
wave : `~astropy.units.Quantity`
The spectral wavelengths.
fluxd : `~astropy.units.Quantity`
The solar flux densities, at 1 au.
description : string, optional
A brief description of the source spectrum.
bibcode : string, optional
Bibliography code for `sbpy.bib.register`.
meta : dict, optional
Any additional meta data, passed on to
`~synphot.SourceSpectrum`.
Attributes
----------
wave - Wavelengths of the source spectrum.
fluxd - Source spectrum.
description - Brief description of the source spectrum.
meta - Meta data.
Examples
--------
Get the default Vega spectrum:
>>> vega = Vega.from_default() # doctest: +REMOTE_DATA +IGNORE_OUTPUT
Create Vega from a file:
>>> vega = Vega.from_file('filename') # doctest: +SKIP
Evaluate Vega at 1 μm:
>>> print(vega(1 * u.um)) # doctest: +SKIP
"""
def __repr__(self):
if self.description is None:
return '<Vega>'
else:
return '<Vega: {}>'.format(self.description)
@classmethod
def from_builtin(cls, name):
"""Vega spectrum from a built-in `sbpy` source.
Parameters
----------
name : string
The name of a Vega spectrum parameter set in
`sbpy.spectroscopy.vega.sources`.
"""
from astropy.utils.data import _is_url
try:
parameters = getattr(sources, name).copy()
if not _is_url(parameters['filename']):
# find in the module's location
path = os.path.dirname(__file__)
parameters['filename'] = os.sep.join(
(path, 'data', parameters['filename']))
vega = Vega.from_file(**parameters)
except AttributeError:
msg = 'Unknown Vega spectrum "{}". Valid spectra:\n{}'.format(
name, sources.available)
raise ValueError(msg)
return vega
@classmethod
def from_default(cls):
"""Return the `sbpy` default Vega spectrum."""
return default_vega.get()
class default_vega(ScienceState):
"""Get/set the `sbpy` default Vega spectrum.
To change it:
>>> from sbpy.spectroscopy.vega import default_vega
>>> with default_vega(Vega.from_file(filename)) # doctest: +SKIP
... # Vega from filename in effect
"""
_value = 'Bohlin2014'
@classmethod
def validate(cls, value):
if isinstance(value, str):
return Vega.from_builtin(value)
elif isinstance(value, Vega):
return value
else:
raise TypeError("default_vega must be a string or Vega instance.")
| [
"os.path.dirname",
"os.sep.join",
"astropy.utils.data._is_url"
] | [((2011, 2042), 'astropy.utils.data._is_url', '_is_url', (["parameters['filename']"], {}), "(parameters['filename'])\n", (2018, 2042), False, 'from astropy.utils.data import _is_url\n'), ((2115, 2140), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2130, 2140), False, 'import os\n'), ((2182, 2233), 'os.sep.join', 'os.sep.join', (["(path, 'data', parameters['filename'])"], {}), "((path, 'data', parameters['filename']))\n", (2193, 2233), False, 'import os\n')] |
#!env python3
import argparse
import csv
import re
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description="Converts a word into an FST")
PARSER.add_argument("-s", dest="symbols", default=None, help="file containing the symbols")
PARSER.add_argument('word', help='a word')
args = PARSER.parse_args()
if not args.symbols:
# processes character by character
for i,c in enumerate(args.word):
print("%d %d %s %s" % (i, i+1, c, c) )
print(i+1)
else:
with open(args.symbols, encoding="utf-8") as f:
symbols = [ row.split()[0] for row in f if row.split()[0] != "eps" ]
symbols.sort(key = lambda s: len(s), reverse=True)
tmp=re.sub("\+","\+","|".join(symbols))
#print(tmp.encode("utf-8"))
exp = re.compile(tmp)
word = args.word
m = exp.match(word)
i=0
while ( len(word) > 0 ) and ( m is not None ):
print("%d %d %s %s" % (i, i+1, m.group(), m.group()) )
word = word[m.end():]
m = exp.match(word)
i += 1
if len(word) > 0:
print("unknown symbols in expression: ", word)
else:
print(i)
| [
"argparse.ArgumentParser",
"re.compile"
] | [((92, 158), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Converts a word into an FST"""'}), "(description='Converts a word into an FST')\n", (115, 158), False, 'import argparse\n'), ((833, 848), 're.compile', 're.compile', (['tmp'], {}), '(tmp)\n', (843, 848), False, 'import re\n')] |
from django.shortcuts import render
from django.http import HttpResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
def index(request):
tmp = []
liuyanList = [['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '邢卫', '教师', '垃圾网站',
'2017/12/12, 20:00:00', '1'],
['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '邢卫', '教师', '垃圾网站',
'2017/12/12, 20:00:00', '2'],
['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '游客', '游客', '垃圾网站',
'2017/12/12, 20:00:00', '3'],
['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '邢卫', '游客', '垃圾网站',
'2017/12/12, 20:00:00', '4'],
['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '邢卫', '教师', '垃圾网站',
'2017/12/12, 20:00:00', '5'],
['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '邢卫', '教师', '垃圾网站',
'2017/12/12, 20:00:00', '6'],
['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '游客', '游客', '垃圾网站',
'2017/12/12, 20:00:00', '7'],
['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '邢卫', '游客', '垃圾网站',
'2017/12/12, 20:00:00', '7'],
['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '邢卫', '教师', '垃圾网站',
'2017/12/12, 20:00:00', '9'],
['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '邢卫', '教师', '垃圾网站',
'2017/12/12, 20:00:00', '10'],
['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '游客', '游客', '垃圾网站',
'2017/12/12, 20:00:00', '11'],
['/static/Semantic-UI-master/examples/assets/images/avatar/tom.jpg', '邢卫', '游客', '垃圾网站',
'2017/12/12, 20:00:00', '12'],
]
liuyanPage = Paginator(liuyanList, 10)
liuyanPaginator = []
for i in range(1, liuyanPage.num_pages + 1):
for j in liuyanPage.page(i):
tmp.append(liuyanPage.page(i))
liuyanPaginator.append(tmp)
tmp = []
# 课程表,应统计每门课程未读通知、未完成作业、未阅读课件的数量
CoursesList = [['软件需求工程', ['邢卫', '刘玉生'], ['周一6,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['操作系统原理', ['伍赛'], ['周一6,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['软件工程管理', ['金波'], ['周一,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['计算机网络', ['陆魁军'], ['周一6,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['计算机网络', ['陆魁军'], ['周一,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['计算机网络', ['陆魁军'], ['周一,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['计算机网络', ['陆魁军'], ['周一6,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['软件需求工程', ['邢卫', '刘玉生'], ['周一6,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['操作系统原理', ['伍赛'], ['周一6,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['软件工程管理', ['金波'], ['周一,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['计算机网络', ['陆魁军'], ['周一6,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['计算机网络', ['陆魁军'], ['周一,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['计算机网络', ['陆魁军'], ['周一,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
['计算机网络', ['陆魁军'], ['周一6,7,8'], ['玉泉曹光彪西-503', '玉泉教7-304(多)'], '专业课'],
]
CoursesPage = Paginator(CoursesList, 10)
CoursesPaginator = []
for i in range(1, CoursesPage.num_pages + 1):
for j in CoursesPage.page(i):
tmp.append(CoursesPage.page(i))
CoursesPaginator.append(tmp)
tmp = []
return render(request, 'visitor/index.html', {'liuyanPage': liuyanPage, 'liuyanPaginator': liuyanPaginator,
'CoursesList': CoursesList,
'CoursesPage': CoursesPage, 'CoursesPaginator': CoursesPaginator})
def course(request):
return render(request, 'visitor/visitor_course.html')
| [
"django.shortcuts.render",
"django.core.paginator.Paginator"
] | [((2120, 2145), 'django.core.paginator.Paginator', 'Paginator', (['liuyanList', '(10)'], {}), '(liuyanList, 10)\n', (2129, 2145), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((3697, 3723), 'django.core.paginator.Paginator', 'Paginator', (['CoursesList', '(10)'], {}), '(CoursesList, 10)\n', (3706, 3723), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((3947, 4150), 'django.shortcuts.render', 'render', (['request', '"""visitor/index.html"""', "{'liuyanPage': liuyanPage, 'liuyanPaginator': liuyanPaginator,\n 'CoursesList': CoursesList, 'CoursesPage': CoursesPage,\n 'CoursesPaginator': CoursesPaginator}"], {}), "(request, 'visitor/index.html', {'liuyanPage': liuyanPage,\n 'liuyanPaginator': liuyanPaginator, 'CoursesList': CoursesList,\n 'CoursesPage': CoursesPage, 'CoursesPaginator': CoursesPaginator})\n", (3953, 4150), False, 'from django.shortcuts import render\n'), ((4276, 4322), 'django.shortcuts.render', 'render', (['request', '"""visitor/visitor_course.html"""'], {}), "(request, 'visitor/visitor_course.html')\n", (4282, 4322), False, 'from django.shortcuts import render\n')] |
"""
The :mod:`sportsbed.datasets._soccer` includes functions
to fetch soccer historical and fixtures data.
"""
import numpy as np
HOME_WIN = lambda outputs, col1, col2, offset: outputs[col1] - outputs[col2] > offset
AWAY_WIN = lambda outputs, col1, col2, offset: outputs[col1] - outputs[col2] < -offset
DRAW = lambda outputs, col1, col2, offset: np.abs(outputs[col1] - outputs[col2]) <= offset
OVER = lambda outputs, col1, col2, offset: outputs[col1] - outputs[col2] > offset
UNDER = lambda outputs, col1, col2, offset: outputs[col1] - outputs[col2] < offset
TARGETS = [
('home_win__full_time_goals', lambda outputs: HOME_WIN(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 0.0)),
('away_win__full_time_goals', lambda outputs: AWAY_WIN(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 0.0)),
('draw__full_time_goals', lambda outputs: DRAW(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 0.0)),
('over_1.5__full_time_goals', lambda outputs: OVER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 1.5)),
('over_2.5__full_time_goals', lambda outputs: OVER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 2.5)),
('over_3.5__full_time_goals', lambda outputs: OVER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 3.5)),
('over_4.5__full_time_goals', lambda outputs: OVER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 4.5)),
('under_1.5__full_time_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 1.5)),
('under_2.5__full_time_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 2.5)),
('under_3.5__full_time_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 3.5)),
('under_4.5__full_time_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_goals', 'away_team__full_time_goals', 4.5)),
('home_win__full_time_adjusted_goals', lambda outputs: HOME_WIN(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 0.5)),
('away_win__full_time_adjusted_goals', lambda outputs: AWAY_WIN(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 0.5)),
('draw__full_time_adjusted_goals', lambda outputs: DRAW(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 0.5)),
('over_1.5__full_time_adjusted_goals', lambda outputs: OVER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 1.5)),
('over_2.5__full_time_adjusted_goals', lambda outputs: OVER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 2.5)),
('over_3.5__full_time_adjusted_goals', lambda outputs: OVER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 3.5)),
('over_4.5__full_time_adjusted_goals', lambda outputs: OVER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 4.5)),
('under_1.5__full_time_adjusted_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 1.5)),
('under_2.5__full_time_adjusted_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 2.5)),
('under_3.5__full_time_adjusted_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 3.5)),
('under_4.5__full_time_adjusted_goals', lambda outputs: UNDER(outputs, 'home_team__full_time_adjusted_goals', 'away_team__full_time_adjusted_goals', 4.5))
]
| [
"numpy.abs"
] | [((348, 385), 'numpy.abs', 'np.abs', (['(outputs[col1] - outputs[col2])'], {}), '(outputs[col1] - outputs[col2])\n', (354, 385), True, 'import numpy as np\n')] |
from base_client import BaseClient, format_query
class BattleClient(BaseClient):
retries = 3
def do_battle(self, ident):
return self._send_request(format_query("BATTLE", ident))
| [
"base_client.format_query"
] | [((167, 196), 'base_client.format_query', 'format_query', (['"""BATTLE"""', 'ident'], {}), "('BATTLE', ident)\n", (179, 196), False, 'from base_client import BaseClient, format_query\n')] |
from django.db import models
from Modulos.Base.models import ModeloBase
from Modulos.Puestos.models import Puestos
class Candidatos(ModeloBase):
id_puesto = models.ForeignKey(Puestos,on_delete=models.CASCADE)
nombre = models.CharField('Nombre del candidato', max_length = 50, null = False, blank = False)
apellidos = models.CharField('Apellidos', max_length = 50, null = False, blank = False)
telefono = models.CharField('Teléfono', max_length = 20, null = True, blank = True)
email = models.EmailField('Correo electrónico', max_length = 200, null = True, blank = True)
curriculum = models.FileField('Curriculum', null = True, blank = True , upload_to = 'Candidatos/')
objects = models.Manager()
class Meta:
verbose_name = 'Candidato'
verbose_name_plural = 'Candidatos'
def delete(self, *args, **kwargs):
self.curriculum.delete()
super().delete(*args, **kwargs)
def __str__(self):
return self.nombre
| [
"django.db.models.EmailField",
"django.db.models.Manager",
"django.db.models.ForeignKey",
"django.db.models.FileField",
"django.db.models.CharField"
] | [((162, 214), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Puestos'], {'on_delete': 'models.CASCADE'}), '(Puestos, on_delete=models.CASCADE)\n', (179, 214), False, 'from django.db import models\n'), ((227, 312), 'django.db.models.CharField', 'models.CharField', (['"""Nombre del candidato"""'], {'max_length': '(50)', 'null': '(False)', 'blank': '(False)'}), "('Nombre del candidato', max_length=50, null=False, blank=False\n )\n", (243, 312), False, 'from django.db import models\n'), ((330, 399), 'django.db.models.CharField', 'models.CharField', (['"""Apellidos"""'], {'max_length': '(50)', 'null': '(False)', 'blank': '(False)'}), "('Apellidos', max_length=50, null=False, blank=False)\n", (346, 399), False, 'from django.db import models\n'), ((421, 487), 'django.db.models.CharField', 'models.CharField', (['"""Teléfono"""'], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), "('Teléfono', max_length=20, null=True, blank=True)\n", (437, 487), False, 'from django.db import models\n'), ((506, 584), 'django.db.models.EmailField', 'models.EmailField', (['"""Correo electrónico"""'], {'max_length': '(200)', 'null': '(True)', 'blank': '(True)'}), "('Correo electrónico', max_length=200, null=True, blank=True)\n", (523, 584), False, 'from django.db import models\n'), ((608, 686), 'django.db.models.FileField', 'models.FileField', (['"""Curriculum"""'], {'null': '(True)', 'blank': '(True)', 'upload_to': '"""Candidatos/"""'}), "('Curriculum', null=True, blank=True, upload_to='Candidatos/')\n", (624, 686), False, 'from django.db import models\n'), ((709, 725), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (723, 725), False, 'from django.db import models\n')] |
# -*- coding: utf-8 -*-
import requests
from proxy_validator import config
Default_UA = config['CLIENT_UA']
Default_Timeout = config['CLIENT_TIMEOUT']
class Client(object):
def __init__(self, headers=None, proxies=None):
self.headers = headers if headers is not None else {}
self.headers['User-Agent'] = Default_UA
self.proxies = proxies if proxies is not None else {}
self.session = requests.Session()
def get(self, url=None):
if url is None:
raise Exception('Need Url. ')
response = self.session.get(url, headers=self.headers, proxies=self.proxies, timeout=Default_Timeout)
if response.status_code != 200:
return None
return response.text
def set_proxies(self, proxy_str, ptype='http'):
self.proxies = {
'http': (ptype if ptype is not None else 'http') + '://' + proxy_str,
'https': (ptype if ptype is not None else 'https') + '://' + proxy_str,
}
| [
"requests.Session"
] | [((423, 441), 'requests.Session', 'requests.Session', ([], {}), '()\n', (439, 441), False, 'import requests\n')] |
from django.db import models
from django.utils import timezone
from django.utils.text import slugify
class CommentManager(models.Manager):
def create_comment(self, party, author, **kwargs):
if not party:
raise ValueError('파티는 필수입니다.')
if not author:
raise ValueError('작성자는 필수입니다.')
if author not in party.participants.all():
raise ValueError('파티에 참여한 사람만 댓글을 작성할 수 있습니다.')
instance = self.model(party=party, author=author, **kwargs)
instance.slug = self._generate_slug(kwargs['text'], author.username)
instance.save(using=self._db)
return instance
def update_comment(self, instance, text):
instance.text = text
instance.slug = self._generate_slug(text, instance.author.username)
instance.save()
return instance
def _generate_slug(self, text, author):
slug_string = '{} {} {}'.format(timezone.now(), author, text)
return slugify(slug_string, allow_unicode=True)
class Comment(models.Model):
party = models.ForeignKey(
'parties.Party',
on_delete=models.PROTECT,
verbose_name='댓글이 향하는 파티'
)
author = models.ForeignKey(
'profiles.Profile',
on_delete=models.PROTECT,
verbose_name='댓글 작성자'
)
text = models.CharField(max_length=150, verbose_name='댓글 내용')
slug = models.SlugField(
max_length=100,
allow_unicode=True,
verbose_name='댓글 라벨'
)
created_at = models.DateTimeField(auto_now_add=True, verbose_name='최초 작성된 시간')
last_updated = models.DateTimeField(auto_now=True, verbose_name='가장 최근 수정된 시간')
is_active = models.BooleanField(default=True, verbose_name='활성화 여부')
objects = CommentManager()
class Meta:
db_table = 'comments'
verbose_name = '댓글'
verbose_name_plural = '댓글들'
def __str__(self):
return '{} 에 {} 이 남긴 댓글: {}'.format(self.party, self.author, self.text)
| [
"django.utils.text.slugify",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.utils.timezone.now",
"django.db.models.SlugField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((1061, 1153), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""parties.Party"""'], {'on_delete': 'models.PROTECT', 'verbose_name': '"""댓글이 향하는 파티"""'}), "('parties.Party', on_delete=models.PROTECT, verbose_name=\n '댓글이 향하는 파티')\n", (1078, 1153), False, 'from django.db import models\n'), ((1192, 1282), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""profiles.Profile"""'], {'on_delete': 'models.PROTECT', 'verbose_name': '"""댓글 작성자"""'}), "('profiles.Profile', on_delete=models.PROTECT,\n verbose_name='댓글 작성자')\n", (1209, 1282), False, 'from django.db import models\n'), ((1320, 1374), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)', 'verbose_name': '"""댓글 내용"""'}), "(max_length=150, verbose_name='댓글 내용')\n", (1336, 1374), False, 'from django.db import models\n'), ((1386, 1460), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(100)', 'allow_unicode': '(True)', 'verbose_name': '"""댓글 라벨"""'}), "(max_length=100, allow_unicode=True, verbose_name='댓글 라벨')\n", (1402, 1460), False, 'from django.db import models\n'), ((1508, 1573), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""최초 작성된 시간"""'}), "(auto_now_add=True, verbose_name='최초 작성된 시간')\n", (1528, 1573), False, 'from django.db import models\n'), ((1593, 1657), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""가장 최근 수정된 시간"""'}), "(auto_now=True, verbose_name='가장 최근 수정된 시간')\n", (1613, 1657), False, 'from django.db import models\n'), ((1674, 1730), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'verbose_name': '"""활성화 여부"""'}), "(default=True, verbose_name='활성화 여부')\n", (1693, 1730), False, 'from django.db import models\n'), ((977, 1017), 'django.utils.text.slugify', 'slugify', (['slug_string'], {'allow_unicode': '(True)'}), '(slug_string, allow_unicode=True)\n', (984, 1017), False, 'from django.utils.text import slugify\n'), ((932, 946), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (944, 946), False, 'from django.utils import timezone\n')] |
import uuid
from django.conf import settings
from django.db import models
from django.utils import timezone
class AbstractBase(models.Model):
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False
)
created_at = models.DateTimeField(default=timezone.now)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='+'
)
updated_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
def clean(self):
if self.updated_by is None and self.created_by is not None:
self.updated_by = self.created_by
class Meta:
abstract = True
| [
"django.db.models.DateTimeField",
"django.db.models.UUIDField",
"django.db.models.ForeignKey"
] | [((154, 224), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'primary_key': '(True)', 'default': 'uuid.uuid4', 'editable': '(False)'}), '(primary_key=True, default=uuid.uuid4, editable=False)\n', (170, 224), False, 'from django.db import models\n'), ((272, 314), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (292, 314), False, 'from django.db import models\n'), ((332, 367), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (352, 367), False, 'from django.db import models\n'), ((385, 476), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE', 'related_name': '"""+"""'}), "(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,\n related_name='+')\n", (402, 476), False, 'from django.db import models\n'), ((520, 635), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.SET_NULL', 'related_name': '"""+"""'}), "(settings.AUTH_USER_MODEL, null=True, blank=True,\n on_delete=models.SET_NULL, related_name='+')\n", (537, 635), False, 'from django.db import models\n')] |
import unittest
from problems.problem33 import solution
class Test(unittest.TestCase):
def test(self):
self.assertEqual(solution([100, 4, 200, 1, 3, 2]), 4)
| [
"problems.problem33.solution"
] | [((125, 157), 'problems.problem33.solution', 'solution', (['[100, 4, 200, 1, 3, 2]'], {}), '([100, 4, 200, 1, 3, 2])\n', (133, 157), False, 'from problems.problem33 import solution\n')] |
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import abc
from newrelic.api.external_trace import ExternalTrace
from newrelic.common.object_wrapper import wrap_function_wrapper
def newrelic_event_hook(response):
tracer = getattr(response.request, "_nr_trace", None)
if tracer is not None:
headers = dict(getattr(response, "headers", ())).items()
tracer.process_response(getattr(response, "status_code", None), headers)
async def newrelic_event_hook_async(response):
tracer = getattr(response.request, "_nr_trace", None)
if tracer is not None:
headers = dict(getattr(response, "headers", ())).items()
tracer.process_response(getattr(response, "status_code", None), headers)
def newrelic_first_gen(l, is_async=False):
if is_async:
yield newrelic_event_hook_async
else:
yield newrelic_event_hook
while True:
try:
yield next(l)
except StopIteration:
break
class NewRelicFirstList(list):
def __init__(self, *args, is_async=False, **kwargs):
super(NewRelicFirstList, self).__init__(*args, **kwargs)
self.is_async = is_async
def __iter__(self):
l = super().__iter__()
return iter(newrelic_first_gen(l, self.is_async))
class NewRelicFirstDict(dict):
def __init__(self, *args, is_async=False, **kwargs):
super().__init__(*args, **kwargs)
self.is_async = is_async
self.__setitem__("response", self["response"])
def __setitem__(self, key, value):
if key == "response":
value = NewRelicFirstList(value, is_async=self.is_async)
super().__setitem__(key, value)
def bind_request(request, *args, **kwargs):
return request
def sync_send_wrapper(wrapped, instance, args, kwargs):
request = bind_request(*args, **kwargs)
with ExternalTrace("httpx", str(request.url), request.method) as tracer:
if hasattr(tracer, "generate_request_headers"):
request._nr_trace = tracer
outgoing_headers = tracer.generate_request_headers(tracer.transaction)
for header_name, header_value in outgoing_headers:
# User headers should override our CAT headers
if header_name not in request.headers:
request.headers[header_name] = header_value
return wrapped(*args, **kwargs)
async def async_send_wrapper(wrapped, instance, args, kwargs):
request = bind_request(*args, **kwargs)
with ExternalTrace("httpx", str(request.url), request.method) as tracer:
if hasattr(tracer, "generate_request_headers"):
request._nr_trace = tracer
outgoing_headers = tracer.generate_request_headers(tracer.transaction)
for header_name, header_value in outgoing_headers:
# User headers should override our CAT headers
if header_name not in request.headers:
request.headers[header_name] = header_value
return await wrapped(*args, **kwargs)
@property
def nr_first_event_hooks(self):
if not hasattr(self, "_nr_event_hooks"):
# This branch should only be hit if agent initialize is called after
# the initialization of the http client
self._event_hooks = vars(self)["_event_hooks"]
del vars(self)["_event_hooks"]
return self._nr_event_hooks
@nr_first_event_hooks.setter
def nr_first_event_hooks(self, value):
value = NewRelicFirstDict(value, is_async=False)
self._nr_event_hooks = value
@property
def nr_first_event_hooks_async(self):
if not hasattr(self, "_nr_event_hooks"):
# This branch should only be hit if agent initialize is called after
# the initialization of the http client
self._event_hooks = vars(self)["_event_hooks"]
del vars(self)["_event_hooks"]
return self._nr_event_hooks
@nr_first_event_hooks_async.setter
def nr_first_event_hooks_async(self, value):
value = NewRelicFirstDict(value, is_async=True)
self._nr_event_hooks = value
def instrument_httpx_client(module):
module.Client._event_hooks = nr_first_event_hooks
module.AsyncClient._event_hooks = nr_first_event_hooks_async
wrap_function_wrapper(module, "Client.send", sync_send_wrapper)
wrap_function_wrapper(module, "AsyncClient.send", async_send_wrapper)
| [
"newrelic.common.object_wrapper.wrap_function_wrapper"
] | [((4766, 4829), 'newrelic.common.object_wrapper.wrap_function_wrapper', 'wrap_function_wrapper', (['module', '"""Client.send"""', 'sync_send_wrapper'], {}), "(module, 'Client.send', sync_send_wrapper)\n", (4787, 4829), False, 'from newrelic.common.object_wrapper import wrap_function_wrapper\n'), ((4834, 4903), 'newrelic.common.object_wrapper.wrap_function_wrapper', 'wrap_function_wrapper', (['module', '"""AsyncClient.send"""', 'async_send_wrapper'], {}), "(module, 'AsyncClient.send', async_send_wrapper)\n", (4855, 4903), False, 'from newrelic.common.object_wrapper import wrap_function_wrapper\n')] |
import face_recognition
import cv2
import numpy as np
# getMouthImage (from TLR Teeth Appearance Calculation.ipynb)
def getMouthImage(faceImage,margin=0):
# face_locations = face_recognition.face_locations(faceImage)
face_landmarks_list = face_recognition.face_landmarks(faceImage)
if len(face_landmarks_list) == 0:
return None
minx = miny = float('inf')
maxx = maxy = float('-inf')
for x,y in face_landmarks_list[0]['top_lip']:
minx = min(minx,x)
miny = min(miny,y)
for x,y in face_landmarks_list[0]['bottom_lip']:
maxx = max(maxx,x)
maxy = max(maxy,y)
mouthImage = faceImage[miny-margin:maxy+margin,minx-margin:maxx+margin]
# lip_landmarks must be translate to origin (0,0) by minx, miny
lip_landmarks = {
'top_lip': [],
'bottom_lip': []
}
for p in face_landmarks_list[0]['top_lip']:
p2 = (p[0] - minx, p[1] - miny)
lip_landmarks['top_lip'].append(p2)
for p in face_landmarks_list[0]['bottom_lip']:
p2 = (p[0] - minx, p[1] - miny)
lip_landmarks['bottom_lip'].append(p2)
return mouthImage,lip_landmarks
# Ray tracing (from TLR Teeth Appearance Calculation.ipynb)
def ray_tracing_method(x,y,poly):
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
# isin_inner_mouth (from TLR Teeth Appearance Calculation.ipynb)
def isin_inner_mouth(lip_boundary,x,y):
top_lip = lip_boundary['top_lip']
bottom_lip = lip_boundary['bottom_lip']
bounds = np.concatenate((top_lip[6:], bottom_lip[6:]),axis=0)
isin = ray_tracing_method(x,y,bounds)
return isin
# findCavity (from TLR Teeth Appearance Calculation.ipynb)
def findCavity(top_lip,bottom_lip):
return np.concatenate((top_lip[6:], bottom_lip[6:]),axis=0)
# cavityArea (from TLR Teeth Appearance Calculation.ipynb)
def cavityArea(top_lip,bottom_lip):
cavity = findCavity(top_lip,bottom_lip)
# cavity = np.concatenate((top_lip[6:], bottom_lip[6:]),axis=0)
x = cavity[:,0]
y = cavity[:,1]
return PolyArea(x,y)
# getTeethScore (from TLR Teeth Appearance Calculation.ipynb)
def getTeethScore(mouthImage,lip_landmarks=None):
height, width, channels = mouthImage.shape
area = height * width
# Operate in BGR (imread loads in BGR)
# OR WHAT???
# Working with VDO frame
# - RGB2Lab gives all WHITE region
lab = cv2.cvtColor(mouthImage, cv2.COLOR_RGB2Lab)
luv = cv2.cvtColor(mouthImage, cv2.COLOR_RGB2Luv)
# lab = cv2.cvtColor(mouthImage, cv2.COLOR_BGR2Lab)
# luv = cv2.cvtColor(mouthImage, cv2.COLOR_BGR2Luv)
lab_ud = lab[:,:,1].mean() - lab[:,:,1].std()
ta = lab_ud # From THESIS (LAB, LUV)
luv_ud = luv[:,:,1].mean() - luv[:,:,1].std()
tu = luv_ud # from thesis
# WHY do we copy?
lab2 = np.copy(lab)
luv2 = np.copy(luv)
# Copy for teeth hilight
hilightedMouthImage = np.copy(mouthImage)
# Pixel-wise operation
# TODO make it faster?
lab_c = luv_c = 0 # Counters
for y in range(len(hilightedMouthImage)):
row = hilightedMouthImage[y]
for x in range(len(row)):
inMouth = False
if lip_landmarks == None:
inMouth = isin_mouth(hilightedMouthImage,x,y)
else:
inMouth = isin_inner_mouth(lip_landmarks,x,y)
if inMouth:
p = row[x]
lab_a = lab2[y,x,1]
luv_a = luv2[y,x,1]
if lab_a <= ta:
p[0] = 255 # L
p[1] = 255 # L
p[2] = 255 # L
lab_c += 1
if luv_a <= tu:
p[0] = 255 # L
p[1] = 255 # L
p[2] = 255 # L
luv_c += 1
return (hilightedMouthImage,lab,luv,lab_c,luv_c)
# draw_bounary
def draw_bounary(facial_feature):
# print(type(face_landmarks[facial_feature]),face_landmarks[facial_feature])
points = face_landmarks[facial_feature]
points = np.array(points, np.int32)
points = points.reshape((-1,1,2))
cv2.polylines(frame,points,True,(255,255,255),thickness=4)
def extract_features(image):
frame = image
rgb_frame = frame[:, :, ::-1]
face_landmarks_list = face_recognition.face_landmarks(rgb_frame)
if len(face_landmarks_list) == 0:
return None
face_landmarks = face_landmarks_list[0]
mouthImage,lip_landmarks = getMouthImage(rgb_frame)
score = getTeethScore(mouthImage,lip_landmarks)
markedMouthImage = score[0]
lab_c = score[3]
luv_c = score[4]
lip_features = {
# "frame_id": frame_number,
"top_lip": face_landmarks_list[0]['top_lip'],
"bottom_lip": face_landmarks_list[0]['bottom_lip'],
"teeth_appearance": {
"LAB": lab_c,
"LUV": luv_c
}
}
x_offset = y_offset = float('inf')
for x,y in face_landmarks_list[0]['top_lip']:
x_offset = min(x_offset,x)
y_offset = min(y_offset,y)
markedMouthImage = markedMouthImage[:, :, ::-1]
frame[y_offset:y_offset+markedMouthImage.shape[0], x_offset:x_offset+markedMouthImage.shape[1]] = markedMouthImage
return frame,lip_features
| [
"numpy.copy",
"cv2.polylines",
"face_recognition.face_landmarks",
"numpy.array",
"cv2.cvtColor",
"numpy.concatenate"
] | [((244, 286), 'face_recognition.face_landmarks', 'face_recognition.face_landmarks', (['faceImage'], {}), '(faceImage)\n', (275, 286), False, 'import face_recognition\n'), ((1845, 1898), 'numpy.concatenate', 'np.concatenate', (['(top_lip[6:], bottom_lip[6:])'], {'axis': '(0)'}), '((top_lip[6:], bottom_lip[6:]), axis=0)\n', (1859, 1898), True, 'import numpy as np\n'), ((2059, 2112), 'numpy.concatenate', 'np.concatenate', (['(top_lip[6:], bottom_lip[6:])'], {'axis': '(0)'}), '((top_lip[6:], bottom_lip[6:]), axis=0)\n', (2073, 2112), True, 'import numpy as np\n'), ((2693, 2736), 'cv2.cvtColor', 'cv2.cvtColor', (['mouthImage', 'cv2.COLOR_RGB2Lab'], {}), '(mouthImage, cv2.COLOR_RGB2Lab)\n', (2705, 2736), False, 'import cv2\n'), ((2745, 2788), 'cv2.cvtColor', 'cv2.cvtColor', (['mouthImage', 'cv2.COLOR_RGB2Luv'], {}), '(mouthImage, cv2.COLOR_RGB2Luv)\n', (2757, 2788), False, 'import cv2\n'), ((3103, 3115), 'numpy.copy', 'np.copy', (['lab'], {}), '(lab)\n', (3110, 3115), True, 'import numpy as np\n'), ((3125, 3137), 'numpy.copy', 'np.copy', (['luv'], {}), '(luv)\n', (3132, 3137), True, 'import numpy as np\n'), ((3193, 3212), 'numpy.copy', 'np.copy', (['mouthImage'], {}), '(mouthImage)\n', (3200, 3212), True, 'import numpy as np\n'), ((4171, 4197), 'numpy.array', 'np.array', (['points', 'np.int32'], {}), '(points, np.int32)\n', (4179, 4197), True, 'import numpy as np\n'), ((4237, 4301), 'cv2.polylines', 'cv2.polylines', (['frame', 'points', '(True)', '(255, 255, 255)'], {'thickness': '(4)'}), '(frame, points, True, (255, 255, 255), thickness=4)\n', (4250, 4301), False, 'import cv2\n'), ((4405, 4447), 'face_recognition.face_landmarks', 'face_recognition.face_landmarks', (['rgb_frame'], {}), '(rgb_frame)\n', (4436, 4447), False, 'import face_recognition\n')] |
import math
pi = math.pi
cos = lambda x : math.cos(x * pi/180)
sin = lambda x : math.sin(x * pi/180)
circles = []
vertices = []
vert_norm_tex = []
faces = []
# Generate coordinates
factor = 5 # 1,3,5,9,15
circles.append(([(0.,-1.,0.,0.,1.)], 0))
for theta in range(-87, 90, factor):
if abs(theta) >= 82:
step = 8 * factor
elif abs(theta) >= 75:
step = 4 * factor
elif abs(theta) >= 60:
step = 2 * factor
else :
step = 1 * factor
A = range(0,360,step)
# In a sphere, normal vector == position vector
# x,y,z,u,v
circ = [(cos(theta)*cos(a), sin(theta), cos(theta)*sin(a),
1.0 - a/360., 0.5 - theta/180.) for a in A]
circ.append((cos(theta), sin(theta), 0.0, 0.0, 0.5-theta/180.))
# circles.append((circ, len(circ)))
circles.append((circ, circles[-1][1]+len(circles[-1][0])))
circles.append(([(0.,1.,0.,0.,0.)], circles[-1][1]+len(circles[-1][0])))
# Create list of faces
for theta, (points, cc) in enumerate(circles):
# print(theta*factor-90, cc)
i = theta * factor - 90
if theta == 0:
l = len(circles[theta+1][0])
for p in range(l):
faces.extend([0, cc+p + 1, (cc+p+1)%l + 1])
elif theta == len(circles)-2:
l = len(points)+1
for p in range(l):
faces.extend([cc+p, cc+ (p+1)%l, cc+len(points)])
elif theta == len(circles)-1:
pass
elif len(circles[theta][0]) == len(circles[theta+1][0]) :
l = len(circles[theta][0])
for p in range(l):
faces.extend([cc+p, cc + (p+1)%l, cc+p+l])
faces.extend([cc+p+l, cc+l + (p+1)%l, cc + (p+1)%l])
elif len(circles[theta][0]) < len(circles[theta+1][0]) :
l = len(circles[theta][0])-1
for p in range(l):
faces.extend([cc+p, cc+l+2*p+1, cc+l+2*p+2])
faces.extend([cc+l+2*p+2, cc+l + (2*p+3), cc + (p+1)])
faces.extend([cc+p, cc+l+2*p+2, cc + (p+1)])
elif len(circles[theta][0]) > len(circles[theta+1][0]) :
l = len(circles[theta+1][0])-1
for p in range(l):
faces.extend([cc+2*p, cc+2*l+p+1, cc+2*p+1])
faces.extend([cc+2*p+1, cc + (2*p+2), cc+2*l + (p+2)])
faces.extend([cc+2*l+p+1, cc+2*p+1, cc+2*l + (p+2)])
else :
assert False
for pos in points :
vertices.extend(pos[:3])
vert_norm_tex.extend(pos[:3])
vert_norm_tex.extend(pos)
spherical_mesh = {'v':vertices, 'f':faces, 'format':
[(b'v_pos', 3, 'float')]}
spherical_mesh_tex = {'v':vert_norm_tex, 'f':faces, 'format':
[(b'v_pos', 3, 'float'),(b'v_norm', 3, 'float'),(b'v_texc', 2, 'float')]}
c = 0.02
cube_mesh = {'v':
[-c,-c,-c, -1.,0.,0., -c,-c,c, -1.,0.,0., -c,c,c, -1.,0.,0., -c,c,-c, -1.,0.,0.,
c,-c,-c, 1.,0.,0., c,-c,c, 1.,0.,0., c,c,c, 1.,0.,0., c,c,-c, 1.,0.,0.,
-c,-c,-c, 0.,-1.,0., c,-c,-c, 0.,-1.,0., c,-c,c, 0.,-1.,0., -c,-c,c, 0.,-1.,0.,
-c,c,c, 0.,1.,0., c,c,c, 0.,1.,0., c,c,-c, 0.,1.,0., -c,c,-c, 0.,1.,0.,
-c,-c,-c, 0.,0.,-1., c,-c,-c, 0.,0.,-1., c,c,-c, 0.,0.,-1., -c,c,-c, 0.,0.,-1.,
c,-c,c, 0.,0.,1., -c,-c,c, 0.,0.,1., -c,c,c, 0.,0.,1., c,c,c, 0.,0.,1.,
],
'f':[0,1,2, 0,2,3, 4,5,6, 4,6,7, 8,9,10, 8,10,11,
12,13,14, 12,14,15, 16,17,18, 16,18,19, 20,21,22, 20,22,23],
'format':[(b'v_pos', 3, 'float'),(b'v_norm', 3, 'float')]}
| [
"math.cos",
"math.sin"
] | [((44, 66), 'math.cos', 'math.cos', (['(x * pi / 180)'], {}), '(x * pi / 180)\n', (52, 66), False, 'import math\n'), ((82, 104), 'math.sin', 'math.sin', (['(x * pi / 180)'], {}), '(x * pi / 180)\n', (90, 104), False, 'import math\n')] |
import contextlib
import traceback
from cube2protocol.cube_data_stream import CubeDataStream
from cipolla.protocol import swh
from cipolla.game.client.client import Client
from cipolla.game.player.player import Player
from cipolla.game.room.client_collection import ClientCollection
from cipolla.game.room.player_collection import PlayerCollection
from cube2common.vec import vec
from typing import Callable, Iterator, Tuple
class RoomBroadcaster(object):
def __init__(self, client_collection: ClientCollection, player_collection: PlayerCollection) -> None:
self._client_collection = client_collection
self._player_collection = player_collection
@contextlib.contextmanager
def broadcastbuffer(self, channel: int, reliable: bool, *args) -> Iterator[CubeDataStream]:
with self.clientbuffer(channel, reliable, *args) as cds:
yield cds
@property
def clientbuffer(self) -> Callable:
return self._client_collection.broadcastbuffer
def resume(self) -> None:
with self.broadcastbuffer(1, True) as cds:
swh.put_pausegame(cds, 0)
def pause(self):
with self.broadcastbuffer(1, True) as cds:
swh.put_pausegame(cds, 1)
def time_left(self, seconds):
with self.broadcastbuffer(1, True) as cds:
swh.put_timeup(cds, seconds)
def intermission(self):
self.time_left(0)
def shotfx(self, player: Player, gun: int, shot_id: int, from_pos: vec, to_pos: vec) -> None:
with self.broadcastbuffer(1, True, [player]) as cds:
swh.put_shotfx(cds, player, gun, shot_id, from_pos, to_pos)
def explodefx(self, player, gun, explode_id):
with self.broadcastbuffer(1, True, [player]) as cds:
swh.put_explodefx(cds, player, gun, explode_id)
def player_died(self, player, killer, teams):
with self.broadcastbuffer(1, True) as cds:
swh.put_died(cds, player, killer, teams)
def player_disconnected(self, player: Player) -> None:
with self.broadcastbuffer(1, True) as cds:
swh.put_cdis(cds, player)
def teleport(self, player, teleport, teledest):
with self.broadcastbuffer(0, True, [player]) as cds:
swh.put_teleport(cds, player, teleport, teledest)
def jumppad(self, player, jumppad):
with self.broadcastbuffer(0, True, [player]) as cds:
swh.put_jumppad(cds, player, jumppad)
def server_message(self, message: str, exclude: Tuple = ()) -> None:
with self.broadcastbuffer(1, True, exclude) as cds:
swh.put_servmsg(cds, message)
def client_connected(self, client: Client) -> None:
player = client.get_player()
with self.broadcastbuffer(1, True, [client]) as cds:
swh.put_resume(cds, [player])
swh.put_initclients(cds, [player])
def current_masters(self, mastermode, clients):
with self.broadcastbuffer(1, True) as cds:
swh.put_currentmaster(cds, mastermode, clients)
def sound(self, sound):
for client in self._client_collection.to_iterator():
with client.sendbuffer(1, True) as cds:
tm = CubeDataStream()
swh.put_sound(tm, sound)
swh.put_clientdata(cds, client, str(tm))
def flush_messages(self) -> None:
try:
class ClientBufferReference(object):
def __init__(self, client, positions_next_byte, positions_size, messages_next_byte, messages_size):
self.client = client
self.positions_next_byte = positions_next_byte
self.positions_size = positions_size
self.messages_next_byte = messages_next_byte
self.messages_size = messages_size
room_positions = CubeDataStream()
room_messages = CubeDataStream()
references = []
positions_next_byte = 0
messages_next_byte = 0
for client in self._client_collection.to_iterator():
player = client.get_player()
positions_first_byte = positions_next_byte
messages_first_byte = messages_next_byte
player.write_state(room_positions, room_messages)
positions_next_byte = len(room_positions)
messages_next_byte = len(room_messages)
positions_size = positions_next_byte - positions_first_byte
messages_size = messages_next_byte - messages_first_byte
references.append(ClientBufferReference(client, positions_next_byte, positions_size, messages_next_byte, messages_size))
positions_len = len(room_positions)
messages_len = len(room_messages)
room_positions.write(room_positions)
room_messages.write(room_messages)
position_data = memoryview(room_positions.data)
message_data = memoryview(room_messages.data)
for ref in references:
client = ref.client
pnb = ref.positions_next_byte
mnb = ref.messages_next_byte
psize = ref.positions_size
msize = ref.messages_size
if positions_len - psize > 0:
# TODO: Use no_allocate option here
client.send(0, position_data[pnb:pnb + (positions_len - psize)], False, False)
if messages_len - msize > 0:
# TODO: Use no_allocate option here
client.send(1, message_data[mnb:mnb + (messages_len - msize)], True, False)
for player in self._player_collection.to_iterator():
player.state.clear_flushed_state()
except:
traceback.print_exc()
| [
"cipolla.protocol.swh.put_jumppad",
"cipolla.protocol.swh.put_sound",
"cipolla.protocol.swh.put_servmsg",
"cipolla.protocol.swh.put_timeup",
"cipolla.protocol.swh.put_cdis",
"cipolla.protocol.swh.put_explodefx",
"cipolla.protocol.swh.put_resume",
"cipolla.protocol.swh.put_pausegame",
"cipolla.protoc... | [((1088, 1113), 'cipolla.protocol.swh.put_pausegame', 'swh.put_pausegame', (['cds', '(0)'], {}), '(cds, 0)\n', (1105, 1113), False, 'from cipolla.protocol import swh\n'), ((1199, 1224), 'cipolla.protocol.swh.put_pausegame', 'swh.put_pausegame', (['cds', '(1)'], {}), '(cds, 1)\n', (1216, 1224), False, 'from cipolla.protocol import swh\n'), ((1323, 1351), 'cipolla.protocol.swh.put_timeup', 'swh.put_timeup', (['cds', 'seconds'], {}), '(cds, seconds)\n', (1337, 1351), False, 'from cipolla.protocol import swh\n'), ((1579, 1638), 'cipolla.protocol.swh.put_shotfx', 'swh.put_shotfx', (['cds', 'player', 'gun', 'shot_id', 'from_pos', 'to_pos'], {}), '(cds, player, gun, shot_id, from_pos, to_pos)\n', (1593, 1638), False, 'from cipolla.protocol import swh\n'), ((1763, 1810), 'cipolla.protocol.swh.put_explodefx', 'swh.put_explodefx', (['cds', 'player', 'gun', 'explode_id'], {}), '(cds, player, gun, explode_id)\n', (1780, 1810), False, 'from cipolla.protocol import swh\n'), ((1925, 1965), 'cipolla.protocol.swh.put_died', 'swh.put_died', (['cds', 'player', 'killer', 'teams'], {}), '(cds, player, killer, teams)\n', (1937, 1965), False, 'from cipolla.protocol import swh\n'), ((2089, 2114), 'cipolla.protocol.swh.put_cdis', 'swh.put_cdis', (['cds', 'player'], {}), '(cds, player)\n', (2101, 2114), False, 'from cipolla.protocol import swh\n'), ((2241, 2290), 'cipolla.protocol.swh.put_teleport', 'swh.put_teleport', (['cds', 'player', 'teleport', 'teledest'], {}), '(cds, player, teleport, teledest)\n', (2257, 2290), False, 'from cipolla.protocol import swh\n'), ((2405, 2442), 'cipolla.protocol.swh.put_jumppad', 'swh.put_jumppad', (['cds', 'player', 'jumppad'], {}), '(cds, player, jumppad)\n', (2420, 2442), False, 'from cipolla.protocol import swh\n'), ((2589, 2618), 'cipolla.protocol.swh.put_servmsg', 'swh.put_servmsg', (['cds', 'message'], {}), '(cds, message)\n', (2604, 2618), False, 'from cipolla.protocol import swh\n'), ((2786, 2815), 'cipolla.protocol.swh.put_resume', 'swh.put_resume', (['cds', '[player]'], {}), '(cds, [player])\n', (2800, 2815), False, 'from cipolla.protocol import swh\n'), ((2828, 2862), 'cipolla.protocol.swh.put_initclients', 'swh.put_initclients', (['cds', '[player]'], {}), '(cds, [player])\n', (2847, 2862), False, 'from cipolla.protocol import swh\n'), ((2979, 3026), 'cipolla.protocol.swh.put_currentmaster', 'swh.put_currentmaster', (['cds', 'mastermode', 'clients'], {}), '(cds, mastermode, clients)\n', (3000, 3026), False, 'from cipolla.protocol import swh\n'), ((3837, 3853), 'cube2protocol.cube_data_stream.CubeDataStream', 'CubeDataStream', ([], {}), '()\n', (3851, 3853), False, 'from cube2protocol.cube_data_stream import CubeDataStream\n'), ((3882, 3898), 'cube2protocol.cube_data_stream.CubeDataStream', 'CubeDataStream', ([], {}), '()\n', (3896, 3898), False, 'from cube2protocol.cube_data_stream import CubeDataStream\n'), ((3190, 3206), 'cube2protocol.cube_data_stream.CubeDataStream', 'CubeDataStream', ([], {}), '()\n', (3204, 3206), False, 'from cube2protocol.cube_data_stream import CubeDataStream\n'), ((3223, 3247), 'cipolla.protocol.swh.put_sound', 'swh.put_sound', (['tm', 'sound'], {}), '(tm, sound)\n', (3236, 3247), False, 'from cipolla.protocol import swh\n'), ((5804, 5825), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5823, 5825), False, 'import traceback\n')] |
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import time
import json
import sentiment_mod as s
#consumer key, consumer secret, access token, access secret.
ckey="<KEY>"
csecret="<KEY>"
atoken="<KEY>"
asecret="<KEY>"
class listener(StreamListener):
def on_data(self, data):
all_data = json.loads(data)
tweet = all_data["text"]
sentiment_value, confidence = s.sentiment(tweet)
print(tweet,sentiment_value,confidence)
time.sleep(1)
print("NO")
#if confidence*100 >= 80:
output = open("Live Sentiment Analysis/twitter-out.txt","a")
output.write(sentiment_value)
output.write('\n')
output.close()
return True
def on_error(self, status):
print(status)
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["happy"]) | [
"json.loads",
"sentiment_mod.sentiment",
"time.sleep",
"tweepy.OAuthHandler"
] | [((856, 883), 'tweepy.OAuthHandler', 'OAuthHandler', (['ckey', 'csecret'], {}), '(ckey, csecret)\n', (868, 883), False, 'from tweepy import OAuthHandler\n'), ((374, 390), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (384, 390), False, 'import json\n'), ((463, 481), 'sentiment_mod.sentiment', 's.sentiment', (['tweet'], {}), '(tweet)\n', (474, 481), True, 'import sentiment_mod as s\n'), ((538, 551), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (548, 551), False, 'import time\n')] |
#!/usr/bin/env python
# encoding: utf-8
from Alfred import Tools
import HTMLParser
import os
import re
import urllib2
class Markdown(object):
PANDOC = '/usr/local/bin/pandoc -f html-native_divs-native_spans -t gfm --strip-comments --atx-headers '
def __init__(self, url):
self.url = url
self.html = self._fetchHtml()
self.md = self._fetchMd()
def _fetchHtml(self):
try:
r = urllib2.urlopen(self.url)
response = r.read().decode('utf-8')
except:
response = "<html><body><a href=\"" + self.url + "\">" + self.url + "</a></body></html>"
pass
return response
def _fetchMd(self):
try:
cmd = '{0} {1}'.format(self.PANDOC, self.url)
md = os.popen(cmd)
resp = md.read()
except:
resp = "[{0}]({0})".format(self.url)
pass
return resp
@staticmethod
def _htmlDecode(string):
string = urllib2.unquote(string)
# return string
return HTMLParser.HTMLParser().unescape(string).encode('utf-8')
def _markdownHeader(self):
return "---\n" \
"Title: {title}\n" \
"Created: {date}\n" \
"Tags: #WebClip\n" \
"Url: {url}\n" \
"---\n".format(date=Tools.getTodayDate(), url=self.getMdUrl(), title=self.getTitle())
def getHtml(self):
return self.html
def getMarkdownContent(self):
out = self._markdownHeader()
out += self.getMd()
return out
def getMd(self):
return self.md.decode('utf-8')
def getMdUrl(self):
page_url = u"[{0}]({1})".format(self.getTitle(), self.getUrl())
return page_url
def getTitle(self):
res = re.findall(r'<title>[\n\t\s]*(.+)[\n\t\s]*</title>', self.html, re.MULTILINE)
return self._htmlDecode(''.join(res))
def getUrl(self):
return self.url.decode('utf-8')
def parseFilename(self, filename):
to_replace = ['/', '\\', ':']
tmp = filename.decode('utf-8').strip()
for i in to_replace:
tmp = tmp.replace(i, '-')
return tmp.encode('utf-8')
def writeMarkdown(self, content, path):
with open(path, "w+") as file:
file.write(content.encode('utf-8'))
| [
"Alfred.Tools.getTodayDate",
"urllib2.urlopen",
"HTMLParser.HTMLParser",
"os.popen",
"re.findall",
"urllib2.unquote"
] | [((990, 1013), 'urllib2.unquote', 'urllib2.unquote', (['string'], {}), '(string)\n', (1005, 1013), False, 'import urllib2\n'), ((1798, 1885), 're.findall', 're.findall', (['"""<title>[\\\\n\\\\t\\\\s]*(.+)[\\\\n\\\\t\\\\s]*</title>"""', 'self.html', 're.MULTILINE'], {}), "('<title>[\\\\n\\\\t\\\\s]*(.+)[\\\\n\\\\t\\\\s]*</title>', self.html, re.\n MULTILINE)\n", (1808, 1885), False, 'import re\n'), ((435, 460), 'urllib2.urlopen', 'urllib2.urlopen', (['self.url'], {}), '(self.url)\n', (450, 460), False, 'import urllib2\n'), ((780, 793), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (788, 793), False, 'import os\n'), ((1343, 1363), 'Alfred.Tools.getTodayDate', 'Tools.getTodayDate', ([], {}), '()\n', (1361, 1363), False, 'from Alfred import Tools\n'), ((1053, 1076), 'HTMLParser.HTMLParser', 'HTMLParser.HTMLParser', ([], {}), '()\n', (1074, 1076), False, 'import HTMLParser\n')] |
import click
import os
import yaml
from panoptes_client import Panoptes
@click.version_option(prog_name='Panoptes CLI')
@click.group()
@click.option(
'--endpoint',
'-e',
help="Overides the default API endpoint",
type=str,
)
@click.option(
'--admin',
'-a',
help=(
"Enables admin mode. Ignored if you're not logged in as an "
"administrator."
),
is_flag=True,
)
@click.pass_context
def cli(ctx, endpoint, admin):
ctx.config_dir = os.path.expanduser('~/.panoptes/')
ctx.config_file = os.path.join(ctx.config_dir, 'config.yml')
ctx.config = {
'endpoint': 'https://www.zooniverse.org',
'username': '',
'password': '',
}
try:
with open(ctx.config_file) as conf_f:
ctx.config.update(yaml.full_load(conf_f))
except IOError:
pass
if endpoint:
ctx.config['endpoint'] = endpoint
if ctx.invoked_subcommand != 'configure':
Panoptes.connect(
endpoint=ctx.config['endpoint'],
username=ctx.config['username'],
password=ctx.config['password'],
admin=admin,
)
from panoptes_cli.commands.configure import *
from panoptes_cli.commands.info import *
from panoptes_cli.commands.project import *
from panoptes_cli.commands.subject import *
from panoptes_cli.commands.subject_set import *
from panoptes_cli.commands.user import *
from panoptes_cli.commands.workflow import *
| [
"yaml.full_load",
"click.group",
"click.option",
"os.path.join",
"panoptes_client.Panoptes.connect",
"click.version_option",
"os.path.expanduser"
] | [((75, 121), 'click.version_option', 'click.version_option', ([], {'prog_name': '"""Panoptes CLI"""'}), "(prog_name='Panoptes CLI')\n", (95, 121), False, 'import click\n'), ((123, 136), 'click.group', 'click.group', ([], {}), '()\n', (134, 136), False, 'import click\n'), ((138, 226), 'click.option', 'click.option', (['"""--endpoint"""', '"""-e"""'], {'help': '"""Overides the default API endpoint"""', 'type': 'str'}), "('--endpoint', '-e', help='Overides the default API endpoint',\n type=str)\n", (150, 226), False, 'import click\n'), ((243, 376), 'click.option', 'click.option', (['"""--admin"""', '"""-a"""'], {'help': '"""Enables admin mode. Ignored if you\'re not logged in as an administrator."""', 'is_flag': '(True)'}), '(\'--admin\', \'-a\', help=\n "Enables admin mode. Ignored if you\'re not logged in as an administrator.",\n is_flag=True)\n', (255, 376), False, 'import click\n'), ((486, 520), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.panoptes/"""'], {}), "('~/.panoptes/')\n", (504, 520), False, 'import os\n'), ((543, 585), 'os.path.join', 'os.path.join', (['ctx.config_dir', '"""config.yml"""'], {}), "(ctx.config_dir, 'config.yml')\n", (555, 585), False, 'import os\n'), ((967, 1100), 'panoptes_client.Panoptes.connect', 'Panoptes.connect', ([], {'endpoint': "ctx.config['endpoint']", 'username': "ctx.config['username']", 'password': "ctx.config['password']", 'admin': 'admin'}), "(endpoint=ctx.config['endpoint'], username=ctx.config[\n 'username'], password=ctx.config['password'], admin=admin)\n", (983, 1100), False, 'from panoptes_client import Panoptes\n'), ((795, 817), 'yaml.full_load', 'yaml.full_load', (['conf_f'], {}), '(conf_f)\n', (809, 817), False, 'import yaml\n')] |
import math
from twitter import tweet
class Rectangle(object):
def __init__(self, width, height, *args, **kwargs):
super().__init__(*args, **kwargs)
self.width = width
self.height = height
def area(self):
return self.width * self.height
def broadcast(self):
message = 'My rectangle is {} by {}'.format(self.width, self.height)
tweet(message)
class Cylinder(object):
def __init__(self, radius, height, *args, **kwargs):
super().__init__(*args, **kwargs)
self.radius = radius
self.height = height
def area_of_base(self):
return math.pi * (self.radius ** 2)
def volume(self):
return self.area_of_base() * self.height
| [
"twitter.tweet"
] | [((393, 407), 'twitter.tweet', 'tweet', (['message'], {}), '(message)\n', (398, 407), False, 'from twitter import tweet\n')] |
#!/usr/bin/env python
import json
from jconfigure import configure
if __name__ == "__main__":
print(json.dumps(configure(), indent=2))
| [
"jconfigure.configure"
] | [((117, 128), 'jconfigure.configure', 'configure', ([], {}), '()\n', (126, 128), False, 'from jconfigure import configure\n')] |
#!/usr/bin/env/python3.9
#
# Made by EtcAug10
import requests as req, os, re
banner = """
----------------- -- --- |
------------ -----------|
--- SQLi CMS Lokomedia - |
------- by EtcAug10 -----|
--- -------------------|
"""
admin = [
'adm/',
'_adm_/',
'_admin_/',
'_administrator_/',
'operator/',
'sika/',
'develop/',
'ketua/',
'redaktur/',
'author',
'admin/',
'administrator/',
'adminweb/',
'user/',
'users/',
'dinkesadmin/',
'retel/',
'author/',
'panel/',
'paneladmin/',
'panellogin/',
'redaksi/',
'cp-admin/',
'master/',
'master/index.php',
'master/login.php',
'operator/index.php',
'sika/index.php',
'develop/index.php',
'ketua/index.php',
'redaktur/index.php',
'admin/index.php',
'administrator/index.php',
'adminweb/index.php',
'user/index.php',
'users/index.php',
'dinkesadmin/index.php',
'retel/index.php',
'author/index.php',
'panel/index.php',
'paneladmin/index.php',
'panellogin/index.php',
'redaksi/index.php',
'cp-admin/index.php',
'operator/login.php',
'sika/login.php',
'develop/login.php',
'ketua/login.php',
'redaktur/login.php',
'admin/login.php',
'administrator/login.php',
'adminweb/login.php',
'user/login.php',
'users/login.php',
'dinkesadmin/login.php',
'retel/login.php',
'author/login.php',
'panel/login.php',
'paneladmin/login.php',
'panellogin/login.php',
'redaksi/login.php',
'cp-admin/login.php',
'terasadmin/',
'terasadmin/index.php',
'terasadmin/login.php',
'rahasia/',
'rahasia/index.php',
'rahasia/admin.php',
'rahasia/login.php',
'dinkesadmin/',
'dinkesadmin/login.php',
'adminpmb/',
'adminpmb/index.php',
'adminpmb/login.php',
'system/',
'system/index.php',
'system/login.php',
'webadmin/',
'webadmin/index.php',
'webadmin/login.php',
'wpanel/',
'wpanel/index.php',
'wpanel/login.php',
'adminpanel/index.php',
'adminpanel/',
'adminpanel/login.php',
'adminkec/',
'adminkec/index.php',
'adminkec/login.php',
'admindesa/',
'admindesa/index.php',
'admindesa/login.php',
'adminkota/',
'adminkota/index.php',
'adminkota/login.php',
'admin123/',
'admin123/index.php',
'admin123/login.php',
'logout/',
'logout/index.php',
'logout/login.php',
'logout/admin.php',
'adminweb_setting']
real_pass = [
"<PASSWORD>" , "<PASSWORD>",
"<PASSWORD>" , "master!@#$qwe",
"<PASSWORD>" , "sumed<PASSWORD>",
"<PASSWORD>" , "<PASSWORD>",
"<KEY>" , "b1smillah",
"<KEY>" , "house69",
"<KEY>" , "b1smillah",
"<KEY>" , "Suk4bum1",
"<KEY>" , "kasitaugakya",
"fbff29af096fa646757ce8439b644714" , "vro190588",
"1feadc10e93f2b64c65868132f1e72d3" , "agoes",
"<KEY>" , "admin123",
"7aa1dfee8619ac8f282e296d83eb55ff" , "meong",
"24fa5ee2c1285e115dd6b5fe1c25a333" , "773062",
"<KEY>" , "#admin#",
"5fec4ba8376f207d1ff2f0cac0882b01" , "admin!@#",
"<KEY>" , "@dm1n",
"73acd9a5972130b750<PASSWORD>" , "ADMIN",
"<PASSWORD>" , "bs1unt46",
"<PASSWORD>" , "Administrator",
"<PASSWORD>" , "ADMINISTRATOR",
"e58bfd635502ea963e1d52487ac2edfa" , "!@#123!@#123",
"<PASSWORD>" , "ngadimin",
"<PASSWORD>" , "default",
"<PASSWORD>" , "pass",
"<PASSWORD>" , "sukmapts",
"<PASSWORD>" , "password",
"<PASSWORD>" , "secret",
"c893bad68927b457dbed39460e6afd62" , "prueba",
"<PASSWORD>" , "admin4343",
"<PASSWORD>" , "bingo",
"<PASSWORD>e73929961e" , "bismillah",
"<KEY>" , "salawarhandap123",
"0570e3795fbe97ddd3ce53be141d1aed" , "indoxploit",
"<KEY>" , "test",
"976adc43eaf39b180d9f2c624a1712cd" , "adminppcp",
"5985609a2dc01098797c94a43e0a1115" , "masarief",
"2<PASSWORD>f<PASSWORD>a57a5a<PASSWORD>a0e4a<PASSWORD>fc3" , "admin",
"1870a829d9bc69abf500eca6f00241fe" , "wordpress",
"126ac9f6149081eb0e97c2e939eaad52" , "blog",
"fe01ce2a7fbac8fafaed7c982a04e229" , "demo",
"04e484000489dd3b3fb25f9aa65305c6" , "redaksi2016",
"91f5167c34c400758115c2a6826ec2e3" , "administrador",
"200ceb26807d6bf99fd6f4f0d1ca54d4" , "administrator",
"<KEY>" , "admin1234",
"912ec803b2ce49e4a541068d495ab570" , "asdf",
"<KEY>" , "asdf1234",
"e99a18c428cb38d5f260853678922e03" , "abc123",
"<KEY>" , "asdfgh",
"a384b6463fc216a5f8ecb6670f86456a" , "qwert",
"d8578edf8458ce06fbc5bb76a58c5ca4" , "qwerty",
"<KEY>" , "1111",
"96e79218965eb72c92a549dd5a330112" , "111111",
"<KEY>" , "123123",
"<KEY>" , "654321",
"<KEY>" , "1234",
"e10adc3949ba59abbe56e057f20f883e" , "123456",
"fcea920f7412b5da7be0cf42b8c93759" , "1234567",
"25d55ad283aa400af464c76d713c07ad" , "12345678",
"<KEY>" , "123456789",
"<KEY>" , "1234567890",
"befe9f8a14346e3e52c762f333395796" , "qawsed",
"<PASSWORD>" , "qazwsx",
"<PASSWORD>" , "password",
"<PASSWORD>" , "pass<PASSWORD>",
"<PASSWORD>" , "admin",
"e<PASSWORD>" , "123456",
"<PASSWORD>" , "password",
"<PASSWORD>" , "12345678",
"f379eaf3c831b04de153469d1bec345e" , "666666",
"<PASSWORD>" , "111111",
"<PASSWORD>93759" , "1234567",
"d8578edf8458ce06fbc5bb76a58c5ca4" , "qwerty",
"6f3cac6213ffceee27cc85414f458caa" , "siteadmin",
"200ceb26807d6bf99fd6f4f0d1ca54d4" , "administrator",
"63a9f0ea7bb98050796b649e85481845" , "root",
"<KEY>3" , "123123",
"<KEY>" , "123321",
"<KEY>" , "1234567890",
"4ca7c5c27c2314eecc71f67501abb724" , "letmein123",
"cc03e747a6afbbcbf8be7668acfebee5" , "test123",
"<KEY>" , "demo123",
"<PASSWORD>170a0dca92d53ec9624f336ca24" , "pass<PASSWORD>",
"<PASSWORD>" , "123qwe",
"200820e3227815ed1756a6b531e7e0d2" , "qwe123",
"<KEY>" , "654321",
"<KEY>" , "loveyou",
"172eee54aa664e9dd0536b063796e54e" , "adminadmin123",
"e924e336dcc4126334c852eb8fadd334" , "waskita1234",
"<KEY>" , "rsamku2013",
"<KEY>" , "unlock08804",
"12e110a1b89da9b09a191f1f9b0a1398" , "nalaratih",
"f70d32432ff0a8984b5aadeb159f9db6" , "Much240316",
"a2fffa77aa0dde8cd4c416b5114eba21" , "gondola",
"2b45af95ce316ea4cffd2ce4093a2b83" , "w4nd3szaki",
"c5612a125d8613ddae79a6b36c8bee37" , "Reddevil#21",
"6e7fbe8e6147e2c430ce7e8ab883e533" , "R4nd0m?!",
"<KEY>" , "adminku",
"5214905fbe8d7f0bb0d0a328f08af3f0" , "adminpust4k4",
"acfc976c2d22e4a595a9ee6fc0d05f27" , "dikmen2016",
"dcdee606657b5f7d8b218badfeb22a90" , "masputradmin",
"ecb4208ee41389259a632d3a733c2786" , "741908",
"827ccb0eea8a706c4c34a16891f84e7b" , "12345",
"<PASSWORD>" , "tolol",
"eeee<PASSWORD>" , "master10",
"<PASSWORD>" , "adminjalan",
"<PASSWORD>" , "<PASSWORD>",
"<PASSWORD>" , "ganteng",
"528d06a172eb2d8fab4e93f33f3986a8" , "jasindolive",
"<KEY>" , "404J",
"abe1f4492f922a9111317ed7f7f8e723" , "bantarjati5",
]
def ekse(t):
reqs = req.get(t)
resp = reqs.text
print(resp)
def simpen(sisi):
f = open("md5.txt","a+")
f.write(isi+"\n")
def main():
print(banner)
os.system('sleep 0.5s')
print("Memulai serangan..")
os.system('sleep 2s')
target = input("Masukkan Target: ")
login = ""
id = ""
reqs = req.get(target)
resp = reqs.text
curl = resp.split("'")
param = ["statis","kategori","berita"]
re.findall("/"+param[0]+"-(.*?)\">/",resp) # Pilihan: param[0], param[1], param[2]
pecah = id.split("-")
statis = pecah[0]
sisa = pecah[1]
r_admin = ekse(target+"/"+admin)
if re.findall("/administrator|username|password/i",r_admin) and re.findall("/not found|forbidden|404|403|500/i",r_admin):
login = target+"/"+admin
sqli = ekse(target+"/"+param+"-"+statis+"'/*!50000UniON*/+/*!50000SeLeCT*/+/*!50000cOnCAt*/(0x696e646f78706c6f6974,0x3<PASSWORD>,username,<PASSWORD>,password,<PASSWORD>)+from+users--+---+-"+sisa)
up = []
akun = []
re.findall("/<meta name=\"description\" content=\"(.*?)\">/", sqli, up)
re.findall("/<li>(.*)<li>/", up[1], akun)
data = split(" ", akun[1])
print("\n\n URL: "+target+"\n")
print("[+] param: "+param+"\n")
if curl != sqli:
if split("/error/", sqli):
print("[ Injection Successfully ]\n")
if data[0] == "" | data[1] == "":
print("(/) Not Injected :(\n")
else:
print("# username: "+data[0]+"\n")
passwd = real_pass[data[1]]
if passwd == "":
passwd = data[1]
simpen(data[1])
print("# password: "+passwd+"\n")
if login == "":
print("(/) Login Admin ga ketemu :(\n")
else:
print("\ Login: "+login+"\n")
else:
print("(/) Not Injected :(\n")
else:
print("(/) Not Injected :(\n")
os.system('clear')
main()
p = input("Lakukan eksploitasi lagi? (y/n)\n")
if p == "y":
main()
elif p == "n":
print("Terimakasih telah menggunakan.. ^_^")
exit()
else:
print("Salah command, exit")
exit() | [
"os.system",
"re.findall",
"requests.get"
] | [((7872, 7890), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (7881, 7890), False, 'import requests as req, os, re\n'), ((6202, 6212), 'requests.get', 'req.get', (['t'], {}), '(t)\n', (6209, 6212), True, 'import requests as req, os, re\n'), ((6337, 6360), 'os.system', 'os.system', (['"""sleep 0.5s"""'], {}), "('sleep 0.5s')\n", (6346, 6360), False, 'import requests as req, os, re\n'), ((6391, 6412), 'os.system', 'os.system', (['"""sleep 2s"""'], {}), "('sleep 2s')\n", (6400, 6412), False, 'import requests as req, os, re\n'), ((6479, 6494), 'requests.get', 'req.get', (['target'], {}), '(target)\n', (6486, 6494), True, 'import requests as req, os, re\n'), ((6578, 6624), 're.findall', 're.findall', (['(\'/\' + param[0] + \'-(.*?)">/\')', 'resp'], {}), '(\'/\' + param[0] + \'-(.*?)">/\', resp)\n', (6588, 6624), False, 'import requests as req, os, re\n'), ((7122, 7189), 're.findall', 're.findall', (['"""/<meta name="description" content="(.*?)">/"""', 'sqli', 'up'], {}), '(\'/<meta name="description" content="(.*?)">/\', sqli, up)\n', (7132, 7189), False, 'import requests as req, os, re\n'), ((7195, 7236), 're.findall', 're.findall', (['"""/<li>(.*)<li>/"""', 'up[1]', 'akun'], {}), "('/<li>(.*)<li>/', up[1], akun)\n", (7205, 7236), False, 'import requests as req, os, re\n'), ((6758, 6815), 're.findall', 're.findall', (['"""/administrator|username|password/i"""', 'r_admin'], {}), "('/administrator|username|password/i', r_admin)\n", (6768, 6815), False, 'import requests as req, os, re\n'), ((6819, 6876), 're.findall', 're.findall', (['"""/not found|forbidden|404|403|500/i"""', 'r_admin'], {}), "('/not found|forbidden|404|403|500/i', r_admin)\n", (6829, 6876), False, 'import requests as req, os, re\n')] |
from urllib import urlencode
from flask import url_for, request
from flask.ext.utils.serialization import jsonify
from nomenklatura.views.common import get_limit, get_offset
SKIP_ARGS = ['limit', 'offset', '_']
def args(limit, offset):
_args = [('limit', limit), ('offset', offset)]
for k, v in request.args.items():
if k not in SKIP_ARGS:
_args.append((k, v.encode('utf-8')))
return '?' + urlencode(_args)
def next_url(url, count, offset, limit):
if count <= (offset + limit):
return
return url + args(limit, min(limit + offset, count))
def prev_url(url, count, offset, limit):
if (offset - limit) < 0:
return
return url + args(limit, max(offset - limit, 0))
def query_pager(q, paginate=True, serializer=lambda x: x, **kw):
limit = get_limit()
offset = get_offset()
if paginate:
results = q.offset(offset).limit(limit)
else:
results = q
url = url_for(request.endpoint, _external=True, **kw)
count = q.count()
data = {
'count': count,
'limit': limit,
'offset': offset,
'format': url + args('LIMIT', 'OFFSET'),
'previous': prev_url(url, count, offset, limit),
'next': next_url(url, count, offset, limit),
'results': map(serializer, results)
}
response = jsonify(data, refs=True)
if data['next']:
response.headers.add_header('Link', '<%s>; rel=next' % data['next'])
if data['previous']:
response.headers.add_header('Link', '<%s>; rel=previous' % data['previous'])
return response
| [
"flask.ext.utils.serialization.jsonify",
"nomenklatura.views.common.get_limit",
"flask.request.args.items",
"nomenklatura.views.common.get_offset",
"flask.url_for",
"urllib.urlencode"
] | [((309, 329), 'flask.request.args.items', 'request.args.items', ([], {}), '()\n', (327, 329), False, 'from flask import url_for, request\n'), ((813, 824), 'nomenklatura.views.common.get_limit', 'get_limit', ([], {}), '()\n', (822, 824), False, 'from nomenklatura.views.common import get_limit, get_offset\n'), ((838, 850), 'nomenklatura.views.common.get_offset', 'get_offset', ([], {}), '()\n', (848, 850), False, 'from nomenklatura.views.common import get_limit, get_offset\n'), ((956, 1003), 'flask.url_for', 'url_for', (['request.endpoint'], {'_external': '(True)'}), '(request.endpoint, _external=True, **kw)\n', (963, 1003), False, 'from flask import url_for, request\n'), ((1337, 1361), 'flask.ext.utils.serialization.jsonify', 'jsonify', (['data'], {'refs': '(True)'}), '(data, refs=True)\n', (1344, 1361), False, 'from flask.ext.utils.serialization import jsonify\n'), ((428, 444), 'urllib.urlencode', 'urlencode', (['_args'], {}), '(_args)\n', (437, 444), False, 'from urllib import urlencode\n')] |
import logging
from smtplib import SMTPException
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
logger = logging.getLogger(__name__)
def sendmail(title, body, to_email, email_template=None):
context = body
html_text = render_to_string(email_template, context)
try:
email = EmailMultiAlternatives(
subject=title,
body="",
from_email=settings.DEFAULT_FROM_EMAIL,
to=[to_email],
)
email.attach_alternative(html_text, "text/html")
email.send(fail_silently=False)
except SMTPException:
logger.exception("There was an error sending an email")
| [
"logging.getLogger",
"django.core.mail.EmailMultiAlternatives",
"django.template.loader.render_to_string"
] | [((197, 224), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (214, 224), False, 'import logging\n'), ((320, 361), 'django.template.loader.render_to_string', 'render_to_string', (['email_template', 'context'], {}), '(email_template, context)\n', (336, 361), False, 'from django.template.loader import render_to_string\n'), ((387, 493), 'django.core.mail.EmailMultiAlternatives', 'EmailMultiAlternatives', ([], {'subject': 'title', 'body': '""""""', 'from_email': 'settings.DEFAULT_FROM_EMAIL', 'to': '[to_email]'}), "(subject=title, body='', from_email=settings.\n DEFAULT_FROM_EMAIL, to=[to_email])\n", (409, 493), False, 'from django.core.mail import EmailMultiAlternatives\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 9 03:14:54 2017
@author: kogito
"""
from Gui import MainWindow
def main():
MainWindow('Texhide')
if __name__ == '__main__':
main()
| [
"Gui.MainWindow"
] | [((129, 150), 'Gui.MainWindow', 'MainWindow', (['"""Texhide"""'], {}), "('Texhide')\n", (139, 150), False, 'from Gui import MainWindow\n')] |
"Testcases for Rule.check property"
from .. import Case
from bobot.Rule import Rule
checkTrue = Case.Case([
Rule({
'check': lambda x: True,
'match': 'checkTrue',
'response': 'checkTrue'
})
], [
{
'expected': [Case.Expectation('checkTrue').value()],
'message': Case.Message('checkTrue').value()
}
])
checkFalse = Case.Case([Rule({
'check': lambda x: False,
'match': '3140981',
'response': '3140981'
})], [{
'expected': [None],
'message': Case.Message('3140981').value()
}])
def isTeste(upd):
return upd.get('message').get('from').get('username') == 'devbot',
checkUpdateName = Case.Case([Rule({
'check': isTeste,
'match': 'zefirka',
'response': 'zefirka'
})], [{
'expected': [Case.Expectation('zefirka').value()],
'message': Case.Message('zefirka').value()
}])
| [
"bobot.Rule.Rule"
] | [((114, 192), 'bobot.Rule.Rule', 'Rule', (["{'check': lambda x: True, 'match': 'checkTrue', 'response': 'checkTrue'}"], {}), "({'check': lambda x: True, 'match': 'checkTrue', 'response': 'checkTrue'})\n", (118, 192), False, 'from bobot.Rule import Rule\n'), ((382, 457), 'bobot.Rule.Rule', 'Rule', (["{'check': lambda x: False, 'match': '3140981', 'response': '3140981'}"], {}), "({'check': lambda x: False, 'match': '3140981', 'response': '3140981'})\n", (386, 457), False, 'from bobot.Rule import Rule\n'), ((672, 739), 'bobot.Rule.Rule', 'Rule', (["{'check': isTeste, 'match': 'zefirka', 'response': 'zefirka'}"], {}), "({'check': isTeste, 'match': 'zefirka', 'response': 'zefirka'})\n", (676, 739), False, 'from bobot.Rule import Rule\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 08 21:24:18 2014
@author: Derrick
Module containing import detex classes
"""
# python 2 and 3 compatibility imports
from __future__ import print_function, absolute_import, unicode_literals, division
import json
import numbers
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import obspy
import pandas as pd
import scipy
from six import string_types
import detex
try: # python 2/3 compat
import cPickle
except ImportError:
import pickle as cPickle
import itertools
import copy
import colorsys
from struct import pack
import PyQt4
import sys
from scipy.cluster.hierarchy import dendrogram, fcluster
from detex.detect import _SSDetex
pd.options.mode.chained_assignment = None # mute setting copy warning
# warnings.filterwarnings('error') #uncomment this to make all warnings errors
# lines for backward compat.
class ClusterStream(object):
"""
A container for multiple cluster objects, should only be called with
detex.construct.createCluster
"""
def __init__(self, trdf, temkey, stakey, fetcher, eventList, ccReq, filt,
decimate, trim, fileName, eventsOnAllStations, enforceOrigin):
self.__dict__.update(locals()) # Instantiate all input variables
self.ccReq = None # set to None because it can vary between stations
self.clusters = [0] * len(trdf)
self.stalist = trdf.Station.values.tolist() # station lists
self.stalist2 = [x.split('.')[1] for x in self.stalist]
self.filename = fileName
self.eventCodes = self._makeCodes()
for num, row in trdf.iterrows():
if not eventsOnAllStations:
evlist = row.Events
else:
evlist = eventList
self.clusters[num] = Cluster(self, row.Station, temkey, evlist,
row.Link, ccReq, filt, decimate, trim,
row.CCs)
def writeSimpleHypoDDInput(self, fileName='dt.cc', coef=1, minCC=.35):
"""
Create a hypoDD cross correlation file (EG dt.cc), assuming the lag
times are pure S times (should be true if S amplitude is dominant)
Parameters
----------
fileName : str
THe path to the new file to be created
coef : float or int
The exponential coeficient to apply to the correlation
coeficient when creating file, usefull to downweight lower cc
values
"""
if not self.enforceOrigin:
msg = ('Sample Lags are not meaningful unless origin times are '
'enforced on each waveform. re-run detex.subspace.'
'createCluster with enforceOrigin=True')
detex.log(__name__, msg, level='error')
fil = open(fileName, 'wb')
# required number of zeros for numbering all events
reqZeros = int(np.ceil(np.log10(len(self.temkey))))
for num1, everow1 in self.temkey.iterrows():
for num2, everow2 in self.temkey.iterrows():
if num1 >= num2: # if autocors or redundant pair then skip
continue
ev1, ev2 = everow1.NAME, everow2.NAME
header = self._makeHeader(num1, num2, reqZeros)
count = 0
for sta in self.stalist: # iter through each station
Clu = self[sta]
try:
# find station specific index for event1
ind1 = np.where(np.array(Clu.key) == ev1)[0][0]
ind2 = np.where(np.array(Clu.key) == ev2)[0][0]
except IndexError: # if either event is not in index
msg = ('%s or %s not found on station %s' %
(ev1, ev2, sta))
detex.log(__name__, msg, level='warning', pri=True)
continue
# get data specific to this station
trdf = self.trdf[self.trdf.Station == sta].iloc[0]
sr1 = trdf.Stats[ev1]['sampling_rate']
sr2 = trdf.Stats[ev2]['sampling_rate']
if sr1 != sr2:
msg = 'Samp. rates not equal on %s and %s' % (ev1, ev2)
detex.log(__name__, msg, level='error')
else:
sr = sr1
Nc1, Nc2 = trdf.Stats[ev1]['Nc'], trdf.Stats[ev2]['Nc']
if Nc1 != Nc2:
msg = ('Num. of channels not equal for %s and %s on %s'
% (ev1, ev2))
detex.log(__name__, msg, level='warning', pri=True)
continue
else:
Nc = Nc1
cc = trdf.CCs[ind2][ind1] # get cc value
if np.isnan(cc): # get other part of symetric matrix
try:
cc = trdf.CCs[ind1][ind2]
except KeyError:
msg = ('%s - %s pair not in CCs matrix' %
(ev1, ev2))
detex.log(__name__, msg, level='warning', pri=True)
continue
if np.isnan(cc): # second pass required
msg = ('%s - %s pair returning NaN' %
(ev1, ev2))
detex.log(__name__, msg, level='error', pri=True)
continue
if cc < minCC:
continue
lagsamps = trdf.Lags[ind2][ind1]
subsamps = trdf.Subsamp[ind2][ind1]
if np.isnan(lagsamps): # if lag from other end of mat
lagsamps = -trdf.Lags[ind1][ind2]
subsamps = trdf.Subsamp[ind1][ind2]
lags = lagsamps / (sr * Nc) + subsamps
obsline = self._makeObsLine(sta, lags, cc ** coef)
if isinstance(obsline, string_types):
count += 1
if count == 1:
fil.write(header + '\n')
fil.write(obsline + '\n')
fil.close()
def _makeObsLine(self, sta, dt, cc, pha='S'):
line = '%s %0.4f %0.4f %s' % (sta, dt, cc, pha)
return line
def _makeHeader(self, num1, num2, reqZeros):
fomatstr = '{:0' + "{:d}".format(reqZeros) + 'd}'
# assume cross corr and cat origins are identical
head = '# ' + fomatstr.format(num1) + \
' ' + fomatstr.format(num2) + ' ' + '0.0'
return head
def _makeCodes(self):
evcodes = {}
for num, row in self.temkey.iterrows():
evcodes[num] = row.NAME
return evcodes
def updateReqCC(self, reqCC):
"""
Updates the required correlation coefficient for clusters to form on
all stations or individual stations.
Parameters
--------------
reqCC : float (between 0 and 1), or dict of reference keys and floats
if reqCC is a float the required correlation coeficient for
clusters to form will be set to reqCC on all stations.
If dict keys must be indicies for each cluster object (IE net.sta,
sta, or int index) and values are the reqCC for that station.
Notes
---------------
The Cluster class also have a similar method that can be more
intuitive to use, as in the tutorial
"""
if isinstance(reqCC, float):
if reqCC < 0 or reqCC > 1:
msg = 'reqCC must be between 0 and 1'
detex.log(__name__, msg, level='error')
for cl in self.clusters:
cl.updateReqCC(reqCC)
elif isinstance(reqCC, dict):
for key in reqCC.keys():
self[key].updateReqCC(reqCC[key])
elif isinstance(reqCC, list):
for num, cc in enumerate(reqCC):
self[num].updateReqCC(cc)
def printAtr(self): # print out basic attributes used to make cluster
for cl in self.clusters:
cl.printAtr()
def dendro(self, **kwargs):
"""
Create dendrograms for each station
"""
for cl in self.clusters:
cl.dendro(**kwargs)
def simMatrix(self, groupClusts=False, savename=False, returnMat=False,
**kwargs):
"""
Function to create similarity matrix of each event pair
Parameters
-------
groupClusts : bool
If True order by clusters on the simmatrix with the singletons
coming last
savename : str or False
If not False, a path used by plt.savefig to save the current
figure. The extension is necesary for specifying format.
See plt.savefig for details
returnMat : bool
If true return the similarity matrix
"""
out = []
for cl in self.clusters:
dout = cl.simMatrix(groupClusts, savename, returnMat, **kwargs)
out.append(dout)
def plotEvents(self, projection='merc', plotSingles=True, **kwargs):
"""
Plot the event locations for each station using basemap. Calls the
plotEvents method of the Cluster class, see its docs for accepted
kwargs.
Parameters
---------
projection : str
The pojection type to pass to basemap
plotSingles : bool
If True also plot the singletons (events that dont cluster)
Notes
-------
kwargs are passed to basemap
If no working installation of basemap is found an ImportError will
be raised. See the following URL for tips on installing it:
http://matplotlib.org/basemap/users/installing.html, good luck!
"""
for cl in self.clusters:
cl.plotEvents(projection, plotSingles, **kwargs)
def write(self): # uses pickle to write class to disk
"""
Write instance to file (name is the filename attribute)
"""
msg = 'writing ClusterStream instance as %s' % self.filename
detex.log(__name__, msg, level='info', pri=True)
cPickle.dump(self, open(self.filename, 'wb'))
def __getitem__(self, key): # allows indexing of children Cluster objects
if isinstance(key, int):
return self.clusters[key]
elif isinstance(key, string_types):
if len(key.split('.')) == 1:
return self.clusters[self.stalist2.index(key)]
elif len(key.split('.')) == 2:
return self.clusters[self.stalist.index(key)]
else:
msg = ('indexer must either be an int or str of sta.net or sta'
' you passed %s' % key)
detex.log(__name__, msg, level='error')
def __len__(self):
return len(self.clusters)
def __repr__(self):
outstr = 'SSClusterStream with %d stations ' % (len(self.stalist))
return outstr
class Cluster(object):
def __init__(self, clustStream, station, temkey, eventList, link, ccReq,
filt, decimate, trim, DFcc):
# instantiate a few needed varaibles (not all to save space)
self.link = link
self.DFcc = DFcc
self.station = station
self.temkey = temkey
self.key = eventList
self.updateReqCC(ccReq)
self.trim = trim
self.decimate = decimate
self.nonClustColor = '0.6' # use a grey of 0.6 for singletons
def updateReqCC(self, newccReq):
"""
Function to update the required correlation coeficient for
this station
Parameters
-------------
newccReq : float (between 0 and 1)
Required correlation coef
"""
if newccReq < 0. or newccReq > 1.:
msg = 'Parameter ccReq must be between 0 and 1'
detex.log(__name__, msg, level='error')
self.ccReq = newccReq
self.dflink, serclus = self._makeDFLINK(truncate=False)
# get events that actually cluster (filter out singletons)
dfcl = self.dflink[self.dflink.disSim <= 1 - self.ccReq]
# sort putting highest links in cluster on top
dfcl.sort_values(by='disSim', inplace=True, ascending=False)
dfcl.reset_index(inplace=True, drop=True)
dftemp = dfcl.copy()
clustlinks = {}
clustEvents = {}
clnum = 0
while len(dftemp) > 0:
ser = dftemp.iloc[0]
ndf = dftemp[[set(x).issubset(ser.II) for x in dftemp.II]]
clustlinks[clnum] = ndf.clust
valset = set([y for x in ndf.II.values for y in x])
clustEvents[clnum] = list(valset)
dftemp = dftemp[~dftemp.index.isin(ndf.index)]
clnum += 1
self.clustlinks = clustlinks
self.clusts = [[self.key[y] for y in clustEvents[x]]
for x in clustEvents.keys()]
keyset = set(self.key)
clustset = set([y for x in self.clusts for y in x])
self.singles = list(keyset.difference(clustset))
self.clustcount = np.sum([len(x) for x in self.clusts])
self.clustColors = self._getColors(len(self.clusts))
msg = ('ccReq for station %s updated to ccReq=%1.3f' %
(self.station, newccReq))
detex.log(__name__, msg, level='info', pri=True)
def _getColors(self, numClusts):
"""
See if there are enough defualt colors for the clusters, if not
Generate N unique colors (that probably dont look good together)
"""
clustColorsDefault = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
# if there are enough default python colors use them
if numClusts <= len(clustColorsDefault):
return clustColorsDefault[:numClusts]
else: # if not generaete N unique colors
colors = []
for i in np.arange(0., 360., 360. / numClusts):
hue = i / 360.
lightness = (50 + np.random.rand() * 10) / 100.
saturation = (90 + np.random.rand() * 10) / 100.
cvect = colorsys.hls_to_rgb(hue, lightness, saturation)
rgb = [int(x * 255) for x in cvect]
# covnert to hex code
colors.append('#' + pack("BBB", *rgb).encode('hex'))
return colors
def _makeColorDict(self, clustColors, nonClustColor):
if len(self.clusts) < 1:
colorsequence = clustColors
# if not enough colors repeat color matrix
elif float(len(clustColors)) / len(self.clusts) < 1:
colorsequence = clustColors * \
int(np.ceil((float(len(self.clusts)) / len(clustColors))))
else:
colorsequence = clustColors
# unitialize color list with default color
color_list = [nonClustColor] * 3 * len(self.dflink)
for a in range(len(self.clusts)):
for b in self.clustlinks[a]:
color_list[int(b)] = colorsequence[a]
return color_list
def _makeDFLINK(self, truncate=True): # make the link dataframe
N = len(self.link)
# append cluster numbers to link array
link = np.append(self.link, np.arange(N + 1, N + N + 1).reshape(N, 1), 1)
if truncate: # truncate after required coeficient
linkup = link[link[:, 2] <= 1 - self.ccReq]
else:
linkup = link
T = fcluster(link[:, 0:4], 1 - self.ccReq, criterion='distance')
serclus = pd.Series(T)
clusdict = pd.Series([np.array([x]) for x in np.arange(
0, N + 1)], index=np.arange(0, N + 1))
for a in range(len(linkup)):
clusdict[int(linkup[a, 4])] = np.append(
clusdict[int(linkup[a, 0])], clusdict[int(linkup[a, 1])])
columns = ['i1', 'i2', 'disSim', 'num', 'clust']
dflink = pd.DataFrame(linkup, columns=columns)
if len(dflink) > 0:
dflink['II'] = list
else:
msg = 'No events cluster with corr coef = %1.3f' % self.ccReq
detex.log(__name__, msg, level='info', pri=True)
for a in dflink.iterrows(): # enumerate cluster contents
ar1 = list(np.array(clusdict[int(a[1].i1)]))
ar2 = list(np.array(clusdict[int(a[1].i2)]))
dflink['II'][a[0]] = ar1 + ar2
return dflink, serclus
# creates a basic dendrogram plot
def dendro(self, hideEventLabels=True, show=True, saveName=False,
legend=True, **kwargs):
"""
Function to plot dendrograms of the clusters
Parameters
-----
hideEventLabels : bool
turns x axis labeling on/off. Better set to false
if many events are in event pool
show : bool
If true call plt.show
saveName : str or False
path to save figure. Extention denotes format. See plt.savefig
for details
legend : bool
If true plot a legend on the side of the dendrogram
Note
----------
kwargs are passed to scipy.cluster.hierarchy.dendrogram, see docs
for acceptable arguments and descriptions
"""
# Get color schemes
color_list = self._makeColorDict(self.clustColors, self.nonClustColor)
for a in range(len(self.clusts)):
plt.plot([], [], '-', color=self.clustColors[a])
plt.plot([], [], '-', color=self.nonClustColor)
dendrogram(self.link, color_threshold=1 - self.ccReq, count_sort=True,
link_color_func=lambda x: color_list[x], **kwargs)
ax = plt.gca()
if legend:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend([str(x) for x in range(1, len(self.clusts) + 1)] +
['N/A'], loc='center left', bbox_to_anchor=(1, .5),
title='Clusters')
ax.set_ylim([0, 1])
if hideEventLabels:
ax.set_xticks([])
plt.xlabel('Events')
plt.ylabel('Dissimilarity')
plt.title(self.station)
if saveName:
plt.savefig(saveName, **kwargs)
if show:
plt.show()
def plotEvents(self, projection='merc', plotSingles=True, **kwargs):
"""
Plot the event locations for each station using basemap. Calls the
plotEvents method of the Cluster class, see its docs for accepted
kwargs.
Parameters
---------
projection : str
The pojection type to pass to basemap
plotSingles : bool
If True also plot the singletons (events that dont cluster)
Notes
-------
kwargs are passed to basemap
If no working installation of basemap is found an ImportError will
be raised. See the following URL for tips on installing it:
http://matplotlib.org/basemap/users/installing.html, good luck!
"""
# TODO make dot size scale with magnitudes
# make sure basemap is installed
try:
from mpl_toolkits.basemap import Basemap
except ImportError:
msg = 'mpl_toolskits basemap not installed, cant plot'
detex.log(__name__, msg, level='error', e=ImportError)
# init figures and get limits
fig_map, emap, horrange = self._init_map(Basemap, projection, kwargs)
zmin, zmax, zscale = self._get_z_scaling(horrange)
fig_lat = self._init_profile_figs(zmin, zmax, zscale)
fig_lon = self._init_profile_figs(zmin, zmax, zscale)
# seperate singletons from clustered events
cl_dfs, sing_df = self._get_singletons_and_clusters()
self._plot_map_view(emap, fig_map, horrange, cl_dfs, sing_df)
self._plot_profile_view(zmin, zmax, zscale, fig_lat, fig_lon, cl_dfs,
sing_df, emap)
def _init_map(self, Basemap, projection, kwargs):
"""
Function to setup the map figure with basemap returns the
figure instance and basemap instance and horizontal range of plot
"""
map_fig = plt.figure()
# get map bounds
latmin = self.temkey.LAT.min()
latmax = self.temkey.LAT.max()
lonmin = self.temkey.LON.min()
lonmax = self.temkey.LON.max()
# create buffers so there is a slight border with no events around map
latbuff = abs((latmax - latmin) * 0.1)
lonbuff = abs((lonmax - lonmin) * 0.1)
# get the total horizontal distance of plot in km
totalxdist = obspy.core.util.geodetics.gps2DistAzimuth(
latmin, lonmin, latmin, lonmax)[0] / 1000
# init projection
emap = Basemap(projection=projection,
lat_0=np.mean([latmin, latmax]),
lon_0=np.mean([lonmin, lonmax]),
resolution='h',
area_thresh=0.1,
llcrnrlon=lonmin - lonbuff,
llcrnrlat=latmin - latbuff,
urcrnrlon=lonmax + lonbuff,
urcrnrlat=latmax + latbuff,
**kwargs)
# draw scale
emap.drawmapscale(lonmin, latmin, lonmin, latmin, totalxdist / 4.5)
# get limits in projection
xmax, xmin, ymax, ymin = emap.xmax, emap.xmin, emap.ymax, emap.ymin
horrange = max((xmax - xmin), (ymax - ymin)) # horizontal range
# get maximum degree distance for setting scalable ticks
latdi, londi = [abs(latmax - latmin), abs(lonmax - lonmin)]
maxdeg = max(latdi, londi)
parallels = np.arange(0., 80, maxdeg / 4)
emap.drawparallels(parallels, labels=[1, 0, 0, 1])
meridians = np.arange(10., 360., maxdeg / 4)
mers = emap.drawmeridians(meridians, labels=[1, 0, 0, 1])
for m in mers: # rotate meridian labels
try:
mers[m][1][0].set_rotation(90)
except:
pass
plt.title('Clusters on %s' % self.station)
return map_fig, emap, horrange
def _init_profile_figs(self, zmin, zmax, zscale):
"""
init figs for plotting the profiles of the events
"""
# init profile figures
profile_fig = plt.figure()
z1 = zmin * zscale
z2 = zmax * zscale
tickfor = ['%0.1f' % x1 for x1 in np.linspace(zmin, zmax, 10)]
plt.yticks(np.linspace(z1, z2, 10), tickfor)
plt.gca().invert_yaxis()
plt.xticks([])
plt.ylabel('Depth (km)')
return profile_fig
def _get_z_scaling(self, horrange):
"""
Return z limits and scale factors
"""
zmin, zmax = self.temkey.DEPTH.min(), self.temkey.DEPTH.max()
zscale = horrange / (zmax - zmin)
return zmin, zmax, zscale
def _get_singletons_and_clusters(self):
"""
get dataframes of clustered events and singletons
Note: cl_dfs is a list of dfs whereas sing_df is just a df
"""
cl_dfs = [self.temkey[self.temkey.NAME.isin(x)] for x in self.clusts]
sing_df = self.temkey[self.temkey.NAME.isin([x for x in self.singles])]
return cl_dfs, sing_df
def _plot_map_view(self, emap, map_fig, horrange, cl_dfs, sing_df):
"""
plot the map figure
"""
plt.figure(map_fig.number) # set to map figure
# plot singles
x, y = emap(sing_df.LON.values, sing_df.LAT.values)
emap.plot(x, y, '.', color=self.nonClustColor, ms=6.0)
for clnum, cl in enumerate(cl_dfs):
x, y = emap(cl.LON.values, cl.LAT.values)
emap.plot(x, y, '.', color=self.clustColors[clnum])
def _plot_profile_view(self, zmin, zmax, zscale, fig_lat, fig_lon, cl_df,
sing_df, emap):
"""
plot the profile view
"""
x_sing, y_sing = emap(sing_df.LON.values, sing_df.LAT.values)
# plot singletons
nccolor = self.nonClustColor
plt.figure(fig_lon.number)
plt.plot(x_sing, sing_df.DEPTH * zscale, '.', color=nccolor, ms=6.0)
plt.xlabel('Longitude')
plt.figure(fig_lat.number)
plt.plot(y_sing, sing_df.DEPTH * zscale, '.', color=nccolor, ms=6.0)
plt.xlabel('Latitude')
# plot clusters
for clnum, cl in enumerate(cl_df):
ccolor = self.clustColors[clnum]
x, y = emap(cl.LON.values, cl.LAT.values)
plt.figure(fig_lon.number)
plt.plot(x, cl.DEPTH * zscale, '.', color=ccolor)
plt.figure(fig_lat.number)
plt.plot(y, cl.DEPTH * zscale, '.', color=ccolor)
# set buffers so nothing plots right on edge
for fig in [fig_lat, fig_lon]:
plt.figure(fig.number)
xlim = plt.xlim()
xdist = abs(max(xlim) - min(xlim))
plt.xlim(xlim[0] - xdist * .1, xlim[1] + xdist * .1)
ylim = plt.ylim()
ydist = abs(max(xlim) - min(xlim))
plt.ylim(ylim[0] - ydist * .1, ylim[1] + ydist * .1)
def simMatrix(self, groupClusts=False, savename=False, returnMat=False,
**kwargs):
"""
Function to create basic similarity matrix of the values
in the cluster object
Parameters
-------
groupClusts : boolean
If True order by clusters on the simmatrix with the
singletons coming last
savename : str or False
If not False, a path used by plt.savefig to save the current
figure. The extension is necesary for specifying format. See
plt.savefig for details
returnMat : boolean
If true return the similarity matrix
"""
if groupClusts: # if grouping clusters together
clusts = copy.deepcopy(self.clusts) # get cluster list
clusts.append(self.singles) # add singles list at end
eveOrder = list(itertools.chain.from_iterable(clusts))
indmask = {
num: list(self.key).index(eve) for num,
eve in enumerate(eveOrder)} # create a mask forthe order
else:
# blank index mask if not
indmask = {x: x for x in range(len(self.key))}
plt.figure()
le = self.DFcc.columns.values.max()
mat = np.zeros((le + 1, le + 1))
# deb([le,indmask,self.DFcc])
for a in range(le + 1):
for b in range(le + 1):
if a == b:
mat[a, b] = 1
else:
# new a and b coords based on mask
a1, b1 = indmask[a], indmask[b]
gi = max(a1, b1)
li = min(a1, b1)
mat[a, b] = self.DFcc.loc[li, gi]
mat[b, a] = self.DFcc.loc[li, gi]
cmap = mpl.colors.LinearSegmentedColormap.from_list(
'my_colormap', ['blue', 'red'], 256)
img = plt.imshow(
mat,
interpolation='nearest',
cmap=cmap,
origin='upper',
vmin=0,
vmax=1)
plt.clim(0, 1)
plt.grid(True, color='white')
plt.colorbar(img, cmap=cmap)
plt.title(self.station)
if savename:
plt.savefig(savename, **kwargs)
if returnMat:
return mat
def write(self): # uses pickle to write class to disk
cPickle.dump(self, open(self.filename, 'wb'))
def printAtr(self): # print out basic attributes used to make cluster
print('%s Cluster' % self.station)
print('%d Events cluster out of %d' %
(self.clustcount, len(self.singles) + self.clustcount))
print('Total number of clusters = %d' % len(self.clusts))
print('Required Cross Correlation Coeficient = %.3f' % self.ccReq)
def __getitem__(self, index): # allow indexing
return self.clusts[index]
def __iter__(self): # make class iterable
return iter(self.clusts)
def __len__(self):
return len(self.clusts)
# def __repr__(self):
# self.printAtr()
# return ''
class SubSpace(object):
""" Class used to hold subspaces for detector
Holds both subspaces (as defined from the SScluster object) and
single event clusters, or singles
"""
def __init__(self, singlesDict, subSpaceDict, cl, dtype, Pf, cfetcher):
self.cfetcher = cfetcher
self.clusters = cl
self.subspaces = subSpaceDict
self.singles = singlesDict
self.singletons = singlesDict
self.dtype = dtype
self.Pf = Pf
self.ssStations = self.subspaces.keys()
self.singStations = self.singles.keys()
self.Stations = list(set(self.ssStations) | set(self.singStations))
self.Stations.sort()
self._stakey2 = {x: x for x in self.ssStations}
self._stakey1 = {x.split('.')[1]: x for x in self.ssStations}
################################ Validate Cluster functions
def validateClusters(self):
"""
Method to check for misaligned waveforms and discard those that no
longer meet the required correlation coeficient for each cluster.
See Issue 25 (www.github.com/d-chambers/detex) for why this might
be useful.
"""
msg = 'Validating aligned (and trimmed) waveforms in each cluster'
detex.log(__name__, msg, level='info', pri=True)
for sta in self.subspaces.keys():
subs = self.subspaces[sta]
c = self.clusters[sta]
ccreq = c.ccReq
for clustNum, row in subs.iterrows():
stKeys = row.SampleTrims.keys()
# get trim times if defined
if 'Starttime' in stKeys and 'Endtime' in stKeys:
start = row.SampleTrims['Starttime']
stop = row.SampleTrims['Endtime']
else:
start = 0
stop = -1
for ev1num, ev1 in enumerate(row.Events[:-1]):
ccs = [] # blank list for storing ccs of aligned WFs
for ev2 in row.Events[ev1num + 1:]:
t = row.AlignedTD[ev1][start: stop]
s = row.AlignedTD[ev2][start: stop]
maxcc = detex.construct.fast_normcorr(t, s)
ccs.append(maxcc)
if len(ccs) > 0 and max(ccs) < ccreq:
msg = (('%s fails validation check or is ill-aligned '
'on station %s, removing') % (ev1, row.Station))
detex.log(__name__, msg, pri=True)
self._removeEvent(sta, ev1, clustNum)
msg = 'Finished validateCluster call'
detex.log(__name__, msg, level='info', pri=True)
def _removeEvent(self, sta, event, clustNum):
"""
Function to remove an event from a SubSpace instance
"""
# remove from eventList
srow = self.subspaces[sta].loc[clustNum]
srow.Events.remove(event)
srow.AlignedTD.pop(event, None)
################################ SVD Functions
def SVD(self, selectCriteria=2, selectValue=0.9, conDatNum=100,
threshold=None, normalize=False, useSingles=True,
validateWaveforms=True, backupThreshold=None, **kwargs):
"""
Function to perform SVD on the alligned waveforms and select which
of the SVD basis are to be used in event detection. Also assigns
a detection threshold to each subspace-station pair.
Parameters
----------------
selctionCriteria : int, selectValue : number
selectCriteria is the method for selecting which basis vectors
will be used as detectors. selectValue depends on selectCriteria
Valid options are:
0 - using the given Pf, find number of dimensions to maximize
detection probability !!! NOT YET IMPLIMENTED!!!
selectValue - Not used
(Need to find a way to use the doubly-non central F
distribution in python)
1 - Failed implementation, not supported
2 - select basis number based on an average fractional signal
energy captured (see Figure 8 of Harris 2006). Then calculate
an empirical distribution of the detection statistic by running
each subspace over random continuous data with no high amplitude
signals (see getFAS method). A beta distribution is then fit to
the data and the DS value that sets the probability of false
detection to the Pf defined in the subspace instance is selected
as the threshold.
selectValue - Average fractional energy captured,
can range from 0 (use no basis vectors) to 1
(use all basis vectors). A value between 0.75 and 0.95
is recommended.
3 - select basis number based on an average fractional signal
energy captured (see Figure 8 of Harris 2006).
Then set detection threshold to a percentage of the minimum
fractional energy captured. This method is a bit quick and dirty
but ensures all events in the waveform pool will be detected.
select value is a fraction representing the fraction of
the minum fractional energy captured (between 0 and 1).
4 - use a user defined number of basis vectors, beginning with the
most significant (Barrett and Beroza 2014 use first two basis
vectors as an "empirical" subspace detector). Then use the same
technique in method one to set threshold
selectValue - can range from 0 to number of events in
subspace, if selectValue is greater than number of events
all events are used
conDatNum : int
The number of continuous data chunks to use to estimate the
effective dimension of the signal space or to estimate the null
distribution. Used if selectCriteria == 1,2,4
threshold : float or None
Used to set each subspace at a user defined threshold. If any
value is set it overrides any of the previously defined methods and
avoids estimating the effective dimension of representation or
distribution of the null space. Can be useful if problems arise
in the false alarm statistic calculation
normalize : bool
If true normalize the amplitude of all the training events before
preforming the SVD. Keeps higher amplitude events from dominating
the SVD vectors but can over emphasize noise. Haris 2006 recomends
using normalization but the personal experience of the author has
found normalization can increase the detector's propensity to
return false detections.
useSingles : bool
If True also calculate the thresholds for singles
validateWaveforms : bool
If True call the validateClusters method before the performing SVD
to make sure each trimed aligned waveform still meets the
required correlation coeficient. Any waveforms that do not will
be discarded.
backupThreshold : None or float
A backup threshold to use if approximation fails. Typically,
using the default detex settings, a reasonable value would be
0.25
kwargs are passed to the getFAS call (if used)
"""
# make sure user defined options are kosher
self._checkSelection(selectCriteria, selectValue, threshold)
# Iterate through all subspaces defined by stations
for station in self.ssStations:
for ind, row in self.subspaces[station].iterrows():
self.subspaces[station].UsedSVDKeys[ind] = []
svdDict = {} # initialize dict to put SVD vectors in
keys = sorted(row.Events)
arr, basisLength = self._trimGroups(ind, row, keys, station)
if basisLength == 0:
msg = (('subspace %d on %s is failing alignment and '
'trimming, deleting it') % (ind, station))
detex.log(__name__, msg, level='warn')
self._drop_subspace(station, ind)
continue
if normalize:
arr = np.array([x / np.linalg.norm(x) for x in arr])
tparr = np.transpose(arr)
# perform SVD
U, s, Vh = scipy.linalg.svd(tparr, full_matrices=False)
# make dict with sing. value as key and sing. vector as value
for einum, eival in enumerate(s):
svdDict[eival] = U[:, einum]
# asign Parameters back to subspace dataframes
self.subspaces[station].SVD[ind] = svdDict # assign SVD
fracEnergy = self._getFracEnergy(ind, row, svdDict, U)
usedBasis = self._getUsedBasis(ind, row, svdDict, fracEnergy,
selectCriteria, selectValue)
# Add fracEnergy and SVD keys (sing. vals) to main DataFrames
self.subspaces[station].FracEnergy[ind] = fracEnergy
self.subspaces[station].UsedSVDKeys[ind] = usedBasis
self.subspaces[station].SVDdefined[ind] = True
numBas = len(self.subspaces[station].UsedSVDKeys[ind])
self.subspaces[station].NumBasis[ind] = numBas
if len(self.ssStations) > 0:
self._setThresholds(selectCriteria, selectValue, conDatNum,
threshold, basisLength, backupThreshold, kwargs)
if len(self.singStations) > 0 and useSingles:
self.setSinglesThresholds(conDatNum=conDatNum, threshold=threshold,
backupThreshold=backupThreshold,
kwargs=kwargs)
def _drop_subspace(self, station, ssnum):
"""
Drop a subspace that is misbehaving
"""
space = self.subspaces[station]
self.subspaces[station] = space[space.index != int(ssnum)]
def _trimGroups(self, ind, row, keys, station):
"""
function to get trimed subspaces if trim times are defined, and
return an array of the aligned waveforms for the SVD to act on
"""
stkeys = row.SampleTrims.keys()
aliTD = row.AlignedTD
if 'Starttime' in stkeys and 'Endtime' in stkeys:
stim = row.SampleTrims['Starttime']
etim = row.SampleTrims['Endtime']
if stim < 0: # make sure stim is not less than 0
stim = 0
Arr = np.vstack([aliTD[x][stim:etim] -
np.mean(aliTD[x][stim:etim]) for x in keys])
basisLength = Arr.shape[1]
else:
msg = ('No trim times for %s and station %s, try running '
'pickTimes or attachPickTimes' % (row.Name, station))
detex.log(__name__, msg, level='warn', pri=True)
Arr = np.vstack([aliTD[x] - np.mean(aliTD[x]) for x in keys])
basisLength = Arr.shape[1]
return Arr, basisLength
def _checkSelection(self, selectCriteria, selectValue, threshold):
"""
Make sure all user defined values are kosher for SVD call
"""
if selectCriteria in [1, 2, 3]:
if selectValue > 1 or selectValue < 0:
msg = ('When selectCriteria==%d selectValue must be a float'
' between 0 and 1' % selectCriteria)
detex.log(__name__, msg, level='error', e=ValueError)
elif selectCriteria == 4:
if selectValue < 0 or not isinstance(selectValue, int):
msg = ('When selectCriteria==3 selectValue must be an'
'integer greater than 0')
detex.log(__name__, msg, level='error', e=ValueError)
else:
msg = 'selectCriteria of %s is not supported' % selectCriteria
detex.log(__name__, msg, level='error')
if threshold is not None:
if not isinstance(threshold, numbers.Number) or threshold < 0:
msg = 'Unsupported type for threshold, must be None or float'
detex.log(__name__, msg, level='error', e=ValueError)
def _getFracEnergy(self, ind, row, svdDict, U):
"""
calculates the % energy capture for each stubspace for each possible
dimension of rep. (up to # of events that go into the subspace)
"""
fracDict = {}
keys = row.Events
svales = svdDict.keys()
svales.sort(reverse=True)
stkeys = row.SampleTrims.keys() # dict defining sample trims
for key in keys:
aliTD = row.AlignedTD[key] # aligned waveform for event key
if 'Starttime' in stkeys and 'Endtime' in stkeys:
start = row.SampleTrims['Starttime'] # start of trim in samps
end = row.SampleTrims['Endtime'] # end of trim in samps
aliwf = aliTD[start: end]
else:
aliwf = aliTD
Ut = np.transpose(U) # transpose of basis vects
# normalized dot product (mat. mult.)
normUtAliwf = scipy.dot(Ut, aliwf) / scipy.linalg.norm(aliwf)
# add 0% energy capture for dim of 0
repvect = np.insert(np.square(normUtAliwf), 0, 0)
# cumul. energy captured for increasing dim. reps
cumrepvect = [np.sum(repvect[:x + 1]) for x in range(len(repvect))]
fracDict[key] = cumrepvect # add cumul. to keys
# get average and min energy capture, append value to dict
fracDict['Average'] = np.average([fracDict[x] for x in keys], axis=0)
fracDict['Minimum'] = np.min([fracDict[x] for x in keys], axis=0)
return (fracDict)
def _getUsedBasis(self, ind, row, svdDict, cumFracEnergy,
selectCriteria, selectValue):
"""
function to populate the keys of the selected SVD basis vectors
"""
keys = svdDict.keys()
keys.sort(reverse=True)
if selectCriteria in [1, 2, 3]:
# make sure last element is exactly 1
cumFracEnergy['Average'][-1] = 1.00
ndim = np.argmax(cumFracEnergy['Average'] >= selectValue)
selKeys = keys[:ndim] # selected keys
if selectCriteria == 4:
selKeys = keys[:selectValue + 1]
return selKeys
def _setThresholds(self, selectCriteria, selectValue, conDatNum,
threshold, basisLength, backupThreshold, kwargs={}):
if threshold > 0:
for station in self.ssStations:
subspa = self.subspaces[station]
for ind, row in subspa.iterrows():
self.subspaces[station].Threshold[ind] = threshold
elif selectCriteria == 1:
msg = 'selectCriteria 1 currently not supported'
detex.log(__name__, msg, level='error', e=ValueError)
elif selectCriteria in [2, 4]:
# call getFAS to estimate null space dist.
self.getFAS(conDatNum, **kwargs)
for station in self.ssStations:
subspa = self.subspaces[station]
for ind, row in subspa.iterrows():
beta_a, beta_b = row.FAS['betadist'][0:2]
# get threshold from beta dist.
# TODO consider implementing other dist. options as well
th = scipy.stats.beta.isf(self.Pf, beta_a, beta_b, 0, 1)
if th > .9:
th, Pftemp = self._approxThld(beta_a, beta_b, station,
row, self.Pf, 1000, 3,
backupThreshold)
msg = ('Scipy.stats.beta.isf failed with pf=%e, '
'approximated threshold to %f with a Pf of %e '
'for station %s %s using forward grid search' %
(self.Pf, th, Pftemp, station, row.Name))
detex.log(__name__, msg, level='warning')
self.subspaces[station].Threshold[ind] = th
elif selectCriteria == 3:
for station in self.ssStations:
subspa = self.subspaces[station]
for ind, row in subspa.iterrows():
th = row.FracEnergy['Minimum'][row.NumBasis] * selectValue
self.subspaces[station].Threshold[ind] = th
def setSinglesThresholds(self, conDatNum=50, recalc=False,
threshold=None, backupThreshold=None, **kwargs):
"""
Set thresholds for the singletons (unclustered events) by fitting
a beta distribution to estimation of null space
Parameters
----------
condatNum : int
The number of continuous data chunks to use to fit PDF
recalc : boolean
If true recalculate the the False Alarm Statistics
threshold : None or float between 0 and 1
If number, don't call getFAS simply use given threshold
backupThreshold : None or float
If approximate a threshold fails then use backupThreshold. If None
then raise.
Note
----------
Any singles without pick times will not be used. In this way singles
can be rejected
"""
for sta in self.singStations:
sing = self.singles[sta] # singles on station
sampTrims = self.singles[sta].SampleTrims
self.singles[sta].Name = ['SG%d' % x for x in range(len(sing))]
# get singles that have phase picks
singsAccepted = sing[[len(x.keys()) > 0 for x in sampTrims]]
self.singles[sta] = singsAccepted
self.singles[sta].reset_index(inplace=True, drop=True)
if threshold is None:
# get empirical dist unless manual threshold is passed
self.getFAS(conDatNum, useSingles=True,
useSubSpaces=False, **kwargs)
for sta in self.singStations:
for ind, row in self.singles[sta].iterrows():
if len(row.SampleTrims.keys()) < 1: # skip singles with no pick times
continue
if threshold:
th = threshold
else:
beta_a, beta_b = row.FAS[0]['betadist'][0:2]
th = scipy.stats.beta.isf(self.Pf, beta_a, beta_b, 0, 1)
if th > .9:
th, Pftemp = self._approxThld(beta_a, beta_b, sta,
row, self.Pf, 1000, 3,
backupThreshold)
msg = ('Scipy.stats.beta.isf failed with pf=%e, '
'approximated threshold to %f with a Pf of %e '
'for station %s %s using forward grid search' %
(self.Pf, th, Pftemp, sta, row.Name))
detex.log(__name__, msg, level='warning')
self.singles[sta]['Threshold'][ind] = th
def _approxThld(self, beta_a, beta_b, sta, row, target, numint, numloops,
backupThreshold):
"""
Because scipy.stats.beta.isf can break, if it returns a value near 1
when this is obviously wrong initialize grid search algorithm to get
close to desired threshold using forward problem which seems to work
where inverse fails See this bug report:
https://github.com/scipy/scipy/issues/4677
"""
startVal, stopVal = 0, 1
loops = 0
while loops < numloops:
Xs = np.linspace(startVal, stopVal, numint)
pfs = np.array([scipy.stats.beta.sf(x, beta_a, beta_b) for x in Xs])
resids = abs(pfs - target)
minind = resids.argmin()
if minind == 0 or minind == numint - 1:
msg1 = (('Grid search for threshold failing for %s on %s, '
'set it manually or use default') % (sta, row.name))
msg2 = (('Grid search for threshold failing for %s on %s, '
'using backup %.2f') % (sta, row.name, backupThreshold))
if backupThreshold is None:
detex.log(__name__, msg1, level='error', e=ValueError)
else:
detex.log(__name__, msg2, level='warn', pri=True)
return backupThreshold, target
bestPf = pfs[minind]
bestX = Xs[minind]
startVal, stopVal = Xs[minind - 1], Xs[minind + 1]
loops += 1
return bestX, bestPf
########################### Visualization Methods
def plotThresholds(self, conDatNum, xlim=[-.01, .5], **kwargs):
"""
Function to sample the continuous data and plot the thresholds
calculated with the SVD call with a histogram of detex's best
estimate of the null space (see getFAS for more details)
Parameters
------
conDatNum : int
The number of continuous data chunks to use in the sampling,
duration of chunks defined in data fetcher
xlim : list (number, number)
The x limits on the plot (often it is useful to zoom in around 0)
**kwargs are passed to the getFAS call
"""
self.getFAS(conDatNum, **kwargs)
count = 0
for station in self.ssStations:
for ind, row in self.subspaces[station].iterrows():
beta_a, beta_b = row.FAS['betadist'][0:2]
plt.figure(count)
plt.subplot(2, 1, 1)
bins = np.mean(
[row.FAS['bins'][1:], row.FAS['bins'][:-1]], axis=0)
plt.plot(bins, row.FAS['hist'])
plt.title('Station %s %s' % (station, row.Name))
plt.axvline(row.Threshold, color='g')
beta = scipy.stats.beta.pdf(bins, beta_a, beta_b)
plt.plot(bins, beta * (max(row.FAS['hist']) / max(beta)), 'k')
plt.title('%s station %s' % (row.Name, row.Station))
plt.xlim(xlim)
plt.ylabel('Count')
plt.subplot(2, 1, 2)
bins = np.mean(
[row.FAS['bins'][1:], row.FAS['bins'][:-1]], axis=0)
plt.plot(bins, row.FAS['hist'])
plt.axvline(row.Threshold, color='g')
plt.plot(bins, beta * (max(row.FAS['hist']) / max(beta)), 'k')
plt.xlabel('Detection Statistic')
plt.ylabel('Count')
plt.semilogy()
plt.ylim(ymin=10 ** -1)
plt.xlim(xlim)
count += 1
def plotFracEnergy(self):
"""
Method to plot the fractional energy captured of by the subspace for
various dimensions of rep. Each event is plotted as a grey dotted
line, the average as a red solid line, and the chosen degree of rep.
is plotted as a solid green vertical line.
Similar to Harris 2006 Fig 8
"""
for a, station in enumerate(self.ssStations):
f = plt.figure(a + 1)
f.set_figheight(1.85 * len(self.subspaces[station]))
for ind, row in self.subspaces[station].iterrows():
if not isinstance(row.FracEnergy, dict):
msg = 'fractional energy not defiend, call SVD'
detex.log(__name__, msg, level='error')
plt.subplot(len(self.subspaces[station]), 1, ind + 1)
for event in row.Events:
plt.plot(row.FracEnergy[event], '--', color='0.6')
plt.plot(row.FracEnergy['Average'], 'r')
plt.axvline(row.NumBasis, 0, 1, color='g')
plt.ylim([0, 1.1])
plt.title('Station %s, %s' % (row.Station, row.Name))
f.subplots_adjust(hspace=.4)
f.text(0.5, 0.06, 'Dimension of Representation', ha='center')
f.text(0.04, 0.5, 'Fraction of Energy Captured',
va='center', rotation='vertical')
plt.show()
def plotAlignedEvents(self): # plot aligned subspaces in SubSpaces object
"""
Plots the aligned events for each station in each cluster.
Will trim waveforms if trim times (by pickTimes or attachPickTimes)
are defined.
"""
for a, station in enumerate(self.ssStations):
for ind, row in self.subspaces[station].iterrows():
plt.figure(figsize=[10, .9 * len(row.Events)])
# f.set_figheight(1.85 * len(row.Events))
# plt.subplot(len(self.subspaces[station]), 1, ind + 1)
events = row.Events
stKeys = row.SampleTrims.keys() # sample trim keys
for evenum, eve in enumerate(events):
# plt.subplot(len(self.subspaces[station]), 1, evenum + 1)
aliTD = row.AlignedTD[eve] # aligned wf for event eve
if 'Starttime' in stKeys and 'Endtime' in stKeys:
start = row.SampleTrims['Starttime']
stop = row.SampleTrims['Endtime']
aliwf = aliTD[start: stop]
else:
aliwf = row.AlignedTD[eve]
plt.plot(aliwf / (2 * max(aliwf)) + 1.5 * evenum, c='k')
plt.xlim([0, len(aliwf)])
plt.ylim(-1, 1.5 * evenum + 1)
plt.xticks([])
plt.yticks([])
plt.title('Station %s, %s, %d events' % (station, row.Name, len(events)))
plt.show()
def plotBasisVectors(self, onlyused=False):
"""
Plots the basis vectors selected after performing the SVD
If SVD has not been called will throw error
Parameters
------------
onlyUsed : bool
If true only the selected basis vectors will be plotted. See
SVD for how detex selects basis vectors.
If false all will be plotted (used in blue, unused in red)
"""
if not self.subspaces.values()[0].iloc[0].SVDdefined:
msg = 'SVD not performed, call SVD before plotting basis vectors'
detex.log(__name__, msg, level='error')
for subnum, station in enumerate(self.ssStations):
subsp = self.subspaces[station]
for ind, row in subsp.iterrows():
num_wfs = len(row.UsedSVDKeys) if onlyused else len(row.SVD)
keyz = row.SVD.keys()
keyz.sort(reverse=True)
keyz = keyz[:num_wfs]
plt.figure(figsize=[10, .9 * num_wfs])
for keynum, key in enumerate(keyz):
wf = row.SVD[key] / (2 * max(row.SVD[key])) - 1.5 * keynum
c = 'b' if keynum < len(row.UsedSVDKeys) else '.5'
plt.plot(wf, c=c)
plt.ylim(-1.5 * keynum - 1, 1)
plt.yticks([])
plt.xticks([])
plt.title('%s station %s' % (row.Name, row.Station))
def plotOffsetTimes(self):
"""
Function to loop through each station/subspace pair and make
histograms of offset times
"""
count = 1
for station in self.ssStations:
for ind, row in self.subspaces[station].iterrows():
if len(row.SampleTrims.keys()) < 1:
msg = 'subspaces must be trimmed before plotting offsets'
detex.log(__name__, msg, level='error')
plt.figure(count)
keys = row.Events
offsets = [row.Stats[x]['offset'] for x in keys]
plt.hist(offsets)
plt.title('%s %s' % (row.Station, row.Name))
plt.figure(count + 1)
numEvs = len(row.Events)
ranmin = np.zeros(numEvs)
ranmax = np.zeros(numEvs)
orsamps = np.zeros(numEvs)
for evenum, eve in enumerate(row.Events):
tem = self.clusters.temkey[
self.clusters.temkey.NAME == eve].iloc[0]
condat = row.AlignedTD[
eve] / max(2 * abs(row.AlignedTD[eve])) + evenum + 1
Nc, Sr = row.Stats[eve]['Nc'], row.Stats[
eve]['sampling_rate']
starTime = row.Stats[eve]['starttime']
ortime = obspy.core.UTCDateTime(tem.TIME).timestamp
orsamps[evenum] = row.SampleTrims[
'Starttime'] - (starTime - ortime) * Nc * Sr
plt.plot(condat, 'k')
plt.axvline(row.SampleTrims['Starttime'], c='g')
plt.plot(orsamps[evenum], evenum + 1, 'r*')
ran = row.SampleTrims['Endtime'] - orsamps[evenum]
ranmin[evenum] = orsamps[evenum] - ran * .1
ranmax[evenum] = row.SampleTrims['Endtime'] + ran * .1
plt.xlim(int(min(ranmin)), int(max(ranmax)))
plt.axvline(min(orsamps), c='r')
plt.axvline(max(orsamps), c='r')
count += 2
############################# Pick Times functions
def pickTimes(self, duration=30, traceLimit=15, repick=False,
subspace=True, singles=True):
"""
Calls a modified version of obspyck (https://github.com/megies/obspyck)
, a GUI for picking phases, so user can manually select start times
(trim) of unclustered and clustered events.
Triming down each waveform group to only include event phases,
and not pre and post event noise, will significantly decrease the
runtime for the subspace detection (called with detex method).
Trimming is required for singletons as any singletons without trim
times will not be used as detectors).
Parameters
--------------
duration : real number
the time after the first pick (in seconds) to trim waveforms.
The fact that the streams are multiplexed is taken into account.
If None is passed then the last pick will be used as the end time
for truncating waveforms.
traceLimit : int
Limits the number of traces that will show up to be manually
picked to traceLimit events. Avoids bogging down and/or killing
the GUI with too many events.
repick : boolean
If true repick times that already have sample trim times, else
only pick those that do not.
subspace : boolean
If true pick subspaces
singles : boolean
If true pick singletons
"""
qApp = PyQt4.QtGui.QApplication(sys.argv)
if subspace:
self._pickTimes(self.subspaces, duration, traceLimit,
qApp, repick=repick)
if singles:
self._pickTimes(self.singles, duration, traceLimit, qApp,
issubspace=False, repick=repick)
def _pickTimes(self, trdfDict, duration, traceLimit, qApp,
issubspace=True, repick=False):
"""
Function to initate GUI for picking, called by pickTimes
"""
for sta in trdfDict.keys():
for ind, row in trdfDict[sta].iterrows():
if not row.SampleTrims or repick: # if not picked or repick
# Make a modified obspy stream to pass to streamPick
st = self._makeOpStream(ind, row, traceLimit)
Pks = None # This is needed or it crashes OS X
Pks = detex.streamPick.streamPick(st, ap=qApp)
d1 = {}
for b in Pks._picks:
if b: # if any picks made
d1[b.phase_hint] = b.time.timestamp
if len(d1.keys()) > 0: # if any picks made
# get sample rate and number of chans
sr = row.Stats[row.Events[0]]['sampling_rate']
Nc = row.Stats[row.Events[0]]['Nc']
# get sample divisible by NC to keep traces aligned
fp = int(min(d1.values())) # first picked phase
d1['Starttime'] = fp - fp % Nc
# if duration paramenter is defined (it is usually
# better to leave it defined)
stime = d1['Starttime']
if duration:
etime = stime + int(duration * sr * Nc)
d1['Endtime'] = etime
d1['DurationSeconds'] = duration
else:
etime = int(max(d1.values()))
d1['Endtime'] = etime
dursecs = (etime - stime) / (sr * Nc)
d1['DurationSeconds'] = dursecs
trdfDict[sta].SampleTrims[ind] = d1
for event in row.Events: # update starttimes
sspa = trdfDict[sta]
stimeOld = sspa.Stats[ind][event]['starttime']
# get updated start time
stN = stimeOld + d1['Starttime'] / (Nc * sr)
ot = trdfDict[sta].Stats[ind][event]['origintime']
offset = stN - ot
trdfDict[sta].Stats[ind][event]['starttime'] = stN
trdfDict[sta].Stats[ind][event]['offset'] = offset
if not Pks.KeepGoing:
msg = 'aborting picking, progress saved'
detex.log(__name__, msg, pri=1)
return None
self._updateOffsets()
def _makeOpStream(self, ind, row, traceLimit):
"""
Make an obspy stream of the multiplexed data stored in main detex
DataFrame
"""
st = obspy.core.Stream()
count = 0
if 'AlignedTD' in row: # if this is a subspace
for key in row.Events:
if count < traceLimit:
tr = obspy.core.Trace(data=row.AlignedTD[key])
tr.stats.channel = key
tr.stats.network = row.Name
tr.stats.station = row.Station
st += tr
count += 1
return st
else: # if this is a single event
for key in row.Events:
tr = obspy.core.Trace(data=row.MPtd[key])
tr.stats.channel = key
tr.stats.station = row.Station
st += tr
return st
def _updateOffsets(self):
"""
Calculate offset (predicted origin times), throw out extreme
outliers using median and median scaling
"""
for sta in self.subspaces.keys():
for num, row in self.subspaces[sta].iterrows():
keys = row.Stats.keys()
offsets = [row.Stats[x]['offset'] for x in keys]
self.subspaces[sta].Offsets[
num] = self._getOffsets(np.array(offsets))
for sta in self.singles.keys():
for num, row in self.singles[sta].iterrows():
keys = row.Stats.keys()
offsets = [row.Stats[x]['offset'] for x in keys]
self.singles[sta].Offsets[
num] = self._getOffsets(np.array(offsets))
def attachPickTimes(self, pksFile='PhasePicks.csv',
function='median', defaultDuration=30):
"""
Rather than picking times manually attach a file (either csv or pkl
of pandas dataframe) with pick times. Pick time file must have the
following fields: TimeStamp, Station, Event, Phase.
This file can be created by detex.util.pickPhases. If trims are
already defined attachPickTimes will not override.
----------
pksFile : str
Path to the input file (either csv or pickle)
function : str ('Average','Max', or 'Min')
Describes how to handle selecting a common pick time for
subspace groups (each event in a subspace cannot be treated
independently as the entire group is aligned to maximize
similarity). Does not apply for singles.
mean - Trims the group to the sample corresponding to the
average of the first arriving phase
median - Trims the group to the sample corresponding to the
median of the first arriving phase
max - trim to max value of start times for group
min - trim to min value of end times for group
defaultDuration : int or None
if Int, the default duration (in seconds) to trim the signal to
starting from the first arrival in pksFile for each event or
subspace group. If None, then durations are defined by first
arriving phase (start) and last arriving phase (stop) for each
event
"""
try: # read pksFile
pks = pd.read_csv(pksFile)
except Exception:
try:
pks = pd.read_pickle(pksFile)
except Exception:
msg = ('%s does not exist, or it is not a pkl or csv file'
% pksFile)
detex.log(__name__, msg, level='error')
# get appropriate function according to ssmod
if function == 'mean':
fun = np.mean
elif function == 'max':
fun = np.max
elif function == 'min':
fun = np.min
elif function == 'median':
fun = np.median
else:
msg = ('function %s not supported, options are: mean, median, min,'
' max' % function)
detex.log(__name__, msg, level='error')
# loop through each station in cluster, get singles and subspaces
for cl in self.clusters:
sta = cl.station # current station
# Attach singles
if sta in self.singles.keys():
for ind, row in self.singles[sta].iterrows():
if len(row.SampleTrims.keys()) > 0:
continue # skip if sampletrims already defined
# get phases that apply to current event and station
con1 = pks.Event.isin(row.Events)
con2 = pks.Station == sta
pk = pks[(con1) & (con2)]
eves, starttimes, Nc, Sr = self._getStats(row)
if len(pk) > 0:
trims = self._getSampTrim(eves, starttimes, Nc, Sr, pk,
defaultDuration, fun, sta,
ind, self.singles[sta], row)
if isinstance(trims, dict):
self.singles[sta].SampleTrims[ind] = trims
self._updateOffsets()
# Attach Subspaces
if sta in self.subspaces.keys():
for ind, row in self.subspaces[sta].iterrows():
if len(row.SampleTrims.keys()) > 0:
continue # skip if sampletrims already defined
# phases that apply to current event and station
con1 = pks.Event.isin(row.Events)
con2 = pks.Station == sta
pk = pks[(con1) & (con2)]
eves, starttimes, Nc, Sr = self._getStats(row)
if len(pk) > 0:
trims = self._getSampTrim(eves, starttimes, Nc, Sr, pk,
defaultDuration, fun, sta,
ind, self.subspaces[sta], row)
if isinstance(trims, dict):
self.subspaces[sta].SampleTrims[ind] = trims
self._updateOffsets()
def _getSampTrim(self, eves, starttimes, Nc, Sr, pk, defaultDuration,
fun, sta, num, DF, row):
"""
Determine sample trims for each single or subspace
"""
# stdict={}#intialize sample trim dict
startsamps = []
stopsamps = []
secduration = []
for ev in eves: # loop through each event
p = pk[pk.Event == ev]
if len(p) < 1: # if event is not recorded skip
continue
start = p.TimeStamp.min()
startsampsEve = (start - starttimes[ev]) * (Nc * Sr)
# see if any of the samples would be trimmed too much
try: # assume is single
len_test = len(row.MPtd[ev]) < startsampsEve
except AttributeError: # this is really a subspace
len_test = len(row.AlignedTD[ev]) < startsampsEve
if len_test:
utc_start = obspy.UTCDateTime(start)
msg = (('Start samples for %s on %s exceeds avaliable data,'
'check waveform quality and ensure phase pick is for '
'the correct event. The origin time is %s and the '
'pick time is %s, Skipping attaching pick. '
) % (ev, sta, ev, str(utc_start)))
detex.log(__name__, msg, level='warn')
return
# make sure starting time is not less than 0 else set to zero
if startsampsEve < 0:
startsampsEve = 0
start = starttimes[ev]
msg = 'Start time in phase file < 0 for event %s' % ev
detex.log(__name__, msg, level='warning', pri=False)
if defaultDuration:
stop = start + defaultDuration
secduration.append(defaultDuration)
else:
stop = p.TimeStamp.max()
secduration.append(stop - start)
assert stop > start # Make sure stop is greater than start
assert stop > starttimes[ev]
endsampsEve = (stop - starttimes[ev]) * (Nc * Sr)
startsamps.append(startsampsEve)
stopsamps.append(endsampsEve)
# update stats attached to each event to reflect new start time
otime = DF.Stats[num][ev]['origintime'] # origin time
DF.Stats[num][ev]['Starttime'] = start
DF.Stats[num][ev]['offset'] = start - otime
if len(startsamps) > 0:
sSamps = int(fun(startsamps))
rSSamps = sSamps - sSamps % Nc
eSamps = int(fun(stopsamps))
rESamps = eSamps - eSamps % Nc
dursec = int(fun(secduration))
outdict = {'Starttime': rSSamps, 'Endtime': rESamps,
'DurationSeconds': dursec}
return outdict
else:
return
def _getStats(self, row):
"""
Get the sampling rate, starttime, and number of channels for
each event group
"""
eves = row.Events
sr = [np.round(row.Stats[x]['sampling_rate']) for x in eves]
if len(set(sr)) != 1:
msg = ('Events %s on Station %s have different sampling rates or '
'no sampling rates' % (row.Station, row.events))
detex.log(__name__, msg, level='error')
Nc = [row.Stats[x]['Nc'] for x in eves]
if len(set(Nc)) != 1:
msg = (('Events %s on Station %s do not have the same channels or'
' have no channels') % (row.Station, row.events))
detex.log(__name__, msg, level='error')
starttimes = {x: row.Stats[x]['starttime'] for x in eves}
return eves, starttimes, list(set(Nc))[0], list(set(sr))[0]
def _getOffsets(self, offsets, m=25.):
"""
Get offsets, reject outliers bassed on median values (accounts
for possible mismatch in events and origin times)
"""
if len(offsets) == 1:
return offsets[0], offsets[0], offsets[0]
d = np.abs(offsets - np.median(offsets))
mdev = np.median(d)
s = d / mdev if mdev else 0.
if isinstance(s, float):
offs = offsets
else:
offs = offsets[s < m]
return [np.min(offs), np.median(offs), np.max(offs)]
def getFAS(
self,
conDatNum,
LTATime=5,
STATime=0.5,
staltalimit=8.0,
useSubSpaces=True,
useSingles=False,
numBins=401,
recalc=False,
**kwargs):
"""
Function to initialize a FAS (false alarm statistic) instance, used
primarily for sampling and characterizing the null space of the
subspaces and singletons. Random samples of the continuous data are
loaded, examined for high amplitude signals with a basic STA/LTA
method, and any traces with STA/LTA ratios higher than the
staltalimit parameter are rejected. The continuous DataFetcher
already attached to the SubSpace instance will be used to get
the continuous data.
Parameters
-------------
ConDatNum : int
The number of continuous data files (by default in hour chunks)
to use.
LTATime : float
The long term average time window in seconds used for
checking continuous data
STATime : float
The short term average time window in seconds for checking
continuous data
staltalimit : int or float
The value at which continuous data gets rejected as too noisey
(IE transient signals are present)
useSubSpaces : bool
If True calculate FAS for subspaces
useSingles : bool
If True calculate FAS for singles
numBins : int
Number of bins for binning distributions (so distribution can be
loaded and plotted later)
Note
---------
The results are stored in a DataFrame for each subspace/singleton
under the "FAS" column of the main DataFrame
"""
if useSubSpaces:
self._updateOffsets() # make sure offset times are up to date
for sta in self.subspaces.keys():
# check if FAS already calculated, only recalc if recalc
fas1 = self.subspaces[sta]['FAS'][0]
if isinstance(fas1, dict) and not recalc:
msg = ('FAS for station %s already calculated, to '
'recalculate pass True to the parameter recalc' %
sta)
detex.log(__name__, msg, pri=True)
else:
self.subspaces[sta]['FAS'] = detex.fas._initFAS(
self.subspaces[sta],
conDatNum,
self.clusters,
self.cfetcher,
LTATime=LTATime,
STATime=STATime,
staltalimit=staltalimit,
numBins=numBins,
dtype=self.dtype)
if useSingles:
for sta in self.singles.keys():
for a in range(len(self.singles[sta])):
fas1 = self.singles[sta]['FAS'][a]
if isinstance(fas1, dict) and not recalc:
msg = (('FAS for singleton %d already calculated on '
'station %s, to recalculate pass True to the '
'parameter recalc') % (a, sta))
detex.log(__name__, msg, pri=True)
# skip any events that have not been trimmed
elif len(self.singles[sta]['SampleTrims'][a].keys()) < 1:
continue
else:
self.singles[sta]['FAS'][a] = detex.fas._initFAS(
self.singles[sta][a:a + 1],
conDatNum,
self.clusters,
self.cfetcher,
LTATime=LTATime,
STATime=STATime,
staltalimit=staltalimit,
numBins=numBins,
dtype=self.dtype,
issubspace=False)
def detex(self,
utcStart=None,
utcEnd=None,
subspaceDB='SubSpace.db',
trigCon=0,
triggerLTATime=5,
triggerSTATime=0,
multiprocess=False,
delOldCorrs=True,
calcHist=True,
useSubSpaces=True,
useSingles=False,
estimateMags=True,
classifyEvents=None,
eventCorFile='EventCors',
utcSaves=None,
fillZeros=False):
"""
function to run subspace detection over continuous data and store
results in SQL database subspaceDB
Parameters
------------
utcStart : str or num
An obspy.core.UTCDateTime readable object defining the start time
of the correlations if not all avaliable data are to be used
utcEnd : str num
An obspy.core.UTCDateTime readable object defining the end time
of the correlations
subspaceDB : str
Path to the SQLite database to store detections in. If it already
exists delOldCorrs parameters governs if it will be deleted before
running new detections, or appended to.
trigCon is the condition for which detections should trigger.
Once the condition is set the variable minCoef is used:
0 is based on the detection statistic threshold
1 is based on the STA/LTA of the detection statistic threshold
(Only 0 is currently supported)
triggerLTATime : number
The long term average for the STA/LTA calculations in seconds.
triggerSTATime : number
The short term average for the STA/LTA calculations in seconds.
If ==0 then one sample is used.
multiprocess : bool
Determine if each station should be forked into its own process
for potential speed ups. Currently not implemented.
delOldCorrs : bool
Determines if subspaceDB should be deleted before performing
detections. If False old database is appended to.
calcHist : boolean
If True calculates the histagram for every point of the detection
statistic vectors (all hours, stations and subspaces) by keeping a
a cumulative bin count. Only slows the detections down slightly
and can be useful for threshold sanity checks. The histograms are
then returned to the main DataFrame in the SubSpace instance
as the column histSubSpaces, and saved in the subspaceDB under the
ss_hist and sg_hists tables for subspacs and singletons.
useSubspace : bool
If True the subspaces will be used as detectors to scan
continuous data
useSingles : bool
If True the singles (events that did not cluster) will be used as
detectors to scan continuous data
estimateMags : bool
If True, magnitudes will be estimated for each detection by using
two methods. The first is using standard deviation ratios, and the
second uses projected energy ratios (see chambers et al. 2015 for
details).
classifyEvents : None, str, or DataFrame
If None subspace detectors will be run over continuous data.
Else, detex will be run over event waveforms in order to classify
events into groups bassed on which subspace they are most similar
to. In the latter case the classifyEvents argument must be a
str (path to template key like csv) or DataFrame (loaded template
key file). The same event DataFetcher attached to the cluster
object will be used to get the data. This feature is Experimental.
eventCorFile : str
A path to a new pickled DataFrame created when the eventDir option
is used. Records the highest detection statistic in the file
for each event, station, and subspace. Useful when trying to
characterize events.
utcSaves : None or list of obspy DateTime readable objects
Either none (not used) or an iterrable of objects readable by
obspy.UTCDateTime. When the detections are run if the continous
data cover a time indicated in UTCSaves then the continuous data
and detection statistic vectors,are saved to a pickled dataframe
of the name "UTCsaves.pkl". This can be useful for debugging, or
extracting the DS vector for a time of interest.
fillZeros : bool
If true fill the gaps in continuous data with 0s. If True
STA/LTA of detection statistic cannot be calculated in order to
avoid dividing by 0.
Notes
----------
The same filter and decimation parameters that were used in the
ClusterStream instance will be applied.
"""
# make sure no parameters that dont work yet are selected
if multiprocess or trigCon != 0:
msg = 'multiprocessing and trigcon other than 0 not supported'
detex.log(__name__, msg, level='error')
if os.path.exists(subspaceDB):
if delOldCorrs:
os.remove(subspaceDB)
msg = 'Deleting old subspace database %s' % subspaceDB
detex.log(__name__, msg, pri=True)
else:
msg = 'Not deleting old subspace database %s' % subspaceDB
detex.log(__name__, msg, pri=True)
if useSubSpaces: # run subspaces
TRDF = self.subspaces
# determine if subspaces are defined (ie SVD has been called)
stas = self.subspaces.keys()
sv = [all(TRDF[sta].SVDdefined) for sta in stas]
if not all(sv):
msg = 'call SVD before running subspace detectors'
detex.log(__name__, msg, level='error')
Det = _SSDetex(TRDF, utcStart, utcEnd, self.cfetcher, self.clusters,
subspaceDB, trigCon, triggerLTATime, triggerSTATime,
multiprocess, calcHist, self.dtype, estimateMags,
classifyEvents, eventCorFile, utcSaves, fillZeros)
self.histSubSpaces = Det.hist
if useSingles: # run singletons
# make sure thresholds are calcualted
self.setSinglesThresholds()
TRDF = self.singles
Det = _SSDetex(TRDF, utcStart, utcEnd, self.cfetcher, self.clusters,
subspaceDB, trigCon, triggerLTATime, triggerSTATime,
multiprocess, calcHist, self.dtype, estimateMags,
classifyEvents, eventCorFile, utcSaves, fillZeros,
issubspace=False)
self.histSingles = Det.hist
# save addational info to sql database
if useSubSpaces or useSingles:
cols = ['FREQMIN', 'FREQMAX', 'CORNERS', 'ZEROPHASE']
dffil = pd.DataFrame([self.clusters.filt], columns=cols, index=[0])
detex.util.saveSQLite(dffil, subspaceDB, 'filt_params')
# get general info on each singleton/subspace and save
ssinfo, sginfo = self._getInfoDF()
sshists, sghists = self._getHistograms(useSubSpaces, useSingles)
if useSubSpaces and ssinfo is not None:
# save subspace info
detex.util.saveSQLite(ssinfo, subspaceDB, 'ss_info')
if useSingles and sginfo is not None:
# save singles info
detex.util.saveSQLite(sginfo, subspaceDB, 'sg_info')
if useSubSpaces and sshists is not None:
# save subspace histograms
detex.util.saveSQLite(sshists, subspaceDB, 'ss_hist')
if useSingles and sghists is not None:
# save singles histograms
detex.util.saveSQLite(sghists, subspaceDB, 'sg_hist')
def _getInfoDF(self):
"""
get dataframes that have info about each subspace and single
"""
sslist = [] # list in which to put DFs for each subspace/station pair
sglist = [] # list in which to put DFs for each single/station pair
for sta in self.Stations:
if sta not in self.ssStations:
msg = 'No subspaces on station %s' % sta
detex.log(__name__, msg, pri=True)
continue
for num, ss in self.subspaces[sta].iterrows(): # write ss info
name = ss.Name
station = ss.Station
events = ','.join(ss.Events)
numbasis = ss.NumBasis
thresh = ss.Threshold
if isinstance(ss.FAS, dict) and len(ss.FAS.keys()) > 1:
b1, b2 = ss.FAS['betadist'][0], ss.FAS['betadist'][1]
else:
b1, b2 = np.nan, np.nan
cols = ['Name', 'Sta', 'Events', 'Threshold', 'NumBasisUsed',
'beta1', 'beta2']
dat = [[name, station, events, thresh, numbasis, b1, b2]]
sslist.append(pd.DataFrame(dat, columns=cols))
for sta in self.Stations:
if sta not in self.singStations:
msg = 'No singletons on station %s' % sta
detex.log(__name__, msg, pri=True)
continue
for num, ss in self.singles[sta].iterrows(): # write singles info
name = ss.Name
station = ss.Station
events = ','.join(ss.Events)
thresh = ss.Threshold
if isinstance(ss.FAS, list) and len(ss.FAS[0].keys()) > 1:
b1, b2 = ss.FAS[0]['betadist'][0], ss.FAS[0]['betadist'][1]
else:
b1, b2 = np.nan, np.nan
cols = ['Name', 'Sta', 'Events', 'Threshold', 'beta1', 'beta2']
dat = [[name, station, events, thresh, b1, b2]]
sglist.append(pd.DataFrame(dat, columns=cols))
if len(sslist) > 0:
ssinfo = pd.concat(sslist, ignore_index=True)
else:
ssinfo = None
if len(sglist) > 0:
sginfo = pd.concat(sglist, ignore_index=True)
else:
sginfo = None
return ssinfo, sginfo
def _getHistograms(self, useSubSpaces, useSingles):
"""
Pull out the histogram info for saving to database
"""
cols = ['Name', 'Sta', 'Value']
if useSubSpaces:
bins = json.dumps(self.histSubSpaces['Bins'].tolist())
dat = [['Bins', 'Bins', bins]]
sshists = [pd.DataFrame(dat, columns=cols)]
for sta in self.Stations:
if sta in self.histSubSpaces.keys():
for skey in self.histSubSpaces[sta]:
try:
vl = json.dumps(self.histSubSpaces[sta][skey].tolist())
except AttributeError:
continue
dat = [[skey, sta, vl]]
sshists.append(pd.DataFrame(dat, columns=cols))
sshist = pd.concat(sshists, ignore_index=True)
else:
sshist = None
if useSingles:
bins = json.dumps(self.histSingles['Bins'].tolist())
dat = [['Bins', 'Bins', bins]]
sghists = [pd.DataFrame(dat, columns=cols)]
for sta in self.Stations:
if sta in self.histSingles.keys():
for skey in self.histSingles[sta]:
try:
vl = json.dumps(self.histSingles[sta][skey].tolist())
except AttributeError:
pass
dat = [[skey, sta, vl]]
sghists.append(pd.DataFrame(dat, columns=cols))
sghist = pd.concat(sghists, ignore_index=True)
else:
sghist = None
return sshist, sghist
########################### Python Class Attributes
def __getitem__(self, key): # make object indexable
if isinstance(key, int):
return self.subspaces[self.ssStations[key]]
elif isinstance(key, string_types):
if len(key.split('.')) == 2:
return self.subspaces[self._stakey2[key]]
elif len(key.split('.')) == 1:
return self.subspaces[self._stakey1[key]]
else:
msg = '%s is not a station in this cluster object' % key
detex.log(__name__, msg, level='error')
else:
msg = '%s must either be a int or str of station name' % key
detex.log(__name__, msg, level='error')
def __len__(self):
return len(self.subspaces)
############ MISC
def write(self, filename='subspace.pkl'):
"""
pickle the subspace class
Parameters
-------------
filename : str
Path of the file to be created
"""
cPickle.dump(self, open(filename, 'wb'))
def printOffsets(self):
"""
Function to print out the offset min max and ranges for each
station/subpace pair
"""
for station in self.ssStations:
for num, row in self.subspaces[station].iterrows():
print('%s, %s, min=%3f, max=%3f, range=%3f' %
(row.Station, row.Name, row.Offsets[0], row.Offsets[2],
row.Offsets[2] - row.Offsets[0]))
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.hist",
"numpy.random.rand",
"obspy.core.Stream",
"matplotlib.pyplot.ylabel",
"pandas.read_csv",
"detex.construct.fast_normcorr",
"obspy.core.UTCDateTime",
"numpy.array",
"detex.util.saveSQLite",
"copy.deepcopy",
"numpy.linalg.norm",
"scipy.dot",
... | [((10480, 10528), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""info"""', 'pri': '(True)'}), "(__name__, msg, level='info', pri=True)\n", (10489, 10528), False, 'import detex\n'), ((13698, 13746), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""info"""', 'pri': '(True)'}), "(__name__, msg, level='info', pri=True)\n", (13707, 13746), False, 'import detex\n'), ((15826, 15886), 'scipy.cluster.hierarchy.fcluster', 'fcluster', (['link[:, 0:4]', '(1 - self.ccReq)'], {'criterion': '"""distance"""'}), "(link[:, 0:4], 1 - self.ccReq, criterion='distance')\n", (15834, 15886), False, 'from scipy.cluster.hierarchy import dendrogram, fcluster\n'), ((15905, 15917), 'pandas.Series', 'pd.Series', (['T'], {}), '(T)\n', (15914, 15917), True, 'import pandas as pd\n'), ((16272, 16309), 'pandas.DataFrame', 'pd.DataFrame', (['linkup'], {'columns': 'columns'}), '(linkup, columns=columns)\n', (16284, 16309), True, 'import pandas as pd\n'), ((17818, 17865), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]', '"""-"""'], {'color': 'self.nonClustColor'}), "([], [], '-', color=self.nonClustColor)\n", (17826, 17865), True, 'import matplotlib.pyplot as plt\n'), ((17874, 17999), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['self.link'], {'color_threshold': '(1 - self.ccReq)', 'count_sort': '(True)', 'link_color_func': '(lambda x: color_list[x])'}), '(self.link, color_threshold=1 - self.ccReq, count_sort=True,\n link_color_func=lambda x: color_list[x], **kwargs)\n', (17884, 17999), False, 'from scipy.cluster.hierarchy import dendrogram, fcluster\n'), ((18028, 18037), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (18035, 18037), True, 'import matplotlib.pyplot as plt\n'), ((18449, 18469), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Events"""'], {}), "('Events')\n", (18459, 18469), True, 'import matplotlib.pyplot as plt\n'), ((18478, 18505), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Dissimilarity"""'], {}), "('Dissimilarity')\n", (18488, 18505), True, 'import matplotlib.pyplot as plt\n'), ((18514, 18537), 'matplotlib.pyplot.title', 'plt.title', (['self.station'], {}), '(self.station)\n', (18523, 18537), True, 'import matplotlib.pyplot as plt\n'), ((20578, 20590), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20588, 20590), True, 'import matplotlib.pyplot as plt\n'), ((22100, 22130), 'numpy.arange', 'np.arange', (['(0.0)', '(80)', '(maxdeg / 4)'], {}), '(0.0, 80, maxdeg / 4)\n', (22109, 22130), True, 'import numpy as np\n'), ((22209, 22243), 'numpy.arange', 'np.arange', (['(10.0)', '(360.0)', '(maxdeg / 4)'], {}), '(10.0, 360.0, maxdeg / 4)\n', (22218, 22243), True, 'import numpy as np\n'), ((22471, 22513), 'matplotlib.pyplot.title', 'plt.title', (["('Clusters on %s' % self.station)"], {}), "('Clusters on %s' % self.station)\n", (22480, 22513), True, 'import matplotlib.pyplot as plt\n'), ((22743, 22755), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22753, 22755), True, 'import matplotlib.pyplot as plt\n'), ((22975, 22989), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (22985, 22989), True, 'import matplotlib.pyplot as plt\n'), ((22998, 23022), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Depth (km)"""'], {}), "('Depth (km)')\n", (23008, 23022), True, 'import matplotlib.pyplot as plt\n'), ((23827, 23853), 'matplotlib.pyplot.figure', 'plt.figure', (['map_fig.number'], {}), '(map_fig.number)\n', (23837, 23853), True, 'import matplotlib.pyplot as plt\n'), ((24500, 24526), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_lon.number'], {}), '(fig_lon.number)\n', (24510, 24526), True, 'import matplotlib.pyplot as plt\n'), ((24535, 24603), 'matplotlib.pyplot.plot', 'plt.plot', (['x_sing', '(sing_df.DEPTH * zscale)', '"""."""'], {'color': 'nccolor', 'ms': '(6.0)'}), "(x_sing, sing_df.DEPTH * zscale, '.', color=nccolor, ms=6.0)\n", (24543, 24603), True, 'import matplotlib.pyplot as plt\n'), ((24612, 24635), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Longitude"""'], {}), "('Longitude')\n", (24622, 24635), True, 'import matplotlib.pyplot as plt\n'), ((24644, 24670), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_lat.number'], {}), '(fig_lat.number)\n', (24654, 24670), True, 'import matplotlib.pyplot as plt\n'), ((24679, 24747), 'matplotlib.pyplot.plot', 'plt.plot', (['y_sing', '(sing_df.DEPTH * zscale)', '"""."""'], {'color': 'nccolor', 'ms': '(6.0)'}), "(y_sing, sing_df.DEPTH * zscale, '.', color=nccolor, ms=6.0)\n", (24687, 24747), True, 'import matplotlib.pyplot as plt\n'), ((24756, 24778), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Latitude"""'], {}), "('Latitude')\n", (24766, 24778), True, 'import matplotlib.pyplot as plt\n'), ((26810, 26822), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (26820, 26822), True, 'import matplotlib.pyplot as plt\n'), ((26881, 26907), 'numpy.zeros', 'np.zeros', (['(le + 1, le + 1)'], {}), '((le + 1, le + 1))\n', (26889, 26907), True, 'import numpy as np\n'), ((27402, 27487), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', (['"""my_colormap"""', "['blue', 'red']", '(256)'], {}), "('my_colormap', ['blue', 'red'],\n 256)\n", (27446, 27487), True, 'import matplotlib as mpl\n'), ((27511, 27598), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mat'], {'interpolation': '"""nearest"""', 'cmap': 'cmap', 'origin': '"""upper"""', 'vmin': '(0)', 'vmax': '(1)'}), "(mat, interpolation='nearest', cmap=cmap, origin='upper', vmin=0,\n vmax=1)\n", (27521, 27598), True, 'import matplotlib.pyplot as plt\n'), ((27676, 27690), 'matplotlib.pyplot.clim', 'plt.clim', (['(0)', '(1)'], {}), '(0, 1)\n', (27684, 27690), True, 'import matplotlib.pyplot as plt\n'), ((27699, 27728), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'color': '"""white"""'}), "(True, color='white')\n", (27707, 27728), True, 'import matplotlib.pyplot as plt\n'), ((27737, 27765), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['img'], {'cmap': 'cmap'}), '(img, cmap=cmap)\n', (27749, 27765), True, 'import matplotlib.pyplot as plt\n'), ((27774, 27797), 'matplotlib.pyplot.title', 'plt.title', (['self.station'], {}), '(self.station)\n', (27783, 27797), True, 'import matplotlib.pyplot as plt\n'), ((29954, 30002), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""info"""', 'pri': '(True)'}), "(__name__, msg, level='info', pri=True)\n", (29963, 30002), False, 'import detex\n'), ((31364, 31412), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""info"""', 'pri': '(True)'}), "(__name__, msg, level='info', pri=True)\n", (31373, 31412), False, 'import detex\n'), ((42719, 42766), 'numpy.average', 'np.average', (['[fracDict[x] for x in keys]'], {'axis': '(0)'}), '([fracDict[x] for x in keys], axis=0)\n', (42729, 42766), True, 'import numpy as np\n'), ((42797, 42840), 'numpy.min', 'np.min', (['[fracDict[x] for x in keys]'], {'axis': '(0)'}), '([fracDict[x] for x in keys], axis=0)\n', (42803, 42840), True, 'import numpy as np\n'), ((60197, 60231), 'PyQt4.QtGui.QApplication', 'PyQt4.QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (60221, 60231), False, 'import PyQt4\n'), ((63545, 63564), 'obspy.core.Stream', 'obspy.core.Stream', ([], {}), '()\n', (63562, 63564), False, 'import obspy\n'), ((73810, 73822), 'numpy.median', 'np.median', (['d'], {}), '(d)\n', (73819, 73822), True, 'import numpy as np\n'), ((83483, 83509), 'os.path.exists', 'os.path.exists', (['subspaceDB'], {}), '(subspaceDB)\n', (83497, 83509), False, 'import os\n'), ((2814, 2853), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (2823, 2853), False, 'import detex\n'), ((12258, 12297), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (12267, 12297), False, 'import detex\n'), ((14274, 14314), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(360.0 / numClusts)'], {}), '(0.0, 360.0, 360.0 / numClusts)\n', (14283, 14314), True, 'import numpy as np\n'), ((16470, 16518), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""info"""', 'pri': '(True)'}), "(__name__, msg, level='info', pri=True)\n", (16479, 16518), False, 'import detex\n'), ((17761, 17809), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]', '"""-"""'], {'color': 'self.clustColors[a]'}), "([], [], '-', color=self.clustColors[a])\n", (17769, 17809), True, 'import matplotlib.pyplot as plt\n'), ((18571, 18602), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveName'], {}), '(saveName, **kwargs)\n', (18582, 18602), True, 'import matplotlib.pyplot as plt\n'), ((18632, 18642), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18640, 18642), True, 'import matplotlib.pyplot as plt\n'), ((22900, 22923), 'numpy.linspace', 'np.linspace', (['z1', 'z2', '(10)'], {}), '(z1, z2, 10)\n', (22911, 22923), True, 'import numpy as np\n'), ((24957, 24983), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_lon.number'], {}), '(fig_lon.number)\n', (24967, 24983), True, 'import matplotlib.pyplot as plt\n'), ((24996, 25045), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(cl.DEPTH * zscale)', '"""."""'], {'color': 'ccolor'}), "(x, cl.DEPTH * zscale, '.', color=ccolor)\n", (25004, 25045), True, 'import matplotlib.pyplot as plt\n'), ((25058, 25084), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_lat.number'], {}), '(fig_lat.number)\n', (25068, 25084), True, 'import matplotlib.pyplot as plt\n'), ((25097, 25146), 'matplotlib.pyplot.plot', 'plt.plot', (['y', '(cl.DEPTH * zscale)', '"""."""'], {'color': 'ccolor'}), "(y, cl.DEPTH * zscale, '.', color=ccolor)\n", (25105, 25146), True, 'import matplotlib.pyplot as plt\n'), ((25251, 25273), 'matplotlib.pyplot.figure', 'plt.figure', (['fig.number'], {}), '(fig.number)\n', (25261, 25273), True, 'import matplotlib.pyplot as plt\n'), ((25293, 25303), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (25301, 25303), True, 'import matplotlib.pyplot as plt\n'), ((25363, 25417), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(xlim[0] - xdist * 0.1)', '(xlim[1] + xdist * 0.1)'], {}), '(xlim[0] - xdist * 0.1, xlim[1] + xdist * 0.1)\n', (25371, 25417), True, 'import matplotlib.pyplot as plt\n'), ((25435, 25445), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (25443, 25445), True, 'import matplotlib.pyplot as plt\n'), ((25505, 25559), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(ylim[0] - ydist * 0.1)', '(ylim[1] + ydist * 0.1)'], {}), '(ylim[0] - ydist * 0.1, ylim[1] + ydist * 0.1)\n', (25513, 25559), True, 'import matplotlib.pyplot as plt\n'), ((26321, 26347), 'copy.deepcopy', 'copy.deepcopy', (['self.clusts'], {}), '(self.clusts)\n', (26334, 26347), False, 'import copy\n'), ((27831, 27862), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savename'], {}), '(savename, **kwargs)\n', (27842, 27862), True, 'import matplotlib.pyplot as plt\n'), ((39965, 40013), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""warn"""', 'pri': '(True)'}), "(__name__, msg, level='warn', pri=True)\n", (39974, 40013), False, 'import detex\n'), ((42139, 42154), 'numpy.transpose', 'np.transpose', (['U'], {}), '(U)\n', (42151, 42154), True, 'import numpy as np\n'), ((43298, 43348), 'numpy.argmax', 'np.argmax', (["(cumFracEnergy['Average'] >= selectValue)"], {}), "(cumFracEnergy['Average'] >= selectValue)\n", (43307, 43348), True, 'import numpy as np\n'), ((48899, 48937), 'numpy.linspace', 'np.linspace', (['startVal', 'stopVal', 'numint'], {}), '(startVal, stopVal, numint)\n', (48910, 48937), True, 'import numpy as np\n'), ((52446, 52463), 'matplotlib.pyplot.figure', 'plt.figure', (['(a + 1)'], {}), '(a + 1)\n', (52456, 52463), True, 'import matplotlib.pyplot as plt\n'), ((53422, 53432), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (53430, 53432), True, 'import matplotlib.pyplot as plt\n'), ((55597, 55636), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (55606, 55636), False, 'import detex\n'), ((66776, 66796), 'pandas.read_csv', 'pd.read_csv', (['pksFile'], {}), '(pksFile)\n', (66787, 66796), True, 'import pandas as pd\n'), ((72767, 72806), 'numpy.round', 'np.round', (["row.Stats[x]['sampling_rate']"], {}), "(row.Stats[x]['sampling_rate'])\n", (72775, 72806), True, 'import numpy as np\n'), ((73011, 73050), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (73020, 73050), False, 'import detex\n'), ((73290, 73329), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (73299, 73329), False, 'import detex\n'), ((73984, 73996), 'numpy.min', 'np.min', (['offs'], {}), '(offs)\n', (73990, 73996), True, 'import numpy as np\n'), ((73998, 74013), 'numpy.median', 'np.median', (['offs'], {}), '(offs)\n', (74007, 74013), True, 'import numpy as np\n'), ((74015, 74027), 'numpy.max', 'np.max', (['offs'], {}), '(offs)\n', (74021, 74027), True, 'import numpy as np\n'), ((83431, 83470), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (83440, 83470), False, 'import detex\n'), ((84266, 84491), 'detex.detect._SSDetex', '_SSDetex', (['TRDF', 'utcStart', 'utcEnd', 'self.cfetcher', 'self.clusters', 'subspaceDB', 'trigCon', 'triggerLTATime', 'triggerSTATime', 'multiprocess', 'calcHist', 'self.dtype', 'estimateMags', 'classifyEvents', 'eventCorFile', 'utcSaves', 'fillZeros'], {}), '(TRDF, utcStart, utcEnd, self.cfetcher, self.clusters, subspaceDB,\n trigCon, triggerLTATime, triggerSTATime, multiprocess, calcHist, self.\n dtype, estimateMags, classifyEvents, eventCorFile, utcSaves, fillZeros)\n', (84274, 84491), False, 'from detex.detect import _SSDetex\n'), ((84788, 85035), 'detex.detect._SSDetex', '_SSDetex', (['TRDF', 'utcStart', 'utcEnd', 'self.cfetcher', 'self.clusters', 'subspaceDB', 'trigCon', 'triggerLTATime', 'triggerSTATime', 'multiprocess', 'calcHist', 'self.dtype', 'estimateMags', 'classifyEvents', 'eventCorFile', 'utcSaves', 'fillZeros'], {'issubspace': '(False)'}), '(TRDF, utcStart, utcEnd, self.cfetcher, self.clusters, subspaceDB,\n trigCon, triggerLTATime, triggerSTATime, multiprocess, calcHist, self.\n dtype, estimateMags, classifyEvents, eventCorFile, utcSaves, fillZeros,\n issubspace=False)\n', (84796, 85035), False, 'from detex.detect import _SSDetex\n'), ((85344, 85403), 'pandas.DataFrame', 'pd.DataFrame', (['[self.clusters.filt]'], {'columns': 'cols', 'index': '[0]'}), '([self.clusters.filt], columns=cols, index=[0])\n', (85356, 85403), True, 'import pandas as pd\n'), ((85416, 85471), 'detex.util.saveSQLite', 'detex.util.saveSQLite', (['dffil', 'subspaceDB', '"""filt_params"""'], {}), "(dffil, subspaceDB, 'filt_params')\n", (85437, 85471), False, 'import detex\n'), ((88447, 88483), 'pandas.concat', 'pd.concat', (['sslist'], {'ignore_index': '(True)'}), '(sslist, ignore_index=True)\n', (88456, 88483), True, 'import pandas as pd\n'), ((88573, 88609), 'pandas.concat', 'pd.concat', (['sglist'], {'ignore_index': '(True)'}), '(sglist, ignore_index=True)\n', (88582, 88609), True, 'import pandas as pd\n'), ((89537, 89574), 'pandas.concat', 'pd.concat', (['sshists'], {'ignore_index': '(True)'}), '(sshists, ignore_index=True)\n', (89546, 89574), True, 'import pandas as pd\n'), ((90279, 90316), 'pandas.concat', 'pd.concat', (['sghists'], {'ignore_index': '(True)'}), '(sghists, ignore_index=True)\n', (90288, 90316), True, 'import pandas as pd\n'), ((7906, 7945), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (7915, 7945), False, 'import detex\n'), ((11132, 11171), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (11141, 11171), False, 'import detex\n'), ((14497, 14544), 'colorsys.hls_to_rgb', 'colorsys.hls_to_rgb', (['hue', 'lightness', 'saturation'], {}), '(hue, lightness, saturation)\n', (14516, 14544), False, 'import colorsys\n'), ((15949, 15962), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (15957, 15962), True, 'import numpy as np\n'), ((16013, 16032), 'numpy.arange', 'np.arange', (['(0)', '(N + 1)'], {}), '(0, N + 1)\n', (16022, 16032), True, 'import numpy as np\n'), ((19677, 19731), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""', 'e': 'ImportError'}), "(__name__, msg, level='error', e=ImportError)\n", (19686, 19731), False, 'import detex\n'), ((21032, 21105), 'obspy.core.util.geodetics.gps2DistAzimuth', 'obspy.core.util.geodetics.gps2DistAzimuth', (['latmin', 'lonmin', 'latmin', 'lonmax'], {}), '(latmin, lonmin, latmin, lonmax)\n', (21073, 21105), False, 'import obspy\n'), ((21230, 21255), 'numpy.mean', 'np.mean', (['[latmin, latmax]'], {}), '([latmin, latmax])\n', (21237, 21255), True, 'import numpy as np\n'), ((21286, 21311), 'numpy.mean', 'np.mean', (['[lonmin, lonmax]'], {}), '([lonmin, lonmax])\n', (21293, 21311), True, 'import numpy as np\n'), ((22852, 22879), 'numpy.linspace', 'np.linspace', (['zmin', 'zmax', '(10)'], {}), '(zmin, zmax, 10)\n', (22863, 22879), True, 'import numpy as np\n'), ((22942, 22951), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22949, 22951), True, 'import matplotlib.pyplot as plt\n'), ((26463, 26500), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['clusts'], {}), '(clusts)\n', (26492, 26500), False, 'import itertools\n'), ((37358, 37375), 'numpy.transpose', 'np.transpose', (['arr'], {}), '(arr)\n', (37370, 37375), True, 'import numpy as np\n'), ((37433, 37477), 'scipy.linalg.svd', 'scipy.linalg.svd', (['tparr'], {'full_matrices': '(False)'}), '(tparr, full_matrices=False)\n', (37449, 37477), False, 'import scipy\n'), ((40565, 40618), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""', 'e': 'ValueError'}), "(__name__, msg, level='error', e=ValueError)\n", (40574, 40618), False, 'import detex\n'), ((41012, 41051), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (41021, 41051), False, 'import detex\n'), ((41256, 41309), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""', 'e': 'ValueError'}), "(__name__, msg, level='error', e=ValueError)\n", (41265, 41309), False, 'import detex\n'), ((42260, 42280), 'scipy.dot', 'scipy.dot', (['Ut', 'aliwf'], {}), '(Ut, aliwf)\n', (42269, 42280), False, 'import scipy\n'), ((42283, 42307), 'scipy.linalg.norm', 'scipy.linalg.norm', (['aliwf'], {}), '(aliwf)\n', (42300, 42307), False, 'import scipy\n'), ((42389, 42411), 'numpy.square', 'np.square', (['normUtAliwf'], {}), '(normUtAliwf)\n', (42398, 42411), True, 'import numpy as np\n'), ((42507, 42530), 'numpy.sum', 'np.sum', (['repvect[:x + 1]'], {}), '(repvect[:x + 1])\n', (42513, 42530), True, 'import numpy as np\n'), ((43995, 44048), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""', 'e': 'ValueError'}), "(__name__, msg, level='error', e=ValueError)\n", (44004, 44048), False, 'import detex\n'), ((50856, 50873), 'matplotlib.pyplot.figure', 'plt.figure', (['count'], {}), '(count)\n', (50866, 50873), True, 'import matplotlib.pyplot as plt\n'), ((50890, 50910), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (50901, 50910), True, 'import matplotlib.pyplot as plt\n'), ((50934, 50994), 'numpy.mean', 'np.mean', (["[row.FAS['bins'][1:], row.FAS['bins'][:-1]]"], {'axis': '(0)'}), "([row.FAS['bins'][1:], row.FAS['bins'][:-1]], axis=0)\n", (50941, 50994), True, 'import numpy as np\n'), ((51032, 51063), 'matplotlib.pyplot.plot', 'plt.plot', (['bins', "row.FAS['hist']"], {}), "(bins, row.FAS['hist'])\n", (51040, 51063), True, 'import matplotlib.pyplot as plt\n'), ((51080, 51128), 'matplotlib.pyplot.title', 'plt.title', (["('Station %s %s' % (station, row.Name))"], {}), "('Station %s %s' % (station, row.Name))\n", (51089, 51128), True, 'import matplotlib.pyplot as plt\n'), ((51146, 51183), 'matplotlib.pyplot.axvline', 'plt.axvline', (['row.Threshold'], {'color': '"""g"""'}), "(row.Threshold, color='g')\n", (51157, 51183), True, 'import matplotlib.pyplot as plt\n'), ((51207, 51249), 'scipy.stats.beta.pdf', 'scipy.stats.beta.pdf', (['bins', 'beta_a', 'beta_b'], {}), '(bins, beta_a, beta_b)\n', (51227, 51249), False, 'import scipy\n'), ((51345, 51397), 'matplotlib.pyplot.title', 'plt.title', (["('%s station %s' % (row.Name, row.Station))"], {}), "('%s station %s' % (row.Name, row.Station))\n", (51354, 51397), True, 'import matplotlib.pyplot as plt\n'), ((51414, 51428), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (51422, 51428), True, 'import matplotlib.pyplot as plt\n'), ((51445, 51464), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (51455, 51464), True, 'import matplotlib.pyplot as plt\n'), ((51482, 51502), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (51493, 51502), True, 'import matplotlib.pyplot as plt\n'), ((51526, 51586), 'numpy.mean', 'np.mean', (["[row.FAS['bins'][1:], row.FAS['bins'][:-1]]"], {'axis': '(0)'}), "([row.FAS['bins'][1:], row.FAS['bins'][:-1]], axis=0)\n", (51533, 51586), True, 'import numpy as np\n'), ((51624, 51655), 'matplotlib.pyplot.plot', 'plt.plot', (['bins', "row.FAS['hist']"], {}), "(bins, row.FAS['hist'])\n", (51632, 51655), True, 'import matplotlib.pyplot as plt\n'), ((51672, 51709), 'matplotlib.pyplot.axvline', 'plt.axvline', (['row.Threshold'], {'color': '"""g"""'}), "(row.Threshold, color='g')\n", (51683, 51709), True, 'import matplotlib.pyplot as plt\n'), ((51805, 51838), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Detection Statistic"""'], {}), "('Detection Statistic')\n", (51815, 51838), True, 'import matplotlib.pyplot as plt\n'), ((51855, 51874), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (51865, 51874), True, 'import matplotlib.pyplot as plt\n'), ((51891, 51905), 'matplotlib.pyplot.semilogy', 'plt.semilogy', ([], {}), '()\n', (51903, 51905), True, 'import matplotlib.pyplot as plt\n'), ((51922, 51945), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(10 ** -1)'}), '(ymin=10 ** -1)\n', (51930, 51945), True, 'import matplotlib.pyplot as plt\n'), ((51962, 51976), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (51970, 51976), True, 'import matplotlib.pyplot as plt\n'), ((52976, 53016), 'matplotlib.pyplot.plot', 'plt.plot', (["row.FracEnergy['Average']", '"""r"""'], {}), "(row.FracEnergy['Average'], 'r')\n", (52984, 53016), True, 'import matplotlib.pyplot as plt\n'), ((53033, 53075), 'matplotlib.pyplot.axvline', 'plt.axvline', (['row.NumBasis', '(0)', '(1)'], {'color': '"""g"""'}), "(row.NumBasis, 0, 1, color='g')\n", (53044, 53075), True, 'import matplotlib.pyplot as plt\n'), ((53092, 53110), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.1]'], {}), '([0, 1.1])\n', (53100, 53110), True, 'import matplotlib.pyplot as plt\n'), ((53127, 53180), 'matplotlib.pyplot.title', 'plt.title', (["('Station %s, %s' % (row.Station, row.Name))"], {}), "('Station %s, %s' % (row.Station, row.Name))\n", (53136, 53180), True, 'import matplotlib.pyplot as plt\n'), ((54782, 54812), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(1.5 * evenum + 1)'], {}), '(-1, 1.5 * evenum + 1)\n', (54790, 54812), True, 'import matplotlib.pyplot as plt\n'), ((54829, 54843), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (54839, 54843), True, 'import matplotlib.pyplot as plt\n'), ((54860, 54874), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (54870, 54874), True, 'import matplotlib.pyplot as plt\n'), ((54981, 54991), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (54989, 54991), True, 'import matplotlib.pyplot as plt\n'), ((55996, 56035), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[10, 0.9 * num_wfs]'}), '(figsize=[10, 0.9 * num_wfs])\n', (56006, 56035), True, 'import matplotlib.pyplot as plt\n'), ((56291, 56321), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5 * keynum - 1)', '(1)'], {}), '(-1.5 * keynum - 1, 1)\n', (56299, 56321), True, 'import matplotlib.pyplot as plt\n'), ((56338, 56352), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (56348, 56352), True, 'import matplotlib.pyplot as plt\n'), ((56369, 56383), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (56379, 56383), True, 'import matplotlib.pyplot as plt\n'), ((56400, 56452), 'matplotlib.pyplot.title', 'plt.title', (["('%s station %s' % (row.Name, row.Station))"], {}), "('%s station %s' % (row.Name, row.Station))\n", (56409, 56452), True, 'import matplotlib.pyplot as plt\n'), ((56942, 56959), 'matplotlib.pyplot.figure', 'plt.figure', (['count'], {}), '(count)\n', (56952, 56959), True, 'import matplotlib.pyplot as plt\n'), ((57075, 57092), 'matplotlib.pyplot.hist', 'plt.hist', (['offsets'], {}), '(offsets)\n', (57083, 57092), True, 'import matplotlib.pyplot as plt\n'), ((57109, 57153), 'matplotlib.pyplot.title', 'plt.title', (["('%s %s' % (row.Station, row.Name))"], {}), "('%s %s' % (row.Station, row.Name))\n", (57118, 57153), True, 'import matplotlib.pyplot as plt\n'), ((57170, 57191), 'matplotlib.pyplot.figure', 'plt.figure', (['(count + 1)'], {}), '(count + 1)\n', (57180, 57191), True, 'import matplotlib.pyplot as plt\n'), ((57258, 57274), 'numpy.zeros', 'np.zeros', (['numEvs'], {}), '(numEvs)\n', (57266, 57274), True, 'import numpy as np\n'), ((57300, 57316), 'numpy.zeros', 'np.zeros', (['numEvs'], {}), '(numEvs)\n', (57308, 57316), True, 'import numpy as np\n'), ((57343, 57359), 'numpy.zeros', 'np.zeros', (['numEvs'], {}), '(numEvs)\n', (57351, 57359), True, 'import numpy as np\n'), ((64103, 64139), 'obspy.core.Trace', 'obspy.core.Trace', ([], {'data': 'row.MPtd[key]'}), '(data=row.MPtd[key])\n', (64119, 64139), False, 'import obspy\n'), ((70623, 70647), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['start'], {}), '(start)\n', (70640, 70647), False, 'import obspy\n'), ((71024, 71062), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""warn"""'}), "(__name__, msg, level='warn')\n", (71033, 71062), False, 'import detex\n'), ((71354, 71406), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""warning"""', 'pri': '(False)'}), "(__name__, msg, level='warning', pri=False)\n", (71363, 71406), False, 'import detex\n'), ((73775, 73793), 'numpy.median', 'np.median', (['offsets'], {}), '(offsets)\n', (73784, 73793), True, 'import numpy as np\n'), ((83555, 83576), 'os.remove', 'os.remove', (['subspaceDB'], {}), '(subspaceDB)\n', (83564, 83576), False, 'import os\n'), ((83664, 83698), 'detex.log', 'detex.log', (['__name__', 'msg'], {'pri': '(True)'}), '(__name__, msg, pri=True)\n', (83673, 83698), False, 'import detex\n'), ((83808, 83842), 'detex.log', 'detex.log', (['__name__', 'msg'], {'pri': '(True)'}), '(__name__, msg, pri=True)\n', (83817, 83842), False, 'import detex\n'), ((84207, 84246), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (84216, 84246), False, 'import detex\n'), ((85769, 85821), 'detex.util.saveSQLite', 'detex.util.saveSQLite', (['ssinfo', 'subspaceDB', '"""ss_info"""'], {}), "(ssinfo, subspaceDB, 'ss_info')\n", (85790, 85821), False, 'import detex\n'), ((85924, 85976), 'detex.util.saveSQLite', 'detex.util.saveSQLite', (['sginfo', 'subspaceDB', '"""sg_info"""'], {}), "(sginfo, subspaceDB, 'sg_info')\n", (85945, 85976), False, 'import detex\n'), ((86089, 86142), 'detex.util.saveSQLite', 'detex.util.saveSQLite', (['sshists', 'subspaceDB', '"""ss_hist"""'], {}), "(sshists, subspaceDB, 'ss_hist')\n", (86110, 86142), False, 'import detex\n'), ((86252, 86305), 'detex.util.saveSQLite', 'detex.util.saveSQLite', (['sghists', 'subspaceDB', '"""sg_hist"""'], {}), "(sghists, subspaceDB, 'sg_hist')\n", (86273, 86305), False, 'import detex\n'), ((86732, 86766), 'detex.log', 'detex.log', (['__name__', 'msg'], {'pri': '(True)'}), '(__name__, msg, pri=True)\n', (86741, 86766), False, 'import detex\n'), ((87680, 87714), 'detex.log', 'detex.log', (['__name__', 'msg'], {'pri': '(True)'}), '(__name__, msg, pri=True)\n', (87689, 87714), False, 'import detex\n'), ((89018, 89049), 'pandas.DataFrame', 'pd.DataFrame', (['dat'], {'columns': 'cols'}), '(dat, columns=cols)\n', (89030, 89049), True, 'import pandas as pd\n'), ((89770, 89801), 'pandas.DataFrame', 'pd.DataFrame', (['dat'], {'columns': 'cols'}), '(dat, columns=cols)\n', (89782, 89801), True, 'import pandas as pd\n'), ((91082, 91121), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (91091, 91121), False, 'import detex\n'), ((4979, 4991), 'numpy.isnan', 'np.isnan', (['cc'], {}), '(cc)\n', (4987, 4991), True, 'import numpy as np\n'), ((5881, 5899), 'numpy.isnan', 'np.isnan', (['lagsamps'], {}), '(lagsamps)\n', (5889, 5899), True, 'import numpy as np\n'), ((15613, 15640), 'numpy.arange', 'np.arange', (['(N + 1)', '(N + N + 1)'], {}), '(N + 1, N + N + 1)\n', (15622, 15640), True, 'import numpy as np\n'), ((15972, 15991), 'numpy.arange', 'np.arange', (['(0)', '(N + 1)'], {}), '(0, N + 1)\n', (15981, 15991), True, 'import numpy as np\n'), ((37109, 37147), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""warn"""'}), "(__name__, msg, level='warn')\n", (37118, 37147), False, 'import detex\n'), ((40857, 40910), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""', 'e': 'ValueError'}), "(__name__, msg, level='error', e=ValueError)\n", (40866, 40910), False, 'import detex\n'), ((47585, 47636), 'scipy.stats.beta.isf', 'scipy.stats.beta.isf', (['self.Pf', 'beta_a', 'beta_b', '(0)', '(1)'], {}), '(self.Pf, beta_a, beta_b, 0, 1)\n', (47605, 47636), False, 'import scipy\n'), ((48966, 49004), 'scipy.stats.beta.sf', 'scipy.stats.beta.sf', (['x', 'beta_a', 'beta_b'], {}), '(x, beta_a, beta_b)\n', (48985, 49004), False, 'import scipy\n'), ((49523, 49577), 'detex.log', 'detex.log', (['__name__', 'msg1'], {'level': '"""error"""', 'e': 'ValueError'}), "(__name__, msg1, level='error', e=ValueError)\n", (49532, 49577), False, 'import detex\n'), ((49620, 49669), 'detex.log', 'detex.log', (['__name__', 'msg2'], {'level': '"""warn"""', 'pri': '(True)'}), "(__name__, msg2, level='warn', pri=True)\n", (49629, 49669), False, 'import detex\n'), ((52738, 52777), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (52747, 52777), False, 'import detex\n'), ((52909, 52959), 'matplotlib.pyplot.plot', 'plt.plot', (['row.FracEnergy[event]', '"""--"""'], {'color': '"""0.6"""'}), "(row.FracEnergy[event], '--', color='0.6')\n", (52917, 52959), True, 'import matplotlib.pyplot as plt\n'), ((56257, 56274), 'matplotlib.pyplot.plot', 'plt.plot', (['wf'], {'c': 'c'}), '(wf, c=c)\n', (56265, 56274), True, 'import matplotlib.pyplot as plt\n'), ((56886, 56925), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (56895, 56925), False, 'import detex\n'), ((58063, 58084), 'matplotlib.pyplot.plot', 'plt.plot', (['condat', '"""k"""'], {}), "(condat, 'k')\n", (58071, 58084), True, 'import matplotlib.pyplot as plt\n'), ((58105, 58153), 'matplotlib.pyplot.axvline', 'plt.axvline', (["row.SampleTrims['Starttime']"], {'c': '"""g"""'}), "(row.SampleTrims['Starttime'], c='g')\n", (58116, 58153), True, 'import matplotlib.pyplot as plt\n'), ((58174, 58217), 'matplotlib.pyplot.plot', 'plt.plot', (['orsamps[evenum]', '(evenum + 1)', '"""r*"""'], {}), "(orsamps[evenum], evenum + 1, 'r*')\n", (58182, 58217), True, 'import matplotlib.pyplot as plt\n'), ((61123, 61163), 'detex.streamPick.streamPick', 'detex.streamPick.streamPick', (['st'], {'ap': 'qApp'}), '(st, ap=qApp)\n', (61150, 61163), False, 'import detex\n'), ((63738, 63779), 'obspy.core.Trace', 'obspy.core.Trace', ([], {'data': 'row.AlignedTD[key]'}), '(data=row.AlignedTD[key])\n', (63754, 63779), False, 'import obspy\n'), ((64743, 64760), 'numpy.array', 'np.array', (['offsets'], {}), '(offsets)\n', (64751, 64760), True, 'import numpy as np\n'), ((65052, 65069), 'numpy.array', 'np.array', (['offsets'], {}), '(offsets)\n', (65060, 65069), True, 'import numpy as np\n'), ((66862, 66885), 'pandas.read_pickle', 'pd.read_pickle', (['pksFile'], {}), '(pksFile)\n', (66876, 66885), True, 'import pandas as pd\n'), ((76423, 76457), 'detex.log', 'detex.log', (['__name__', 'msg'], {'pri': '(True)'}), '(__name__, msg, pri=True)\n', (76432, 76457), False, 'import detex\n'), ((76529, 76712), 'detex.fas._initFAS', 'detex.fas._initFAS', (['self.subspaces[sta]', 'conDatNum', 'self.clusters', 'self.cfetcher'], {'LTATime': 'LTATime', 'STATime': 'STATime', 'staltalimit': 'staltalimit', 'numBins': 'numBins', 'dtype': 'self.dtype'}), '(self.subspaces[sta], conDatNum, self.clusters, self.\n cfetcher, LTATime=LTATime, STATime=STATime, staltalimit=staltalimit,\n numBins=numBins, dtype=self.dtype)\n', (76547, 76712), False, 'import detex\n'), ((87494, 87525), 'pandas.DataFrame', 'pd.DataFrame', (['dat'], {'columns': 'cols'}), '(dat, columns=cols)\n', (87506, 87525), True, 'import pandas as pd\n'), ((88365, 88396), 'pandas.DataFrame', 'pd.DataFrame', (['dat'], {'columns': 'cols'}), '(dat, columns=cols)\n', (88377, 88396), True, 'import pandas as pd\n'), ((4391, 4430), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (4400, 4430), False, 'import detex\n'), ((4750, 4801), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""warning"""', 'pri': '(True)'}), "(__name__, msg, level='warning', pri=True)\n", (4759, 4801), False, 'import detex\n'), ((5415, 5427), 'numpy.isnan', 'np.isnan', (['cc'], {}), '(cc)\n', (5423, 5427), True, 'import numpy as np\n'), ((30893, 30928), 'detex.construct.fast_normcorr', 'detex.construct.fast_normcorr', (['t', 's'], {}), '(t, s)\n', (30922, 30928), False, 'import detex\n'), ((31213, 31247), 'detex.log', 'detex.log', (['__name__', 'msg'], {'pri': '(True)'}), '(__name__, msg, pri=True)\n', (31222, 31247), False, 'import detex\n'), ((39710, 39738), 'numpy.mean', 'np.mean', (['aliTD[x][stim:etim]'], {}), '(aliTD[x][stim:etim])\n', (39717, 39738), True, 'import numpy as np\n'), ((40054, 40071), 'numpy.mean', 'np.mean', (['aliTD[x]'], {}), '(aliTD[x])\n', (40061, 40071), True, 'import numpy as np\n'), ((48217, 48258), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""warning"""'}), "(__name__, msg, level='warning')\n", (48226, 48258), False, 'import detex\n'), ((57858, 57890), 'obspy.core.UTCDateTime', 'obspy.core.UTCDateTime', (['tem.TIME'], {}), '(tem.TIME)\n', (57880, 57890), False, 'import obspy\n'), ((63261, 63292), 'detex.log', 'detex.log', (['__name__', 'msg'], {'pri': '(1)'}), '(__name__, msg, pri=1)\n', (63270, 63292), False, 'import detex\n'), ((67041, 67080), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (67050, 67080), False, 'import detex\n'), ((67514, 67553), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (67523, 67553), False, 'import detex\n'), ((77406, 77440), 'detex.log', 'detex.log', (['__name__', 'msg'], {'pri': '(True)'}), '(__name__, msg, pri=True)\n', (77415, 77440), False, 'import detex\n'), ((90943, 90982), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""'}), "(__name__, msg, level='error')\n", (90952, 90982), False, 'import detex\n'), ((3922, 3973), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""warning"""', 'pri': '(True)'}), "(__name__, msg, level='warning', pri=True)\n", (3931, 3973), False, 'import detex\n'), ((5594, 5643), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""error"""', 'pri': '(True)'}), "(__name__, msg, level='error', pri=True)\n", (5603, 5643), False, 'import detex\n'), ((14378, 14394), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (14392, 14394), True, 'import numpy as np\n'), ((14443, 14459), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (14457, 14459), True, 'import numpy as np\n'), ((44550, 44601), 'scipy.stats.beta.isf', 'scipy.stats.beta.isf', (['self.Pf', 'beta_a', 'beta_b', '(0)', '(1)'], {}), '(self.Pf, beta_a, beta_b, 0, 1)\n', (44570, 44601), False, 'import scipy\n'), ((77697, 77905), 'detex.fas._initFAS', 'detex.fas._initFAS', (['self.singles[sta][a:a + 1]', 'conDatNum', 'self.clusters', 'self.cfetcher'], {'LTATime': 'LTATime', 'STATime': 'STATime', 'staltalimit': 'staltalimit', 'numBins': 'numBins', 'dtype': 'self.dtype', 'issubspace': '(False)'}), '(self.singles[sta][a:a + 1], conDatNum, self.clusters,\n self.cfetcher, LTATime=LTATime, STATime=STATime, staltalimit=\n staltalimit, numBins=numBins, dtype=self.dtype, issubspace=False)\n', (77715, 77905), False, 'import detex\n'), ((89483, 89514), 'pandas.DataFrame', 'pd.DataFrame', (['dat'], {'columns': 'cols'}), '(dat, columns=cols)\n', (89495, 89514), True, 'import pandas as pd\n'), ((90225, 90256), 'pandas.DataFrame', 'pd.DataFrame', (['dat'], {'columns': 'cols'}), '(dat, columns=cols)\n', (90237, 90256), True, 'import pandas as pd\n'), ((5299, 5350), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""warning"""', 'pri': '(True)'}), "(__name__, msg, level='warning', pri=True)\n", (5308, 5350), False, 'import detex\n'), ((14671, 14688), 'struct.pack', 'pack', (['"""BBB"""', '*rgb'], {}), "('BBB', *rgb)\n", (14675, 14688), False, 'from struct import pack\n'), ((37301, 37318), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (37315, 37318), True, 'import numpy as np\n'), ((45191, 45232), 'detex.log', 'detex.log', (['__name__', 'msg'], {'level': '"""warning"""'}), "(__name__, msg, level='warning')\n", (45200, 45232), False, 'import detex\n'), ((3604, 3621), 'numpy.array', 'np.array', (['Clu.key'], {}), '(Clu.key)\n', (3612, 3621), True, 'import numpy as np\n'), ((3676, 3693), 'numpy.array', 'np.array', (['Clu.key'], {}), '(Clu.key)\n', (3684, 3693), True, 'import numpy as np\n')] |
#!/usr/bin/env python
r"""Aggregate, create, and save 1D and 2D plots.
"""
import pdb # noqa: F401
from matplotlib import pyplot as plt
from . import base
class Scatter(base.PlotWithZdata, base.CbarMaker):
r"""Create a scatter plot.
Properties
----------
Methods
-------
Abstract Properties
-------------------
Abstract Methods
----------------
Notes
-----
"""
def __init__(self, x, y, z=None, clip_data=False):
r"""
Parameters
----------
x, y: pd.Series
Data defining (x, y) coordinates.
z: pd.Series, optional
If not None, used to specify the color for each point.
clip_data: bool
If True, remove extreme values at the 0.001 and 0.999 percentitles.
"""
super(Scatter, self).__init__()
self.set_data(x, y, z, clip_data)
self._labels = base.AxesLabels(x="x", y="y", z="z" if z is not None else None)
self._log = base.LogAxes(x=False, y=False)
self.set_path(None)
def _format_axis(self, ax, collection):
super()._format_axis(ax)
x = self.data.loc[:, "x"]
minx, maxx = x.min(), x.max()
y = self.data.loc[:, "y"]
miny, maxy = y.min(), y.max()
# `pulled from the end of `ax.pcolormesh`.
collection.sticky_edges.x[:] = [minx, maxx]
collection.sticky_edges.y[:] = [miny, maxy]
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
def make_plot(self, ax=None, cbar=True, cbar_kwargs=None, **kwargs):
r"""
Make a scatter plot on `ax` using `ax.scatter`.
Paremeters
----------
ax: mpl.axes.Axes, None
If None, create an `Axes` instance from `plt.subplots`.
cbar: bool
If True, create color bar with `labels.z`.
cbar_kwargs: dict, None
If not None, kwargs passed to `self._make_cbar`.
kwargs:
Passed to `ax.pcolormesh`.
"""
if ax is None:
fig, ax = plt.subplots()
data = self.data
if self.clip:
data = self.clip_data(data, self.clip)
if data.loc[:, "z"].unique().size > 1:
zkey = "z"
else:
zkey = None
collection = ax.scatter(x="x", y="y", c=zkey, data=data, **kwargs)
if cbar and zkey is not None:
if cbar_kwargs is None:
cbar_kwargs = dict()
if "cax" not in cbar_kwargs.keys() and "ax" not in cbar_kwargs.keys():
cbar_kwargs["ax"] = ax
cbar = self._make_cbar(collection, **cbar_kwargs)
else:
cbar = None
self._format_axis(ax, collection)
return ax, cbar
| [
"matplotlib.pyplot.subplots"
] | [((2109, 2123), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2121, 2123), True, 'from matplotlib import pyplot as plt\n')] |
from torchvision import models
import numpy as np
import torch
import os
from moviepy.editor import VideoFileClip
SKIP_FRAME_RATE = 10
MINIMAX_FRAME = 4
# 함수에서 documentaiton 읽기
model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def extract_boxes(reference_clip, compare_clip):
clips = [reference_clip, compare_clip]
clips_frame_info = []
for clip in clips:
i = 0
every_frame_info = []
# loop over the frames from the video stream
while True:
i+=SKIP_FRAME_RATE # 1초에 60 fps가 있으므로 몇개는 skip해도 될거 같음!
if (i*1.0/clip.fps)> clip.duration:
break
frame = clip.get_frame(i*1.0/clip.fps)
frame = frame/255 # image, and should be in ``0-1`` range.
frame = np.transpose(frame, (2,0,1)) # HWC -> CHW(그 위치에 몇차원 애를 넣을거냔?)
x = [torch.from_numpy(frame).float()]
# label list https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_label_map.pbtxt
predictions = model(x)
prediction= predictions[0]
each_box_list = zip(prediction['boxes'].tolist(), prediction['labels'].tolist(), prediction['scores'].tolist())
# 0.95 정도 올려야 까맣게 보이는 관중이 없어짐!
filtered_box_list = filter(lambda x: x[1]==1 and x[2] >= 0.95, each_box_list)
filtered_center_dot_list = list(map(lambda x: [(x[0][0]+x[0][2])/2, (x[0][1]+x[0][3])/2], filtered_box_list))
# x좌표로 정렬하기(대형이 가로로 늘어져 있다고 가정하고 순서대로 정렬)
sorted_dot_list = sorted(filtered_center_dot_list, key = lambda x: x[0])
every_frame_info.append(sorted_dot_list) # 프레임별 정보
clips_frame_info.append(np.array(every_frame_info)) # 각 영상별로 붙이기
return clips_frame_info
def calculate_pose_distance(reference_clip, compare_clip):
clips_frame_info = extract_boxes(reference_clip, compare_clip) # 모든 프레임마다 길이 계산해줌
min_size = min(len(clips_frame_info[0]),len(clips_frame_info[1]))
dist_arr = list()
# Calculate distance (by frame)
for i in range(min_size):
if len(clips_frame_info[0][i])>0 and len(clips_frame_info[1][i])>0: # 둘다 있으면
# x축 값이 가장 가까운걸로 찾고 그거랑 비교(어차피 대형이 중요한거니까)
ref_frame_dots = clips_frame_info[0][i] # 해당 frame의 정보
compare_frame_dots = clips_frame_info[1][i] # 해당 frame의 정보
min_dot_num = min(len(ref_frame_dots), len(compare_frame_dots)) # reference 기준으로 계산할거양
penalty = ((reference_clip.w **2 + reference_clip.h**2)**0.5) * abs(len(ref_frame_dots)-len(compare_frame_dots)) # 개수가 다를때 주는 패널티
total_diff = penalty
for dot_idx in range(min_dot_num):
ref_frame_dots[dot_idx] and compare_frame_dots[dot_idx]
total_diff += ((ref_frame_dots[dot_idx][0] - compare_frame_dots[dot_idx][0])**2 + (ref_frame_dots[dot_idx][1] - compare_frame_dots[dot_idx][1])**2)**0.5
dist_arr.append(total_diff)
else:
dist_arr.append(None)
# Minimize max distance in (minimax_frames) frames
min_diff = np.float('Inf')
min_idx = 0
max_dist = []
for i in range(min_size-(MINIMAX_FRAME-1)):
if None in dist_arr[i:i+MINIMAX_FRAME]:
max_dist.append(None)
else:
tmp_max = np.max(dist_arr[i:i+MINIMAX_FRAME])
max_dist.append(tmp_max)
if min_diff > tmp_max:
min_diff = tmp_max
min_idx = i
# return distance, second, additional_info
return min_diff, (min_idx*SKIP_FRAME_RATE)/reference_clip.fps, {} | [
"numpy.float",
"torch.from_numpy",
"numpy.max",
"numpy.array",
"torchvision.models.detection.fasterrcnn_resnet50_fpn",
"numpy.transpose"
] | [((186, 243), 'torchvision.models.detection.fasterrcnn_resnet50_fpn', 'models.detection.fasterrcnn_resnet50_fpn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (226, 243), False, 'from torchvision import models\n'), ((3155, 3170), 'numpy.float', 'np.float', (['"""Inf"""'], {}), "('Inf')\n", (3163, 3170), True, 'import numpy as np\n'), ((839, 869), 'numpy.transpose', 'np.transpose', (['frame', '(2, 0, 1)'], {}), '(frame, (2, 0, 1))\n', (851, 869), True, 'import numpy as np\n'), ((1776, 1802), 'numpy.array', 'np.array', (['every_frame_info'], {}), '(every_frame_info)\n', (1784, 1802), True, 'import numpy as np\n'), ((3372, 3409), 'numpy.max', 'np.max', (['dist_arr[i:i + MINIMAX_FRAME]'], {}), '(dist_arr[i:i + MINIMAX_FRAME])\n', (3378, 3409), True, 'import numpy as np\n'), ((919, 942), 'torch.from_numpy', 'torch.from_numpy', (['frame'], {}), '(frame)\n', (935, 942), False, 'import torch\n')] |
import numpy
#Variables
PLAYERS= 2
boardW = 5
boardH = 5
board = numpy.zeros((boardW,boardH))
step = 0
winLength = 3
#Functions
def drawBoard():
global step
print("\n Step:", step, "\n")
for i in range(0,len(board)):
for j in numpy.flipud(board)[i]:
print('{:>4}'.format(getSym(j)), end = "")
print("\n")
step+=1;
symbols="■XOABCDEFGHIJKLMNOPQRSTUVWXZ"
def getSym(n):
return symbols[int(n)]
def move(player):
while(True):
row, column = eval(input("Player "+str(player)+" Move, Enter coordinates: "))
try:
if board[column-1][row-1]==0:
board[column-1][row-1]=player
break;
else:
print("You can't move there! Choose a blank spot!")
except:
print("Coordinates Out of Bounds, Try again!")
def contains(small, big):
for i in range(len(big)-len(small)+1):
for j in range(len(small)):
if big[i+j] != small[j]:
break
else:
return i, i+len(small)
return False
def getState():
#checks columns
for r in range(board.shape[0]):
for p in range(1, PLAYERS+1):
#if all(board[w,:] == numpy.full((board.shape[1]),p)):
if contains(numpy.full(3,p), board[r,:]):
return p
#checks rows
for c in range(board.shape[1]):
for p in range(1, PLAYERS+1):
#if all(board[:,h] == numpy.full((board.shape[0]),p)):
if contains(numpy.full(winLength,p), board[:,c]):
return p
#check diagonals
maxDiagonalOffset=max(board.shape[0], board.shape[1])-(winLength-1)
for o in range(-maxDiagonalOffset+1,maxDiagonalOffset):
for p in range(1, PLAYERS+1):
for i in [-1,1]:
if contains(numpy.full(winLength,p), numpy.diagonal(board[::i],o)):
return p
#check for no more blanks
if 0 not in board:
return "Tied"
return 0
#Main loop
while(True):
step = 0
board = numpy.zeros((5,5))
print(" ======= EXTREME TIC TAC TOE ======= ")
#Variables
PLAYERS=int(input("How many players?: "))
boardW = int(input("What's the board's width?: "))
boardH = int(input("What's the board's height?: "))
board = numpy.zeros((boardW,boardH))
step = 0
winLength = int(input("How many in a row to win?: "))
print(" ======= GAME STARTING... ======= ")
while(True):
drawBoard()
if getState()=="Tied":
print("The game tied!")
break;
elif getState()>0:
print("Player", getState(), "Won!")
break;
move((step-1)%PLAYERS+1)
if input("Keep playing?(press y): ").lower() != 'y':
break
| [
"numpy.full",
"numpy.zeros",
"numpy.diagonal",
"numpy.flipud"
] | [((66, 95), 'numpy.zeros', 'numpy.zeros', (['(boardW, boardH)'], {}), '((boardW, boardH))\n', (77, 95), False, 'import numpy\n'), ((2058, 2077), 'numpy.zeros', 'numpy.zeros', (['(5, 5)'], {}), '((5, 5))\n', (2069, 2077), False, 'import numpy\n'), ((2313, 2342), 'numpy.zeros', 'numpy.zeros', (['(boardW, boardH)'], {}), '((boardW, boardH))\n', (2324, 2342), False, 'import numpy\n'), ((248, 267), 'numpy.flipud', 'numpy.flipud', (['board'], {}), '(board)\n', (260, 267), False, 'import numpy\n'), ((1283, 1299), 'numpy.full', 'numpy.full', (['(3)', 'p'], {}), '(3, p)\n', (1293, 1299), False, 'import numpy\n'), ((1520, 1544), 'numpy.full', 'numpy.full', (['winLength', 'p'], {}), '(winLength, p)\n', (1530, 1544), False, 'import numpy\n'), ((1831, 1855), 'numpy.full', 'numpy.full', (['winLength', 'p'], {}), '(winLength, p)\n', (1841, 1855), False, 'import numpy\n'), ((1856, 1885), 'numpy.diagonal', 'numpy.diagonal', (['board[::i]', 'o'], {}), '(board[::i], o)\n', (1870, 1885), False, 'import numpy\n')] |
import json
import os
import random
import bottle
import platform
from api import ping_response, start_response, move_response, end_response
lastMove = ''
@bottle.route('/')
def index():
return '''
Battlesnake documentation can be found at
<a href="https://docs.battlesnake.com">https://docs.battlesnake.com</a>.
'''
@bottle.route('/static/<path:path>')
def static(path):
"""
Given a path, return the static file located relative
to the static folder.
This can be used to return the snake head URL in an API response.
"""
return bottle.static_file(path, root='static/')
@bottle.post('/ping')
def ping():
"""
A keep-alive endpoint used to prevent cloud application platforms,
such as Heroku, from sleeping the application instance.
"""
return ping_response()
@bottle.post('/start')
def start():
data = bottle.request.json
"""
TODO: If you intend to have a stateful snake AI,
initialize your snake state here using the
request's data if necessary.
"""
print(json.dumps(data))
color = "#50DEDA"
return start_response(color)
@bottle.post('/move')
def move():
data = bottle.request.json
global lastMove
"""
TODO: Using the data from the endpoint request object, your
snake AI must choose a direction to move in.
"""
print(json.dumps(data, indent=2))
directions = ['up', 'down', 'left', 'right']
#Terminology
# - data['board']['width'] how you tell max or least x
# - data['board']['length'] how you tell max or least y
#direction = random.choice(directions)
#direction='right'
#Ideas for moving toward food
head_x = data['you']['body'][0]['x']
head_y = data['you']['body'][0]['y']
food_x = data['board']['food'][0]['x']
food_y = data['board']['food'][0]['y']
if health < 26:
#this would have it doing any other code first untill it was low health
if head_x > food_x:
direction ='left'
elif head_x < food_x:
direction ='right'
#this makes the x cordinate align with the food
elif head_y > food_y:
direction ='up'
elif head_y < food_y:
direction ='down'
#this makes the y cordinate align with the food
#If head_x > food_x then *move left*
#If head_x < food_x then *move right*
#If *Head x_location = food x_location = 0* then *don't change x_location* - don't need
#If head_y > food_y then *move up*
#If head_y < food_y then *move down*
#If *Head y_location = food y_location = 0* then *don't change y_location* - don't need
#Atempts to make the snake go around the outside of the board
#Seems like it might work with proper terminology
#if *snake's head location* == y_loction=*least* then *go* 'right' unless x_loction=*max* *then go* direction='down'
#if *snake's head location* == x_loction=*max* then *go* 'down' unless x_loction=*max* y_loction=*max* *then go* direction='left'
#if *snake's head location* == y_loction=*max* then *go* 'left' unless x_loction=*least* *then go* direction='up'
#if *snake's head location* == x_loction=*least* then *go* 'up' unless x_loction=*lest* y_loction=*least* *then go* direction='right'
#This is just a direction I chose at random to start going
#direction = 'right'
#This makes the snake go in circles
if lastMove=='':
direction='right'
if lastMove=='right':
direction='down'
if lastMove=='down':
direction='left'
if lastMove=='left':
direction='up'
if lastMove=='up':
direction='right'
lastMove=direction
return move_response(direction)
@bottle.post('/end')
def end():
data = bottle.request.json
"""
TODO: If your snake AI was stateful,
clean up any stateful objects here.
"""
print(json.dumps(data, indent=2))
return end_response()
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()
if __name__ == '__main__':
s = platform.system() # s now contains the name of your operating system
if s == 'Windows' or s == 'Darwin': # if you’re running on Windows or Mac
bottle.run(
application,
host=os.getenv('IP', '0.0.0.0'),
port=os.getenv('PORT', '8080'),
debug=os.getenv('DEBUG', True),
#server='paste'
server='tornado'
)
else: # otherwise serve on port 80
bottle.run(
application,
host=os.getenv('IP', '0.0.0.0'),
port=os.getenv('PORT', '80'),
debug=os.getenv('DEBUG', True)
)
| [
"bottle.static_file",
"api.move_response",
"os.getenv",
"bottle.post",
"api.end_response",
"json.dumps",
"bottle.route",
"api.start_response",
"platform.system",
"api.ping_response",
"bottle.default_app"
] | [((159, 176), 'bottle.route', 'bottle.route', (['"""/"""'], {}), "('/')\n", (171, 176), False, 'import bottle\n'), ((342, 377), 'bottle.route', 'bottle.route', (['"""/static/<path:path>"""'], {}), "('/static/<path:path>')\n", (354, 377), False, 'import bottle\n'), ((622, 642), 'bottle.post', 'bottle.post', (['"""/ping"""'], {}), "('/ping')\n", (633, 642), False, 'import bottle\n'), ((832, 853), 'bottle.post', 'bottle.post', (['"""/start"""'], {}), "('/start')\n", (843, 853), False, 'import bottle\n'), ((1152, 1172), 'bottle.post', 'bottle.post', (['"""/move"""'], {}), "('/move')\n", (1163, 1172), False, 'import bottle\n'), ((4055, 4074), 'bottle.post', 'bottle.post', (['"""/end"""'], {}), "('/end')\n", (4066, 4074), False, 'import bottle\n'), ((4344, 4364), 'bottle.default_app', 'bottle.default_app', ([], {}), '()\n', (4362, 4364), False, 'import bottle\n'), ((578, 618), 'bottle.static_file', 'bottle.static_file', (['path'], {'root': '"""static/"""'}), "(path, root='static/')\n", (596, 618), False, 'import bottle\n'), ((813, 828), 'api.ping_response', 'ping_response', ([], {}), '()\n', (826, 828), False, 'from api import ping_response, start_response, move_response, end_response\n'), ((1127, 1148), 'api.start_response', 'start_response', (['color'], {}), '(color)\n', (1141, 1148), False, 'from api import ping_response, start_response, move_response, end_response\n'), ((4022, 4046), 'api.move_response', 'move_response', (['direction'], {}), '(direction)\n', (4035, 4046), False, 'from api import ping_response, start_response, move_response, end_response\n'), ((4269, 4283), 'api.end_response', 'end_response', ([], {}), '()\n', (4281, 4283), False, 'from api import ping_response, start_response, move_response, end_response\n'), ((4401, 4418), 'platform.system', 'platform.system', ([], {}), '()\n', (4416, 4418), False, 'import platform\n'), ((1074, 1090), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1084, 1090), False, 'import json\n'), ((1393, 1419), 'json.dumps', 'json.dumps', (['data'], {'indent': '(2)'}), '(data, indent=2)\n', (1403, 1419), False, 'import json\n'), ((4229, 4255), 'json.dumps', 'json.dumps', (['data'], {'indent': '(2)'}), '(data, indent=2)\n', (4239, 4255), False, 'import json\n'), ((4610, 4636), 'os.getenv', 'os.getenv', (['"""IP"""', '"""0.0.0.0"""'], {}), "('IP', '0.0.0.0')\n", (4619, 4636), False, 'import os\n'), ((4655, 4680), 'os.getenv', 'os.getenv', (['"""PORT"""', '"""8080"""'], {}), "('PORT', '8080')\n", (4664, 4680), False, 'import os\n'), ((4700, 4724), 'os.getenv', 'os.getenv', (['"""DEBUG"""', '(True)'], {}), "('DEBUG', True)\n", (4709, 4724), False, 'import os\n'), ((4894, 4920), 'os.getenv', 'os.getenv', (['"""IP"""', '"""0.0.0.0"""'], {}), "('IP', '0.0.0.0')\n", (4903, 4920), False, 'import os\n'), ((4939, 4962), 'os.getenv', 'os.getenv', (['"""PORT"""', '"""80"""'], {}), "('PORT', '80')\n", (4948, 4962), False, 'import os\n'), ((4982, 5006), 'os.getenv', 'os.getenv', (['"""DEBUG"""', '(True)'], {}), "('DEBUG', True)\n", (4991, 5006), False, 'import os\n')] |
import os
import pytest
from zscli.cli import API
DEBUG = True
TEST_LIMIT = 10
TEST_PAGE = 1
VERBOSE = False
@pytest.fixture()
def api_key():
return os.environ["ZEROSSL_API_KEY"]
@pytest.fixture()
def test_api(api_key):
return API(DEBUG, api_key, TEST_LIMIT, TEST_PAGE, VERBOSE)
| [
"pytest.fixture",
"zscli.cli.API"
] | [((115, 131), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (129, 131), False, 'import pytest\n'), ((191, 207), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (205, 207), False, 'import pytest\n'), ((242, 293), 'zscli.cli.API', 'API', (['DEBUG', 'api_key', 'TEST_LIMIT', 'TEST_PAGE', 'VERBOSE'], {}), '(DEBUG, api_key, TEST_LIMIT, TEST_PAGE, VERBOSE)\n', (245, 293), False, 'from zscli.cli import API\n')] |
from common.BaseCommand import BaseCommand
from common.ResultAndData import *
from models.CalEvent import CalEvent
import argparse
from argparse import Namespace
from msgraph import helpers
from tabulate import tabulate
import datetime
import os
class WeekCommand(BaseCommand):
def add_parser(self, subparsers):
list_cmd = subparsers.add_parser(
"week", description="Gets your week at a glance"
)
return list_cmd
def do_command_with_args(self, instance, args):
# type: (Instance, Namespace) -> ResultAndData
db = instance.get_db()
instance.login_to_graph()
rd = instance.get_current_user()
if not rd.success:
return Error("no logged in user")
current_user = rd.data
graph = instance.get_graph_session()
today = datetime.date.today()
start = today - datetime.timedelta(days=today.weekday())
end = start + datetime.timedelta(days=6)
startdt = datetime.datetime.combine(start, datetime.datetime.min.time())
enddt = datetime.datetime.combine(end, datetime.datetime.max.time())
blobs = helpers.list_events_in_time_range(graph, start=startdt, end=enddt)
events = []
for blob in blobs["value"]:
e = CalEvent.from_json(blob)
events.append(e)
table = []
for e in events:
row = [
e.subject,
e.start.strftime("%c"),
e.end.strftime("%c"),
e.location,
e.organizer,
]
table.append(row)
print(
tabulate(
table,
headers=["Title", "Start Time", "End Time", "Location", "Created By"],
)
)
return Success()
| [
"msgraph.helpers.list_events_in_time_range",
"tabulate.tabulate",
"datetime.datetime.min.time",
"datetime.timedelta",
"datetime.date.today",
"datetime.datetime.max.time",
"models.CalEvent.CalEvent.from_json"
] | [((869, 890), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (888, 890), False, 'import datetime\n'), ((1188, 1254), 'msgraph.helpers.list_events_in_time_range', 'helpers.list_events_in_time_range', (['graph'], {'start': 'startdt', 'end': 'enddt'}), '(graph, start=startdt, end=enddt)\n', (1221, 1254), False, 'from msgraph import helpers\n'), ((980, 1006), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(6)'}), '(days=6)\n', (998, 1006), False, 'import datetime\n'), ((1061, 1089), 'datetime.datetime.min.time', 'datetime.datetime.min.time', ([], {}), '()\n', (1087, 1089), False, 'import datetime\n'), ((1139, 1167), 'datetime.datetime.max.time', 'datetime.datetime.max.time', ([], {}), '()\n', (1165, 1167), False, 'import datetime\n'), ((1334, 1358), 'models.CalEvent.CalEvent.from_json', 'CalEvent.from_json', (['blob'], {}), '(blob)\n', (1352, 1358), False, 'from models.CalEvent import CalEvent\n'), ((1704, 1794), 'tabulate.tabulate', 'tabulate', (['table'], {'headers': "['Title', 'Start Time', 'End Time', 'Location', 'Created By']"}), "(table, headers=['Title', 'Start Time', 'End Time', 'Location',\n 'Created By'])\n", (1712, 1794), False, 'from tabulate import tabulate\n')] |
import traceback
import copy
from cloud_console_common.utils import *
from cloud_console_common import log
class ExtractLogic:
"""Implementation of ExtractLogic that will extract the data from a remote API call
"""
def __init__(self):
pass
def extract(self, raw_data)->dict:
"""Receive the raw data from a remote call. In the case of AWS and the Boto3 library, this should always be a dict.
.. note::
Typically this method must be customized to suite the needs of the API called
:param raw_data: The data, usually a dict, returned from a remote API call
:type raw_data: dict
:return: The extract logic implementation will return the relevant extracted data portion.
:rtype: dict
"""
log.warning(message='This method is a dummy method with no implementation logic - create your own')
if raw_data is None:
return dict()
if isinstance(raw_data, dict):
return raw_data
return dict()
class RemoteCallLogic:
def __init__(self, extract_logic: ExtractLogic=ExtractLogic(), base_data: dict=dict(), **kwargs):
log.debug(message='kwargs={}'.format(kwargs))
self.extract_logic = extract_logic
self.base_data = base_data
self.args = kwargs.items()
def execute(self)->dict:
log.warning(message='This method is a dummy method with no implementation logic - create your own')
return self.extract_logic.extract(raw_data=self.base_data)
class DataPointBase:
def __init__(
self,
name: str,
label: str=None,
initial_value: object=None,
remote_call_logic: RemoteCallLogic=RemoteCallLogic(),
ui_section_name: str='',
ui_tab_name: str='',
ui_identifier: str=''
):
if basic_string_validation(input_string=name) is False:
raise Exception('name basic validation failed. must be a string of 1 to 1024 characters')
self.name = name
self.label = name[0:32]
if label is not None:
if basic_string_validation(input_string=label, max_len=32) is False:
raise Exception('If the label is supplied, it must be a string between 1 and 32 characters')
self.label = label
self.children_data_points = dict() # Dictionary of DataPointBase with the "name" of each data point as dictionary index
self.value = initial_value
self.display_value = '-'
self.remote_call_logic = remote_call_logic
self.ui_section_name = ui_section_name
self.ui_tab_name = ui_tab_name
self.ui_identifier = ui_identifier
def call_remote_api(self):
return self.remote_call_logic.execute()
def update_value(self, value: dict=dict()):
pass
def update_child_data_point(self, data_point_name: str, value=dict()):
pass
def get_ui_display_value(self)->str:
if self.display_value is not None:
return str(self.display_value)
return '-'
def __str__(self):
return self.get_ui_display_value()
def __repr__(self):
if isinstance(self.value, dict):
return 'DataPoint: {}: {}'.format(self.name, self.value)
return 'DataPoint: {}: {}'.format(self.name, repr(self.value))
class DataPoint(DataPointBase):
def __init__(
self, name: str,
label: str=None,
initial_value: object=None,
remote_call_logic: RemoteCallLogic=RemoteCallLogic(),
ui_section_name: str='',
ui_tab_name: str='',
ui_identifier: str=''
):
super().__init__(
name=name,
label=label,
initial_value=initial_value,
remote_call_logic=remote_call_logic,
ui_section_name=ui_section_name,
ui_tab_name=ui_tab_name,
ui_identifier=ui_identifier
)
def add_child_data_point(self, data_point: DataPointBase):
if data_point is None:
log.warning(message='data_point cannot be None - ignoring')
return
if not isinstance(data_point, DataPointBase):
log.warning(message='data_point cannot be of type "{}" - ignoring'.format(type(data_point)))
return
self.children_data_points[data_point.name] = data_point
def update_value(self, value: dict=dict()):
log.debug(message='Updated DataPoint named "{}" with value={}'.format(self.name, value))
self.remote_call_logic.base_data = value
self.value = self.call_remote_api()
for idx, data_point in self.children_data_points.items():
log.debug(message='Updating child datapoint "{}"'.format(idx))
self.update_child_data_point(data_point_name=idx, value=self.remote_call_logic.base_data)
def update_child_data_point(self, data_point_name: str, value=dict()):
if data_point_name not in self.children_data_points:
return
if isinstance(self.children_data_points[data_point_name], DataPointBase):
self.children_data_points[data_point_name].update_value(value=value)
def get_child_by_name(self, name: str)->DataPointBase:
if name in self.children_data_points:
return self.children_data_points[name]
raise Exception('Child DataPoint named "{}" not found'.format(name))
def get_child_by_label(self, label: str)->list:
children_data_points = list()
for child_data_point_name, child_data_point_obj in self.children_data_points.items():
if child_data_point_obj.label == label:
children_data_points.append(child_data_point_obj)
return children_data_points
class DataObjectCache:
def __init__(self, identifier: str, data_point: DataPoint=None, max_cache_lifetime: int=300):
if basic_string_validation(input_string=identifier) is False:
log.error(message='Invalid Identifier')
raise Exception('Invalid identifier')
self.identifier = identifier
self.last_called_timestamp_utc = 0
self.data_point = data_point
self.max_cache_lifetime = max_cache_lifetime
def update_results(self, results: dict):
if self.data_point is None:
raise Exception('data point not yet initialized')
if results is None:
return
if not isinstance(results, dict):
return
self.data_point.update_value(value=results)
self.last_called_timestamp_utc = get_utc_timestamp(with_decimal=False)
log.info(message='Updated "{}"'.format(self.identifier))
def refresh_cache(self, force: bool=False)->bool:
now = get_utc_timestamp(with_decimal=False)
if ((now - self.last_called_timestamp_utc) > self.max_cache_lifetime) or (force is True):
log.info(message='Refreshing local data state - data point "{}"'.format(self.data_point.name))
self.data_point.update_value()
self.last_called_timestamp_utc = now
return True
return False
# EOF
| [
"cloud_console_common.log.error",
"cloud_console_common.log.warning"
] | [((791, 900), 'cloud_console_common.log.warning', 'log.warning', ([], {'message': '"""This method is a dummy method with no implementation logic - create your own"""'}), "(message=\n 'This method is a dummy method with no implementation logic - create your own'\n )\n", (802, 900), False, 'from cloud_console_common import log\n'), ((1368, 1477), 'cloud_console_common.log.warning', 'log.warning', ([], {'message': '"""This method is a dummy method with no implementation logic - create your own"""'}), "(message=\n 'This method is a dummy method with no implementation logic - create your own'\n )\n", (1379, 1477), False, 'from cloud_console_common import log\n'), ((4043, 4102), 'cloud_console_common.log.warning', 'log.warning', ([], {'message': '"""data_point cannot be None - ignoring"""'}), "(message='data_point cannot be None - ignoring')\n", (4054, 4102), False, 'from cloud_console_common import log\n'), ((5945, 5984), 'cloud_console_common.log.error', 'log.error', ([], {'message': '"""Invalid Identifier"""'}), "(message='Invalid Identifier')\n", (5954, 5984), False, 'from cloud_console_common import log\n')] |
from fastapi.routing import APIRouter
from lnbits.db import Database
db = Database("database")
core_app: APIRouter = APIRouter()
from .views.api import * # noqa
from .views.generic import * # noqa
from .views.public_api import * # noqa
| [
"fastapi.routing.APIRouter",
"lnbits.db.Database"
] | [((76, 96), 'lnbits.db.Database', 'Database', (['"""database"""'], {}), "('database')\n", (84, 96), False, 'from lnbits.db import Database\n'), ((120, 131), 'fastapi.routing.APIRouter', 'APIRouter', ([], {}), '()\n', (129, 131), False, 'from fastapi.routing import APIRouter\n')] |
import sys
import click
from api.google import GoogleTranslator
from api.weblio import WeblioTranslator
_translators = {
'google': GoogleTranslator,
'weblio': WeblioTranslator,
}
@click.command()
@click.option('--text')
@click.option('--from', 'source')
@click.option('--to', 'target')
@click.option('--api', default='weblio')
def main(text, source, target, api):
if api not in _translators:
print('Error: API "%s" is not supported.' % api)
sys.exit(1)
print(_translators[api]().translate(text, source, target))
if __name__ == '__main__':
main()
# EOF
| [
"click.option",
"click.command",
"sys.exit"
] | [((194, 209), 'click.command', 'click.command', ([], {}), '()\n', (207, 209), False, 'import click\n'), ((211, 233), 'click.option', 'click.option', (['"""--text"""'], {}), "('--text')\n", (223, 233), False, 'import click\n'), ((235, 267), 'click.option', 'click.option', (['"""--from"""', '"""source"""'], {}), "('--from', 'source')\n", (247, 267), False, 'import click\n'), ((269, 299), 'click.option', 'click.option', (['"""--to"""', '"""target"""'], {}), "('--to', 'target')\n", (281, 299), False, 'import click\n'), ((301, 340), 'click.option', 'click.option', (['"""--api"""'], {'default': '"""weblio"""'}), "('--api', default='weblio')\n", (313, 340), False, 'import click\n'), ((475, 486), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (483, 486), False, 'import sys\n')] |
"""
Author: <NAME>
Created: 3/11/2020 9:04 AM
"""
from Climate_Shocks.vcsn_pull import vcsn_pull_single_site
from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly
from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record
from Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit import calc_past_pasture_growth_anomaly
import ksl_env
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import itertools
import sys
event_def_dir = sys.argv[1] # the path to the directory
print(event_def_dir)
vcsn_version = sys.argv[2] # 'trended', 'detrended2'
print(vcsn_version)
if vcsn_version not in ['trended', 'detrended2']:
raise ValueError('incorrect value for vcsn_version: {}'.format(vcsn_version, ))
if not os.path.exists(event_def_dir):
os.makedirs(event_def_dir)
irrigated_pga = calc_past_pasture_growth_anomaly('irrigated', site='eyrewell').reset_index()
irrigated_pga.loc[:, 'year'] = irrigated_pga.date.dt.year
irrigated_pga = irrigated_pga.set_index(['month', 'year'])
dryland_pga = calc_past_pasture_growth_anomaly('dryland').reset_index()
dryland_pga.loc[:, 'year'] = dryland_pga.date.dt.year
dryland_pga = dryland_pga.set_index(['month', 'year'])
def prob(x):
out = np.nansum(x) / len(x)
return out
def add_pga_from_idx(idx):
idx = idx.dropna()
irr_temp = irrigated_pga.loc[idx].reset_index()
irr_temp2 = irr_temp.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
dry_temp = dryland_pga.loc[idx].reset_index()
dry_temp2 = dry_temp.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
temp3 = pd.merge(irr_temp2, dry_temp2, left_index=True, right_index=True, suffixes=('_irr', '_dry'))
return pd.DataFrame(temp3)
def add_pga(grouped_data, sim_keys, outdata):
grouped_data = grouped_data.set_index(['month', 'year'])
years = {}
for k in sim_keys:
idx = grouped_data.loc[grouped_data.loc[:, k], k]
assert idx.all()
idx = idx.index
years[k] = idx.values
temp_irr = irrigated_pga.loc[idx].reset_index()
temp_irr2 = temp_irr.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
temp_dry = dryland_pga.loc[idx].reset_index()
temp_dry2 = temp_dry.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
for k2 in temp_irr2:
outdata.loc[:, (k, 'pga_irr_{}'.format(k2))] = temp_irr2.loc[:, k2]
outdata.loc[:, (k, 'pga_dry_{}'.format(k2))] = temp_dry2.loc[:, k2]
mx_years = 48 * 12 + 1
out_years = pd.DataFrame(index=range(mx_years), columns=sim_keys)
for k in sim_keys:
missing_len = mx_years - len(years[k])
out_years.loc[:, k] = np.concatenate((years[k], np.zeros(missing_len) * np.nan))
outdata = outdata.sort_index(axis=1, level=0, sort_remaining=False)
return outdata, out_years
def calc_dry_recurance_monthly_smd():
data = get_vcsn_record(vcsn_version)
t = calc_smd_monthly(rain=data.rain, pet=data.pet, dates=data.index)
data.loc[:, 'smd'] = t
t = data.loc[:, ['doy', 'smd']].groupby('doy').mean().to_dict()
data.loc[:, 'sma'] = data.loc[:, 'smd'] - data.loc[:, 'doy'].replace(t['smd'])
data.reset_index(inplace=True)
data.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_raw.csv'))
smd_thresholds = [0]
sma_thresholds = [-5, -10, -12, -15, -17, -20]
ndays = [5, 7, 10, 14]
out_keys = []
for smd_t, sma_t in itertools.product(smd_thresholds, sma_thresholds):
k = 'd_smd{:03d}_sma{:02d}'.format(smd_t, sma_t)
data.loc[:, k] = (data.loc[:, 'smd'] <= smd_t) & (data.loc[:, 'sma'] <= sma_t)
out_keys.append(k)
grouped_data = data.loc[:, ['month', 'year',
'smd', 'sma'] + out_keys].groupby(['month', 'year']).sum().reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'monthly_smd_dry_monthly_data_desc.csv'))
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_years.csv'))
def calc_dry_recurance():
data = get_vcsn_record(vcsn_version).reset_index()
temp = calc_sma_smd_historical(data['rain'], data['pet'], data.date, 150, 1)
trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
data.loc[:, trans_cols] = temp.loc[:, trans_cols]
data.to_csv(os.path.join(event_def_dir, 'dry_raw.csv'))
smd_thresholds = [0, -110, -110]
sma_thresholds = [-20, 0, -20]
ndays = [5, 7, 10, 14]
out_keys = []
for smd_t, sma_t in zip(smd_thresholds, sma_thresholds):
k = 'd_smd{:03d}_sma{:02d}'.format(smd_t, sma_t)
data.loc[:, k] = (data.loc[:, 'smd'] <= smd_t) & (data.loc[:, 'sma'] <= sma_t)
out_keys.append(k)
grouped_data = data.loc[:, ['month', 'year',
'smd', 'sma'] + out_keys].groupby(['month', 'year']).sum().reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'dry_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'dry_monthly_data_desc.csv'))
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'dry_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'dry_prob_only_prob.csv'), float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'dry_years.csv'))
def calc_wet_recurance():
data = get_vcsn_record(vcsn_version).reset_index()
temp = calc_sma_smd_historical(data['rain'], data['pet'], data.date, 150, 1)
trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
data.loc[:, trans_cols] = temp.loc[:, trans_cols]
temp = False
if temp: # just to look at some plots
fig, (ax, ax2, ax3) = plt.subplots(3, sharex=True)
ax.plot(data.date, data.smd)
ax2.plot(data.date, data.drain)
ax3.plot(data.date, data.rain)
plt.show()
data.to_csv(os.path.join(event_def_dir, 'smd_wet_raw.csv'))
thresholds_rain = [5, 3, 1, 0]
thresholds_smd = [0, -5, -10]
ndays = [7, 10, 14]
out_keys = []
for t_r, t_smd in itertools.product(thresholds_rain, thresholds_smd):
k = 'd_r{}_smd{}'.format(t_r, t_smd)
data.loc[:, k] = (data.loc[:, 'rain'] >= t_r) & (data.loc[:, 'smd'] >= t_smd)
out_keys.append(k)
grouped_data = data.loc[:, ['month', 'year', 'rain'] + out_keys].groupby(['month', 'year']).sum().reset_index()
# make montly restriction anaomaloy - mean
temp = grouped_data.groupby('month').mean().loc[:, 'rain'].to_dict()
grouped_data.loc[:, 'rain_an_mean'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'rain_an_mean': temp})
grouped_data.loc[:, 'rain_an_mean'] = grouped_data.loc[:, 'rain'] - grouped_data.loc[:, 'rain_an_mean']
# make montly restriction anaomaloy - median
temp = grouped_data.groupby('month').median().loc[:, 'rain'].to_dict()
grouped_data.loc[:, 'rain_an_med'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'rain_an_med': temp})
grouped_data.loc[:, 'rain_an_med'] = grouped_data.loc[:, 'rain'] - grouped_data.loc[:, 'rain_an_med']
grouped_data.to_csv(os.path.join(event_def_dir, 'smd_wet_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'smd_wet_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'smd_wet_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'smd_wet_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'smd_wet_years.csv'))
def calc_wet_recurance_ndays():
ndays = {
'org': { # this is the best value!
5: 14,
6: 11,
7: 11,
8: 13,
9: 13,
}
}
for v in ndays.values():
v.update({
1: 99,
2: 99,
3: 99,
4: 99,
10: 99,
11: 99,
12: 99,
})
data = get_vcsn_record(vcsn_version).reset_index()
temp = calc_sma_smd_historical(data['rain'], data['pet'], data.date, 150, 1)
trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
data.loc[:, trans_cols] = temp.loc[:, trans_cols]
data.loc[:, 'ndays_rain'] = (data.loc[:, 'rain'] > 0.01).astype(float)
data.to_csv(os.path.join(event_def_dir, 'ndays_wet_raw.csv'))
grouped_data = data.loc[:, ['month', 'year', 'rain', 'ndays_rain']].groupby(['month', 'year']).sum().reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'ndays_wet_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'ndays_wet_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for k, val in ndays.items():
ok = '{}'.format(k)
out_keys2.append(ok)
grouped_data.loc[:, 'limit'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'limit': val})
grouped_data.loc[:, ok] = grouped_data.loc[:, 'ndays_rain'] >= grouped_data.loc[:, 'limit']
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'ndays_wet_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'ndays_wet_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'ndays_wet_years.csv'))
def calc_dry_rolling():
bulk_ndays = [5, 10, 15, 20]
ndays = {}
for bnd in bulk_ndays:
ndays['ndays{}'.format(bnd)] = {k: bnd for k in range(1, 13)}
thresholds = { # this did not end up getting used
'first': {
4: 15,
5: 10,
8: 5,
9: 10,
},
'first-3': {
4: 15 - 3,
5: 10 - 3,
8: 5 - 3,
9: 10 - 3,
},
'first-5': {
4: 15 - 5,
5: 10 - 5,
8: 5 - 5,
9: 10 - 5,
},
'first-10': {
4: 15 - 10,
5: 10 - 10,
8: 5 - 10,
9: 10 - 10,
},
'zero': {
4: 0,
5: 0,
8: 0,
9: 0,
},
'one': {
4: 1,
5: 1,
8: 1,
9: 1,
},
'first-7': {
4: 15 - 7,
5: 10 - 7,
8: 5 - 7,
9: 10 - 7,
},
}
for v in thresholds.values():
v.update({
1: -1,
2: -1,
3: -1,
6: -1,
7: -1,
10: -1,
11: -1,
12: -1,
})
data = get_vcsn_record(vcsn_version).reset_index()
data.loc[:, 'roll_rain_10'] = data.loc[:, 'rain'].rolling(10).sum()
out_keys = []
outdata = pd.DataFrame(
index=pd.MultiIndex.from_product([range(1, 13), range(1972, 2020)], names=['month', 'year']))
for nd, thresh in itertools.product(ndays.keys(), thresholds.keys()):
temp_data = data.copy(deep=True)
ok = '{}_{}'.format(thresh, nd)
out_keys.append(ok)
for m in range(1, 13):
idx = data.month == m
temp_data.loc[idx, ok] = temp_data.loc[idx, 'roll_rain_10'] <= thresholds[thresh][m]
temp_data.loc[idx, 'ndays'] = ndays[nd][m]
temp_data = temp_data.groupby(['month', 'year']).agg({ok: 'sum', 'ndays': 'mean'})
outdata.loc[:, ok] = temp_data.loc[:, ok] >= temp_data.loc[:, 'ndays']
outdata.to_csv(os.path.join(event_def_dir, 'rolling_dry_monthly.csv'))
outdata = outdata.reset_index()
out = outdata.loc[:, ['month'] + out_keys].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(outdata, set(out_keys) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'rolling_dry_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'variable_hot_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'rolling_dry_years.csv'))
return list(set(out_keys) - set(drop_keys)), out
def calc_dry_recurance_ndays():
ndays = { # happy with this value other than middle ones; this did not end up getting used
'lower_q': { # based on the sma -20 10days
1: 31, # lower quartile of normal
2: 45, # lower quartile of normal
3: 38, # lower quartile of normal
4: 46, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
5: 37, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
8: 35, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
9: 30, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
10: 53, # lower quartile of normal
11: 43, # lower quartile of normal
12: 47, # lower quartile of normal
},
'up_5': { # based on the sma -20 10days
1: 31, # lower quartile of normal
2: 45, # lower quartile of normal
3: 38, # lower quartile of normal
4: 46 + 5, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
5: 37 + 5, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
8: 35 + 5, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
9: 30 + 5, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
10: 53, # lower quartile of normal
11: 43, # lower quartile of normal
12: 47, # lower quartile of normal
},
'down_5': { # based on the sma -20 10days
1: 31, # lower quartile of normal
2: 45, # lower quartile of normal
3: 38, # lower quartile of normal
4: 46 - 5, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
5: 37 - 5, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
8: 35 - 5, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
9: 30 - 5, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
10: 53, # lower quartile of normal
11: 43, # lower quartile of normal
12: 47, # lower quartile of normal
},
'down_7': { # based on the sma -20 10days
1: 31, # lower quartile of normal
2: 45, # lower quartile of normal
3: 38, # lower quartile of normal
4: 46 - 7, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
5: 37 - 7, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
8: 35 - 7, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
9: 30 - 7, # lower quartile of normal, pair with 'hot' as pet is imporant in this month
10: 53, # lower quartile of normal
11: 43, # lower quartile of normal
12: 47, # lower quartile of normal
},
}
for v in ndays.values():
v.update({
6: -1,
7: -1,
})
data = get_vcsn_record(vcsn_version).reset_index()
temp = calc_sma_smd_historical(data['rain'], data['pet'], data.date, 150, 1)
trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
data.loc[:, trans_cols] = temp.loc[:, trans_cols]
data.loc[:, 'ndays_rain'] = (data.loc[:, 'rain'] > 0.01).astype(float)
data.to_csv(os.path.join(event_def_dir, 'ndays_dry_raw.csv'))
grouped_data = data.loc[:, ['month', 'year', 'rain', 'ndays_rain']].groupby(['month', 'year']).sum().reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'ndays_dry_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'ndays_dry_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for k, val in ndays.items():
ok = '{}'.format(k)
out_keys2.append(ok)
grouped_data.loc[:, 'limit'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'limit': val})
grouped_data.loc[:, ok] = grouped_data.loc[:, 'rain'] <= grouped_data.loc[:, 'limit']
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'ndays_dry_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'ndays_dry_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'ndays_dry_years.csv'))
def calc_hot_recurance_variable():
var_to_use = {
1: 'tmax',
2: 'tmax',
3: 'tmax',
4: 'tmean', # to use in conjunction with dry to get atual dry
5: 'tmean', # to use in conjunction with dry to get atual dry
6: 'tmax',
7: 'tmax',
8: 'tmean', # to use in conjunction with dry to get atual dry
9: 'tmean', # to use in conjunction with dry to get atual dry
10: 'tmax',
11: 'tmax',
12: 'tmax',
}
ndays = {
'5day': {
4: 5,
5: 5,
8: 5,
9: 5,
},
'7day': {
4: 7,
5: 7,
8: 7,
9: 7,
},
'10day': {
4: 10,
5: 10,
8: 10,
9: 10,
},
'15day': {
4: 15,
5: 15,
8: 15,
9: 15,
}
}
thresholds = {
'upper_q': { # based on the sma -20 10days
4: 18, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
5: 15, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
8: 13, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
9: 15, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
},
'2_less': { # based on the sma -20 10days
4: 18 - 2, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
5: 15 - 2, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
8: 13 - 2, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
9: 15 - 2, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
},
'5_less': { # based on the sma -20 10days
4: 18 - 5, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
5: 15 - 5, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
8: 13 - 5, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
9: 15 - 5, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
},
'7_less': { # based on the sma -20 10days
4: 18 - 7, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
5: 15 - 7, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
8: 13 - 7, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
9: 15 - 7, # upper quartile of normal, pair with 'hot' as pet is imporant in this month
}
}
for v in thresholds.values(): # set for actual hot events
v.update({
1: 25,
2: 25,
3: 25,
6: 25,
7: 25,
10: 25,
11: 25,
12: 25,
})
for v in ndays.values(): # set for actual hot events
v.update({
1: 7,
2: 7,
3: 7,
6: 7,
7: 7,
10: 7,
11: 7,
12: 7,
})
data = get_vcsn_record(vcsn_version).reset_index()
data.loc[:, 'tmean'] = (data.loc[:, 'tmax'] + data.loc[:, 'tmin']) / 2
out_keys = []
outdata = pd.DataFrame(index=pd.MultiIndex.from_product([range(1, 13), range(1972, 2020)], names=['month', 'year']))
for thresh, nd in itertools.product(thresholds.keys(), ndays.keys()):
temp_data = data.copy(deep=True)
ok = '{}_{}'.format(thresh, nd)
out_keys.append(ok)
for m in range(1, 13):
idx = data.month == m
temp_data.loc[idx, ok] = temp_data.loc[idx, var_to_use[m]] >= thresholds[thresh][m]
temp_data.loc[idx, 'ndays'] = ndays[nd][m]
temp_data = temp_data.groupby(['month', 'year']).agg({ok: 'sum', 'ndays': 'mean'})
outdata.loc[:, ok] = temp_data.loc[:, ok] >= temp_data.loc[:, 'ndays']
outdata.to_csv(os.path.join(event_def_dir, 'variable_hot_monthly.csv'))
outdata = outdata.reset_index()
out = outdata.loc[:, ['month'] + out_keys].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(outdata, set(out_keys) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'variable_hot_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'variable_hot_prob_only_prob.csv'),
float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'variable_hot_years.csv'))
def joint_hot_dry():
hot = pd.read_csv(os.path.join(event_def_dir, 'variable_hot_years.csv'), index_col=0)
hot_keys = list(hot.keys())
dry = pd.read_csv(os.path.join(event_def_dir, 'rolling_dry_years.csv'), index_col=0)
dry_keys = list(dry.keys())
data = pd.merge(hot, dry, left_index=True, right_index=True)
use_data = []
for d in data.keys():
use_data.append(
pd.Series([np.nan if isinstance(t, float) else tuple(int(e) for e in t.strip('()').split(',')) for t in
data.loc[:, d]]))
use_data = pd.concat(use_data, axis=1)
use_data.columns = data.columns
_org_describe_names = ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']
_describe_names = []
for e in _org_describe_names:
_describe_names.extend(['{}_irr'.format(e), '{}_dry'.format(e)])
full_event_names = ['hot:{}_dry:{}'.format(h, d) for h, d in itertools.product(hot_keys, dry_keys)]
outdata = pd.DataFrame(index=pd.Series(range(1, 13), name='month'),
columns=pd.MultiIndex.from_product((full_event_names,
(['prob'] + _describe_names))
, names=['event', 'pga_desc']), dtype=float)
# make base data
print('making base data')
for hot_nm, dry_nm in itertools.product(hot_keys, dry_keys):
en = 'hot:{}_dry:{}'.format(hot_nm, dry_nm)
joint_event = pd.Series(list(set(use_data.loc[:, hot_nm]).intersection(set(use_data.loc[:, dry_nm]))))
if joint_event.dropna().empty:
continue
temp = make_prob(joint_event)
outdata.loc[temp.index, (en, 'prob')] = temp.values[:, 0]
temp = add_pga_from_idx(joint_event)
outdata.loc[temp.index, (en, _describe_names)] = temp.loc[:, _describe_names].values
t = pd.Series([' '.join(e) for e in outdata.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
outdata.loc[:, outdata.columns[idx]] *= 100
outdata = outdata.sort_index(axis=1, level=0, sort_remaining=False)
outdata.to_csv(os.path.join(event_def_dir, 'joint_hot_dry_prob.csv'), float_format='%.1f%%')
idx = t.str.contains('prob')
outdata.loc[:, outdata.columns[idx]].to_csv(os.path.join(event_def_dir, 'joint_hot_dry_prob_only_prob.csv'),
float_format='%.1f%%')
idx = t.str.contains('mean')
outdata.loc[:, outdata.columns[idx]].to_csv(os.path.join(event_def_dir, 'joint_hot_dry_mean_impact.csv'),
float_format='%.1f%%')
return full_event_names, outdata
def make_prob(in_series):
in_series = in_series.dropna()
data = pd.DataFrame(np.atleast_2d(list(in_series.values)), columns=['month', 'year'])
out_series = data.groupby('month').count() / 48
return pd.DataFrame(out_series)
def old_calc_restrict_recurance():
data = get_restriction_record()
thresholds = [0.5, 0.75, 1]
tnames = ['half', '3/4', 'full']
ndays = [1, 5, 7, 10, 14]
out_keys = []
for thresh, tname in zip(thresholds, tnames):
k = 'd_>{}_rest'.format(tname)
data.loc[:, k] = data.loc[:, 'f_rest'] >= thresh
out_keys.append(k)
grouped_data = data.loc[:, ['month', 'year', 'f_rest'] + out_keys].groupby(['month', 'year']).sum().reset_index()
# make montly restriction anaomaloy - mean
temp = grouped_data.groupby('month').mean().loc[:, 'f_rest'].to_dict()
grouped_data.loc[:, 'f_rest_an_mean'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'f_rest_an_mean': temp})
grouped_data.loc[:, 'f_rest_an_mean'] = grouped_data.loc[:, 'f_rest'] - grouped_data.loc[:, 'f_rest_an_mean']
# make montly restriction anaomaloy
temp = grouped_data.groupby('month').median().loc[:, 'f_rest'].to_dict()
grouped_data.loc[:, 'f_rest_an_med'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'f_rest_an_med': temp})
grouped_data.loc[:, 'f_rest_an_med'] = grouped_data.loc[:, 'f_rest'] - grouped_data.loc[:, 'f_rest_an_med']
grouped_data.to_csv(os.path.join(event_def_dir, 'rest_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'rest_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (
out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
out_years.to_csv(os.path.join(event_def_dir, 'rest_years.csv'))
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'old_rest_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'old_rest_prob_only_prob.csv'),
float_format='%.1f%%')
def calc_restrict_cumulative_recurance():
data = get_restriction_record()
ndays = [1, 5, 7, 10, 14, 21, 25, 29]
ndays = {'{:02d}'.format(e): e for e in ndays}
temp = {1: 10,
2: 17,
3: 17,
4: 10,
5: 7,
6: 10,
7: 10,
8: 10,
9: 7,
10: 5,
11: 5,
12: 7,
}
ndays['eqlikly'] = temp # note don't use 'prob' in this name!
grouped_data = data.loc[:, ['month', 'year', 'f_rest']].groupby(['month', 'year']).sum().reset_index()
# make montly restriction anaomaloy - mean
temp = grouped_data.groupby('month').mean().loc[:, 'f_rest'].to_dict()
grouped_data.loc[:, 'f_rest_an_mean'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'f_rest_an_mean': temp})
grouped_data.loc[:, 'f_rest_an_mean'] = grouped_data.loc[:, 'f_rest'] - grouped_data.loc[:, 'f_rest_an_mean']
# make montly restriction anaomaloy - median
temp = grouped_data.groupby('month').median().loc[:, 'f_rest'].to_dict()
grouped_data.loc[:, 'f_rest_an_med'] = grouped_data.loc[:, 'month']
grouped_data = grouped_data.replace({'f_rest_an_med': temp})
grouped_data.loc[:, 'f_rest_an_med'] = grouped_data.loc[:, 'f_rest'] - grouped_data.loc[:, 'f_rest_an_med']
grouped_data.to_csv(os.path.join(event_def_dir, 'rest_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'rest_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for k, nd in ndays.items():
ok = '{}d_rest'.format(k)
out_keys2.append(ok)
if isinstance(nd, int):
grouped_data.loc[:, ok] = grouped_data.loc[:, 'f_rest'] >= nd
elif isinstance(nd, dict):
grouped_data.loc[:, ok] = grouped_data.loc[:, 'f_rest'] >= grouped_data.loc[:, 'month'].replace(nd)
else:
raise ValueError('unexpected type for nd: {}'.format(type(nd)))
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (
out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
out_years.to_csv(os.path.join(event_def_dir, 'rest_years.csv'))
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'rest_prob.csv'), float_format='%.1f%%')
idx = (t.str.contains('prob') | t.str.contains('sum'))
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'rest_prob_only_prob.csv'), float_format='%.1f%%')
def calc_restrict_recurance():
data = get_restriction_record()
thresholds = [0.001, 0.5, 0.75, 1]
tnames = ['any', 'half', '75rest', 'full']
con_days = [5, 7, 10]
ndays = [5, 7, 10, 15, 20]
consecutive_data = {}
for tnm, t in zip(tnames, thresholds):
test_value = tnm
data.loc[:, test_value] = data.loc[:, 'f_rest'] >= t
data.loc[:, 'con_id'] = (data.loc[:, ['year',
'month',
test_value]].diff(1) != 0).any(axis=1).astype('int').cumsum().values
temp = data.loc[data[test_value]].groupby('con_id')
consecutive_data[tnm] = temp.agg({'year': 'mean', 'month': 'mean', test_value: 'size'}).reset_index()
out_columns = ['total_rest_days', 'num_per', 'mean_per_len', 'min_per_len', 'max_per_len']
rename_mapper = {'sum': 'total_rest_days', 'count': 'num_per',
'mean': 'mean_per_len', 'min': 'min_per_len', 'max': 'max_per_len'}
all_data = pd.DataFrame(
index=pd.MultiIndex.from_product([set(data.year), set(data.month)], names=['year', 'month']),
columns=pd.MultiIndex.from_product([tnames, out_columns]))
all_data.loc[:] = np.nan
for k, v in consecutive_data.items():
v.to_csv(os.path.join(event_def_dir, 'len_rest_{}_raw.csv'.format(k)))
temp = v.groupby(['year', 'month']).agg({k: ['sum', 'count',
'mean', 'min', 'max']})
temp = temp.rename(columns=rename_mapper, level=1)
all_data = all_data.combine_first(temp)
all_data = all_data.loc[:, (tnames, out_columns)]
all_data.reset_index().astype(float).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'len_rest_month_desc_no_zeros.csv'))
t = all_data['any']['num_per'].isna().reset_index().groupby('month').agg({'num_per': ['sum', prob]})
t.to_csv(os.path.join(event_def_dir, 'len_rest_prob_no_rest.csv'))
all_data = all_data.fillna(0)
all_data.to_csv(os.path.join(event_def_dir, 'len_rest_monthly.csv'))
all_data.reset_index().groupby('month').describe().to_csv(
os.path.join(event_def_dir, 'len_rest_month_desc_with_zeros.csv'))
prob_data = pd.DataFrame(index=all_data.index)
for rt, l, nd in itertools.product(tnames, con_days, ndays):
prob_data.loc[:, '{}d_{}_{}tot'.format(l, rt, nd)] = ((all_data.loc[:, (rt, 'max_per_len')] >= l) &
(all_data.loc[:, (rt, 'total_rest_days')] >= nd))
out = prob_data.reset_index().groupby('month').agg(['sum', prob])
out_keys2 = set(out.columns.levels[0]) - {'year'}
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (
out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(prob_data.reset_index(), set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'len_rest_prob.csv'), float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'len_rest_years.csv'))
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'len_rest_prob_only_prob.csv'),
float_format='%.1f%%')
def calc_cold_recurance():
data = get_vcsn_record(vcsn_version)
data.loc[:, 'tmean'] = (data.loc[:, 'tmax'] + data.loc[:, 'tmin']) / 2
data.loc[:, 'tmean_raw'] = (data.loc[:, 'tmax'] + data.loc[:, 'tmin']) / 2
data.loc[:, 'tmean'] = data.loc[:, 'tmean'].rolling(3).mean()
data.to_csv(os.path.join(event_def_dir, 'rolling_cold_raw.csv'))
thresholds = [0, 5, 7, 10, 12]
vars = ['tmean']
ndays = [3, 5, 7, 10, 14]
out_keys = []
for thresh, v in itertools.product(thresholds, vars):
k = 'd_{}_{:02d}'.format(v, thresh)
data.loc[:, k] = data.loc[:, v] <= thresh
out_keys.append(k)
aggs = {e: 'sum' for e in out_keys}
aggs.update({e: 'mean' for e in vars})
grouped_data = data.loc[:, ['month', 'year'] + vars + out_keys].groupby(['month', 'year'])
grouped_data = grouped_data.aggregate(aggs).reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'rolling_cold_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'rolling_cold_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (
out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'rolling_cold_prob.csv'), float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'rolling_cold_years.csv'))
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'rolling_cold_prob_only_prob.csv'),
float_format='%.1f%%')
def calc_hot_recurance():
data = get_vcsn_record(vcsn_version)
data.loc[:, 'tmean'] = (data.loc[:, 'tmax'] + data.loc[:, 'tmin']) / 2
data.to_csv(os.path.join(event_def_dir, 'temp_raw.csv'))
thresholds = [20, 25, 28, 30, 35]
vars = ['tmax', 'tmean']
ndays = [3, 5, 7, 10, 14]
out_keys = []
for thresh, v in itertools.product(thresholds, vars):
k = 'd_{}_{:02d}'.format(v, thresh)
data.loc[:, k] = data.loc[:, v] >= thresh
out_keys.append(k)
aggs = {e: 'sum' for e in out_keys}
aggs.update({e: 'mean' for e in vars})
grouped_data = data.loc[:, ['month', 'year'] + vars + out_keys].groupby(['month', 'year'])
grouped_data = grouped_data.aggregate(aggs).reset_index()
grouped_data.to_csv(os.path.join(event_def_dir, 'hot_monthly_data.csv'))
grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
'hot_monthly_data_desc.csv'))
# number of n days
out_keys2 = []
for nd in ndays:
for k in out_keys:
ok = '{:02d}d_{}'.format(nd, k)
out_keys2.append(ok)
grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys2:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (
out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out = out.drop(columns=drop_keys)
out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
t = pd.Series([' '.join(e) for e in out.columns])
idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
out.loc[:, out.columns[idx]] *= 100
out.to_csv(os.path.join(event_def_dir, 'hot_prob.csv'), float_format='%.1f%%')
out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'hot_prob_only_prob.csv'), float_format='%.1f%%')
out_years.to_csv(os.path.join(event_def_dir, 'hot_years.csv'))
def plot_vcsn_smd():
data, use_cords = vcsn_pull_single_site(
lat=-43.358,
lon=172.301,
year_min=1972,
year_max=2019,
use_vars=('evspsblpot', 'pr'))
print(use_cords)
temp = calc_sma_smd_historical(data['pr'], data['evspsblpot'], data.date, 150, 1)
trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
data.loc[:, trans_cols] = temp.loc[:, trans_cols]
data.set_index('date', inplace=True)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1, sharex=True)
ax1.plot(data.index, data['evspsblpot'], label='pet')
ax1.plot(data.index, data['aet_out'], label='aet')
ax2.plot(data.index, data['pr'], label='rain')
ax3.plot(data.index, data['smd'], label='smd')
ax3.plot(data.index, data['mean_doy_smd'], label='daily_mean_smd')
ax4.plot(data.index, data['sma'], label='sma')
ax4.axhline(ls='--', c='k')
for ax in (ax1, ax2, ax3, ax4):
ax.legend()
plt.show()
def check_vcns_data():
data, use_cords = vcsn_pull_single_site(
lat=-43.358,
lon=172.301,
year_min=1972,
year_max=2019,
use_vars='all')
print(use_cords)
data.set_index('date', inplace=True)
for v in data.keys():
fix, (ax) = plt.subplots()
ax.plot(data.index, data[v])
ax.set_title(v)
plt.show()
def plot_restriction_record():
data = get_restriction_record()
fix, (ax) = plt.subplots()
ax.plot(pd.to_datetime(data['date']), data['f_rest'])
plt.show()
if __name__ == '__main__':
# final run set up
calc_dry_recurance_monthly_smd()
calc_dry_recurance()
calc_hot_recurance()
calc_cold_recurance()
calc_wet_recurance_ndays()
calc_restrict_cumulative_recurance()
| [
"Climate_Shocks.get_past_record.get_restriction_record",
"pandas.to_datetime",
"os.path.exists",
"Climate_Shocks.get_past_record.get_vcsn_record",
"pandas.MultiIndex.from_product",
"itertools.product",
"pandas.DataFrame",
"Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_histori... | [((844, 873), 'os.path.exists', 'os.path.exists', (['event_def_dir'], {}), '(event_def_dir)\n', (858, 873), False, 'import os\n'), ((879, 905), 'os.makedirs', 'os.makedirs', (['event_def_dir'], {}), '(event_def_dir)\n', (890, 905), False, 'import os\n'), ((1731, 1828), 'pandas.merge', 'pd.merge', (['irr_temp2', 'dry_temp2'], {'left_index': '(True)', 'right_index': '(True)', 'suffixes': "('_irr', '_dry')"}), "(irr_temp2, dry_temp2, left_index=True, right_index=True, suffixes=\n ('_irr', '_dry'))\n", (1739, 1828), True, 'import pandas as pd\n'), ((1835, 1854), 'pandas.DataFrame', 'pd.DataFrame', (['temp3'], {}), '(temp3)\n', (1847, 1854), True, 'import pandas as pd\n'), ((3062, 3091), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (3077, 3091), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((3100, 3164), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_smd_monthly', 'calc_smd_monthly', ([], {'rain': 'data.rain', 'pet': 'data.pet', 'dates': 'data.index'}), '(rain=data.rain, pet=data.pet, dates=data.index)\n', (3116, 3164), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((3597, 3646), 'itertools.product', 'itertools.product', (['smd_thresholds', 'sma_thresholds'], {}), '(smd_thresholds, sma_thresholds)\n', (3614, 3646), False, 'import itertools\n'), ((5519, 5588), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_historical', 'calc_sma_smd_historical', (["data['rain']", "data['pet']", 'data.date', '(150)', '(1)'], {}), "(data['rain'], data['pet'], data.date, 150, 1)\n", (5542, 5588), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((7721, 7790), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_historical', 'calc_sma_smd_historical', (["data['rain']", "data['pet']", 'data.date', '(150)', '(1)'], {}), "(data['rain'], data['pet'], data.date, 150, 1)\n", (7744, 7790), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((8368, 8418), 'itertools.product', 'itertools.product', (['thresholds_rain', 'thresholds_smd'], {}), '(thresholds_rain, thresholds_smd)\n', (8385, 8418), False, 'import itertools\n'), ((11323, 11392), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_historical', 'calc_sma_smd_historical', (["data['rain']", "data['pet']", 'data.date', '(150)', '(1)'], {}), "(data['rain'], data['pet'], data.date, 150, 1)\n", (11346, 11392), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((19803, 19872), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_historical', 'calc_sma_smd_historical', (["data['rain']", "data['pet']", 'data.date', '(150)', '(1)'], {}), "(data['rain'], data['pet'], data.date, 150, 1)\n", (19826, 19872), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((27246, 27299), 'pandas.merge', 'pd.merge', (['hot', 'dry'], {'left_index': '(True)', 'right_index': '(True)'}), '(hot, dry, left_index=True, right_index=True)\n', (27254, 27299), True, 'import pandas as pd\n'), ((27542, 27569), 'pandas.concat', 'pd.concat', (['use_data'], {'axis': '(1)'}), '(use_data, axis=1)\n', (27551, 27569), True, 'import pandas as pd\n'), ((28358, 28395), 'itertools.product', 'itertools.product', (['hot_keys', 'dry_keys'], {}), '(hot_keys, dry_keys)\n', (28375, 28395), False, 'import itertools\n'), ((29887, 29911), 'pandas.DataFrame', 'pd.DataFrame', (['out_series'], {}), '(out_series)\n', (29899, 29911), True, 'import pandas as pd\n'), ((29960, 29984), 'Climate_Shocks.get_past_record.get_restriction_record', 'get_restriction_record', ([], {}), '()\n', (29982, 29984), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((32635, 32659), 'Climate_Shocks.get_past_record.get_restriction_record', 'get_restriction_record', ([], {}), '()\n', (32657, 32659), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((35665, 35689), 'Climate_Shocks.get_past_record.get_restriction_record', 'get_restriction_record', ([], {}), '()\n', (35687, 35689), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((37964, 37998), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'all_data.index'}), '(index=all_data.index)\n', (37976, 37998), True, 'import pandas as pd\n'), ((38021, 38063), 'itertools.product', 'itertools.product', (['tnames', 'con_days', 'ndays'], {}), '(tnames, con_days, ndays)\n', (38038, 38063), False, 'import itertools\n'), ((39273, 39302), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (39288, 39302), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((39718, 39753), 'itertools.product', 'itertools.product', (['thresholds', 'vars'], {}), '(thresholds, vars)\n', (39735, 39753), False, 'import itertools\n'), ((41633, 41662), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (41648, 41662), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((41936, 41971), 'itertools.product', 'itertools.product', (['thresholds', 'vars'], {}), '(thresholds, vars)\n', (41953, 41971), False, 'import itertools\n'), ((43772, 43885), 'Climate_Shocks.vcsn_pull.vcsn_pull_single_site', 'vcsn_pull_single_site', ([], {'lat': '(-43.358)', 'lon': '(172.301)', 'year_min': '(1972)', 'year_max': '(2019)', 'use_vars': "('evspsblpot', 'pr')"}), "(lat=-43.358, lon=172.301, year_min=1972, year_max=\n 2019, use_vars=('evspsblpot', 'pr'))\n", (43793, 43885), False, 'from Climate_Shocks.vcsn_pull import vcsn_pull_single_site\n'), ((43955, 44029), 'Climate_Shocks.note_worthy_events.simple_soil_moisture_pet.calc_sma_smd_historical', 'calc_sma_smd_historical', (["data['pr']", "data['evspsblpot']", 'data.date', '(150)', '(1)'], {}), "(data['pr'], data['evspsblpot'], data.date, 150, 1)\n", (43978, 44029), False, 'from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly\n'), ((44227, 44258), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'sharex': '(True)'}), '(4, 1, sharex=True)\n', (44239, 44258), True, 'import matplotlib.pyplot as plt\n'), ((44690, 44700), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (44698, 44700), True, 'import matplotlib.pyplot as plt\n'), ((44748, 44846), 'Climate_Shocks.vcsn_pull.vcsn_pull_single_site', 'vcsn_pull_single_site', ([], {'lat': '(-43.358)', 'lon': '(172.301)', 'year_min': '(1972)', 'year_max': '(2019)', 'use_vars': '"""all"""'}), "(lat=-43.358, lon=172.301, year_min=1972, year_max=\n 2019, use_vars='all')\n", (44769, 44846), False, 'from Climate_Shocks.vcsn_pull import vcsn_pull_single_site\n'), ((45072, 45082), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (45080, 45082), True, 'import matplotlib.pyplot as plt\n'), ((45127, 45151), 'Climate_Shocks.get_past_record.get_restriction_record', 'get_restriction_record', ([], {}), '()\n', (45149, 45151), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((45168, 45182), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (45180, 45182), True, 'import matplotlib.pyplot as plt\n'), ((45245, 45255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (45253, 45255), True, 'import matplotlib.pyplot as plt\n'), ((923, 985), 'Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit.calc_past_pasture_growth_anomaly', 'calc_past_pasture_growth_anomaly', (['"""irrigated"""'], {'site': '"""eyrewell"""'}), "('irrigated', site='eyrewell')\n", (955, 985), False, 'from Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit import calc_past_pasture_growth_anomaly\n'), ((1131, 1174), 'Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit.calc_past_pasture_growth_anomaly', 'calc_past_pasture_growth_anomaly', (['"""dryland"""'], {}), "('dryland')\n", (1163, 1174), False, 'from Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit import calc_past_pasture_growth_anomaly\n'), ((1323, 1335), 'numpy.nansum', 'np.nansum', (['x'], {}), '(x)\n', (1332, 1335), True, 'import numpy as np\n'), ((3395, 3449), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_raw.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_raw.csv')\n", (3407, 3449), False, 'import os\n'), ((3999, 4062), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_monthly_data.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_monthly_data.csv')\n", (4011, 4062), False, 'import os\n'), ((4139, 4207), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_monthly_data_desc.csv')\n", (4151, 4207), False, 'import os\n'), ((5094, 5149), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_prob.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_prob.csv')\n", (5106, 5149), False, 'import os\n'), ((5214, 5279), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_prob_only_prob.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_prob_only_prob.csv')\n", (5226, 5279), False, 'import os\n'), ((5366, 5422), 'os.path.join', 'os.path.join', (['event_def_dir', '"""monthly_smd_dry_years.csv"""'], {}), "(event_def_dir, 'monthly_smd_dry_years.csv')\n", (5378, 5422), False, 'import os\n'), ((5728, 5770), 'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_raw.csv"""'], {}), "(event_def_dir, 'dry_raw.csv')\n", (5740, 5770), False, 'import os\n'), ((6302, 6353), 'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_monthly_data.csv"""'], {}), "(event_def_dir, 'dry_monthly_data.csv')\n", (6314, 6353), False, 'import os\n'), ((6430, 6486), 'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'dry_monthly_data_desc.csv')\n", (6442, 6486), False, 'import os\n'), ((7373, 7416), 'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_prob.csv"""'], {}), "(event_def_dir, 'dry_prob.csv')\n", (7385, 7416), False, 'import os\n'), ((7481, 7534), 'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_prob_only_prob.csv"""'], {}), "(event_def_dir, 'dry_prob_only_prob.csv')\n", (7493, 7534), False, 'import os\n'), ((7581, 7625), 'os.path.join', 'os.path.join', (['event_def_dir', '"""dry_years.csv"""'], {}), "(event_def_dir, 'dry_years.csv')\n", (7593, 7625), False, 'import os\n'), ((8005, 8033), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {'sharex': '(True)'}), '(3, sharex=True)\n', (8017, 8033), True, 'import matplotlib.pyplot as plt\n'), ((8158, 8168), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8166, 8168), True, 'import matplotlib.pyplot as plt\n'), ((8186, 8232), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_raw.csv"""'], {}), "(event_def_dir, 'smd_wet_raw.csv')\n", (8198, 8232), False, 'import os\n'), ((9448, 9503), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_monthly_data.csv"""'], {}), "(event_def_dir, 'smd_wet_monthly_data.csv')\n", (9460, 9503), False, 'import os\n'), ((9581, 9641), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'smd_wet_monthly_data_desc.csv')\n", (9593, 9641), False, 'import os\n'), ((10552, 10599), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_prob.csv"""'], {}), "(event_def_dir, 'smd_wet_prob.csv')\n", (10564, 10599), False, 'import os\n'), ((10664, 10721), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_prob_only_prob.csv"""'], {}), "(event_def_dir, 'smd_wet_prob_only_prob.csv')\n", (10676, 10721), False, 'import os\n'), ((10808, 10856), 'os.path.join', 'os.path.join', (['event_def_dir', '"""smd_wet_years.csv"""'], {}), "(event_def_dir, 'smd_wet_years.csv')\n", (10820, 10856), False, 'import os\n'), ((11607, 11655), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_raw.csv"""'], {}), "(event_def_dir, 'ndays_wet_raw.csv')\n", (11619, 11655), False, 'import os\n'), ((11802, 11859), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_monthly_data.csv"""'], {}), "(event_def_dir, 'ndays_wet_monthly_data.csv')\n", (11814, 11859), False, 'import os\n'), ((11937, 11999), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'ndays_wet_monthly_data_desc.csv')\n", (11949, 11999), False, 'import os\n'), ((13036, 13085), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_prob.csv"""'], {}), "(event_def_dir, 'ndays_wet_prob.csv')\n", (13048, 13085), False, 'import os\n'), ((13150, 13209), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_prob_only_prob.csv"""'], {}), "(event_def_dir, 'ndays_wet_prob_only_prob.csv')\n", (13162, 13209), False, 'import os\n'), ((13296, 13346), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_wet_years.csv"""'], {}), "(event_def_dir, 'ndays_wet_years.csv')\n", (13308, 13346), False, 'import os\n'), ((15471, 15525), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_dry_monthly.csv"""'], {}), "(event_def_dir, 'rolling_dry_monthly.csv')\n", (15483, 15525), False, 'import os\n'), ((16136, 16187), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_dry_prob.csv"""'], {}), "(event_def_dir, 'rolling_dry_prob.csv')\n", (16148, 16187), False, 'import os\n'), ((16252, 16314), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_prob_only_prob.csv"""'], {}), "(event_def_dir, 'variable_hot_prob_only_prob.csv')\n", (16264, 16314), False, 'import os\n'), ((16401, 16453), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_dry_years.csv"""'], {}), "(event_def_dir, 'rolling_dry_years.csv')\n", (16413, 16453), False, 'import os\n'), ((20087, 20135), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_raw.csv"""'], {}), "(event_def_dir, 'ndays_dry_raw.csv')\n", (20099, 20135), False, 'import os\n'), ((20282, 20339), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_monthly_data.csv"""'], {}), "(event_def_dir, 'ndays_dry_monthly_data.csv')\n", (20294, 20339), False, 'import os\n'), ((20417, 20479), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'ndays_dry_monthly_data_desc.csv')\n", (20429, 20479), False, 'import os\n'), ((21510, 21559), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_prob.csv"""'], {}), "(event_def_dir, 'ndays_dry_prob.csv')\n", (21522, 21559), False, 'import os\n'), ((21624, 21683), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_prob_only_prob.csv"""'], {}), "(event_def_dir, 'ndays_dry_prob_only_prob.csv')\n", (21636, 21683), False, 'import os\n'), ((21770, 21820), 'os.path.join', 'os.path.join', (['event_def_dir', '"""ndays_dry_years.csv"""'], {}), "(event_def_dir, 'ndays_dry_years.csv')\n", (21782, 21820), False, 'import os\n'), ((25982, 26037), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_monthly.csv"""'], {}), "(event_def_dir, 'variable_hot_monthly.csv')\n", (25994, 26037), False, 'import os\n'), ((26648, 26700), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_prob.csv"""'], {}), "(event_def_dir, 'variable_hot_prob.csv')\n", (26660, 26700), False, 'import os\n'), ((26765, 26827), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_prob_only_prob.csv"""'], {}), "(event_def_dir, 'variable_hot_prob_only_prob.csv')\n", (26777, 26827), False, 'import os\n'), ((26914, 26967), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_years.csv"""'], {}), "(event_def_dir, 'variable_hot_years.csv')\n", (26926, 26967), False, 'import os\n'), ((27014, 27067), 'os.path.join', 'os.path.join', (['event_def_dir', '"""variable_hot_years.csv"""'], {}), "(event_def_dir, 'variable_hot_years.csv')\n", (27026, 27067), False, 'import os\n'), ((27136, 27188), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_dry_years.csv"""'], {}), "(event_def_dir, 'rolling_dry_years.csv')\n", (27148, 27188), False, 'import os\n'), ((29124, 29177), 'os.path.join', 'os.path.join', (['event_def_dir', '"""joint_hot_dry_prob.csv"""'], {}), "(event_def_dir, 'joint_hot_dry_prob.csv')\n", (29136, 29177), False, 'import os\n'), ((29284, 29347), 'os.path.join', 'os.path.join', (['event_def_dir', '"""joint_hot_dry_prob_only_prob.csv"""'], {}), "(event_def_dir, 'joint_hot_dry_prob_only_prob.csv')\n", (29296, 29347), False, 'import os\n'), ((29501, 29561), 'os.path.join', 'os.path.join', (['event_def_dir', '"""joint_hot_dry_mean_impact.csv"""'], {}), "(event_def_dir, 'joint_hot_dry_mean_impact.csv')\n", (29513, 29561), False, 'import os\n'), ((31163, 31215), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_monthly_data.csv"""'], {}), "(event_def_dir, 'rest_monthly_data.csv')\n", (31175, 31215), False, 'import os\n'), ((31292, 31349), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'rest_monthly_data_desc.csv')\n", (31304, 31349), False, 'import os\n'), ((32122, 32167), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_years.csv"""'], {}), "(event_def_dir, 'rest_years.csv')\n", (32134, 32167), False, 'import os\n'), ((32344, 32392), 'os.path.join', 'os.path.join', (['event_def_dir', '"""old_rest_prob.csv"""'], {}), "(event_def_dir, 'old_rest_prob.csv')\n", (32356, 32392), False, 'import os\n'), ((32457, 32515), 'os.path.join', 'os.path.join', (['event_def_dir', '"""old_rest_prob_only_prob.csv"""'], {}), "(event_def_dir, 'old_rest_prob_only_prob.csv')\n", (32469, 32515), False, 'import os\n'), ((33946, 33998), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_monthly_data.csv"""'], {}), "(event_def_dir, 'rest_monthly_data.csv')\n", (33958, 33998), False, 'import os\n'), ((34075, 34132), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'rest_monthly_data_desc.csv')\n", (34087, 34132), False, 'import os\n'), ((35152, 35197), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_years.csv"""'], {}), "(event_def_dir, 'rest_years.csv')\n", (35164, 35197), False, 'import os\n'), ((35374, 35418), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_prob.csv"""'], {}), "(event_def_dir, 'rest_prob.csv')\n", (35386, 35418), False, 'import os\n'), ((35542, 35596), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rest_prob_only_prob.csv"""'], {}), "(event_def_dir, 'rest_prob_only_prob.csv')\n", (35554, 35596), False, 'import os\n'), ((37371, 37434), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_month_desc_no_zeros.csv"""'], {}), "(event_def_dir, 'len_rest_month_desc_no_zeros.csv')\n", (37383, 37434), False, 'import os\n'), ((37643, 37699), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_prob_no_rest.csv"""'], {}), "(event_def_dir, 'len_rest_prob_no_rest.csv')\n", (37655, 37699), False, 'import os\n'), ((37755, 37806), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_monthly.csv"""'], {}), "(event_def_dir, 'len_rest_monthly.csv')\n", (37767, 37806), False, 'import os\n'), ((37880, 37945), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_month_desc_with_zeros.csv"""'], {}), "(event_def_dir, 'len_rest_month_desc_with_zeros.csv')\n", (37892, 37945), False, 'import os\n'), ((38925, 38973), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_prob.csv"""'], {}), "(event_def_dir, 'len_rest_prob.csv')\n", (38937, 38973), False, 'import os\n'), ((39019, 39068), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_years.csv"""'], {}), "(event_def_dir, 'len_rest_years.csv')\n", (39031, 39068), False, 'import os\n'), ((39110, 39168), 'os.path.join', 'os.path.join', (['event_def_dir', '"""len_rest_prob_only_prob.csv"""'], {}), "(event_def_dir, 'len_rest_prob_only_prob.csv')\n", (39122, 39168), False, 'import os\n'), ((39539, 39590), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_raw.csv"""'], {}), "(event_def_dir, 'rolling_cold_raw.csv')\n", (39551, 39590), False, 'import os\n'), ((40142, 40202), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_monthly_data.csv"""'], {}), "(event_def_dir, 'rolling_cold_monthly_data.csv')\n", (40154, 40202), False, 'import os\n'), ((40280, 40345), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'rolling_cold_monthly_data_desc.csv')\n", (40292, 40345), False, 'import os\n'), ((41274, 41326), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_prob.csv"""'], {}), "(event_def_dir, 'rolling_cold_prob.csv')\n", (41286, 41326), False, 'import os\n'), ((41372, 41425), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_years.csv"""'], {}), "(event_def_dir, 'rolling_cold_years.csv')\n", (41384, 41425), False, 'import os\n'), ((41467, 41529), 'os.path.join', 'os.path.join', (['event_def_dir', '"""rolling_cold_prob_only_prob.csv"""'], {}), "(event_def_dir, 'rolling_cold_prob_only_prob.csv')\n", (41479, 41529), False, 'import os\n'), ((41754, 41797), 'os.path.join', 'os.path.join', (['event_def_dir', '"""temp_raw.csv"""'], {}), "(event_def_dir, 'temp_raw.csv')\n", (41766, 41797), False, 'import os\n'), ((42360, 42411), 'os.path.join', 'os.path.join', (['event_def_dir', '"""hot_monthly_data.csv"""'], {}), "(event_def_dir, 'hot_monthly_data.csv')\n", (42372, 42411), False, 'import os\n'), ((42489, 42545), 'os.path.join', 'os.path.join', (['event_def_dir', '"""hot_monthly_data_desc.csv"""'], {}), "(event_def_dir, 'hot_monthly_data_desc.csv')\n", (42501, 42545), False, 'import os\n'), ((43473, 43516), 'os.path.join', 'os.path.join', (['event_def_dir', '"""hot_prob.csv"""'], {}), "(event_def_dir, 'hot_prob.csv')\n", (43485, 43516), False, 'import os\n'), ((43581, 43634), 'os.path.join', 'os.path.join', (['event_def_dir', '"""hot_prob_only_prob.csv"""'], {}), "(event_def_dir, 'hot_prob_only_prob.csv')\n", (43593, 43634), False, 'import os\n'), ((43681, 43725), 'os.path.join', 'os.path.join', (['event_def_dir', '"""hot_years.csv"""'], {}), "(event_def_dir, 'hot_years.csv')\n", (43693, 43725), False, 'import os\n'), ((44992, 45006), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (45004, 45006), True, 'import matplotlib.pyplot as plt\n'), ((45195, 45223), 'pandas.to_datetime', 'pd.to_datetime', (["data['date']"], {}), "(data['date'])\n", (45209, 45223), True, 'import pandas as pd\n'), ((5463, 5492), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (5478, 5492), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((7666, 7695), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (7681, 7695), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((11268, 11297), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (11283, 11297), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((14616, 14645), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (14631, 14645), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((19748, 19777), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (19763, 19777), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((25134, 25163), 'Climate_Shocks.get_past_record.get_vcsn_record', 'get_vcsn_record', (['vcsn_version'], {}), '(vcsn_version)\n', (25149, 25163), False, 'from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record\n'), ((27889, 27926), 'itertools.product', 'itertools.product', (['hot_keys', 'dry_keys'], {}), '(hot_keys, dry_keys)\n', (27906, 27926), False, 'import itertools\n'), ((28035, 28142), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["(full_event_names, ['prob'] + _describe_names)"], {'names': "['event', 'pga_desc']"}), "((full_event_names, ['prob'] + _describe_names),\n names=['event', 'pga_desc'])\n", (28061, 28142), True, 'import pandas as pd\n'), ((36786, 36835), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[tnames, out_columns]'], {}), '([tnames, out_columns])\n', (36812, 36835), True, 'import pandas as pd\n'), ((2874, 2895), 'numpy.zeros', 'np.zeros', (['missing_len'], {}), '(missing_len)\n', (2882, 2895), True, 'import numpy as np\n')] |
'''
'''
import sys
import json
import argparse
import eosfactory.core.utils as utils
import eosfactory.core.config as config
IS_ERROR = 2
IS_WARNING = 1
class Checklist():
def __init__(self, is_html=False, error_codes=""):
self.is_html = is_html
self.html_text = ""
self.is_error = False
self.is_warning = False
self.IS_WINDOWS = utils.is_windows_ubuntu()
self.os_version = utils.os_version()
self.print_msg("EOSFactory version {}".format(config.VERSION))
################################################################################
# psutil
################################################################################
try:
if "psutil" in error_codes:
import psutil1
else:
import psutil
except ImportError:
command = "pip3 install --user psutil"
button = '''
<button
style="text-align:left;"
class="btn ${{BASH_COMMAND}}";
class="btn";
id="Install psutil";
title="Install psutil. Click the button then ENTER in a newly created bash terminal window, to go."
>
{}
</button>
'''.format(command)
self.error_msg('''
Module 'psutil' is not installed. Install it: {}
'''.format(button))
self.print_error(
'''Module 'psutil' is not installed.
Install it: ''')
self.print_code("`{}`\n".format(command))
################################################################################
# termcolor
################################################################################
try:
if "termcolor" in error_codes:
import termcolor1
else:
import termcolor
except ImportError:
command = "pip3 install --user termcolor"
button = '''
<button
style="text-align:left;"
class="btn ${{BASH_COMMAND}}";
class="btn";
id="Install termcolor";
title="Install termcolor. Click the button then ENTER in a newly created bash terminal window, to go."
>
{}
</button>
'''.format(command)
self.error_msg('''
Module 'termcolor' is not installed. Install it: {}
'''.format(button))
self.print_error(
'''Module 'termcolor' is not installed.
Install it: ''')
self.print_code("`{}`\n".format(command))
if self.IS_WINDOWS:
################################################################################
# Ubuntu version
################################################################################
lsb_release, error = utils.spawn(
["lsb_release", "-r", "-s"], raise_exception=False)
if error:
self.error_msg(error)
else:
if "ubuntuversion" in error_codes:
lsb_release = "16.4.1"
ubuntu_version = int(lsb_release.split(".")[0])
if ubuntu_version < config.UBUNTU_VERSION_MIN:
msg = \
'''
WSL Ubuntu version is {}.
EOSIO nodeos can fail with Windows WSL Ubuntu below version 16.
'''.format(lsb_release)
self.status_msg(self.warning(msg))
self.print_warning(msg)
################################################################################
# WSL root
################################################################################
root = config.wsl_root()
if not root or "wslroot" in error_codes:
self.error_msg(
'''Cannot determine the root of the WSL. Set it:
<button
class="btn ${FIND_WSL}";
id="";
title="Click the button to open file dialog. Navigate to a directory containing the Ubuntu file system."
>
Indicate WSL root
</button>
''')
self.print_error(
'''Cannot determine the root of the WSL. To indicate it, use the command:''')
self.print_code("`python3 -m eosfactory.config --wsl_root`\n")
################################################################################
# eosio
################################################################################
eosio_version = config.eosio_version()
if "eosio" in error_codes:
eosio_version = ["", "1.8.0"]
# eosio_version = ["1.3.3", "1.8.0"]
if eosio_version[0]:
self.status_msg(
"Found eosio version {}".format(eosio_version[0]))
self.print_status(
"Found eosio version {}".format(eosio_version[0]))
if not eosio_version[0] or len(eosio_version) > 1\
and not self.equal(eosio_version[0], eosio_version[1]):
command = ""
if self.os_version == utils.UBUNTU:
ubuntu_version = utils.spawn(
["lsb_release", "-r", "-s"],
raise_exception=False)[0].split(".")[0]
if ubuntu_version and ubuntu_version == 16:
command = \
'''sudo apt remove eosio &&\\
wget https://github.com/eosio/eos/releases/download/v1.8.0/eosio_1.8.0-1-ubuntu-16.04_amd64.deb &&\\
sudo apt install ./eosio_1.8.0-1-ubuntu-16.04_amd64.deb
'''
else:
command = \
'''sudo apt remove eosio &&\\
wget https://github.com/eosio/eos/releases/download/v1.8.0/eosio_1.8.0-1-ubuntu-18.04_amd64.deb &&\\
apt install ./eosio_1.8.0-1-ubuntu-18.04_amd64.deb
'''
elif self.os_version == utils.DARWIN:
command = \
'''brew remove eosio &&\\
brew tap eosio/eosio &&\\
brew install eosio
'''
button = '''
<button
style="text-align:left;"
class="btn ${{BASH_COMMAND}}";
class="btn";
id="Install eosio v{0}";
title="Install eosio v{0}. Click the button then ENTER in a newly created bash terminal window, to go."
>
{1}
</button>
'''.format(eosio_version[1], command)
instructions = '<a href="https://github.com/EOSIO/eos">EOSIO installation instructions</a>'
if eosio_version[0] and len(eosio_version) > 1 :
self.warning_msg(
'''
NOTE: EOSFactory was tested with version {0} while installed is {1}. Install eosio v{0}:<br>
{2}
'''.format(
eosio_version[1], eosio_version[0],
button if command else instructions))
self.print_warning(
'''NOTE: EOSFactory was tested with version {0} while installed is {1}. Install eosio v{0}:
'''.format(
eosio_version[1], eosio_version[0])
)
self.print_code(
'''```
{}
```
'''.format(command if command else instructions))
else:
if not "ignoreeoside" in error_codes:
self.warning_msg('''
Cannot determine that eosio is installed as nodeos does not response.<br>
It hangs up sometimes.<br>
EOSFactory expects eosio version {}. Install eosio, if not installed:<br>
{}<br>
'''.format(eosio_version[1], button if command else instructions))
self.print_warning(
'''Cannot determine that eosio is installed as nodeos does not response.
It hangs up sometimes.
EOSFactory expects eosio version {}. Install eosio, if not installed:
'''.format(eosio_version[1]))
self.print_code(
'''```
{}
```
'''.format(command if command else instructions))
################################################################################
# eosio_cdt
################################################################################
eosio_cdt_version = config.eosio_cdt_version()
if "eosio_cdt" in error_codes:
eosio_cdt_version = ["", "1.6.0"]
# eosio_cdt_version = ["1.6.1", "1.6.0"]
if eosio_cdt_version[0]:
self.status_msg(
"Found eosio.cdt version {}.".format(eosio_cdt_version[0]))
self.print_status(
"Found eosio.cdt version {}.".format(eosio_cdt_version[0]))
if not eosio_cdt_version[0] or len(eosio_cdt_version) > 1\
and not self.equal(eosio_cdt_version[0], eosio_cdt_version[1]):
command = ""
if self.os_version == utils.UBUNTU:
command = \
'''sudo apt remove eosio.cdt &&\\
wget https://github.com/eosio/eosio.cdt/releases/download/v1.6.1/eosio.cdt_1.6.1-1_amd64.deb &&\\
sudo apt install ./eosio.cdt_1.6.1-1_amd64.deb
'''
elif self.os_version == utils.DARWIN:
command = \
'''brew remove eosio.cdt &&\\
brew tap eosio/eosio.cdt && \\
brew install eosio.cdt
'''
button = '''
<button
style="text-align:left;"
class="btn ${{BASH_COMMAND}}";
class="btn";
id="Install eosio.cdt v{0}";
title="Install eosio.cdt v{0}. Click the button then ENTER in a newly created bash terminal window, to go."
>
{1}
</button>
'''.format(eosio_cdt_version[1], command)
instructions = '<a href="https://github.com/EOSIO/eosio.cdt">EOSIO.cdt installation instructions</a>'
if eosio_cdt_version[0] and len(eosio_cdt_version) > 1 \
and not eosio_cdt_version[0] == eosio_cdt_version[1]:
self.warning_msg(
'''
NOTE: EOSFactory was tested with version {0} while installed is {1}. Install eosio.cdt v{0}:<br>
{2}
'''.format(
eosio_cdt_version[1], eosio_cdt_version[0],
button if command else instructions))
self.print_warning(
'''NOTE: EOSFactory was tested with version {0} while installed is {1}. Install eosio v{0}:
'''.format(
eosio_cdt_version[1], eosio_cdt_version[0]))
self.print_code(
'''```
{}
```
'''.format(command if command else instructions))
else:
self.error_msg('''
Cannot determine that eosio.cdt is installed as eosio-cpp does not response.<br>
EOSFactory expects eosio.cdt version {}. Install it, if not installed.
{}<br>
'''.format(eosio_cdt_version[1], button if command else instructions))
self.print_error(
'''Cannot determine that eosio.cdt is installed as eosio-cpp does not response.
EOSFactory expects eosio.cdt version {}. Install it, if not installed.
'''.format(eosio_cdt_version[1]))
self.print_code(
'''```
{}
```
'''.format(command if command else instructions))
################################################################################
# Default workspace
################################################################################
try:
contract_workspace_dir = config.contract_workspace_dir()
except:
contract_workspace_dir = None
button = '''
<button
class="btn ${CHANGE_WORKSPACE}";
id="${CHANGE_WORKSPACE}";
title="Set workspace"
>
Set workspace.
</button>
'''
if not contract_workspace_dir or "workspace" in error_codes:
self.error_msg('''
Default workspace is not set, or it does not exist.{}
'''.format(button))
else:
self.status_msg(
'''Default workspace is {}.{}
'''.format(contract_workspace_dir, button))
################################################################################
#
################################################################################
def just_msg(self, msg):
if self.is_html:
msg = msg.replace("&&\\", "&&\\<br>")
print("{}\n".format(msg))
def print_msg(self, msg):
if not self.is_html:
print(msg)
def status_msg(self, msg):
if self.is_html:
msg = msg.replace("&&\\", "&&\\<br>")
print("<li>{}</li>\n".format(msg))
def print_status(self, msg):
if not self.is_html:
msg = msg.replace("<br>", "")
print(msg)
def warning(self, msg):
self.is_warning = True
if self.is_html:
msg = msg.replace("&&\\", "&&\\<br>")
return '<em style="color: ${{WARNING_COLOR}}"> {} </em>'.format(msg)
def warning_msg(self, msg):
self.is_warning = True
if self.is_html:
msg = msg.replace("&&\\", "&&\\<br>")
print('<em style="color: ${{WARNING_COLOR}}"> {} </em>'.format(msg))
def print_warning(self, msg):
if not self.is_html:
msg = msg.replace("<br>", "")
msg = "WARNING:\n" + msg
try:
import termcolor
msg = termcolor.colored(msg, "yellow")
except:
pass
print(msg)
def error_msg(self, msg):
if self.is_html:
self.is_error = True
msg = msg.replace("&&\\", "&&\\<br>")
print(
'<p style="color: ${{ERROR_COLOR}}">ERROR: {}</p>'.format(msg))
def print_error(self, msg):
if not self.is_html:
self.is_error = True
msg = msg.replace("<br>", "")
msg = "ERROR:\n" + msg
try:
import termcolor
msg = termcolor.colored(msg, "magenta")
except:
pass
print(msg)
def print_code(self, msg):
if not self.is_html:
msg = msg.replace("<br>", "")
try:
import termcolor
msg = termcolor.colored(msg, "blue")
except:
pass
print(msg)
def equal(self, version1, version2):
return version1.split(".")[0] == version2.split(".")[0] \
and version1.split(".")[1] == version2.split(".")[1]
def main():
parser = argparse.ArgumentParser(description='''
Check whether installation conditions are fulfilled.
''')
parser.add_argument(
"--html", help="Print html output.", action="store_true")
parser.add_argument("--error", help="Error code", default="")
parser.add_argument(
"--wsl_root", help="Show set the root of the WSL and exit.",
action="store_true")
parser.add_argument(
"--dont_set_workspace", help="Ignore empty workspace directory.",
action="store_true")
parser.add_argument(
"--json", help="Bare config JSON and exit.",
action="store_true")
parser.add_argument(
"--workspace", help="Set contract workspace and exit.",
action="store_true")
parser.add_argument(
"--dependencies", help="Set dependencies and exit.",
action="store_true")
args = parser.parse_args()
if args.json:
print(json.dumps(
config.current_config(dont_set_workspace=args.dont_set_workspace),
sort_keys=True, indent=4))
elif args.wsl_root:
config.wsl_root()
elif args.workspace:
config.set_contract_workspace_dir()
elif args.html:
checklist = Checklist(args.html, args.error)
if checklist.is_error:
sys.exit(IS_ERROR)
elif checklist.is_warning:
sys.exit(IS_WARNING)
elif args.dependencies:
checklist = Checklist(False, args.error)
else:
print("Checking dependencies of EOSFactory...")
checklist = Checklist(False, args.error)
if not checklist.is_error and not checklist.is_warning:
print("... all the dependencies are in place.\n\n")
else:
print(
'''Some functionalities of EOSFactory may fail if the indicated errors are not
corrected.
''')
config.config()
if __name__ == '__main__':
main()
| [
"eosfactory.core.config.current_config",
"termcolor.colored",
"eosfactory.core.config.eosio_cdt_version",
"eosfactory.core.utils.os_version",
"argparse.ArgumentParser",
"eosfactory.core.utils.spawn",
"sys.exit",
"eosfactory.core.config.wsl_root",
"eosfactory.core.config.config",
"eosfactory.core.c... | [((13839, 13949), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\n Check whether installation conditions are fulfilled.\n """'}), '(description=\n """\n Check whether installation conditions are fulfilled.\n """)\n', (13862, 13949), False, 'import argparse\n'), ((377, 402), 'eosfactory.core.utils.is_windows_ubuntu', 'utils.is_windows_ubuntu', ([], {}), '()\n', (400, 402), True, 'import eosfactory.core.utils as utils\n'), ((429, 447), 'eosfactory.core.utils.os_version', 'utils.os_version', ([], {}), '()\n', (445, 447), True, 'import eosfactory.core.utils as utils\n'), ((4270, 4292), 'eosfactory.core.config.eosio_version', 'config.eosio_version', ([], {}), '()\n', (4290, 4292), True, 'import eosfactory.core.config as config\n'), ((7739, 7765), 'eosfactory.core.config.eosio_cdt_version', 'config.eosio_cdt_version', ([], {}), '()\n', (7763, 7765), True, 'import eosfactory.core.config as config\n'), ((2632, 2695), 'eosfactory.core.utils.spawn', 'utils.spawn', (["['lsb_release', '-r', '-s']"], {'raise_exception': '(False)'}), "(['lsb_release', '-r', '-s'], raise_exception=False)\n", (2643, 2695), True, 'import eosfactory.core.utils as utils\n'), ((3531, 3548), 'eosfactory.core.config.wsl_root', 'config.wsl_root', ([], {}), '()\n', (3546, 3548), True, 'import eosfactory.core.config as config\n'), ((10762, 10793), 'eosfactory.core.config.contract_workspace_dir', 'config.contract_workspace_dir', ([], {}), '()\n', (10791, 10793), True, 'import eosfactory.core.config as config\n'), ((14946, 14963), 'eosfactory.core.config.wsl_root', 'config.wsl_root', ([], {}), '()\n', (14961, 14963), True, 'import eosfactory.core.config as config\n'), ((12671, 12703), 'termcolor.colored', 'termcolor.colored', (['msg', '"""yellow"""'], {}), "(msg, 'yellow')\n", (12688, 12703), False, 'import termcolor\n'), ((13261, 13294), 'termcolor.colored', 'termcolor.colored', (['msg', '"""magenta"""'], {}), "(msg, 'magenta')\n", (13278, 13294), False, 'import termcolor\n'), ((13536, 13566), 'termcolor.colored', 'termcolor.colored', (['msg', '"""blue"""'], {}), "(msg, 'blue')\n", (13553, 13566), False, 'import termcolor\n'), ((14807, 14872), 'eosfactory.core.config.current_config', 'config.current_config', ([], {'dont_set_workspace': 'args.dont_set_workspace'}), '(dont_set_workspace=args.dont_set_workspace)\n', (14828, 14872), True, 'import eosfactory.core.config as config\n'), ((14997, 15032), 'eosfactory.core.config.set_contract_workspace_dir', 'config.set_contract_workspace_dir', ([], {}), '()\n', (15030, 15032), True, 'import eosfactory.core.config as config\n'), ((15149, 15167), 'sys.exit', 'sys.exit', (['IS_ERROR'], {}), '(IS_ERROR)\n', (15157, 15167), False, 'import sys\n'), ((15694, 15709), 'eosfactory.core.config.config', 'config.config', ([], {}), '()\n', (15707, 15709), True, 'import eosfactory.core.config as config\n'), ((15215, 15235), 'sys.exit', 'sys.exit', (['IS_WARNING'], {}), '(IS_WARNING)\n', (15223, 15235), False, 'import sys\n'), ((4911, 4974), 'eosfactory.core.utils.spawn', 'utils.spawn', (["['lsb_release', '-r', '-s']"], {'raise_exception': '(False)'}), "(['lsb_release', '-r', '-s'], raise_exception=False)\n", (4922, 4974), True, 'import eosfactory.core.utils as utils\n')] |
"""Tests for the match format model and schema."""
import pytest
from marshmallow import ValidationError
from sqlalchemy.exc import IntegrityError
from scorecard.models.match_format import MatchFormat, MatchFormatSchema
class TestMatchFormat:
@pytest.fixture
def match_format(self):
return MatchFormat("format", "description")
@pytest.fixture
def invalid_format(self, match_format, rollback_db):
yield match_format
with pytest.raises(IntegrityError):
match_format.save()
def test_create_new_match_format(self, match_format):
assert match_format.name == "format"
assert match_format.description == "description"
def test_save_duplicate_name_raises_integrity_error(self, invalid_format):
MatchFormat("format", "description").save()
def test_save_without_name_raises_integrity_error(self, invalid_format):
invalid_format.name = None
def test_save_without_description_raises_integrity_error(self, invalid_format):
invalid_format.description = None
def test_match_format_schema_dump(self, match_format, rollback_db):
format_dict = MatchFormatSchema().dump(match_format.save())
assert format_dict["id"] == match_format.id
assert format_dict["name"] == match_format.name
assert format_dict["description"] == match_format.description
class TestMatchFormatSchema:
@pytest.fixture(scope="class")
def schema(self):
return MatchFormatSchema()
@pytest.fixture
def invalid_dict(self, schema):
format_dict = dict(name="format", description="description")
yield format_dict
with pytest.raises(ValidationError):
schema.load(format_dict)
def test_load_returns_match_format_instance(self, schema):
match_format = schema.load(dict(name="format", description="description"))
assert isinstance(match_format, MatchFormat)
def test_load_raises_validation_error_if_dictionary_has_no_name_property(self, invalid_dict):
invalid_dict["name"] = None
def test_load_raises_validation_error_if_dictionary_has_no_description_property(self, invalid_dict):
invalid_dict["description"] = None
def test_load_raises_validation_error_if_dictionary_has_an_id_property(self, invalid_dict):
invalid_dict["id"] = 1
| [
"pytest.fixture",
"scorecard.models.match_format.MatchFormatSchema",
"scorecard.models.match_format.MatchFormat",
"pytest.raises"
] | [((1417, 1446), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (1431, 1446), False, 'import pytest\n'), ((310, 346), 'scorecard.models.match_format.MatchFormat', 'MatchFormat', (['"""format"""', '"""description"""'], {}), "('format', 'description')\n", (321, 346), False, 'from scorecard.models.match_format import MatchFormat, MatchFormatSchema\n'), ((1484, 1503), 'scorecard.models.match_format.MatchFormatSchema', 'MatchFormatSchema', ([], {}), '()\n', (1501, 1503), False, 'from scorecard.models.match_format import MatchFormat, MatchFormatSchema\n'), ((466, 495), 'pytest.raises', 'pytest.raises', (['IntegrityError'], {}), '(IntegrityError)\n', (479, 495), False, 'import pytest\n'), ((1670, 1700), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (1683, 1700), False, 'import pytest\n'), ((778, 814), 'scorecard.models.match_format.MatchFormat', 'MatchFormat', (['"""format"""', '"""description"""'], {}), "('format', 'description')\n", (789, 814), False, 'from scorecard.models.match_format import MatchFormat, MatchFormatSchema\n'), ((1157, 1176), 'scorecard.models.match_format.MatchFormatSchema', 'MatchFormatSchema', ([], {}), '()\n', (1174, 1176), False, 'from scorecard.models.match_format import MatchFormat, MatchFormatSchema\n')] |
import argparse
import numpy as np
import numpy_net as npn
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, help='Learning rate', default=0.1)
parser.add_argument('--epochs', type=int, help='Number of epochs', default=10)
parser.add_argument('--batch-size', type=int, help='Batch size', default=50)
parser.add_argument('--model',
type=str,
help="Model type",
choices=['dense', 'conv'],
default='conv')
args = parser.parse_args()
N_CLASSES = 10
MEAN = 127.5
STD = 127.5
CONV_SHAPE = (-1, 28, 28, 1)
def to_onehot(y, n_classes):
return np.eye(n_classes)[y]
def normalize(x):
# Note: this is a poor but simple normalization
# If you want to be precise, subtract the mean
# and divide with standard deviation
return (x - MEAN) / STD
def get_data():
# Data
train_x, train_y, val_x, val_y = npn.load_mnist()
# One hot encoding
train_y = to_onehot(train_y, val_y.max() + 1)
val_y = to_onehot(val_y, val_y.max() + 1)
# Normalizing
train_x = normalize(train_x)
val_x = normalize(val_x)
# Reshape
if args.model == 'conv':
train_x = train_x.reshape(*CONV_SHAPE)
val_x = val_x.reshape(*CONV_SHAPE)
return train_x, train_y, val_x, val_y
def get_model(inp_channels):
# Model
model_f = npn.DenseModel if args.model == 'dense' else npn.ConvModel
return model_f(inp_channels, N_CLASSES)
# Shuffle the data
def shuffle(x, y):
i = np.arange(len(y))
np.random.shuffle(i)
return x[i], y[i]
# Run a single epoch
def run_epoch(model, loss, X, Y, backprop=True, name='Train'):
# Shuffle data
if name == 'Train':
X, Y = shuffle(X, Y)
losses, hits = [], 0
for start in range(0, len(Y), args.batch_size):
# Get batch
x = X[start:start + args.batch_size]
y = Y[start:start + args.batch_size]
# Predict
y_hat = model(x)
# Metrics
losses.append(loss(y_hat, y))
hits += (y_hat.argmax(axis=1) == y.argmax(axis=1)).sum()
# Backprop if needed
if backprop:
model.update(loss.backward(y_hat, y), lr=args.lr)
# Calculcate total loss and accuracy
total_loss = np.mean(losses)
total_acc = hits / len(Y)
# Print results to standard output
print(f"{name} loss: {(total_loss):.3f} | acc: {total_acc*100:2.2f}%")
if __name__ == "__main__":
# Loss
loss_fn = npn.CrossEntropy()
# Data
train_x, train_y, val_x, val_y = get_data()
# Model
model = get_model(train_x.shape[-1])
# TRAIN
for epoch in range(args.epochs):
print(f"Epoch {epoch+1}/{args.epochs}")
run_epoch(model, loss_fn, train_x, train_y)
run_epoch(model, loss_fn, val_x, val_y, backprop=False, name='Val')
print()
| [
"numpy.mean",
"numpy.eye",
"argparse.ArgumentParser",
"numpy_net.load_mnist",
"numpy_net.CrossEntropy",
"numpy.random.shuffle"
] | [((69, 94), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (92, 94), False, 'import argparse\n'), ((927, 943), 'numpy_net.load_mnist', 'npn.load_mnist', ([], {}), '()\n', (941, 943), True, 'import numpy_net as npn\n'), ((1552, 1572), 'numpy.random.shuffle', 'np.random.shuffle', (['i'], {}), '(i)\n', (1569, 1572), True, 'import numpy as np\n'), ((2281, 2296), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (2288, 2296), True, 'import numpy as np\n'), ((2497, 2515), 'numpy_net.CrossEntropy', 'npn.CrossEntropy', ([], {}), '()\n', (2513, 2515), True, 'import numpy_net as npn\n'), ((648, 665), 'numpy.eye', 'np.eye', (['n_classes'], {}), '(n_classes)\n', (654, 665), True, 'import numpy as np\n')] |
import sys
import time
import pygame
from pygame.image import load
from pipeline import Pipeline
from bird import Bird
from menu import GameMenu
class game(object):
def __init__(self):
pygame.init() # 初始化pygame
pygame.font.init() # 初始化字体
self.font = pygame.font.SysFont("Arial", 50) # 设置字体和大小
size = width, self.height = 400, 650 # 设置窗口
self.screen = pygame.display.set_mode(size) # 显示窗口
self.clock = pygame.time.Clock() # 设置时钟
self.menu = GameMenu()
self.background = load("../assets/background.png") # 加载背景图片
def load_img(self):
piplineImgs = [load("../assets/top.png"),load("../assets/bottom.png")]
birdImgs = [load("../assets/bird1.png"),load("../assets/bird2.png"),load("../assets/birddead.png")]
return piplineImgs,birdImgs
def createMap(self):
"""定义创建地图的方法"""
self.screen.fill((255, 255, 255)) # 填充颜色
self.screen.blit(self.background, (0, 0)) # 填入到背景
# 显示管道
self.screen.blit(self.pipeline.pineUp, (self.pipeline.wallx, self.pipeline.loc_up)) # 上管道坐标位置
self.screen.blit(self.pipeline.pineDown, (self.pipeline.wallx, self.pipeline.loc_down)) # 下管道坐标位置
self.score = self.pipeline.updatePipeline(self.score)
# 显示小鸟
if self.bird.dead: # 撞管道状态
self.bird.status = 2
elif self.bird.jump: # 起飞状态
self.bird.status = 1
else:
self.bird.status = 0
self.screen.blit(self.bird.birdStatus[self.bird.status], (self.bird.birdX, self.bird.birdY)) # 设置小鸟的坐标
self.bird.birdUpdate() # 鸟移动
# 显示分数
self.screen.blit(self.font.render('Score:' + str(self.score), -1, (255, 255, 255)), (100, 50)) # 设置颜色及坐标位置
pygame.display.update() # 更新显示
def checkDead(self):
# 上方管子的矩形位置
upRect = pygame.Rect(self.pipeline.wallx, -300,
self.pipeline.pineUp.get_width() - 10,
self.pipeline.pineUp.get_height())
# 下方管子的矩形位置
downRect = pygame.Rect(self.pipeline.wallx, 500,
self.pipeline.pineDown.get_width() - 10,
self.pipeline.pineDown.get_height())
# 检测小鸟与上下方管子是否碰撞
if upRect.colliderect(self.bird.birdRect) or downRect.colliderect(self.bird.birdRect):
self.bird.dead = True
# 检测小鸟是否飞出上下边界
if not 0 < self.bird.birdRect[1] < self.height:
self.bird.dead = True
return True
else:
return False
def start(self):
p_img, b_img = self.load_img()
self.pipeline = Pipeline(p_img) # 实例化管道类
self.bird = Bird(b_img) # 实例化鸟类
self.score = 0
self.play()
def play(self):
# 轮询事件
while True:
self.clock.tick(60) # 每秒执行60次
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if (event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN) and not self.bird.dead:
self.bird.jump_up()
# print(self.pipeline.loc_up,self.pipeline.loc_down)
if self.checkDead():
self.end()
else:
self.createMap()
def run(self):
while True:
self.menu.menu_start(self.screen,self.background)
self.screen.blit(self.menu.bird_pic,self.menu.bird_loc)
pygame.display.flip()
self.clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
event_keys = pygame.key.get_pressed()
self.menu.menu_update(event_keys,self.screen)
pygame.display.flip()
if event_keys[pygame.K_RETURN]:
if self.menu.start:
self.start()
if self.menu.end:
sys.exit()
def end(self):
final_text1 = "Game Over"
ft1_font = pygame.font.SysFont("Arial", 48) # 设置第一行文字字体
ft1_surf = ft1_font.render(final_text1, 1, (242, 3, 36)) # 设置第一行文字颜色
ft2_font = pygame.font.SysFont("Arial", 28) # 设置第二行文字字体
final_text2 = "Press m for menu"
ft2_surf = ft2_font.render(final_text2, True, (253, 177, 6)) # 设置第二行文字颜色
self.screen.blit(ft1_surf, [self.screen.get_width() / 2 - ft1_surf.get_width() / 2, 100]) # 设置第一行文字显示位置
self.screen.blit(ft2_surf, [self.screen.get_width() / 2 - ft2_surf.get_width() / 2, 400]) # 设置第二行文字显示位置
pygame.display.flip()
while True:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
event_keys = pygame.key.get_pressed()
if event_keys[pygame.K_m]:
self.run()
if __name__ == '__main__':
game().run()
| [
"sys.exit",
"pygame.init",
"pygame.event.get",
"bird.Bird",
"pygame.display.set_mode",
"pygame.display.flip",
"menu.GameMenu",
"pygame.key.get_pressed",
"pygame.font.init",
"pygame.time.Clock",
"pygame.image.load",
"pygame.display.update",
"pipeline.Pipeline",
"pygame.font.SysFont"
] | [((190, 203), 'pygame.init', 'pygame.init', ([], {}), '()\n', (201, 203), False, 'import pygame\n'), ((245, 263), 'pygame.font.init', 'pygame.font.init', ([], {}), '()\n', (261, 263), False, 'import pygame\n'), ((308, 340), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Arial"""', '(50)'], {}), "('Arial', 50)\n", (327, 340), False, 'import pygame\n'), ((423, 452), 'pygame.display.set_mode', 'pygame.display.set_mode', (['size'], {}), '(size)\n', (446, 452), False, 'import pygame\n'), ((477, 496), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (494, 496), False, 'import pygame\n'), ((531, 541), 'menu.GameMenu', 'GameMenu', ([], {}), '()\n', (539, 541), False, 'from menu import GameMenu\n'), ((562, 594), 'pygame.image.load', 'load', (['"""../assets/background.png"""'], {}), "('../assets/background.png')\n", (566, 594), False, 'from pygame.image import load\n'), ((1645, 1668), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1666, 1668), False, 'import pygame\n'), ((2412, 2427), 'pipeline.Pipeline', 'Pipeline', (['p_img'], {}), '(p_img)\n', (2420, 2427), False, 'from pipeline import Pipeline\n'), ((2452, 2463), 'bird.Bird', 'Bird', (['b_img'], {}), '(b_img)\n', (2456, 2463), False, 'from bird import Bird\n'), ((3486, 3518), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Arial"""', '(48)'], {}), "('Arial', 48)\n", (3505, 3518), False, 'import pygame\n'), ((3617, 3649), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Arial"""', '(28)'], {}), "('Arial', 28)\n", (3636, 3649), False, 'import pygame\n'), ((3990, 4011), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4009, 4011), False, 'import pygame\n'), ((645, 670), 'pygame.image.load', 'load', (['"""../assets/top.png"""'], {}), "('../assets/top.png')\n", (649, 670), False, 'from pygame.image import load\n'), ((671, 699), 'pygame.image.load', 'load', (['"""../assets/bottom.png"""'], {}), "('../assets/bottom.png')\n", (675, 699), False, 'from pygame.image import load\n'), ((715, 742), 'pygame.image.load', 'load', (['"""../assets/bird1.png"""'], {}), "('../assets/bird1.png')\n", (719, 742), False, 'from pygame.image import load\n'), ((743, 770), 'pygame.image.load', 'load', (['"""../assets/bird2.png"""'], {}), "('../assets/bird2.png')\n", (747, 770), False, 'from pygame.image import load\n'), ((771, 801), 'pygame.image.load', 'load', (['"""../assets/birddead.png"""'], {}), "('../assets/birddead.png')\n", (775, 801), False, 'from pygame.image import load\n'), ((2595, 2613), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2611, 2613), False, 'import pygame\n'), ((3064, 3085), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (3083, 3085), False, 'import pygame\n'), ((3125, 3143), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3141, 3143), False, 'import pygame\n'), ((4042, 4060), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4058, 4060), False, 'import pygame\n'), ((2654, 2664), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2662, 2664), False, 'import sys\n'), ((3200, 3224), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (3222, 3224), False, 'import pygame\n'), ((3281, 3302), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (3300, 3302), False, 'import pygame\n'), ((4117, 4141), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (4139, 4141), False, 'import pygame\n'), ((3417, 3427), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3425, 3427), False, 'import sys\n')] |
from .environ import env
from .helpers import os_shutdown
from .mlogging import mlogging
import traceback
import winsound
class ExceptiontContainer(object):
def __init__(self, log_name='exception.log', log_prefix=None, use_console=True,
to_raise=False, beep=True, shutdown=False, hibernate=False):
self._logger = mlogging.get_logger(log_name, prefix=log_prefix, use_console=use_console)
self.to_raise = to_raise
self.beep = beep
self._shutdown_opts = {'shutdown':shutdown, 'hibernate':hibernate}
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
self._logger.error('%s: %s\n%s'%(exc_type.__name__, str(exc_val), traceback.format_exc()))
if self.beep:
winsound.Beep(frequency=522, duration=2020)
if env('WORKING', dynamic=True)==False:
os_shutdown(**self._shutdown_opts)
if not self.to_raise:
return True
| [
"traceback.format_exc",
"winsound.Beep"
] | [((829, 872), 'winsound.Beep', 'winsound.Beep', ([], {'frequency': '(522)', 'duration': '(2020)'}), '(frequency=522, duration=2020)\n', (842, 872), False, 'import winsound\n'), ((761, 783), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (781, 783), False, 'import traceback\n')] |
import json
import os
import matplotlib.pyplot as plt
import mmcv
import pandas as pd
import seaborn as sns
prefix = '/home/tml/Nutstore Files/ubuntu/paper/data/iou'
if __name__ == '__main__':
work_dirs = os.listdir('work_dirs')
results = []
best_f1 = []
for i, work_dir in enumerate(work_dirs):
work_dir_files = os.listdir('work_dirs/' + work_dir)
eval_files = []
config_file = None
for file_name in work_dir_files:
if file_name.endswith('_eval.json'):
name = 'work_dirs/' + work_dir + '/' + file_name
data_origin = mmcv.load(name)
epoch = int(name.split('/')[-1].split('_')[1])
data = dict()
data['epoch'] = epoch
config_name = data_origin['config'].split('/')[-1]
data['config'] = config_name
data.update(data_origin['metric'])
try:
iou_curves = data_origin['metric']['iou_infos']
df = pd.DataFrame.from_dict(iou_curves)
df.to_csv(prefix + '/' + work_dir + '=' + str(epoch) + '.csv')
# g = sns.lineplot(x='iou', y='f1_score', data=df, markers=True, dashes=False)
# g.legend(loc='right', bbox_to_anchor=(1.5, 0.5), ncol=1)
# plt.show()
# print(plt)
eval_files.append(data)
except Exception as e:
print(e)
if file_name.endswith('.py'):
config_file = 'work_dirs/' + work_dir + '/' + file_name
eval_files.sort(key=lambda x: x['epoch'])
try:
best_f1.append(
(work_dir, max(eval_files, key=lambda x: x['f1_score'])['f1_score']))
except Exception as e:
print(e)
results.append(eval_files)
print(results)
intput_data = []
for result in results:
intput_data.extend(result)
pass
with open('/home/tml/Nutstore Files/ubuntu/paper/data/1.json', 'w') as f:
json.dump(results, f)
df = pd.DataFrame.from_dict(intput_data)
df.to_csv('/home/tml/Nutstore Files/ubuntu/paper/data/1.csv')
g = sns.lineplot(x='epoch', y='bbox_mAP', data=df, hue='config',
style='config', markers=True, dashes=False)
# g.legend(loc='right', bbox_to_anchor=(1.5, 0.5), ncol=1)
plt.show()
print(plt)
# for result in results:
#
# sns.set_theme(style='darkgrid')
# # Load an example dataset with long-form data
# df = pd.DataFrame.from_dict(result)
#
# # Plot the responses for different events and regions
# sns.lineplot(x='epoch', y='bbox_mAP',
# data=df)
#
# plt.show()
| [
"os.listdir",
"pandas.DataFrame.from_dict",
"seaborn.lineplot",
"mmcv.load",
"json.dump",
"matplotlib.pyplot.show"
] | [((2109, 2144), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['intput_data'], {}), '(intput_data)\n', (2131, 2144), True, 'import pandas as pd\n'), ((2215, 2323), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""epoch"""', 'y': '"""bbox_mAP"""', 'data': 'df', 'hue': '"""config"""', 'style': '"""config"""', 'markers': '(True)', 'dashes': '(False)'}), "(x='epoch', y='bbox_mAP', data=df, hue='config', style='config',\n markers=True, dashes=False)\n", (2227, 2323), True, 'import seaborn as sns\n'), ((2397, 2407), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2405, 2407), True, 'import matplotlib.pyplot as plt\n'), ((214, 237), 'os.listdir', 'os.listdir', (['"""work_dirs"""'], {}), "('work_dirs')\n", (224, 237), False, 'import os\n'), ((2081, 2102), 'json.dump', 'json.dump', (['results', 'f'], {}), '(results, f)\n', (2090, 2102), False, 'import json\n'), ((344, 379), 'os.listdir', 'os.listdir', (["('work_dirs/' + work_dir)"], {}), "('work_dirs/' + work_dir)\n", (354, 379), False, 'import os\n'), ((617, 632), 'mmcv.load', 'mmcv.load', (['name'], {}), '(name)\n', (626, 632), False, 'import mmcv\n'), ((1044, 1078), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['iou_curves'], {}), '(iou_curves)\n', (1066, 1078), True, 'import pandas as pd\n')] |
# Copyright 2020 <NAME>, <NAME>, and <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import time
import logging
import traceback
import tensorflow as tf
from .base import log_hp_to_tensorboard, log_epoch_metrics_to_tensorboard, estimator_gen_fn_wrapper, update_model_results
from ..commons.constants import *
from ..db.dao import *
from ..storage import LocalStore, HDFSStore
from importlib import import_module
from sqlalchemy import and_
def sub_epoch_scheduler(app, db, backend, inter_epoch_wait_time=5, verbose=True):
"""
Sub-epoch scheduling daemon. Reads trainable model configs from the database and runs them on the provided backend.
:param app: Flask applicarion.
:param db: SQLAlchemy DB object.
:param backend: Cerebro backend object
:param inter_epoch_wait_time:
:param verbose:
"""
with app.app_context():
while not exit_event.is_set():
all_models = all_models = Model.query.filter(and_(Model.status.in_([CREATED_STATUS, RUNNING_STATUS]), Model.max_train_epochs > Model.num_trained_epochs)).all()
if all_models is not None and len(all_models) > 0:
estimators = []
estimator_results = {}
all_stores = {}
all_labels = {}
all_features = {}
for m in all_models:
try:
exp_obj = Experiment.query.filter(Experiment.id == m.exp_id).one()
data_store_prefix_path = exp_obj.data_store_prefix_path
if data_store_prefix_path.startswith('hdfs://'):
store = HDFSStore(prefix_path=data_store_prefix_path)
else:
store = LocalStore(prefix_path=data_store_prefix_path)
param = {}
for d in m.param_vals:
if d.dtype == DTYPE_FLOAT:
param[d.name] = float(d.value)
elif d.dtype == DTYPE_INT:
param[d.name] = int(d.value)
else:
param[d.name] = d.value
mod, f = exp_obj.executable_entrypoint.split(':')
mod = import_module(mod)
estimator_gen_fn = getattr(mod, f)
features, labels = exp_obj.feature_columns.split(','), exp_obj.label_columns.split(',')
est = estimator_gen_fn_wrapper(estimator_gen_fn, param, features, labels, store, verbose)
est.setRunId(m.id)
est.setEpochs(m.num_trained_epochs)
# Creating model checkpoint
remote_store = store.to_remote(est.getRunId())
with remote_store.get_local_output_dir() as run_output_dir:
tf.compat.v1.reset_default_graph
model = est._compile_model(est._get_keras_utils())
if m.warm_start_model_id is not None and not est._has_checkpoint(m.id):
# https://www.tensorflow.org/guide/keras/save_and_serialize#apis_for_in-memory_weight_transfer
remote_store2 = store.to_remote(m.warm_start_model_id)
with remote_store2.get_local_output_dir() as run_output_dir2:
model2 = est._compile_model(est._get_keras_utils())
model.set_weights(model2.get_weights())
warm_start_model = Model.query.filter(Model.id == m.warm_start_model_id).one()
db.session.refresh(m)
m.num_trained_epochs = warm_start_model.num_trained_epochs
db.session.commit()
est.setEpochs(m.num_trained_epochs)
for metric in warm_start_model.metrics:
new_metric = Metric(m.id, metric.name, [float(x) for x in metric.values.split(",")])
db.session.add(new_metric)
db.session.commit()
ckpt_file = os.path.join(run_output_dir, remote_store.checkpoint_filename)
model.save(ckpt_file)
remote_store.sync(run_output_dir)
tf.compat.v1.reset_default_graph
estimators.append(est)
all_stores[est.getRunId()] = store
all_features[est.getRunId()] = features
all_labels[est.getRunId()] = labels
if m.status == CREATED_STATUS:
db.session.refresh(m)
m.status = RUNNING_STATUS
db.session.commit()
# Log hyperparameters to TensorBoard
log_hp_to_tensorboard([est], [param], store, verbose)
estimator_results[m.id] = {}
for metric in m.metrics:
estimator_results[m.id][metric.name] = [float(x) for x in metric.values.split(',')]
except Exception as e:
logging.error(traceback.format_exc())
db.session.refresh(m)
m.status = FAILED_STATUS
m.exception_message = str(traceback.format_exc())
db.session.commit()
# Trains all the models for one epoch. Also performs validation
epoch_results = backend.train_for_one_epoch(estimators, all_stores, all_features, all_labels)
update_model_results(estimator_results, epoch_results)
epoch_results = backend.train_for_one_epoch(estimators, all_stores, all_features, all_labels, is_train=False)
update_model_results(estimator_results, epoch_results)
log_epoch_metrics_to_tensorboard(estimators, estimator_results, all_stores, verbose)
for m in all_models:
est_results = estimator_results[m.id]
# Refresh to sync any model stop requests from the db
db.session.refresh(m)
metrics = m.metrics.all()
if len(metrics) == 0:
for k in est_results:
db.session.add(Metric(m.id, k, est_results[k]))
else:
for k in est_results:
metric = [metric for metric in metrics if metric.name == k][0]
metric.values = ",".join(["{:.4f}".format(x) for x in est_results[k]])
db.session.commit()
for m in all_models:
# Refresh to sync any model stop requests from the db
db.session.refresh(m)
m.num_trained_epochs += 1
if m.num_trained_epochs >= m.max_train_epochs:
m.status = COMPLETED_STATUS
db.session.commit()
# inter-epoch waiting
exit_event.wait(inter_epoch_wait_time)
| [
"traceback.format_exc",
"os.path.join",
"importlib.import_module"
] | [((2953, 2971), 'importlib.import_module', 'import_module', (['mod'], {}), '(mod)\n', (2966, 2971), False, 'from importlib import import_module\n'), ((5038, 5100), 'os.path.join', 'os.path.join', (['run_output_dir', 'remote_store.checkpoint_filename'], {}), '(run_output_dir, remote_store.checkpoint_filename)\n', (5050, 5100), False, 'import os\n'), ((6180, 6202), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6200, 6202), False, 'import traceback\n'), ((6349, 6371), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6369, 6371), False, 'import traceback\n')] |
#%%
#==============================================================================#
# #
# Title: Make PostCodes Dataset #
# Purpose: To download and process the data for the App #
# Notes: ... #
# Author: chrimaho #
# Created: 26/Dec/2020 #
# References: ... #
# Sources: ... #
# Edited: ... #
# #
#==============================================================================#
#------------------------------------------------------------------------------#
# #
# Set Up ####
# #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Import packages ####
#------------------------------------------------------------------------------#
# -*- coding: utf-8 -*- #
# import click #<-- Interactivity
import logging #<-- For ease of debugging
from pathlib import Path #<-- Because we need a path forward
from dotenv import find_dotenv, load_dotenv #<-- It's nice to have an environment
import pandas as pd #<-- Frame your Data
from pprint import pprint
import os
import sys
#------------------------------------------------------------------------------#
# Import sources ####
#------------------------------------------------------------------------------#
# Set root directory ----
project_dir = Path(__file__).resolve().parents[2]
# Add directory to Sys path ----
try:
# The directory "." is added to the Path environment so modules can easily be called between files.
if not os.path.abspath(project_dir) in sys.path:
sys.path.append(os.path.abspath(project_dir))
except:
raise ModuleNotFoundError("The custom modules were not able to be loaded.")
# Import modules ----
from src import utils
from src import sources
#------------------------------------------------------------------------------#
# #
# Main Part ####
# #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Process Data ####
#------------------------------------------------------------------------------#
# Extract the data ----
def set_DataFrame(raw):
# Assertions
assert isinstance(raw, dict)
assert list(raw)==['header','dataSets','structure']
# Get data
data = raw['dataSets'][0]['observations']
# Coerce to DataFrame
data = pd.DataFrame(data)
# Return
return data
# Fix the data frame ----
def set_FixData(DataFrame, raw):
"""
Fix the data and make it manageable and logical
Args:
DataFrame (pd.DataFrame): The DataFrame to be processed
raw (dict): The dictionary containing the raw information, as extracted from the ABS.
Returns:
pd.DataFrame: The processed DataFrame
"""
# Assertions
assert isinstance(DataFrame, pd.DataFrame)
assert isinstance(raw, dict)
# Melt the frame
data = DataFrame.melt()
# Split column
data[[1,2,3,4]] = data['variable'].str.split(':',expand=True)
# Duplicate columns
data[[5,6,7,8]] = data[[1,2,3,4]]
# Drop the unnecessary column
del data["variable"]
# Convert data
data.iloc[:,1] = data.iloc[:,1].replace(utils.get_DataLabels(raw, 1, "id"))
data.iloc[:,2] = data.iloc[:,2].replace(utils.get_DataLabels(raw, "SEIFAINDEXTYPE", "id"))
data.iloc[:,3] = data.iloc[:,3].replace(utils.get_DataLabels(raw, "SEIFA_MEASURE", "id"))
data.iloc[:,4] = data.iloc[:,4].replace(utils.get_DataLabels(raw, "TIME_PERIOD", "id"))
data.iloc[:,5] = data.iloc[:,5].replace(utils.get_DataLabels(raw, 1, "name"))
data.iloc[:,6] = data.iloc[:,6].replace(utils.get_DataLabels(raw, "SEIFAINDEXTYPE", "name"))
data.iloc[:,7] = data.iloc[:,7].replace(utils.get_DataLabels(raw, "SEIFA_MEASURE", "name"))
data.iloc[:,8] = data.iloc[:,8].replace(utils.get_DataLabels(raw, "TIME_PERIOD", "name"))
# Rename columns
data = data.rename(columns={
1:raw["structure"]["dimensions"]["observation"][0]["name"].replace(" ",""),
2:raw["structure"]["dimensions"]["observation"][1]["name"].replace(" ",""),
3:raw["structure"]["dimensions"]["observation"][2]["name"].replace(" ",""),
4:raw["structure"]["dimensions"]["observation"][3]["name"].replace(" ",""),
5:raw["structure"]["dimensions"]["observation"][0]["name"].replace(" ","") + "Long",
6:raw["structure"]["dimensions"]["observation"][1]["name"].replace(" ","") + "Long",
7:raw["structure"]["dimensions"]["observation"][2]["name"].replace(" ","") + "Long",
8:raw["structure"]["dimensions"]["observation"][3]["name"].replace(" ","") + "Long",
})
# Return
return data
#------------------------------------------------------------------------------#
# #
# Define & Run the Main ####
# #
#------------------------------------------------------------------------------#
# Main Function ----
# @click.command()
# @click.argument('input_filepath', type=click.Path(exists=True))
# @click.argument('output_filepath', type=click.Path())
def main():
"""
Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
# Run logger
logger.info('making final data set from raw data')
# Get data
raw = utils.get_RawData(sources.PostalAreaCode)
utils.let_DumpData(raw, os.path.join(project_dir, "data/raw"), "Seifa2016_POA_Raw.json")
# Process data
data = set_DataFrame(raw)
utils.let_DumpData(data, os.path.join(project_dir, "data/raw"), "Seifa2016_POA_Raw.csv")
data = set_FixData(data, raw)
utils.let_DumpData(data, os.path.join(project_dir, "data/processed"), TargetFileName="Seifa2016_POA_Processed.csv")
print(data)
return(data)
# Run ----
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format=log_fmt)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
# %%
| [
"logging.getLogger",
"logging.basicConfig",
"dotenv.find_dotenv",
"pathlib.Path",
"os.path.join",
"src.utils.get_DataLabels",
"pandas.DataFrame",
"os.path.abspath",
"src.utils.get_RawData"
] | [((3719, 3737), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3731, 3737), True, 'import pandas as pd\n'), ((6887, 6928), 'src.utils.get_RawData', 'utils.get_RawData', (['sources.PostalAreaCode'], {}), '(sources.PostalAreaCode)\n', (6904, 6928), False, 'from src import utils\n'), ((7487, 7514), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (7504, 7514), False, 'import logging\n'), ((7519, 7574), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (7538, 7574), False, 'import logging\n'), ((4555, 4589), 'src.utils.get_DataLabels', 'utils.get_DataLabels', (['raw', '(1)', '"""id"""'], {}), "(raw, 1, 'id')\n", (4575, 4589), False, 'from src import utils\n'), ((4635, 4684), 'src.utils.get_DataLabels', 'utils.get_DataLabels', (['raw', '"""SEIFAINDEXTYPE"""', '"""id"""'], {}), "(raw, 'SEIFAINDEXTYPE', 'id')\n", (4655, 4684), False, 'from src import utils\n'), ((4730, 4778), 'src.utils.get_DataLabels', 'utils.get_DataLabels', (['raw', '"""SEIFA_MEASURE"""', '"""id"""'], {}), "(raw, 'SEIFA_MEASURE', 'id')\n", (4750, 4778), False, 'from src import utils\n'), ((4824, 4870), 'src.utils.get_DataLabels', 'utils.get_DataLabels', (['raw', '"""TIME_PERIOD"""', '"""id"""'], {}), "(raw, 'TIME_PERIOD', 'id')\n", (4844, 4870), False, 'from src import utils\n'), ((4916, 4952), 'src.utils.get_DataLabels', 'utils.get_DataLabels', (['raw', '(1)', '"""name"""'], {}), "(raw, 1, 'name')\n", (4936, 4952), False, 'from src import utils\n'), ((4998, 5049), 'src.utils.get_DataLabels', 'utils.get_DataLabels', (['raw', '"""SEIFAINDEXTYPE"""', '"""name"""'], {}), "(raw, 'SEIFAINDEXTYPE', 'name')\n", (5018, 5049), False, 'from src import utils\n'), ((5095, 5145), 'src.utils.get_DataLabels', 'utils.get_DataLabels', (['raw', '"""SEIFA_MEASURE"""', '"""name"""'], {}), "(raw, 'SEIFA_MEASURE', 'name')\n", (5115, 5145), False, 'from src import utils\n'), ((5191, 5239), 'src.utils.get_DataLabels', 'utils.get_DataLabels', (['raw', '"""TIME_PERIOD"""', '"""name"""'], {}), "(raw, 'TIME_PERIOD', 'name')\n", (5211, 5239), False, 'from src import utils\n'), ((6957, 6994), 'os.path.join', 'os.path.join', (['project_dir', '"""data/raw"""'], {}), "(project_dir, 'data/raw')\n", (6969, 6994), False, 'import os\n'), ((7105, 7142), 'os.path.join', 'os.path.join', (['project_dir', '"""data/raw"""'], {}), "(project_dir, 'data/raw')\n", (7117, 7142), False, 'import os\n'), ((7232, 7275), 'os.path.join', 'os.path.join', (['project_dir', '"""data/processed"""'], {}), "(project_dir, 'data/processed')\n", (7244, 7275), False, 'import os\n'), ((7727, 7740), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (7738, 7740), False, 'from dotenv import find_dotenv, load_dotenv\n'), ((2551, 2579), 'os.path.abspath', 'os.path.abspath', (['project_dir'], {}), '(project_dir)\n', (2566, 2579), False, 'import os\n'), ((2617, 2645), 'os.path.abspath', 'os.path.abspath', (['project_dir'], {}), '(project_dir)\n', (2632, 2645), False, 'import os\n'), ((2360, 2374), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2364, 2374), False, 'from pathlib import Path\n')] |
import os
def configuration(parent_package="", top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration("simulator", parent_package, top_path)
libraries = []
if os.name == "posix":
libraries.append("m")
# cpp_args = ['-stdlib=libc++', '-mmacosx-version-min=10.7']
config.add_extension(
"_simulatorc",
sources=["_simulatorc.pyx", "simulator.cpp"],
include_dirs=numpy.get_include(),
libraries=libraries,
language="c++",
# extra_compile_args = cpp_args,
)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration(top_path="").todict())
| [
"numpy.distutils.misc_util.Configuration",
"numpy.get_include"
] | [((152, 204), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['"""simulator"""', 'parent_package', 'top_path'], {}), "('simulator', parent_package, top_path)\n", (165, 204), False, 'from numpy.distutils.misc_util import Configuration\n'), ((473, 492), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (490, 492), False, 'import numpy\n')] |
#!/usr/bin/env python
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import codecs
from setuptools import setup, find_packages
import restapi
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = 'restapi' ,
version = restapi.__version__ ,
author = restapi.__author__ ,
author_email = restapi.__contact__ ,
description = restapi.__doc__ ,
license = 'APACHE' ,
keywords = 'twitter restapi' ,
url = restapi.__homepage__,
packages = ['restapi'] ,
long_description = read('README') ,
)
| [
"os.path.dirname"
] | [((737, 762), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (752, 762), False, 'import os\n')] |
"""
Defines the blueprint for the users
"""
from flask import Blueprint
from flask_restful import Api
from resources import LoginResource, RegisterResource
USER_BLUEPRINT = Blueprint("user", __name__)
Api(USER_BLUEPRINT).add_resource(
LoginResource, "/login"
)
Api(USER_BLUEPRINT).add_resource(
RegisterResource, "/register"
) | [
"flask.Blueprint",
"flask_restful.Api"
] | [((175, 202), 'flask.Blueprint', 'Blueprint', (['"""user"""', '__name__'], {}), "('user', __name__)\n", (184, 202), False, 'from flask import Blueprint\n'), ((203, 222), 'flask_restful.Api', 'Api', (['USER_BLUEPRINT'], {}), '(USER_BLUEPRINT)\n', (206, 222), False, 'from flask_restful import Api\n'), ((268, 287), 'flask_restful.Api', 'Api', (['USER_BLUEPRINT'], {}), '(USER_BLUEPRINT)\n', (271, 287), False, 'from flask_restful import Api\n')] |
import sys
# 1. Express a solution mathematically: Let's be a Matrix M of (n+1) x (m+1):
# For 0 <= r <= n and 0 <= c <= m, M[r,c] contains the longest path from the source (0, 0) to (r, c)
# M[0 , 0] = 0
# M[0 , c] = M[0 , c - 1] + right[0, c] for 1 <= c <= m
# M[r , 0] = M[r - 1 , 0] + down[r , 0] for 1 <= r <= n
# M[r , c] = max(M[r - 1 , c] + down[r - 1 , c], M[r , c - 1] + right[r , c - 1])
# 2. Proof:
# Let's assume that there is a vertice (r, c) that belongs to the optimal path P with a the longest path length |P|
# But M[r , c] < max(M[r - 1 , c] + down[r - 1 , c], M[r , c - 1] + right[r , c - 1])
# This means that if we replace M[r , c] with max(M[r - 1 , c] + down[r - 1 , c], M[r , c - 1] + right[r , c - 1])
# The new path P length |P'| will be greater than |P| ==> contradiction with the fact that |P| was the longest path
# 3. Implementation:
# Buttom up solution
# Running Time: O(nm) (Quadratic)
# Space complexity: O(nm) (Quadratic)
class Solution:
def __init__(self, n, m):
self.rows_count = n + 1
self.columns_count = m + 1
def longest_path(self, down, right):
M = [ [0 for _ in range(self.columns_count)] for _ in range(self.rows_count) ]
for c in range(1, self.columns_count, 1):
M[0][c] = M[0][c - 1] + right[0][c - 1]
for r in range(1, self.rows_count, 1):
M[r][0] = M[r - 1][0] + down[r - 1][0]
for r in range(1, self.rows_count, 1):
for c in range(1, self.columns_count, 1):
candidate_predecesor_top = M[r - 1][c] + down[r - 1][c]
candidate_predecesor_left = M[r][c - 1] + right[r][c - 1]
M[r][c] = max(candidate_predecesor_top, candidate_predecesor_left)
return M[self.rows_count - 1][self.columns_count - 1]
if __name__ == "__main__":
n,m = map(int, sys.stdin.readline().strip().split())
down = [list(map(int, sys.stdin.readline().strip().split()))
for _ in range(n)]
sys.stdin.readline()
right = [list(map(int, sys.stdin.readline().strip().split()))
for _ in range(n+1)]
s = Solution(n, m)
print(s.longest_path(down, right))
| [
"sys.stdin.readline"
] | [((2060, 2080), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (2078, 2080), False, 'import sys\n'), ((1922, 1942), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1940, 1942), False, 'import sys\n'), ((1986, 2006), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (2004, 2006), False, 'import sys\n'), ((2108, 2128), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (2126, 2128), False, 'import sys\n')] |
import datetime
import random
from .helpers import answer, question
from .quotes import quotes
def launch():
return question("Was möchtest Du über <NAME> wissen?")
def quote_intent():
quote = random.choice(quotes)
return answer(quote["content"])
def who_intent():
return answer(
"<NAME> ist ein Unternehmer und Investor. "
"Er hat sowohl die Staatsangehörigkeit seines Geburtslandes "
"Südafrika als auch die von Kanada und den Vereinigten "
"Staaten. Musk ist bekannt geworden durch seine Beteiligung "
"an der Gründung des Online-Bezahlsystems PayPal sowie mit "
"seinen Erfolgen mit dem privaten Raumfahrtunternehmen "
"SpaceX und dem Elektroautohersteller Tesla.")
def birth_intent():
return answer("<NAME> wurde am 28. Juni 1971 in Pretoria geboren.")
def birthday_intent():
day, month = 28, 6
today = datetime.date.today()
if (datetime.date(today.year, month, day) - today).days >= 0:
# Has not yet have birthday this year
days_until_birthday = (datetime.date(today.year,
month, day) - today).days
else:
days_until_birthday = (datetime.date(today.year +
1, month, day) - today).days
if days_until_birthday == 0:
text = "<NAME> hat heute Geburtstag! Alles Gute <NAME>!"
elif days_until_birthday == 1:
text = "<NAME> hat morgen Geburtstag."
elif days_until_birthday == 2:
text = "<NAME> hat übermorgen Geburtstag."
elif datetime.date(today.year, month, day) - today == -1:
text = "<NAME> hatte gestern Geburtstag."
elif datetime.date(today.year, month, day) - today == -2:
text = "<NAME> hatte vorgestern Geburstag."
else:
text = (
"<NAME> hat am 28. Juni Geburtstag. "
f"Du musst noch {days_until_birthday} Tage bis zu "
"seinem Geburtstag warten."
)
return answer(text)
def age_intent():
today = datetime.date.today()
age = today.year - 1971 - ((today.month,
today.day) < (6, 28))
return answer(f"<NAME> ist {age} Jahre alt.")
def stop_intent():
return answer("Auf Wiedersehen!")
def help_intent():
return question("Frage <NAME> zum Beispiel, wie alt er ist.")
| [
"datetime.date.today",
"random.choice",
"datetime.date"
] | [((205, 226), 'random.choice', 'random.choice', (['quotes'], {}), '(quotes)\n', (218, 226), False, 'import random\n'), ((902, 923), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (921, 923), False, 'import datetime\n'), ((2047, 2068), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2066, 2068), False, 'import datetime\n'), ((932, 969), 'datetime.date', 'datetime.date', (['today.year', 'month', 'day'], {}), '(today.year, month, day)\n', (945, 969), False, 'import datetime\n'), ((1067, 1104), 'datetime.date', 'datetime.date', (['today.year', 'month', 'day'], {}), '(today.year, month, day)\n', (1080, 1104), False, 'import datetime\n'), ((1205, 1246), 'datetime.date', 'datetime.date', (['(today.year + 1)', 'month', 'day'], {}), '(today.year + 1, month, day)\n', (1218, 1246), False, 'import datetime\n'), ((1582, 1619), 'datetime.date', 'datetime.date', (['today.year', 'month', 'day'], {}), '(today.year, month, day)\n', (1595, 1619), False, 'import datetime\n'), ((1694, 1731), 'datetime.date', 'datetime.date', (['today.year', 'month', 'day'], {}), '(today.year, month, day)\n', (1707, 1731), False, 'import datetime\n')] |
import numpy as np
import pandas as pd
import skfuzzy as fuzz
from skfuzzy import control as ctrl
x = ctrl.Antecedent(np.arange(0.0, 2.0), "X")
y = ctrl.Consequent(np.arange(0.0, 2), "Y")
x.automf(names=["pequeno", "médio", "grande"])
y.automf(names=["baixo", "alto"])
regra_1 = ctrl.Rule(antecedent=x["pequeno"], consequent=y["baixo"], label="regra_1")
regra_2 = ctrl.Rule(antecedent=x["médio"], consequent=y["baixo"], label="regra_2")
regra_3 = ctrl.Rule(antecedent=x["médio"], consequent=y["alto"], label="regra_3") ####
regra_4 = ctrl.Rule(antecedent=x["grande"], consequent=y["alto"], label="regra_4") ####
controlador = ctrl.ControlSystem(rules=[regra_1, regra_2, regra_3, regra_4])
simulador = ctrl.ControlSystemSimulation(control_system=controlador)
# -----------------------------------------------------------------------------
def gerador(n=50):
amostras = []
for amostra in range(n):
x = np.random.random()
y = x ** 2
amostras.append([x, y])
return amostras
def main(amostras, valores, verboso=False):
soma_dos_erros = 0
for i, amostra in enumerate(amostras.values):
print("---------------------") if verboso else None
simulador.input["X"] = amostra
simulador.compute()
if verboso:
print(f"AMOSTRA {i}\nX={amostra:.4f}\nY={simulador.output['Y']:.4f}\n")
soma_dos_erros += (valores[i] - amostra) ** 2
erro_total = soma_dos_erros / len(amostras)
print("---------------------") if verboso else None
print(f"ERRO TOTAL: {erro_total:.4f}")
# x.view(sim=simulador)
# y.view(sim=simulador)
if __name__ == "__main__":
# df = pd.read_csv('dados.csv', header=None)
df = pd.DataFrame(gerador(50))
A = df[0]
B = df[1]
main(A, B)
input()
| [
"skfuzzy.control.ControlSystemSimulation",
"numpy.random.random",
"skfuzzy.control.ControlSystem",
"skfuzzy.control.Rule",
"numpy.arange"
] | [((282, 356), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "x['pequeno']", 'consequent': "y['baixo']", 'label': '"""regra_1"""'}), "(antecedent=x['pequeno'], consequent=y['baixo'], label='regra_1')\n", (291, 356), True, 'from skfuzzy import control as ctrl\n'), ((367, 439), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "x['médio']", 'consequent': "y['baixo']", 'label': '"""regra_2"""'}), "(antecedent=x['médio'], consequent=y['baixo'], label='regra_2')\n", (376, 439), True, 'from skfuzzy import control as ctrl\n'), ((450, 521), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "x['médio']", 'consequent': "y['alto']", 'label': '"""regra_3"""'}), "(antecedent=x['médio'], consequent=y['alto'], label='regra_3')\n", (459, 521), True, 'from skfuzzy import control as ctrl\n'), ((538, 610), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "x['grande']", 'consequent': "y['alto']", 'label': '"""regra_4"""'}), "(antecedent=x['grande'], consequent=y['alto'], label='regra_4')\n", (547, 610), True, 'from skfuzzy import control as ctrl\n'), ((633, 695), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[regra_1, regra_2, regra_3, regra_4]'}), '(rules=[regra_1, regra_2, regra_3, regra_4])\n', (651, 695), True, 'from skfuzzy import control as ctrl\n'), ((708, 764), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', ([], {'control_system': 'controlador'}), '(control_system=controlador)\n', (736, 764), True, 'from skfuzzy import control as ctrl\n'), ((119, 138), 'numpy.arange', 'np.arange', (['(0.0)', '(2.0)'], {}), '(0.0, 2.0)\n', (128, 138), True, 'import numpy as np\n'), ((165, 182), 'numpy.arange', 'np.arange', (['(0.0)', '(2)'], {}), '(0.0, 2)\n', (174, 182), True, 'import numpy as np\n'), ((926, 944), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (942, 944), True, 'import numpy as np\n')] |
# Copyright Contributors to the Testing Farm project.
# SPDX-License-Identifier: Apache-2.0
import logging
import re
import six
import bugzilla
import pytest
from mock import MagicMock
import gluetool
from gluetool.utils import load_yaml
from gluetool.log import format_dict
import gluetool_modules_framework.infrastructure.bugzilla
from . import create_module, check_loadable, testing_asset
@pytest.fixture(name='bugzilla')
def fixture_bugzilla(dataset, monkeypatch):
test_data = load_yaml(testing_asset('bugzilla', '{}.yaml'.format(dataset)))
api_key = 'some-api-key'
base_url = 'some-base-url'
class BugzillaMock(MagicMock):
bz_ver_major = '5'
bz_ver_minor = '0'
def __init__(self, url, **kwargs):
assert url == '{}/xmlrpc.cgi'.format(base_url)
assert kwargs['api_key'] == api_key
def getbugs(self, ids, **kwargs):
return [
MagicMock(**test_data['getbugs'][int(bug_id)])
for bug_id in ids
]
def build_update(*args, **kwargs):
return 'update'
def update_bugs(*args, **kwargs):
return True
monkeypatch.setattr(bugzilla, 'Bugzilla', BugzillaMock)
module = create_module(gluetool_modules_framework.infrastructure.bugzilla.Bugzilla)[1]
module._config['api-key'] = api_key
module._config['base-url'] = base_url
module._config['external-tracker-id-tcms'] = 69
module._config['bug-id'] = ','.join(str(id) for id in test_data['getbugs'].keys())
module._config['attributes'] = ['summary', 'priority', 'severity']
module._config['retry-tick'] = 1
module._config['retry-timeout'] = 1
# expected data
module._expected_bz_attrs = test_data['bugzilla_attributes']
module._expected_tcms_tests = test_data['tcms_tests']
return module
@pytest.fixture(name='module')
def fixture_module():
module = create_module(gluetool_modules_framework.infrastructure.bugzilla.Bugzilla)[1]
return module
def test_loadable(module):
check_loadable(module.glue, 'gluetool_modules_framework/infrastructure/bugzilla.py', 'Bugzilla')
@pytest.mark.parametrize('dataset', ['valid'])
def test_list_tcms_tests(bugzilla, log):
bugzilla._config['list-tcms-tests'] = True
bugzilla.execute()
for _, tests in six.iteritems(bugzilla._expected_tcms_tests):
for test in tests:
assert re.search(
'"TC#{} - {}"'.format(test['id'], test['description']),
log.records[-1].message
)
@pytest.mark.parametrize('dataset', ['no-tests'])
def test_list_tcms_tests_no_tests(bugzilla, log):
bugzilla._config['list-tcms-tests'] = True
bugzilla.execute()
if not bugzilla._expected_tcms_tests:
assert log.match(message='No TCMS tests found for given bugzillas.', levelno=logging.DEBUG)
@pytest.mark.parametrize('dataset', ['valid'])
def test_list_attributes(bugzilla, log):
bugzilla._config['list-attributes'] = True
bugzilla.execute()
print(format_dict(bugzilla._expected_bz_attrs))
assert log.match(
message='Bugzilla attributes:\n{}'.format(format_dict(bugzilla._expected_bz_attrs)),
levelno=logging.INFO
)
@pytest.mark.parametrize('dataset', ['valid'])
def test_post_comment(bugzilla, log):
bugzilla._config['post-comment'] = 'this-is-a-comment'
bugzilla.execute()
assert log.match(
message="""Given bugs updated with following comment:
---v---v---v---v---v---
this-is-a-comment
---^---^---^---^---^---""",
levelno=logging.INFO
)
def test_sanity(module):
# mutual exclusive option failure
module._config['list-attributes'] = True
module._config['list-tcms-tests'] = True
with pytest.raises(
gluetool.GlueError,
match="Options list-attributes, list-tcms-tests, post-comment are mutually exclusive"
):
module.sanity()
# required 'bug-id' failure
module._config['list-tcms-tests'] = False
with pytest.raises(gluetool.GlueError, match="Option 'bug-id' is required"):
module.sanity()
# all params fine, note we need to reinitialize bug_ids as it is done in sanity function
del module.bug_ids
module._config['bug-id'] = '123456'
module.sanity()
| [
"bugzilla.execute",
"pytest.mark.parametrize",
"pytest.raises",
"pytest.fixture",
"six.iteritems",
"gluetool.log.format_dict"
] | [((399, 430), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""bugzilla"""'}), "(name='bugzilla')\n", (413, 430), False, 'import pytest\n'), ((1862, 1891), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""module"""'}), "(name='module')\n", (1876, 1891), False, 'import pytest\n'), ((2157, 2202), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataset"""', "['valid']"], {}), "('dataset', ['valid'])\n", (2180, 2202), False, 'import pytest\n'), ((2569, 2617), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataset"""', "['no-tests']"], {}), "('dataset', ['no-tests'])\n", (2592, 2617), False, 'import pytest\n'), ((2886, 2931), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataset"""', "['valid']"], {}), "('dataset', ['valid'])\n", (2909, 2931), False, 'import pytest\n'), ((3252, 3297), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataset"""', "['valid']"], {}), "('dataset', ['valid'])\n", (3275, 3297), False, 'import pytest\n'), ((2297, 2315), 'bugzilla.execute', 'bugzilla.execute', ([], {}), '()\n', (2313, 2315), False, 'import bugzilla\n'), ((2337, 2381), 'six.iteritems', 'six.iteritems', (['bugzilla._expected_tcms_tests'], {}), '(bugzilla._expected_tcms_tests)\n', (2350, 2381), False, 'import six\n'), ((2721, 2739), 'bugzilla.execute', 'bugzilla.execute', ([], {}), '()\n', (2737, 2739), False, 'import bugzilla\n'), ((3026, 3044), 'bugzilla.execute', 'bugzilla.execute', ([], {}), '()\n', (3042, 3044), False, 'import bugzilla\n'), ((3401, 3419), 'bugzilla.execute', 'bugzilla.execute', ([], {}), '()\n', (3417, 3419), False, 'import bugzilla\n'), ((3056, 3096), 'gluetool.log.format_dict', 'format_dict', (['bugzilla._expected_bz_attrs'], {}), '(bugzilla._expected_bz_attrs)\n', (3067, 3096), False, 'from gluetool.log import format_dict\n'), ((3774, 3904), 'pytest.raises', 'pytest.raises', (['gluetool.GlueError'], {'match': '"""Options list-attributes, list-tcms-tests, post-comment are mutually exclusive"""'}), "(gluetool.GlueError, match=\n 'Options list-attributes, list-tcms-tests, post-comment are mutually exclusive'\n )\n", (3787, 3904), False, 'import pytest\n'), ((4030, 4100), 'pytest.raises', 'pytest.raises', (['gluetool.GlueError'], {'match': '"""Option \'bug-id\' is required"""'}), '(gluetool.GlueError, match="Option \'bug-id\' is required")\n', (4043, 4100), False, 'import pytest\n'), ((3171, 3211), 'gluetool.log.format_dict', 'format_dict', (['bugzilla._expected_bz_attrs'], {}), '(bugzilla._expected_bz_attrs)\n', (3182, 3211), False, 'from gluetool.log import format_dict\n')] |
"""Test the JupyterClient."""
from __future__ import annotations
from typing import TYPE_CHECKING
import httpx
import pytest
import respx
import structlog
from noteburst.jupyterclient.jupyterlab import (
JupyterClient,
JupyterConfig,
JupyterImageSelector,
)
from noteburst.jupyterclient.user import User
from tests.support.gafaelfawr import mock_gafaelfawr
if TYPE_CHECKING:
from tests.support.cachemachine import MockCachemachine
from tests.support.jupyter import MockJupyter
@pytest.mark.asyncio
async def test_jupyterclient(
respx_mock: respx.Router,
jupyter: MockJupyter,
cachemachine: MockCachemachine,
) -> None:
user = User(username="someuser", uid="1234")
mock_gafaelfawr(
respx_mock=respx_mock, username=user.username, uid=user.uid
)
logger = structlog.get_logger(__name__)
jupyter_config = JupyterConfig(
image_selector=JupyterImageSelector.RECOMMENDED
)
async with httpx.AsyncClient() as http_client:
authed_user = await user.login(
scopes=["exec:notebook"], http_client=http_client
)
async with JupyterClient(
user=authed_user, logger=logger, config=jupyter_config
) as jupyter_client:
await jupyter_client.log_into_hub()
image_info = await jupyter_client.spawn_lab()
print(image_info)
async for progress in jupyter_client.spawn_progress():
print(progress)
await jupyter_client.log_into_lab()
# FIXME the test code for this isn't full set up yet
# async with jupyter_client.open_lab_session() as lab_session:
# print(lab_session.kernel_id)
await jupyter_client.stop_lab()
| [
"structlog.get_logger",
"noteburst.jupyterclient.user.User",
"tests.support.gafaelfawr.mock_gafaelfawr",
"noteburst.jupyterclient.jupyterlab.JupyterConfig",
"httpx.AsyncClient",
"noteburst.jupyterclient.jupyterlab.JupyterClient"
] | [((669, 706), 'noteburst.jupyterclient.user.User', 'User', ([], {'username': '"""someuser"""', 'uid': '"""1234"""'}), "(username='someuser', uid='1234')\n", (673, 706), False, 'from noteburst.jupyterclient.user import User\n'), ((711, 787), 'tests.support.gafaelfawr.mock_gafaelfawr', 'mock_gafaelfawr', ([], {'respx_mock': 'respx_mock', 'username': 'user.username', 'uid': 'user.uid'}), '(respx_mock=respx_mock, username=user.username, uid=user.uid)\n', (726, 787), False, 'from tests.support.gafaelfawr import mock_gafaelfawr\n'), ((816, 846), 'structlog.get_logger', 'structlog.get_logger', (['__name__'], {}), '(__name__)\n', (836, 846), False, 'import structlog\n'), ((869, 931), 'noteburst.jupyterclient.jupyterlab.JupyterConfig', 'JupyterConfig', ([], {'image_selector': 'JupyterImageSelector.RECOMMENDED'}), '(image_selector=JupyterImageSelector.RECOMMENDED)\n', (882, 931), False, 'from noteburst.jupyterclient.jupyterlab import JupyterClient, JupyterConfig, JupyterImageSelector\n'), ((962, 981), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {}), '()\n', (979, 981), False, 'import httpx\n'), ((1129, 1198), 'noteburst.jupyterclient.jupyterlab.JupyterClient', 'JupyterClient', ([], {'user': 'authed_user', 'logger': 'logger', 'config': 'jupyter_config'}), '(user=authed_user, logger=logger, config=jupyter_config)\n', (1142, 1198), False, 'from noteburst.jupyterclient.jupyterlab import JupyterClient, JupyterConfig, JupyterImageSelector\n')] |
import glob, os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def quick_plot(results_file, gauss_width, start, stop, step):
with open(results_file, "r") as results:
results = results.read().split('\n')
results = [float(res) for res in results[:-1]]
eigenenergies = results
gauss_width = gauss_width
D_E = 0
E = np.arange(start, stop, step)
for eigenenergy in eigenenergies:
D_E = D_E + np.exp(-(E - eigenenergy)**2 / (2 * gauss_width**2)) / (np.pi * gauss_width * np.sqrt(2))
font = {'family': 'serif',
'color': 'black',
'weight': 'normal',
'size': 16}
plt.figure(figsize=(13.66, 7.68))
plt.plot(E, D_E)
plt.xlabel('\nEnergy [a.u.]', fontsize=15,fontdict=font)
section = np.arange(-1, 1, 1/20.)
plt.fill_between(E, D_E, color='blue', alpha=0.3)
plt.ylabel('DOS\n', fontsize=15,fontdict=font)
plt.title('Density of states\n', fontsize=15,fontdict=font)
plt.xlim(start, stop)
plt.ylim(bottom=0)
plt.subplots_adjust(left=0.15)
plt.xticks(fontsize=11)
plt.yticks(fontsize=11)
#plt.gca().spines['right'].set_position(('data',0))
#plt.gca().spines['top'].set_position(('data',0))
plt.savefig(results_file + '.png', dpi=400)
plt.grid(False)
plt.close()
return
def main():
sns.set()
start = [-7,-6,-1.1,-6]#-7,-5.5,-5,-7,-0.1,-7,-5.,-6.6,-7,-0.5,-6.5,-7,-5,-7,-6,-7,-7,-7,0.1,0.5,-6,-0.5,-7,-7,-0.6,-7,-5.5,-6,-7,-7,-7,-7,-7,-6.5,-7,-7,-7
stop = [7,6,10.1,6] #7,14.5,5,7,14.5,7,13.5,6.5,7,15.5,15,7,14.,7,6,7,7,7,14.5,14.5,6,10,7,7,15.5,7,13.7,6,7,7,7,7,7,6.5,7,7,7
step = 0.01
gauss_width = 0.06
path = "/home/przemek/Documents/Modeling/tight_binding/results_diploma"
results = []
print(len(start), len(stop))
os.chdir(path)
for file in glob.glob("*.txt"):
input_file = path + '/' + file
ready_input_file = open(input_file, 'r')
num_list = [float(num) for num in ready_input_file.read().split()]
max_val = max(num_list)
min_val = min(num_list)
results.append([max_val, min_val, file])
for num, result in enumerate(results):
print(result[2])
print(start[num], stop[num])
quick_plot(path + '/' + result[2], gauss_width, start[num], stop[num], step)
return
if __name__ == '__main__':
exit(main())
| [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.fill_between",
"numpy.arange",
"seaborn.set",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"glob.glob"... | [((360, 388), 'numpy.arange', 'np.arange', (['start', 'stop', 'step'], {}), '(start, stop, step)\n', (369, 388), True, 'import numpy as np\n'), ((640, 673), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13.66, 7.68)'}), '(figsize=(13.66, 7.68))\n', (650, 673), True, 'import matplotlib.pyplot as plt\n'), ((676, 692), 'matplotlib.pyplot.plot', 'plt.plot', (['E', 'D_E'], {}), '(E, D_E)\n', (684, 692), True, 'import matplotlib.pyplot as plt\n'), ((695, 752), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\nEnergy [a.u.]"""'], {'fontsize': '(15)', 'fontdict': 'font'}), "('\\nEnergy [a.u.]', fontsize=15, fontdict=font)\n", (705, 752), True, 'import matplotlib.pyplot as plt\n'), ((764, 790), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(1 / 20.0)'], {}), '(-1, 1, 1 / 20.0)\n', (773, 790), True, 'import numpy as np\n'), ((790, 839), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['E', 'D_E'], {'color': '"""blue"""', 'alpha': '(0.3)'}), "(E, D_E, color='blue', alpha=0.3)\n", (806, 839), True, 'import matplotlib.pyplot as plt\n'), ((842, 889), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""DOS\n"""'], {'fontsize': '(15)', 'fontdict': 'font'}), "('DOS\\n', fontsize=15, fontdict=font)\n", (852, 889), True, 'import matplotlib.pyplot as plt\n'), ((891, 951), 'matplotlib.pyplot.title', 'plt.title', (['"""Density of states\n"""'], {'fontsize': '(15)', 'fontdict': 'font'}), "('Density of states\\n', fontsize=15, fontdict=font)\n", (900, 951), True, 'import matplotlib.pyplot as plt\n'), ((953, 974), 'matplotlib.pyplot.xlim', 'plt.xlim', (['start', 'stop'], {}), '(start, stop)\n', (961, 974), True, 'import matplotlib.pyplot as plt\n'), ((977, 995), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(0)'}), '(bottom=0)\n', (985, 995), True, 'import matplotlib.pyplot as plt\n'), ((998, 1028), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)'}), '(left=0.15)\n', (1017, 1028), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1054), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(11)'}), '(fontsize=11)\n', (1041, 1054), True, 'import matplotlib.pyplot as plt\n'), ((1057, 1080), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(11)'}), '(fontsize=11)\n', (1067, 1080), True, 'import matplotlib.pyplot as plt\n'), ((1189, 1232), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(results_file + '.png')"], {'dpi': '(400)'}), "(results_file + '.png', dpi=400)\n", (1200, 1232), True, 'import matplotlib.pyplot as plt\n'), ((1235, 1250), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (1243, 1250), True, 'import matplotlib.pyplot as plt\n'), ((1253, 1264), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1262, 1264), True, 'import matplotlib.pyplot as plt\n'), ((1296, 1305), 'seaborn.set', 'sns.set', ([], {}), '()\n', (1303, 1305), True, 'import seaborn as sns\n'), ((1752, 1766), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (1760, 1766), False, 'import glob, os\n'), ((1781, 1799), 'glob.glob', 'glob.glob', (['"""*.txt"""'], {}), "('*.txt')\n", (1790, 1799), False, 'import glob, os\n'), ((443, 499), 'numpy.exp', 'np.exp', (['(-(E - eigenenergy) ** 2 / (2 * gauss_width ** 2))'], {}), '(-(E - eigenenergy) ** 2 / (2 * gauss_width ** 2))\n', (449, 499), True, 'import numpy as np\n'), ((521, 531), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (528, 531), True, 'import numpy as np\n')] |
from flask import Flask
from flask.ext.socketio import SocketIO
from flask.ext.login import LoginManager
from flask.ext.sqlalchemy import SQLAlchemy
import redis
app = Flask(__name__, static_url_path='/static')
app.config.from_pyfile('./config.py')
from config import REDIS_SERVER, REDIS_PORT, REDIS_DB
redis_db = redis.StrictRedis(host=REDIS_SERVER, port=REDIS_PORT, db=REDIS_DB)
socketio = SocketIO(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.login_view = 'sign_in'
login_manager.init_app(app)
from . import views, websockets
from . import wizard_views
| [
"flask.ext.login.LoginManager",
"flask.Flask",
"flask.ext.socketio.SocketIO",
"flask.ext.sqlalchemy.SQLAlchemy",
"redis.StrictRedis"
] | [((169, 211), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '"""/static"""'}), "(__name__, static_url_path='/static')\n", (174, 211), False, 'from flask import Flask\n'), ((317, 383), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'REDIS_SERVER', 'port': 'REDIS_PORT', 'db': 'REDIS_DB'}), '(host=REDIS_SERVER, port=REDIS_PORT, db=REDIS_DB)\n', (334, 383), False, 'import redis\n'), ((396, 409), 'flask.ext.socketio.SocketIO', 'SocketIO', (['app'], {}), '(app)\n', (404, 409), False, 'from flask.ext.socketio import SocketIO\n'), ((415, 430), 'flask.ext.sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (425, 430), False, 'from flask.ext.sqlalchemy import SQLAlchemy\n'), ((448, 462), 'flask.ext.login.LoginManager', 'LoginManager', ([], {}), '()\n', (460, 462), False, 'from flask.ext.login import LoginManager\n')] |
from ..tweet_sentiment_classifier import Classifier, tokenizer_filter
import pickle as pkl
import numpy as np
import json
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
class BoW_Model(Classifier):
def __init__(self, vocab_size=100000, max_iter=10000, validation_split=0.2, accuracy=0, bootstrap=1,
remove_stopwords=True, remove_punctuation=True, lemmatize=True, **kwargs):
"""
Constructor for BoW_Model
Be sure to add additional parameters to export()
:param vocab_size: (int) Maximum vocabulary size. Default 1E6
:param max_iter: (int) Maximum number of fit iterations
:param remove_punctuation: (Bool) Remove punctuation. Recommended.
:param remove_stopwords: (Bool) Remove stopwords. Recommended.
:param lemmatize: (Bool) Lemmatize words. Recommended.
"""
self.package = 'twitter_nlp_toolkit.tweet_sentiment_classifier.models.bow_models'
self.type = 'BoW_Model'
self.vectorizer = None
self.classifier = None
self.vocab_size = vocab_size
self.max_iter = max_iter
self.validation_split = validation_split
self.accuracy = accuracy
self.bootstrap = bootstrap
self.remove_punctuation = remove_punctuation
self.remove_stopwords = remove_stopwords
self.lemmatize = lemmatize
def fit(self, train_data, y, weights=None, custom_vocabulary=None):
"""
Fit the model (from scratch)
:param train_data: (List-like) List of strings to train on
:param y: (vector) Targets
:param weights: (vector) Training weights. Optional
:param custom_vocabulary: (List of Strings) Custom vocabulary. Not recommended
"""
if weights is not None:
try:
y = np.hstack(y, weights)
except:
print('Weights not accepted')
if 1 < self.bootstrap < len(y):
train_data, y = resample(train_data, y, n_samples=self.bootstrap, stratify=y, replace=False)
elif self.bootstrap < 1:
n_samples = int(self.bootstrap * len(y))
train_data, y = resample(train_data, y, n_samples=n_samples, stratify=y, replace=False)
filtered_data = tokenizer_filter(train_data, remove_punctuation=self.remove_punctuation,
remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize)
self.vectorizer = TfidfVectorizer(analyzer=str.split, max_features=self.vocab_size)
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
X = self.vectorizer.fit_transform(cleaned_data)
trainX, testX, trainY, testY = train_test_split(X, y, test_size=self.validation_split, stratify=y)
print('Fitting BoW model')
self.classifier = LogisticRegression(max_iter=self.max_iter).fit(trainX, trainY)
self.accuracy = accuracy_score(testY, self.classifier.predict(testX))
def refine(self, train_data, y, bootstrap=True, weights=None, max_iter=500, preprocess=True):
"""
Train the models further on new data. Note that it is not possible to increase the vocabulary
:param train_data: (List-like of Strings) List of strings to train on
:param y: (vector) Targets
:param max_iter: (int) Maximum number of fit iterations. Default: 500
"""
if weights is not None:
try:
y = np.hstack(y, weights)
except:
print('Weights not accepted')
if bootstrap and 1 < self.bootstrap < len(y):
train_data, y = resample(train_data, y, n_samples=self.bootstrap, stratify=y, replace=False)
elif bootstrap and self.bootstrap < 1:
n_samples = int(self.bootstrap * len(y))
train_data, y = resample(train_data, y, n_samples=n_samples, stratify=y, replace=False)
if preprocess:
filtered_data = tokenizer_filter(train_data, remove_punctuation=self.remove_punctuation,
remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize)
print('\n Filtered data')
else:
filtered_data = train_data
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
X = self.vectorizer.transform(cleaned_data)
self.classifier = LogisticRegression(random_state=0, max_iter=max_iter).fit(X, y)
self.classifier.fit(X, y)
def predict(self, data, **kwargs):
"""
Predict the binary sentiment of a list of tweets
:param data: (list of Strings) Input tweets
:param kwargs: Keywords for predict_proba
:return: (list of bool) Predictions
"""
return np.round(self.predict_proba(data, **kwargs))
def predict_proba(self, data):
"""
Makes predictions
:param data: (List-like) List of strings to predict sentiment
:return: (vector) Un-binarized Predictions
"""
if self.classifier is None:
raise ValueError('Model has not been trained!')
filtered_data = tokenizer_filter(data, remove_punctuation=self.remove_punctuation,
remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize,
verbose=False)
cleaned_data = [' '.join(tweet) for tweet in filtered_data]
X = self.vectorizer.transform(cleaned_data)
return self.classifier.predict(X)
def export(self, filename):
"""
Saves the model to disk
:param filename: (String) Path to file
"""
parameters = {'Classifier': self.type,
'package': self.package,
'vocab_size': int(self.vocab_size),
'max_iter': int(self.max_iter),
'validation_split': float(self.validation_split),
'accuracy': float(self.accuracy),
'remove_punctuation': self.remove_punctuation,
'remove_stopwords': self.remove_stopwords,
'lemmatize': self.lemmatize,
'bootstrap': self.bootstrap
}
if parameters['bootstrap'] < 1:
parameters['bootstrap'] = float(parameters['bootstrap'])
else:
parameters['bootstrap'] = int(parameters['bootstrap'])
os.makedirs(filename, exist_ok=True)
with open(filename + '/param.json', 'w+') as outfile:
json.dump(parameters, outfile)
with open(filename + '/bow_vectorizer.pkl', 'wb+') as outfile:
pkl.dump(self.vectorizer, outfile)
with open(filename + '/bow_classifier.pkl', 'wb+') as outfile:
pkl.dump(self.classifier, outfile)
def load_model(self, filename):
"""
# TODO revise to properly close pkl files
:param filename: (String) Path to file
"""
self.vectorizer = pkl.load(open(filename + '/bow_vectorizer.pkl', 'rb'))
self.classifier = pkl.load(open(filename + '/bow_classifier.pkl', 'rb'))
| [
"pickle.dump",
"os.makedirs",
"numpy.hstack",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.LogisticRegression",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.utils.resample",
"json.dump"
] | [((2678, 2743), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': 'str.split', 'max_features': 'self.vocab_size'}), '(analyzer=str.split, max_features=self.vocab_size)\n', (2693, 2743), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2908, 2975), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'self.validation_split', 'stratify': 'y'}), '(X, y, test_size=self.validation_split, stratify=y)\n', (2924, 2975), False, 'from sklearn.model_selection import train_test_split\n'), ((6662, 6698), 'os.makedirs', 'os.makedirs', (['filename'], {'exist_ok': '(True)'}), '(filename, exist_ok=True)\n', (6673, 6698), False, 'import os\n'), ((2183, 2259), 'sklearn.utils.resample', 'resample', (['train_data', 'y'], {'n_samples': 'self.bootstrap', 'stratify': 'y', 'replace': '(False)'}), '(train_data, y, n_samples=self.bootstrap, stratify=y, replace=False)\n', (2191, 2259), False, 'from sklearn.utils import resample\n'), ((3836, 3912), 'sklearn.utils.resample', 'resample', (['train_data', 'y'], {'n_samples': 'self.bootstrap', 'stratify': 'y', 'replace': '(False)'}), '(train_data, y, n_samples=self.bootstrap, stratify=y, replace=False)\n', (3844, 3912), False, 'from sklearn.utils import resample\n'), ((6773, 6803), 'json.dump', 'json.dump', (['parameters', 'outfile'], {}), '(parameters, outfile)\n', (6782, 6803), False, 'import json\n'), ((6887, 6921), 'pickle.dump', 'pkl.dump', (['self.vectorizer', 'outfile'], {}), '(self.vectorizer, outfile)\n', (6895, 6921), True, 'import pickle as pkl\n'), ((7005, 7039), 'pickle.dump', 'pkl.dump', (['self.classifier', 'outfile'], {}), '(self.classifier, outfile)\n', (7013, 7039), True, 'import pickle as pkl\n'), ((2026, 2047), 'numpy.hstack', 'np.hstack', (['y', 'weights'], {}), '(y, weights)\n', (2035, 2047), True, 'import numpy as np\n'), ((2374, 2445), 'sklearn.utils.resample', 'resample', (['train_data', 'y'], {'n_samples': 'n_samples', 'stratify': 'y', 'replace': '(False)'}), '(train_data, y, n_samples=n_samples, stratify=y, replace=False)\n', (2382, 2445), False, 'from sklearn.utils import resample\n'), ((3038, 3080), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': 'self.max_iter'}), '(max_iter=self.max_iter)\n', (3056, 3080), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3665, 3686), 'numpy.hstack', 'np.hstack', (['y', 'weights'], {}), '(y, weights)\n', (3674, 3686), True, 'import numpy as np\n'), ((4041, 4112), 'sklearn.utils.resample', 'resample', (['train_data', 'y'], {'n_samples': 'n_samples', 'stratify': 'y', 'replace': '(False)'}), '(train_data, y, n_samples=n_samples, stratify=y, replace=False)\n', (4049, 4112), False, 'from sklearn.utils import resample\n'), ((4586, 4639), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'max_iter': 'max_iter'}), '(random_state=0, max_iter=max_iter)\n', (4604, 4639), False, 'from sklearn.linear_model import LogisticRegression\n')] |
import os
import server
import unittest
import tempfile
class FlaskrTestCase(unittest.TestCase):
def setUp(self):
self.db_fd, server.app.config['DATABASE'] = tempfile.mkstemp()
server.app.config['TESTING'] = True
self.app = server.app.test_client()
server.init_db()
def tearDown(self):
os.close(self.db_fd)
os.unlink(server.app.config['DATABASE'])
def test_empty_db(self):
rv = self.app.get('/')
assert '200' in rv.status
assert bytes('No entries here so far', 'UTF-8') in rv.data
if __name__ == '__main__':
unittest.main()
| [
"server.init_db",
"server.app.test_client",
"os.close",
"os.unlink",
"unittest.main",
"tempfile.mkstemp"
] | [((603, 618), 'unittest.main', 'unittest.main', ([], {}), '()\n', (616, 618), False, 'import unittest\n'), ((173, 191), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (189, 191), False, 'import tempfile\n'), ((255, 279), 'server.app.test_client', 'server.app.test_client', ([], {}), '()\n', (277, 279), False, 'import server\n'), ((288, 304), 'server.init_db', 'server.init_db', ([], {}), '()\n', (302, 304), False, 'import server\n'), ((338, 358), 'os.close', 'os.close', (['self.db_fd'], {}), '(self.db_fd)\n', (346, 358), False, 'import os\n'), ((367, 407), 'os.unlink', 'os.unlink', (["server.app.config['DATABASE']"], {}), "(server.app.config['DATABASE'])\n", (376, 407), False, 'import os\n')] |
# Owen's experiment to convert a CSDS to the HF data structure
import datasets
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments
from datasets import Dataset, DatasetDict, ClassLabel, load_metric
# create a CSDS as dict
# First create a mapping from string labels to integers
c2l = ClassLabel(num_classes=3, names=['CB', 'NCB', 'NA'])
csds_train_dict = {'text': ["John said he * likes * beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary sometimes says she likes beets.",
"Mary maybe likes beets."
],
'label': map(c2l.str2int, ["CB", "NCB", "NCB", "NCB", "NCB", "NCB", "NCB",
"NCB", "NCB", "NCB", "NCB", "NCB", "NCB", "NCB",
"NCB", "NCB", "NCB", "NCB", "NCB", "NCB"])}
csds_eval_dict = {'text': ["Peter said he likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan sometimes says she likes beets.",
"Joan maybe likes beets."
],
'label': map(c2l.str2int, ["CB", "NCB", "NCB", "NCB", "NCB", "NCB", "NCB",
"NCB", "NCB", "NCB", "NCB", "NCB", "NCB", "NCB",
"NCB", "NCB", "NCB", "NCB", "NCB", "NCB"])}
csds_train_dataset = Dataset.from_dict(csds_train_dict)
csds_eval_dataset = Dataset.from_dict(csds_eval_dict)
csds_datasets = DatasetDict({'train': csds_train_dataset,
'eval': csds_eval_dataset})
def notify(string):
print(">>>> ", string, " <<<<")
notify("Created datset, now tokenizing dataset")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
def tokenize_function(examples):
return tokenizer(examples["text"], padding="max_length", truncation=True)
tokenized_csds_datasets = csds_datasets.map(tokenize_function, batched=True)
notify("Done tokenizing dataset")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=3)
metric = load_metric("accuracy")
# In the named arguments below, replace full_train_dataset
# and full-eval_dataset with small_train_dataset and
# small_eval_dataset, respectively, for experimentation with
# a small subset of the input data and a shorter running time.
def compute_metrics(eval_pred):
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
return metric.compute(predictions=predictions, references=labels)
notify("Starting training")
training_args = TrainingArguments("../CSDS/test_trainer")
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_csds_datasets['train'],
eval_dataset=tokenized_csds_datasets['eval'],
compute_metrics=compute_metrics,
)
trainer.train()
notify("Done training")
results = trainer.evaluate()
print(results)
| [
"datasets.load_metric",
"transformers.TrainingArguments",
"datasets.Dataset.from_dict",
"numpy.argmax",
"transformers.AutoModelForSequenceClassification.from_pretrained",
"datasets.DatasetDict",
"datasets.ClassLabel",
"transformers.AutoTokenizer.from_pretrained",
"transformers.Trainer"
] | [((357, 409), 'datasets.ClassLabel', 'ClassLabel', ([], {'num_classes': '(3)', 'names': "['CB', 'NCB', 'NA']"}), "(num_classes=3, names=['CB', 'NCB', 'NA'])\n", (367, 409), False, 'from datasets import Dataset, DatasetDict, ClassLabel, load_metric\n'), ((3639, 3673), 'datasets.Dataset.from_dict', 'Dataset.from_dict', (['csds_train_dict'], {}), '(csds_train_dict)\n', (3656, 3673), False, 'from datasets import Dataset, DatasetDict, ClassLabel, load_metric\n'), ((3694, 3727), 'datasets.Dataset.from_dict', 'Dataset.from_dict', (['csds_eval_dict'], {}), '(csds_eval_dict)\n', (3711, 3727), False, 'from datasets import Dataset, DatasetDict, ClassLabel, load_metric\n'), ((3744, 3813), 'datasets.DatasetDict', 'DatasetDict', (["{'train': csds_train_dataset, 'eval': csds_eval_dataset}"], {}), "({'train': csds_train_dataset, 'eval': csds_eval_dataset})\n", (3755, 3813), False, 'from datasets import Dataset, DatasetDict, ClassLabel, load_metric\n'), ((3969, 4017), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""bert-base-cased"""'], {}), "('bert-base-cased')\n", (3998, 4017), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments\n'), ((4254, 4341), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['"""bert-base-cased"""'], {'num_labels': '(3)'}), "('bert-base-cased',\n num_labels=3)\n", (4304, 4341), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments\n'), ((4347, 4370), 'datasets.load_metric', 'load_metric', (['"""accuracy"""'], {}), "('accuracy')\n", (4358, 4370), False, 'from datasets import Dataset, DatasetDict, ClassLabel, load_metric\n'), ((4834, 4875), 'transformers.TrainingArguments', 'TrainingArguments', (['"""../CSDS/test_trainer"""'], {}), "('../CSDS/test_trainer')\n", (4851, 4875), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments\n'), ((4886, 5063), 'transformers.Trainer', 'Trainer', ([], {'model': 'model', 'args': 'training_args', 'train_dataset': "tokenized_csds_datasets['train']", 'eval_dataset': "tokenized_csds_datasets['eval']", 'compute_metrics': 'compute_metrics'}), "(model=model, args=training_args, train_dataset=\n tokenized_csds_datasets['train'], eval_dataset=tokenized_csds_datasets[\n 'eval'], compute_metrics=compute_metrics)\n", (4893, 5063), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments\n'), ((4690, 4716), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (4699, 4716), True, 'import numpy as np\n')] |
import logging
import time
from main.handlers import create_graph_from_path
from main.models import Graph
log = logging.getLogger(__name__)
def create_graph_from_filename(filename):
graph = create_graph_from_path(filename)
state = Graph.State.INITIALIZING
log.info('Initializing graph %d', graph.id)
# This is done outside of transaction management.
while state == Graph.State.INITIALIZING:
time.sleep(1)
state = (
Graph.objects
.filter(id=graph.id)
.values_list('state', flat=True)
.first()
)
log.info('Initializing finished. State: %s', state)
return Graph.objects.get(id=graph.id)
| [
"logging.getLogger",
"main.models.Graph.objects.get",
"time.sleep",
"main.handlers.create_graph_from_path",
"main.models.Graph.objects.filter"
] | [((114, 141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (131, 141), False, 'import logging\n'), ((198, 230), 'main.handlers.create_graph_from_path', 'create_graph_from_path', (['filename'], {}), '(filename)\n', (220, 230), False, 'from main.handlers import create_graph_from_path\n'), ((660, 690), 'main.models.Graph.objects.get', 'Graph.objects.get', ([], {'id': 'graph.id'}), '(id=graph.id)\n', (677, 690), False, 'from main.models import Graph\n'), ((425, 438), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (435, 438), False, 'import time\n'), ((469, 502), 'main.models.Graph.objects.filter', 'Graph.objects.filter', ([], {'id': 'graph.id'}), '(id=graph.id)\n', (489, 502), False, 'from main.models import Graph\n')] |
from math import floor
class Nullablefloat(float):
def __new__(cls, val, *, is_null=False):
return super().__new__(cls, val)
def __init__(self, val, *, is_null=False):
self.is_null = is_null
def __repr__(self):
return 'null' if self.is_null else super().__repr__()
def neg(self):
return type(self)(-self)
def abs(self):
return self if self.is_null else type(self)(abs(self))
def floor(self):
return self if self.is_null else type(self)(floor(self))
def round(self, n):
return self if self.is_null else type(self)(round(self, n))
def truncate(self, n):
return self if self.is_null else type(self)(f'{self:.{int(n)}f}')
def __add__(self, other):
return self.__modify(super().__add__(other), other)
def __sub__(self, other):
return self.__modify(super().__sub__(other), other)
def __mul__(self, other):
return self.__modify(super().__mul__(other), other)
def __truediv__(self, other):
return self.__modify(super().__truediv__(other), other)
def __modify(self, val, other):
return type(self)(val, is_null=self.is_null & other.is_null)
def nullablefloat(val):
try:
return Nullablefloat(val, is_null=False)
except (ValueError, TypeError):
return Nullablefloat(0, is_null=True)
| [
"math.floor"
] | [((515, 526), 'math.floor', 'floor', (['self'], {}), '(self)\n', (520, 526), False, 'from math import floor\n')] |
"""
Test integrators with simple ODE
dx/dy = 3x^2y given x0 = 1, y0 = 2
ANALYTIC SOLUTION:
y = e^{x^3 + c}, c = ln(2) - 1
y(1,1.1,1.2,1.3,1.4) = [2,2.78471958461639,4.141869187709196,6.6203429951303265,11.440356871885081]
"""
# Import package, test suite, and other packages as needed
import numpy as np
from pycc.rt import integrators as ints
def f(x,y):
"""dy/dx = f(x,y) = 3x^2y"""
Y = 3.*x**2. * y
return Y
def chk_ode(ode):
h = 0.1
ODE = ode(h)
t0 = 1
y0 = 2
y1 = ODE(f,t0,y0)
y2 = ODE(f,t0+h,y1)
y3 = ODE(f,t0+2*h,y2)
y4 = ODE(f,t0+3*h,y3)
return np.array([y0,y1,y2,y3,y4])
def test_rk4():
"""Test 4th-order Runge-Kutta"""
rk4 = chk_ode(ints.rk4)
ref = np.array([2,2.7846419118859376,4.141490537335979,6.618844434974082,11.434686303979237])
assert np.allclose(rk4,ref)
def test_rk38():
"""Test "corrected" 3rd-order Runge-Kutta"""
rk38 = chk_ode(ints.rk38)
ref = np.array([2,2.7846719015333337,4.141594947022453,6.619134913159302,11.435455703714204])
assert np.allclose(rk38,ref)
def test_rk3():
"""Test 3rd-order Runge-Kutta"""
rk3 = chk_ode(ints.rk3)
ref = np.array([2,2.783897725,4.137908208354427,6.60545045860959,11.38808439342214])
assert np.allclose(rk3,ref)
def test_rk2():
"""Test 2nd-order Runge-Kutta"""
rk2 = chk_ode(ints.rk2)
ref = np.array([2,2.7643999999999997,4.066743395,6.396857224546359,10.804576512405294])
assert np.allclose(rk2,ref)
def test_gl6():
"""Test 6th-order Gauss-Legendre"""
gl6 = chk_ode(ints.gl6)
ref = np.array([2,2.78364923694925,4.1371512621094695,6.603613786914487,11.383853535021142])
assert np.allclose(gl6,ref)
| [
"numpy.array",
"numpy.allclose"
] | [((611, 641), 'numpy.array', 'np.array', (['[y0, y1, y2, y3, y4]'], {}), '([y0, y1, y2, y3, y4])\n', (619, 641), True, 'import numpy as np\n'), ((730, 826), 'numpy.array', 'np.array', (['[2, 2.7846419118859376, 4.141490537335979, 6.618844434974082, \n 11.434686303979237]'], {}), '([2, 2.7846419118859376, 4.141490537335979, 6.618844434974082, \n 11.434686303979237])\n', (738, 826), True, 'import numpy as np\n'), ((830, 851), 'numpy.allclose', 'np.allclose', (['rk4', 'ref'], {}), '(rk4, ref)\n', (841, 851), True, 'import numpy as np\n'), ((958, 1054), 'numpy.array', 'np.array', (['[2, 2.7846719015333337, 4.141594947022453, 6.619134913159302, \n 11.435455703714204]'], {}), '([2, 2.7846719015333337, 4.141594947022453, 6.619134913159302, \n 11.435455703714204])\n', (966, 1054), True, 'import numpy as np\n'), ((1058, 1080), 'numpy.allclose', 'np.allclose', (['rk38', 'ref'], {}), '(rk38, ref)\n', (1069, 1080), True, 'import numpy as np\n'), ((1172, 1259), 'numpy.array', 'np.array', (['[2, 2.783897725, 4.137908208354427, 6.60545045860959, 11.38808439342214]'], {}), '([2, 2.783897725, 4.137908208354427, 6.60545045860959, \n 11.38808439342214])\n', (1180, 1259), True, 'import numpy as np\n'), ((1263, 1284), 'numpy.allclose', 'np.allclose', (['rk3', 'ref'], {}), '(rk3, ref)\n', (1274, 1284), True, 'import numpy as np\n'), ((1376, 1466), 'numpy.array', 'np.array', (['[2, 2.7643999999999997, 4.066743395, 6.396857224546359, 10.804576512405294]'], {}), '([2, 2.7643999999999997, 4.066743395, 6.396857224546359, \n 10.804576512405294])\n', (1384, 1466), True, 'import numpy as np\n'), ((1470, 1491), 'numpy.allclose', 'np.allclose', (['rk2', 'ref'], {}), '(rk2, ref)\n', (1481, 1491), True, 'import numpy as np\n'), ((1586, 1681), 'numpy.array', 'np.array', (['[2, 2.78364923694925, 4.1371512621094695, 6.603613786914487, 11.383853535021142\n ]'], {}), '([2, 2.78364923694925, 4.1371512621094695, 6.603613786914487, \n 11.383853535021142])\n', (1594, 1681), True, 'import numpy as np\n'), ((1685, 1706), 'numpy.allclose', 'np.allclose', (['gl6', 'ref'], {}), '(gl6, ref)\n', (1696, 1706), True, 'import numpy as np\n')] |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from netaddr import IPAddress
from netaddr import IPNetwork
import testtools
import uuid
from testtools.matchers import ContainsDict
from testtools.matchers import Equals
from tempest.api.network import test_floating_ips
from tempest.common.utils import net_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
from tempest.test import decorators
from nuage_tempest_plugin.lib.features import NUAGE_FEATURES
from nuage_tempest_plugin.lib.topology import Topology
from nuage_tempest_plugin.lib.utils import constants
from nuage_tempest_plugin.services.nuage_client import NuageRestClient
CONF = Topology.get_conf()
class FloatingIPTestJSONNuage(test_floating_ips.FloatingIPTestJSON):
_interface = 'json'
@classmethod
def setup_clients(cls):
super(FloatingIPTestJSONNuage, cls).setup_clients()
cls.nuage_client = NuageRestClient()
@classmethod
def resource_setup(cls):
super(FloatingIPTestJSONNuage, cls).resource_setup()
# Creating two more ports which will be added in VSD
for i in range(2):
post_body = {
"device_owner": "compute:None", "device_id": str(uuid.uuid1())}
if CONF.network.port_vnic_type:
post_body['binding:vnic_type'] = CONF.network.port_vnic_type
if CONF.network.port_profile:
post_body['binding:profile'] = CONF.network.port_profile
port = cls.create_port(cls.network, **post_body)
cls.ports.append(port)
def _verify_fip_on_vsd(self, created_floating_ip,
router_id, port_id, subnet_id, associated=True):
# verifying on Domain level that the floating ip is added
nuage_domain = self.nuage_client.get_l3domain(
filters='externalID',
filter_value=router_id)
nuage_domain_fip = self.nuage_client.get_floatingip(
constants.DOMAIN, nuage_domain[0]['ID'])
if associated:
# verifying on vminterface level that the floating ip is associated
vsd_subnets = self.nuage_client.get_domain_subnet(
None, None, 'externalID', subnet_id)
nuage_vport = self.nuage_client.get_vport(constants.SUBNETWORK,
vsd_subnets[0]['ID'],
'externalID',
port_id)
validation = False
for fip in nuage_domain_fip:
if (fip['address'] ==
created_floating_ip['floating_ip_address'] and
nuage_vport[0]['associatedFloatingIPID'] == fip['ID']):
validation = True
error_message = ("FIP IP on OpenStack " +
created_floating_ip['floating_ip_address'] +
" does not match VSD FIP IP" + " (OR) FIP is not"
" associated to the port" + port_id + " on VSD")
self.assertTrue(validation, msg=error_message)
else:
vsd_fip_list = [fip['address'] for fip in nuage_domain_fip]
self.assertNotIn(created_floating_ip['floating_ip_address'],
vsd_fip_list)
@decorators.attr(type='smoke')
def test_create_list_show_update_delete_floating_ip(self):
# Creates a floating IP
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=self.ports[2]['id'])
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['tenant_id'])
self.assertIsNotNone(created_floating_ip['floating_ip_address'])
self.assertEqual(created_floating_ip['port_id'], self.ports[2]['id'])
self.assertEqual(created_floating_ip['floating_network_id'],
self.ext_net_id)
self.assertIn(created_floating_ip['fixed_ip_address'],
[ip['ip_address'] for ip in self.ports[2]['fixed_ips']])
# Verifies the details of a floating_ip
floating_ip = self.floating_ips_client.show_floatingip(
created_floating_ip['id'])
shown_floating_ip = floating_ip['floatingip']
self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
self.assertEqual(shown_floating_ip['floating_network_id'],
self.ext_net_id)
self.assertEqual(shown_floating_ip['tenant_id'],
created_floating_ip['tenant_id'])
self.assertEqual(shown_floating_ip['floating_ip_address'],
created_floating_ip['floating_ip_address'])
self.assertEqual(shown_floating_ip['port_id'], self.ports[2]['id'])
# VSD Validation
self._verify_fip_on_vsd(
created_floating_ip, created_floating_ip['router_id'],
self.ports[2]['id'], self.subnet['id'], True)
# Verify the floating ip exists in the list of all floating_ips
floating_ips = self.floating_ips_client.list_floatingips()
floatingip_id_list = list()
for f in floating_ips['floatingips']:
floatingip_id_list.append(f['id'])
self.assertIn(created_floating_ip['id'], floatingip_id_list)
# Disassociate floating IP from the port
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=None)
updated_floating_ip = floating_ip['floatingip']
self.assertIsNone(updated_floating_ip['port_id'])
self.assertIsNone(updated_floating_ip['fixed_ip_address'])
self.assertIsNone(updated_floating_ip['router_id'])
# Associate floating IP to the other port
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=self.ports[3]['id'])
updated_floating_ip = floating_ip['floatingip']
self.assertEqual(updated_floating_ip['port_id'], self.ports[3]['id'])
self.assertEqual(updated_floating_ip['fixed_ip_address'],
self.ports[3]['fixed_ips'][0]['ip_address'])
self.assertEqual(updated_floating_ip['router_id'], self.router['id'])
# VSD Validation
self._verify_fip_on_vsd(
created_floating_ip, created_floating_ip['router_id'],
self.ports[3]['id'], self.subnet['id'], True)
# Disassociate floating IP from the port
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=None)
updated_floating_ip = floating_ip['floatingip']
self.assertIsNone(updated_floating_ip['port_id'])
self.assertIsNone(updated_floating_ip['fixed_ip_address'])
self.assertIsNone(updated_floating_ip['router_id'])
# VSD Validation
self._verify_fip_on_vsd(
created_floating_ip, self.router['id'], None, None, False)
@decorators.attr(type='smoke')
def test_create_update_floating_ip(self):
# Creates a floating IP
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=self.ports[2]['id'])
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['tenant_id'])
self.assertIsNotNone(created_floating_ip['floating_ip_address'])
self.assertEqual(created_floating_ip['port_id'], self.ports[2]['id'])
self.assertEqual(created_floating_ip['floating_network_id'],
self.ext_net_id)
self.assertIn(created_floating_ip['fixed_ip_address'],
[ip['ip_address'] for ip in self.ports[2]['fixed_ips']])
# Verifies the details of a floating_ip
floating_ip = self.floating_ips_client.show_floatingip(
created_floating_ip['id'])
shown_floating_ip = floating_ip['floatingip']
self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
self.assertEqual(shown_floating_ip['floating_network_id'],
self.ext_net_id)
self.assertEqual(shown_floating_ip['tenant_id'],
created_floating_ip['tenant_id'])
self.assertEqual(shown_floating_ip['floating_ip_address'],
created_floating_ip['floating_ip_address'])
self.assertEqual(shown_floating_ip['port_id'], self.ports[2]['id'])
# VSD Validation
self._verify_fip_on_vsd(
created_floating_ip, created_floating_ip['router_id'],
self.ports[2]['id'], self.subnet['id'], True)
# Verify the floating ip exists in the list of all floating_ips
floating_ips = self.floating_ips_client.list_floatingips()
floatingip_id_list = list()
for f in floating_ips['floatingips']:
floatingip_id_list.append(f['id'])
self.assertIn(created_floating_ip['id'], floatingip_id_list)
if Topology.from_openstack('Newton') and Topology.is_ml2:
self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=self.ports[3]['id'])
updated_floating_ip = self.floating_ips_client.show_floatingip(
created_floating_ip['id'])['floatingip']
self.assertEqual(updated_floating_ip['port_id'],
self.ports[3]['id'])
self._verify_fip_on_vsd(
updated_floating_ip, updated_floating_ip['router_id'],
self.ports[3]['id'], self.subnet['id'], True)
else:
# Associate floating IP to the other port
self.assertRaises(exceptions.ServerFault,
self.floating_ips_client.update_floatingip,
created_floating_ip['id'],
port_id=self.ports[3]['id'])
@decorators.attr(type='smoke')
def test_floating_ip_delete_port(self):
# Create a floating IP
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id)
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
# Create a port
post_body = {
"device_owner": "compute:None", "device_id": str(uuid.uuid1())}
port = self.ports_client.create_port(
network_id=self.network['id'], **post_body)
created_port = port['port']
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=created_port['id'])
self.assertIsNotNone(floating_ip)
# VSD Validation
self._verify_fip_on_vsd(created_floating_ip, self.router['id'],
created_port['id'], self.subnet['id'],
True)
# Delete port
self.ports_client.delete_port(created_port['id'])
# Verifies the details of the floating_ip
floating_ip = self.floating_ips_client.show_floatingip(
created_floating_ip['id'])
shown_floating_ip = floating_ip['floatingip']
# Confirm the fields are back to None
self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
self.assertIsNone(shown_floating_ip['port_id'])
self.assertIsNone(shown_floating_ip['fixed_ip_address'])
self.assertIsNone(shown_floating_ip['router_id'])
def test_floating_ip_update_different_router(self):
# Associate a floating IP to a port on a router
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=self.ports[3]['id'])
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertEqual(created_floating_ip['router_id'], self.router['id'])
# VSD Validation
self._verify_fip_on_vsd(
created_floating_ip, created_floating_ip['router_id'],
self.ports[3]['id'], self.subnet['id'], True)
network2 = self.create_network()
subnet2 = self.create_subnet(network2)
router2 = self.create_router(data_utils.rand_name('router-'),
external_network_id=self.ext_net_id)
self.create_router_interface(router2['id'], subnet2['id'])
post_body = {
"device_owner": "compute:None", "device_id": str(uuid.uuid1())}
port_other_router = self.create_port(network2, **post_body)
self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=port_other_router['id'])
updated_floating_ip = self.floating_ips_client.show_floatingip(
created_floating_ip['id'])['floatingip']
self.assertEqual(updated_floating_ip['port_id'],
port_other_router['id'])
self._verify_fip_on_vsd(
updated_floating_ip, updated_floating_ip['router_id'],
port_other_router['id'], subnet2['id'], True)
@testtools.skipIf(Topology.before_nuage('5.4'), 'Unsupported pre-5.4')
def test_floating_ip_disassociate_delete_router_associate(self):
# Create topology
network = self.create_network()
subnet = self.create_subnet(network)
router = self.create_router(data_utils.rand_name('router-'),
external_network_id=self.ext_net_id)
self.create_router_interface(router['id'], subnet['id'])
post_body = {
"device_owner": "compute:None", "device_id": str(uuid.uuid1())}
port1 = self.create_port(network, **post_body)
# Associate a floating IP to a port on a router
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=port1['id'])
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertEqual(created_floating_ip['router_id'], router['id'])
# VSD Validation
self._verify_fip_on_vsd(
created_floating_ip, created_floating_ip['router_id'],
port1['id'], subnet['id'], True)
# Disassociate fip from port
self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=None)
# Delete existing router
self.delete_router(router)
# Associate to second router
network2 = self.create_network()
subnet2 = self.create_subnet(network2)
router2 = self.create_router(data_utils.rand_name('router-'),
external_network_id=self.ext_net_id)
self.create_router_interface(router2['id'], subnet2['id'])
post_body = {
"device_owner": "compute:None", "device_id": str(uuid.uuid1())}
port_other_router = self.create_port(network2, **post_body)
self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=port_other_router['id'])
updated_floating_ip = self.floating_ips_client.show_floatingip(
created_floating_ip['id'])['floatingip']
self.assertEqual(updated_floating_ip['port_id'],
port_other_router['id'])
self._verify_fip_on_vsd(
updated_floating_ip, updated_floating_ip['router_id'],
port_other_router['id'], subnet2['id'], True)
@decorators.attr(type='smoke')
def test_create_floating_ip_specifying_a_fixed_ip_address(self):
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=self.ports[3]['id'],
fixed_ip_address=self.ports[3]['fixed_ips'][0]['ip_address'])
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertEqual(created_floating_ip['fixed_ip_address'],
self.ports[3]['fixed_ips'][0]['ip_address'])
# VSD validation
self._verify_fip_on_vsd(
created_floating_ip, created_floating_ip['router_id'],
self.ports[3]['id'], self.subnet['id'], True)
floating_ip = self.floating_ips_client.update_floatingip(
created_floating_ip['id'],
port_id=None)
self.assertIsNone(floating_ip['floatingip']['port_id'])
# VSD Validation
self._verify_fip_on_vsd(
created_floating_ip, self.router['id'], None, None, False)
@decorators.attr(type='smoke')
def test_create_update_floatingip_with_port_multiple_ip_address(self):
# TODO(Team) Adapt once we are on 5.3.2
# Find out ips that can be used for tests
list_ips = net_utils.get_unused_ip_addresses(
self.ports_client,
self.subnets_client,
self.subnet['network_id'],
self.subnet['id'],
2)
fixed_ips = [{'ip_address': list_ips[0]}, {'ip_address': list_ips[1]}]
# Create port
body = self.ports_client.create_port(network_id=self.network['id'],
fixed_ips=fixed_ips)
port = body['port']
self.addCleanup(self.ports_client.delete_port, port['id'])
# Create floating ip
self.assertRaises(exceptions.BadRequest,
self.floating_ips_client.create_floatingip,
floating_network_id=self.ext_net_id,
port_id=port['id'],
fixed_ip_address=list_ips[0])
@decorators.attr(type='smoke')
def test_create_floatingip_with_rate_limiting(self):
rate_limit = 10
# Create port
post_body = {"network_id": self.network['id']}
body = self.ports_client.create_port(**post_body)
port = body['port']
self.addCleanup(self.ports_client.delete_port, port['id'])
# Associate a fip to the port
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=port['id'],
fixed_ip_address=port['fixed_ips'][0]['ip_address'],
nuage_fip_rate=rate_limit)
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
fip_id = created_floating_ip['id']
body = self.floating_ips_client.show_floatingip(fip_id)
fip = body['floatingip']
if NUAGE_FEATURES.bidirectional_fip_rate_limit:
# rate_limit is in kbps now!
self.assertThat(fip, ContainsDict(
{'nuage_ingress_fip_rate_kbps': Equals(-1)}))
self.assertThat(fip, ContainsDict(
{'nuage_egress_fip_rate_kbps': Equals(rate_limit * 1000)}))
# attribute 'nuage_fip_rate' is no longer in response
self.assertIsNone(fip.get('nuage_fip_rate'))
else:
self.assertThat(fip, ContainsDict(
{'nuage_fip_rate': Equals(str(rate_limit))}))
# Check vsd
vsd_subnets = self.nuage_client.get_domain_subnet(
None, None, 'externalID', self.subnet['id'])
self.assertEqual(1, len(vsd_subnets))
vports = self.nuage_client.get_vport(constants.SUBNETWORK,
vsd_subnets[0]['ID'],
'externalID',
port['id'])
self.assertEqual(1, len(vports))
qos = self.nuage_client.get_qos(constants.VPORT, vports[0]['ID'])
self.assertEqual(1, len(qos))
self.assertThat(qos[0], ContainsDict(
{'externalID':
Equals(self.nuage_client.get_vsd_external_id(fip_id))}))
self.assertThat(qos[0], ContainsDict(
{'FIPRateLimitingActive': Equals(True)}))
self.assertThat(qos[0], ContainsDict(
{'FIPPeakInformationRate': Equals(str(rate_limit))}))
self.assertThat(qos[0], ContainsDict(
{'FIPPeakBurstSize': Equals(str(100))}))
if NUAGE_FEATURES.bidirectional_fip_rate_limit:
self.assertThat(qos[0], ContainsDict(
{'EgressFIPPeakInformationRate': Equals('INFINITY')}))
self.assertThat(qos[0], ContainsDict(
{'EgressFIPPeakBurstSize': Equals(str(100))}))
else:
self.assertEqual(str(rate_limit), qos[0]['FIPPeakInformationRate'])
@decorators.attr(type='smoke')
def test_create_floatingip_without_rate_limiting(self):
# Create port
post_body = {"network_id": self.network['id']}
body = self.ports_client.create_port(**post_body)
port = body['port']
self.addCleanup(self.ports_client.delete_port, port['id'])
# Associate a fip to the port
body = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=port['id'],
fixed_ip_address=port['fixed_ips'][0]['ip_address'])
created_floating_ip = body['floatingip']
self.addCleanup(self.floating_ips_client.delete_floatingip,
created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
fip_id = created_floating_ip['id']
body = self.floating_ips_client.show_floatingip(fip_id)
fip = body['floatingip']
if NUAGE_FEATURES.bidirectional_fip_rate_limit:
self.assertIsNotNone(fip.get('nuage_ingress_fip_rate_kbps'))
self.assertIsNotNone(fip.get('nuage_egress_fip_rate_kbps'))
else:
os_fip_rate = fip.get('nuage_fip_rate')
self.assertIsNotNone(os_fip_rate)
# Check vsd
vsd_subnets = self.nuage_client.get_domain_subnet(
None, None, 'externalID', self.subnet['id'])
self.assertEqual(1, len(vsd_subnets))
vports = self.nuage_client.get_vport(constants.SUBNETWORK,
vsd_subnets[0]['ID'],
'externalID',
port['id'])
self.assertEqual(1, len(vports))
qos = self.nuage_client.get_qos(constants.VPORT, vports[0]['ID'])
self.assertEqual(1, len(qos))
self.assertEqual(self.nuage_client.get_vsd_external_id(fip_id),
qos[0]['externalID'])
self.assertEqual(True, qos[0]['FIPRateLimitingActive'])
if NUAGE_FEATURES.bidirectional_fip_rate_limit:
self.assertEqual('INFINITY', qos[0]['FIPPeakInformationRate'])
self.assertEqual('INFINITY',
qos[0]['EgressFIPPeakInformationRate'])
else:
self.assertEqual('INFINITY', qos[0]['FIPPeakInformationRate'])
@decorators.attr(type='smoke')
def test_delete_associated_port_fip_cleanup(self):
port = self.create_port(self.network)
fip = self.floating_ips_client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=port['id'])['floatingip']
self.ports_client.delete_port(port['id'])
self.floating_ips_client.delete_floatingip(fip['id'])
vsd_l3domain = self.nuage_client.get_l3domain(
filters='externalID',
filter_value=fip['router_id'])
vsd_fips = self.nuage_client.get_floatingip(
constants.DOMAIN, vsd_l3domain[0]['ID'])
for vsd_fip in vsd_fips:
if vsd_fip['address'] == fip['floating_ip_address']:
self.fail("No cleanup happened. Floatingip still exists on "
"VSD and not in Neutron.")
@decorators.attr(type='smoke')
def test_fip_on_multiple_ip_port(self):
network = self.create_network()
self.assertIsNotNone(network, "Unable to create network")
subnet = self.create_subnet(network)
self.assertIsNotNone(subnet, "Unable to create subnet")
router = self.create_router(
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(router, "Unable to create router")
self.create_router_interface(router_id=router["id"],
subnet_id=subnet["id"])
# 1. Assigning fip to port with multiple ip address
cidr4 = IPNetwork(CONF.network.project_network_cidr)
port_args = {
'fixed_ips': [
{'subnet_id': subnet['id'],
'ip_address': str(IPAddress(cidr4.first) + 4)},
{'subnet_id': subnet['id'],
'ip_address': str(IPAddress(cidr4.first) + 5)},
{'subnet_id': subnet['id'],
'ip_address': str(IPAddress(cidr4.first) + 6)},
{'subnet_id': subnet['id'],
'ip_address': str(IPAddress(cidr4.first) + 7)}],
}
port = self.create_port(network=network, **port_args)
floating_ip = self.create_floatingip(
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(floating_ip, "Unabe to create floating ip")
msg = 'floating ip cannot be associated to port %s ' \
'because it has multiple ipv4 or multiple ipv6ips' % port['id']
self.assertRaisesRegex(exceptions.BadRequest,
msg,
self.floating_ips_client.update_floatingip,
floating_ip['id'],
port_id=port['id'])
# 2. Assigning multiple ip address to a port with fip
port = self.create_port(network=network)
floating_ip = self.create_floatingip(
external_network_id=CONF.network.public_network_id)
self.assertIsNotNone(floating_ip, "Unable to create floating ip")
self.floating_ips_client.update_floatingip(
floating_ip['id'], port_id=port['id'])
port_args = {
'fixed_ips': [
{'subnet_id': subnet['id'],
'ip_address': str(IPAddress(cidr4.first) + 8)},
{'subnet_id': subnet['id'],
'ip_address': str(IPAddress(cidr4.first) + 9)}]}
msg = ("It is not possible to add multiple ipv4 or multiple ipv6"
" addresses on port {} since it has fip {} associated"
"to it.").format(port['id'], floating_ip['id'])
self.assertRaisesRegex(exceptions.BadRequest,
msg,
self.update_port,
port=port, **port_args)
| [
"nuage_tempest_plugin.lib.topology.Topology.get_conf",
"tempest.test.decorators.attr",
"testtools.matchers.Equals",
"uuid.uuid1",
"netaddr.IPAddress",
"tempest.common.utils.net_utils.get_unused_ip_addresses",
"tempest.lib.common.utils.data_utils.rand_name",
"nuage_tempest_plugin.services.nuage_client.... | [((1270, 1289), 'nuage_tempest_plugin.lib.topology.Topology.get_conf', 'Topology.get_conf', ([], {}), '()\n', (1287, 1289), False, 'from nuage_tempest_plugin.lib.topology import Topology\n'), ((3961, 3990), 'tempest.test.decorators.attr', 'decorators.attr', ([], {'type': '"""smoke"""'}), "(type='smoke')\n", (3976, 3990), False, 'from tempest.test import decorators\n'), ((7878, 7907), 'tempest.test.decorators.attr', 'decorators.attr', ([], {'type': '"""smoke"""'}), "(type='smoke')\n", (7893, 7907), False, 'from tempest.test import decorators\n'), ((11012, 11041), 'tempest.test.decorators.attr', 'decorators.attr', ([], {'type': '"""smoke"""'}), "(type='smoke')\n", (11027, 11041), False, 'from tempest.test import decorators\n'), ((16808, 16837), 'tempest.test.decorators.attr', 'decorators.attr', ([], {'type': '"""smoke"""'}), "(type='smoke')\n", (16823, 16837), False, 'from tempest.test import decorators\n'), ((18005, 18034), 'tempest.test.decorators.attr', 'decorators.attr', ([], {'type': '"""smoke"""'}), "(type='smoke')\n", (18020, 18034), False, 'from tempest.test import decorators\n'), ((19068, 19097), 'tempest.test.decorators.attr', 'decorators.attr', ([], {'type': '"""smoke"""'}), "(type='smoke')\n", (19083, 19097), False, 'from tempest.test import decorators\n'), ((22067, 22096), 'tempest.test.decorators.attr', 'decorators.attr', ([], {'type': '"""smoke"""'}), "(type='smoke')\n", (22082, 22096), False, 'from tempest.test import decorators\n'), ((24417, 24446), 'tempest.test.decorators.attr', 'decorators.attr', ([], {'type': '"""smoke"""'}), "(type='smoke')\n", (24432, 24446), False, 'from tempest.test import decorators\n'), ((25286, 25315), 'tempest.test.decorators.attr', 'decorators.attr', ([], {'type': '"""smoke"""'}), "(type='smoke')\n", (25301, 25315), False, 'from tempest.test import decorators\n'), ((1518, 1535), 'nuage_tempest_plugin.services.nuage_client.NuageRestClient', 'NuageRestClient', ([], {}), '()\n', (1533, 1535), False, 'from nuage_tempest_plugin.services.nuage_client import NuageRestClient\n'), ((14345, 14373), 'nuage_tempest_plugin.lib.topology.Topology.before_nuage', 'Topology.before_nuage', (['"""5.4"""'], {}), "('5.4')\n", (14366, 14373), False, 'from nuage_tempest_plugin.lib.topology import Topology\n'), ((18227, 18353), 'tempest.common.utils.net_utils.get_unused_ip_addresses', 'net_utils.get_unused_ip_addresses', (['self.ports_client', 'self.subnets_client', "self.subnet['network_id']", "self.subnet['id']", '(2)'], {}), "(self.ports_client, self.subnets_client,\n self.subnet['network_id'], self.subnet['id'], 2)\n", (18260, 18353), False, 'from tempest.common.utils import net_utils\n'), ((25971, 26015), 'netaddr.IPNetwork', 'IPNetwork', (['CONF.network.project_network_cidr'], {}), '(CONF.network.project_network_cidr)\n', (25980, 26015), False, 'from netaddr import IPNetwork\n'), ((10081, 10114), 'nuage_tempest_plugin.lib.topology.Topology.from_openstack', 'Topology.from_openstack', (['"""Newton"""'], {}), "('Newton')\n", (10104, 10114), False, 'from nuage_tempest_plugin.lib.topology import Topology\n'), ((13455, 13486), 'tempest.lib.common.utils.data_utils.rand_name', 'data_utils.rand_name', (['"""router-"""'], {}), "('router-')\n", (13475, 13486), False, 'from tempest.lib.common.utils import data_utils\n'), ((14614, 14645), 'tempest.lib.common.utils.data_utils.rand_name', 'data_utils.rand_name', (['"""router-"""'], {}), "('router-')\n", (14634, 14645), False, 'from tempest.lib.common.utils import data_utils\n'), ((15935, 15966), 'tempest.lib.common.utils.data_utils.rand_name', 'data_utils.rand_name', (['"""router-"""'], {}), "('router-')\n", (15955, 15966), False, 'from tempest.lib.common.utils import data_utils\n'), ((11500, 11512), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (11510, 11512), False, 'import uuid\n'), ((13712, 13724), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (13722, 13724), False, 'import uuid\n'), ((14868, 14880), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (14878, 14880), False, 'import uuid\n'), ((16192, 16204), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (16202, 16204), False, 'import uuid\n'), ((1824, 1836), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1834, 1836), False, 'import uuid\n'), ((21449, 21461), 'testtools.matchers.Equals', 'Equals', (['(True)'], {}), '(True)\n', (21455, 21461), False, 'from testtools.matchers import Equals\n'), ((20251, 20261), 'testtools.matchers.Equals', 'Equals', (['(-1)'], {}), '(-1)\n', (20257, 20261), False, 'from testtools.matchers import Equals\n'), ((20359, 20384), 'testtools.matchers.Equals', 'Equals', (['(rate_limit * 1000)'], {}), '(rate_limit * 1000)\n', (20365, 20384), False, 'from testtools.matchers import Equals\n'), ((21832, 21850), 'testtools.matchers.Equals', 'Equals', (['"""INFINITY"""'], {}), "('INFINITY')\n", (21838, 21850), False, 'from testtools.matchers import Equals\n'), ((26144, 26166), 'netaddr.IPAddress', 'IPAddress', (['cidr4.first'], {}), '(cidr4.first)\n', (26153, 26166), False, 'from netaddr import IPAddress\n'), ((26253, 26275), 'netaddr.IPAddress', 'IPAddress', (['cidr4.first'], {}), '(cidr4.first)\n', (26262, 26275), False, 'from netaddr import IPAddress\n'), ((26362, 26384), 'netaddr.IPAddress', 'IPAddress', (['cidr4.first'], {}), '(cidr4.first)\n', (26371, 26384), False, 'from netaddr import IPAddress\n'), ((26471, 26493), 'netaddr.IPAddress', 'IPAddress', (['cidr4.first'], {}), '(cidr4.first)\n', (26480, 26493), False, 'from netaddr import IPAddress\n'), ((27692, 27714), 'netaddr.IPAddress', 'IPAddress', (['cidr4.first'], {}), '(cidr4.first)\n', (27701, 27714), False, 'from netaddr import IPAddress\n'), ((27801, 27823), 'netaddr.IPAddress', 'IPAddress', (['cidr4.first'], {}), '(cidr4.first)\n', (27810, 27823), False, 'from netaddr import IPAddress\n')] |
from typing import Optional
from pisat.util.platform import is_raspberry_pi
from pisat.handler.digital_input_handler_base import DigitalInputHandlerBase
if is_raspberry_pi():
import pigpio
class PigpioDigitalInputHandler(DigitalInputHandlerBase):
def __init__(self,
pi,
pin: int,
pullup: bool = False,
pulldown: bool = False,
name: Optional[str] = None) -> None:
self._pi: pigpio.pi = pi
self._pi.set_mode(pin, pigpio.INPUT)
super().__init__(pin, pullup=pullup, pulldown=pulldown, name=name)
def set_pull_up_down(self, pulldown: bool = False) -> None:
if pulldown:
self._pi.set_pull_up_down(self._pin, pigpio.PUD_DOWN)
else:
self._pi.set_pull_up_down(self._pin, pigpio.PUD_UP)
def clear_pull_up_down(self) -> None:
self._pi.set_pull_up_down(self._pin, pigpio.PUD_DOWN)
def observe(self) -> bool:
return bool(self._pi.read(self._pin))
| [
"pisat.util.platform.is_raspberry_pi"
] | [((159, 176), 'pisat.util.platform.is_raspberry_pi', 'is_raspberry_pi', ([], {}), '()\n', (174, 176), False, 'from pisat.util.platform import is_raspberry_pi\n')] |
# system
import os
from enum import Enum
# lib
import numpy as np
class GloVeSize(Enum):
tiny = 50
small = 100
medium = 200
large = 300
__DEFAULT_SIZE = GloVeSize.small
def get_pretrained_embedding_matrix(word_to_index,
vocab_size=10000,
glove_dir="./bin/GloVe",
use_cache_if_present=True,
cache_if_computed=True,
cache_dir='./bin/cache',
size=__DEFAULT_SIZE,
verbose=1):
"""
get pre-trained word embeddings from GloVe: https://github.com/stanfordnlp/GloVe
:param word_to_index: a word to index map of the corpus
:param vocab_size: the vocab size
:param glove_dir: the dir of glove
:param use_cache_if_present: whether to use a cached weight file if present
:param cache_if_computed: whether to cache the result if re-computed
:param cache_dir: the directory of the project's cache
:param size: an enumerated choice of GloVeSize
:param verbose: the verbosity level of logging
:return: a matrix of the embeddings
"""
def vprint(*args, with_arrow=True):
if verbose > 0:
if with_arrow:
print(">>", *args)
else:
print(*args)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_path = os.path.join(cache_dir, 'glove_%d_embedding_matrix.npy' % size.value)
if use_cache_if_present and os.path.isfile(cache_path):
return np.load(cache_path)
else:
vprint('computing embeddings', with_arrow=True)
embeddings_index = {}
size_value = size.value
f = open(os.path.join(glove_dir, 'glove.6B.' + str(size_value) + 'd.txt'),
encoding="ascii", errors='ignore')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
vprint('Found', len(embeddings_index), 'word vectors.')
embedding_matrix = np.random.normal(size=(vocab_size, size.value))
non = 0
for word, index in word_to_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
else:
non += 1
vprint(non, "words did not have mappings")
vprint(with_arrow=False)
if cache_if_computed:
np.save(cache_path, embedding_matrix)
return embedding_matrix
| [
"numpy.random.normal",
"os.path.exists",
"os.makedirs",
"os.path.join",
"numpy.asarray",
"os.path.isfile",
"numpy.load",
"numpy.save"
] | [((1503, 1572), 'os.path.join', 'os.path.join', (['cache_dir', "('glove_%d_embedding_matrix.npy' % size.value)"], {}), "(cache_dir, 'glove_%d_embedding_matrix.npy' % size.value)\n", (1515, 1572), False, 'import os\n'), ((1427, 1452), 'os.path.exists', 'os.path.exists', (['cache_dir'], {}), '(cache_dir)\n', (1441, 1452), False, 'import os\n'), ((1462, 1484), 'os.makedirs', 'os.makedirs', (['cache_dir'], {}), '(cache_dir)\n', (1473, 1484), False, 'import os\n'), ((1605, 1631), 'os.path.isfile', 'os.path.isfile', (['cache_path'], {}), '(cache_path)\n', (1619, 1631), False, 'import os\n'), ((1648, 1667), 'numpy.load', 'np.load', (['cache_path'], {}), '(cache_path)\n', (1655, 1667), True, 'import numpy as np\n'), ((2232, 2279), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(vocab_size, size.value)'}), '(size=(vocab_size, size.value))\n', (2248, 2279), True, 'import numpy as np\n'), ((2038, 2077), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (2048, 2077), True, 'import numpy as np\n'), ((2680, 2717), 'numpy.save', 'np.save', (['cache_path', 'embedding_matrix'], {}), '(cache_path, embedding_matrix)\n', (2687, 2717), True, 'import numpy as np\n')] |
import numpy as np
arr = np.array([[2, 5], [1, 3]])
arr_inv = np.linalg.inv(arr)
print(arr_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat = np.matrix([[2, 5], [1, 3]])
mat_inv = np.linalg.inv(mat)
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat_inv = mat**-1
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat_inv = mat.I
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
result = mat * mat.I
print(result)
# [[1. 0.]
# [0. 1.]]
# print(arr.I)
# AttributeError: 'numpy.ndarray' object has no attribute 'I'
arr_s = np.array([[0, 0], [1, 3]])
# print(np.linalg.inv(arr_s))
# LinAlgError: Singular matrix
arr_pinv = np.linalg.pinv(arr_s)
print(arr_pinv)
# [[0. 0.1]
# [0. 0.3]]
print(arr_s @ arr_inv)
# [[0. 0.]
# [0. 1.]]
print(np.linalg.pinv(arr_pinv))
# [[0. 0.]
# [1. 3.]]
print(np.linalg.inv(arr))
# [[ 3. -5.]
# [-1. 2.]]
print(np.linalg.pinv(arr))
# [[ 3. -5.]
# [-1. 2.]]
mat_s = np.mat([[0, 0], [1, 3]])
# print(np.linalg.inv(mat_s))
# LinAlgError: Singular matrix
# print(mat_s**-1)
# LinAlgError: Singular matrix
# print(mat_s.I)
# LinAlgError: Singular matrix
print(np.linalg.pinv(mat_s))
# [[0. 0.1]
# [0. 0.3]]
| [
"numpy.mat",
"numpy.linalg.pinv",
"numpy.array",
"numpy.linalg.inv",
"numpy.matrix"
] | [((26, 52), 'numpy.array', 'np.array', (['[[2, 5], [1, 3]]'], {}), '([[2, 5], [1, 3]])\n', (34, 52), True, 'import numpy as np\n'), ((64, 82), 'numpy.linalg.inv', 'np.linalg.inv', (['arr'], {}), '(arr)\n', (77, 82), True, 'import numpy as np\n'), ((132, 159), 'numpy.matrix', 'np.matrix', (['[[2, 5], [1, 3]]'], {}), '([[2, 5], [1, 3]])\n', (141, 159), True, 'import numpy as np\n'), ((171, 189), 'numpy.linalg.inv', 'np.linalg.inv', (['mat'], {}), '(mat)\n', (184, 189), True, 'import numpy as np\n'), ((498, 524), 'numpy.array', 'np.array', (['[[0, 0], [1, 3]]'], {}), '([[0, 0], [1, 3]])\n', (506, 524), True, 'import numpy as np\n'), ((599, 620), 'numpy.linalg.pinv', 'np.linalg.pinv', (['arr_s'], {}), '(arr_s)\n', (613, 620), True, 'import numpy as np\n'), ((885, 909), 'numpy.mat', 'np.mat', (['[[0, 0], [1, 3]]'], {}), '([[0, 0], [1, 3]])\n', (891, 909), True, 'import numpy as np\n'), ((718, 742), 'numpy.linalg.pinv', 'np.linalg.pinv', (['arr_pinv'], {}), '(arr_pinv)\n', (732, 742), True, 'import numpy as np\n'), ((774, 792), 'numpy.linalg.inv', 'np.linalg.inv', (['arr'], {}), '(arr)\n', (787, 792), True, 'import numpy as np\n'), ((828, 847), 'numpy.linalg.pinv', 'np.linalg.pinv', (['arr'], {}), '(arr)\n', (842, 847), True, 'import numpy as np\n'), ((1079, 1100), 'numpy.linalg.pinv', 'np.linalg.pinv', (['mat_s'], {}), '(mat_s)\n', (1093, 1100), True, 'import numpy as np\n')] |
# *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See NOTICE file for details.
#
# *****************************************************************************
import _jpype
import jpype
from jpype.types import *
from jpype import java
import common
try:
import numpy as np
except ImportError:
pass
class CustomizerTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
self.fixture = JClass('jpype.common.Fixture')()
def testSticky(self):
@jpype.JImplementationFor("jpype.override.A")
class _A:
@jpype.JOverride(sticky=True, rename="remove_")
def remove(self, obj):
pass
A = jpype.JClass("jpype.override.A")
B = jpype.JClass("jpype.override.B")
self.assertEqual(A.remove, _A.remove)
self.assertEqual(B.remove, _A.remove)
self.assertEqual(str(A.remove_), "jpype.override.A.remove")
self.assertEqual(str(B.remove_), "jpype.override.B.remove")
| [
"jpype.JImplementationFor",
"common.JPypeTestCase.setUp",
"jpype.JClass",
"jpype.JOverride"
] | [((987, 1019), 'common.JPypeTestCase.setUp', 'common.JPypeTestCase.setUp', (['self'], {}), '(self)\n', (1013, 1019), False, 'import common\n'), ((1112, 1156), 'jpype.JImplementationFor', 'jpype.JImplementationFor', (['"""jpype.override.A"""'], {}), "('jpype.override.A')\n", (1136, 1156), False, 'import jpype\n'), ((1304, 1336), 'jpype.JClass', 'jpype.JClass', (['"""jpype.override.A"""'], {}), "('jpype.override.A')\n", (1316, 1336), False, 'import jpype\n'), ((1349, 1381), 'jpype.JClass', 'jpype.JClass', (['"""jpype.override.B"""'], {}), "('jpype.override.B')\n", (1361, 1381), False, 'import jpype\n'), ((1188, 1234), 'jpype.JOverride', 'jpype.JOverride', ([], {'sticky': '(True)', 'rename': '"""remove_"""'}), "(sticky=True, rename='remove_')\n", (1203, 1234), False, 'import jpype\n')] |
import os
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import logging
class Email(object):
def __init__(self):
self.user = os.getenv('SMTP_USER')
self.password = os.getenv('SMTP_PWD')
self.host = os.getenv('SMTP_HOST') or 'smtp.%s' % self.user.split('@')[-1]
self.port = os.getenv('SMTP_PORT')
self.ssl = os.getenv('SMTP_SSL')
def config(self, user, password, host=None, port=None, ssl=True):
self.user = user
self.password = password
self.host = host or 'smtp.%s' % self.user.split('@')[-1]
self.port = port
self.ssl = ssl
def test(self):
server = smtplib.SMTP_SSL(self.host, self.port) if self.ssl else smtplib.SMTP(self.host, self.port)
server.login(self.user, self.password)
print('test success')
def send(self, subject, receivers, body=None, html=None, template=None, attachments=None):
if not all([self.host, self.user, self.password]):
raise RuntimeError('Send no email for missing self.host,self.user or self.pwd')
if isinstance(receivers, str):
receivers = receivers.split(',')
if self.port and isinstance(self.port, str):
try:
self.port = int(self.port)
except Exception as ex:
logging.exception(ex)
self.port = None
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = self.user
msg['To'] = ','.join(receivers)
# handle email body --------------------
if body:
msg.attach(MIMEText(body, 'plain', 'utf-8'))
if html:
msg.attach(MIMEText(html, 'html', 'utf-8'))
if template:
if not os.path.isfile(template):
raise FileNotFoundError('Template file %s not found' % template)
with open(template, encoding='utf-8') as f:
msg.attach(MIMEText(f.read().strip(), 'html', 'utf-8'))
# handle attachments --------------------
if attachments:
if isinstance(attachments, str):
attachments = [attachments]
for file_path in attachments:
if os.path.isfile(file_path):
try:
att = MIMEText(open(file_path, 'rb').read(), 'base64', 'utf-8')
except Exception as ex:
logging.exception(ex)
else:
att['Content-Type'] = 'application/octet-stream'
att["Content-Disposition"] = f'attachment; filename={os.path.basename(file_path)}'
msg.attach(att)
# handle receivers --------------------
if isinstance(receivers, str):
if ',' in receivers:
receivers = [receiver.strip() for receiver in receivers.split(',')]
else:
receivers = [receivers]
try:
server = smtplib.SMTP_SSL(self.host, self.port) if self.ssl else smtplib.SMTP(self.host, self.port)
server.login(self.user, self.password)
server.sendmail(self.user, receivers, msg.as_string())
logging.info("Send email to %s done!" % ','.join(receivers))
except Exception as ex:
logging.exception(ex)
email = Email()
| [
"smtplib.SMTP",
"os.getenv",
"smtplib.SMTP_SSL",
"logging.exception",
"os.path.isfile",
"email.mime.multipart.MIMEMultipart",
"os.path.basename",
"email.mime.text.MIMEText"
] | [((191, 213), 'os.getenv', 'os.getenv', (['"""SMTP_USER"""'], {}), "('SMTP_USER')\n", (200, 213), False, 'import os\n'), ((238, 259), 'os.getenv', 'os.getenv', (['"""SMTP_PWD"""'], {}), "('SMTP_PWD')\n", (247, 259), False, 'import os\n'), ((363, 385), 'os.getenv', 'os.getenv', (['"""SMTP_PORT"""'], {}), "('SMTP_PORT')\n", (372, 385), False, 'import os\n'), ((405, 426), 'os.getenv', 'os.getenv', (['"""SMTP_SSL"""'], {}), "('SMTP_SSL')\n", (414, 426), False, 'import os\n'), ((1443, 1458), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (1456, 1458), False, 'from email.mime.multipart import MIMEMultipart\n'), ((280, 302), 'os.getenv', 'os.getenv', (['"""SMTP_HOST"""'], {}), "('SMTP_HOST')\n", (289, 302), False, 'import os\n'), ((707, 745), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', (['self.host', 'self.port'], {}), '(self.host, self.port)\n', (723, 745), False, 'import smtplib\n'), ((763, 797), 'smtplib.SMTP', 'smtplib.SMTP', (['self.host', 'self.port'], {}), '(self.host, self.port)\n', (775, 797), False, 'import smtplib\n'), ((1654, 1686), 'email.mime.text.MIMEText', 'MIMEText', (['body', '"""plain"""', '"""utf-8"""'], {}), "(body, 'plain', 'utf-8')\n", (1662, 1686), False, 'from email.mime.text import MIMEText\n'), ((1728, 1759), 'email.mime.text.MIMEText', 'MIMEText', (['html', '"""html"""', '"""utf-8"""'], {}), "(html, 'html', 'utf-8')\n", (1736, 1759), False, 'from email.mime.text import MIMEText\n'), ((1801, 1825), 'os.path.isfile', 'os.path.isfile', (['template'], {}), '(template)\n', (1815, 1825), False, 'import os\n'), ((2261, 2286), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (2275, 2286), False, 'import os\n'), ((3035, 3073), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', (['self.host', 'self.port'], {}), '(self.host, self.port)\n', (3051, 3073), False, 'import smtplib\n'), ((3091, 3125), 'smtplib.SMTP', 'smtplib.SMTP', (['self.host', 'self.port'], {}), '(self.host, self.port)\n', (3103, 3125), False, 'import smtplib\n'), ((3361, 3382), 'logging.exception', 'logging.exception', (['ex'], {}), '(ex)\n', (3378, 3382), False, 'import logging\n'), ((1373, 1394), 'logging.exception', 'logging.exception', (['ex'], {}), '(ex)\n', (1390, 1394), False, 'import logging\n'), ((2469, 2490), 'logging.exception', 'logging.exception', (['ex'], {}), '(ex)\n', (2486, 2490), False, 'import logging\n'), ((2667, 2694), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (2683, 2694), False, 'import os\n')] |
from flask import render_template, flash, redirect, url_for, request
from flask_login import login_user, logout_user, current_user, login_required
from werkzeug.urls import url_parse
from app import app, db
from app.models import User
from app.util import get_movie_info_min, get_movie_info, predict, set_preferences, get_preferences, create_connection
from app.forms import LoginForm, RegistrationForm
import pandas as pd
from app import Config
from random import randint
import json
import random
list1=[]
@app.route('/')
@login_required
def index():
result = get_preferences(user_id=current_user.id)
if len(result) < 1:
return redirect(url_for('calib'))
movies = get_movie_info_min(result)
return render_template('index.html', movies=movies)
@app.route('/movie', methods=['GET'])
@login_required
def movie():
list = []
for i in range(1,7):
list.append(randint(1,45296))
similar_movies = get_movie_info_min(list)
movie_id = request.args.get('id')
movie = get_movie_info(movie_id)
return render_template('movie.html', movie=movie, similar_movies=similar_movies)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
print(user)
if user is None or not user.check_password(form.password.data):
flash('Invalid email or password')
return redirect(url_for('login'))
login_user(user)
return redirect(url_for('index'))
return render_template('login.html', title='Sign In', form=form)
@app.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash('Congratulations, you are now a registered user!')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@login_required
@app.route('/calibrate',methods=['GET','POST'])
def calib():
global list1
movie_db = create_connection(Config.MOVIE_DATABASE_PATH)
data = pd.read_sql_query('SELECT distinct(id) from movlens', movie_db)
data=list(data['id'])
if request.method=='GET':
list1=random.sample(data,50)
movies = get_movie_info_min(list1)
return render_template('calib.html',movies=movies)
if request.method=='POST':
print('submited')
movshown=[]
movsel=[]
for i in list1:
movshown.append(i)
movsel.extend(request.form.getlist('sel'))
movsel=[int(i) for i in movsel]
print("\n\n",type(request.form['sel']),"\n\n")
print("\n\nmovshown: ",movshown,"\n\n")
print("\n\nmovsel: ",movsel,"\n\n")
movie_ids = predict(movshown,movsel)
set_preferences(user_id=current_user.id, movie_ids=movie_ids)
movies=get_movie_info_min(movie_ids[:100])
return redirect(url_for('index'))
| [
"flask.render_template",
"flask.request.args.get",
"app.db.session.commit",
"app.models.User",
"app.util.get_preferences",
"app.db.session.add",
"pandas.read_sql_query",
"flask.flash",
"app.app.route",
"random.randint",
"app.util.get_movie_info",
"app.forms.RegistrationForm",
"random.sample"... | [((513, 527), 'app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (522, 527), False, 'from app import app, db\n'), ((783, 819), 'app.app.route', 'app.route', (['"""/movie"""'], {'methods': "['GET']"}), "('/movie', methods=['GET'])\n", (792, 819), False, 'from app import app, db\n'), ((1142, 1162), 'app.app.route', 'app.route', (['"""/logout"""'], {}), "('/logout')\n", (1151, 1162), False, 'from app import app, db\n'), ((1235, 1279), 'app.app.route', 'app.route', (['"""/login"""'], {'methods': "['GET', 'POST']"}), "('/login', methods=['GET', 'POST'])\n", (1244, 1279), False, 'from app import app, db\n'), ((1821, 1868), 'app.app.route', 'app.route', (['"""/register"""'], {'methods': "['GET', 'POST']"}), "('/register', methods=['GET', 'POST'])\n", (1830, 1868), False, 'from app import app, db\n'), ((2381, 2429), 'app.app.route', 'app.route', (['"""/calibrate"""'], {'methods': "['GET', 'POST']"}), "('/calibrate', methods=['GET', 'POST'])\n", (2390, 2429), False, 'from app import app, db\n'), ((571, 611), 'app.util.get_preferences', 'get_preferences', ([], {'user_id': 'current_user.id'}), '(user_id=current_user.id)\n', (586, 611), False, 'from app.util import get_movie_info_min, get_movie_info, predict, set_preferences, get_preferences, create_connection\n'), ((698, 724), 'app.util.get_movie_info_min', 'get_movie_info_min', (['result'], {}), '(result)\n', (716, 724), False, 'from app.util import get_movie_info_min, get_movie_info, predict, set_preferences, get_preferences, create_connection\n'), ((736, 780), 'flask.render_template', 'render_template', (['"""index.html"""'], {'movies': 'movies'}), "('index.html', movies=movies)\n", (751, 780), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((948, 972), 'app.util.get_movie_info_min', 'get_movie_info_min', (['list'], {}), '(list)\n', (966, 972), False, 'from app.util import get_movie_info_min, get_movie_info, predict, set_preferences, get_preferences, create_connection\n'), ((994, 1016), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (1010, 1016), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((1029, 1053), 'app.util.get_movie_info', 'get_movie_info', (['movie_id'], {}), '(movie_id)\n', (1043, 1053), False, 'from app.util import get_movie_info_min, get_movie_info, predict, set_preferences, get_preferences, create_connection\n'), ((1065, 1138), 'flask.render_template', 'render_template', (['"""movie.html"""'], {'movie': 'movie', 'similar_movies': 'similar_movies'}), "('movie.html', movie=movie, similar_movies=similar_movies)\n", (1080, 1138), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((1181, 1194), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (1192, 1194), False, 'from flask_login import login_user, logout_user, current_user, login_required\n'), ((1384, 1395), 'app.forms.LoginForm', 'LoginForm', ([], {}), '()\n', (1393, 1395), False, 'from app.forms import LoginForm, RegistrationForm\n'), ((1760, 1817), 'flask.render_template', 'render_template', (['"""login.html"""'], {'title': '"""Sign In"""', 'form': 'form'}), "('login.html', title='Sign In', form=form)\n", (1775, 1817), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((1976, 1994), 'app.forms.RegistrationForm', 'RegistrationForm', ([], {}), '()\n', (1992, 1994), False, 'from app.forms import LoginForm, RegistrationForm\n'), ((2293, 2354), 'flask.render_template', 'render_template', (['"""register.html"""'], {'title': '"""Register"""', 'form': 'form'}), "('register.html', title='Register', form=form)\n", (2308, 2354), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((2474, 2519), 'app.util.create_connection', 'create_connection', (['Config.MOVIE_DATABASE_PATH'], {}), '(Config.MOVIE_DATABASE_PATH)\n', (2491, 2519), False, 'from app.util import get_movie_info_min, get_movie_info, predict, set_preferences, get_preferences, create_connection\n'), ((2531, 2594), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT distinct(id) from movlens"""', 'movie_db'], {}), "('SELECT distinct(id) from movlens', movie_db)\n", (2548, 2594), True, 'import pandas as pd\n'), ((1215, 1231), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (1222, 1231), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((1690, 1706), 'flask_login.login_user', 'login_user', (['user'], {}), '(user)\n', (1700, 1706), False, 'from flask_login import login_user, logout_user, current_user, login_required\n'), ((2044, 2071), 'app.models.User', 'User', ([], {'email': 'form.email.data'}), '(email=form.email.data)\n', (2048, 2071), False, 'from app.models import User\n'), ((2126, 2146), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (2140, 2146), False, 'from app import app, db\n'), ((2155, 2174), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2172, 2174), False, 'from app import app, db\n'), ((2183, 2239), 'flask.flash', 'flash', (['"""Congratulations, you are now a registered user!"""'], {}), "('Congratulations, you are now a registered user!')\n", (2188, 2239), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((2665, 2688), 'random.sample', 'random.sample', (['data', '(50)'], {}), '(data, 50)\n', (2678, 2688), False, 'import random\n'), ((2705, 2730), 'app.util.get_movie_info_min', 'get_movie_info_min', (['list1'], {}), '(list1)\n', (2723, 2730), False, 'from app.util import get_movie_info_min, get_movie_info, predict, set_preferences, get_preferences, create_connection\n'), ((2746, 2790), 'flask.render_template', 'render_template', (['"""calib.html"""'], {'movies': 'movies'}), "('calib.html', movies=movies)\n", (2761, 2790), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((3234, 3259), 'app.util.predict', 'predict', (['movshown', 'movsel'], {}), '(movshown, movsel)\n', (3241, 3259), False, 'from app.util import get_movie_info_min, get_movie_info, predict, set_preferences, get_preferences, create_connection\n'), ((3267, 3328), 'app.util.set_preferences', 'set_preferences', ([], {'user_id': 'current_user.id', 'movie_ids': 'movie_ids'}), '(user_id=current_user.id, movie_ids=movie_ids)\n', (3282, 3328), False, 'from app.util import get_movie_info_min, get_movie_info, predict, set_preferences, get_preferences, create_connection\n'), ((3344, 3379), 'app.util.get_movie_info_min', 'get_movie_info_min', (['movie_ids[:100]'], {}), '(movie_ids[:100])\n', (3362, 3379), False, 'from app.util import get_movie_info_min, get_movie_info, predict, set_preferences, get_preferences, create_connection\n'), ((661, 677), 'flask.url_for', 'url_for', (['"""calib"""'], {}), "('calib')\n", (668, 677), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((909, 926), 'random.randint', 'randint', (['(1)', '(45296)'], {}), '(1, 45296)\n', (916, 926), False, 'from random import randint\n'), ((1355, 1371), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (1362, 1371), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((1601, 1635), 'flask.flash', 'flash', (['"""Invalid email or password"""'], {}), "('Invalid email or password')\n", (1606, 1635), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((1731, 1747), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (1738, 1747), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((1947, 1963), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (1954, 1963), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((2264, 2280), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (2271, 2280), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((2986, 3013), 'flask.request.form.getlist', 'request.form.getlist', (['"""sel"""'], {}), "('sel')\n", (3006, 3013), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((3404, 3420), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (3411, 3420), False, 'from flask import render_template, flash, redirect, url_for, request\n'), ((1445, 1488), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'form.email.data'}), '(email=form.email.data)\n', (1465, 1488), False, 'from app.models import User\n'), ((1664, 1680), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (1671, 1680), False, 'from flask import render_template, flash, redirect, url_for, request\n')] |