input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>corehq/apps/reminders/models.py
import pytz
from pytz import timezone
from datetime import timedelta, datetime, date, time
import re
from couchdbkit.ext.django.schema import *
from casexml.apps.case.models import CommCareCase, CommCareCaseGroup
from corehq.apps.sms.models import CommConnectCase
from corehq.apps.users.cases import get_owner_id, get_wrapped_owner
from corehq.apps.users.models import CommCareUser, CouchUser
from corehq.apps.groups.models import Group
from dimagi.utils.parsing import string_to_datetime, json_format_datetime
from dateutil.parser import parse
from corehq.apps.reminders.util import get_form_name, enqueue_reminder_directly
from couchdbkit.exceptions import ResourceConflict
from couchdbkit.resource import ResourceNotFound
from corehq.apps.sms.util import create_task, close_task, update_task
from corehq.apps.smsforms.app import submit_unfinished_form
from dimagi.utils.couch import LockableMixIn, CriticalSection
from dimagi.utils.couch.database import SafeSaveDocument
from dimagi.utils.couch.cache.cache_core import get_redis_client
from dimagi.utils.multithreading import process_fast
from dimagi.utils.logging import notify_exception
from random import randint
from django.conf import settings
class IllegalModelStateException(Exception):
    # Raised when a reminder model is in a state the calling code cannot
    # handle.  NOTE(review): no raise sites are visible in this chunk.
    pass
# --- reminder delivery methods ---
METHOD_SMS = "sms"
METHOD_SMS_CALLBACK = "callback"
METHOD_SMS_SURVEY = "survey"
METHOD_IVR_SURVEY = "ivr_survey"
METHOD_EMAIL = "email"
METHOD_STRUCTURED_SMS = "structured_sms"
# NOTE(review): METHOD_STRUCTURED_SMS is deliberately absent from
# METHOD_CHOICES; it only appears in KEYWORD_ACTION_CHOICES below -- confirm.
METHOD_CHOICES = [
    METHOD_SMS,
    METHOD_SMS_CALLBACK,
    METHOD_SMS_SURVEY,
    METHOD_IVR_SURVEY,
    METHOD_EMAIL,
]
# The Monday - Sunday constants are meant to match the result from
# date.weekday()
DAY_ANY = -1  # wildcard: fire on any day of the week
DAY_MON = 0
DAY_TUE = 1
DAY_WED = 2
DAY_THU = 3
DAY_FRI = 4
DAY_SAT = 5
DAY_SUN = 6
DAY_OF_WEEK_CHOICES = [
    DAY_ANY,
    DAY_MON,
    DAY_TUE,
    DAY_WED,
    DAY_THU,
    DAY_FRI,
    DAY_SAT,
    DAY_SUN,
]
# Sentinel for max_iteration_count: repeat until the "until" condition stops it.
REPEAT_SCHEDULE_INDEFINITELY = -1
# How CaseReminderEvent.day_num / fire_time are interpreted
# (see the CaseReminderHandler docstring for examples).
EVENT_AS_SCHEDULE = "SCHEDULE"
EVENT_AS_OFFSET = "OFFSET"
EVENT_INTERPRETATIONS = [EVENT_AS_SCHEDULE, EVENT_AS_OFFSET]
# UI complexity levels for editing a reminder definition.
UI_SIMPLE_FIXED = "SIMPLE_FIXED"
UI_COMPLEX = "COMPLEX"
UI_CHOICES = [UI_SIMPLE_FIXED, UI_COMPLEX]
# --- reminder recipient types ---
RECIPIENT_SENDER = "SENDER"
RECIPIENT_USER = "USER"
RECIPIENT_OWNER = "OWNER"
RECIPIENT_CASE = "CASE"
RECIPIENT_PARENT_CASE = "PARENT_CASE"
RECIPIENT_ALL_SUBCASES = "ALL_SUBCASES"
RECIPIENT_SUBCASE = "SUBCASE"
RECIPIENT_SURVEY_SAMPLE = "SURVEY_SAMPLE"
RECIPIENT_USER_GROUP = "USER_GROUP"
RECIPIENT_CHOICES = [
    RECIPIENT_USER, RECIPIENT_OWNER, RECIPIENT_CASE, RECIPIENT_SURVEY_SAMPLE,
    RECIPIENT_PARENT_CASE, RECIPIENT_SUBCASE, RECIPIENT_USER_GROUP,
]
KEYWORD_RECIPIENT_CHOICES = [RECIPIENT_SENDER, RECIPIENT_OWNER, RECIPIENT_USER_GROUP]
KEYWORD_ACTION_CHOICES = [METHOD_SMS, METHOD_SMS_SURVEY, METHOD_STRUCTURED_SMS]
# How CaseReminderEvent.fire_time is determined (see that class's docstring).
FIRE_TIME_DEFAULT = "DEFAULT"
FIRE_TIME_CASE_PROPERTY = "CASE_PROPERTY"
FIRE_TIME_RANDOM = "RANDOM"
FIRE_TIME_CHOICES = [FIRE_TIME_DEFAULT, FIRE_TIME_CASE_PROPERTY, FIRE_TIME_RANDOM]
# Case-property matching modes used by case_matches_criteria().
MATCH_EXACT = "EXACT"
MATCH_REGEX = "REGEX"
MATCH_ANY_VALUE = "ANY_VALUE"
MATCH_TYPE_CHOICES = [MATCH_EXACT, MATCH_REGEX, MATCH_ANY_VALUE]
# Reminder start conditions: a case-property criterion or a fixed datetime.
CASE_CRITERIA = "CASE_CRITERIA"
ON_DATETIME = "ON_DATETIME"
START_CONDITION_TYPES = [CASE_CRITERIA, ON_DATETIME]
SURVEY_METHOD_LIST = ["SMS","CATI"]
UI_FREQUENCY_ADVANCED = "ADVANCED"
UI_FREQUENCY_CHOICES = [UI_FREQUENCY_ADVANCED]
QUESTION_RETRY_CHOICES = [1, 2, 3, 4, 5]
FORM_TYPE_ONE_BY_ONE = "ONE_BY_ONE" # Answer each question one at a time
FORM_TYPE_ALL_AT_ONCE = "ALL_AT_ONCE" # Complete the entire form with just one sms using the delimiter to separate answers
FORM_TYPE_CHOICES = [FORM_TYPE_ONE_BY_ONE, FORM_TYPE_ALL_AT_ONCE]
REMINDER_TYPE_ONE_TIME = "ONE_TIME"
REMINDER_TYPE_KEYWORD_INITIATED = "KEYWORD_INITIATED"
REMINDER_TYPE_DEFAULT = "DEFAULT"
REMINDER_TYPE_SURVEY_MANAGEMENT = "SURVEY_MANAGEMENT"
REMINDER_TYPE_CHOICES = [REMINDER_TYPE_DEFAULT, REMINDER_TYPE_ONE_TIME,
    REMINDER_TYPE_KEYWORD_INITIATED, REMINDER_TYPE_SURVEY_MANAGEMENT]
SEND_NOW = "NOW"
SEND_LATER = "LATER"
# This time is used when the case property used to specify the reminder time isn't a valid time
# TODO: Decide whether to keep this or retire the reminder
DEFAULT_REMINDER_TIME = time(12, 0)
def is_true_value(val):
    """A reminder case property counts as "true" only when it is exactly
    the string 'ok' or 'OK' (mixed case such as 'Ok' does not count)."""
    return val in ('ok', 'OK')
def looks_like_timestamp(value):
    """Return True if value looks like an ISO-style timestamp, i.e. starts
    with a YYYY-MM-DD date prefix.

    Fixes: the pattern is now a raw string (the original "^\\d\\d..." plain
    literal relied on unrecognized string escapes, which newer Pythons warn
    about) and re.match is called directly (the module caches compilation).
    Any error -- e.g. a non-string argument -- is treated as "not a timestamp",
    matching the original behavior.
    """
    try:
        return re.match(r"^\d\d\d\d-\d\d-\d\d.*$", value) is not None
    except Exception:
        return False
def property_references_parent(case_property):
    """Return True when the case property name refers to the parent case,
    i.e. uses the 'parent/<property>' syntax (see get_case_property).
    NOTE: Python 2 code -- basestring covers both str and unicode."""
    return isinstance(case_property, basestring) and case_property.startswith("parent/")
def get_case_property(case, case_property):
    """Look up a case property value.

    case            the case
    case_property   the name of the case property; 'parent/property' looks the
                    property up on the parent case, 'property' on the case itself
    Returns None when either argument is None or the parent is missing.
    """
    if case is None or case_property is None:
        return None
    if property_references_parent(case_property):
        parent_case = case.parent
        if parent_case is None:
            return None
        return parent_case.get_case_property(case_property[len("parent/"):])
    return case.get_case_property(case_property)
def case_matches_criteria(case, match_type, case_property, value_to_match):
    """Return True when the case's property satisfies the given match rule.

    MATCH_EXACT      property equals value_to_match (and the value is not None)
    MATCH_ANY_VALUE  property exists (is not None)
    MATCH_REGEX      value_to_match, treated as a regex, matches str(property)
    Any other match_type, or a regex error, yields False.
    """
    case_property_value = get_case_property(case, case_property)
    if match_type == MATCH_EXACT:
        return value_to_match is not None and case_property_value == value_to_match
    if match_type == MATCH_ANY_VALUE:
        return case_property_value is not None
    if match_type == MATCH_REGEX:
        try:
            return re.match(value_to_match, str(case_property_value)) is not None
        except Exception:
            return False
    return False
class MessageVariable(object):
    """Wrapper used by Message so template lookups fail soft: any attribute
    or item access that cannot be resolved renders as "(?)" instead of
    raising inside str.format().  NOTE: Python 2 code (uses unicode)."""
    def __init__(self, variable):
        self.variable = variable
    def __unicode__(self):
        return unicode(self.variable)
    @property
    def days_until(self):
        # Parse the wrapped value as a datetime; render "(?)" on failure.
        try: variable = string_to_datetime(self.variable)
        except Exception:
            return "(?)"
        else:
            # add 12 hours and then floor == round to the nearest day
            return (variable - datetime.utcnow() + timedelta(hours=12)).days
    def __getattr__(self, item):
        # Resolution order matters: (1) a real attribute of this wrapper,
        # (2) an attribute of the wrapped value, (3) an item lookup
        # (dict key / sequence index).  Every failure degrades to "(?)".
        try:
            return super(MessageVariable, self).__getattribute__(item)
        except Exception:
            pass
        try:
            return MessageVariable(getattr(self.variable, item))
        except Exception:
            pass
        try:
            return MessageVariable(self.variable[item])
        except Exception:
            pass
        return "(?)"
class Message(object):
    """Renders a reminder message template (str.format syntax) with every
    parameter wrapped in MessageVariable, so unresolvable placeholders show
    "(?)" instead of raising.  NOTE: Python 2 code (uses unicode)."""
    def __init__(self, template, **params):
        self.template = template
        self.params = {}
        for key, value in params.items():
            self.params[key] = MessageVariable(value)
    def __unicode__(self):
        return self.template.format(**self.params)
    @classmethod
    def render(cls, template, **params):
        """One-shot helper: decode a utf-8 byte-string template to unicode
        if needed, then format it with params."""
        if isinstance(template, str):
            template = unicode(template, encoding='utf-8')
        return unicode(cls(template, **params))
class CaseReminderEvent(DocumentSchema):
    """
    A CaseReminderEvent is the building block for representing reminder schedules in
    a CaseReminderHandler (see CaseReminderHandler.events).

    day_num                     See CaseReminderHandler, depends on event_interpretation.
    fire_time                   See CaseReminderHandler, depends on event_interpretation.
    fire_time_aux               Usage depends on fire_time_type.
    fire_time_type              FIRE_TIME_DEFAULT: the event will be scheduled at the time specified by fire_time.
                                FIRE_TIME_CASE_PROPERTY: the event will be scheduled at the time specified by the
                                case property named in fire_time_aux.
                                FIRE_TIME_RANDOM: the event will be scheduled at a random minute on the interval that
                                starts with fire_time and lasts for time_window_length minutes
    time_window_length          Used in FIRE_TIME_RANDOM to define a time interval that starts at fire_time and lasts
                                for this many minutes
    message                     The text to send along with language to send it, represented
                                as a dictionary: {"en": "Hello, {user.full_name}, you're having issues."}
    callback_timeout_intervals  For CaseReminderHandlers whose method is "callback", a list of
                                timeout intervals (in minutes). The message is resent based on
                                the number of entries in this list until the callback is received,
                                or the number of timeouts is exhausted.
    form_unique_id              For CaseReminderHandlers whose method is "survey", this is the unique id
                                of the form to play as a survey.
    """
    day_num = IntegerProperty()
    fire_time = TimeProperty()
    fire_time_aux = StringProperty()
    fire_time_type = StringProperty(choices=FIRE_TIME_CHOICES, default=FIRE_TIME_DEFAULT)
    time_window_length = IntegerProperty()
    message = DictProperty()
    callback_timeout_intervals = ListProperty(IntegerProperty)
    form_unique_id = StringProperty()
def run_rule(case_id, handler, schedule_changed, prev_definition):
    """Re-evaluate a single case against a reminder handler definition.

    Loads the case, invokes handler.case_changed (retrying once on a couch
    ResourceConflict), and bumps a best-effort redis progress counter.
    Checkpoints 1/2/3 are recorded before, between, and after the two steps.

    :param case_id: id of the CommCareCase to process
    :param handler: the CaseReminderHandler being (re)applied
    :param schedule_changed: forwarded to case_changed
    :param prev_definition: forwarded to case_changed
    """
    case = CommCareCase.get(case_id)
    handler.set_rule_checkpoint(1, incr=True)
    try:
        handler.case_changed(case, schedule_changed=schedule_changed,
            prev_definition=prev_definition)
    except ResourceConflict:
        # Sometimes the reminder fires in the middle of reprocessing
        # the scheduling; retry once on the conflicting save.
        handler.case_changed(case, schedule_changed=schedule_changed,
            prev_definition=prev_definition)
    handler.set_rule_checkpoint(2, incr=True)
    try:
        # It shouldn't be necessary to lock this out, but a deadlock can
        # happen in rare cases without it
        with CriticalSection(["reminder-rule-processing-%s" % handler._id], timeout=15):
            client = get_redis_client()
            client.incr("reminder-rule-processing-current-%s" % handler._id)
    except Exception:
        # Progress counting is best-effort only.  This was a bare "except:";
        # narrowed so KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
    handler.set_rule_checkpoint(3, incr=True)
def retire_reminder(reminder_id):
    """Load a CaseReminder by id and retire it (see CaseReminder.retire)."""
    r = CaseReminder.get(reminder_id)
    r.retire()
def get_case_ids(domain):
    """
    Had to add this because this query kept intermittently raising
    "NoMoreData: Can't parse headers" exceptions.

    Retries the couch view up to five times; the final failure is re-raised.
    """
    max_tries = 5
    attempt = 0
    while True:
        attempt += 1
        try:
            rows = CommCareCase.view('hqcase/types_by_domain',
                reduce=False,
                startkey=[domain],
                endkey=[domain, {}],
                include_docs=False,
            ).all()
        except Exception:
            if attempt >= max_tries:
                raise
        else:
            return [row["id"] for row in rows]
class CaseReminderHandler(Document):
"""
A CaseReminderHandler defines the rules and schedule which govern how messages
should go out. The "start" and "until" attributes will spawn and deactivate a
CaseReminder for a CommCareCase, respectively, when their conditions are reached.
Below both are described in more detail:
start This defines when the reminder schedule kicks off.
Examples: start="edd"
- The reminder schedule kicks off for a CommCareCase on
the date defined by the CommCareCase's "edd" property.
start="form_started"
- The reminder schedule kicks off for a CommCareCase when
the CommCareCase's "form_started" property equals "ok".
until This defines when the reminders should stop being sent. Once this condition
is reached, the CaseReminder is deactivated.
Examples: until="followup_1_complete"
- The reminders will stop being sent for a CommCareCase when
the CommCareCase's "followup_1_complete" property equals "ok".
Once a CaseReminder is spawned (i.e., when the "start" condition is met for a
CommCareCase), the intervals at which reminders are sent and the messages sent
are defined by the "events" attribute on the CaseReminderHandler.
One complete cycle through all events is considered to be an "iteration", and the attribute
that defines the maximum number of iterations for this schedule is "max_iteration_count".
Reminder messages will continue to be sent until the events cycle has occurred "max_iteration_count"
times, or until the "until" condition is met, whichever comes first. To ignore the "max_iteration_count",
it can be set to REPEAT_SCHEDULE_INDEFINITELY, in which case only the "until" condition
stops the reminder messages.
The events can either be interpreted as offsets from each other and from the original "start"
condition, or as fixed schedule times from the original "start" condition:
Example of "event_interpretation" == EVENT_AS_OFFSET:
start = "form1_completed"
start_offset = 1
events = [
CaseReminderEvent(
day_num = 0
,fire_time = time(hour=1)
,message = {"en": "Form not yet completed."}
)
]
schedule_length = 0
event_interpretation = EVENT_AS_OFFSET
max_iteration_count = REPEAT_SCHEDULE_INDEFINITELY
until = "form2_completed"
This CaseReminderHandler can be used to send an hourly message starting one day (start_offset=1)
after "form1_completed", and will keep sending the message every hour until "form2_completed". So,
if "form1_completed" is reached on January 1, 2012, at 9:46am, the reminders will begin being sent
at January 2, 2012, at 10:46am and every hour subsequently until "form2_completed". Specifically,
when "event_interpretation" is EVENT_AS_OFFSET:
day_num is interpreted to be a number of days after | |
<filename>alphaconf/__init__.py
import contextlib
import contextvars
import logging
import os
import re
import sys
import uuid
from typing import Any, Dict, Iterable, List, Union
from omegaconf import DictConfig, MissingMandatoryValue, OmegaConf
from . import arg_parser
__doc__ = """AlphaConf
Based on omegaconf, provide a simple way to declare and run your application
while loading the configuration from various files and command line
arguments.
Use `alphaconf.get()` or `alphaconf.configuration()` to read
the current application's configuration.
if __name__ == '__main__':
alphaconf.Application().run(main)
"""
# Module-level logger for alphaconf itself.
_log = logging.getLogger(__name__)
"""A list of functions which given a key indicate whether it's a secret"""
# Each entry is a predicate mask(key) -> truthy when the configuration key
# should be masked in yaml output (see Application.__mask_secrets).
SECRET_MASKS = [
    # mask if contains a kind of secret and it's not in a file
    re.compile(r'.*(password|secret|key)(?!_file)(_|$)').match,
]
#######################################
# APPLICATION
class Application:
    """An application configuration description

    Wraps an OmegaConf configuration assembled from default configurations,
    per-user/host yaml files, environment variables, dotenv and command-line
    arguments, plus helpers to run a main function with it.

    :param properties: Properties of the application, such as:
        name, version, short_description, description, etc.
    """
    def __init__(self, **properties) -> None:
        """Initialize the application.

        Properties:
        - name: the name of the application (always updated)
        - version: version number
        - description: description shown in help
        - short_description: shorter description

        :param properties: Properties for the app
        """
        self.__config = None  # initialized lazily by setup_configuration()
        if not properties.get('name'):
            properties['name'] = self.__get_default_name()
        self.properties = properties
        self._arg_parser = arg_parser.ArgumentParser(properties)
        arg_parser.add_default_option_handlers(self._arg_parser)
    @staticmethod
    def __get_default_name() -> str:
        """Find the default name from sys.argv"""
        name = os.path.basename(sys.argv[0])
        if name.endswith('.py'):
            name = name[:-3]
        if name == '__main__':
            # executing a module using python -m: use the package directory name
            name = os.path.basename(os.path.dirname(sys.argv[0]))
        return name
    def _app_configuration(self) -> DictConfig:
        """Get the application configuration key"""
        return OmegaConf.create(
            {
                'application': {
                    'name': self.name,
                    'version': self.properties.get('version'),
                    # unique id identifying this run of the application
                    'uuid': str(uuid.uuid4()),
                },
            }
        )
    @property
    def name(self):
        """Get the name of the application"""
        return self.properties['name']
    @property
    def argument_parser(self):
        """The argument parser instance"""
        return self._arg_parser
    @property
    def configuration(self) -> DictConfig:
        """Get the configuration of the application, initialize if necessary"""
        if self.__config is None:
            # lazy default setup: no CLI parsing, no logging side effects
            self.setup_configuration(
                arguments=None, resolve_configuration=False, setup_logging=False
            )
            _log.info('alphaconf initialized')
        assert self.__config is not None
        return self.__config
    def get_config(self, key: str = "", type=None) -> Any:
        """Get a configuration value by key

        The value is resolved and a missing exception may be thrown for mandatory arguments.

        :param key: Optional selection key for the configuration
        :param type: Optional type to convert to
        :return: The value or None
        """
        if key:
            c = OmegaConf.select(self.configuration, key, throw_on_missing=True)
        else:
            c = self.configuration
        if isinstance(c, DictConfig):
            # resolve interpolations and convert to plain python containers
            c = OmegaConf.to_object(c)
        if type and c is not None:
            from . import arg_type
            c = arg_type.convert_to_type(c, type)
        return c
    def _get_possible_configuration_paths(self) -> Iterable[str]:
        """List of paths where to find configuration files"""
        name = self.name
        is_windows = sys.platform.startswith('win')
        for path in [
            '$APPDATA/{}.yaml' if is_windows else '/etc/{}.yaml',
            '$LOCALAPPDATA/{}.yaml' if is_windows else '',
            '$HOME/.{}.yaml',
            '$HOME/.config/{}.yaml',
            '$PWD/{}.yaml',
        ]:
            path = os.path.expandvars(path.format(name))
            # skip empty templates and paths whose env variables did not expand
            if path and '$' not in path:
                yield path
    def _load_dotenv(self, load_dotenv: Union[bool, None] = None):
        """Load dotenv variables (optionally)

        :param load_dotenv: False disables loading entirely; otherwise
            dotenv.load_dotenv() is attempted
        """
        if load_dotenv is False:
            return
        try:
            import dotenv
            _log.debug('Loading dotenv')
            dotenv.load_dotenv()
        except ModuleNotFoundError:
            # NOTE(review): "not load_dotenv" re-raises when load_dotenv is
            # None (the default), which contradicts "yes if installed" in
            # setup_configuration's docs -- confirm whether the condition
            # should be "if load_dotenv:" instead.
            if not load_dotenv:
                raise
            _log.debug('dotenv is not installed')
    def _get_configurations(
        self,
        env_prefixes: Union[bool, Iterable[str]] = True,
    ) -> Iterable[DictConfig]:
        """List of all configurations that can be loaded automatically

        - All of the default configurations
        - The app configuration
        - Reads existing files from possible configuration paths
        - Reads environment variables based on given prefixes

        :param env_prefixes: Prefixes of environment variables to load
        :return: OmegaConf configurations (to be merged)
        """
        _log.debug('Loading default and app configurations')
        yield from _DEFAULT_CONFIGURATIONS
        yield self._app_configuration()
        # Read files
        for path in self._get_possible_configuration_paths():
            if os.path.exists(path):
                _log.debug('Load configuration from %s', path)
                yield OmegaConf.load(path)
        # Environment
        if env_prefixes is True:
            # auto-detect: any top-level key of the default configurations
            # (except 'base'/'python' and private keys) becomes "KEY_"
            _log.debug('Detecting accepted env prefixes')
            default_keys = {k for cfg in _DEFAULT_CONFIGURATIONS for k in cfg.keys()}
            prefixes = tuple(
                k.upper() + '_'
                for k in default_keys
                if k not in ('base', 'python') and not k.startswith('_')
            )
        elif isinstance(env_prefixes, Iterable):
            prefixes = tuple(env_prefixes)
        else:
            prefixes = None
        if prefixes:
            _log.debug('Loading env configuration from prefixes %s' % (prefixes,))
            yield OmegaConf.from_dotlist(
                [
                    # e.g. MYAPP_SERVER_PORT=8080 -> myapp.server.port=8080
                    "%s=%s" % (name.lower().replace('_', '.'), value)
                    for name, value in os.environ.items()
                    if name.startswith(prefixes)
                ]
            )
    def setup_configuration(
        self,
        arguments: Union[bool, List[str]] = True,
        *,
        load_dotenv: Union[bool, None] = None,
        env_prefixes: Union[bool, Iterable[str]] = True,
        resolve_configuration: bool = True,
        setup_logging: bool = True,
    ) -> None:
        """Setup the application configuration

        Can be called only once to setup the configuration and initialize the application.
        The function may raise ExitApplication.

        :param arguments: The argument list to parse (default: True to parse sys.argv)
        :param load_dotenv: Whether to load dotenv environment (default: yes if installed)
        :param env_prefixes: The env prefixes to load the configuration values from (default: auto)
        :param resolve_configuration: Test whether the configuration can be resolved (default: True)
        :param setup_logging: Whether to setup logging (default: True)
        """
        if self.__config is not None:
            raise RuntimeError('Configuration already set')
        _log.debug('Start setup application')
        # Parse arguments
        parser_result = None
        if arguments is True:
            arguments = sys.argv[1:]
        if isinstance(arguments, list):
            self.argument_parser.reset()
            self.argument_parser.parse_arguments(arguments)
            parser_result = self.argument_parser.parse_result
            _log.debug('Parse arguments result: %s', parser_result)
        # Load and merge configurations
        self._load_dotenv(load_dotenv=load_dotenv)
        configurations = list(self._get_configurations(env_prefixes=env_prefixes))
        if parser_result:
            # command-line overrides win: merged last
            configurations.extend(self.argument_parser.configurations())
        self.__config = OmegaConf.merge(*configurations)
        _log.debug('Merged %d configurations', len(configurations))
        # Handle the result
        if parser_result == 'show_configuration':
            print(self.yaml_configuration())
            raise ExitApplication
        elif parser_result == 'exit':
            raise ExitApplication
        elif parser_result is not None and parser_result != 'ok':
            raise RuntimeError('Invalid argument parsing result: %s' % parser_result)
        # Try to get the whole configuration to resolve links
        if resolve_configuration:
            self.get_config()
        # Logging
        if setup_logging:
            _log.debug('Setup logging')
            self.setup_logging()
    def setup_logging(self) -> None:
        """Setup logging

        Set the time to GMT, log key 'logging' from configuration or if none, base logging.
        """
        import logging
        from . import logging_util
        logging_util.set_gmt()
        log = logging.getLogger()
        logging_config = self.get_config('logging')
        if logging_config:
            # Configure using the 'logging' dict from the configuration
            import logging.config
            logging.config.dictConfig(logging_config)
        elif len(log.handlers) == 0:
            # Default logging if not yet initialized
            output = logging.StreamHandler()
            output.setFormatter(logging_util.ColorFormatter())
            log.addHandler(output)
            log.setLevel(logging.INFO)
    @contextlib.contextmanager
    def update_configuration(self, conf: Union[DictConfig, Dict]):
        """Returns a context where the application configuration is merged
        with a given configuration.  The previous configuration is restored
        on exit.

        :param conf: The configuration to merge in
        """
        current_config = self.configuration
        try:
            self.__config = OmegaConf.merge(current_config, conf)
            yield self
        finally:
            self.__config = current_config
    def yaml_configuration(self, mask_base: bool = True, mask_secrets: bool = True) -> str:
        """Get the configuration as yaml string

        :param mask_base: Whether to mask the "base" entry (keep key names only)
        :param mask_secrets: Whether to mask keys matching SECRET_MASKS
        :return: Configuration as string (yaml)
        """
        configuration = self.configuration
        if mask_base or mask_secrets:
            # NOTE(review): DictConfig.copy() looks shallow here, so
            # __mask_secrets may mutate nested nodes of the live
            # configuration -- verify.
            configuration = configuration.copy()
        if mask_secrets:
            configuration = Application.__mask_secrets(configuration)
        if mask_base:
            configuration['base'] = {
                key: list(choices.keys()) for key, choices in configuration.base.items()
            }
        return OmegaConf.to_yaml(configuration)
    @staticmethod
    def __mask_secrets(configuration):
        # Replace secret-looking values with '*****'; recurse into mappings.
        # NOTE(review): isinstance against typing.Dict works as an alias of
        # dict but is deprecated since Python 3.9 -- consider plain dict.
        for key in list(configuration):
            if any(mask(key) for mask in SECRET_MASKS):
                configuration[key] = '*****'
            elif isinstance(configuration[key], (Dict, DictConfig)):
                configuration[key] = Application.__mask_secrets(configuration[key])
        return configuration
    def run(self, main, should_exit=True, **configuration):
        """Run this application

        :param main: The main function to call
        :param should_exit: Whether an exception should sys.exit (default: True)
        :param configuration: Arguments passed to setup_configuration()
        :return: The result of main
        """
        try:
            self.setup_configuration(**configuration)
        except MissingMandatoryValue as e:
            _log.error(e)
            if should_exit:
                sys.exit(2)
            raise
        except ExitApplication:
            _log.debug('Normal application exit')
            if should_exit:
                sys.exit()
            return
        # Run the application; register self as the current application
        log = logging.getLogger()
        token = application.set(self)
        try:
            log.info('Application start (%s: %s)', self.name, main.__qualname__)
            result = main()
            if result is None:
                log.info('Application end.')
            else:
                log.info('Application end: %s', result)
            return result
        except Exception as e:
            # no need to log exc_info because the parent will handle it
            log.error('Application failed (%s) %s', type(e).__name__, e, exc_info=should_exit)
            if should_exit:
                log.debug('Exit application')
                sys.exit(1)
            raise
        finally:
            application.reset(token)
    def __str__(self) -> str:
        running = self == application.get()
        ready = self.__config is not None
        return f"{type(self).__name__}({self.name}; loaded={ready}; running={running})"
class ExitApplication(BaseException):
    """Signal to exit the application normally.
    Derives from BaseException so a broad `except Exception` in user code
    does not accidentally swallow it."""
    pass
#######################################
# APPLICATION CONTEXT
"""The application context"""
# ContextVar holding the Application currently running (set in Application.run).
application = contextvars.ContextVar('application')
# Configurations registered via setup_configuration(); merged first (lowest
# priority) by Application._get_configurations.
_DEFAULT_CONFIGURATIONS = []
def configuration() -> DictConfig:
    """Get the configuration for the current application"""
    return application.get().configuration
def get(config_key: str, type=None) -> Any:
    """Select a configuration from the current application"""
    return application.get().get_config(config_key, type=type)
def setup_configuration(conf: Union[DictConfig, str, Dict]):
    """Add a default configuration"""
    normalized = conf if isinstance(conf, DictConfig) else OmegaConf.create(conf)
    _DEFAULT_CONFIGURATIONS.append(normalized)
#######################################
# BASIC CONFIGURATION
def __alpha_configuration():
"""The default configuration for alphaconf"""
logging_default = {
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
'datefmt': '%H:%M:%S',
},
'default': {
'format': '%(asctime)s %(levelname)s'
' %(name)s [%(process)s,%(threadName)s]: %(message)s',
},
'color': {
'class': 'alphaconf.logging_util.ColorFormatter',
'format': '${..default.format}',
},
'json': {
'class': 'alphaconf.logging_util.JSONFormatter',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'color',
'stream': 'ext://sys.stdout',
},
},
'root': {
'handlers': ['console'],
| |
def __lshift__(self, added_code):
    """ implicit code insertion through << operator """
    # reindent to the current nesting level before appending
    indented_code = self.reindent(added_code)
    return self.append_code(indented_code)
def inc_level(self):
    """ increase indentation level """
    self.tablevel += 1
    # keep the already-expanded buffer aligned with the new level
    self.expanded_code += CodeObject.tab
def dec_level(self):
    """Decrease the indentation level by one, removing the trailing tab
    that inc_level appended to the expanded buffer (if still present)."""
    self.tablevel -= 1
    # str.endswith is the idiomatic form of the previous negative-slice
    # comparison; behavior is identical (False when the buffer is shorter
    # than one tab).
    if self.expanded_code.endswith(CodeObject.tab):
        self.expanded_code = self.expanded_code[:-len(CodeObject.tab)]
def open_level(self, inc=True, header=None):
    """ open nested block
    :param inc: also increase the indentation level
    :param header: block opener; defaults to self.level_header """
    header = self.level_header if header is None else header
    self << header # "{\n"
    if inc: self.inc_level()
def close_level(self, cr="\n", inc=True, footer=None):
    """ close nested block
    :param cr: terminator appended after the footer
    :param inc: also decrease the indentation level
    :param footer: block closer; defaults to self.level_footer """
    footer = self.level_footer if footer is None else footer
    if inc: self.dec_level()
    self << "%s%s" % (footer, cr)
def link_level(self, transition = ""):
    """ close the current block and immediately open a sibling block,
        e.g. "} else {" when transition == "else".
        (The original docstring "close nested block" was a copy-paste of
        close_level's.)  NOTE(review): braces are hard-coded here rather
        than using level_header/level_footer -- confirm C-style only. """
    self.dec_level()
    self << "} %s {" % transition
    self.inc_level()
def get_multiline_comment(self, comment_list):
    """Render comment_list as one C-style block comment; embedded newlines
    in an entry are continued with ' * ' prefixes."""
    lines = ["/**"]
    for entry in comment_list:
        lines.append(" * " + entry.replace("\n", "\n * "))
    lines.append("**/")
    return "\n".join(lines) + "\n"
def generate_header_code(self, git_tag=True):
    """Generate the file preamble: a block comment built from
    self.header_comment (optionally prefixed with the git version comment)
    followed by one #include per entry of self.header_list."""
    if git_tag:
        # prepend the git version comment to the header comments
        self.header_comment.insert(0, CodeConfiguration.get_git_comment())
    parts = [self.get_multiline_comment(self.header_comment)]
    parts.extend("#include <%s>\n" % header_file for header_file in self.header_list)
    return "".join(parts)
def get_free_symbol_name(self, symbol_type, symbol_format, prefix="cotmp", declare=True, symbol_ctor=Variable):
    """Pick an unused symbol name from the symbol table.
    :param symbol_type: symbol-table category used for the declaration
    :param symbol_format: format forwarded to get_free_name
    :param prefix: generated name prefix
    :param declare: when True, immediately register a symbol_ctor node
    :return: the free (and possibly declared) symbol name """
    free_symbol_name = self.symbol_table.get_free_name(symbol_format, prefix)
    if declare:
        self.symbol_table.declare_symbol_name(free_symbol_name, symbol_ctor(free_symbol_name), symbol_type=symbol_type)
    return free_symbol_name
def get_free_var_name(self, var_type, prefix="cotmp", declare=True, var_ctor=Variable):
    """Pick an unused variable name (GENERAL_PREFIX + prefix based) and,
    when declare is True, register a var_ctor node of precision var_type.

    :param var_type: the variable's precision/format (must not be None)
    :param prefix: generated name prefix
    :param declare: register the variable in the symbol table
    :param var_ctor: constructor used to build the declared node
    :return: the free variable name
    """
    # idiom fix: "assert not x is None" -> "assert x is not None"
    assert var_type is not None
    free_var_name = self.symbol_table.get_free_name(var_type, self.GENERAL_PREFIX + prefix)
    # declare free var if required
    if declare:
        self.symbol_table.declare_var_name(free_var_name, var_ctor(free_var_name, precision=var_type))
    return free_var_name
def declare_var_name(self, var_name, var_object):
    """Register var_object under var_name in the symbol table and return
    the name (for chaining)."""
    self.symbol_table.declare_var_name(var_name, var_object)
    return var_name
def declare_protected_symbol(self, name, node):
    """Register node under a protected (reserved) symbol name; returns the
    node (for chaining)."""
    self.symbol_table.declare_protected_symbol(name, node)
    return node
def get_free_name(self, var_type, prefix="cotmp"):
    """Return an unused name of the given type from the symbol table
    without declaring anything.

    :param var_type: the requested precision/format (must not be None)
    :param prefix: generated name prefix
    """
    # idiom fix: "assert not x is None" -> "assert x is not None"
    assert var_type is not None
    return self.symbol_table.get_free_name(var_type, prefix)
## Declare a new constant object whose name is built
#  from @p prefix
#  @param cst_object Constant object to be declared
#  @param prefix str constant name prefix
#  @return str the name the constant was registered under
def declare_cst(self, cst_object, prefix = "cst"):
    """ declare a new constant object and return the registered name """
    free_var_name = self.symbol_table.get_free_cst_name(cst_object.get_precision(), prefix)
    self.symbol_table.declare_cst_name(free_var_name, cst_object)
    return free_var_name
def declare_table(self, table_object, prefix):
    """Register table_object in the symbol table and return its name.
    If the table already has a registered definition, the existing name is
    reused instead of declaring a duplicate.

    :param table_object: table to declare
    :param prefix: name prefix for a fresh declaration
    """
    # idiom fix: "!= None" -> "is not None"; early return flattens the branch
    table_name = self.table_has_definition(table_object)
    if table_name is not None:
        return table_name
    free_var_name = self.symbol_table.get_free_name(table_object.get_storage_precision(), prefix)
    self.symbol_table.declare_table_name(free_var_name, table_object)
    return free_var_name
def declare_function(self, function_name, function_object):
    """Register function_object under function_name; returns the name."""
    self.symbol_table.declare_function_name(function_name, function_object)
    return function_name
def declare_component(self, component_name, component_object):
    """Register component_object under component_name; returns the name."""
    self.symbol_table.declare_component_name(component_name, component_object)
    return component_name
def get(self, code_generator, static_cst=False, static_table=False, headers=False, skip_function=False):
    """Generate the unrolled code content: optional headers, then symbol
    declarations and initializations (minus excluded categories), then the
    expanded code buffer."""
    exclusions = []
    if static_cst:
        exclusions.append(MultiSymbolTable.ConstantSymbol)
    if static_table:
        exclusions.append(MultiSymbolTable.TableSymbol)
    if skip_function:
        exclusions.append(MultiSymbolTable.FunctionSymbol)
    exclusions += self.DEFAULT_EXCLUSION_LIST
    pieces = []
    if headers:
        pieces.append(self.generate_header_code())
        pieces.append("\n\n")
    pieces.append(self.symbol_table.generate_declarations(code_generator, exclusion_list=exclusions))
    pieces.append(self.symbol_table.generate_initializations(code_generator, init_required_list=[MultiSymbolTable.ConstantSymbol, MultiSymbolTable.VariableSymbol]))
    result = "".join(pieces)
    # separate the preamble from the body only when something was emitted
    if result != "":
        result += "\n"
    return result + self.expanded_code
def push_into_parent_code(self, parent_code, code_generator, static_cst = False, static_table = False, headers = False, skip_function = False):
    """Stream this object's headers, declarations, initializations and
    expanded code into parent_code through its << operator (same layout
    as get()).
    NOTE(review): unlike get(), the separating "\\n" is pushed
    unconditionally here -- confirm the difference is intentional."""
    if headers:
        parent_code << self.generate_header_code()
        parent_code << "\n\n"
    # symbol categories excluded from local declaration (emitted statically
    # elsewhere or skipped entirely for this backend)
    declaration_exclusion_list = [MultiSymbolTable.ConstantSymbol] if static_cst else []
    declaration_exclusion_list += [MultiSymbolTable.TableSymbol] if static_table else []
    declaration_exclusion_list += [MultiSymbolTable.FunctionSymbol] if skip_function else []
    declaration_exclusion_list += self.DEFAULT_EXCLUSION_LIST
    parent_code << self.symbol_table.generate_declarations(code_generator, exclusion_list = declaration_exclusion_list)
    parent_code << self.symbol_table.generate_initializations(code_generator, init_required_list = [MultiSymbolTable.ConstantSymbol, MultiSymbolTable.VariableSymbol])
    parent_code << "\n"
    parent_code << self.expanded_code
def add_comment(self, comment):
    """Append a full-line C-style comment to the code buffer."""
    formatted = "/* %s */\n" % comment
    self << formatted
def add_multiline_comment(self, comment):
    # NOTE(review): despite the name this simply delegates to add_comment,
    # which wraps the text in a single "/* ... */" line; a comment
    # containing newlines will span lines unprefixed -- confirm intent.
    self.add_comment(comment)
@RegisterDefaultCodeObject([C_Code, OpenCL_Code])
class CstyleCodeObject(CodeObject):
    """Default CodeObject for C and OpenCL targets; inherits CodeObject's
    behavior unchanged."""
    pass
@RegisterDefaultCodeObject([ASM_Code])
class AsmCodeObject(CodeObject):
    """Default CodeObject for assembly generation: blocks have no textual
    open/close markers and variables/labels are never declared."""
    # function/level opening does not have a specific symbol
    level_header = ""
    # function/level closing does not have a specific symbol
    level_footer = ""
    # always exclude the following from asm code generation
    DEFAULT_EXCLUSION_LIST = [
        MultiSymbolTable.VariableSymbol,
        MultiSymbolTable.LabelSymbol
    ]
class Gappa_Unknown(object):
    # Sentinel goal value: rendered as gappa's "?" placeholder when the
    # expected interval is unknown (see GappaCodeObject.get_value_str).
    def __str__(self):
        return "?"
class GappaCodeObject(CodeObject):
    """CodeObject specialization emitting a gappa proof script: hypotheses,
    goals and hints are accumulated separately and rendered by
    gen_complete_goal() / gen_hint()."""
    DEFAULT_EXCLUSION_LIST = [MultiSymbolTable.VariableSymbol]
    def __init__(self):
        CodeObject.__init__(self, Gappa_Code)
        self.hint_table = []        # (hypothesis, goal, annotation, isApprox) tuples
        self.hypothesis_table = []  # (code, value/interval) pairs
        self.goal_table = []        # (code, value/interval) pairs
def add_hint(self, hypoth_code, goal_code, annotation_code, isApprox = False):
    """Queue a gappa hint: hypothesis -> goal ("~" instead of "->" when
    isApprox), optionally annotated."""
    self.hint_table.append((hypoth_code, goal_code, annotation_code, isApprox))
def add_hypothesis(self, hypoth_code, hypoth_value):
    """Queue a hypothesis enclosure: hypoth_code lies in hypoth_value."""
    self.hypothesis_table.append((hypoth_code, hypoth_value))
def add_goal(self, goal_code, goal_value = Gappa_Unknown):
    """Queue a goal enclosure; Gappa_Unknown renders as "?" (ask gappa)."""
    self.goal_table.append((goal_code, goal_value))
def gen_hint(self):
    """Render the queued hints as a gappa '#hints' section."""
    lines = ["#hints\n"]
    for hypoth_code, goal_code, annotation_code, is_approx in self.hint_table:
        annotation = "{%s}" % annotation_code.get() if annotation_code is not None else ""
        arrow = "~" if is_approx else "->"
        lines.append("%s %s %s %s;\n\n" % (hypoth_code.get(), arrow, goal_code.get(), annotation))
    return "".join(lines)
def gen_complete_goal(self):
    """Render the gappa '# goals' section: the conjunction of all
    hypothesis enclosures (and @FIX/@FLT format annotations) implies the
    conjunction of all goal enclosures."""
    result = "# goals\n"
    hypothesis = []
    for hc, hv in self.hypothesis_table:
        if isinstance(hv, MetaIntervalList):
            # a list of intervals becomes a disjunction of enclosures
            disjonction_list =[
                ("%s in %s" % (hc.get(), self.get_value_str(sub_interval))) for sub_interval in hv.interval_list]
            disjonction = " \/ ".join(disjonction_list)
            hypothesis.append("( %s )" % disjonction)
        else:
            hypothesis.append("%s in %s" % (hc.get(), self.get_value_str(hv)))
        # annotate the hypothesis variable's format (fixed-point / float)
        if isinstance(hc.precision, ML_Fixed_Format):
            hypothesis.append("@FIX(%s,%s)" % (hc.get(), str(- hc.precision.get_frac_size())))
        if isinstance(hc.precision, ML_FP_Format):
            hypothesis.append("@FLT(%s,%s)" % (hc.get(), str(hc.precision.get_field_size()+1)))
    goal = ["%s in %s" % (hc.get(), self.get_value_str(hv)) for hc, hv in self.goal_table]
    result += "{ %s -> %s }\n\n" % (" /\ ".join(hypothesis), " /\ ".join(goal))
    return result
def get_value_str(self, value):
if value is Gappa_Unknown:
return "?"
elif isinstance(value, MetaInterval):
return self.get_value_str(value.interval)
elif isinstance(value, MetaIntervalList):
# MetaIntervalList should have been catched early and
# should have generated a disjonction of cases
raise NotImplementedError
elif isinstance(value, sollya.SollyaObject) and value.is_range():
return "[%s, %s]" % (sollya.inf(value), sollya.sup(value))
else:
return str(value)
def get(self, code_generator, static_cst = False, static_table = False, headers = False, skip_function = True):
result = ""
# symbol exclusion list
declaration_exclusion_list = [MultiSymbolTable.ConstantSymbol] if static_cst else []
declaration_exclusion_list += [MultiSymbolTable.TableSymbol] if static_table else []
declaration_exclusion_list += [MultiSymbolTable.FunctionSymbol] if skip_function else []
declaration_exclusion_list += self.DEFAULT_EXCLUSION_LIST
# declaration generation
result += self.symbol_table.generate_declarations(code_generator, exclusion_list = declaration_exclusion_list)
result += self.symbol_table.generate_initializations(code_generator, init_required_list = [MultiSymbolTable.ConstantSymbol, MultiSymbolTable.VariableSymbol])
result += "\n" if result != "" else ""
result += self.expanded_code
result += "\n\n"
result += self.gen_complete_goal()
result += self.gen_hint()
return result
def push_into_parent_code(self, parent_code, code_generator, static_cst = False, static_table = False, headers = False, skip_function = False):
# symbol exclusion list
declaration_exclusion_list = [MultiSymbolTable.ConstantSymbol] if static_cst else []
declaration_exclusion_list += [MultiSymbolTable.TableSymbol] if static_table else []
declaration_exclusion_list += [MultiSymbolTable.FunctionSymbol] if skip_function else []
declaration_exclusion_list += self.DEFAULT_EXCLUSION_LIST
# declaration generation
parent_code << self.symbol_table.generate_declarations(code_generator, exclusion_list = declaration_exclusion_list)
parent_code << self.symbol_table.generate_initializations(code_generator, init_required_list = [MultiSymbolTable.ConstantSymbol, MultiSymbolTable.VariableSymbol])
parent_code << "\n"
parent_code << self.expanded_code
parent_code << "\n\n"
parent_code << self.gen_complete_goal()
parent_code << self.gen_hint()
@RegisterDefaultCodeObject([LLVM_IR_Code])
class LLVMCodeObject(CodeObject):
    """CodeObject specialization emitting LLVM-IR source (registered as the
    default code object for LLVM_IR_Code)."""
    # name prefix; LLVM-IR local identifiers start with "%" (how it is
    # applied is not visible in this class -- presumably by the code
    # generator)
    GENERAL_PREFIX = "%"
    # always exclude the following from llvm-ir code generation
    DEFAULT_EXCLUSION_LIST = [
        MultiSymbolTable.VariableSymbol,
        MultiSymbolTable.LabelSymbol
    ]
    def __init__(self, language, shared_tables=None, parent_tables=None,
                 rounding_mode=ML_GlobalRoundMode, uniquifier="",
                 main_code_level=None, var_ctor=None):
        # the `language` argument is accepted for signature compatibility
        # but the language is forced to LLVM_IR_Code
        CodeObject.__init__(
            self, LLVM_IR_Code, shared_tables, parent_tables, rounding_mode,
            uniquifier, main_code_level, var_ctor
        )
    def get_multiline_comment(self, comment_list):
        """Render a list of comment strings as LLVM-IR ";" comment lines
        (embedded newlines are continued with ";")."""
        result = ""
        for comment in comment_list:
            result += "; " + comment.replace("\n", "\n;") + "\n"
        return result
    def get(self, code_generator, static_cst=False, static_table=False, headers=False, skip_function = True):
        """Return the full LLVM-IR source: optional header code, symbol
        declarations/initializations, then the expanded code."""
        result = ""
        if headers:
            result += self.generate_header_code()
            result += "\n\n"
        # symbol exclusion list
        declaration_exclusion_list = [MultiSymbolTable.ConstantSymbol] if static_cst else []
        declaration_exclusion_list += [MultiSymbolTable.TableSymbol] if static_table else []
        declaration_exclusion_list += [MultiSymbolTable.FunctionSymbol] if skip_function else []
        # NOTE(review): VariableSymbol is added here and again through
        # DEFAULT_EXCLUSION_LIST below -- the duplicate looks redundant
        declaration_exclusion_list += [MultiSymbolTable.VariableSymbol]
        declaration_exclusion_list += self.DEFAULT_EXCLUSION_LIST
        # declaration generation
        result += self.symbol_table.generate_declarations(code_generator, exclusion_list=declaration_exclusion_list)
        result += self.symbol_table.generate_initializations(
            code_generator,
            init_required_list=[
                MultiSymbolTable.ConstantSymbol, MultiSymbolTable.VariableSymbol
            ]
        )
        result += "\n" if result != "" else ""
        result += self.expanded_code
        result += "\n"
        return result
    def push_into_parent_code(self, parent_code, code_generator, static_cst = False, static_table = False, headers = False, skip_function=True):
        """Stream declarations, initializations and the expanded code into
        parent_code via its << operator (no header code is emitted here)."""
        # symbol exclusion list
        declaration_exclusion_list = [MultiSymbolTable.ConstantSymbol] if static_cst else []
        declaration_exclusion_list += [MultiSymbolTable.TableSymbol] if static_table else []
        declaration_exclusion_list += [MultiSymbolTable.FunctionSymbol] if skip_function else []
        declaration_exclusion_list += self.DEFAULT_EXCLUSION_LIST
        # declaration generation
        parent_code << self.symbol_table.generate_declarations(code_generator, exclusion_list = declaration_exclusion_list)
        parent_code << self.symbol_table.generate_initializations(code_generator, init_required_list = [MultiSymbolTable.ConstantSymbol, MultiSymbolTable.VariableSymbol])
        parent_code << "\n"
        parent_code << self.expanded_code
        parent_code << "\n"
class VHDLCodeObject(CodeConfiguration, CommonCodeObject):
def __init__(self, language, shared_tables = None, parent_tables = None, rounding_mode = ML_GlobalRoundMode, uniquifier = "", main_code_level = False, var_ctor = None):
""" code object initialization """
self.expanded_code = ""
self.uniquifier = uniquifier
self.tablevel = 0
self.header_list = []
self.library_list = []
self.symbol_table = MultiSymbolTable(shared_tables if shared_tables else {}, parent_tables = (parent_tables if parent_tables else []), uniquifier = self.uniquifier)
self.language = language
self.header_comment = []
self.shared_symbol_table_f = MultiSymbolTable.SignalSymbol in shared_tables
self.main_code_level = main_code_level
self.default_var_ctor = var_ctor
def __lshift__(self, added_code):
""" implicit code insertion through << operator | |
# -*- coding: utf-8 -*-
import traceback, logging
from os import path as opath
from functools import partial
import csv
import codecs
from datetime import datetime, timedelta
from .db import db_context, get_db
from . import utils
from . import settings
#from .stats import get_statistic # api xinterface @todo
from .log import Record, Index, RecordNotMatch
from nebula_utils import settings as global_settings#Temp_Query_Path, LogPath_Format # @todo
logger = logging.getLogger('nebula_utils.main')
DEBUG_PREFIX = '==============='
FORCUS_PREFIX = '$$$$$$$$$$$$$$$'
def get_log_path(log_path, timestamp):
    """Resolve the persistent-storage directory and its log sub-path.

    An explicitly supplied ``log_path`` (used directly as the db directory)
    takes precedence; otherwise the hourly db directory is derived from
    ``timestamp``.

    Returns:
        (db_path, log_path) tuple, e.g.
        ('/path/to/persistent/2015040112/',
         '/path/to/persistent/2015040112/log')
    """
    db_path = log_path if log_path else utils.get_db_path(timestamp)
    return db_path, opath.join(db_path, settings.LOG_PATH)
def get_interval_timestamps(fromtime, endtime, interval):
    """Return timestamps from fromtime to endtime, one every `interval` seconds.

    The range is half-open [fromtime, endtime): fromtime is included,
    endtime is not.

    Args:
        fromtime: start timestamp (float)
        endtime: end timestamp (float)
        interval: step in seconds (int)

    Returns:
        list of timestamps; empty when fromtime >= endtime.
    """
    if fromtime >= endtime:
        return []
    ts = []
    while fromtime < endtime:
        ts.append(fromtime)
        fromtime = fromtime + interval
    # NOTE(review): a trailing branch
    #     if ts and ts[-1] + interval < endtime: ts.append(endtime)
    # was removed: after the loop ts[-1] + interval >= endtime always holds,
    # so it was unreachable (and would have contradicted the half-open
    # contract above anyway).
    return ts
def get_half_min_strs_fromtimestamp(fromtime, endtime ):
    """
    Return the timestamps between fromtime and endtime at 30-second steps
    (half-open range, see get_interval_timestamps).
    """
    return get_interval_timestamps(fromtime, endtime, 30)
def get_timestamp_belong_half_min(timestamp):
    """Map a timestamp onto the 30-second bucket boundary it belongs to.

    >>> t = 1471586420.0
    >>> get_timestamp_belong_half_min(t)
    1471586400.0
    >>> t = 1471586450.0
    >>> get_timestamp_belong_half_min(t)
    1471586430.0
    """
    secs_into_minute = timestamp % 60
    minute_start = timestamp - secs_into_minute
    # second half of the minute maps to the :30 boundary,
    # first half to the start of the minute
    return minute_start + 30 if secs_into_minute >= 30 else minute_start
def query_visit_stream(key, key_type, fromtime, endtime, specify_db_path=None, limit=None):
    """Return per-record visit info (user, timestamp, notice flag) for a time
    range inside one hour.

    Parameters:
    - key: (str) value of the dimension
    - key_type: (str) dimension, e.g. ip, page, user, did, ipc
    - fromtime: unix timestamp (float)
    - endtime: unix timestamp (float)
    - specify_db_path: (str) explicit persistent db directory, instead of
      deriving it from the timestamp
    - limit: (int) maximum number of entries returned (default 2000)

    Return:
        [{user: (or ip for non-ip dimensions), timestamp:, if_notice:}, ...]
        or None when fetching the logs failed.
    """
    if limit is None:
        # return at most 2000 entries by default
        limit = 2000
    # mapping from result column name to record attribute name
    col_mappings = {
        'ip': 'c_ip',
        'did': 'did',
        'user': 'uid',
        'page': 'page',
        'sid': 'sid'
    }
    records, err = get_request_log(key, fromtime, key_type, specify_db_path, limit=limit, end=endtime)
    if records:
        notice_statistic = dict()  # NOTE(review): never populated below -- looks vestigial
        http_count = 0
        traverse_count = 0
        result = []
        for record in records:
            if record:
                traverse_count += 1
                if record.name == 'HTTP_DYNAMIC':
                    http_count += 1
                # every mapped column except the queried dimension itself
                col_val = {show_col: record.get(get_col, '') for show_col, get_col in col_mappings.iteritems()
                           if show_col != key_type}
                if_notice = True if record.notices or notice_statistic.get(record.id, False) else False
                col_val['timestamp'] = record.timestamp
                col_val['if_notice'] = if_notice
                result.append(col_val)
                if traverse_count >= limit:
                    break
        logger.debug( DEBUG_PREFIX+'遍历过的非空的record 个数:%s', traverse_count)
        logger.debug( DEBUG_PREFIX+'遍历过的http_dynamic 个数:%s', http_count)
        return result
    else:
        # BUGFIX: the message had no %s placeholder, so passing `err` as a
        # lazy formatting argument raised a formatting error inside logging
        logger.error("散点图获得日志出错: %s", err)
        return
def query_clicks_period(key, key_type, fromtime, endtime, specify_db_path=None):
    """Aggregate DYNAMIC log records into 30-second buckets.

    Parameters:
    - key: (str) value of the dimension
    - key_type: (str) dimension, e.g. ip, page, user, did, ipc
    - fromtime: unix timestamp (float)
    - endtime: unix timestamp (float)
    - specify_db_path: (str) explicit persistent db directory, instead of
      deriving it from the timestamp

    Return:
        {
            timestamp1: {count:, if_notice:True},
            ...
        }
        or None when fetching the logs failed.
    """
    limit = 10000000 # 1kw means no limit
    records, err = get_request_log(key, fromtime, key_type, specify_db_path, limit=limit, end=endtime)
    if records:
        # one bucket per 30 s boundary inside [fromtime, endtime)
        result = dict( (ts, {'count':0, 'if_notice':False})
                       for ts in get_half_min_strs_fromtimestamp(fromtime, endtime))
        tmp_records = dict() # {id: {if_notice , timestamp}}
        http_count = 0
        traverse_count = 0
        for record in records:
            if record:
                traverse_count += 1
                if record.name == 'HTTP_DYNAMIC':
                    http_count += 1
                    tmp_records[record.id] = dict(if_notice=bool(record.notices), timestamp=record.timestamp)
                elif record.pid in tmp_records and \
                     not tmp_records[record.pid]['if_notice'] and record.notices:
                    # a child event's notice backfills its parent when the
                    # parent itself had none
                    tmp_records[record.pid]['if_notice'] = True
        for k, r in tmp_records.iteritems():
            # record timestamps are in milliseconds
            ts = get_timestamp_belong_half_min(r['timestamp']/ 1000.0)
            if ts not in result:
                logger.debug(DEBUG_PREFIX+u"不是查询范围内的时间戳 :%s", ts)
                continue
            result[ts]['count'] += 1
            if not result[ts]['if_notice']:
                # any noticed record inside the 30 s bucket marks the bucket
                result[ts]['if_notice'] = r['if_notice']
        logger.debug( DEBUG_PREFIX+'遍历过的非空的record 个数:%s', traverse_count)
        logger.debug( DEBUG_PREFIX+'遍历过的http_dynamic 个数:%s', http_count)
        return result
    else:
        # BUGFIX: the message had no %s placeholder, so passing `err` as a
        # lazy formatting argument raised a formatting error inside logging
        logger.error("散点图获得日志出错: %s", err)
        return
def find_next_log_db_path(log_path, start_time_ts, end_time_ts):
    """Scan forward in time for the next existing hourly log directory.

    Starting from the hour after the one `log_path` belongs to, walk hour by
    hour (oldest first) while staying inside [start_time_ts, end_time_ts]
    (datetime objects) and return the first db directory that exists on disk.

    log_path e.g. /path/to/persistent/2015040112/log
    Return:
        db_path e.g. /path/to/persistent/2015040112/ -- or None when no
        directory exists in range.
    """
    persist_prefix, current_hour = log_path.rsplit('/', 2)[:-1]
    candidate_hour = datetime.strptime(current_hour, global_settings.LogPath_Format) + timedelta(hours=1)
    while start_time_ts <= candidate_hour <= end_time_ts:
        candidate_path = opath.join(persist_prefix, candidate_hour.strftime(global_settings.LogPath_Format))
        if opath.exists(candidate_path):
            return candidate_path
        candidate_hour += timedelta(hours=1)
    return None
def query_log(start_time, end_time, query_terms, **kwargs):
    """Scan persistent log shards and return records matching query_terms.

    Arguments:
        start_time: unix timestamp, start of the query range
        end_time: unix timestamp, end of the query range
        query_terms: list of condition dicts ({op, left, right}); every
            returned record must satisfy all of them
    Keyword arguments:
        show_cols: columns to include in the result (the API layer
            guarantees it is non-empty)
        size: page size (default 20)
        specify_file_path: shard file to resume from (paging queries)
        offset: byte offset to resume from inside specify_file_path
        write_to_file: first query only -- also dump matches to a temp CSV
            and count the total number of matches
        request_id: id used to name the temp CSV; required

    Returns a dict with 'logs', 'last_file', 'last_offset' (and, for the
    first query, 'temp_log_path' and 'total'); None when no log directory
    exists in the queried range.
    """
    from nebula_utils.persist_compute.condition import eval_statement
    shards = range(settings.Shard_Size)
    size = kwargs.get('size', 20)
    specify_file_path = kwargs.get('specify_file_path', None)
    offset = kwargs.get('offset', None)
    write_to_file= kwargs.get('write_to_file', False)
    show_cols = kwargs.get('show_cols', []) # API layer guarantees non-empty
    request_id = kwargs.get('request_id', None)
    start_time_ts = datetime.fromtimestamp(start_time)
    end_time_ts = datetime.fromtimestamp(end_time)
    logger.debug(DEBUG_PREFIX+"查询的参数: start_time: %s, endtime: %s, args:%s", start_time_ts, end_time_ts, kwargs)
    if not show_cols or not request_id:
        # nothing to display / no distinguishable temp-file name: bail out
        logger.warn(u'日志查询没有 展示字段和请求id参数')
        return
    def query_record(record, query_conds):
        # keep a record only when it satisfies every query condition
        if record is None:
            return
        if all( eval_statement(conds.get('op'),
                               record.get( conds.get('left'), None),
                               conds.get('right', None))
                for conds in query_conds):
            return record
        return
    # register the query callbacks
    callbacks = []
    callbacks.append( partial(query_record, query_conds=query_terms) )
    if specify_file_path is None:
        # first query: derive the hourly log dir from start_time and begin
        # at shard 0, with the default offset of None
        try:
            db_path, log_path = get_log_path(None, start_time)
            filepath = opath.join(log_path, '0')
        except ValueError:
            logger.critical(u'timestamp: %s can not gain log path.', start_time)
            traceback.print_exc()
            return None
    else:
        # paging query: resume from the previous file; offset is usually set
        filepath = specify_file_path
        db_path = specify_file_path.rsplit('/', 2)[0]
        # BUGFIX: log_path was never assigned on this branch, which raised a
        # NameError below when db_path no longer exists
        log_path = specify_file_path.rsplit('/', 1)[0]
    logger.debug(DEBUG_PREFIX+ u'查询的日志目录是:%s', filepath)
    # load the schema and header-key version of the queried hour
    logger.debug(DEBUG_PREFIX+ 'load 当前小时的配置: %s', db_path)
    if not opath.exists(db_path):
        db_path = find_next_log_db_path(log_path, start_time_ts, end_time_ts)
        if db_path is None:
            # no log directory inside the queried time range
            return None #dict(info=u'查询时间范围内没有日志文件') @todo 查询结果信息
        else:
            filepath = opath.join(db_path, settings.LOG_PATH , '0')
            logger.debug(DEBUG_PREFIX+ u'查询的日志目录是:%s', filepath)
            # load the schema and header-key version of the queried hour
            logger.debug(DEBUG_PREFIX+ 'load 当前小时的配置: %s', db_path)
    utils.load_schema(db_path)
    utils.load_header_version(db_path)
    records = []
    result = dict()
    temp_records_file = None
    has_record_offset = False
    try:
        if write_to_file:
            # only the first query writes the temp file and counts totals
            result['temp_log_path'] = 'log_query_%s.csv' % request_id # just name
            temp_records_file_path = opath.join(global_settings.Temp_Query_Path, result['temp_log_path'])
            temp_records_file = open(temp_records_file_path, 'a') # @todo settings
            temp_records_file.write(codecs.BOM_UTF8)
            csv_writer = csv.writer(temp_records_file)
            csv_writer.writerow(show_cols)
            # running total of matching records scanned
            result['total'] = 0
        while True:
            for record, context in Record.record_generator(filepath, offset, end_time):
                # callbacks are applied here (not inside record parsing) so
                # an error does not abort the whole scan
                if callbacks:
                    for func in callbacks:
                        record = func(record)
                if record:
                    if len(records) < size:
                        # fill the page, first query or not
                        records.append(record)
                    elif len(records) == size:
                        if not has_record_offset:
                            # the page is full: remember where to resume
                            result['logs'] = [
                                dict( (col,r.get(col, None)) for col in show_cols)
                                for r in records]
                            result['last_file'] = context['filepath']
                            result['last_offset'] = context['offset']
                            has_record_offset = True
                        # break
                    if write_to_file:
                        # first query: dump displayed columns to the temp CSV
                        csv_writer.writerow([ record.get(col , None) for col in show_cols])
                        # count the match
                        result['total'] += 1
                else:
                    continue
            # paging query: once the next page is full there is no need to
            # scan the remaining files
            if has_record_offset and not write_to_file:
                break
            # finished one shard without filling the page: move to the next
            # shard, or to the next hour's directory when shards run out
            log_path, shard_name = filepath.rsplit('/', 1)
            next_filename = int(shard_name) + 1
            # when resuming from an offset, every other file must be
            # scanned from the beginning
            offset = 0
            if next_filename < settings.Shard_Size: #@todo
                filepath = opath.join(log_path, str(next_filename))
                logger.info(u'继续查找文件:%s', filepath)
            else:
                db_path = find_next_log_db_path(log_path, start_time_ts, end_time_ts)
                if db_path is None:
                    if 'logs' not in result:
                        # the last page may be short: when the range is
                        # exhausted, return what was found -- without
                        # last_file / last_offset
                        result['logs'] = [
                            dict( (col,r.get(col, None)) for col in show_cols)
                            for r in records]
                    break
                filepath = opath.join(db_path, settings.LOG_PATH , '0')
                # load the schema and header-key version of the new hour
                logger.debug(DEBUG_PREFIX+ 'load 当前小时的配置: %s', db_path)
                utils.load_schema(db_path)
                utils.load_header_version(db_path)
    finally:
        if temp_records_file is not None:
            # the first query wrote matches to the temp file; close it
            temp_records_file.close()
    return result
def get_request_log(key, timestamp, key_type, specify_db_path=None, eid=None, query=None, limit=None, end=None):
"""
查询单条日志详情或者查询日志列表的工具函数
Paramter:
- key: (str)维度的值
- key_type: (str)维度, ex. ip, page, user, did ,ipc
- timestamp: 时间戳 unix(float) 起始时间戳
- specify_db_path:(str)
指定的持久化数据库目录, 而非从timestamp中获取
- eid:(ObjectId)
查询页面详情的时候需要的event id(come from bson ObjectId).
- query:(str)
过滤record的查询参数
- limit:(int) 限制返回的个数
- end: 时间戳 unix(float) 结束时间戳
leveldb key pattern: 4bytes of ip, 4bytes of timestamp(分钟秒毫秒)
"""
# a.获得持久化存储日志path, ex.
# - db_path ex. /path/to/persistent/2015040112/index
# - log_path ex. /path/to/persistent/2015040112/log
from nebula_utils.persist_compute.condition import eval_statement
try:
db_path, log_path = get_log_path(specify_db_path, timestamp)
except ValueError as e:
logger.critical(u'key: %s, key_type: %s, specify_db_path: %s, timestamp: %s can not gain log path.', key, key_type, specify_db_path, timestamp)
traceback.print_exc()
return None, 'key:%s, %s' % (key, e.message)
# 查询返回数量的限制, 默认20
if limit is None:
limit = 20
# load查询小时内schema和header key的version
utils.load_schema(db_path)
utils.load_header_version(db_path)
db_path = opath.join(db_path, settings.INDEX_PATH)
# a. checkpoint db_path
logger.debug( DEBUG_PREFIX+ '查询的db_path is: %s', db_path)
logger.debug( DEBUG_PREFIX+ '查询的log_path is: %s', log_path)
# 匹配 event id, 各种维度值的过滤函数
# @todo 有event id匹配的,只匹配到一个就丢出来了
filters = []
filters.append( partial(Record.filter_header_keys, key=key, key_type=key_type) )
if eid:
filters.append( partial(Record.filter_event_id, event_id=eid) )
def filter_timestamp(record, fromtime, endtime):
if record is None:
return None
if fromtime and record.timestamp < fromtime * 1000:
return None
if endtime and record.timestamp > endtime * 1000:
return None
return record
def query_record(record, query):
if record is None:
return None
if all([any(eval_statement(cond.get('op'), record.get(cond.get('left'), None), cond.get('right', None))
for cond in conds) for conds in query]):
return record
return None
# 查询、格式化record字段的毁掉函数注册
callbacks = list()
callbacks.append(partial(filter_timestamp, fromtime=timestamp, endtime=end))
try:
db = get_db(db_path)
if not db:
return
if eid:
log_offsets = Index.get_offsets(key, key_type, db, timestamp)
else:
log_offsets = | |
pass
if time_range[0] is None:
# first partition of sequence
DS_t_numtype = traj.indepdomain.type
if isinputcts(traj):
DS_t_typestr = 'continuous'
# continuous time must be float
if not compareNumTypes(DS_t_numtype, _all_float):
raise TypeError('continuous time inconsistent with '
'non-float type')
else:
DS_t_typestr = 'discrete'
# discrete time can be floats or ints
traj_vars = traj._FScompatibleNamesInv(traj.variables)
assert intersect(self.allvars, traj_vars.keys()), \
"variable name for traj not present in sub-model's variables"
# Pick one observable variable ('varname') to test with -- they
# must all have the same type. This variable must be present in
# all trajectory segments.
varname = self.obsvars[0]
if varname in traj_vars:
DS_x_numtype = traj_vars[varname].depdomain.type
else:
raise ValueError("varname not known")
## assert DS_x_numtype == self.obsvars[varname], \
## ('Mismatch between declared type of variable and '
## 'that found in trajectory segment')
time_range = traj.indepdomain.get()
try:
time_range[0] += traj.globalt0
time_range[1] += traj.globalt0
except IndexError:
raise ValueError('time interval of segment in hybrid '
'trajectory cannot be singleton')
# DEBUG
# temp
# print "\n---- %s _addTraj case 1: "%self.name
# print traj.indepdomain.get()
# print time_range
# print traj.globalt0
time_partitions = [(traj.indepdomain, \
traj.globalt0, \
traj.checklevel)] # initial value
else:
# remove the following line to support trajectories that
# only have partially-defined variable information, esp.
# if part is a map (vars defined only at discrete times)
if not compareNumTypes(DS_t_numtype, traj.indepdomain.type):
raise TypeError('Mismatched time types for hybrid '
'trajectory sequence')
# print "\n---- %s _addTraj case 2: "%self.name
# print traj.indepdomain.get()
# temp = copy.copy(time_range)
# temp[1] += traj.indepdomain[1]
# print temp
# print traj.globalt0
if not traj.indepdomain.atEndPoint(time_range[1] \
- traj.globalt0, 'lo'):
# temp
# print "\n***", time_range
# print traj.indepdomain.get()
# print traj.globalt0
raise ValueError('Hybrid trajectory sequence time intervals'
' must be contiguous: ' + \
str(traj.indepdomain[0]) + ' vs. ' + \
str(time_range[1]-traj.globalt0))
time_range[1] += traj.indepdomain[1]
time_partitions.append((traj.indepdomain, \
traj.globalt0, \
traj.checklevel))
# Full time interval of the trajectory
time_interval = Interval(indepvarname, DS_t_numtype, time_range,
abseps=self._abseps)
# Add to trajectory dictionary, using hierarchical event names for
# genEvents and genEventTimes (which are called directly by user methods
# of Model)
self.trajectories.update({trajname: \
HybridTrajectory(trajname, trajseq,
timePartitions=time_partitions,
timeInterval=time_interval,
eventTimes=genEventTimes,
events=genEvents,
modelEventStructs=genEvStructs,
modelNames=modelNames,
FScompatibleNames=FScompatibleNames,
FScompatibleNamesInv=FScompatibleNamesInv,
abseps=self._abseps,
globalt0=0, norm=self._normord)
})
def haveJacobian(self):
"""Returns True iff all objects in modelInfo have
defined Jacobians."""
result = True
for model in self.registry.values():
result = result and model.haveJacobian()
return result
def haveJacobian_pars(self):
"""Returns True iff all objects in modelInfo have
defined Jacobians."""
result = True
for model in self.registry.values():
result = result and model.haveJacobian_pars()
return result
def Rhs(self, dsName, t, xdict, pdict=None, asarray=False):
"""Direct access to a sub-model generator's Rhs function.
Arguments:
dsName Name of a sub-model
t time (can use 0 for an autonomous system)
xdict state dictionary or Point.
pdict parameter dictionary or Point
(optional, default current parameters)
asarray (Bool, optional, default False) If true, will return an array
in state name alphabetical order, else a Point
"""
try:
dsi = self.modelInfo[dsName]['dsi']
except KeyError:
raise ValueError("No DS named %s was found"%dsName)
if pdict is None:
pdict = self.pars
if asarray:
return dsi.Rhs(t, xdict, pdict, asarray=True)
else:
# get returns FS compatible names
varnames = dsi.get('funcspec', xdict, t).vars
return Point({'coorddict': dict(zip(varnames,
dsi.Rhs(t, xdict, pdict))),
'coordtype': float,
'norm': self._normord})
def Jacobian(self, dsName, t, xdict, pdict=None, asarray=False):
"""Direct access to a sub-model generator's Jacobian function (if defined).
Arguments:
dsName Name of a sub-model
t time (can use 0 for an autonomous system)
xdict state dictionary or Point.
pdict parameter dictionary or Point
(optional, default current parameters)
asarray (Bool, optional, default False) If true, will return an array
in state name alphabetical order, else a Point
"""
try:
dsi = self.modelInfo[dsName]['dsi']
except KeyError:
raise ValueError("No DS named %s was found"%dsName)
if pdict is None:
pdict = self.pars
if dsi.haveJacobian():
if asarray:
return dsi.Jacobian(t, xdict, pdict, asarray=True)
else:
varnames = dsi.get('funcspec', xdict, t).vars
return Pointset({'coorddict': dict(zip(varnames,
dsi.Jacobian(t, xdict, pdict))),
'coordtype': float,
'norm': self._normord})
else:
raise PyDSTool_ExistError("Jacobian not defined")
def JacobianP(self, dsName, t, xdict, pdict=None, asarray=False):
"""Direct access to a generator's JacobianP function (if defined).
Arguments:
dsName Name of a sub-model
t time (can use 0 for an autonomous system)
xdict state dictionary or Point.
pdict parameter dictionary or Point
(optional, default current parameters)
asarray (Bool, optional, default False) If true, will return an array
in state name alphabetical order, else a Point
"""
try:
dsi = self.modelInfo[dsName]['dsi']
except KeyError:
raise ValueError("No DS named %s was found"%dsName)
if dsi.haveJacobian_pars():
if pdict is None:
pdict = self.pars
if asarray:
return dsi.JacobianP(t, xdict, pdict, asarray=True)
else:
parnames = dsi.get('funcspec', xdict, t).pars
return Pointset({'coorddict': dict(zip(parnames,
dsi.JacobianP(t, xdict, pdict))),
'coordtype': float,
'norm': self._normord})
else:
raise PyDSTool_ExistError("Jacobian w.r.t. pars not defined")
def MassMatrix(self, dsName, t, xdict, pdict=None, asarray=False):
"""Direct access to a generator's MassMatrix function (if defined).
Arguments:
dsName Name of a sub-model
t time (can use 0 for an autonomous system)
xdict state dictionary or Point.
pdict parameter dictionary or Point
(optional, default current parameters)
asarray (Bool, optional, default False) If true, will return an array
in state name alphabetical order, else a Point
"""
try:
dsi = self.modelInfo[dsName]['dsi']
except KeyError:
raise ValueError("No DS named %s was found"%dsName)
if pdict is None:
pdict = self.pars
if asarray:
dsi.MassMatrix(t, xdict, pdict, asarray=True)
else:
varnames = dsi.get('funcspec', xdict, t).vars
return Point({'coorddict': dict(zip(varnames,
dsi.MassMatrix(t, xdict, pdict))),
'coordtype': float,
'norm': self._normord})
def AuxVars(self, dsName, t, xdict, pdict=None, asarray=False):
"""Direct access to a generator's auxiliary variables
definition (if defined).
Arguments:
dsName Name of a sub-model
t time (can use 0 for an autonomous system)
xdict state dictionary or Point.
pdict parameter dictionary or Point
(optional, default current parameters)
asarray (Bool, optional, default False) If true, will return an array
in state name alphabetical order, else a Point
"""
try:
dsi = self.modelInfo[dsName]['dsi']
except KeyError:
raise ValueError("No DS named %s was found"%dsName)
if pdict is None:
pdict = self.pars
if asarray:
return dsi.AuxVars(t, xdict, pdict, asarray=True)
else:
auxvarnames = dsi.get('funcspec', xdict, t).auxvars
return Point({'coorddict': dict(list(zip(auxvarnames,
dsi.AuxVars(t, xdict, pdict)))),
'coordtype': float,
'norm': self._normord})
def compute(self, trajname, **kw):
"""Compute a hybrid trajectory and store it internally in the 'trajectories'
attribute.
Arguments (non-keyword):
trajname Name of trajectory to create (string)
Arguments (keyword only, all optional):
force (Bool, default False) - force overwrite of any trajectory
stored in this object with the same name
verboselevel (int, default 0)
ics initial conditions dict or Point
pars parameters dict or Point
tdata time data (interval as sequence of 2 numeric values)
"""
# initially expect to compute over a global time interval,
# [t0_global, t1_global], which is truncated if it extends beyond
# DS's independent variable domain to give [t0, t1].
# DS tdata is then set to compute over relative time interval,
# [0, t1-t0].
tdata, t0_global, t1_global, force_overwrite = \
Model._prepareCompute(self, trajname, **kw)
# Set initial reason for an epoch to end to None to initiate
# search for eligible Generator or Model for first epoch
end_reasons = None
# check initial condition specification in icdict
if self.icdict == {}:
raise PyDSTool_ExistError("No initial conditions specified")
xdict = {}
for xname, value in self.icdict.items():
# ensure string in case Symbolic
xname = str(xname)
if xname not in self.allvars:
raise ValueError("Invalid variable name in initial "
"conditions: " + xname)
xdict[xname] = ensurefloat(value)
# clean up self.icdict
self.icdict = xdict.copy()
# initial values
notDone = True # finished computing trajectory segment?
t0 = t0_global
partition_num = 0
trajseq = []
traj = None
ti_1 = None
modelNames = []
epochEvents = []
MI_prev = None
MI = None
swRules = None
# flag for re-use of a model from one hybrid segment to the next
reused = False
# reset persistent storage of event times
self.resetEventTimes()
# From t0 (if non-autonomous system), icdict, and switching rules
# (self-consistency validity conditions), determine which model
# applies for the initial portion of the trajectory
while notDone:
# find appropriate model to compute trajectory segment
# (reused flag indicates whether MI is the same as on previous loop
# but is not | |
r = s.post(url, data=data)
if r.status_code == 401:
r = self.authenticate(data, r, url)
if r.status_code == 201:
constants.MSC_LOGIN_CACHE[self.mscolab_server_url] = (username, password)
if r.status_code == 201:
self.error_dialog = QtWidgets.QErrorMessage()
self.error_dialog.showMessage('You are registered, you can now log in.')
else:
self.error_dialog = QtWidgets.QErrorMessage()
self.error_dialog.showMessage(r.json()["message"])
else:
self.error_dialog = QtWidgets.QErrorMessage()
self.error_dialog.showMessage('Oh no, your passwords don\'t match')
    def close_help_dialog(self):
        """Slot for the help dialog's viewCloses signal: drop our reference
        (the dialog deletes itself via WA_DeleteOnClose, see
        open_help_dialog)."""
        self.help_dialog = None
def open_help_dialog(self):
if self.help_dialog is not None:
self.help_dialog.raise_()
self.help_dialog.activateWindow()
else:
self.help_dialog = MscolabHelpDialog(self)
self.help_dialog.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.help_dialog.viewCloses.connect(self.close_help_dialog)
self.help_dialog.show()
def handle_delete_project(self):
entered_project_name, ok = QtWidgets.QInputDialog.getText(
self,
self.tr('Delete Project'),
self.tr(f"You're about to delete the project - '{self.active_project_name}'. "
f"Enter the project name to confirm: "))
if ok:
if entered_project_name == self.active_project_name:
data = {
"token": <PASSWORD>,
"p_id": self.active_pid
}
url = url_join(self.mscolab_server_url, 'delete_project')
try:
res = requests.post(url, data=data)
res.raise_for_status()
except requests.exceptions.RequestException as e:
logging.debug(e)
show_popup(self, "Error", "Some error occurred! Could not delete project.")
else:
show_popup(self, "Error", "Entered project name did not match!")
def open_chat_window(self):
if self.active_pid is None:
return
if self.chat_window is not None:
self.chat_window.raise_()
self.chat_window.activateWindow()
return
self.chat_window = mp.MSColabProjectWindow(self.token, self.active_pid, self.user, self.active_project_name,
self.access_level, self.conn,
mscolab_server_url=self.mscolab_server_url)
self.chat_window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.chat_window.viewCloses.connect(self.close_chat_window)
self.chat_window.reloadWindows.connect(self.reload_windows_slot)
self.chat_window.show()
    def close_chat_window(self):
        """Slot for the chat window's viewCloses signal: re-focus the main
        window and drop the reference (the window deletes itself via
        WA_DeleteOnClose, see open_chat_window)."""
        self.raise_()
        self.chat_window = None
def open_admin_window(self):
if self.active_pid is None:
return
if self.admin_window is not None:
self.admin_window.raise_()
self.admin_window.activateWindow()
return
self.admin_window = maw.MSColabAdminWindow(self.token, self.active_pid, self.user,
self.active_project_name, self.projects, self.conn,
mscolab_server_url=self.mscolab_server_url)
self.admin_window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.admin_window.viewCloses.connect(self.close_admin_window)
self.admin_window.show()
    def close_admin_window(self):
        """Slot for the admin window's viewCloses signal: re-focus the main
        window and drop the reference (the window deletes itself via
        WA_DeleteOnClose, see open_admin_window)."""
        self.raise_()
        self.admin_window = None
def open_version_history_window(self):
if self.active_pid is None:
return
if self.version_window is not None:
self.version_window.raise_()
self.version_window.activateWindow()
return
self.version_window = mvh.MSColabVersionHistory(self.token, self.active_pid, self.user,
self.active_project_name, self.conn,
mscolab_server_url=self.mscolab_server_url)
self.version_window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.version_window.viewCloses.connect(self.close_version_history_window)
self.version_window.reloadWindows.connect(self.reload_windows_slot)
self.version_window.show()
    def close_version_history_window(self):
        """Slot for the version-history window's viewCloses signal: re-focus
        the main window and drop the reference (the window deletes itself via
        WA_DeleteOnClose, see open_version_history_window)."""
        self.raise_()
        self.version_window = None
def create_local_project_file(self):
with open_fs(self.data_dir) as mss_dir:
rel_file_path = fs.path.join('local_mscolab_data', self.user['username'],
self.active_project_name, 'mscolab_project.ftml')
if mss_dir.exists(rel_file_path) is True:
return
mss_dir.makedirs(fs.path.dirname(rel_file_path))
server_data = self.waypoints_model.get_xml_content()
mss_dir.writetext(rel_file_path, server_data)
def handle_work_locally_toggle(self):
if self.workLocallyCheckBox.isChecked():
if self.version_window is not None:
self.version_window.close()
self.create_local_project_file()
self.local_ftml_file = fs.path.combine(self.data_dir,
fs.path.join('local_mscolab_data',
self.user['username'], self.active_project_name,
'mscolab_project.ftml'))
self.helperTextLabel.setText(
self.tr("Working On: Local File. Your changes are only available to you."
"To save your changes with everyone, use the \"Save to Server\" button."))
self.save_ft.setEnabled(True)
self.fetch_ft.setEnabled(True)
self.versionHistoryBtn.setEnabled(False)
self.reload_local_wp()
else:
self.local_ftml_file = None
self.helperTextLabel.setText(
self.tr("Working On: Shared File. All your changes will be shared with everyone."
"Turn on work locally to work on local flight track file"))
self.save_ft.setEnabled(False)
self.fetch_ft.setEnabled(False)
if self.access_level == "admin" or self.access_level == "creator":
self.versionHistoryBtn.setEnabled(True)
self.waypoints_model = None
self.load_wps_from_server()
self.reload_view_windows()
def authorize(self):
for key, value in config_loader(dataset="MSC_login").items():
if key not in constants.MSC_LOGIN_CACHE:
constants.MSC_LOGIN_CACHE[key] = value
auth = constants.MSC_LOGIN_CACHE.get(self.mscolab_server_url, (None, None))
# get mscolab /token http auth credentials from cache
emailid = self.emailid.text()
password = self.password.text()
data = {
"email": emailid,
"password": password
}
s = requests.Session()
s.auth = (auth[0], auth[1])
s.headers.update({'x-test': 'true'})
url = self.mscolab_server_url + '/token'
try:
r = s.post(url, data=data)
except requests.exceptions.ConnectionError as ex:
logging.error("unexpected error: %s %s %s", type(ex), url, ex)
# popup that Failed to establish a connection
self.error_dialog = QtWidgets.QErrorMessage()
self.error_dialog.showMessage('Failed to establish a new connection'
f' to "{self.mscolab_server_url}". Try in a moment again.')
return
if r.status_code == 401:
r = self.authenticate(data, r, url)
if r.status_code == 200 and not r.text == "False":
constants.MSC_LOGIN_CACHE[self.mscolab_server_url] = (auth[0], auth[1])
else:
self.error_dialog = QtWidgets.QErrorMessage()
self.error_dialog.showMessage('Oh no, server authentication were incorrect.')
if r.text == "False" or r.text == "Unauthorized Access":
# popup that has wrong credentials
self.error_dialog = QtWidgets.QErrorMessage()
self.error_dialog.showMessage('Oh no, your credentials were incorrect.')
else:
# remove the login modal and put text there
self.after_authorize(emailid, r)
def after_authorize(self, emailid, r):
_json = json.loads(r.text)
self.token = _json["token"]
self.user = _json["user"]
self.label.setText(self.tr(f"Welcome, {self.user['username']}"))
self.loggedInWidget.show()
self.loginWidget.hide()
self.add_projects()
# create socket connection here
self.conn = sc.ConnectionManager(self.token, user=self.user, mscolab_server_url=self.mscolab_server_url)
self.conn.signal_reload.connect(self.reload_window)
self.conn.signal_new_permission.connect(self.render_new_permission)
self.conn.signal_update_permission.connect(self.handle_update_permission)
self.conn.signal_revoke_permission.connect(self.handle_revoke_permission)
self.conn.signal_project_deleted.connect(self.handle_project_deleted)
# activate add project button here
self.addProject.setEnabled(True)
save_settings_qsettings('mscolab', self.settings)
def add_projects(self):
# add projects
data = {
"token": self.token
}
r = requests.get(self.mscolab_server_url + '/projects', data=data)
_json = json.loads(r.text)
self.projects = _json["projects"]
self.add_projects_to_ui(self.projects)
def get_recent_pid(self):
"""
get most recent project's p_id
"""
data = {
"token": self.token
}
r = requests.get(self.mscolab_server_url + '/projects', data=data)
_json = json.loads(r.text)
projects = _json["projects"]
p_id = None
if projects:
p_id = projects[-1]["p_id"]
return p_id
def get_recent_project(self):
"""
get most recent project
"""
data = {
"token": self.token
}
r = requests.get(self.mscolab_server_url + '/projects', data=data)
_json = json.loads(r.text)
projects = _json["projects"]
recent_project = None
if projects:
recent_project = projects[-1]
return recent_project
def add_projects_to_ui(self, projects):
logging.debug("adding projects to ui")
self.listProjects.clear()
selectedProject = None
for project in projects:
project_desc = f'{project["path"]} - {project["access_level"]}'
widgetItem = QtWidgets.QListWidgetItem(project_desc, parent=self.listProjects)
widgetItem.p_id = project["p_id"]
widgetItem.access_level = project["access_level"]
if widgetItem.p_id == self.active_pid:
selectedProject = widgetItem
self.listProjects.addItem(widgetItem)
if selectedProject is not None:
self.listProjects.setCurrentItem(selectedProject)
self.listProjects.itemActivated.emit(selectedProject)
self.listProjects.itemActivated.connect(self.set_active_pid)
def force_close_view_windows(self):
for window in self.active_windows[:]:
window.handle_force_close()
self.active_windows = []
def set_active_pid(self, item):
if item.p_id == self.active_pid:
return
# close all hanging window
self.force_close_view_windows()
self.close_external_windows()
# Turn off work locally toggle
self.workLocallyCheckBox.blockSignals(True)
self.workLocallyCheckBox.setChecked(False)
self.workLocallyCheckBox.blockSignals(False)
self.save_ft.setEnabled(False)
self.fetch_ft.setEnabled(False)
# set active_pid here
self.active_pid = item.p_id
self.access_level = item.access_level
self.active_project_name = item.text().split("-")[0].strip()
self.waypoints_model = None
# set active flightpath here
self.load_wps_from_server()
# enable project specific buttons
self.helperTextLabel.setVisible(True)
self.helperTextLabel.setText(self.tr("Working On: Shared File. All your changes will be shared with everyone."
"Turn on work locally to work on local flight track file"))
self.importBtn.setEnabled(True)
self.exportBtn.setEnabled(True)
self.topview.setEnabled(True)
self.sideview.setEnabled(True)
self.tableview.setEnabled(True)
self.workLocallyCheckBox.setEnabled(True)
if self.access_level == "viewer" or self.access_level == "collaborator":
if self.access_level == "viewer":
self.workLocallyCheckBox.setEnabled(False)
self.importBtn.setEnabled(False)
self.chatWindowBtn.setEnabled(False)
else:
self.chatWindowBtn.setEnabled(True)
self.adminWindowBtn.setEnabled(False)
self.versionHistoryBtn.setEnabled(False)
else:
self.adminWindowBtn.setEnabled(True)
self.chatWindowBtn.setEnabled(True)
self.versionHistoryBtn.setEnabled(True)
if self.access_level == "creator":
self.deleteProjectBtn.setEnabled(True)
else:
self.deleteProjectBtn.setEnabled(False)
# change font style for selected
font = QtGui.QFont()
for i in range(self.listProjects.count()):
self.listProjects.item(i).setFont(font)
font.setBold(True)
item.setFont(font)
def reload_wps_from_server(self):
if self.active_pid is None:
return
self.load_wps_from_server()
self.reload_view_windows()
def request_wps_from_server(self):
data = {
"token": <PASSWORD>.token,
"p_id": self.active_pid
}
r = requests.get(self.mscolab_server_url + '/get_project_by_id', data=data)
xml_content = json.loads(r.text)["content"]
return xml_content
def load_wps_from_server(self):
if self.workLocallyCheckBox.isChecked():
return
xml_content = self.request_wps_from_server()
self.waypoints_model = ft.WaypointsTableModel(xml_content=xml_content)
self.waypoints_model.dataChanged.connect(self.handle_waypoints_changed)
def open_topview(self):
# showing dummy info dialog
if self.active_pid is None:
return
self.create_view_window("topview")
def open_sideview(self):
# showing dummy info dialog
if self.active_pid is None:
return
self.create_view_window("sideview")
def open_tableview(self):
# showing dummy info dialog
if self.active_pid is None:
return
self.create_view_window("tableview")
def create_view_window(self, _type):
for active_window in self.active_windows:
if active_window.view_type == _type:
active_window.raise_()
active_window.activateWindow()
return
if _type == "topview":
view_window = topview.MSSTopViewWindow(model=self.waypoints_model,
parent=self.listProjects,
_id=self.id_count)
view_window.view_type = "topview"
elif _type == "sideview":
view_window = sideview.MSSSideViewWindow(model=self.waypoints_model,
parent=self.listProjects,
_id=self.id_count)
view_window.view_type = "sideview"
else:
view_window = tableview.MSSTableViewWindow(model=self.waypoints_model,
parent=self.listProjects,
_id=self.id_count)
view_window.view_type = "tableview"
if self.access_level == "viewer":
self.disable_navbar_action_buttons(_type, view_window)
view_window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
view_window.show()
view_window.viewClosesId.connect(self.handle_view_close)
self.active_windows.append(view_window)
# increment id_count
self.id_count += 1
def disable_navbar_action_buttons(self, _type, view_window):
"""
_type: view type (topview, sideview, tableview)
view_window: PyQt view window
function disables some control, used if access_level is not appropriate
"""
if _type == "topview" or _type == "sideview":
actions = view_window.mpl.navbar.actions()
for action in actions:
action_text = action.text()
if action_text == "Ins WP" or action_text == "Del WP" or action_text == "Mv WP":
action.setEnabled(False)
else:
# _type == tableview
view_window.btAddWayPointToFlightTrack.setEnabled(False)
view_window.btCloneWaypoint.setEnabled(False)
view_window.btDeleteWayPoint.setEnabled(False)
view_window.btInvertDirection.setEnabled(False)
def enable_navbar_action_buttons(self, _type, view_window):
"""
_type: view type (topview, sideview, tableview)
view_window: PyQt view window
function enables some control, used if access_level is appropriate
"""
if _type == "topview" or _type == "sideview":
actions = view_window.mpl.navbar.actions()
for action in actions:
action_text = action.text()
if action_text == "Ins WP" or action_text == "Del WP" or action_text == "Mv WP":
action.setEnabled(True)
else:
# _type == tableview
view_window.btAddWayPointToFlightTrack.setEnabled(True)
view_window.btCloneWaypoint.setEnabled(True)
view_window.btDeleteWayPoint.setEnabled(True)
view_window.btInvertDirection.setEnabled(True)
def logout(self):
self.clean_up_window()
self.emailid.setEnabled(True)
self.password.setEnabled(True)
def delete_account(self):
w = QtWidgets.QWidget()
qm = QtWidgets.QMessageBox
reply = qm.question(w, self.tr('Continue?'),
self.tr("You're about to delete your account. You cannot undo this operation!"),
qm.Yes, qm.No)
if reply == QtWidgets.QMessageBox.No:
return
data = {
"token": self.token
}
requests.post(self.mscolab_server_url + '/delete_user', data=data)
self.clean_up_window()
def close_external_windows(self):
if self.chat_window is not None:
self.chat_window.close()
if self.admin_window is not None:
self.admin_window.close()
if self.version_window is not None:
self.version_window.close()
def clean_up_window(self):
# delete token and show login widget-items
self.token = None
# delete active-project-id
self.active_pid = None
# delete active access_level
self.access_level = None
# delete active project_name
self.active_project_name = None
# delete local file name
self.local_ftml_file = None
# clear projects list here
self.loggedInWidget.hide()
self.loginWidget.show()
# clear project listing
self.listProjects.clear()
# disconnect socket
if self.conn is not None:
self.conn.disconnect()
self.conn = None
# close all hanging window
self.force_close_view_windows()
self.close_external_windows()
self.disable_action_buttons()
# delete mscolab http_auth settings for the url
if self.mscolab_server_url in self.settings["auth"].keys():
| |
"""
Phase vocoder.
The phase vocoder is a digital signal processing technique of potentially
great musical significance. It can be used to perform very high fidelity
time scaling, pitch transposition, and myriad other modifications of sounds.
"""
from __future__ import absolute_import
"""
Copyright 2009-2015 <NAME>
This file is part of pyo, a python module to help digital signal
processing script creation.
pyo is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
pyo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with pyo. If not, see <http://www.gnu.org/licenses/>.
"""
from ._core import *
from ._maps import *
from ._widgets import createSpectrumWindow
from .pattern import Pattern
class PVAnal(PyoPVObject):
    """
    Phase Vocoder analysis object.

    PVAnal performs the phase vocoder analysis on its input sound,
    producing two streams: the bin magnitudes and the bin true
    frequencies. These streams feed the PVxxx family of objects, which
    transform the signal with spectral-domain algorithms. The chain must
    end with a PVSynth to convert back to the time domain.

    :Parent: :py:class:`PyoPVObject`

    :Args:

        input: PyoObject
            Input signal to process.
        size: int {pow-of-two > 4}, optional
            FFT size, i.e. the number of samples used in each analysis
            frame. Must be a power of two greater than 4. Defaults to 1024.
        overlaps: int, optional
            The number of overlapped analysis blocks. Must be a power of
            two. Defaults to 4. More overlaps can greatly improve the
            quality of the resynthesis, at a higher CPU cost.
        wintype: int, optional
            Shape of the envelope used to filter each input frame.
            Possible shapes are:
                0. rectangular (no windowing)
                1. Hamming
                2. Hanning (default)
                3. Bartlett (triangular)
                4. Blackman 3-term
                5. Blackman-Harris 4-term
                6. Blackman-Harris 7-term
                7. Tukey (alpha = 0.66)
                8. Sine (half-sine window)
        callback: callable, optional
            If not None (default), this function is called with the result
            of the analysis at the end of every overlap. It receives two
            lists of floats, the magnitudes and the frequencies:

                callback(magnitudes, frequencies)

            When analysing a multi-channel signal, pass a list of
            callables, one per channel.

    >>> s = Server().boot()
    >>> s.start()
    >>> a = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=0.7)
    >>> pva = PVAnal(a, size=1024, overlaps=4, wintype=2)
    >>> pvs = PVSynth(pva).mix(2).out()

    """

    def __init__(self, input, size=1024, overlaps=4, wintype=2, callback=None):
        pyoArgsAssert(self, "oiiic", input, size, overlaps, wintype, callback)
        PyoPVObject.__init__(self)
        self._input = input
        self._size = size
        self._overlaps = overlaps
        self._wintype = wintype
        self._callback = callback
        self._in_fader = InputFader(input)
        args = convertArgsToLists(self._in_fader, size, overlaps, wintype, callback)
        in_fader, size, overlaps, wintype, callback, lmax = args
        self._base_objs = []
        for i in range(lmax):
            self._base_objs.append(
                PVAnal_base(wrap(in_fader, i), wrap(size, i), wrap(overlaps, i),
                            wrap(wintype, i), wrap(callback, i)))
        self._init_play()

    def setInput(self, x, fadetime=0.05):
        """
        Replace the `input` attribute.

        :Args:

            x: PyoObject
                New signal to process.
            fadetime: float, optional
                Crossfade time between old and new input. Default to 0.05.

        """
        pyoArgsAssert(self, "oN", x, fadetime)
        self._input = x
        self._in_fader.setInput(x, fadetime)

    def setSize(self, x):
        """
        Replace the `size` attribute.

        :Args:

            x: int
                new `size` attribute.

        """
        pyoArgsAssert(self, "i", x)
        self._size = x
        x, _ = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setSize(wrap(x, i))

    def setOverlaps(self, x):
        """
        Replace the `overlaps` attribute.

        :Args:

            x: int
                new `overlaps` attribute.

        """
        pyoArgsAssert(self, "i", x)
        self._overlaps = x
        x, _ = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setOverlaps(wrap(x, i))

    def setWinType(self, x):
        """
        Replace the `wintype` attribute.

        :Args:

            x: int
                new `wintype` attribute.

        """
        pyoArgsAssert(self, "i", x)
        self._wintype = x
        x, _ = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setWinType(wrap(x, i))

    def setCallback(self, x):
        """
        Replace the `callback` attribute.

        :Args:

            x: callable
                new `callback` attribute.

        """
        pyoArgsAssert(self, "c", x)
        self._callback = x
        x, _ = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setCallback(wrap(x, i))

    @property
    def input(self):
        """PyoObject. Input signal to process."""
        return self._input

    @input.setter
    def input(self, x):
        self.setInput(x)

    @property
    def size(self):
        """int. FFT size."""
        return self._size

    @size.setter
    def size(self, x):
        self.setSize(x)

    @property
    def overlaps(self):
        """int. FFT overlap factor."""
        return self._overlaps

    @overlaps.setter
    def overlaps(self, x):
        self.setOverlaps(x)

    @property
    def wintype(self):
        """int. Windowing method."""
        return self._wintype

    @wintype.setter
    def wintype(self, x):
        self.setWinType(x)
class PVSynth(PyoObject):
    """
    Phase Vocoder synthesis object.

    PVSynth takes a PyoPVObject as its input and performs the spectral to
    time domain conversion: the phase vocoder magnitude and true-frequency
    streams are turned back into a real audio signal.

    :Parent: :py:class:`PyoObject`

    :Args:

        input: PyoPVObject
            Phase vocoder streaming object to process.
        wintype: int, optional
            Shape of the envelope used to filter each input frame.
            Possible shapes are:
                0. rectangular (no windowing)
                1. Hamming
                2. Hanning (default)
                3. Bartlett (triangular)
                4. Blackman 3-term
                5. Blackman-Harris 4-term
                6. Blackman-Harris 7-term
                7. Tukey (alpha = 0.66)
                8. Sine (half-sine window)

    >>> s = Server().boot()
    >>> s.start()
    >>> a = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=0.7)
    >>> pva = PVAnal(a, size=1024, overlaps=4, wintype=2)
    >>> pvs = PVSynth(pva).mix(2).out()

    """

    def __init__(self, input, wintype=2, mul=1, add=0):
        pyoArgsAssert(self, "piOO", input, wintype, mul, add)
        PyoObject.__init__(self, mul, add)
        self._input = input
        self._wintype = wintype
        args = convertArgsToLists(self._input, wintype, mul, add)
        input, wintype, mul, add, lmax = args
        self._base_objs = []
        for i in range(lmax):
            self._base_objs.append(
                PVSynth_base(wrap(input, i), wrap(wintype, i), wrap(mul, i), wrap(add, i)))
        self._init_play()

    def setInput(self, x):
        """
        Replace the `input` attribute.

        :Args:

            x: PyoPVObject
                New signal to process.

        """
        pyoArgsAssert(self, "p", x)
        self._input = x
        x, _ = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setInput(wrap(x, i))

    def setWinType(self, x):
        """
        Replace the `wintype` attribute.

        :Args:

            x: int
                new `wintype` attribute.

        """
        pyoArgsAssert(self, "i", x)
        self._wintype = x
        x, _ = convertArgsToLists(x)
        for i, obj in enumerate(self._base_objs):
            obj.setWinType(wrap(x, i))

    def ctrl(self, map_list=None, title=None, wxnoserver=False):
        self._map_list = [SLMapMul(self._mul)]
        PyoObject.ctrl(self, map_list, title, wxnoserver)

    @property
    def input(self):
        """PyoPVObject. Input signal to process."""
        return self._input

    @input.setter
    def input(self, x):
        self.setInput(x)

    @property
    def wintype(self):
        """int. Windowing method."""
        return self._wintype

    @wintype.setter
    def wintype(self, x):
        self.setWinType(x)
class PVAddSynth(PyoObject):
"""
Phase Vocoder additive synthesis object.
PVAddSynth takes a PyoPVObject as its input and resynthesize
the real signal using the magnitude and true frequency's
streams to control amplitude and frequency envelopes of an
oscillator bank.
:Parent: :py:class:`PyoObject`
:Args:
input: PyoPVObject
Phase vocoder streaming object to process.
pitch: float or PyoObject, optional
Transposition factor. Defaults to 1.
num: int, optional
Number of oscillators used to synthesize the
output sound. Defaults to 100.
first: int, optional
The first bin to synthesize, starting from 0.
Defaults to 0.
inc: int, optional
Starting from bin `first`, resynthesize bins
`inc` apart. Defaults to 1.
>>> s = Server().boot()
>>> s.start()
>>> a = SfPlayer(SNDS_PATH+"/transparent.aif", loop=True, mul=0.7)
>>> pva = PVAnal(a, size=1024, overlaps=4, wintype=2)
>>> pvs = PVAddSynth(pva, pitch=1.25, num=100, first=0, inc=2).out()
"""
def __init__(self, input, pitch=1, num=100, first=0, inc=1, mul=1, add=0):
pyoArgsAssert(self, "pOiiiOO", input, pitch, num, first, inc, mul, add)
PyoObject.__init__(self, mul, add)
self._input = input
self._pitch = pitch
self._num = num
self._first = first
self._inc = inc
input, pitch, num, first, inc, mul, add, lmax = convertArgsToLists(
self._input, pitch, num, first, inc, mul, add
)
self._base_objs = [
PVAddSynth_base(
wrap(input, i), wrap(pitch, i), wrap(num, i), wrap(first, i), wrap(inc, i), wrap(mul, i), wrap(add, i)
)
for i in range(lmax)
]
self._init_play()
def setInput(self, x):
"""
Replace the `input` attribute.
:Args:
x: PyoPVObject
New signal to process.
"""
pyoArgsAssert(self, "p", x)
self._input = x
x, lmax = convertArgsToLists(x)
[obj.setInput(wrap(x, i)) for i, obj in enumerate(self._base_objs)]
def setPitch(self, x):
"""
Replace the `pitch` attribute.
:Args:
x: float or PyoObject
new `pitch` attribute.
"""
pyoArgsAssert(self, "O", x)
self._pitch = x
x, lmax = convertArgsToLists(x)
[obj.setPitch(wrap(x, i)) for i, obj in enumerate(self._base_objs)]
def setNum(self, x):
"""
Replace the `num` attribute.
:Args:
| |
"""ResNet v1, v2, and segmentation models for Keras.
# Reference
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
- [Identity Mappings in Deep Residual Networks](https://arxiv.org/abs/1603.05027)
Reference material for extended functionality:
- [ResNeXt](https://arxiv.org/abs/1611.05431) for Tiny ImageNet support.
- [Dilated Residual Networks](https://arxiv.org/pdf/1705.09914) for segmentation support
- [Deep Residual Learning for Instrument Segmentation in
Robotic Surgery](https://arxiv.org/abs/1703.08580)
for segmentation support.
Implementation Adapted from: github.com/raghakot/keras-resnet
""" # pylint: disable=E501
from __future__ import division
import six
from tensorflow import keras
from tensorflow.keras import backend as K, Model, Input
from tensorflow.keras.layers import BatchNormalization, Activation, AveragePooling3D, Flatten, Dense, Dropout, ConvLSTM2D, add, GlobalAveragePooling3D, Reshape, GlobalMaxPooling3D, \
MaxPooling3D
from tensorflow.keras.regularizers import l2
def _bn_relu(x, bn_name=None, relu_name=None):
    """Apply batch normalization (on CHANNEL_AXIS) followed by ReLU."""
    normalized = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name)(x)
    activated = Activation("relu", name=relu_name)(normalized)
    return activated
def _bn_relu2(x, bn_name=None, relu_name=None):
    """Build the network's classification head:
    BN -> GlobalAveragePooling3D -> Dropout(0.25) -> Dense(2, softmax).

    Despite the name, this is NOT a plain BN->relu block — it terminates the
    network with a 2-class softmax. `relu_name` is accepted only for
    signature compatibility with _bn_relu and is unused.
    (Dead commented-out pooling/dense experiments removed.)
    """
    norm = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name)(x)
    norm = GlobalAveragePooling3D()(norm)
    norm = Dropout(0.25)(norm)
    norm = Dense(units=2, activation='softmax', kernel_initializer="he_normal")(norm)
    return norm
def _conv_bn_relu(**conv_params):
    """Helper to build a conv -> BN -> relu residual unit activation function.

    This is the original ResNet v1 scheme from https://arxiv.org/abs/1512.03385;
    here the convolution is a ConvLSTM2D with return_sequences=True.
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    # setdefault keeps caller-supplied values while filling in the defaults.
    strides = conv_params.setdefault("strides", (1, 1))
    dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
    conv_name = conv_params.setdefault("conv_name", None)
    bn_name = conv_params.setdefault("bn_name", None)
    relu_name = conv_params.setdefault("relu_name", None)
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))

    def f(x):
        conv = ConvLSTM2D(filters=filters, kernel_size=kernel_size,
                          strides=strides, padding=padding,
                          dilation_rate=dilation_rate,
                          kernel_initializer=kernel_initializer,
                          kernel_regularizer=kernel_regularizer,
                          return_sequences=True,
                          name=conv_name)
        return _bn_relu(conv(x), bn_name=bn_name, relu_name=relu_name)
    return f
def _bn_relu_conv(**conv_params):
    """Helper to build a BN -> relu -> conv residual unit with full
    pre-activation. This is the ResNet v2 scheme proposed in
    http://arxiv.org/pdf/1603.05027v2.pdf; here the convolution is a
    ConvLSTM2D with return_sequences=True.
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    # setdefault keeps caller-supplied values while filling in the defaults.
    strides = conv_params.setdefault("strides", (1, 1))
    dilation_rate = conv_params.setdefault("dilation_rate", (1, 1))
    conv_name = conv_params.setdefault("conv_name", None)
    bn_name = conv_params.setdefault("bn_name", None)
    relu_name = conv_params.setdefault("relu_name", None)
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))

    def f(x):
        activation = _bn_relu(x, bn_name=bn_name, relu_name=relu_name)
        conv = ConvLSTM2D(filters=filters, kernel_size=kernel_size,
                          strides=strides, padding=padding,
                          dilation_rate=dilation_rate,
                          kernel_initializer=kernel_initializer,
                          kernel_regularizer=kernel_regularizer,
                          return_sequences=True,
                          name=conv_name)
        return conv(activation)
    return f
def _shortcut(input_feature, residual, conv_name_base=None, bn_name_base=None):
    """Adds a shortcut between input and residual block and merges them with "sum".

    The shortcut is the identity when shapes already match; otherwise the
    input is projected onto the residual's shape with a strided 1x1
    ConvLSTM2D followed by batch normalization.
    (BUGFIX: removed a stray debug print left in library code; dead
    commented-out Conv2D variant removed.)
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input_feature)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input_feature
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        if conv_name_base is not None:
            conv_name_base = conv_name_base + '1'
        shortcut = ConvLSTM2D(filters=residual_shape[CHANNEL_AXIS],
                              kernel_size=(1, 1),
                              strides=(stride_width, stride_height),
                              padding="valid",
                              kernel_initializer="he_normal",
                              kernel_regularizer=l2(0.0001),
                              return_sequences=True,
                              name=conv_name_base)(input_feature)
        if bn_name_base is not None:
            bn_name_base = bn_name_base + '1'
        shortcut = BatchNormalization(axis=CHANNEL_AXIS,
                                      name=bn_name_base)(shortcut)
    return add([shortcut, residual])
def _residual_block(block_function, filters, blocks, stage,
                    transition_strides=None, transition_dilation_rates=None,
                    dilation_rates=None, is_first_layer=False, dropout=None,
                    residual_unit=_bn_relu_conv):
    """Builds a residual block with repeating bottleneck blocks.

    stage: integer, current stage label, used for generating layer names
    blocks: number of blocks 'a','b'..., current block label, used for
        generating layer names
    transition_strides: a list of tuples for the strides of each transition
    transition_dilation_rates: a list of tuples for the dilation rate of
        each transition
    """
    # Fill per-block defaults when the caller passed None.
    if transition_dilation_rates is None:
        transition_dilation_rates = [(1, 1)] * blocks
    if transition_strides is None:
        transition_strides = [(1, 1)] * blocks
    if dilation_rates is None:
        dilation_rates = [1] * blocks

    def f(x):
        out = x
        for block_index in range(blocks):
            out = block_function(filters=filters, stage=stage, block=block_index,
                                 transition_strides=transition_strides[block_index],
                                 dilation_rate=dilation_rates[block_index],
                                 is_first_block_of_first_layer=(is_first_layer and block_index == 0),
                                 dropout=dropout,
                                 residual_unit=residual_unit)(out)
        return out
    return f
def _block_name_base(stage, block):
"""Get the convolution name base and batch normalization name base defined by
stage and block.
If there are less than 26 blocks they will be labeled 'a', 'b', 'c' to match the
paper and keras and beyond 26 blocks they will simply be numbered.
"""
if block < 27:
block = '%c' % (block + 97) # 97 is the ascii number for lowercase 'a'
conv_name_base = 'res' + str(stage) + str(block) + '_branch'
bn_name_base = 'bn' + str(stage) + str(block) + '_branch'
return conv_name_base, bn_name_base
def basic_block(filters, stage, block, transition_strides=(1, 1),
                dilation_rate=(1, 1), is_first_block_of_first_layer=False, dropout=None,
                residual_unit=_bn_relu_conv):
    """Basic 3x3 convolution blocks for use on resnets with layers <= 34.

    Follows the improved pre-activation scheme proposed in
    http://arxiv.org/pdf/1603.05027v2.pdf (convolutions are ConvLSTM2D here).
    """
    def f(input_features):
        conv_name_base, bn_name_base = _block_name_base(stage, block)
        if is_first_block_of_first_layer:
            # The initial bn->relu->maxpool already ran, so start with a
            # bare convolution instead of a full pre-activated unit.
            out = ConvLSTM2D(filters=filters, kernel_size=(3, 3),
                             strides=transition_strides,
                             dilation_rate=dilation_rate,
                             padding="same",
                             kernel_initializer="he_normal",
                             kernel_regularizer=l2(1e-4),
                             return_sequences=True,
                             name=conv_name_base + '2a')(input_features)
        else:
            out = residual_unit(filters=filters, kernel_size=(3, 3),
                                strides=transition_strides,
                                dilation_rate=dilation_rate,
                                conv_name_base=conv_name_base + '2a',
                                bn_name_base=bn_name_base + '2a')(input_features)
        if dropout is not None:
            out = Dropout(dropout)(out)
        out = residual_unit(filters=filters, kernel_size=(3, 3),
                            conv_name_base=conv_name_base + '2b',
                            bn_name_base=bn_name_base + '2b')(out)
        # Merge with the identity (or projected) shortcut.
        return _shortcut(input_features, out)
    return f
def bottleneck(filters, stage, block, transition_strides=(1, 1),
               dilation_rate=(1, 1), is_first_block_of_first_layer=False, dropout=None,
               residual_unit=_bn_relu_conv):
    """Bottleneck architecture for > 34 layer resnet.

    Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
    Structure: 1x1 reduce -> 3x3 -> 1x1 expand (filters * 4), merged with a
    shortcut via _shortcut().

    Returns:
        A final conv layer of filters * 4
    """
    def f(input_feature):
        conv_name_base, bn_name_base = _block_name_base(stage, block)
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            # NOTE(review): this branch uses kernel_size=(3, 3) although a
            # bottleneck's first conv (and the else-branch below) is (1, 1) —
            # confirm the 3x3 here is intentional.
            x = ConvLSTM2D(filters=filters, kernel_size=(3, 3),
                           strides=transition_strides,
                           dilation_rate=dilation_rate,
                           padding="same",
                           kernel_initializer="he_normal",return_sequences=True,
                           kernel_regularizer=l2(1e-4),
                           name=conv_name_base + '2a')(input_feature)
        else:
            # Pre-activated 1x1 reduction.
            x = residual_unit(filters=filters, kernel_size=(1, 1),
                              strides=transition_strides,
                              dilation_rate=dilation_rate,
                              conv_name_base=conv_name_base + '2a',
                              bn_name_base=bn_name_base + '2a')(input_feature)
        if dropout is not None:
            x = Dropout(dropout)(x)
        # Pre-activated 3x3 middle convolution.
        x = residual_unit(filters=filters, kernel_size=(3, 3),
                          conv_name_base=conv_name_base + '2b',
                          bn_name_base=bn_name_base + '2b')(x)
        if dropout is not None:
            x = Dropout(dropout)(x)
        # Pre-activated 1x1 expansion to filters * 4 channels.
        x = residual_unit(filters=filters * 4, kernel_size=(1, 1),
                          conv_name_base=conv_name_base + '2c',
                          bn_name_base=bn_name_base + '2c')(x)
        return _shortcut(input_feature, x)
    return f
def _handle_dim_ordering():
    """Set the global ROW_AXIS/COL_AXIS/CHANNEL_AXIS indices according to the
    Keras image data format (axis 0 is batch, axis 1 is the sequence dim)."""
    global ROW_AXIS
    global COL_AXIS
    global CHANNEL_AXIS
    if K.image_data_format() == 'channels_last':
        ROW_AXIS, COL_AXIS, CHANNEL_AXIS = 2, 3, 4
    else:
        # channels_first: the channel axis precedes the spatial axes.
        CHANNEL_AXIS, ROW_AXIS, COL_AXIS = 2, 3, 4
def _string_to_function(identifier):
    """Resolve *identifier* to a callable.

    Strings are looked up in this module's globals; anything else is assumed to
    already be a callable and returned unchanged.

    Raises:
        ValueError: if a string identifier does not name a module-level object.
    """
    if not isinstance(identifier, six.string_types):
        return identifier
    fn = globals().get(identifier)
    if not fn:
        raise ValueError('Invalid {}'.format(identifier))
    return fn
def ResNet(input_shape=None, classes=10, block='bottleneck', residual_unit='v2',
repetitions=None, initial_filters=64, activation='softmax', include_top=True,
input_tensor=None, dropout=None, transition_dilation_rate=(1, 1),
initial_strides=(2, 2), initial_kernel_size=(7, 7), initial_pooling='max',
final_pooling=None, top='classification'):
"""Builds a custom ResNet like architecture. Defaults to ResNet50 v2.
Args:
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` dim ordering)
or `(3, 224, 224)` (with `channels_first` dim ordering).
It should have exactly 3 dimensions,
and width and height should be no smaller than 8.
E.g. `(224, 224, 3)` would be one valid value.
classes: The number of outputs at final softmax layer
block: The block function to use. This is either `'basic'` or `'bottleneck'`.
The original paper used `basic` for layers < 50.
repetitions: Number of repetitions of various block units.
At each block unit, the number of filters are doubled and the input size
is halved. Default of None implies the ResNet50v2 values of [3, 4, 6, 3].
residual_unit: the basic residual unit, 'v1' for conv bn relu, 'v2' for bn relu
conv. See [Identity Mappings in
Deep Residual Networks](https://arxiv.org/abs/1603.05027)
for details.
dropout: None for no dropout, otherwise rate of dropout from 0 to 1.
Based on [Wide Residual Networks.(https://arxiv.org/pdf/1605.07146) paper.
transition_dilation_rate: Dilation rate for transition layers. For semantic
segmentation of images use a dilation rate of (2, 2).
initial_strides: Stride of the very first residual unit and MaxPooling2D call,
with default (2, 2), set to (1, 1) for small images | |
<reponame>MrMeeb/qbit_manage
import logging, os, sys
from qbittorrentapi import Client, Version, LoginFailed, APIConnectionError, NotFound404Error, Conflict409Error
from modules import util
from modules.util import Failed, print_line, print_multiline, separator, list_in_text
from datetime import timedelta
from collections import Counter
from fnmatch import fnmatch
from alive_progress import alive_it, config_handler
logger = logging.getLogger("qBit Manage")
class Qbt:
def __init__(self, config, params):
    """Connect to qBittorrent, validate the client version, and build the
    initial torrent inventory.

    Args:
        config: qbit_manage Config object (settings, args, notifications).
        params: dict with "host", "username" and "password" for the Web UI.

    Raises:
        Failed: when login fails or the client cannot be reached.
    """
    self.config = config
    config_handler.set_global(bar=None, receipt=False)
    self.host = params["host"]
    self.username = params["username"]
    self.password = params["password"]
    logger.debug(f'Host: {self.host}, Username: {self.username}, Password: {self.password if self.password is None else "[REDACTED]"}')
    try:
        self.client = Client(host=self.host, username=self.username, password=self.password, VERIFY_WEBUI_CERTIFICATE=False)
        self.client.auth_log_in()
        SUPPORTED_VERSION = Version.latest_supported_app_version()
        CURRENT_VERSION = self.client.app.version
        logger.debug(f'qBittorrent: {self.client.app.version}')
        logger.debug(f'qBittorrent Web API: {self.client.app.web_api_version}')
        logger.debug(f'qbit_manage support version: {SUPPORTED_VERSION}')
        if not Version.is_app_version_supported(CURRENT_VERSION):
            # BUGFIX: corrected typo ("comaptible") and grammar in this
            # user-facing error message.
            e = (f"Qbittorrent Error: qbit_manage is only compatible with {SUPPORTED_VERSION} or lower. You are currently on {CURRENT_VERSION}." + '\n'
                 + f"Please downgrade your Qbittorrent version to {SUPPORTED_VERSION} to use qbit_manage.")
            self.config.notify(e, "Qbittorrent")
            print_multiline(e, 'CRITICAL')
            # NOTE(review): exits with status 0 on an unsupported version —
            # confirm a non-zero code isn't expected by callers/schedulers.
            sys.exit(0)
        logger.info("Qbt Connection Successful")
    except LoginFailed:
        e = "Qbittorrent Error: Failed to login. Invalid username/password."
        self.config.notify(e, "Qbittorrent")
        raise Failed(e)
    except APIConnectionError:
        e = "Qbittorrent Error: Unable to connect to the client."
        self.config.notify(e, "Qbittorrent")
        raise Failed(e)
    except Exception:
        e = "Qbittorrent Error: Unable to connect to the client."
        self.config.notify(e, "Qbittorrent")
        raise Failed(e)
    separator("Getting Torrent List", space=False, border=False)
    self.torrent_list = self.get_torrents({'sort': 'added_on'})

    # Will create a 2D Dictionary with the torrent name as the key
    # torrentdict = {'TorrentName1' : {'Category':'TV', 'save_path':'/data/torrents/TV', 'count':1, 'msg':'[]'...},
    #               'TorrentName2' : {'Category':'Movies', 'save_path':'/data/torrents/Movies'}, 'count':2, 'msg':'[]'...}
    # List of dictionary key definitions
    # Category = Returns category of the torrent (str)
    # save_path = Returns the save path of the torrent (str)
    # count = Returns a count of the total number of torrents with the same name (int)
    # msg = Returns a list of torrent messages by name (list of str)
    # status = Returns the list of status numbers of the torrent by name
    # (0: Tracker is disabled (used for DHT, PeX, and LSD),
    #  1: Tracker has not been contacted yet,
    #  2: Tracker has been contacted and is working,
    #  3: Tracker is updating,
    #  4: Tracker has been contacted, but it is not working (or doesn't send proper replies)
    # is_complete = Returns the state of torrent (Returns True if at least one of the torrent with the State is categorized as Complete.)
    # first_hash = Returns the hash number of the original torrent (Assuming the torrent list is sorted by date added (Asc))
    def get_torrent_info(torrent_list):
        """Build the per-name torrent dictionary plus the lists of torrents
        with a working tracker and with potential tracker/registration errors."""
        dry_run = self.config.args['dry_run']
        loglevel = 'DRYRUN' if dry_run else 'INFO'
        torrentdict = {}
        t_obj_unreg = []   # torrents whose tracker reports a non-excluded error
        t_obj_valid = []   # torrents with at least one working tracker
        t_obj_list = []
        settings = self.config.settings
        separator("Checking Settings", space=False, border=False)
        if settings['force_auto_tmm']:
            print_line('force_auto_tmm set to True. Will force Auto Torrent Management for all torrents.', loglevel)
        separator("Gathering Torrent Information", space=True, border=True)
        for torrent in alive_it(torrent_list):
            is_complete = False
            msg = None
            status = None
            working_tracker = None
            issue = {'potential': False}
            if torrent.auto_tmm is False and settings['force_auto_tmm'] and torrent.category != '' and not dry_run:
                torrent.set_auto_management(True)
            try:
                torrent_name = torrent.name
                torrent_hash = torrent.hash
                torrent_is_complete = torrent.state_enum.is_complete
                save_path = torrent.save_path
                category = torrent.category
                torrent_trackers = torrent.trackers
            except Exception as e:
                self.config.notify(e, 'Get Torrent Info', False)
                logger.warning(e)
                # BUGFIX: skip this torrent entirely; previously execution fell
                # through and used undefined/stale variables from the failed read.
                continue
            if torrent_name in torrentdict:
                # BUGFIX: extend the list stored for THIS name; the old code
                # appended to whatever list the previous loop iteration left in
                # t_obj_list, which was wrong when duplicate names were not
                # adjacent in the (added_on-sorted) torrent list.
                t_obj_list = torrentdict[torrent_name]['torrents'] + [torrent]
                t_count = torrentdict[torrent_name]['count'] + 1
                msg_list = torrentdict[torrent_name]['msg']
                status_list = torrentdict[torrent_name]['status']
                is_complete = True if torrentdict[torrent_name]['is_complete'] is True else torrent_is_complete
                first_hash = torrentdict[torrent_name]['first_hash']
            else:
                t_obj_list = [torrent]
                t_count = 1
                msg_list = []
                status_list = []
                is_complete = torrent_is_complete
                first_hash = torrent_hash
            for x in torrent_trackers:
                if x.url.startswith('http'):
                    status = x.status
                    msg = x.msg.upper()
                    # Messages that indicate the tracker itself is down rather
                    # than the torrent being unregistered.
                    exception = [
                        "DOWN",
                        "DOWN.",
                        "IT MAY BE DOWN,",
                        "UNREACHABLE",
                        "(UNREACHABLE)",
                        "BAD GATEWAY",
                        "TRACKER UNAVAILABLE"
                    ]
                    if x.status == 2:
                        working_tracker = True
                        break
                    # Add any potential unregistered torrents to a list
                    if x.status == 4 and not list_in_text(msg, exception):
                        issue['potential'] = True
                        issue['msg'] = msg
                        issue['status'] = status
            if working_tracker:
                status = 2
                msg = ''
                t_obj_valid.append(torrent)
            elif issue['potential']:
                status = issue['status']
                msg = issue['msg']
                t_obj_unreg.append(torrent)
            if msg is not None: msg_list.append(msg)
            if status is not None: status_list.append(status)
            torrentattr = {
                'torrents': t_obj_list, 'Category': category, 'save_path': save_path, 'count': t_count,
                'msg': msg_list, 'status': status_list, 'is_complete': is_complete, 'first_hash': first_hash
            }
            torrentdict[torrent_name] = torrentattr
        return torrentdict, t_obj_unreg, t_obj_valid

    self.torrentinfo = None
    self.torrentissue = None
    self.torrentvalid = None
    if config.args['recheck'] or config.args['cross_seed'] or config.args['rem_unregistered'] or config.args['tag_tracker_error'] or config.args['tag_nohardlinks']:
        # Get an updated torrent dictionary information of the torrents
        self.torrentinfo, self.torrentissue, self.torrentvalid = get_torrent_info(self.torrent_list)
def get_torrents(self, params):
return self.client.torrents.info(**params)
def category(self):
    """Assign a category (derived from save path) to every uncategorised torrent.

    Only runs when the --cat-update flag is set. In dry-run mode nothing is
    changed on the client but the same log/notification output is produced.

    Returns:
        int: number of torrents (that would be) categorised.
    """
    dry_run = self.config.args['dry_run']
    loglevel = 'DRYRUN' if dry_run else 'INFO'
    num_cat = 0
    if self.config.args['cat_update']:
        separator("Updating Categories", space=False, border=False)
        # Only torrents that currently have no category are candidates.
        torrent_list = self.get_torrents({'category': '', 'filter': ''})
        for torrent in torrent_list:
            new_cat = self.config.get_category(torrent.save_path)
            tracker = self.config.get_tags([x.url for x in torrent.trackers if x.url.startswith('http')])
            if not dry_run:
                try:
                    torrent.set_category(category=new_cat)
                    if torrent.auto_tmm is False and self.config.settings['force_auto_tmm']:
                        torrent.set_auto_management(True)
                except Conflict409Error:
                    # qBittorrent answers 409 when the category does not exist
                    # yet: create it with the torrent's save path, then retry.
                    e = print_line(f'Existing category "{new_cat}" not found for save path {torrent.save_path}, category will be created.', loglevel)
                    self.config.notify(e, 'Update Category', False)
                    self.client.torrent_categories.create_category(name=new_cat, save_path=torrent.save_path)
                    torrent.set_category(category=new_cat)
            body = []
            body += print_line(util.insert_space(f'Torrent Name: {torrent.name}', 3), loglevel)
            body += print_line(util.insert_space(f'New Category: {new_cat}', 3), loglevel)
            body += print_line(util.insert_space(f'Tracker: {tracker["url"]}', 8), loglevel)
            # Payload for webhook/Notifiarr notifications.
            attr = {
                "function": "cat_update",
                "title": "Updating Categories",
                "body": "\n".join(body),
                "torrent_name": torrent.name,
                "torrent_category": new_cat,
                "torrent_tracker": tracker["url"],
                "notifiarr_indexer": tracker["notifiarr"]
            }
            self.config.send_notifications(attr)
            num_cat += 1
        if num_cat >= 1:
            print_line(f"{'Did not update' if dry_run else 'Updated'} {num_cat} new categories.", loglevel)
        else:
            print_line('No new torrents to categorize.', loglevel)
    return num_cat
def tags(self):
    """Apply tracker-derived tags (and share limits) to untagged torrents.

    Only runs when the --tag-update flag is set. A torrent counts as untagged
    when it has no tags at all, or when every tag it has appears in the
    ignoreTags_OnUpdate setting.

    Returns:
        int: number of tags (that would be) added.
    """
    dry_run = self.config.args['dry_run']
    loglevel = 'DRYRUN' if dry_run else 'INFO'
    num_tags = 0
    # Tags in this list do not count as "already tagged" when deciding to retag.
    ignore_tags = self.config.settings['ignoreTags_OnUpdate']
    if self.config.args['tag_update']:
        separator("Updating Tags", space=False, border=False)
        for torrent in self.torrent_list:
            check_tags = util.get_list(torrent.tags)
            if torrent.tags == '' or (len([x for x in check_tags if x not in ignore_tags]) == 0):
                tracker = self.config.get_tags([x.url for x in torrent.trackers if x.url.startswith('http')])
                if tracker["tag"]:
                    num_tags += len(tracker["tag"])
                    body = []
                    body += print_line(util.insert_space(f'Torrent Name: {torrent.name}', 3), loglevel)
                    body += print_line(util.insert_space(f'New Tag{"s" if len(tracker["tag"]) > 1 else ""}: {", ".join(tracker["tag"])}', 8), loglevel)
                    body += print_line(util.insert_space(f'Tracker: {tracker["url"]}', 8), loglevel)
                    # Also apply the tracker's share limits alongside the tags.
                    body.extend(self.set_tags_and_limits(torrent, tracker["max_ratio"], tracker["max_seeding_time"], tracker["limit_upload_speed"], tracker["tag"]))
                    category = self.config.get_category(torrent.save_path) if torrent.category == '' else torrent.category
                    # Payload for webhook/Notifiarr notifications.
                    attr = {
                        "function": "tag_update",
                        "title": "Updating Tags",
                        "body": "\n".join(body),
                        "torrent_name": torrent.name,
                        "torrent_category": category,
                        "torrent_tag": ", ".join(tracker["tag"]),
                        "torrent_tracker": tracker["url"],
                        "notifiarr_indexer": tracker["notifiarr"],
                        "torrent_max_ratio": tracker["max_ratio"],
                        "torrent_max_seeding_time": tracker["max_seeding_time"],
                        "torrent_limit_upload_speed": tracker["limit_upload_speed"]
                    }
                    self.config.send_notifications(attr)
        if num_tags >= 1:
            print_line(f"{'Did not update' if dry_run else 'Updated'} {num_tags} new tags.", loglevel)
        else:
            print_line('No new torrents to tag.', loglevel)
    return num_tags
def set_tags_and_limits(self, torrent, max_ratio, max_seeding_time, limit_upload_speed=None, tags=None, restore=False):
    """Apply tags, upload-speed limit, and share limits to a single torrent.

    Sentinel values follow the qBittorrent convention: -1 means "no limit",
    -2 means "use the global share limit". When *restore* is True the -1/-2
    share-limit shortcuts are skipped and the explicit values are applied.
    In dry-run mode only the log lines are produced; nothing is changed.

    Returns:
        list[str]: the log lines describing what was (or would be) changed.
    """
    dry_run = self.config.args['dry_run']
    loglevel = 'DRYRUN' if dry_run else 'INFO'
    body = []
    # Print Logs
    if limit_upload_speed:
        if limit_upload_speed == -1: body += print_line(util.insert_space('Limit UL Speed: Infinity', 1), loglevel)
        else: body += print_line(util.insert_space(f'Limit UL Speed: {limit_upload_speed} kB/s', 1), loglevel)
    if max_ratio or max_seeding_time:
        if (max_ratio == -2 or max_seeding_time == -2) and not restore: body += print_line(util.insert_space('Share Limit: Use Global Share Limit', 4), loglevel)
        elif (max_ratio == -1 or max_seeding_time == -1) and not restore: body += print_line(util.insert_space('Share Limit: Set No Share Limit', 4), loglevel)
        else:
            # Log only the value(s) that actually differ from the torrent's
            # current limits.
            if max_ratio != torrent.max_ratio and (not max_seeding_time or max_seeding_time < 0):
                body += print_line(util.insert_space(f'Share Limit: Max Ratio = {max_ratio}', 4), loglevel)
            elif max_seeding_time != torrent.max_seeding_time and (not max_ratio or max_ratio < 0):
                body += print_line(util.insert_space(f'Share Limit: Max Seed Time = {max_seeding_time} min', 4), loglevel)
            elif max_ratio != torrent.max_ratio and max_seeding_time != torrent.max_seeding_time:
                body += print_line(util.insert_space(f'Share Limit: Max Ratio = {max_ratio}, Max Seed Time = {max_seeding_time} min', 4), loglevel)
    # Update Torrents
    if not dry_run:
        if tags: torrent.add_tags(tags)
        if limit_upload_speed:
            # Client API expects bytes/s; config value is in kB/s.
            if limit_upload_speed == -1: torrent.set_upload_limit(-1)
            else: torrent.set_upload_limit(limit_upload_speed*1024)
        if (max_ratio or max_seeding_time) and not restore:
            if max_ratio == -2 or max_seeding_time == -2:
                torrent.set_share_limits(-2, -2)
                return body
            elif max_ratio == -1 or max_seeding_time == -1:
                torrent.set_share_limits(-1, -1)
                return body
        # Keep the torrent's existing limit for any value not supplied.
        if not max_ratio: max_ratio = torrent.max_ratio
        if not max_seeding_time: max_seeding_time = torrent.max_seeding_time
        torrent.set_share_limits(max_ratio, max_seeding_time)
    return body
def tag_nohardlinks(self):
dry_run = self.config.args['dry_run']
loglevel = 'DRYRUN' if dry_run else 'INFO'
num_tags = 0 # counter for the number of torrents that has no hard links
del_tor = 0 # counter for the number of torrents that has | |
Type for Variable or Meter 11"+"\n")
lines.append(" WaterSystems:DistrictCooling,!- Variable or Meter 12 Name"+"\n")
lines.append(" ValueWhenMaximumOrMinimum, !- Aggregation Type for Variable or Meter 12"+"\n")
lines.append(" Cogeneration:DistrictCooling,!- Variable or Meter 13 Name"+"\n")
lines.append(" ValueWhenMaximumOrMinimum; !- Aggregation Type for Variable or Meter 13"+"\n")
lines.append("\n")
else:
lines.append(line)
else:
lines.append(line)
else:
prepare=True;
lines.append(line)
fi.close()
fiw = open(str(idfFilePath),'w')
for line in lines:
fiw.write(line)
fiw.close()
def runAnalysis(self, osmFile, useRunManager = False):
    """Convert an OSM model to IDF and run EnergyPlus on it.

    When useRunManager is False (default) a batch file is written and executed
    instead; when True the OpenStudio RunManager drives the simulation.
    Returns a (idf path, result file) tuple, or (None, None) on job failure.
    (Python 2 / IronPython code.)
    """
    # Preparation
    workingDir, fileName = os.path.split(osmFile)
    projectName = (".").join(fileName.split(".")[:-1])
    osmPath = ops.Path(osmFile)
    # create idf - I separated this job as putting them together
    # was making EnergyPlus to crash
    idfFolder, idfPath = self.osmToidf(workingDir, projectName, osmPath)
    print 'made idf: ' + idfFolder,idfPath
    if not useRunManager:
        # Batch-file fallback path: run Epl-run directly.
        resultFile = self.writeBatchFile(idfFolder, "ModelToIdf\\in.idf", self.weatherFile, EPDirectory = 'C:\\EnergyPlusV8-1-0')
        return os.path.join(idfFolder, "ModelToIdf", "in.idf"), resultFile
    outputPath = ops.Path(idfFolder)
    rmDBPath = ops.Path(os.path.join(idfFolder, projectName + ".db"))
    try:
        rm = ops.RunManager(rmDBPath, True, True)
        # set up tool info to pass to run manager
        energyPlusTool = ops.ToolInfo(self.EPPath)
        toolInfo = ops.Tools()
        toolInfo.append(energyPlusTool)
        # get manager configration options
        configOptions = rm.getConfigOptions()
        EPRunJob = ops.JobFactory.createEnergyPlusJob(energyPlusTool, self.iddFile, idfPath,
                                                      self.epwFile, outputPath)
        # put in queue and let it go
        rm.enqueue(EPRunJob, True)
        rm.setPaused(False)
        # This make Rhino and NOT Grasshopper to crash
        # I should send this as a discussion later
        #rm.showStatusDialog()
        # Poll until the queued job finishes.
        while rm.workPending():
            time.sleep(1)
            print "Running simulation..."
        # print "Process Event:" + str(ops.Application.instance().processEvents())
        jobErrors = EPRunJob.errors()
        # print jobErrors.succeeded()
        # print "Process: " + str(ops.Application.instance().processEvents())
        print "Errors and Warnings:"
        for msg in list(jobErrors.errors()):
            print msg
        rm.Dispose() # don't remove this as Rhino will crash if you don't dispose run manager
        if jobErrors.succeeded():
            return os.path.join(idfFolder, "ModelToIdf", "in.idf"), idfFolder + "\\EnergyPlus\\epluszsz.csv"
        else:
            return None, None
    except Exception, e:
        # NOTE(review): if RunManager construction itself failed, `rm` is
        # undefined here and this handler raises NameError — confirm intended.
        rm.Dispose() # in case anything goes wrong it closes the rm
        print `e`
def writeBatchFile(self, workingDir, idfFileName, epwFileAddress, EPDirectory = 'C:\\EnergyPlusV8-1-0'):
    """
    This is here as an alternate until I can get RunManager to work

    Writes a .bat file that calls Epl-run on the given IDF + EPW and executes
    it synchronously, then returns the expected (Zsz.csv, .sql) result paths.
    (Python 2 / IronPython code — .EndsWith is the .NET string method.)
    """
    # Split the drive letter (e.g. "C:") off the working path.
    workingDrive = workingDir[:2]
    if idfFileName.EndsWith('.idf'): shIdfFileName = idfFileName.replace('.idf', '')
    else: shIdfFileName = idfFileName
    if not workingDir.EndsWith('\\'): workingDir = workingDir + '\\'
    fullPath = workingDir + shIdfFileName
    folderName = workingDir.replace( (workingDrive + '\\'), '')
    # Batch script: switch drive, cd into the project folder, run Epl-run.
    batchStr = workingDrive + '\ncd\\' + folderName + '\n' + EPDirectory + \
    '\\Epl-run ' + fullPath + ' ' + fullPath + ' idf ' + epwFileAddress + ' EP N nolimit N N 0 Y'
    batchFileAddress = fullPath +'.bat'
    batchfile = open(batchFileAddress, 'w')
    batchfile.write(batchStr)
    batchfile.close()
    #execute the batch file
    os.system(batchFileAddress)
    print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"+fullPath
    return fullPath + "Zsz.csv",fullPath+".sql"
class RunOPSRManage(object):
    """Applies an OpenStudio BCL measure to a model (optionally followed by an
    EnergyPlus simulation) through the OpenStudio RunManager.

    Paths to EnergyPlus, the IDD file, and the weather file are hard-coded for
    EnergyPlus 8.1 on Windows. (Python 2 / IronPython code.)
    """
    def __init__(self, model, measuredict, weatherFilePath = r"C:\EnergyPlusV8-1-0\WeatherData\USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw"):
        # model: OpenStudio model to operate on; measuredict: measure arguments.
        self.weatherFile = weatherFilePath # just for batch file as an alternate solution
        self.EPPath = ops.Path(r"C:\EnergyPlusV8-1-0\EnergyPlus.exe")
        self.epwFile = ops.Path(weatherFilePath)
        self.iddFile = ops.Path(r"C:\EnergyPlusV8-1-0\Energy+.idd")
        self.model = model
        self.measuredict = measuredict
    def osmToidf(self, workingDir, projectName, osmPath):
        """Forward-translate self.model to an IDF under workingDir/projectName,
        forcing the OutputControl:Table:Style to CommaAndHTML. Returns the
        (idf folder, idf file path) pair."""
        # create a new folder to run the analysis
        projectFolder =os.path.join(workingDir, projectName)
        try: os.mkdir(projectFolder)
        except: pass
        idfFolder = os.path.join(projectFolder)
        idfFilePath = ops.Path(os.path.join(projectFolder, "ModelToIdf", "in.idf"))
        forwardTranslator = ops.EnergyPlusForwardTranslator()
        workspace = forwardTranslator.translateModel(self.model)
        # remove the current object
        tableStyleObjects = workspace.getObjectsByType(ops.IddObjectType("OutputControl_Table_Style"))
        for obj in tableStyleObjects: obj.remove()
        tableStyle = ops.IdfObject(ops.IddObjectType("OutputControl_Table_Style"))
        tableStyle.setString(0, "CommaAndHTML")
        workspace.addObject(tableStyle)
        workspace.save(idfFilePath, overwrite = True)
        #DBPath = ops.Path(os.path.join(projectFolder, projectName + "_osmToidf.db"))
        # start run manager
        #rm = ops.RunManager(DBPath, True, True)
        # create workflow
        #wf = ops.Workflow("EnergyPlus")
        # put in queue and let it go
        #rm.enqueue(wf.create(ops.Path(projectFolder), osmPath, self.epwFile), True)
        #rm.setPaused(False)
        #while rm.workPending():
        #    time.sleep(.5)
        #    print "Converting osm to idf ..."
        #rm.Dispose() # don't remove this as Rhino will crash if you don't dispose run manager
        return idfFolder, idfFilePath
    def runAnalysis(self, osmFile, RunSimulation = False):
        """Queue a RunManager workflow that runs a hard-coded BCL measure on
        osmFile, optionally appending ModelToIdf/EnergyPlus jobs when
        RunSimulation is True. Returns nothing."""
        # Preparation
        workingDir, fileName = os.path.split(osmFile)
        projectName = (".").join(fileName.split(".")[:-1])
        osmPath = ops.Path(osmFile)
        projectFolder =os.path.join(workingDir, projectName)
        try: os.mkdir(projectFolder)
        except: pass
        try:
            # create idf - I separated this job as putting them together
            # was making EnergyPlus to crash
            #idfFolder, idfPath = self.osmToidf(workingDir, projectName, osmPath)
            zone_handles = " "
            #remote = ops.RemoteBCL()
            #remote.downloadComponent("5f126600-ca2f-4611-9121-6dfea2de49d6")
            # Hard-coded path to a locally downloaded BCL measure component.
            bclfile = 'C:\\Users\\Chiensi\\BCL\\5f126600-ca2f-4611-9121-6dfea2de49d6\\e83a66c9-c8d6-4896-a979-ba75f3dacb02'
            #local = ops.LocalBCL()
            #print dir(local)
            #measure = local.getMeasure("5f126600-ca2f-4611-9121-6dfea2de49d6")
            #measure = component.files("rb")
            #if measure.empty == True:
            #    print 'No .rb file found'
            #    assert False
            #    return
            #measure_path = component.files("rb")[0]
            bclpath = ops.Path(bclfile)
            #measure_root_path = os.path.dirname(bclpath)
            #print measure_root_path
            bcl_measure = ops.BCLMeasure(bclpath)
            DBPath = ops.Path(os.path.join(projectFolder, projectName + "_osmToidf.db"))
            run_manager = ops.RunManager(DBPath,True)
            #run_manager.setPaused(true)
            values = []
            ruleset = ops.OpenStudioRuleset()
            #print dir(ruleset)
            # arguments = ruleset.getArguments(bcl_measure,model)
            # NOTE(review): arguments is an empty string, so arguments[0] below
            # raises IndexError — presumably the commented getArguments call
            # above was meant to populate it; confirm before relying on this path.
            arguments = ""
            zone_arg = arguments[0]
            zone_arg.setValue(zone_handles)
            values.append(zone_arg)
            # NOTE(review): "Runmanager" casing differs from ops.RunManager used
            # above — verify this attribute exists on the ops binding.
            rjb = ops.Runmanager.RubyJobBuilder(bcl_measure,values)
            #rjb.setIncludeDir(OpenStudiio::Path.new("$OpenStudio_Dir")
            workflow = ops.Runmanager.Workflow()
            workflow.addJob(rjb.toWorkItem())
            if RunSimulation:
                # Chain model translation + preprocessing + simulation jobs.
                workflow.addJob(ops.Runmanager.JobType("ModelToIdf"))
                workflow.addJob(ops.Runmanager.JobType("EnergyPlusPreProcess"))
                workflow.addJob(ops.Runmanager.JobType("EnergyPlus"))
            co = ops.RunManager.ConfigOptions()
            co.fastFindEnergyPlus()
            co.findTools(False)
            tools = co.getTools()
            workflow.add(tools)
            job = workflow.create(projectFolder, osmPath, self.epwFile)
            # NOTE(review): lowercase `false` is undefined in Python — should
            # presumably be False; confirm.
            run_manager.enqueue(job, false)
            run_manager.waitForFinished()
        except Exception, e:
            try:
                run_manager.Dispose() # in case anything goes wrong it closes the rm
            except:
                pass
            print `e`
    def writeBatchFile(self, workingDir, idfFileName, epwFileAddress, EPDirectory = 'C:\\EnergyPlusV8-1-0'):
        """
        This is here as an alternate until I can get RunManager to work

        Writes and executes an Epl-run batch file; returns the expected .csv
        result path. (.EndsWith is the .NET string method — IronPython.)
        """
        # Split the drive letter (e.g. "C:") off the working path.
        workingDrive = workingDir[:2]
        if idfFileName.EndsWith('.idf'): shIdfFileName = idfFileName.replace('.idf', '')
        else: shIdfFileName = idfFileName
        if not workingDir.EndsWith('\\'): workingDir = workingDir + '\\'
        fullPath = workingDir + shIdfFileName
        folderName = workingDir.replace( (workingDrive + '\\'), '')
        # Batch script: switch drive, cd into the project folder, run Epl-run.
        batchStr = workingDrive + '\ncd\\' + folderName + '\n' + EPDirectory + \
        '\\Epl-run ' + fullPath + ' ' + fullPath + ' idf ' + epwFileAddress + ' EP N nolimit N N 0 Y'
        batchFileAddress = fullPath +'.bat'
        batchfile = open(batchFileAddress, 'w')
        batchfile.write(batchStr)
        batchfile.close()
        #execute the batch file
        os.system(batchFileAddress)
        return fullPath + ".csv"
def main(HBZones, HBContext, north, epwWeatherFile, analysisPeriod, simParameters, simulationOutputs, runIt, workingDir = "C:\ladybug", fileName = "openStudioModel.osm"):
# import the classes
w = gh.GH_RuntimeMessageLevel.Warning
if not sc.sticky.has_key('ladybug_release')and sc.sticky.has_key('honeybee_release'):
print "You should first let both Ladybug and Honeybee to fly..."
ghenv.Component.AddRuntimeMessage(w, "You should first let both Ladybug and Honeybee to fly...")
return -1
units = sc.doc.ModelUnitSystem
print units
if `units` != 'Rhino.UnitSystem.Meters':
msg = "Currently the OpenStudio component only works in meters. Change the units to Meters and try again!"
ghenv.Component.AddRuntimeMessage(w, msg)
return -1
# version check
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Honeybee to use this compoent." + \
" Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
try:
if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Ladybug to use this compoent." + \
" Use updateLadybug component to update userObjects.\n" + \
"If you have already updated userObjects drag Ladybug_Ladybug component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
# make sure epw file address is correct
if not epwWeatherFile.endswith(epwWeatherFile) or not os.path.isfile(epwWeatherFile):
msg = "Wrong weather file!"
print msg
ghenv.Component.AddRuntimeMessage(w, msg)
return -1
lb_preparation = sc.sticky["ladybug_Preparation"]()
hb_hive = sc.sticky["honeybee_Hive"]()
if workingDir == None: workingDir = sc.sticky["Honeybee_DefaultFolder"]
if fileName == None: fileName = "unnamed"
subWorkingDir = lb_preparation.makeWorkingDir(os.path.join(workingDir, fileName, "OpenStudio")).replace("\\\\", "\\")
print 'Current working directory is set to: ', subWorkingDir
fname = os.path.join(subWorkingDir, fileName + ".osm")
# initiate OpenStudio model
model = ops.Model()
hb_writeOPS = WriteOPS(simParameters, epwWeatherFile)
#set runningPeriod
hb_writeOPS.setRunningPeriod(analysisPeriod, model)
# set north
hb_writeOPS.setNorth(north, model)
# set timestep
hb_writeOPS.setTimestep(model)
# set simulation control
hb_writeOPS.setSimulationControls(model)
# set shadow calculation parameters
hb_writeOPS.setShadowCalculation(model)
# add design DAY
hb_writeOPS.addDesignDays(model)
# call Honeybee objects from the hive
HBZones = hb_hive.callFromHoneybeeHive(HBZones)
# generate stories
hb_writeOPS.generateStories(HBZones, model)
for zoneCount, zone in enumerate(HBZones):
# prepare non-planar zones
if zone.hasNonPlanarSrf or zone.hasInternalEdge:
zone.prepareNonPlanarZone(meshingLevel = 1)
# create a space - OpenStudio works based of space and not zone
# Honeybee though is structured | |
"""Script defining SMALMesh, an object capable of rendering a mesh version of the SMAL Model, for optimising the fit to other, existing meshes.
With modifications now to work with:
- newest SMAL Model
- Newly define scale factor parameters"""
from absl import flags
from pytorch3d.structures import Meshes
import sys, os
sys.path.append(os.path.dirname(sys.path[0]))
from smbld_model.smal_model.smal_torch import SMAL
import torch
from smbld_model.smal_model.smal_torch import batch_rodrigues
import numpy as np
import pickle
from vis import stack_as_batch, try_mkdir
from pytorch_arap.pytorch_arap.arap import ARAPMeshes
from smbld_model.config import SMPL_MODEL_PATH, SMPL_DATA_PATH
nn = torch.nn
opts = flags.FLAGS
# Maps SMAL body-part names to the index of their controlling joint in the
# model's 35-joint kinematic tree, used to apply per-part parameters (e.g.
# scale factors) to the correct joint.
kappa_map = {
    "front_left_leg": 7,
    "front_right_leg" : 11,
    "rear_left_leg": 17,
    "rear_right_leg": 21,
    "tail": 25,
    "core": 1, # NOTE: this is linked to head/front legs, will have to reduce them by an equal amount
    "neck": 15, # Head is connected to this
    "head": 16,
    "left_ear": 33,
    "right_ear":34,
}
def batch_global_rigid_transformation(Rs, Js, parent, rotate_base = False, betas_extra=None, device="cuda"):
    """
    Computes absolute joint locations given pose.

    rotate_base: if True, rotates the global rotation by 90 deg in x axis.
    if False, this is the original SMPL coordinate.

    Args:
        Rs: N x J x 3 x 3 rotation matrices of the J joints
        Js: N x J x 3, joint locations before posing
        parent: length-J array holding the parent id for each joint index
        betas_extra: optional (N, J*3) per-joint anisotropic scale factors
        device: torch device used for intermediate tensors
    Returns
        new_J : `Tensor`: N x J x 3 location of absolute joints
        A : `Tensor`: N x J x 4 x 4 relative joint transformations for LBS.
    """
    # Joint count comes from the kinematic tree (35 for SMAL); previously this
    # was hard-coded as 35 in two places below.
    n_joints = parent.shape[0]
    # Now Js is N x J x 3 x 1
    Js = Js.unsqueeze(-1)
    N = Rs.shape[0]
    if rotate_base:
        print('Flipping the SMPL coordinate frame!!!!')
        rot_x = torch.Tensor([[1, 0, 0], [0, -1, 0], [0, 0, -1]]).to(device)
        # BUGFIX: torch has no torch.repeat(); the old tf-style
        # torch.reshape(torch.repeat(...)) raised AttributeError whenever
        # rotate_base=True. Tensor.repeat builds the batched rotation directly.
        rot_x = rot_x.unsqueeze(0).repeat(N, 1, 1)
        root_rotation = torch.matmul(Rs[:, 0, :, :], rot_x)
    else:
        root_rotation = Rs[:, 0, :, :]
    Js_orig = Js.clone()
    # Per-joint anisotropic scale factors; identity scaling unless betas_extra
    # is supplied.
    scaling_factors = torch.ones(N, n_joints, 3).to(device)
    if betas_extra is not None:
        scaling_factors = betas_extra.reshape(-1, n_joints, 3)
    # debug_only
    # scaling_factors[:, 25:32, 0] = 0.2
    # scaling_factors[:, 7, 2] = 2.0
    scale_factors_3x3 = torch.diag_embed(scaling_factors, dim1=-2, dim2=-1)

    def make_A(R, t):
        # R is N x 3 x 3, t is N x 3 x 1 -> homogeneous 4 x 4 transform
        R_homo = torch.nn.functional.pad(R, (0,0,0,1,0,0))
        t_homo = torch.cat([t, torch.ones([N, 1, 1]).to(device)], 1)
        return torch.cat([R_homo, t_homo], 2)

    A0 = make_A(root_rotation, Js[:, 0])
    results = [A0]
    for i in range(1, n_joints):
        # Bone vector relative to the parent joint.
        j_here = Js[:, i] - Js[:, parent[i]]
        # Undo the parent's scaling before applying this joint's rotation/scale
        # so scales do not compound down the kinematic chain.
        s_par_inv = torch.inverse(scale_factors_3x3[:, parent[i]])
        rot = Rs[:, i]
        s = scale_factors_3x3[:, i]
        rot_new = s_par_inv @ rot @ s
        A_here = make_A(rot_new, j_here)
        res_here = torch.matmul(
            results[parent[i]], A_here)
        results.append(res_here)

    # N x J x 4 x 4
    results = torch.stack(results, dim=1)
    # Absolute joint positions are the translation part of each transform.
    new_J = results[:, :, :3, 3]
    # --- Compute relative A: Skinning is based on
    # how much the bone moved (not the final location of the bone)
    # but (final_bone - init_bone)
    # ---
    Js_w0 = torch.cat([Js_orig, torch.zeros([N, n_joints, 1, 1]).to(device)], 2)
    init_bone = torch.matmul(results, Js_w0)
    # Append empty 4 x 3:
    init_bone = torch.nn.functional.pad(init_bone, (3,0,0,0,0,0,0,0))
    A = results - init_bone
    return new_J, A
class SMBLDMesh(SMAL, nn.Module):
"""SMAL Model, with addition of scale factors to individual body parts"""
def __init__(self, n_batch = 1, fixed_betas = False, device="cuda", shape_family_id = 1,
             model_path = SMPL_MODEL_PATH, data_path = SMPL_DATA_PATH, num_betas=20, **kwargs):
    """Initialise the SMBLD mesh model.

    Args:
        n_batch: number of meshes fitted/optimised in parallel.
        fixed_betas: if True, one beta vector is shared across the batch.
        device: torch device for all parameters and buffers.
        shape_family_id: SMAL shape family (1 = canine); -1 uses zero mean betas.
        model_path: path to the SMAL model pickle.
        data_path: path to the SMAL data pickle (cluster mean betas).
        num_betas: number of shape betas actually used.
    """
    SMAL.__init__(self, model_path=model_path, data_path=data_path, opts = opts, shape_family_id=shape_family_id,
                  align = False)
    nn.Module.__init__(self)
    self.use_smal_betas = True
    self.n_batch = n_batch
    self.device = device
    self.v_template= self.v_template.to(device)
    self.faces = self.f
    faces_single = torch.from_numpy(self.faces.astype(np.float32)).to(device)
    self.faces_batch = stack_as_batch(faces_single, n_batch)
    self.n_verts = self.v_template.shape[0]
    #parameters
    # Optimisable pose parameters: global rotation, per-joint rotations (34
    # non-root joints), and translation.
    self.global_rot = nn.Parameter(torch.full((n_batch, 3), 0.0, device = device, requires_grad=True))
    self.joint_rot = nn.Parameter(torch.full((n_batch, 34, 3), 0.0, device = device, requires_grad=True))
    self.trans = nn.Parameter(torch.full((n_batch, 3,), 0.0, device = device, requires_grad=True))
    self.scale_factors = torch.nn.Parameter(torch.ones((self.parents.shape[0])),
                                            requires_grad = True)
    # This sets up a new set of betas that define the scale factor parameters
    self.num_beta_shape = self.n_betas = 20
    self.num_betascale = 7
    # Joint groups that each scale-factor beta acts on.
    leg_joints = list(range(7,11)) + list(range(11,15)) + list(range(17,21)) + list(range(21,25))
    tail_joints = list(range(25, 32))
    ear_joints = [33, 34]
    # Mask mapping each of the scale betas onto (joint, axis) entries of the
    # 35-joint x 3-axis scaling tensor.
    # NOTE(review): scale-beta index 6 is never assigned below although
    # num_betascale is 7 — confirm whether the last slot is intentionally unused.
    beta_scale_mask = torch.zeros(35, 3, 7).to(device)
    beta_scale_mask[leg_joints, [2], [0]] = 1.0 # Leg lengthening
    beta_scale_mask[leg_joints, [0], [1]] = 1.0 # Leg fatness
    beta_scale_mask[leg_joints, [1], [1]] = 1.0 # Leg fatness
    beta_scale_mask[tail_joints, [0], [2]] = 1.0 # Tail lengthening
    beta_scale_mask[tail_joints, [1], [3]] = 1.0 # Tail fatness
    beta_scale_mask[tail_joints, [2], [3]] = 1.0 # Tail fatness
    beta_scale_mask[ear_joints, [1], [4]] = 1.0 # Ear y
    beta_scale_mask[ear_joints, [2], [5]] = 1.0 # Ear z
    self.beta_scale_mask = torch.transpose(beta_scale_mask.reshape(35*3, self.num_betascale), 0, 1)
    self.fixed_betas = fixed_betas
    self.num_betas = num_betas # number of used betas
    max_betas = self.shapedirs.shape[0]
    assert max_betas >= self.num_betas, f"Insufficient number of betas in shapedir (Requested {self.num_betas}, shapedir has {max_betas})"
    # Load mean betas from SMAL model
    with open(data_path, "rb") as f:
        u = pickle._Unpickler(f)
        u.encoding = 'latin1'
        smal_data = u.load()
    shape_family = self.shape_family_id # Canine is family=1
    if shape_family == -1:
        self.mean_betas = torch.zeros((41)).to(device)
    else:
        loaded_betas = smal_data['cluster_means'][shape_family]
        if len(loaded_betas) < max_betas:
            # NOTE(review): pad width uses self.num_betas although the comment
            # says "pad to max shape" — confirm num_betas vs max_betas here.
            loaded_betas = np.pad(loaded_betas, (0, self.num_betas-len(loaded_betas))) # pad with 0s to max shape
        self.mean_betas = torch.FloatTensor(loaded_betas).to(device)
    # Optimisable beta vector = [shape betas | scale-factor betas].
    multi_betas = self.mean_betas[:self.num_betas]
    multi_betas_scale = torch.zeros(self.num_betascale).float().to(device)
    multi_betas = torch.cat([multi_betas, multi_betas_scale], dim = 0)
    if self.fixed_betas:
        self.multi_betas = nn.Parameter(multi_betas.repeat(1, 1))
    else:
        self.multi_betas = nn.Parameter(multi_betas.repeat(self.n_batch, 1))
    # Free-form per-vertex deformation applied on top of the SMAL shape.
    self.deform_verts = nn.Parameter(torch.zeros((n_batch, self.n_verts, 3), device=device, requires_grad=True))
    # Parameter groups exposed for the optimiser stages.
    self.smbld_shape = [self.global_rot, self.trans, self.multi_betas]
    self.smbld_params = [self.global_rot, self.joint_rot, self.trans, self.multi_betas] # params of SMBDL model
    self.deform_params = [self.deform_verts]
    self.meshes = self.get_meshes()
def get_verts(self, return_joints=False):
    """Run the SMAL forward pass and return the posed mesh.

    :param return_joints: if True, additionally return the predicted 3D joints.
    :return: ``(verts, faces_batch)`` or ``(verts, faces_batch, joints_3d)``;
        verts/faces each have shape (n_batch, n_verts/n_faces, 3).
    """
    # Split the learned parameter vector into the standard shape betas and the
    # log-scale-factor betas that drive per-joint limb scaling.
    all_betas = self.multi_betas
    betas_pred = all_betas[:, :self.num_betas]       # usual shape betas
    betas_logscale = all_betas[:, self.num_betas:]   # scale-factor betas
    # Expand log-scale betas to per-joint xyz scale factors via the fixed mask.
    betas_scale_pred = torch.exp(betas_logscale @ self.beta_scale_mask)
    # Forward through the SMAL model (self.__call__); global and per-joint
    # rotations are concatenated into a single pose vector.
    verts, joints_3d, R = self(betas_pred,
                               torch.cat((self.global_rot, self.joint_rot.view(self.n_batch, -1)), dim=1),
                               betas_scale_pred.to(self.device), trans=self.trans, deform_verts=self.deform_verts)
    if return_joints:
        return verts, self.faces_batch, joints_3d
    return verts, self.faces_batch
def get_meshes(self):
    """Build an ARAPMeshes object from the current verts/faces, cache and return it."""
    meshes = ARAPMeshes(*self.get_verts(), device=self.device)
    self.meshes = meshes
    return meshes
def __call__(self, beta, theta, betas_extra, deform_verts=None, trans=None, get_skin=True):
    """SMAL forward pass: shape blend shapes, pose blend shapes, rigid
    transform of the 35-joint skeleton, then linear blend skinning.

    :param beta: shape betas, assumed (N, n_betas) — TODO confirm with caller
    :param theta: axis-angle pose, reshaped to (N*35, 3) below
    :param betas_extra: per-joint scale factors forwarded to
        batch_global_rigid_transformation (produced in get_verts)
    :param deform_verts: optional per-vertex offsets added to the template
    :param trans: optional (N, 3) root translation; zeros when None
    :param get_skin: if True return (verts, joints, Rs), else joints only
    """
    if self.use_smal_betas:  # Always use smal betas
        nBetas = beta.shape[1]
    else:
        nBetas = 0
    # 1. Add shape blend shapes
    if nBetas > 0:
        # NOTE(review): the two branches differ in where the matmul runs
        # (beta.cpu() vs on-device shapedirs) — preserved as-is.
        if deform_verts is None:
            v_shaped = self.v_template.to(self.device) + torch.reshape(torch.matmul(beta.cpu(), self.shapedirs[:nBetas, :]),
                                                                       [-1, self.size[0], self.size[1]]).to(self.device)
        else:
            v_shaped = self.v_template + deform_verts + torch.reshape(torch.matmul(beta, self.shapedirs[:nBetas, :].to(self.device)),
                                                                      [-1, self.size[0], self.size[1]]).to(self.device)
    else:
        if deform_verts is None:
            v_shaped = self.v_template.unsqueeze(0)
        else:
            v_shaped = self.v_template + deform_verts
    # 2. Infer shape-dependent joint locations.
    Jx = torch.matmul(v_shaped[:, :, 0], self.J_regressor.to(self.device))
    Jy = torch.matmul(v_shaped[:, :, 1], self.J_regressor.to(self.device))
    Jz = torch.matmul(v_shaped[:, :, 2], self.J_regressor.to(self.device))
    J = torch.stack([Jx, Jy, Jz], dim=2)
    # 3. Add pose blend shapes
    # N x 35 x 3 x 3 rotation matrices (previous comment said 24 — SMPL legacy)
    Rs = torch.reshape(batch_rodrigues(torch.reshape(theta, [self.n_batch * 35, 3]).cpu()), [-1, 35, 3, 3]).to(self.device)
    # Ignore global rotation: 34 non-root joints * 9 = 306 pose features.
    pose_feature = torch.reshape(Rs[:, 1:, :, :].to(self.device) - torch.eye(3).to(self.device), [-1, 306])
    v_posed = torch.reshape(
        torch.matmul(pose_feature, self.posedirs.to(self.device)),
        [-1, self.size[0], self.size[1]]) + v_shaped.to(self.device)
    # 4. Get the global joint location
    self.J_transformed, A = batch_global_rigid_transformation(Rs, J, self.parents,
                                                              betas_extra=betas_extra, device=self.device)
    # 5. Do skinning: per-vertex 4x4 transforms as a blend of joint transforms.
    num_batch = theta.shape[0]
    weights_t = self.weights.repeat([num_batch, 1]).to(self.device)
    W = torch.reshape(weights_t, [num_batch, -1, 35])
    T = torch.reshape(
        torch.matmul(W, torch.reshape(A, [num_batch, 35, 16])),
        [num_batch, -1, 4, 4])
    # Homogeneous coordinates so the 4x4 transforms apply in one matmul.
    v_posed_homo = torch.cat(
        [v_posed, torch.ones([num_batch, v_posed.shape[1], 1]).to(self.device)], 2)
    v_homo = torch.matmul(T, v_posed_homo.unsqueeze(-1))
    verts = v_homo[:, :, :3, 0]
    if trans is None:
        trans = torch.zeros((num_batch, 3)).to(self.device)
    verts = verts + trans[:, None, :]
    # Get joints: regress from the final (posed, translated) vertices.
    self.J_regressor = self.J_regressor.to(self.device)
    joint_x = torch.matmul(verts[:, :, 0], self.J_regressor)
    joint_y = torch.matmul(verts[:, :, 1], self.J_regressor)
    joint_z = torch.matmul(verts[:, :, 2], self.J_regressor)
    joints = torch.stack([joint_x, joint_y, joint_z], dim=2)
    if get_skin:
        return verts, joints, Rs
    else:
        return joints
def save_npz(self, out_dir, title="", labels=None):
"""Given a directory, saves a .npz file of all params
labels: optional list of size n_batch, to save as labels for all entries"""
out = {}
for param in ["global_rot", | |
<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
from __future__ import annotations

from typing import TYPE_CHECKING

from rdflib import Graph, Namespace, URIRef
from SPARQLWrapper import SPARQLWrapper, RDFXML

from oc_ocdm.reader import Reader
from oc_ocdm.abstract_set import AbstractSet
from oc_ocdm.support.support import get_count, get_short_name
# The names below are used at runtime (instantiated or attribute-referenced in
# method bodies), so they must not live under `if TYPE_CHECKING:` — there they
# would raise NameError when the methods execute.
from oc_ocdm.graph.graph_entity import GraphEntity
from oc_ocdm.counter_handler.filesystem_counter_handler import FilesystemCounterHandler
from oc_ocdm.counter_handler.in_memory_counter_handler import InMemoryCounterHandler
from oc_ocdm.graph.entities.identifier import Identifier
from oc_ocdm.graph.entities.bibliographic.agent_role import AgentRole
from oc_ocdm.graph.entities.bibliographic.bibliographic_reference import BibliographicReference
from oc_ocdm.graph.entities.bibliographic.bibliographic_resource import BibliographicResource
from oc_ocdm.graph.entities.bibliographic.citation import Citation
from oc_ocdm.graph.entities.bibliographic.discourse_element import DiscourseElement
from oc_ocdm.graph.entities.bibliographic.pointer_list import PointerList
from oc_ocdm.graph.entities.bibliographic.reference_annotation import ReferenceAnnotation
from oc_ocdm.graph.entities.bibliographic.reference_pointer import ReferencePointer
from oc_ocdm.graph.entities.bibliographic.resource_embodiment import ResourceEmbodiment
from oc_ocdm.graph.entities.bibliographic.responsible_agent import ResponsibleAgent

if TYPE_CHECKING:
    from typing import Dict, ClassVar, Tuple, Optional, List, Set
    from rdflib import ConjunctiveGraph
    from oc_ocdm.counter_handler.counter_handler import CounterHandler
class GraphSet(AbstractSet):
    """Factory and registry for OCDM graph entities.

    Each ``add_*`` method mints (or retrieves) one entity type; entities are
    tracked in ``res_to_entity`` keyed by their URIRef.
    """
    # Human-readable labels, keyed by the two-letter short name embedded in IRIs.
    labels: ClassVar[Dict[str, str]] = {
        "an": "annotation",
        "ar": "agent role",
        "be": "bibliographic entry",
        "br": "bibliographic resource",
        "ci": "citation",
        "de": "discourse element",
        "id": "identifier",
        "pl": "single location pointer list",
        "ra": "responsible agent",
        "re": "resource embodiment",
        "rp": "in-text reference pointer"
    }
def __init__(self, base_iri: str, info_dir: str = "", supplier_prefix: str = "",
             wanted_label: bool = True) -> None:
    """Initialize the entity registry and per-type named-graph base URLs.

    :param base_iri: base IRI under which every entity graph is minted
    :param info_dir: directory for persistent counters; empty/None keeps
        counters in memory only (lost when the process ends)
    :param supplier_prefix: prefix prepended to every minted counter value
    :param wanted_label: whether minted entities get a human-readable label
    """
    super(GraphSet, self).__init__()
    # The following variable maps a URIRef with the related graph entity
    self.res_to_entity: Dict[URIRef, GraphEntity] = {}
    self.base_iri: str = base_iri
    self.supplier_prefix: str = supplier_prefix
    self.wanted_label: bool = wanted_label
    # Graphs
    # The following structure of URL is quite important for the other classes
    # developed and should not be changed. The only part that can change is the
    # value of the base_iri
    self.g_an: str = base_iri + "an/"
    self.g_ar: str = base_iri + "ar/"
    self.g_be: str = base_iri + "be/"
    self.g_br: str = base_iri + "br/"
    self.g_ci: str = base_iri + "ci/"
    self.g_de: str = base_iri + "de/"
    self.g_id: str = base_iri + "id/"
    self.g_pl: str = base_iri + "pl/"
    self.g_ra: str = base_iri + "ra/"
    self.g_re: str = base_iri + "re/"
    self.g_rp: str = base_iri + "rp/"
    # Truthiness covers both "" and None (the original spelled this out as
    # `is not None and != ""`, which is equivalent).
    if info_dir:
        self.counter_handler: CounterHandler = FilesystemCounterHandler(info_dir)
    else:
        self.counter_handler: CounterHandler = InMemoryCounterHandler()
def get_entity(self, res: URIRef) -> Optional[GraphEntity]:
    """Return the entity tracked under *res*, or None if this set does not manage it."""
    return self.res_to_entity.get(res)
# Add resources related to bibliographic entities
def add_an(self, resp_agent: str, source: str = None, res: URIRef = None,
           preexisting_graph: Graph = None) -> ReferenceAnnotation:
    """Mint (or fetch, when *res* is already tracked) a ReferenceAnnotation ("an") entity."""
    if res is not None:
        if get_short_name(res) != "an":
            raise ValueError(f"Given res: <{res}> is inappropriate for a ReferenceAnnotation entity.")
        if res in self.res_to_entity:
            return self.res_to_entity[res]
    graph, count, label = self._add(self.g_an, "an", res)
    return ReferenceAnnotation(graph, self, res, GraphEntity.iri_note,
                               resp_agent, source, count, label, "an",
                               preexisting_graph)
def add_ar(self, resp_agent: str, source: str = None, res: URIRef = None,
           preexisting_graph: Graph = None) -> AgentRole:
    """Mint (or fetch, when *res* is already tracked) an AgentRole ("ar") entity."""
    if res is not None:
        if get_short_name(res) != "ar":
            raise ValueError(f"Given res: <{res}> is inappropriate for an AgentRole entity.")
        if res in self.res_to_entity:
            return self.res_to_entity[res]
    graph, count, label = self._add(self.g_ar, "ar", res)
    return AgentRole(graph, self, res, GraphEntity.iri_role_in_time,
                     resp_agent, source, count, label, "ar",
                     preexisting_graph)
def add_be(self, resp_agent: str, source: str = None, res: URIRef = None,
           preexisting_graph: Graph = None) -> BibliographicReference:
    """Mint (or fetch, when *res* is already tracked) a BibliographicReference ("be") entity."""
    if res is not None:
        if get_short_name(res) != "be":
            raise ValueError(f"Given res: <{res}> is inappropriate for a BibliographicReference entity.")
        if res in self.res_to_entity:
            return self.res_to_entity[res]
    graph, count, label = self._add(self.g_be, "be", res)
    return BibliographicReference(graph, self, res, GraphEntity.iri_bibliographic_reference,
                                  resp_agent, source, count, label, "be",
                                  preexisting_graph)
def add_br(self, resp_agent: str, source: str = None, res: URIRef = None,
           preexisting_graph: Graph = None) -> BibliographicResource:
    """Mint (or fetch, when *res* is already tracked) a BibliographicResource ("br") entity."""
    if res is not None:
        if get_short_name(res) != "br":
            raise ValueError(f"Given res: <{res}> is inappropriate for a BibliographicResource entity.")
        if res in self.res_to_entity:
            return self.res_to_entity[res]
    graph, count, label = self._add(self.g_br, "br", res)
    return BibliographicResource(graph, self, res, GraphEntity.iri_expression,
                                 resp_agent, source, count, label, "br",
                                 preexisting_graph)
def add_ci(self, resp_agent: str, source: str = None, res: URIRef = None,
           preexisting_graph: Graph = None) -> Citation:
    """Mint (or fetch, when *res* is already tracked) a Citation ("ci") entity."""
    if res is not None:
        if get_short_name(res) != "ci":
            raise ValueError(f"Given res: <{res}> is inappropriate for a Citation entity.")
        if res in self.res_to_entity:
            return self.res_to_entity[res]
    graph, count, label = self._add(self.g_ci, "ci", res)
    return Citation(graph, self, res, GraphEntity.iri_citation,
                    resp_agent, source, count, label, "ci",
                    preexisting_graph)
def add_de(self, resp_agent: str, source: str = None, res: URIRef = None,
           preexisting_graph: Graph = None) -> DiscourseElement:
    """Mint (or fetch, when *res* is already tracked) a DiscourseElement ("de") entity."""
    if res is not None:
        if get_short_name(res) != "de":
            raise ValueError(f"Given res: <{res}> is inappropriate for a DiscourseElement entity.")
        if res in self.res_to_entity:
            return self.res_to_entity[res]
    graph, count, label = self._add(self.g_de, "de", res)
    return DiscourseElement(graph, self, res, GraphEntity.iri_discourse_element,
                            resp_agent, source, count, label, "de",
                            preexisting_graph)
def add_id(self, resp_agent: str, source: str = None, res: URIRef = None,
           preexisting_graph: Graph = None) -> Identifier:
    """Mint (or fetch, when *res* is already tracked) an Identifier ("id") entity."""
    if res is not None:
        if get_short_name(res) != "id":
            raise ValueError(f"Given res: <{res}> is inappropriate for an Identifier entity.")
        if res in self.res_to_entity:
            return self.res_to_entity[res]
    graph, count, label = self._add(self.g_id, "id", res)
    return Identifier(graph, self, res, GraphEntity.iri_identifier,
                      resp_agent, source, count, label, "id",
                      preexisting_graph)
def add_pl(self, resp_agent: str, source: str = None, res: URIRef = None,
           preexisting_graph: Graph = None) -> PointerList:
    """Mint (or fetch, when *res* is already tracked) a PointerList ("pl") entity."""
    if res is not None:
        if get_short_name(res) != "pl":
            raise ValueError(f"Given res: <{res}> is inappropriate for a PointerList entity.")
        if res in self.res_to_entity:
            return self.res_to_entity[res]
    graph, count, label = self._add(self.g_pl, "pl", res)
    return PointerList(graph, self, res, GraphEntity.iri_singleloc_pointer_list,
                       resp_agent, source, count, label, "pl",
                       preexisting_graph)
def add_rp(self, resp_agent: str, source: str = None, res: URIRef = None,
           preexisting_graph: Graph = None) -> ReferencePointer:
    """Mint (or fetch, when *res* is already tracked) a ReferencePointer ("rp") entity."""
    if res is not None:
        if get_short_name(res) != "rp":
            raise ValueError(f"Given res: <{res}> is inappropriate for a ReferencePointer entity.")
        if res in self.res_to_entity:
            return self.res_to_entity[res]
    graph, count, label = self._add(self.g_rp, "rp", res)
    return ReferencePointer(graph, self, res, GraphEntity.iri_intextref_pointer,
                            resp_agent, source, count, label, "rp",
                            preexisting_graph)
def add_ra(self, resp_agent: str, source: str = None, res: URIRef = None,
           preexisting_graph: Graph = None) -> ResponsibleAgent:
    """Mint (or fetch, when *res* is already tracked) a ResponsibleAgent ("ra") entity."""
    if res is not None:
        if get_short_name(res) != "ra":
            raise ValueError(f"Given res: <{res}> is inappropriate for a ResponsibleAgent entity.")
        if res in self.res_to_entity:
            return self.res_to_entity[res]
    graph, count, label = self._add(self.g_ra, "ra", res)
    return ResponsibleAgent(graph, self, res, GraphEntity.iri_agent,
                            resp_agent, source, count, label, "ra",
                            preexisting_graph)
def add_re(self, resp_agent: str, source: str = None, res: URIRef = None,
           preexisting_graph: Graph = None) -> ResourceEmbodiment:
    """Mint (or fetch, when *res* is already tracked) a ResourceEmbodiment ("re") entity."""
    if res is not None:
        if get_short_name(res) != "re":
            raise ValueError(f"Given res: <{res}> is inappropriate for a ResourceEmbodiment entity.")
        if res in self.res_to_entity:
            return self.res_to_entity[res]
    graph, count, label = self._add(self.g_re, "re", res)
    return ResourceEmbodiment(graph, self, res, GraphEntity.iri_manifestation,
                              resp_agent, source, count, label, "re",
                              preexisting_graph)
def _add(self, graph_url: str, short_name: str, res: URIRef = None) -> Tuple[Graph, Optional[str], Optional[str]]:
    """Create a fresh named graph; for brand-new entities also mint a count/label.

    When *res* is given (pre-existing entity) no count/label is produced, but the
    type counter is bumped up to the count embedded in the IRI if it is higher.
    """
    cur_g: Graph = Graph(identifier=graph_url)
    self._set_ns(cur_g)
    if res is not None:
        try:
            res_count = int(get_count(res))
        except ValueError:
            res_count = -1
        if res_count > self.counter_handler.read_counter(short_name):
            self.counter_handler.set_counter(res_count, short_name)
        return cur_g, None, None
    count = self.supplier_prefix + str(self.counter_handler.increment_counter(short_name))
    label = None
    if self.wanted_label:
        label = f"{self.labels[short_name]} {count} [{short_name}/{count}]"
    return cur_g, count, label
def get_orphans(self) -> List[GraphEntity]:
full_set_of_entities: Set[URIRef] = set(self.res_to_entity.keys())
referenced_entities: Set[URIRef] = set()
for res, entity in self.res_to_entity.items():
for obj in entity.g.objects(subject=res, predicate=None):
if type(obj) == URIRef:
referenced_entities.add(obj)
set_of_orphan_res: Set[URIRef] = full_set_of_entities - referenced_entities
result_list: List[GraphEntity] = []
for orphan_res in set_of_orphan_res:
entity: Optional[GraphEntity] = self.get_entity(orphan_res)
if | |
u'z', u'g'] ,
u'㨝' : [u'x'] ,
u'缟' : [u'g'] ,
u'䢦' : [u's'] ,
u'趨' : [u'q', u'c'] ,
u'圯' : [u'y'] ,
u'頱' : [u'l'] ,
u'斸' : [u'z'] ,
u'伿' : [u'y'] ,
u'灁' : [u'y'] ,
u'巈' : [u'j'] ,
u'黊' : [u'x'] ,
u'桑' : [u's'] ,
u'㗘' : [u'b', u'f'] ,
u'盚' : [u'q'] ,
u'蕣' : [u's'] ,
u'滪' : [u'y'] ,
u'鏬' : [u'x'] ,
u'㡱' : [u'j'] ,
u'絳' : [u'j'] ,
u'䛺' : [u'j'] ,
u'诼' : [u'z'] ,
u'龉' : [u'y'] ,
u'怌' : [u'p'] ,
u'錖' : [u'd'] ,
u'瞙' : [u'm'] ,
u'堜' : [u'l'] ,
u'㲟' : [u'y'] ,
u'謦' : [u'q'] ,
u'澩' : [u'x'] ,
u'麳' : [u'l'] ,
u'挶' : [u'j'] ,
u'䞹' : [u'h'] ,
u'鉀' : [u'h', u'j', u'g'] ,
u'盃' : [u'b'] ,
u'孆' : [u'y'] ,
u'㿉' : [u't'] ,
u'詐' : [u'z'] ,
u'滓' : [u'z'] ,
u'駝' : [u't'] ,
u'扠' : [u'c', u'z'] ,
u'燭' : [u'z'] ,
u'婰' : [u'd'] ,
u'蕺' : [u'q', u'j'] ,
u'槽' : [u'c', u'z'] ,
u'舋' : [u'x', u'w'] ,
u'綊' : [u'x'] ,
u'䬑' : [u'w'] ,
u'稛' : [u'k'] ,
u'喚' : [u'h'] ,
u'蒤' : [u't'] ,
u'别' : [u'b'] ,
u'䶪' : [u'c'] ,
u'鴵' : [u'j'] ,
u'粴' : [u'l'] ,
u'䨻' : [u'b'] ,
u'畅' : [u'c'] ,
u'哄' : [u'h'] ,
u'蟎' : [u'm'] ,
u'浕' : [u'j'] ,
u'䳔' : [u'j'] ,
u'鱟' : [u'h'] ,
u'翞' : [u'j'] ,
u'䕥' : [u'y', u'n'] ,
u'瑯' : [u'l'] ,
u'埮' : [u't'] ,
u'㵵' : [u'z'] ,
u'蛸' : [u'x', u's'] ,
u'汿' : [u'x'] ,
u'俾' : [u'b'] ,
u'㨆' : [u'l'] ,
u'缈' : [u'm'] ,
u'䎋' : [u'h', u'k'] ,
u'肍' : [u'q'] ,
u'團' : [u't'] ,
u'㮛' : [u'c'] ,
u'鐚' : [u'y'] ,
u'碝' : [u'r'] ,
u'伨' : [u'x'] ,
u'谪' : [u'z'] ,
u'傭' : [u'y'] ,
u'閯' : [u's'] ,
u'携' : [u'x'] ,
u'䢽' : [u'x'] ,
u'趿' : [u'q', u's', u't'] ,
u'届' : [u'j'] ,
u'腌' : [u'y', u'a'] ,
u'族' : [u'c', u'z'] ,
u'㑚' : [u'n'] ,
u'祜' : [u'h'] ,
u'巟' : [u'h'] ,
u'苡' : [u'y', u's'] ,
u'公' : [u'g'] ,
u'陮' : [u'd'] ,
u'竱' : [u'z'] ,
u'蹾' : [u'd'] ,
u'吅' : [u'x'] ,
u'餇' : [u't'] ,
u'暎' : [u'y'] ,
u'䰕' : [u'l'] ,
u'焗' : [u'j'] ,
u'庞' : [u'p'] ,
u'莠' : [u'y', u'x'] ,
u'箰' : [u's'] ,
u'䄷' : [u's', u'd'] ,
u'蘹' : [u'h'] ,
u'叀' : [u'h', u'z'] ,
u'郂' : [u'g'] ,
u'㥇' : [u'c'] ,
u'繉' : [u'h'] ,
u'䯐' : [u'h'] ,
u'裒' : [u'p', u'b'] ,
u'噙' : [u'q'] ,
u'魛' : [u'd'] ,
u'惢' : [u's', u'r'] ,
u'乩' : [u'j'] ,
u'獫' : [u'x'] ,
u'売' : [u'm'] ,
u'鷴' : [u'x'] ,
u'死' : [u's'] ,
u'閁' : [u'm'] ,
u'爄' : [u'l'] ,
u'庇' : [u'b'] ,
u'㬊' : [u'h'] ,
u'趑' : [u'c', u'z'] ,
u'樔' : [u'c', u'j'] ,
u'㚗' : [u'q', u'x'] ,
u'餞' : [u'j'] ,
u'斡' : [u'g', u'w'] ,
u'䈤' : [u'q'] ,
u'钫' : [u'f'] ,
u'焮' : [u'x'] ,
u'嶱' : [u'k', u'g'] ,
u'㨴' : [u'j'] ,
u'費' : [u'b', u'f'] ,
u'椾' : [u'z', u'j'] ,
u'㗁' : [u'e'] ,
u'顈' : [u'j'] ,
u'擋' : [u'd', u't'] ,
u'䅎' : [u'y'] ,
u'灘' : [u'h', u't', u'n'] ,
u'峛' : [u'l'] ,
u'㥞' : [u'q', u'c', u's'] ,
u'菥' : [u'x', u's'] ,
u'桨' : [u'j'] ,
u'齲' : [u'q'] ,
u'篵' : [u'c'] ,
u'䁸' : [u'n'] ,
u'逃' : [u't'] ,
u'瞂' : [u'f'] ,
u'変' : [u'b'] ,
u'㢈' : [u't'] ,
u'蠓' : [u'm'] ,
u'澒' : [u'h', u'g'] ,
u'麜' : [u'l'] ,
u'怣' : [u'y'] ,
u'䞢' : [u'z'] ,
u'霭' : [u'a'] ,
u'皬' : [u'h'] ,
u'堳' : [u'm'] ,
u'輽' : [u'b'] ,
u'溼' : [u'q', u's'] ,
u'鷆' : [u'z', u't'] ,
u'杍' : [u'z'] ,
u'䛌' : [u'p', u't'] ,
u'陗' : [u'q'] ,
u'痖' : [u'y'] ,
u'彝' : [u'y'] ,
u'蹧' : [u'z'] ,
u'淦' : [u'g'] ,
u'㝭' : [u'x'] ,
u'晷' : [u'g'] ,
u'䗶' : [u'l'] ,
u'甀' : [u'z'] ,
u'銅' : [u't'] ,
u'洐' : [u'x'] ,
u'誕' : [u'd'] ,
u'䔠' : [u's'] ,
u'蘢' : [u'l'] ,
u'报' : [u'b'] ,
u'㴰' : [u'n'] ,
u'縲' : [u'l'] ,
u'媵' : [u'y'] ,
u'螷' : [u'p', u'b'] ,
u'噂' : [u'z'] ,
u'鍄' : [u'l'] ,
u'翇' : [u'f'] ,
u'乒' : [u'p'] ,
u'譔' : [u'z'] ,
u'埗' : [u'b'] ,
u'郙' : [u'f'] ,
u'裩' : [u'g'] ,
u'孴' : [u'n'] ,
u'葶' : [u't', u'd'] ,
u'惹' : [u'r'] ,
u'粆' : [u's'] ,
u'䘍' : [u'c'] ,
u'茏' : [u'l'] ,
u'咖' : [u'k', u'g'] ,
u'醘' : [u'k'] ,
u'㸝' : [u'k'] ,
u'笟' : [u'g'] ,
u'䲦' : [u'c', u'd', u't'] ,
u'覨' : [u'e'] ,
u'匯' : [u'h'] ,
u'鰱' : [u'l'] ,
u'憸' : [u'x'] ,
u'䬿' : [u'w'] ,
u'瑁' : [u'm'] ,
u'姈' : [u'l'] ,
u'髊' : [u'c'] ,
u'汑' : [u't'] ,
u'狚' : [u'd'] ,
u'腣' : [u'd'] ,
u'櫪' : [u'l'] ,
u'韬' : [u't'] ,
u'㱱' : [u'h', u'w', u'x'] ,
u'祳' : [u'c'] ,
u'迼' : [u'j'] ,
u'㔂' : [u'l'] ,
u'鮉' : [u'd'] ,
u'搌' : [u'z'] ,
u'霖' : [u'l'] ,
u'玙' : [u'y'] ,
u'尜' : [u'g'] ,
u'㢟' : [u'c'] ,
u'輦' : [u'n'] ,
u'殩' : [u'c'] ,
u'㐬' : [u'l'] ,
u'骳' : [u'b'] ,
u'朶' : [u'd'] ,
u'䎹' : [u'w'] ,
u'陀' : [u't', u'd'] ,
u'狃' : [u'n'] ,
u'彆' : [u'b'] ,
u'㯉' : [u'h'] ,
u'蹐' : [u'j'] ,
u'櫓' : [u'l'] ,
u'㝖' : [u'y'] ,
u'鷝' : [u'b'] ,
u'晠' : [u's'] ,
u'䋣' : [u'p', u'f'] ,
u'痭' : [u'p'] ,
u'幰' : [u'x'] ,
u'㫳' : [u'c'] ,
u'腺' : [u'x'] ,
u'淽' : [u'z'] ,
u'圁' : [u'y'] ,
u'蘋' : [u'p'] ,
u'禊' : [u'x'] ,
u'休' : [u'x'] ,
u'縛' : [u'f'] ,
u'冚' : [u'k'] ,
u'肤' : [u'f'] ,
u'嘫' : [u'r'] ,
u'䦪' : [u'y'] ,
u'餵' : [u'w'] ,
u'碴' : [u'c', u'z'] ,
u'主' : [u'z'] ,
u'煅' : [u'd'] ,
u'僄' : [u'p'] ,
u'菎' : [u'k', u'j'] ,
u'楕' : [u't'] ,
u'䣔' : [u's'] ,
u'顟' : [u'l'] ,
u'篞' : [u'n'] ,
u'䅥' : [u'j'] ,
u'灯' : [u'd'] ,
u'叮' : [u'd'] ,
u'㥵' : [u'h'] ,
u'苸' : [u'h'] ,
u'桿' : [u'g'] ,
u'䯾' : [u't'] ,
u'笈' : [u'j'] ,
u'䞋' : [u'c'] ,
u'蒍' : [u'w'] ,
u'匘' : [u'n'] ,
u'㾛' : [u'q'] ,
u'通' : [u't'] ,
u'粝' : [u'l'] ,
u'䬨' : [u'c', u'j', u'z'] ,
u'蠪' : [u'l'] ,
u'咭' : [u'j'] ,
u'醯' : [u'x'] ,
u'䲽' : [u's', u'd'] ,
u'覿' : [u'j', u'd'] ,
u'塊' : [u'k'] ,
u'蕌' : [u'l'] ,
u'懏' : [u'j'] ,
u'絜' : [u'x', u'j'] ,
u'姟' : [u'g'] ,
u'蛡' : [u'y'] ,
u'啬' : [u's'] ,
u'鉮' : [u's'] ,
u'绱' : [u'z'] ,
u'詾' : [u'x'] ,
u'倅' : [u'c'] ,
u'鴇' : [u'b'] ,
u'抎' : [u'y'] ,
u'甗' : [u'y'] ,
u'媞' : [u't', u'd'] ,
u'螠' : [u'y'] ,
u'洧' : [u'w'] ,
u'羰' : [u't'] ,
u'䔷' : [u'q'] ,
u'船' : [u'c'] ,
u'埀' : [u'c', u'z'] ,
u'铂' : [u'b'] ,
u'穉' : [u'z'] ,
u'俐' : [u'l'] ,
u'賒' : [u's'] ,
u'剙' : [u'c'] ,
u'齛' : [u's'] ,
u'擢' : [u'z'] ,
u'䩩' : [u'y'] ,
u'睫' : [u's', u'j'] ,
u'峲' : [u'l'] ,
u'駴' : [u'x'] ,
u'潻' : [u's'] ,
u'馁' : [u'n'] ,
u'劇' : [u'j'] ,
u'㜊' : [u'p', u'z'] ,
u'膑' : [u'b'] ,
u'昔' : [u'x', u'c'] ,
u'㪗' : [u't'] ,
u'锞' : [u'k'] ,
u'榡' : [u's'] ,
u'两' : [u'l'] ,
u'颫' : [u'f'] ,
u'紮' : [u'z'] ,
u'冱' : [u'h'] ,
u'㘴' : [u'z'] ,
u'肻' : [u'k'] ,
u'放' : [u'f'] ,
u'㧁' : [u'q'] ,
u'鑈' : [u'n'] ,
u'棋' : [u'q', u'j'] ,
u'籘' : [u't'] ,
u'僛' : [u'q'] ,
u'迥' : [u'j'] ,
u'摨' : [u'n'] ,
u'㣫' : [u'z'] ,
u'鍲' : [u'm'] ,
u'矵' : [u'q', u'd'] ,
u'鰃' : [u'w'] ,
u'箂' : [u'l'] ,
u'唉' : [u'a'] ,
u'㒈' : [u'h'] ,
u'萓' : [u'y'] ,
u'排' : [u'p', u'b'] ,
u'益' : [u'y'] ,
u'㴙' : [u'z'] ,
u'銜' : [u'x'] ,
u'氣' : [u'q', u'x'] ,
u'䮢' : [u'd'] ,
u'鬭' : [u'd'] ,
u'窬' : [u'y', u'd'] ,
u'吳' : [u'w'] ,
u'茽' : [u'z'] ,
u'押' : [u'y', u'x'] ,
u'㱃' : [u'y'] ,
u'釆' : [u'c', u'b'] ,
u'歍' : [u'w'] ,
u'䫌' : [u'p'] ,
u'驗' : [u'y'] ,
u'秖' : [u'z'] ,
u'卝' : [u'k'] ,
u'艧' : [u'h'] ,
u'懦' : [u'n'] ,
u'㭭' : [u'f'] ,
u'郰' : [u'z'] ,
u'橷' : [u'd'] ,
u'䧶' : [u'k'] ,
u'嶃' : [u'z'] ,
u'麅' : [u'p', u'b'] ,
u'愐' : [u'm'] ,
u'䖓' : [u'n'] ,
u'蚕' : [u'c', u't'] ,
u'䤠' : [u'j', u'z'] ,
u'訢' : [u'y', u'x'] ,
u'溥' : [u'p', u'b', u'f'] ,
u'爲' : [u'w'] ,
u'嚵' : [u'c'] ,
u'讷' : [u'n'] ,
u'婂' : [u'm'] ,
u'㻅' : [u'h', u'k'] ,
u'齄' : [u'z'] ,
u'珇' : [u'z'] ,
u'䉒' : [u'f'] ,
u'蝔' : [u'j'] ,
u'寗' : [u'n'] ,
u'鳙' : [u'y'] ,
u'潤' : [u'r'] ,
u'䏧' : [u'n'] ,
u'蓩' : [u'm'] ,
u'坴' : [u'l'] ,
u'衶' : [u'c', u'z'] ,
u'㾄' : [u'd'] ,
u'炆' : [u'w'] ,
u'䨍' : [u'y'] ,
u'輏' : [u'y'] ,
u'墖' : [u't'] ,
u'鶘' | |
' ' +\
self.x_field('COMMODITY===================',comm_size) + ' ' +\
self.x_field('CURRENT', 10)
#get the extract data
data = ('',)
sql = 'select car.railroad, car.car, place.station, place.code, ' +\
'warehouse.industry, warehouse.destination, warehouse.commodity, commodity.name, car.train, car.station ' +\
'from car, waybill, warehouse, place, commodity ' +\
'where car.carorder != 0 ' +\
'and car.commodity == ? ' +\
'and car.carorder = waybill.id ' +\
'and waybill.warehouse = warehouse.id ' +\
'and place.industry = warehouse.industry ' +\
'and warehouse.commodity = commodity.commodity ' +\
'order by place.station, place.code, car.railroad, car.car'
count, ds_cars = self.db_read(sql, data)
if count < 0:
return
#process the data
line_count = 0
for row in ds_cars:
if line_count == 0:
print(titles)
railroad = row[0]
car = row[2]
stax = row[2]
plax = row[3]
industry = row[4]
staxplax = stax + '/' + plax + ' ' + industry
destination = row[5]
commodity = row[6]
commodity_name = row[7]
train = row[8]
car_at = row[9]
current_loc = car_at + train
current_loc = current_loc.strip()
if stax == filter_station or filter_station == '*':
if industry == filter_industry or filter_industry == '*':
print(self.x_field(railroad, self.railsize) + " " +
self.x_field(car, self.carxsize) + " " +
self.x_field(staxplax, self.staxsize + self.plaxsize + 12) + " " +
self.x_field(destination, 10) + " " +
self.x_field(commodity, self.commsize) + " " +
self.x_field(commodity_name, comm_size - self.commsize) + " " +
self.x_field(current_loc, 10))
line_count = line_count + 1
if line_count > 20:
line_count = 0
reply = raw_input('+')
if reply == 'x':
break
print(' ** END OF DATA: ' + str(count) + ' RECORDS DISPLAYED **')
return
def lacars(self, message):
    """lists activity details about cars. can be filtered by types, railroad, or station. this
    list shows cars in traffic (ie not maintenance) and can be filtered by station/class. also
    indicates clean/dirty and loaded/unloaded
    """
    # Access check: show_access also prints the command syntax on failure.
    if self.show_access(message, 'LACARS (^car class^);(^station^)', 'R') != 0:
        return
    # get the parameters for the report: field 0 = car class, field 1 = station;
    # '*' means "match everything" for a missing filter.
    value, rc = self.extract_field(message, 0, '')
    if rc == 0:
        filter_class = value
    else:
        filter_class = '*'
    value, rc = self.extract_field(message, 1, '')
    if rc == 0:
        filter_stax = value
    else:
        filter_stax = '*'
    # build the column titles; PLACE gets whatever width is left on a 79-col line.
    place_size = 79 - self.railsize - self.carxsize - self.classize - self.staxsize - self.plaxsize - self.commsize - 39
    titles = self.x_field('RAILROAD==', self.railsize) + ' ' +\
             self.x_field('CAR=======', self.carxsize) + ' ' +\
             self.x_field('CLASS=====', self.classize) + ' ' +\
             self.x_field('STATION======================', self.staxsize + 9) + ' ' +\
             self.x_field('PLACE=========================', place_size) + ' ' +\
             self.x_field('COMMODITY=', self.commsize) + ' ' +\
             self.x_field('DESTINATION', 10) + ' ' +\
             self.x_field('C', 1) + ' ' +\
             self.x_field('BLOCK=====', 5) + ' ' +\
             self.x_field('MAINT', 5)
    # get the extract data: every car, left-joined so cars on trains (no
    # station) still appear.
    sql = 'select car.railroad, car.car, car.carclass, car.station, station.short_name, ' +\
          'car.train, car.commodity, car.clean_dirty, car.block, car.place_id, car.carorder, ' +\
          'car.time_to_maint, car.is_attached_set ' +\
          'from car left outer join station on car.station = station.station order by car.railroad, car.car'
    count, ds_cars = self.db_read(sql, '')
    if count < 0:
        return
    # process the data, paging 20 rows at a time.
    line_count = 0
    counter = 0
    for row in ds_cars:
        clas = row[2]
        stax = row[3]
        # An empty station means the car is attached to a train.
        if stax == '':
            station_detail = 'ON TRAIN'
        else:
            station_detail = stax + ' ' + row[4]
        train = row[5]
        place = row[9]
        car_order = row[10]
        block = row[8]
        time_to_maint = row[11]
        place_name = ''
        destination = ''
        attached_set = row[12]
        if (clas == filter_class) or (filter_class == '*'):
            if (stax == filter_stax) or (filter_stax == '*'):
                # Print the header before the first matching row (and after
                # each page break, since line_count resets to 0).
                if line_count == 0:
                    print(titles)
                # Resolve the place id to "CODE NAME" when the car is spotted.
                if place != 0:
                    data = (place,)
                    sql = 'select name, code from place where id = ?'
                    pcount, ds_places = self.db_read(sql, data)
                    if pcount < 0: #Rev 1
                        return
                    for xrow in ds_places:
                        place_name = xrow[0]
                        place_code = xrow[1]
                        place_name = place_code + ' ' + place_name
                # Resolve the active waybill destination, if any.
                data = (car_order,)
                sql = 'select destination from waybill where id = ?'
                wcount, ds_orders = self.db_read(sql, data)
                if wcount < 0: #Rev 1
                    return
                for wrow in ds_orders:
                    destination = wrow[0]
                # Cars on a train show the train (and attached set) as place.
                if stax == '':
                    if attached_set == '':
                        place_name = train
                    else:
                        place_name = train + ' (' + attached_set + ')'
                print(self.x_field(row[0], self.railsize) + " " +
                      self.x_field(row[1], self.carxsize) + " " +
                      self.x_field(row[2], self.classize) + " " +
                      self.x_field(station_detail, self.staxsize + 9) + " " +
                      self.x_field(place_name, place_size) + " " +
                      self.x_field(row[6], self.commsize) + " " +
                      self.x_field(destination, 10) + " " +
                      self.x_field(row[7], 1) + " " +
                      self.x_field(block, 5) + " " +
                      self.x_field(time_to_maint,5, 'R'))
                line_count = line_count + 1
                counter = counter + 1
                # Page break every 20 lines; 'x' aborts the listing.
                # NOTE(review): raw_input is Python 2 — must become input() on Python 3.
                if line_count > 20:
                    line_count = 0
                    reply = raw_input('+')
                    if reply == 'x':
                        break
    print(' ** END OF DATA: ' + str(counter) + ' RECORDS DISPLAYED **')
    return
def lmtcar(self, message):
    """lists activity details about cars. can be filtered by types, railroad, or station. this
    list shows cars in traffic (ie not maintenance) and can be filtered by station/class. also
    indicates clean/dirty and loaded/unloaded
    """
    # NOTE(review): docstring appears copied from lacars; this report lists
    # empty cars (carorder = 0) at stations, filtered by area/station/class.
    if self.show_access(message, 'LMTCAR (^area^);(^station^);(^car class^)', 'R') != 0:
        return
    # get the parameters for the report: field 0 = area, 1 = station,
    # 2 = car class; '*' means "match everything".
    value, rc = self.extract_field(message, 0, '')
    if rc == 0:
        filter_area = value
    else:
        filter_area = '*'
    value, rc = self.extract_field(message, 1, '')
    if rc == 0:
        filter_stax = value
    else:
        filter_stax = '*'
    value, rc = self.extract_field(message, 2, '')
    if rc == 0:
        filter_class = value
    else:
        filter_class = '*'
    # build the column titles
    titles = self.x_field('RAILROAD==', self.railsize) + ' ' +\
             self.x_field('AREA======', self.areasize) + ' ' +\
             self.x_field('STATION===', self.staxsize) + ' ' +\
             self.x_field('NAME======', 8) + ' ' +\
             self.x_field('PLACE=====', self.plaxsize) + ' ' +\
             self.x_field('TYPE======', self.cartsize) + ' ' +\
             self.x_field('CAR=======', self.carxsize) + ' ' +\
             self.x_field('C', 1) + ' ' +\
             self.x_field('BLOCK', 8) + ' ' +\
             self.x_field('LOADING===', self.loadsize) + ' ' +\
             self.x_field('UNLOAD====', self.loadsize)
    # get the extract data: cars at a station (car.station != '') with no
    # active car order (car.carorder = 0).
    data = ('', 0)
    sql = 'select car.railroad, station.area, car.station, car.place_id, car.cartype, ' +\
          'car.car, car.clean_dirty, car.block, cartype.loading, cartype.unloading, car.carclass, station.short_name ' +\
          'from car, station, cartype ' +\
          'where car.station = station.station and cartype.cartype = car.cartype ' +\
          'and car.station != ? and car.carorder = ? order by car.car'
    count, ds_cars = self.db_read(sql, data)
    if count < 0:
        return
    # process the data, paging 20 rows at a time.
    line_count = 0
    counter = 0
    for row in ds_cars:
        railroad = row[0]
        area = row[1]
        station = row[2]
        place = row[3]
        cartype = row[4]
        car = row[5]
        clean = row[6]
        block = row[7]
        loading = row[8]
        unloading = row[9]
        carclass = row[10]
        short_name = row[11]
        if carclass == filter_class or filter_class == '*':
            if station == filter_stax or filter_stax == '*':
                if area == filter_area or filter_area == '*':
                    # Header before the first row of each page (line_count resets).
                    if line_count == 0:
                        print(titles)
                    # Resolve the place id to its code; blank when not found.
                    place_id = ' '
                    data = (place,)
                    sql = 'select code from place where id = ?'
                    pcount, ds_places = self.db_read(sql, data)
                    if pcount < 0: #Rev 1
                        return
                    for prow in ds_places:
                        place_id = prow[0]
                    print(self.x_field(railroad, self.railsize) + " " +
                          self.x_field(area, self.areasize) + " " +
                          self.x_field(station, self.staxsize) + " " +
                          self.x_field(short_name, 8) + " " +
                          self.x_field(place_id, self.plaxsize) + " " +
                          self.x_field(cartype, self.cartsize) + " " +
                          self.x_field(car, self.carxsize) + " " +
                          self.x_field(clean, 1) + " " +
                          self.x_field(block, 8) + " " +
                          self.x_field(loading, self.loadsize) + " " +
                          self.x_field(unloading, self.loadsize))
                    line_count = line_count + 1
                    counter = counter + 1
                    # Page break every 20 lines; 'x' aborts the listing.
                    # NOTE(review): raw_input is Python 2 — must become input() on Python 3.
                    if line_count > 20:
                        line_count = 0
                        reply = raw_input('+')
                        if reply == 'x':
                            break
    print(' ** END OF DATA: ' + str(counter) + ' RECORDS DISPLAYED **')
    return
return
def licars(self, message):
"""list basic details about cars, sortable by various orders and can be filtered by type,
railroad or station
"""
if self.show_access(message, 'LICARS (sort[0/1/2/3]);(^car class^);(^railroad^);(^station^)', 'R') != 0:
return
#get the parameters for the report
value, rc = self.extract_field(message, 0, '')
if rc == 0:
sort_order = value
else:
sort_order = '0'
value, rc = self.extract_field(message, 1, '')
if rc == 0:
filter_class = value
else:
filter_class | |
async def run(self) -> None:
    """Service entry point: drive the body syncer to completion.

    Must be ``async def`` — the original plain ``def`` containing ``await``
    is a SyntaxError; ``await`` is only valid inside a coroutine function.
    """
    await self.manager.run_service(self._body_syncer)
class BodyChainGapSyncer(Service):
    """
    A service to sync historical blocks without executing them. This service is meant to be run
    in tandem with other operations that sync the state.
    """
    # How long to idle between backfill passes when there is nothing to do.
    _idle_time = BLOCK_BACKFILL_IDLE_TIME
    # Launch header for the gap currently being filled; set per gap in
    # _setup_for_next_gap(), None until then.
    _starting_tip: BlockHeaderAPI = None
    logger = get_logger('trinity.sync.beam.chain.BodyChainGapSyncer')
def __init__(self,
             chain: AsyncChainAPI,
             db: BaseAsyncChainDB,
             peer_pool: ETHPeerPool) -> None:
    self._chain = chain
    self._db = db
    self._peer_pool = peer_pool
    # Lets callers pause/resume backfilling (checked in _setup_for_next_gap).
    self._pauser = Pauser()
    # Re-created for every gap by _setup_for_next_gap(); None when no gap is active.
    self._body_syncer: FastChainBodySyncer = None
    # Upper bound on how many block bodies one backfill pass may cover.
    self._max_backfill_block_bodies_at_once = MAX_BACKFILL_BLOCK_BODIES_AT_ONCE
async def _setup_for_next_gap(self) -> None:
    """Pick the next body gap and build a FastChainBodySyncer to fill it.

    Fills at most ``_max_backfill_block_bodies_at_once`` blocks, taken from the
    top of the gap downwards. Raises ValidationError if the service was paused
    while this (async) setup was running.
    """
    gap_start, gap_end = self._get_next_gap()
    # Clamp the fill range so one pass never exceeds the configured maximum.
    fill_start = BlockNumber(max(
        gap_start,
        gap_end - self._max_backfill_block_bodies_at_once,
    ))
    # Launch from the canonical header just below the first block to fill.
    start_num = BlockNumber(fill_start - 1)
    _starting_tip = await self._db.coro_get_canonical_block_header_by_number(start_num)
    if self._pauser.is_paused:
        # If the syncer was paused while we were busy setting it up throw the current setup
        # away. A new setup will be performed as soon as `resume()` was called again.
        raise ValidationError("Syncer was paused by the user")
    async def _get_launch_header() -> BlockHeaderAPI:
        return _starting_tip
    self.logger.debug("Starting to sync missing blocks from #%s to #%s", fill_start, gap_end)
    self._body_syncer = FastChainBodySyncer(
        self._chain,
        self._db,
        self._peer_pool,
        DatabaseBlockRangeHeaderSyncer(self._db, (fill_start, gap_end,)),
        launch_header_fn=_get_launch_header,
        should_skip_header_fn=body_for_header_exists(self._db, self._chain)
    )
    # Reuse this service's logger so backfill output is attributed to it.
    self._body_syncer.logger = self.logger
def _get_next_gap(self) -> BlockRange:
gaps, future_tip_block = self._db.get_chain_gaps()
header_gaps, future_tip_header = self._db.get_header_chain_gaps()
try:
actionable_gap = self.get_topmost_actionable_gap(gaps, header_gaps)
except NoActionableGap:
# We do not have gaps in the chain of blocks but we may still have a gap from the last
# block up until the highest consecutive written header.
if len(header_gaps) > 0:
# The header chain has gaps, find out the lowest missing header
lowest_missing_header, _ = header_gaps[0]
else:
# It doesn't have gaps, so the future_tip_header is the lowest missing header
lowest_missing_header = future_tip_header
highest_consecutive_header = lowest_missing_header - 1
if highest_consecutive_header >= future_tip_block:
# The header before the lowest missing header is the highest consecutive header
# that exists in the db and it is higher than the future tip block. That's a gap
# we can try to close.
return future_tip_block, BlockNumber(highest_consecutive_header)
else:
raise ValidationError("No gaps in the chain of blocks")
else:
return actionable_gap
def get_topmost_actionable_gap(self,
gaps: Tuple[BlockRange, ...],
header_gaps: Tuple[BlockRange, ...]) -> BlockRange:
'''
Returns the most recent gap of blocks of max size = _max_backfill_block_bodies_at_once
for which the headers exist in DB, along with the header preceding the gap.
'''
for gap in gaps[::-1]:
if gap[1] - gap[0] > self._max_backfill_block_bodies_at_once:
gap = (BlockNumber(gap[1] - self._max_backfill_block_bodies_at_once), gap[1])
# We want to be sure the header preceding the block gap is in DB
gap_with_prev_block = (BlockNumber(gap[0] - 1), gap[1])
for header_gap in header_gaps[::-1]:
if not self._have_empty_intersection(gap_with_prev_block, header_gap):
break
else:
return gap
else:
raise NoActionableGap
def _have_empty_intersection(self, block_gap: BlockRange, header_gap: BlockRange) -> bool:
return block_gap[0] > header_gap[1] or block_gap[1] < header_gap[0]
@property
def is_paused(self) -> bool:
"""
Return ``True`` if the sync is currently paused, otherwise ``False``.
"""
return self._pauser.is_paused
def pause(self) -> None:
"""
Pause the sync. Pause and resume are wasteful actions and should not happen too frequently.
"""
self._pauser.pause()
if self._body_syncer:
# Pausing the syncer is valid at any point that the service is running but that might
# hit a point where we are still resolving the gaps and have no body_syncer set up yet.
self._body_syncer.get_manager().cancel()
self.logger.debug2("BodyChainGapSyncer paused")
def resume(self) -> None:
"""
Resume the sync.
"""
self._pauser.resume()
self.logger.debug2("BodyChainGapSyncer resumed")
async def run(self) -> None:
"""
Run the sync indefinitely until it is cancelled externally.
"""
while True:
if self._pauser.is_paused:
await self._pauser.await_resume()
try:
await self._setup_for_next_gap()
except ValidationError:
self.logger.debug(
"There are no gaps in the chain of blocks at this time. Sleeping for %ss",
self._idle_time
)
await asyncio.sleep(self._idle_time)
else:
await self.manager.run_service(self._body_syncer)
class NoActionableGap(BaseTrinityError):
    """Raised when no actionable gap of blocks is found."""
class HeaderLaunchpointSyncer(HeaderSyncerAPI):
    """
    Wraps a "real" header syncer, and drops headers on the floor, until triggered
    at a "launchpoint".
    Return the headers at the launchpoint, and then pass through all the headers
    subsequently found by the header syncer.
    Can be used by a body syncer to pause syncing until a header launchpoint is reached.
    """
    logger = get_logger('trinity.sync.beam.chain.HeaderLaunchpointSyncer')
    def __init__(self, passthrough: HeaderSyncerAPI) -> None:
        """Wrap ``passthrough``; its headers are only forwarded after the launchpoint."""
        self._real_syncer = passthrough
        # Set by set_launchpoint_headers(); new_sync_headers() blocks on it.
        self._at_launchpoint = asyncio.Event()
        self._launchpoint_headers: Tuple[BlockHeaderAPI, ...] = None
    def set_launchpoint_headers(self, headers: Tuple[BlockHeaderAPI, ...]) -> None:
        """
        Identify the given headers as launchpoint headers. These will be returned first.
        Immediately after these launchpoint headers are returned, start consuming and
        passing through all headers from the wrapped header syncer.
        """
        self._launchpoint_headers = headers
        self._at_launchpoint.set()
    async def new_sync_headers(
            self,
            max_batch_size: int = None) -> AsyncIterator[Tuple[BlockHeaderAPI, ...]]:
        """Yield the launchpoint headers first, then stream from the wrapped syncer."""
        # Block until set_launchpoint_headers() has provided the first batch.
        await self._at_launchpoint.wait()
        self.logger.info(
            "Choosing %s as launchpoint headers to sync from",
            [str(header) for header in self._launchpoint_headers],
        )
        yield self._launchpoint_headers
        async for headers in self._real_syncer.new_sync_headers(max_batch_size):
            yield headers
    def get_target_header_hash(self) -> Hash32:
        # Delegate straight to the wrapped syncer.
        return self._real_syncer.get_target_header_hash()
class HeaderOnlyPersist(Service):
    """
    Store all headers returned by the header syncer, until the target is reached, then exit.
    """
    def __init__(self,
                 header_syncer: ETHHeaderChainSyncer,
                 db: BaseAsyncHeaderDB,
                 force_end_block_number: int = None,
                 launch_strategy: SyncLaunchStrategyAPI = None) -> None:
        """
        :param header_syncer: source of incoming headers
        :param db: header database to persist into
        :param force_end_block_number: if set, stop header-only persist at this block number
        """
        self.logger = get_logger('trinity.sync.beam.chain.HeaderOnlyPersist')
        self._db = db
        self._header_syncer = header_syncer
        # Headers that triggered the launchpoint; set in _exit_if_launchpoint().
        self._final_headers: Tuple[BlockHeaderAPI, ...] = None
        self._force_end_block_number = force_end_block_number
        # NOTE(review): stored but never read in this class as shown — confirm callers use it.
        self._launch_strategy = launch_strategy
    async def run(self) -> None:
        """Persist headers until the launchpoint is reached, then exit."""
        self.manager.run_daemon_task(self._persist_headers_if_tip_too_old)
        # run sync until cancelled
        await self.manager.wait_finished()
    def _is_header_eligible_to_beam_sync(self, header: BlockHeaderAPI) -> bool:
        """Return True when ``header`` is recent enough to start Beam Sync from."""
        time_gap = time.time() - header.timestamp
        estimated_max_lag_seconds = MAX_BEAM_SYNC_LAG * PREDICTED_BLOCK_TIME
        return time_gap < (estimated_max_lag_seconds * (1 - BEAM_PIVOT_BUFFER_FRACTION))
    async def _persist_headers_if_tip_too_old(self) -> None:
        """
        Persist incoming headers; if the current tip is recent enough, force the
        launchpoint right after it so Beam Sync starts from there.
        """
        tip = await self._db.coro_get_canonical_head()
        if self._is_header_eligible_to_beam_sync(tip):
            self._force_end_block_number = tip.block_number + 1
            self.logger.info("Tip is recent enough, syncing from last synced header at %s", tip)
        else:
            self.logger.warning("Tip %s is too far behind to Beam Sync, skipping ahead...", tip)
        async for persist_info in persist_headers(
                self.logger, self._db, self._header_syncer, self._exit_if_launchpoint):
            if len(persist_info.new_canon_headers):
                head = persist_info.new_canon_headers[-1]
            else:
                head = await self._db.coro_get_canonical_head()
            self.logger.info(
                "Imported %d headers in %0.2f seconds, new head: %s",
                len(persist_info.imported_headers),
                persist_info.elapsed_time,
                head,
            )
    async def _exit_if_launchpoint(self, headers: Sequence[BlockHeaderAPI]) -> bool:
        """
        Determine if the supplied headers have reached the end of headers-only persist.
        This might be in the form of a forced launchpoint, or because we caught up to
        our peer's target launchpoint.
        In the case that we have reached the launchpoint:
        - trigger service exit
        - persist the headers before the launchpoint
        - save the headers that triggered the launchpoint (retrievable via get_final_headers)
        :return: whether we have reached the launchpoint
        """
        ending_header_search = [
            header for header in headers if header.block_number == self._force_end_block_number
        ]
        if ending_header_search:
            # Force an early exit to beam sync
            self.logger.info(
                "Forced the beginning of Beam Sync at %s",
                ending_header_search[0],
            )
            # NOTE(review): this local shadows the `persist_headers` callable used in
            # _persist_headers_if_tip_too_old() — only this method sees the shadowing.
            persist_headers = tuple(
                h for h in headers
                if h.block_number < self._force_end_block_number
            )
            final_headers = tuple(
                h for h in headers
                if h.block_number >= self._force_end_block_number
            )
        else:
            target_hash = self._header_syncer.get_target_header_hash()
            if target_hash in (header.hash for header in headers):
                self.logger.info(
                    "Caught up to skeleton peer. Switching to beam mode at %s",
                    headers[-1],
                )
                # We have reached the header syncer's target
                # Only sync against the most recent header
                persist_headers, final_headers = tuple(headers[:-1]), tuple(headers[-1:])
            else:
                # We have not reached the header syncer's target, continue normally
                return False
        new_canon_headers, old_canon_headers = await self._db.coro_persist_header_chain(
            persist_headers)
        if persist_headers:
            self.logger.debug(
                "Final header import before launchpoint: %s..%s, "
                "old canon: %s..%s, new canon: %s..%s",
                persist_headers[0],
                persist_headers[-1],
                old_canon_headers[0] if len(old_canon_headers) else None,
                old_canon_headers[-1] if len(old_canon_headers) else None,
                new_canon_headers[0] if len(new_canon_headers) else None,
                new_canon_headers[-1] if len(new_canon_headers) else None,
            )
        else:
            self.logger.debug("Final header import before launchpoint: None")
        self._final_headers = final_headers
        self.manager.cancel()
        return True
    def get_final_headers(self) -> Tuple[BlockHeaderAPI, ...]:
        """
        Which header(s) triggered the launchpoint to switch out of header-only persist state.
        :raise ValidationError: if the syncer has not reached the launchpoint yet
        """
        if self._final_headers is None:
            raise ValidationError("Must not try to access final headers before it has been set")
        else:
            return self._final_headers
class BeamBlockImporter(BaseBlockImporter, Service):
"""
Block Importer that emits DoStatelessBlockImport and waits on the event bus for a
StatelessBlockImportDone to show that the import is complete.
It independently runs other state preloads, like the accounts for the
block transactions.
"""
def __init__(
self,
chain: AsyncChainAPI,
db: DatabaseAPI,
state_getter: BeamDownloader,
backfiller: BeamStateBackfill,
event_bus: EndpointAPI,
metrics_registry: MetricsRegistry) -> None:
self.logger = get_logger('trinity.sync.beam.chain.BeamBlockImporter')
self._chain = chain
self._db = | |
from __future__ import print_function
import json
import optparse
import sqlite3
import sys
version = "0.4.0"
gene_count = 0
class Sequence(object):
    """One FASTA record: a header line plus its raw sequence lines."""

    def __init__(self, header, sequence_parts):
        self.header = header
        self.sequence_parts = sequence_parts
        self._sequence = None  # joined lazily on first access to .sequence

    @property
    def sequence(self):
        """The full sequence as a single string (cached after first use)."""
        if self._sequence is None:
            self._sequence = ''.join(self.sequence_parts)
        return self._sequence

    def print(self, fh=sys.stdout):
        """Write the record back out in FASTA format, one part per line."""
        fh.write(self.header + '\n')
        for chunk in self.sequence_parts:
            fh.write(chunk + '\n')
def FASTAReader_gen(fasta_filename):
    """Yield a Sequence object for each record in a FASTA file."""
    with open(fasta_filename) as fasta_file:
        current = fasta_file.readline()
        while current:
            assert current.startswith('>'), "FASTA headers must start with >"
            header = current.rstrip()
            parts = []
            current = fasta_file.readline()
            # Accumulate sequence lines until the next header or EOF.
            while current and not current.startswith('>'):
                parts.append(current.rstrip())
                current = fasta_file.readline()
            yield Sequence(header, parts)
def create_tables(conn):
    """Create the schema (meta, gene, transcript, gene_symbol index and the
    transcript_species view) on ``conn`` and commit.

    Reads the module-level ``version`` constant to stamp the meta table.
    """
    cursor = conn.cursor()
    cursor.execute('''CREATE TABLE meta (
                version VARCHAR PRIMARY KEY NOT NULL)''')
    # Record the producing program's version for later compatibility checks.
    cursor.execute('INSERT INTO meta (version) VALUES (?)',
                   (version, ))
    cursor.execute('''CREATE TABLE gene (
                gene_id VARCHAR PRIMARY KEY NOT NULL,
                gene_symbol VARCHAR,
                seq_region_name VARCHAR NOT NULL,
                seq_region_start INTEGER NOT NULL,
                seq_region_end INTEGER NOT NULL,
                seq_region_strand INTEGER NOT NULL,
                species VARCHAR NOT NULL,
                gene_json VARCHAR NOT NULL)''')
    cursor.execute('CREATE INDEX gene_symbol_index ON gene (gene_symbol)')
    cursor.execute('''CREATE TABLE transcript (
                transcript_id VARCHAR PRIMARY KEY NOT NULL,
                protein_id VARCHAR UNIQUE,
                protein_sequence VARCHAR,
                gene_id VARCHAR NOT NULL REFERENCES gene(gene_id))''')
    cursor.execute('''CREATE VIEW transcript_species AS
                SELECT transcript_id, species, seq_region_name
                FROM transcript JOIN gene
                ON transcript.gene_id = gene.gene_id''')
    conn.commit()
def remove_type_from_list_of_ids(l):
    """Strip the 'type:' prefix from each id in a comma-separated list."""
    cleaned = [remove_type_from_id(part) for part in l.split(',')]
    return ','.join(cleaned)
def remove_type_from_id(id_):
    """Return ``id_`` with everything up to (and including) the first ':' removed.

    Ids without a colon are returned unchanged.
    """
    _, colon, rest = id_.partition(':')
    return rest if colon else id_
def feature_to_dict(cols, parent_dict=None):
    """Convert one GFF3 line (already split into its 9 columns) into a feature dict.

    :param cols: the 9 tab-separated GFF3 columns of the line
    :param parent_dict: optional dict mapping parent id -> list of child feature
        dicts; when given and the feature carries a ``Parent`` attribute, the new
        dict is appended under each listed parent
    :return: the feature dict with at least 'start', 'end' and 'strand' keys
    :raise Exception: if the strand column is neither '+' nor '-'
    """
    d = {
        'end': int(cols[4]),
        'start': int(cols[3]),
    }
    for attr in cols[8].split(';'):
        if '=' in attr:
            # Split only on the first '=' so attribute values that themselves
            # contain '=' no longer raise ValueError.
            (tag, value) = attr.split('=', 1)
            if tag == 'ID':
                tag = 'id'
                value = remove_type_from_id(value)
            elif tag == 'Parent':
                value = remove_type_from_list_of_ids(value)
            d[tag] = value
    if cols[6] == '+':
        d['strand'] = 1
    elif cols[6] == '-':
        d['strand'] = -1
    else:
        raise Exception("Unrecognized strand '%s'" % cols[6])
    if parent_dict is not None and 'Parent' in d:
        # a 3' UTR can be split among multiple exons
        # a 5' UTR can be split among multiple exons
        # a CDS can be part of multiple transcripts
        for parent in d['Parent'].split(','):
            if parent not in parent_dict:
                parent_dict[parent] = [d]
            else:
                parent_dict[parent].append(d)
    return d
def add_gene_to_dict(cols, species, gene_dict):
    """Parse a gene line and register the gene dict under its id in gene_dict.

    Increments the module-level ``gene_count`` for every line, whether or not
    the gene ends up registered.
    """
    global gene_count
    gene = feature_to_dict(cols)
    gene.update({
        'member_id': gene_count,
        'object_type': 'Gene',
        'seq_region_name': cols[0],
        'species': species,
        'Transcript': [],
        'display_name': gene.get('Name', None)
    })
    # NOTE(review): raises KeyError when the line has no ID attribute — confirm intended.
    if gene['id']:
        gene_dict[gene['id']] = gene
    gene_count += 1
def add_transcript_to_dict(cols, species, transcript_dict):
    """Parse a transcript/mRNA line and register it by its transcript id."""
    transcript = feature_to_dict(cols)
    transcript['object_type'] = 'Transcript'
    transcript['seq_region_name'] = cols[0]
    transcript['species'] = species
    transcript_dict[transcript['id']] = transcript
def add_exon_to_dict(cols, species, exon_parent_dict):
    """Parse an exon line and record it under its parent transcript(s)."""
    exon = feature_to_dict(cols, exon_parent_dict)
    exon['length'] = int(cols[4]) - int(cols[3]) + 1
    exon['object_type'] = 'Exon'
    exon['seq_region_name'] = cols[0]
    exon['species'] = species
    # Fall back to the Name attribute when the line carries no ID.
    if 'id' not in exon and 'Name' in exon:
        exon['id'] = exon['Name']
def add_cds_to_dict(cols, cds_parent_dict):
    """Parse a CDS line, recording it under its parent transcript(s).

    When the line carries no ID, fall back to Name, then to a single Parent.
    """
    cds = feature_to_dict(cols, cds_parent_dict)
    if 'id' in cds:
        return
    if 'Name' in cds:
        cds['id'] = cds['Name']
    elif 'Parent' in cds and ',' not in cds['Parent']:
        cds['id'] = cds['Parent']
def join_dicts(gene_dict, transcript_dict, exon_parent_dict, cds_parent_dict, five_prime_utr_parent_dict, three_prime_utr_parent_dict):
    """Link the parsed features together: attach exons, CDS-derived translations
    and UTR-derived translation bounds to their transcripts, then attach
    transcripts to their genes. The gene and transcript dicts are modified in place.
    """
    # Attach exons (sorted by start) to their parent transcripts.
    for parent, exon_list in exon_parent_dict.items():
        if parent in transcript_dict:
            exon_list.sort(key=lambda _: _['start'])
            transcript_dict[parent]['Exon'] = exon_list
    for transcript_id, transcript in transcript_dict.items():
        # Start from a translation spanning the whole transcript; narrowed below.
        translation = {
            'CDS': [],
            'id': None,
            'end': transcript['end'],
            'object_type': 'Translation',
            'species': transcript['species'],
            'start': transcript['start'],
        }
        found_cds = False
        derived_translation_start = None
        derived_translation_end = None
        if transcript_id in cds_parent_dict:
            cds_list = cds_parent_dict[transcript_id]
            cds_ids = set(_['id'] for _ in cds_list)
            if len(cds_ids) > 1:
                raise Exception("Transcript %s has multiple CDSs: this is not supported by Ensembl JSON format" % transcript_id)
            cds_id = cds_ids.pop()
            translation['id'] = cds_id
            cds_list.sort(key=lambda _: _['start'])
            translation['CDS'] = cds_list
            # The translation spans from the first to the last CDS segment.
            translation['start'] = cds_list[0]['start']
            translation['end'] = cds_list[-1]['end']
            found_cds = True
        if transcript_id in five_prime_utr_parent_dict:
            five_prime_utr_list = five_prime_utr_parent_dict[transcript_id]
            five_prime_utr_list.sort(key=lambda _: _['start'])
            # The coding region begins right after the 5' UTR; which bound that
            # constrains depends on the strand.
            if transcript['strand'] == 1:
                derived_translation_start = five_prime_utr_list[-1]['end'] + 1
            else:
                derived_translation_end = five_prime_utr_list[0]['start'] - 1
        if transcript_id in three_prime_utr_parent_dict:
            three_prime_utr_list = three_prime_utr_parent_dict[transcript_id]
            three_prime_utr_list.sort(key=lambda _: _['start'])
            # The coding region ends right before the 3' UTR (strand-dependent).
            if transcript['strand'] == 1:
                derived_translation_end = three_prime_utr_list[0]['start'] - 1
            else:
                derived_translation_start = three_prime_utr_list[-1]['end'] + 1
        if derived_translation_start is not None:
            if found_cds:
                # Sanity check: a UTR must not overlap the CDS it flanks.
                if derived_translation_start > translation['start']:
                    raise Exception("Transcript %s has the start of CDS %s overlapping with the UTR end" % (transcript_id, cds_id))
            else:
                translation['start'] = derived_translation_start
        if derived_translation_end is not None:
            if found_cds:
                if derived_translation_end < translation['end']:
                    raise Exception("Transcript %s has the end of CDS %s overlapping with the UTR start" % (transcript_id, cds_id))
            else:
                translation['end'] = derived_translation_end
        if found_cds or derived_translation_start is not None or derived_translation_end is not None:
            transcript['Translation'] = translation
    for transcript in transcript_dict.values():
        if 'Parent' in transcript:
            # A polycistronic transcript can have multiple parents
            for parent in transcript['Parent'].split(','):
                if parent in gene_dict:
                    gene_dict[parent]['Transcript'].append(transcript)
def write_gene_dict_to_db(conn, gene_dict):
    """Insert every gene in gene_dict (and its transcripts) into the database,
    then commit. None entries are skipped."""
    cursor = conn.cursor()
    for gene in gene_dict.values():
        if gene is None:
            # This can happen when loading a JSON file from Ensembl
            continue
        gene_id = gene['id']
        cursor.execute('INSERT INTO gene (gene_id, gene_symbol, seq_region_name, seq_region_start, seq_region_end, seq_region_strand, species, gene_json) VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
                       (gene_id, gene.get('display_name', None), gene['seq_region_name'], gene['start'], gene['end'], gene['strand'], gene['species'], json.dumps(gene)))
        for transcript in gene.get("Transcript", []):
            transcript_id = transcript['id']
            protein_id = transcript.get('Translation', {}).get('id', None)
            try:
                cursor.execute('INSERT INTO transcript (transcript_id, protein_id, gene_id) VALUES (?, ?, ?)',
                               (transcript_id, protein_id, gene_id))
            except Exception as e:
                raise Exception("Error while inserting (%s, %s, %s) into transcript table: %s" % (transcript_id, protein_id, gene_id, e))
    conn.commit()
def fetch_species_and_seq_region_for_transcript(conn, transcript_id):
    """Return the (species, seq_region_name) row for a transcript id, or None."""
    row = conn.execute('SELECT species, seq_region_name FROM transcript_species WHERE transcript_id=?',
                       (transcript_id, )).fetchone()
    return row or None
def fetch_gene_id_for_transcript(conn, transcript_id):
    """Return the gene id owning a transcript id, or None when unknown."""
    row = conn.execute('SELECT gene_id FROM transcript WHERE transcript_id=?',
                       (transcript_id, )).fetchone()
    return row[0] if row else None
def remove_id_version(s):
    """
    Remove the optional '.VERSION' from an Ensembl id.
    Non-Ensembl ids (no 'ENS' prefix) pass through unchanged.
    """
    if not s.startswith('ENS'):
        return s
    return s.partition('.')[0]
def __main__():
parser = optparse.OptionParser()
parser.add_option('--gff3', action='append', default=[], help='GFF3 file to convert, in SPECIES:FILENAME format. Use multiple times to add more files')
parser.add_option('--json', action='append', default=[], help='JSON file to merge. Use multiple times to add more files')
parser.add_option('--fasta', action='append', default=[], help='Path of the input FASTA files')
parser.add_option('-l', action='store_true', default=False, dest='longestCDS', help='Keep only the longest CDS per gene')
parser.add_option('--headers', action='store_true', default=False, help='Change the header line of the FASTA sequences to the >TranscriptId_species format')
parser.add_option('--regions', default="", help='Comma-separated list of region IDs for which FASTA sequences should be filtered')
parser.add_option('-o', '--output', help='Path of the output SQLite file')
parser.add_option('--of', help='Path of the output FASTA file')
parser.add_option('--ff', help='Path of the filtered sequences output FASTA file')
options, args = parser.parse_args()
if args:
raise Exception('Use options to provide inputs')
conn = sqlite3.connect(options.output)
conn.execute('PRAGMA foreign_keys = ON')
create_tables(conn)
for gff3_arg in options.gff3:
try:
(species, filename) = gff3_arg.split(':')
except ValueError:
raise Exception("Argument for --gff3 '%s' is not in the SPECIES:FILENAME format" % gff3_arg)
gene_dict = dict()
transcript_dict = dict()
exon_parent_dict = dict()
cds_parent_dict = dict()
five_prime_utr_parent_dict = dict()
three_prime_utr_parent_dict = dict()
unimplemented_feature_nlines_dict = dict()
with open(filename) as f:
for i, line in enumerate(f, start=1):
line = line.strip()
if not line:
# skip empty lines
continue
if line[0] == '#':
# skip comment lines
continue
cols = line.split('\t')
if len(cols) != 9:
raise Exception("Line %i in file '%s': '%s' does not have 9 columns" % (i, filename, line))
feature_type = cols[2]
try:
if feature_type == 'gene':
add_gene_to_dict(cols, species, gene_dict)
elif feature_type in ('mRNA', 'transcript'):
add_transcript_to_dict(cols, species, transcript_dict)
elif feature_type == 'exon':
add_exon_to_dict(cols, species, exon_parent_dict)
elif feature_type == 'five_prime_UTR':
feature_to_dict(cols, five_prime_utr_parent_dict)
elif feature_type == 'three_prime_UTR':
feature_to_dict(cols, three_prime_utr_parent_dict)
elif feature_type == 'CDS':
add_cds_to_dict(cols, cds_parent_dict)
elif feature_type in unimplemented_feature_nlines_dict:
unimplemented_feature_nlines_dict[feature_type] += 1
else:
unimplemented_feature_nlines_dict[feature_type] = 0
except Exception as e:
print("Line %i in file '%s': %s" % (i, filename, e), file=sys.stderr)
for unimplemented_feature, nlines in unimplemented_feature_nlines_dict.items():
print("Skipped %d lines in file '%s': '%s' is not an implemented feature type" % (nlines, filename, unimplemented_feature), file=sys.stderr)
join_dicts(gene_dict, transcript_dict, exon_parent_dict, cds_parent_dict, five_prime_utr_parent_dict, three_prime_utr_parent_dict)
write_gene_dict_to_db(conn, gene_dict)
for json_arg in options.json:
with open(json_arg) as f:
write_gene_dict_to_db(conn, json.load(f))
if options.longestCDS:
gene_transcripts_dict = dict()
for fasta_arg in options.fasta:
for entry in FASTAReader_gen(fasta_arg):
# Extract the transcript id by removing everything after the first space and then removing the version if it is an Ensembl id
transcript_id = remove_id_version(entry.header[1:].lstrip().split(' ')[0])
if len(entry.sequence) % 3 != 0:
print("Transcript '%s' in file '%s' has a coding sequence length which is not multiple of 3" % | |
#!/usr/bin/python3.8
# -*- coding: utf-8 -*-
# please adjust these two lines if necessary
# multizaehlen.py
# (C) <NAME> 2017-2021
# Programm zum Auswerten der Auszählungen von Textdateien
# =======================================================
# Pickle-Daten (erzeugt durch zaehlen.py) werden ausgewertet und zusammengefasst
# noch
# + auch sonst Variablen aufführen
# + zusätzliche ini-Datei
# + Schalter für Sprache
# + ini-Datei robust laden
# History:
# 1.0.0: 2017-02-17: Anfang
# 1.1.0: 2017-02-17: erstmals funktionsfähig
# 1.2.0: 2017-02-18: Filterung (-r, -l, -t, -f) aktiviert
# 1.3.0: 2017-02-18: Sortierung funktioniert
# 1.4.0: 2017-02-18: Fehlerabprüfung beim Öffnen von Dateien
# 1.5.0: 2017-02-19: Filterung nach Zahl der Dateien
# 1.6.0: 2017-02-19: ini-Datei per import
# 1.7.0: 2017-02-19: -fd / -ld
# 1.8.0: 2017-07-10: -sd / -cd angefangen
# 1.9.0: 2017-07-17: neues Konzept für -d, -ld, -cd, -sd
# 1.10.0: 2017-07-19: Berechnung von Modus und Summen für -d, -ld, -cd, -sd
# 1.10.9: 2020-08-02: Voreinstellungen geändert
# 1.11.0: 2020-08-15: Ausgabe-Strings
# 1.11.1: 2020-08-27: ini-Datei robust geladen
# 1.11.2: 2020-08-27: Programm-Name und -version vereinheitlicht
# --------------------------------------------------------------
# Fehlermeldungen
# ---Ausgabedatei <out_file> kann nicht geöffnet werden. Programmabbruch
# ---Datei <file> kann nicht geöffnet werden. Programmabbruch!
# ---Datei <file> ist vom falschen Typ: Programmabbruch!
# ---Strukturen der vorherigen Ergebnisdateien sind nicht kompatibel. Programmabbruch!
# ---keine Dateien angegeben. Programmabbruch!
# ---Warnung: zaehlen_ini.py nicht gefunden
# --------------------------------------------------------------
# Programmabfolge
# (1) Programm-Parameter: global
# (2) Module laden
# (3) eigene Methoden
# (4) Strukturen vorbesetzen und initialisieren
# (5) Variablen vorbesetzen
# (6) Programm-Parameter
# (6-1) Programm-Parameter: Datum und Uhrzeit
# (6-2) Programm-Parameter: Aufruf
# (6-3) Definition und Beschreibung der Aufrufparameter:
# (6-4) Programm-Parameter: reguläre Ausdrücke
# (6-5) Programm-Parameter: Filterung
# (7) Ausgabedatei öffnen
# (8) Daten wiedergewinnen
# (8-1) prüfen, ob die gewonnenen Daten kompatibel sind: ggf. Abbruch
# (9) Strukturen vereinigen zu einer neuen Struktur
# (10) 1. Sortierung
# (11) daraus Aufbau einer Liste mit Zeichenkette, Anzahl und Länge (für den Gesamttext)
# (12) 2. Sortierung
# (13) Ausgabe
# (13-1) Ausgabe vorbereiten
# (13-2) Ausgabe, Kopf, allgemein
# (13-3) Ausgabe, Kopf, Ausgabe Programm-Parameter
# (13-4) Ausgabe, Legende
# (13-5) Ausgabe, Kopfzeile
# (13-6) Ausgabe, eigentliche Ausgabe
# (13-7) Ausgabe, Zusammenfassung
# (13-8) Ausgabe, ld ausgeben (Längen-Verteilung)
# (13-9) Ausgabe, fd ausgeben (Häufigkeitsverteilung)
# (13-10) Ausgabe, cd ausgeben (Zeichen-Verteilung)
# (13-11) Ausgabe, sd ausgeben (Trennzeichen-Verteilung)
# (14) Ausgabe schließen
# ==============================================================
# (1) Programm-Parameter: global
multizaehlen_vers = "1.11.2"
multizaehlen_date = "2020-08-26"
# sonst in ini-Datei verschoben
# ...
# ==============================================================
# (2) Module laden:
import pickle # Pickle ermöglichen
import sys # Aufruf-Parameter, System-Zugriffe
from operator import itemgetter # Sortieren nach Items
import argparse                                # Definition und Verarbeitung von Aufrufparametern
import re # reguläre Ausdrücke
from time import * # Datum, Uhrzeit
import math # math. Funktionen
# ==============================================================
# (3) eigene Methoden:
# -----------------------------------------------------
def __ueberschrift(text,z="-"):
    """Write a heading to the global output file, underlined with character z."""
    aus.write("\n%s\n" % str(text))
    aus.write("%s\n\n" % (z * len(text)))
# -----------------------------------------------------
def __chr_hex(c):
    """Return the hex code (e.g. '0x41') of a single character."""
    # hex() already returns a str, so the former str() wrapper was redundant.
    return hex(ord(c))
# -----------------------------------------------------
def __chr_out(c):
    """Return a printable representation of an arbitrary character."""
    # Whitespace/control characters map to their escape spellings; the global
    # `leer` (blank) character maps to the literal word "leer".
    escapes = {
        "\n": r"\n",
        "\r": r"\r",
        "\f": r"\f",
        "\v": r"\v",
        "\t": r"\t",
    }
    if c in escapes:
        return escapes[c]
    if c == leer:
        return "leer"
    return c
# ==============================================================
# (4) Strukturen vorbesetzen und initialisieren:
P = []                  # file handles of all input .pkl files
P_load = []             # full data loaded from every .pkl file
P_kopf = []             # header data from all .pkl files
P_programmdaten = []    # program metadata from all .pkl files
P_sortiert = []         # result data (from 'sortiert') of all .pkl files
P_ges_alle_zeichen = []  # result data (from 'ges_alle_zeichen') of all .pkl files
P_ges_trennzeichen = []  # result data (from 'ges_trennzeichen') of all .pkl files
P_ges_haeufigkeiten= []  # result data (from 'ges_haeufigkeiten') of all .pkl files
P_ges_alle_laengen = []  # result data (from 'ges_alle_laengen') of all .pkl files
neu = {}                # dict collecting the merged result data ('sortiert')
neu3 = []               # merged data after sorting and restructuring
akk_anz = 0             # accumulated count (over all files)
akk_anz_vor = 0         # accumulated count before filtering [= akk_anz]
akk_anz_nach = 0        # accumulated count after filtering
anz_dat = []            # per-file counts
fd_vor = {}             # dict for the frequency distribution before filtering
fd_nach = {}            # dict for the frequency distribution after filtering
ld_vor = {}             # dict for the length distribution before filtering
ld_nach = {}            # dict for the length distribution after filtering
cd_vor = {}             # dict for the character distribution before filtering
cd_nach = {}            # dict for the character distribution after filtering
sd_vor = {}             # dict for the separator distribution before filtering
# ==============================================================
# (5) Variablen vorbesetzen:
# + Import von multizaehlen_ini.py
# + falls nicht erfolgreich: lokale Spezifikationen
# Texte für Programmparameter: autor_text, cd_text, fd_text, files_anz_text, files_text, freq_text, ld_text, lengths_text,
# out_text, rank_text, sd_text, sort1_text, sort2_text, template_text, version_text
# Programmparameter: character_distribution, frequency_distribution, in_name, length_distribution, out_name, p_files, p_frequency,
# p_lengths, p_rank, separator_distribution, sort_first, sort_second, word_template
# Hilfsvariable: leer, trenner
# Kurznamen für math. Funktionen: floor, lg
try: # Konfiguration/Initialisierung von multizaehlen.py einlesen
exec(open("multizaehlen_ini.py", encoding="utf-8", mode="r").read())
except FileNotFoundError: # falls Fehler: lokal Programm-Parameter und -Variablen initialisieren
sys.stderr.write(warn_no_ini)
menu_multizaehlen_ini_date = "2021-03-06"
menu_multizaehlen_date = "2020-08-27"
menu_multizaehlen_vers = "1.2.10"
multizaehlen_ini_date = "2020-08-26"
in_name = ""
out_name = "./mz_out.txt"
word_template = """^.+$"""
p_lengths = "1,100"
p_files = "1,20"
p_frequency = "1,20000"
p_rank = "1,60000"
sort_first = "a+"
sort_second = ""
frequency_distribution = False
length_distribution = False
separator_distribution = False
character_distribution = False
rndg = 3
main_caption_text = "Vergleichendes Auszählen von Texten"
prg_name_text = "multizaehlen.py"
prg_author_text = "<NAME>"
author_email_text = "<EMAIL>"
author_institution = "Justus-Liebig-Universität Gießen, Hochschulrechenzentrum"
files_text = "zu verarbeitende Dateien (*.plk)"
files_anz_text = "Beschränkung der Dateien-Zahl (*.plk) mit Zeichenkette"
sort1_text = "1. Sortierung [a+|a-|A+|A-]"
sort2_text = "2. Sortierung [L+|L-|F+|F-|D+|D-]"
out_text = "Ausgabedatei"
template_text = "Beschränkung auf bestimmte Wort-Muster (Muster)"
lengths_text = "Beschränkung auf bestimmte Wortlängen"
rank_text = "Beschränkung auf bestimmte Rangfolge"
freq_text = "Beschränkung auf bestimmte Worthäufigkeiten"
version_text = "Version des Programms ausgeben"
autor_text = "Autor des Programms ausgeben"
fd_text = "Worthäufigkeiten-Verteilung berechnen"
ld_text = "Wortlängen-Verteilung berechnen"
sd_text = "Trennzeichen-Verteilung berechnen"
cd_text = "Zeichen-Verteilung berechnen"
argp_pos_par = 'Positionsparameter'
argp_opt_par = 'Optionale Parameter'
argp_default = 'Voreinstellung'
head_content = "Inhalt"
head_prg_name = "Name des Programms"
head_prg_vers = "Version des Programms"
head_prg_date = "Bearbeitungsdatum"
prg_author = "Autor des Programms"
author_email = "E-Mail-Adresse"
author_inst = "Institution"
res_pre_ld = "Ergebnisse (Längenverteilung vor dem Filtern)"
res_pre_fd = "Ergebnisse (Häufigkeitsverteilung vor dem Filtern)"
res_pre_cd = "Ergebnisse (Zeichenverteilung vor dem Filtern)"
res_pre_sd = "Ergebnisse (Trennzeichenverteilung vor dem Filtern)"
head_prg_para = "Programm-Parameter"
head_result = "Ergebnisse"
head_summary = "Zusammenfassung"
sub_caption = "Programm-Parameter"
prg_call = "Programm-Aufruf"
caption_leg = "Ergebnisse"
leg_rank = "Rangfolge"
leg_str_len = "Länge der Zeichenkette"
leg_string = "Zeichenkette"
leg_str_freq = "Häufigkeit der Zeichenkette in allen Dateien"
leg_acc_freq = "akk. Häufigkeit der Zeichenkette"
leg_file_nr = "Zahl der Dateien mit dieser Zeichenkette"
leg_in_file = "Eingabedatei"
result_summ = "Zusammenfassung"
res_token_pre = "Zahl der Tokens (vor dem Filtern)"
res_types_pre = "Zahl der Types (vor dem Filtern)"
res_ratio_pre = "Verhältnis Types/Tokens (vor dem Filtern)"
res_token_post = "Zahl der Tokens (nach dem Filtern)"
res_types_post = "Zahl der Types (nach dem Filtern)"
res_ratio_post = "Verhältnis Types/Tokens (nach dem Filtern)"
types_pre_post = "Verhältnis Types (nach/vor Filtern)"
token_pre_post = "Verhältnis Tokens (nach/vor Filtern)"
caption_ld = "Ergebnisse (Längenverteilung vor dem Filtern)"
ld_hdr_nr = "laufende Nummer"
ld_hdr_length = "Länge"
ld_hdr__word_nr= "Anzahl der Wörter mit dieser Länge über alle Dateien"
ld_hdr_files_nr= "Zahl der Dateien mit Wörter dieser Länge"
ld_hdr_infile = "Eingabedatei"
ld_summary_sum = "Summen:"
ld_modus = "Modus:"
ld_at = "bei:"
ld_wa_short = "gMW"
ld_wa_long = "(gMW = gewichteter Mittelwert)"
ld_min_length = "kleinste Länge"
ld_max_length = "größte Länge"
caption_fd = "Ergebnisse (Häufigkeitsverteilung vor dem Filtern)"
fd_hdr_nr = ld_hdr_nr
fd_hdr_freq = "Häufigkeit"
fd_hdr_freq_nr = "Anzahl der Wörter mit dieser Häufigkeit über alle Dateien"
fd_hdr_files_nr= "Zahl der Dateien mit Wörter dieser Häufigkeit"
fd_hdr_infile = ld_hdr_infile
fd_summary_sum = ld_summary_sum
fd_modus = ld_modus
fd_at = ld_at
fd_min_freq = "kleinste Häufigkeit"
fd_max_freq = "größte Häufigkeit"
caption_cd = "Ergebnisse (Zeichenverteilung vor dem Filtern)"
cd_hdr_nr = ld_hdr_nr
cd_hdr_char = "Zeichen"
cd_hdr_hex = "zugehöriger Hex-Code"
cd_hdr_char_nr = "Anzahl dieses Zeichens über alle Dateien"
cd_hdr_files_nr= "Zahl der Dateien mit diesem Zeichen"
cd_hdr_infile = ld_hdr_infile
cd_summary_sum = ld_summary_sum
cd_modus = ld_modus
| |
<filename>code/python/ExchangeDataFeedSnapshotAPISymbolList/v1/fds/sdk/ExchangeDataFeedSnapshotAPISymbolList/model/fields.py<gh_stars>1-10
"""
Exchange DataFeed Snapshot
FactSet’s Exchange DataFeed Snapshot API provides cost-effective access to real-time and delayed global exchange data. Proprietary technology normalizes over 200 global exchanges and 150+ data fields. Asset types integrated include equities, futures, options, warrants, fixed income, mutual funds, ETFs, indices, commodities, and FX rates. <p>Cutting-edge technology ensures reliability and provides scalability that allow applications to request multiple items at a time. To simplify client-side development an entire response can be placed in a matrix or table for effortless integration into internal and external applications. Using specified output formats (CSV, XML, JSON) receive all standard fields by default or customize the list based on specific needs.</p></p>Below are the current hosts:</p><p>Production: api.factset.com<p>Sandbox: api-sandbox.factset.com</p> # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.ExchangeDataFeedSnapshotAPISymbolList.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.ExchangeDataFeedSnapshotAPISymbolList.exceptions import ApiAttributeError
class Fields(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'exchange': (str,), # noqa: E501
'product': (str,), # noqa: E501
'bid': (float,), # noqa: E501
'bid_date': (str,), # noqa: E501
'bid_time': (int,), # noqa: E501
'bid_vol': (int,), # noqa: E501
'bid_tick': (str,), # noqa: E501
'bid_close': (float,), # noqa: E501
'bid_close_date': (str,), # noqa: E501
'bid_close_vol': (int,), # noqa: E501
'bid_exch': (str,), # noqa: E501
'ask': (float,), # noqa: E501
'ask_date': (str,), # noqa: E501
'ask_time': (int,), # noqa: E501
'ask_vol': (int,), # noqa: E501
'ask_close': (float,), # noqa: E501
'ask_close_date': (str,), # noqa: E501
'ask_close_vol': (int,), # noqa: E501
'ask_exch': (str,), # noqa: E501
'short_sale_indicator': (int,), # noqa: E501
'quote_condition': (str,), # noqa: E501
'last_price': (float,), # noqa: E501
'last_date': (str,), # noqa: E501
'last_time': (int,), # noqa: E501
'last_vol': (int,), # noqa: E501
'last_tick': (str,), # noqa: E501
'official_close': (float,), # noqa: E501
'official_close_time': (int,), # noqa: E501
'last_exch': (str,), # noqa: E501
'settlement': (float,), # noqa: E501
'traded_price': (float,), # noqa: E501
'traded_date': (str,), # noqa: E501
'traded_time': (int,), # noqa: E501
'traded_vol': (int,), # noqa: E501
'traded_condition': (str,), # noqa: E501
'net_change': (float,), # noqa: E501
'percent_change': (float,), # noqa: E501
'premkt_price': (float,), # noqa: E501
'premkt_time': (int,), # noqa: E501
'premkt_vol': (int,), # noqa: E501
'premkt_c_vol': (int,), # noqa: E501
'postmkt_price': (float,), # noqa: E501
'postmkt_time': (int,), # noqa: E501
'postmkt_vol': (int,), # noqa: E501
'postmkt_cvol': (int,), # noqa: E501
'offbook_cum_vol': (int,), # noqa: E501
'official_bid_close': (float,), # noqa: E501
'official_ask_close': (float,), # noqa: E501
'mid_date': (str,), # noqa: E501
'mid_time': (int,), # noqa: E501
'cvol': (int,), # noqa: E501
'turnover': (float,), # noqa: E501
'vwap': (float,), # noqa: E501
'trade_count': (int,), # noqa: E501
'block_trade_count': (int,), # noqa: E501
'block_cvol': (int,), # noqa: E501
'prev_close': (float,), # noqa: E501
'close_date': (str,), # noqa: E501
'prev_close_unadj': (float,), # noqa: E501
'prev_close_2': (float,), # noqa: E501
'prev_close_unadj_2': (float,), # noqa: E501
'lower_trading_band': (float,), # noqa: E501
'upper_trading_band': (float,), # noqa: E501
'buy_imbalance': (int,), # noqa: E501
'sell_imbalance': (int,), # noqa: E501
'nas_buy_imbalance': (int,), # noqa: E501
'nas_sell_imbalance': (int,), # noqa: E501
'open': (float,), # noqa: E501
'high': (float,), # noqa: E501
'low': (float,), # noqa: E501
'venue': (str,), # noqa: E501
'buy_id': (str,), # noqa: E501
'sell_id': (str,), # noqa: E501
'auto_trade_vwap': (float,), # noqa: E501
'auto_trade_cvol': (int,), # noqa: E501
'auto_trade_count': (int,), # noqa: E501
'ex_date_status': (str,), # noqa: E501
'premkt_net_change': (float,), # noqa: E501
'premkt_percent_change': (float,), # noqa: E501
'closing_vol': (int,), # noqa: E501
'primary_market': (str,), # noqa: E501
'iso_country_exchange': (str,), # noqa: E501
'premkt_exch': (str,), # noqa: E501
'postmkt_exch': (str,), # noqa: E501
'fref_security_type': (str,), # noqa: E501
'security_sub_type': (str,), # noqa: E501
'postmkt_net_change': (float,), # noqa: E501
'postmkt_percent_change': (float,), # noqa: E501
'isin': (str,), # noqa: E501
'cusip': (str,), # noqa: E501
'sedol': (str,), # noqa: E501
'description': (str,), # noqa: E501
'shares_outstanding': (float,), # noqa: E501
'price_currency': (str,), # noqa: E501
'security_status': (str,), # noqa: E501
'gmt_offset': (int,), # noqa: E501
'market_segment': (str,), # noqa: E501
'market_sector': (str,), # noqa: E501
'period': (str,), # noqa: E501
'country_code': (str,), # noqa: E501
'financial_status': (int,), # noqa: E501
'factset_industry': (str,), # noqa: E501
'factset_sector': (str,), # noqa: E501
'halt_info': (int,), # noqa: E501
'homepage': (str,), # noqa: E501
'halt_description': (str,), # noqa: E501
'feed_currency': (str,), # noqa: E501
'country_name': (str,), # noqa: E501
'order_lot_size': (int,), # noqa: E501
'trade_lot_size': (int,), # noqa: E501
'tick_size': (float,), # noqa: E501
'tick_group': (str,), # noqa: E501
'tick_pilot_eff_date': (str,), # noqa: E501
'avg_30_day_vol': (float,), # noqa: E501
'avg_5_day_vol': (float,), # noqa: E501
'high_52_week': (float,), # noqa: E501
'low_52_week': (float,), # noqa: E501
'high_52_week_date': (str,), # noqa: E501
'low_52_week_date': (str,), # noqa: E501
'trade_condition': (str,), # noqa: E501
'total_return_3_m': (float,), # noqa: E501
'total_return_52_w': (float,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'exchange': 'Exchange', # noqa: E501
'product': 'product', # noqa: E501
'bid': 'Bid', # noqa: E501
'bid_date': 'Bid_Date', # noqa: E501
'bid_time': 'Bid_Time', # noqa: E501
'bid_vol': 'Bid_Vol', # noqa: E501
'bid_tick': 'Bid_Tick', # noqa: E501
'bid_close': 'Bid_Close', # noqa: E501
'bid_close_date': 'Bid_Close_Date', # noqa: E501
'bid_close_vol': 'Bid_Close_Vol', # noqa: E501
'bid_exch': 'Bid_Exch', # noqa: E501
'ask': 'Ask', # noqa: E501
'ask_date': 'Ask_Date', # noqa: E501
'ask_time': 'Ask_Time', # noqa: E501
'ask_vol': 'Ask_Vol', # noqa: E501
'ask_close': 'Ask_Close', # noqa: E501
'ask_close_date': 'Ask_Close_Date', # noqa: E501
'ask_close_vol': 'Ask_Close_Vol', # noqa: E501
'ask_exch': 'Ask_Exch', # noqa: E501
'short_sale_indicator': 'Short_Sale_Indicator', # noqa: E501
'quote_condition': 'Quote_Condition', # noqa: E501
'last_price': 'Last_Price', # noqa: E501
'last_date': 'Last_Date', # noqa: E501
'last_time': 'Last_Time', # noqa: E501
'last_vol': 'Last_Vol', # noqa: E501
'last_tick': 'Last_Tick', # noqa: E501
'official_close': 'Official_Close', # noqa: E501
'official_close_time': 'Official_Close_Time', # noqa: E501
'last_exch': 'Last_Exch', # noqa: E501
'settlement': 'Settlement', # noqa: E501
'traded_price': 'Traded_Price', # noqa: E501
'traded_date': 'Traded_Date', # noqa: E501
'traded_time': 'Traded_Time', # noqa: E501
'traded_vol': 'Traded_Vol', # noqa: E501
'traded_condition': 'Traded_Condition', # noqa: E501
'net_change': 'Net_Change', # noqa: E501
'percent_change': 'Percent_Change', # noqa: E501
'premkt_price': 'Premkt_Price', # noqa: E501
'premkt_time': 'Premkt_Time', # noqa: E501
'premkt_vol': 'Premkt_Vol', # noqa: E501
'premkt_c_vol': 'Premkt_CVol', # noqa: E501
'postmkt_price': 'Postmkt_Price', # noqa: E501
'postmkt_time': 'Postmkt_Time', # noqa: E501
'postmkt_vol': 'Postmkt_Vol', # noqa: E501
'postmkt_cvol': 'Postmkt_Cvol', # noqa: E501
'offbook_cum_vol': 'Offbook_Cum_Vol', # noqa: E501
'official_bid_close': 'Official_Bid_Close', # noqa: E501
'official_ask_close': 'Official_Ask_Close', # noqa: E501
'mid_date': 'Mid_Date', # noqa: E501
'mid_time': 'Mid_Time', # noqa: E501
'cvol': 'Cvol', # noqa: E501
'turnover': 'Turnover', # noqa: E501
'vwap': 'Vwap', | |
Optional[int] = None,
origin_host_header: Optional[str] = None,
priority: Optional[int] = None,
weight: Optional[int] = None,
shared_private_link_resource: Optional[object] = None,
enabled_state: Optional[Union[str, "EnabledState"]] = None,
**kwargs
):
super(AFDOriginProperties, self).__init__(azure_origin=azure_origin, host_name=host_name, http_port=http_port, https_port=https_port, origin_host_header=origin_host_header, priority=priority, weight=weight, shared_private_link_resource=shared_private_link_resource, enabled_state=enabled_state, **kwargs)
self.azure_origin = azure_origin
self.host_name = host_name
self.http_port = http_port
self.https_port = https_port
self.origin_host_header = origin_host_header
self.priority = priority
self.weight = weight
self.shared_private_link_resource = shared_private_link_resource
self.enabled_state = enabled_state
self.provisioning_state = None
self.deployment_status = None
class AFDOriginUpdateParameters(msrest.serialization.Model):
    """The subset of AFDOrigin properties that may be supplied when updating an origin.

    :param azure_origin: Resource reference to the Azure origin resource.
    :type azure_origin: ~azure.mgmt.cdn.models.ResourceReference
    :param host_name: Address of the origin (domain name, IPv4 address, or IPv6 address).
     Must be unique across all origins in an endpoint.
    :type host_name: str
    :param http_port: HTTP port. Must be between 1 and 65535.
    :type http_port: int
    :param https_port: HTTPS port. Must be between 1 and 65535.
    :type https_port: int
    :param origin_host_header: Host header value sent to the origin with each request. When left
     blank, the request hostname determines this value. Azure CDN origins such as Web Apps,
     Blob Storage, and Cloud Services require this host header value to match the origin hostname
     by default. Overrides the host header defined at the endpoint.
    :type origin_host_header: str
    :param priority: Priority of the origin in its origin group for load balancing. Higher
     priorities are not used for load balancing while any lower-priority origin is healthy.
     Must be between 1 and 5.
    :type priority: int
    :param weight: Weight of the origin in its origin group for load balancing. Must be between
     1 and 1000.
    :type weight: int
    :param shared_private_link_resource: Properties of the private link resource for a private
     origin.
    :type shared_private_link_resource: object
    :param enabled_state: Whether health probes are made against backends defined under
     backendPools. Probes can only be disabled when there is a single enabled backend in a single
     enabled backend pool. Possible values include: "Enabled", "Disabled".
    :type enabled_state: str or ~azure.mgmt.cdn.models.EnabledState
    """

    # Range constraints enforced client-side by msrest before the request is sent.
    _validation = {
        'http_port': {'maximum': 65535, 'minimum': 1},
        'https_port': {'maximum': 65535, 'minimum': 1},
        'priority': {'maximum': 5, 'minimum': 1},
        'weight': {'maximum': 1000, 'minimum': 1},
    }

    # Maps Python attribute names to their JSON paths in the REST payload.
    _attribute_map = {
        'azure_origin': {'key': 'properties.azureOrigin', 'type': 'ResourceReference'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'http_port': {'key': 'properties.httpPort', 'type': 'int'},
        'https_port': {'key': 'properties.httpsPort', 'type': 'int'},
        'origin_host_header': {'key': 'properties.originHostHeader', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'weight': {'key': 'properties.weight', 'type': 'int'},
        'shared_private_link_resource': {'key': 'properties.sharedPrivateLinkResource', 'type': 'object'},
        'enabled_state': {'key': 'properties.enabledState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        azure_origin: Optional["ResourceReference"] = None,
        host_name: Optional[str] = None,
        http_port: Optional[int] = None,
        https_port: Optional[int] = None,
        origin_host_header: Optional[str] = None,
        priority: Optional[int] = None,
        weight: Optional[int] = None,
        shared_private_link_resource: Optional[object] = None,
        enabled_state: Optional[Union[str, "EnabledState"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.azure_origin = azure_origin
        self.host_name = host_name
        self.http_port = http_port
        self.https_port = https_port
        self.origin_host_header = origin_host_header
        self.priority = priority
        self.weight = weight
        self.shared_private_link_resource = shared_private_link_resource
        self.enabled_state = enabled_state
class AfdPurgeParameters(msrest.serialization.Model):
    """Request body describing which cached content should be purged.

    All required parameters must be populated in order to send to Azure.

    :param content_paths: Required. The path to the content to be purged. Can describe a file
     path or a wild card directory.
    :type content_paths: list[str]
    :param domains: List of domains.
    :type domains: list[str]
    """

    _validation = {'content_paths': {'required': True}}

    # Maps Python attribute names to their JSON keys in the REST payload.
    _attribute_map = {
        'content_paths': {'key': 'contentPaths', 'type': '[str]'},
        'domains': {'key': 'domains', 'type': '[str]'},
    }

    def __init__(self, *, content_paths: List[str], domains: Optional[List[str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.content_paths = content_paths
        self.domains = domains
class CacheExpirationActionParameters(msrest.serialization.Model):
    """Parameters for the cache expiration delivery-rule action.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odata_type: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheExpirationActionParameters".
    :vartype odata_type: str
    :param cache_behavior: Required. Caching behavior applied to matching requests. Possible
     values include: "BypassCache", "Override", "SetIfMissing".
    :type cache_behavior: str or ~azure.mgmt.cdn.models.CacheBehavior
    :param cache_type: Required. The level at which the content needs to be cached. Possible
     values include: "All".
    :type cache_type: str or ~azure.mgmt.cdn.models.CacheType
    :param cache_duration: How long the content is cached; allowed format is [d.]hh:mm:ss.
    :type cache_duration: str
    """

    _validation = {
        'odata_type': {'required': True, 'constant': True},
        'cache_behavior': {'required': True},
        'cache_type': {'required': True},
    }

    # Maps Python attribute names to their JSON keys in the REST payload.
    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'cache_behavior': {'key': 'cacheBehavior', 'type': 'str'},
        'cache_type': {'key': 'cacheType', 'type': 'str'},
        'cache_duration': {'key': 'cacheDuration', 'type': 'str'},
    }

    # Constant discriminator value identifying this parameter type in the payload.
    odata_type = "#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheExpirationActionParameters"

    def __init__(
        self,
        *,
        cache_behavior: Union[str, "CacheBehavior"],
        cache_type: Union[str, "CacheType"],
        cache_duration: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.cache_behavior = cache_behavior
        self.cache_type = cache_type
        self.cache_duration = cache_duration
class CacheKeyQueryStringActionParameters(msrest.serialization.Model):
    """Parameters for the cache-key query string delivery-rule action.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odata_type: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheKeyQueryStringBehaviorActionParameters".
    :vartype odata_type: str
    :param query_string_behavior: Required. Caching behavior applied to matching requests.
     Possible values include: "Include", "IncludeAll", "Exclude", "ExcludeAll".
    :type query_string_behavior: str or ~azure.mgmt.cdn.models.QueryStringBehavior
    :param query_parameters: Comma-separated query parameters to include or exclude.
    :type query_parameters: str
    """

    _validation = {
        'odata_type': {'required': True, 'constant': True},
        'query_string_behavior': {'required': True},
    }

    # Maps Python attribute names to their JSON keys in the REST payload.
    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'query_string_behavior': {'key': 'queryStringBehavior', 'type': 'str'},
        'query_parameters': {'key': 'queryParameters', 'type': 'str'},
    }

    # Constant discriminator value identifying this parameter type in the payload.
    odata_type = "#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheKeyQueryStringBehaviorActionParameters"

    def __init__(
        self,
        *,
        query_string_behavior: Union[str, "QueryStringBehavior"],
        query_parameters: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.query_string_behavior = query_string_behavior
        self.query_parameters = query_parameters
class CdnCertificateSourceParameters(msrest.serialization.Model):
    """Parameters for securing a custom domain with a CDN managed certificate.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odata_type: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.CdnCertificateSourceParameters".
    :vartype odata_type: str
    :param certificate_type: Required. Type of certificate used. Possible values include:
     "Shared", "Dedicated".
    :type certificate_type: str or ~azure.mgmt.cdn.models.CertificateType
    """

    _validation = {
        'odata_type': {'required': True, 'constant': True},
        'certificate_type': {'required': True},
    }

    # Maps Python attribute names to their JSON keys in the REST payload.
    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'certificate_type': {'key': 'certificateType', 'type': 'str'},
    }

    # Constant discriminator value identifying this parameter type in the payload.
    odata_type = "#Microsoft.Azure.Cdn.Models.CdnCertificateSourceParameters"

    def __init__(self, *, certificate_type: Union[str, "CertificateType"], **kwargs):
        super().__init__(**kwargs)
        self.certificate_type = certificate_type
class CdnEndpoint(msrest.serialization.Model):
    """ARM resource ID wrapper for a linked endpoint.

    :param id: ARM Resource ID string.
    :type id: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    # The parameter is named `id` (shadowing the builtin) because it mirrors the
    # wire-format key of this generated API model.
    def __init__(self, *, id: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.id = id
class CustomDomainHttpsParameters(msrest.serialization.Model):
    """The JSON object that contains the properties to secure a custom domain.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: UserManagedHttpsParameters, CdnManagedHttpsParameters.

    All required parameters must be populated in order to send to Azure.

    :param certificate_source: Required. Source of the SSL certificate. Constant filled by
     server. Possible values include: "AzureKeyVault", "Cdn".
    :type certificate_source: str or ~azure.mgmt.cdn.models.CertificateSource
    :param protocol_type: Required. TLS extension protocol used for secure delivery. Possible
     values include: "ServerNameIndication", "IPBased".
    :type protocol_type: str or ~azure.mgmt.cdn.models.ProtocolType
    :param minimum_tls_version: TLS protocol version used for HTTPS. Possible values include:
     "None", "TLS10", "TLS12".
    :type minimum_tls_version: str or ~azure.mgmt.cdn.models.MinimumTlsVersion
    """

    _validation = {
        'certificate_source': {'required': True},
        'protocol_type': {'required': True},
    }

    # Maps Python attribute names to their JSON keys in the REST payload.
    _attribute_map = {
        'certificate_source': {'key': 'certificateSource', 'type': 'str'},
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'str'},
    }

    # msrest deserializes into the concrete subclass selected by the value of the
    # `certificate_source` discriminator field.
    _subtype_map = {
        'certificate_source': {'AzureKeyVault': 'UserManagedHttpsParameters', 'Cdn': 'CdnManagedHttpsParameters'}
    }

    def __init__(
        self,
        *,
        protocol_type: Union[str, "ProtocolType"],
        minimum_tls_version: Optional[Union[str, "MinimumTlsVersion"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Filled in by the concrete subclass / server-side constant.
        self.certificate_source = None  # type: Optional[str]
        self.protocol_type = protocol_type
        self.minimum_tls_version = minimum_tls_version
class CdnManagedHttpsParameters(CustomDomainHttpsParameters):
"""Defines the certificate source parameters using CDN managed certificate for enabling SSL.
All required parameters must be populated in order to send to Azure.
:param certificate_source: Required. Defines the source of the SSL certificate.Constant filled
by server. Possible values include: "AzureKeyVault", "Cdn".
:type certificate_source: str or ~azure.mgmt.cdn.models.CertificateSource
:param protocol_type: Required. Defines the TLS extension protocol that is used for secure
delivery. Possible values include: "ServerNameIndication", "IPBased".
:type protocol_type: str or ~azure.mgmt.cdn.models.ProtocolType
:param minimum_tls_version: | |
of the main branch by means of a 1-by-1 convolution,
so that the output tensors of the two branches have the same dimensions. Afterwards the output
tensors of the two branches are added.
Arguments:
input_tensor (4D tensor): The 4D input tensor of the shape (batch, height, width, channels).
kernel_size (int): The size of the quadratic convolution kernel.
num_filters (tuple): A tuple of 3 integers which define the number of filters to be used
for the three convolutional layers. The second element of the tuple can also be `None`,
indicating that the number of filters of the block's second convolutional layer will be
identical to its number of input channels. This is required in order to yield an antisymmetric
convolution matrix for the second conv layer.
antisymmetric (bool): If `True`, the convolution matrix of the second convolutional layer of
this block will be antisymmetric, which is equivalent to the convolution kernel being
skew-centrosymmetric. If `False`, the block will contain a regular 3-by-3 convolutional
layer instead. Setting this argument to `True` only has an effect if the second element of
`num_filters` is set to `None`, because the convolution matrix can only be a square matrix
if the number of input channels to and output channels from the layer are identical.
use_batch_norm (bool): If `True`, each convolutional layer of this block will be followed by
a batch normalization layer.
stage (int): The number of the current stage. Used for the generation of the layer names.
Usually, a new stage begins whenever the spatial dimensions (height and width) of the
convolutional feature map change.
block (int): The number of the current block within the current stage. Used for
the generation of the layer names.
version (float, optional): A value from the set {1, 1.5}, the version of the ResNet.
The different versions are defined as follows:
v1: The striding of the conv block is performed by the first 1-by-1 convolution.
The order of operations of each convolutional layer is (conv, BN, ReLU).
v1.5: The striding of the conv block is performed by the 3-by-3 convolution.
The order of operations of each convolutional layer is (conv, BN, ReLU).
v2 (currently not supported): The striding of the conv block is performed by
the first 3-by-3 convolution. The order of operations of each convolutional layer
is (BN, ReLU, conv).
The reasoning for introducing v1.5 is that it has been reported that performing the
striding (i.e. the spatial dimensionality reduction) in the 3-by-3 convolution instead
of the 1-by-1 convolution results in higher and more stable accuracy in fewer epochs
than the original v1 and has shown to scale to higher batch sizes with minimal degradation
in accuracy. However, it has also been reported that v1.5 requires ~12% more compute to
train and has 6% reduced throughput for inference compared to v1.
strides (tuple, optional): The spatial strides for the first 1-by-1 convolutional layer
(v1) or the 3-by-3 convolutional layer (v1.5) of this block.
gamma (float, optional): TODO
Returns:
The output tensor for the block.
'''
if version == 1:
strides_1_by_1 = strides
strides_k_by_k = (1,1)
elif version == 1.5:
strides_1_by_1 = (1,1)
strides_k_by_k = strides
else:
raise ValueError("Supported values for `version` are 1 and 1.5.")
conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
bn_name_base = 'bn' + str(stage) + '_' + str(block) + '_branch'
############################################################################
# 1-by-1
############################################################################
x = Conv2D(filters=num_filters[0],
kernel_size=(1,1),
strides=strides_1_by_1,
kernel_initializer='he_normal',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=conv_name_base + '2a')(input_tensor)
if use_batch_norm:
x = BatchNormalization(axis=3,
name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
############################################################################
# 3-by-3
############################################################################
if antisymmetric and (num_filters[1] is None):
x = Conv2DAntisymmetric3By3(gamma=gamma,
strides=strides_k_by_k,
use_bias=True,
kernel_initializer='he_normal',
kernel_regularizer=kernel_regularizer,
name=conv_name_base + '2b')(x)
else:
x = Conv2D(filters=num_filters[1],
kernel_size=kernel_size,
strides=strides_k_by_k,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=conv_name_base + '2b')(x)
if use_batch_norm:
x = BatchNormalization(axis=3,
name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
############################################################################
# 1-by-1
############################################################################
x = Conv2D(filters=num_filters[2],
kernel_size=(1,1),
kernel_initializer='he_normal',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=conv_name_base + '2c')(x)
if use_batch_norm:
x = BatchNormalization(axis=3,
name=bn_name_base + '2c')(x)
############################################################################
# shortcut
############################################################################
shortcut = Conv2D(filters=num_filters[2],
kernel_size=(1,1),
strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '1')(input_tensor)
if use_batch_norm:
shortcut = BatchNormalization(axis=3,
name=bn_name_base + '1')(shortcut)
############################################################################
# fusion
############################################################################
x = add([x, shortcut])
x = Activation('relu')(x)
return x
def build_single_block_resnet(image_shape,
                              kernel_type='antisymmetric',
                              kernel_size=3,
                              h=1.0,
                              gamma=0.0,
                              num_stages=5,
                              blocks_per_stage=[3, 4, 6, 3],
                              filters_per_block=[64, 128, 256, 512],
                              strides=[(2,2),(2,2),(2,2),(2,2)],
                              include_top=True,
                              fc_activation='softmax',
                              num_classes=None,
                              use_batch_norm=False,
                              use_max_pooling=[False, False, False, False],
                              l2_regularization=0.0,
                              subtract_mean=None,
                              divide_by_stddev=None,
                              verbose=False):
    # NOTE(review): the list-valued defaults above are mutable default arguments.
    # They appear to be passed through unmodified here, but consider tuples to
    # rule out cross-call mutation — TODO confirm they are never mutated downstream.
    '''
    Build a ResNet in which each ResNet block has only one convolutional layer. The overall
    ResNet is composed of five stages of ResNet blocks. Each stage consists of one or more
    identical blocks.

    Arguments:
        image_shape (tuple): A tuple `(height, width, channels)` of three integers representing the size
            and number of channels of the image input.
        kernel_type (str, optional): If 'antisymmetric', will build a ResNet in which
            all 3-by-3 convolution kernels are anti-centrosymmetric.
        kernel_size (int, optional): The size of the quadratic convolution kernels; passed
            through to the build function.
        h (float, optional): The scaling factor for the output of the residual connections. This scaling factor
            is 1.0 for the original ResNet architectures.
        gamma (float, optional): Only relevant if `kernel_type` is set to 'antisymmetric'. For an antisymmetric kernel,
            the real parts of the eigenvalues of the associated convolution matrix will be `gamma`.
        num_stages (int, optional): The number of stages of ResNet blocks; passed through to
            the build function.
        blocks_per_stage (tuple, optional): A tuple of four positive integers representing the number of
            ResNet blocks for the stages 2, 3, 4, and 5 of the ResNet.
        filters_per_block (tuple, optional): A tuple of four positive integers representing the number of
            filters to be used for the convolutional layers of the blocks in each of the stages 2, 3, 4, and 5
            of the ResNet.
        strides (list, optional): Spatial stride pairs, presumably one per stage transition;
            passed through to the build function — TODO confirm.
        include_top (bool, optional): If `False`, the output of the last convolutional layer is the model output.
            Otherwise, an average pooling layer and a fully connected layer with `num_classes` outputs followed
            by a softmax activation layer will be added, the output of the latter of which will be the model output.
        fc_activation (str, optional): The activation function to use for the very last layer of the network,
            i.e. the dense layer. Can be any valid Keras activation function name, e.g. 'softmax' for classification.
            If this is `None`, no activation will be applied to the dense layer. Only relevant if `include_top`
            is True.
        num_classes (int, optional): The number of classes for classification. Only relevant if `include_top`
            is `True`.
        use_batch_norm (bool, optional): If `True`, adds a batch normalization layer
            after each convolutional layer.
        use_max_pooling (tuple, optional): A tuple of four booleans which define whether max pooling is being
            performed after the stages 1, 2, 3, and 4, respectively.
        l2_regularization (float, optional): Presumably the L2 kernel-regularization factor;
            passed through to the build function — TODO confirm.
        subtract_mean (array-like, optional): `None` or an array-like object of integers or floating point values
            of any shape that is broadcast-compatible with the image shape. The elements of this array will be
            subtracted from the image pixel intensity values. For example, pass a list of three integers
            to perform per-channel mean normalization for color images.
        divide_by_stddev (array-like, optional): `None` or an array-like object of non-zero integers or
            floating point values of any shape that is broadcast-compatible with the image shape. The image pixel
            intensity values will be divided by the elements of this array. For example, pass a list
            of three integers to perform per-channel standard deviation normalization for color images.
        verbose (bool, optional): Passed through to the build function; presumably enables
            progress/diagnostic output — TODO confirm.

    Returns:
        The result of applying the assembled build function to a fresh
        `tf.keras.layers.Input` of shape `image_shape` (the network built on that
        input tensor; exact return type depends on the build function, not visible here).
    '''
    # Delegate the architecture assembly to a reusable build function, then apply
    # it to a new Keras input tensor.
    build_function = get_single_block_resnet_build_function(kernel_type=kernel_type,
                                                            kernel_size=kernel_size,
                                                            h=h,
                                                            gamma=gamma,
                                                            num_stages=num_stages,
                                                            blocks_per_stage=blocks_per_stage,
                                                            filters_per_block=filters_per_block,
                                                            strides=strides,
                                                            include_top=include_top,
                                                            fc_activation=fc_activation,
                                                            num_classes=num_classes,
                                                            use_batch_norm=use_batch_norm,
                                                            use_max_pooling=use_max_pooling,
                                                            l2_regularization=l2_regularization,
                                                            subtract_mean=subtract_mean,
                                                            divide_by_stddev=divide_by_stddev,
                                                            verbose=verbose)

    input_tensor = tf.keras.layers.Input(shape=image_shape)

    return build_function(input_tensor)
def get_single_block_resnet_build_function(kernel_type='antisymmetric',
kernel_size=3,
h=1.0,
gamma=0.0,
num_stages=5,
blocks_per_stage=[3, 4, 6, 3],
filters_per_block=[64, 128, 256, 512],
strides=[(2,2),(2,2),(2,2),(2,2)],
include_top=True,
fc_activation='softmax',
num_classes=None,
use_batch_norm=False,
use_max_pooling=[False, False, False, False],
l2_regularization=0.0,
subtract_mean=None,
divide_by_stddev=None,
verbose=False):
if include_top and (num_classes is None):
raise ValueError("You must pass a positive integer for `num_classes` if `include_top` is `True`.")
name = 'single_block_resnet'
if kernel_type == 'antisymmetric':
antisymmetric = True
name += '_antisymmetric'
else:
antisymmetric = False
name += '_regular'
if not (subtract_mean is None):
subtract_mean = np.array(subtract_mean)
if not (divide_by_stddev is None):
divide_by_stddev = np.array(divide_by_stddev)
def _build_function(input_tensor):
'''
Build the network given an input | |
col=1), Player.black): 6893650162163055849,
(Point(row=18, col=12), Player.black): 2623660912777886370,
(Point(row=13, col=17), Player.black): 2459600351157906810,
(Point(row=1, col=19), Player.black): 8151912673468739038,
(Point(row=9, col=3), Player.white): 194307367481754028,
(Point(row=16, col=3), Player.black): 7186587303337655300,
(Point(row=17, col=10), Player.white): 5280069553128906975,
(Point(row=16, col=16), Player.black): 7529522454654483248,
(Point(row=3, col=6), Player.black): 3462693765803155898,
(Point(row=3, col=3), Player.white): 1120868131185429476,
(Point(row=13, col=7), Player.white): 7546015569151714900,
(Point(row=9, col=10), Player.black): 2507659278981864817,
(Point(row=18, col=3), Player.black): 1755314055889207616,
(Point(row=8, col=8), Player.black): 7653578726715946776,
(Point(row=7, col=13), Player.white): 6857537180056800245,
(Point(row=19, col=18), Player.white): 7784442173260196124,
(Point(row=6, col=12), Player.black): 9060559421449608358,
(Point(row=17, col=4), Player.white): 8272429468538893182,
(Point(row=17, col=18), Player.white): 6482485689448012448,
(Point(row=1, col=4), Player.white): 6015936502321865267,
(Point(row=19, col=8), Player.white): 6373812086529830381,
(Point(row=18, col=7), Player.black): 327399695993822024,
(Point(row=7, col=2), Player.white): 2386069977942064597,
(Point(row=12, col=1), Player.black): 7171025716757801235,
(Point(row=4, col=12), Player.black): 519565332639109992,
(Point(row=1, col=12), Player.black): 3465088746762627059,
(Point(row=4, col=6), Player.black): 2505844367378803621,
(Point(row=3, col=13), Player.black): 5840041237874942891,
(Point(row=9, col=6), Player.white): 6179590893748189749,
(Point(row=15, col=13), Player.white): 6073539092230557671,
(Point(row=6, col=19), Player.black): 2401547781177173839,
(Point(row=13, col=1), Player.white): 8663526241479273641,
(Point(row=16, col=9), Player.white): 9090929914685029498,
(Point(row=4, col=3), Player.white): 158807781242098357,
(Point(row=3, col=8), Player.black): 7163905206307173148,
(Point(row=6, col=11), Player.black): 6003387579549332004,
(Point(row=12, col=5), Player.black): 1905888476883067352,
(Point(row=10, col=2), Player.white): 621787674596225223,
(Point(row=8, col=13), Player.white): 7926190742240214180,
(Point(row=3, col=16), Player.black): 7701938841300054497,
(Point(row=3, col=14), Player.white): 6254651272069425314,
(Point(row=15, col=7), Player.white): 8338826032418825464,
(Point(row=1, col=15), Player.black): 7959166384006082791,
(Point(row=9, col=16), Player.black): 4837404493694931020,
(Point(row=1, col=17), Player.white): 2128184226109907791,
(Point(row=8, col=18), Player.black): 2768983006548199032,
(Point(row=13, col=14), Player.black): 1780283970078558465,
(Point(row=9, col=15), Player.white): 4548322597187922045,
(Point(row=13, col=13), Player.white): 5347734045022552038,
(Point(row=16, col=18), Player.black): 560146100273600086,
(Point(row=13, col=4), Player.white): 6804195989771739597,
(Point(row=19, col=15), Player.black): 8765196825636292134,
(Point(row=16, col=10), Player.white): 6038632678401215475,
(Point(row=2, col=4), Player.white): 986002656609339193,
(Point(row=3, col=3), Player.black): 8562755783557192849,
(Point(row=6, col=6), Player.black): 4864151772664044425,
(Point(row=6, col=11), Player.white): 892448199759945070,
(Point(row=8, col=16), Player.white): 9006225292776786513,
(Point(row=16, col=14), Player.white): 7812021989173062563,
(Point(row=16, col=4), Player.white): 7502480289631484199,
(Point(row=3, col=4), Player.black): 1755026511531415872,
(Point(row=11, col=1), Player.white): 257077318388252152,
(Point(row=3, col=10), Player.white): 8778239596792324192,
(Point(row=7, col=9), Player.black): 5271872557866476953,
(Point(row=18, col=7), Player.white): 4083165389979544527,
(Point(row=17, col=14), Player.white): 2966555236051484676,
(Point(row=18, col=4), Player.black): 6296440373686602611,
(Point(row=4, col=16), Player.black): 7890369874349717469,
(Point(row=10, col=18), Player.white): 65445814905001258,
(Point(row=4, col=14), Player.white): 624542602101888813,
(Point(row=15, col=18), Player.black): 8171665969258981564,
(Point(row=14, col=2), Player.white): 1809982625073587773,
(Point(row=14, col=11), Player.white): 2695353886913730429,
(Point(row=8, col=12), Player.white): 817861062274949162,
(Point(row=3, col=1), Player.black): 8628182186552163321,
(Point(row=12, col=8), Player.black): 8190632542139242232,
(Point(row=3, col=5), Player.white): 5215351130307926444,
(Point(row=10, col=6), Player.white): 5450019510238647897,
(Point(row=4, col=15), Player.black): 1550528415958664666,
(Point(row=6, col=13), Player.white): 5402012147369066752,
(Point(row=11, col=3), Player.white): 7547360695728420190,
(Point(row=8, col=7), Player.black): 8040850639257345043,
(Point(row=15, col=6), Player.white): 6409649986641627403,
(Point(row=10, col=17), Player.black): 7343684769768146308,
(Point(row=13, col=13), Player.black): 5945563110852745633,
(Point(row=2, col=8), Player.white): 7932819624477916791,
(Point(row=18, col=1), Player.white): 6437287578989886619,
(Point(row=7, col=8), Player.white): 6585605467365146577,
(Point(row=16, col=19), Player.white): 6456192253808192381,
(Point(row=5, col=9), Player.white): 5548318215603159400,
(Point(row=15, col=8), Player.black): 1154516408612002283,
(Point(row=6, col=13), Player.black): 481021678443707824,
(Point(row=6, col=10), Player.black): 2596415556830756301,
(Point(row=11, col=2), Player.white): 3389535247992007384,
(Point(row=16, col=11), Player.black): 6970929909999685503,
(Point(row=9, col=14), Player.black): 2111989143590353271,
(Point(row=17, col=17), Player.white): 268051584243374986,
(Point(row=3, col=10), Player.black): 6465521289217711886,
(Point(row=10, col=3), Player.white): 4796808638541206448,
(Point(row=17, col=19), Player.white): 9094637064189508507,
(Point(row=15, col=12), Player.black): 6337969641893708149,
(Point(row=18, col=9), Player.white): 6827069813348366394,
(Point(row=14, col=1), Player.white): 4856932872212340612,
(Point(row=10, col=16), Player.black): 1599230850428741800,
(Point(row=11, col=9), Player.black): 2246769922528586030,
(Point(row=18, col=5), Player.white): 3900808257801535827,
(Point(row=12, col=15), Player.black): 2075345125744217205,
(Point(row=11, col=13), Player.white): 6352449674538887967,
(Point(row=19, col=11), Player.white): 56050136652686310,
(Point(row=13, col=2), Player.white): 6284016731548062125,
(Point(row=12, col=7), Player.black): 2000786302113793216,
(Point(row=14, col=14), Player.white): 4326085171100634416,
(Point(row=2, col=5), Player.white): 4630966906511130196,
(Point(row=18, col=14), Player.black): 4732683096047849168,
(Point(row=5, col=8), Player.black): 95682779191246248,
(Point(row=8, col=13), Player.black): 1191918941386051816,
(Point(row=5, col=4), Player.white): 2564865951321580813,
(Point(row=10, col=15), Player.white): 6542245712443692473,
(Point(row=19, col=5), Player.black): 8089423724758179148,
(Point(row=14, col=1), Player.black): 8618995490952031174,
(Point(row=4, col=10), Player.black): 1926018888070830270,
(Point(row=17, col=12), Player.white): 1653456628569443972,
(Point(row=9, col=9), Player.white): 3570995489804585088,
(Point(row=10, col=13), Player.white): 2368335298592496353,
(Point(row=16, col=19), Player.black): 6534939770968789127,
(Point(row=6, col=8), Player.white): 3281996166367236453,
(Point(row=8, col=9), Player.black): 4820962955364149741,
(Point(row=4, col=19), Player.white): 9016702388841505537,
(Point(row=12, col=1), Player.white): 3721449572612086324,
(Point(row=15, col=4), Player.black): 1602621409445781014,
(Point(row=8, col=12), Player.black): 4783082945774834506,
(Point(row=11, col=2), Player.black): 2180688760914540067,
(Point(row=2, col=11), Player.black): 6167585027191118440,
(Point(row=8, col=15), Player.black): 4046884080914501218,
(Point(row=5, col=13), Player.white): 3909430268568316994,
(Point(row=3, col=17), Player.white): 4399625377986457210,
(Point(row=9, col=5), Player.white): 137830651505524108,
(Point(row=16, col=17), Player.white): 3508957034115067560,
(Point(row=13, col=3), Player.white): 6052330908112129508,
(Point(row=17, col=4), Player.black): 7566761271475300551,
(Point(row=4, col=13), Player.black): 5712676915053989190,
(Point(row=13, col=15), Player.black): 1575608649265893852,
(Point(row=2, col=18), Player.black): 9002396440035281901,
(Point(row=9, col=9), Player.black): 8864296002501030143,
(Point(row=8, col=14), Player.white): 1364397332387740960,
(Point(row=1, col=15), Player.white): 1426446637660345511,
(Point(row=15, col=18), Player.white): 3509293484331253464,
(Point(row=13, col=8), Player.black): 9174015167139054072,
(Point(row=13, col=16), Player.white): 2449890102405523850,
(Point(row=5, col=5), Player.white): 1574789240310030751,
(Point(row=16, col=3), Player.white): 3945249429993589015,
(Point(row=15, col=4), Player.white): 305093503540655133,
(Point(row=10, col=14), Player.black): 2459420219716421334,
(Point(row=11, col=4), Player.white): 5551861772446817192,
(Point(row=18, col=14), Player.white): 1554422268917173473,
(Point(row=13, col=12), Player.white): 6875993184644623610,
(Point(row=13, col=10), Player.white): 5710276992728290499,
(Point(row=1, col=14), Player.black): 1053132299578720502,
(Point(row=12, col=2), Player.black): 177211656815500662,
(Point(row=17, col=1), Player.white): 2748186715019885620,
(Point(row=19, col=2), Player.white): 2823185342659654273,
(Point(row=2, col=1), Player.white): 3804562459407734419,
(Point(row=3, col=9), Player.white): 6877545048005327598,
(Point(row=14, col=12), Player.white): 3447606326873331166,
(Point(row=16, col=12), Player.white): 1722807402685541718,
(Point(row=3, col=2), Player.white): 8064264360373896627,
(Point(row=5, col=11), Player.black): 2271905774997594087,
(Point(row=10, col=19), Player.black): 7259116450323762773,
(Point(row=6, col=2), Player.white): 8729751442830011845,
(Point(row=6, col=18), Player.black): 3287866706452717748,
(Point(row=3, col=19), Player.black): 6685540624905839902,
(Point(row=3, col=18), Player.white): 2033067431652527586,
(Point(row=19, col=7), Player.black): 7199529342193629322,
(Point(row=4, col=6), Player.white): 293131598194377332,
(Point(row=10, col=14), Player.white): 2745583935843962120,
(Point(row=6, col=1), Player.white): 1951402499805902206,
(Point(row=14, col=3), Player.white): 5655574882157416101,
(Point(row=13, col=7), Player.black): 79042380633272504,
(Point(row=9, col=1), Player.black): 1123993747014304321,
(Point(row=7, col=11), Player.black): 3563172153004357274,
(Point(row=17, col=1), Player.black): 8303104873017911737,
(Point(row=2, col=19), Player.white): 4470401760092235991,
(Point(row=15, col=3), Player.black): 3948210455522622599,
(Point(row=5, col=18), Player.white): 5762044134657313883,
(Point(row=18, col=16), Player.white): 2122000111964531072,
(Point(row=3, col=2), Player.black): 3210737301883184273,
(Point(row=18, col=6), Player.black): 7810812174504743138,
(Point(row=5, col=5), Player.black): 8452086670814239940,
(Point(row=10, col=9), Player.white): 7126078332556028119,
(Point(row=15, col=5), Player.black): 6239156966034936553,
(Point(row=15, col=1), Player.white): 1103760658877519330,
(Point(row=10, col=12), Player.white): 7101540098606327246,
(Point(row=15, col=8), Player.white): 6405922778855053984,
(Point(row=16, col=9), Player.black): 4755532664435384045,
(Point(row=5, col=2), Player.black): 5886133469015314054,
(Point(row=8, col=3), Player.black): 6285492160906290483,
(Point(row=12, col=14), Player.black): 5162228651856826069,
(Point(row=11, col=10), Player.white): 5184732526804762974,
(Point(row=13, col=3), Player.black): 7702334553828935656,
(Point(row=3, col=8), Player.white): 3588172511518628280,
(Point(row=2, col=2), Player.white): 5549964852192859112,
(Point(row=8, col=16), Player.black): 2066875695148106576,
(Point(row=17, col=6), Player.black): 7744468502812415426,
(Point(row=4, col=10), Player.white): 1014903899925303590,
(Point(row=18, col=6), Player.white): 1862799133113012147,
(Point(row=19, col=19), Player.black): 2345933963605778285,
(Point(row=12, col=18), Player.black): 764217853634645311,
(Point(row=16, col=18), Player.white): 8776904528357317165,
(Point(row=5, col=10), Player.white): 1299649349289209764,
(Point(row=12, col=12), Player.white): 5717243205542644710,
(Point(row=2, col=11), Player.white): 4882279216524677324,
(Point(row=2, col=4), Player.black): 5055557838328933698,
(Point(row=14, col=19), Player.black): 302896183053831912,
(Point(row=7, col=16), Player.white): 5487451658079387439,
(Point(row=4, col=8), Player.white): 5509265796984408890,
(Point(row=16, col=7), Player.white): 6183177966274698850,
(Point(row=17, col=15), Player.black): 2516424632030130850,
(Point(row=11, col=10), Player.black): 684388810545887198,
(Point(row=4, col=9), Player.black): 2477319201011867870,
(Point(row=1, col=7), Player.black): 5447483429354947427,
(Point(row=1, col=6), Player.black): 4068676992402546827,
(Point(row=11, col=17), Player.black): 8149299337399750125,
(Point(row=17, col=12), Player.black): 365081187020414172,
(Point(row=3, col=12), Player.white): 1689903298641342072,
(Point(row=17, col=11), Player.black): 6926278228168408982,
(Point(row=4, col=11), Player.black): 8662872549430103633,
(Point(row=4, col=12), Player.white): 6462389061901738824,
(Point(row=18, col=13), Player.black): 6158514514643956261,
(Point(row=5, col=12), Player.black): 7120090121199538973,
(Point(row=1, col=2), Player.black): 6938815493276724573,
(Point(row=12, col=2), Player.white): 7383861743580315524,
(Point(row=1, col=8), Player.black): 94283283050600950,
(Point(row=5, col=19), Player.white): 8465949199669580799,
(Point(row=8, col=10), Player.white): 1742308926907342905,
(Point(row=9, col=4), Player.black): 189336875932707896,
(Point(row=15, col=19), Player.white): 2861742259545552728,
(Point(row=6, col=17), Player.black): 388779490611822854,
(Point(row=15, col=17), Player.black): 9182595007980447297,
(Point(row=3, col=16), Player.white): 3678369990334852194,
(Point(row=2, col=15), Player.black): 4372183795095820171,
(Point(row=1, col=18), Player.white): 5406556242215553803,
(Point(row=4, col=2), Player.black): 3800090850242496445,
(Point(row=2, col=12), Player.white): 2392677782680297078,
(Point(row=19, col=9), Player.white): 8501481531703442411,
(Point(row=8, col=14), Player.black): 3360884357522615905,
(Point(row=16, col=15), Player.black): 8870336300345973822,
(Point(row=6, col=19), Player.white): 56348360033471784,
(Point(row=15, col=10), Player.white): 667414241124203146,
(Point(row=16, col=7), Player.black): 23028899636539794,
(Point(row=16, col=1), Player.black): 3840837529608593465,
(Point(row=15, col=2), Player.black): 6140402795848495424,
(Point(row=16, col=11), Player.white): 6878389759602636410,
(Point(row=9, col=5), Player.black): 154011992581209274,
(Point(row=11, col=7), Player.white): 9052276412915057567,
(Point(row=5, col=6), Player.white): 8286281702739988043,
(Point(row=6, col=2), Player.black): 2133615147947859225,
(Point(row=19, col=1), Player.white): 5517072955455253284,
(Point(row=4, col=5), Player.black): 474658709124801574,
(Point(row=2, col=2), Player.black): 8097862963653486542,
(Point(row=17, col=9), Player.white): 599424495706811119,
(Point(row=12, col=3), Player.black): 9203872499892187410,
(Point(row=13, col=17), Player.white): 1251070654448689924,
(Point(row=8, col=4), Player.white): 7095381027107779115,
(Point(row=10, col=10), Player.black): 526526592040805751,
(Point(row=17, col=2), Player.white): 8458016189193563924,
(Point(row=13, col=16), Player.black): 5320609164734048651,
(Point(row=4, col=14), Player.black): 8062957935158961656,
(Point(row=15, col=14), Player.white): 817321022290725549,
(Point(row=15, col=1), Player.black): 1718249162394861829,
(Point(row=3, col=9), Player.black): 5824900800226869019,
(Point(row=16, col=2), Player.white): 7232152668223454928,
(Point(row=11, col=8), Player.black): 3095606924889488855,
(Point(row=11, col=15), Player.white): 6266368939563310562,
(Point(row=11, col=11), Player.white): 7326554932467011368,
(Point(row=2, col=9), Player.black): 8482003039224977422,
(Point(row=14, col=17), Player.white): 6851679628127909465,
(Point(row=12, col=10), Player.black): 5732265585469845166,
(Point(row=16, col=17), Player.black): 8412769307648629675,
(Point(row=10, col=8), Player.white): 1117959631323901058,
(Point(row=4, col=5), Player.white): 7173521796631969334,
(Point(row=18, col=8), Player.black): 8602094765524468628,
(Point(row=2, col=12), Player.black): 4733936438425541404,
(Point(row=19, col=17), Player.black): 5248168160274267626,
(Point(row=9, col=11), Player.white): 8870919433177564605,
(Point(row=13, col=6), Player.black): 7380460660210891048,
(Point(row=5, col=3), Player.black): 5974646763871038805,
(Point(row=1, col=5), Player.black): 2003688140949458949,
(Point(row=11, col=19), Player.white): 372639670381455128,
(Point(row=9, col=15), Player.black): 4827608768248881990,
(Point(row=16, col=1), Player.white): 4031532620805205734,
(Point(row=7, col=7), Player.black): 2955155144536816061,
(Point(row=6, col=18), Player.white): 3351002419052351266,
(Point(row=19, col=16), Player.white): 3290608803507930781,
(Point(row=14, col=15), Player.black): 8518638178459347686,
(Point(row=9, col=17), Player.black): 7648483316508755073,
(Point(row=14, col=4), Player.black): 5201725500573673536,
(Point(row=17, col=17), Player.black): 2898887965748761878,
(Point(row=10, col=2), Player.black): 861297707504228216,
(Point(row=8, col=17), Player.white): 4480649830799620278,
(Point(row=18, col=10), Player.white): 6306722836986519589,
(Point(row=5, col=16), Player.black): 6658943788776995998,
(Point(row=7, col=3), Player.black): 269457878793499600,
(Point(row=7, col=1), Player.black): 6328509211739618375,
(Point(row=14, col=10), Player.black): 5313526223410619422,
(Point(row=2, col=3), Player.white): 4402051147394464997,
(Point(row=18, col=19), Player.white): 7044631298679117722,
(Point(row=17, col=7), Player.white): 6698955967509300015,
(Point(row=19, col=10), Player.white): 8563893649335258547,
(Point(row=11, col=19), Player.black): 2529513789628196536,
(Point(row=11, col=11), Player.black): 2972671929146542472,
(Point(row=6, col=3), Player.white): 1001121514444220402,
(Point(row=3, col=15), Player.white): 5689462004145751751,
(Point(row=9, col=4), Player.white): 7442529448260828970,
(Point(row=4, col=3), Player.black): 3261083201315583423,
(Point(row=15, col=12), Player.white): 8278894342678888879,
(Point(row=4, col=4), Player.black): 1989316053021505161,
(Point(row=17, col=3), Player.white): 2546138643495787279,
(Point(row=9, col=8), Player.white): 2325282932644595299,
(Point(row=13, col=11), Player.black): 2730327449943021321,
(Point(row=14, col=8), Player.white): 7231760218955914607,
(Point(row=4, | |
<filename>bu.py<gh_stars>0
# std:
import os;
import time;
import json;
import random;
import functools;
# pip-ext:
import bottle;
# pip-int:
import dotsi;
# loc:
import hashUp;
import utils;
request = bottle.request;
response = bottle.response;
redirect = bottle.redirect;
staticFile = bottle.static_file;
############################################################
# Config related: #
############################################################
# Module-level configuration, dot-accessible.
# Exo-module usage: bu.config.cookieSecret = "new secret"; or config.update({...});
config = dotsi.fy({
    "cookieSecret": "__default_cookie_secret_123__",  # override via setCookieSecret()
})
def setCookieSecret (newCookieSecret):
    "Replaces the module-wide secret used for signing cookies."
    config.cookieSecret = newCookieSecret
############################################################
# Static and shortcut related: #
############################################################
def addStaticFolder (app, folderPath, slug=None):
    """ Helps serve static files in `folderPath` under '/<slug>/...'.
        If `slug` is not supplied, the folder's own name is used.
    """
    folderPath = os.path.abspath(folderPath);
    # Use os.path.basename instead of splitting on "/" (and instead of the
    # old `assert "/" in folderPath`) so this also works with Windows paths.
    slug = slug or os.path.basename(folderPath);
    @app.get("/" + slug + "/<filepath:path>")
    def get_slug_plus (filepath):
        # Directory-style requests (trailing slash) map to index.html.
        if filepath.endswith("/"):
            return redirect("/" + slug + "/" + filepath + "index.html");
        return staticFile(filepath, folderPath);
    @app.get("/" + slug)
    @app.get("/" + slug + "/")
    def get_slug_top():
        return redirect("/" + slug + "/" + "index.html");
def addSingleShortcut (app, srcPath, tgtUrl): # Simple (internal) helper, doesn't accept param `suffixList`.
    """ Adds redirection route from `srcPath` to `tgtUrl`.
        Param `tgtUrl` may be full URL or just a path.
    """
    @app.get(srcPath)
    def redirector ():
        # Carry the incoming query string over to the target, joining with
        # "?" or "&" depending on whether the target already has a query.
        srcQs = request.urlparts.query
        if srcQs:
            joiner = "&" if "?" in tgtUrl else "?"
            return redirect(tgtUrl + joiner + srcQs)
        return redirect(tgtUrl)
def addShortcuts (app, pathMap, suffixList=None):
    """ Adds multiple shortcuts, from pathMap's keys to its values.
        `suffixList` defaults to ["/", ".html"]; each suffixed variant of a
        source path redirects to the same target.
    """
    # BUGFIX/idiom: avoid a mutable default argument; None means "standard
    # suffixes". Behavior is unchanged for all existing callers.
    if suffixList is None:
        suffixList = ["/", ".html"];
    for srcPath, tgtUrl in pathMap.items():
        addSingleShortcut(app, srcPath, tgtUrl); # Eg: '/signup' --> '/front/signup.html'
        for suffix in suffixList:
            addSingleShortcut(app, srcPath + suffix, tgtUrl); # Eg: '/signup/' --> '/front/signup.html' (where '/' is a suffix.)
def setMemfileMax (memfileMax):
    "Caps how large a request body bottle will buffer in memory."
    bottle.BaseRequest.MEMFILE_MAX = memfileMax
############################################################
# Rendering related: #
############################################################
def addTemplateFolder (folderPath):
    "Tells bottle to look for templates in `folderPath`."
    absPath = os.path.abspath(folderPath)
    bottle.TEMPLATE_PATH.append(absPath)
    return None
# Params merged into every render() call unless the caller overrides them.
defaultRenderParams = {};
def enableUnderscoreFriendlyRendering ():
    """Switches bottle's template syntax away from the default
    '<% %> % {{ }}' so that client-side underscore.js templates (which
    also use '<% ... %>') can be embedded verbatim. In particular '%'
    cannot remain 'line_start': the closing '%>' line of a js block would
    otherwise raise a SyntaxError.
    """
    defaultRenderParams["template_settings"] = {
        # format: 'block_start block_close line_start inline_start inline_end'
        "syntax": "<python> </python> @ {{py: }}",
    }
def render(tplName, **kwargs):
    "Renders a template, filling in module-level default params."
    # Defaults never override params the caller explicitly passed.
    for dpKey, dpVal in defaultRenderParams.items():
        kwargs.setdefault(dpKey, dpVal)
    return bottle.template(tplName, **kwargs)
def view(tplName):
    "Returns a decorator that renders a handler's dict via `tplName`."
    def renderDecorator(oFunc):
        def nFunc(*args, **kwargs):
            # The wrapped handler returns a dict of template params.
            templateParams = oFunc(*args, **kwargs)
            return render(tplName, **templateParams)
        return functools.update_wrapper(nFunc, oFunc)
    return renderDecorator
############################################################
# Aborting related: #
############################################################
def abort (x, code=None, req=None):
    """ Aborts with a smartly-picked `code`, if unspecified.
        `x` may be a plain string, an Exception, or a fail-dict of the form
        {"status": "fail", "reason": "..."} (for JSON requests).
    """
    req = req or bottle.request;
    if req.content_type != "application/json":
        code = code or 404; # All non-JSON requests are, by default, aborted w/ "404 Not Found"
        assert type(x) is str;
        return bottle.abort(code, x);
    # ==> We're dealing w/ a JSON request.
    code = code or 418; # All JSON requests are, by default, aborted w/ "418 I'm a teapot"
    if isinstance(x, Exception):
        # BUGFIX: Python 3 exceptions have no `.message` attribute, so the
        # old `x.message` raised AttributeError; str(x) is the portable
        # spelling. This also handles empty-message exceptions, which
        # previously fell through to the dict branch and failed its assert.
        d = {"status": "fail", "reason": str(x)};
    elif type(x) is str:
        d = {"status": "fail", "reason": x};
    else:
        assert all([
            type(x) in [dict, dotsi.Dict],
            x["status"] == "fail",
            type(x["reason"]) is str,
        ]);
        d = dict(x);
    return bottle.abort(code, d);
def abort200 (html):
    # PEP8 (E731): a def, not a lambda assignment. Aborts with "200 OK",
    # short-circuiting the handler and sending `html` directly.
    return abort(html, 200);
def renderAndAbort(tplName, **kwargs):
    "Combines render and abort200 into a single function."
    html = render(tplName, **kwargs)
    return abort200(html)
def claim (stmt, error=None, code=None):
    "An assert-like wrapper around `abort(.)`."
    if stmt:
        return True  # Claim holds; nothing to do.
    # ==> Claim failed: abort with a user-friendly message.
    # (Default was: "Claim failed." ... but that makes no sense to users.)
    message = error or "The claimed resource could not be found."
    assert type(message) is str
    return abort(message, code)
def claimKeys (dicty, keyList, error=None, code=None):
    "Helps claim that each key in keyList are in dicty."
    for key in keyList:
        message = error or "Key not found: %s" % key
        assert claim(key in dicty, message, code)
    return True
############################################################
# Query params & form data related: #
############################################################
def getRequestQueryParamData ():
    "Wrapper around bottle.request.query."
    # Expose query-string params as a dot-accessible dict.
    qdata = dotsi.Dict(request.query)
    return qdata
get_qdata = getRequestQueryParamData  # Alias.
def getRequestPostBodyParamData ():
    "Wrapper around bottle.request.forms."
    # Expose POSTed form fields as a dot-accessible dict.
    pdata = dotsi.Dict(request.forms)
    return pdata
get_pdata = getRequestPostBodyParamData  # Alias.
def getRequestJson (ensure=None):
    "Get request.body.json as edict, with option to ensure keys."
    if request.content_type != "application/json":
        # Non-JSON body ==> the client can't speak our API dialect.
        print("\n\n\trequest.content_type = %s\n\n" % request.content_type)
        return abort("".join([
            "INCOMPATIBLE BROWSER.\n",
            "\n",
            "Your browser can't handle JSON data.\n"
            "Please update or switch your browser.\n"
            "\n",
            #"See: " + K.SITE_URL + "/browser-update",
        ]))
    # ==> We're dealing with a proper JSON request.
    idata = dotsi.Dict(request.json)
    if ensure:
        # ==> Caller wants certain keys guaranteed present.
        assert type(ensure) in [list, str]
        keyList = utils.readKeyz(ensure)
        if keyList:
            assert claimKeys(idata, keyList)
    # ==> All claims (if any) passed.
    return idata
get_jdata = getRequestJson  # Alias.
def unpackRequestJson (keyz):
    "Reads request JSON, ensures `keyz` are present, and unpacks them."
    jdata = get_jdata(ensure=keyz)
    return utils.unpack(jdata, keyz)
unpack_jdata = unpackRequestJson  # Alias.
############################################################
# Hashing & cookie related: #
############################################################
# Module-wide signer/verifier used by all signed-cookie helpers below.
hasher = hashUp.buildHasher();
def setUnsignedCookie (
        name, value, resp=None, httpOnly=True,
        path="/", maxAge=None,
    ):
    "Sets an unsigned, default-httpOnly cookie."
    resp = resp or response
    assert type(httpOnly) is bool
    assert type(path) is str
    assert maxAge is None or type(maxAge) is int
    # bottle rejects max_age=None, so only include it when it's an int.
    cookieKwargs = {"httponly": httpOnly, "path": path}
    if type(maxAge) is int:
        cookieKwargs["max_age"] = maxAge
    resp.set_cookie(name, value, **cookieKwargs)
    return value
def setCookie(
        name, data, secret=None, resp=None, httpOnly=True,
        path="/", maxAge=None,
    ):
    "Sets a signed, default-httpOnly cookie."
    # Sign `data` with the supplied (or module-configured) secret, then
    # store the signed wrapper via setUnsignedCookie.
    signWrapped = hasher.signWrap(data, secret or config.cookieSecret)
    setUnsignedCookie(
        name, value=signWrapped, resp=resp,
        httpOnly=httpOnly, path=path, maxAge=maxAge,
    )
    return signWrapped
# Note: There's no .getUnsignedCookie() as unsigned
# cookies should not be trusted by the server.
def getCookie(name, secret=None, strict=True, req=None):
    """ Gets (and signature-verifies) a signed HTTP-only cookie.
        With strict=True (default), a missing or malformed cookie aborts
        the request; with strict=False, a missing cookie returns None.
    """
    req = req or bottle.request;
    secret = secret or config.cookieSecret;
    signWrapped = req.get_cookie(name);
    if not signWrapped:
        if strict:
            return abort("Session not found. Please log in.", req=req); # Short ckt.
        return None;
    # ==> FOUND _some_ stringy cookie data.
    try:
        data = hasher.signUnwrap(signWrapped, secret=secret);
    except hasher.SignatureInvalidError:
        # ==> Bad cookie data.
        time.sleep(random.random() * 2); # Randomly sleep between [0, 2) seconds, making timing-attacks difficult.
        # BUGFIX: clearCookie forwards its kwargs to setUnsignedCookie, which
        # has no `req` parameter -- the old `clearCookie(name, req=req)` call
        # raised TypeError. Clearing happens on the *response* anyway.
        clearCookie(name); # TODO: Investigate if bottle actually clears cookies along w/ abort(.);
        return abort("Malformed session encountered. Please log out and then log in.", req=req);
    # ==> SIGNATURE OK. (Valid and non-expired.)
    return data;
def clearCookie (name, **kwargs):
    "Clears the named cookie by blanking its value."
    # IDEMPOTENT: blindly overwrites with "" whether or not the cookie
    # currently exists or holds a valid value. Extra kwargs flow through
    # to setUnsignedCookie (resp, httpOnly, path, maxAge).
    setUnsignedCookie(name, "", **kwargs)
############################################################
# Plugins. #
############################################################
def mkPlugin_enforeSchemeAndNetloc (reqdScheme, reqdNetloc):
    """ Creates plugin (decorator) for ensuring proper scheme & netloc.
        Requests arriving on the wrong scheme/netloc are redirected to the
        canonical URL. (The name keeps its historical 'enfore' typo for
        backward compatibility; prefer the correctly-spelled alias below.)
    """
    assert reqdScheme in ["http", "https"];
    def plugin_enforceSchemeAndNetloc (oFunc):
        "Plugin for ensuring proper scheme and netloc.";
        def nFunc(*args, **kwargs):
            scheme = request.urlparts.scheme;
            netloc = request.urlparts.netloc;
            if scheme == reqdScheme and netloc == reqdNetloc:
                # ==> Good scheme and netloc.
                return oFunc(*args, **kwargs);
            # ==> Bad scheme/netloc: redirect to the canonical URL.
            # .replace(..., 1) swaps only the first occurrence, which is the
            # leading scheme/netloc, not text later in the path.
            url = request.url;
            url = url.replace(scheme, reqdScheme, 1);
            url = url.replace(netloc, reqdNetloc, 1);
            return redirect(url);
        return functools.update_wrapper(nFunc, oFunc);
    # Return the created plugin:
    return plugin_enforceSchemeAndNetloc;
mkPlugin_enforceSchemeAndNetloc = mkPlugin_enforeSchemeAndNetloc; # Correctly-spelled alias.
def plugin_noindex (oFunc):
    "Plugin adds `X-Robots-Tag: noindex` response header."
    def nFunc (*args, **kwargs):
        result = oFunc(*args, **kwargs)
        # Prefer the handler's own response object if it returned one;
        # otherwise fall back to bottle's thread-local `response`.
        target = result if isinstance(result, bottle.BaseResponse) else response
        target.set_header("X-Robots-Tag", "noindex")
        return result
    return functools.update_wrapper(nFunc, oFunc)
def plugin_frameDeny (oFunc):
    "Plugin adds `X-Frame-Options: DENY` response header."
    def nFunc (*args, **kwargs):
        result = oFunc(*args, **kwargs)
        # Prefer the handler's own response object if it returned one;
        # otherwise fall back to bottle's thread-local `response`.
        target = result if isinstance(result, bottle.BaseResponse) else response
        target.set_header("X-Frame-Options", "DENY")
        return result
    return functools.update_wrapper(nFunc, oFunc)
def plugin_timer (oFunc):
    "Plugin for measuring exec-time (units per utils.now) of a handler."
    def nFunc (*args, **kwargs):
        start = utils.now()
        result = oFunc(*args, **kwargs)
        delta = utils.now() - start
        # Prefer the handler's own response object if it returned one;
        # otherwise fall back to bottle's thread-local `response`.
        target = result if isinstance(result, bottle.BaseResponse) else response
        target.set_header("X-Exec-Time", str(delta))
        return result
    return functools.update_wrapper(nFunc, oFunc)
############################################################
# Misc. #
############################################################
def setResponseHeaders (headerMap):
    "Sets each name->value pair from `headerMap` on the global response."
    for headerName, headerVal in headerMap.items():
        response.set_header(headerName, headerVal)
def getClientIp (): # TODO: Write a plugin that | |
<gh_stars>10-100
"""
Implementation slightly modified from [1]_.
References:
..[1] https://github.com/lukemelas/EfficientNet-PyTorch
"""
import torch
from torch import nn
from torch.nn import functional as F
from netharn import layers
import re
import math
import collections
from functools import partial
from torch.utils import model_zoo
class Conv2dDynamicSamePadding(nn.Conv2d, layers.AnalyticModule):
    """ 2D Convolutions like TensorFlow, for a dynamic image size.

    The underlying ``nn.Conv2d`` is constructed with padding=0; the
    TF-style 'SAME' padding is computed per input inside ``forward``,
    so the layer works for any (dynamic) spatial size.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        # Padding is hard-wired to 0 here; forward() pads explicitly instead.
        super(Conv2dDynamicSamePadding, self).__init__(in_channels,
                                                       out_channels,
                                                       kernel_size, stride, 0,
                                                       dilation, groups, bias)
        # Normalize stride to one value per spatial dimension (h, w).
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
    def forward(self, x):
        # Compute total padding so the output size is ceil(input / stride),
        # matching TensorFlow's 'SAME' convention.
        ih, iw = x.size()[-2:]
        kh, kw = self.weight.size()[-2:]
        sh, sw = self.stride
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h > 0 or pad_w > 0:
            # Asymmetric split: for odd totals the extra pixel goes on the
            # bottom/right, as TensorFlow does.
            x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
    @classmethod
    def forsize(cls, image_size=None):
        """ Chooses static padding if you have specified an image size, and dynamic padding otherwise.
        Static padding is necessary for ONNX exporting of models. """
        if image_size is None:
            return Conv2dDynamicSamePadding
        else:
            return partial(Conv2dStaticSamePadding, image_size=image_size)
    def _analytic_forward(self, inputs, _OutputFor, _Output, _Hidden,
                          **kwargs):
        """
        Analytic (e.g. shape-for) analogue of ``forward`` used by netharn's
        ``AnalyticModule`` machinery: ``inputs`` may be a shape tuple rather
        than a tensor, and every op is routed through ``_OutputFor`` with
        intermediates recorded in ``hidden``.

        Example:
            >>> # xdoctest: +REQUIRES(module:ndsampler)
            >>> from netharn.models.efficientnet import * # NOQA
            >>> import netharn as nh
            >>> kwargs = layers.AnalyticModule._analytic_shape_kw()
            >>> globals().update(kwargs)
            >>> inputs = (1, 3, 224, 224)
            >>> self = Conv2dDynamicSamePadding(2, 3, 5)
            >>> outputs = self.output_shape_for(inputs)
            >>> import ubelt as ub
            >>> print(nh.util.align(ub.repr2(outputs.hidden, nl=-1), ':'))
        """
        hidden = _Hidden()
        x = inputs
        # Same 'SAME'-padding arithmetic as forward(), on analytic shapes.
        ih, iw = _OutputFor.shape(x)[-2:]
        kh, kw = self.weight.size()[-2:]
        sh, sw = self.stride
        oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        if pad_h > 0 or pad_w > 0:
            pad = [pad_w // 2, pad_w - pad_w // 2,
                   pad_h // 2, pad_h - pad_h // 2]
            x = hidden['dynamic_padding'] = _OutputFor(F.pad)(x, pad)
        weight = self.weight
        # NOTE(review): unlike forward(), `bias` here is a *bool* flag
        # (self.bias is not None) rather than the bias tensor -- presumably
        # sufficient for shape analysis; verify against AnalyticModule.
        bias = self.bias is not None
        stride = self.stride
        padding = self.padding
        dilation = self.dilation
        groups = self.groups
        y = hidden['conv'] = _OutputFor(F.conv2d)(x, weight, bias, stride,
                                                  padding, dilation, groups)
        outputs = _Output.coerce(y, hidden)
        return outputs
class Conv2dStaticSamePadding(nn.Conv2d, layers.AnalyticModule):
    """ 2D Convolutions like TensorFlow, for a fixed image size

    TensorFlow-style "SAME" padding is precomputed once from ``image_size``
    at construction time, keeping the forward pass static (a requirement
    for ONNX export), unlike :class:`Conv2dDynamicSamePadding` which pads
    at runtime.
    """
    def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):
        super(Conv2dStaticSamePadding, self).__init__(in_channels, out_channels, kernel_size, **kwargs)
        # Normalize stride to exactly two entries (one per spatial dim)
        self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
        # Calculate padding based on image size and save it
        assert image_size is not None
        # Accept an (h, w) sequence or a single int for square inputs.
        # (Previously only `list` was recognized, so a tuple silently fell
        # into the "square" branch and broke the arithmetic below.)
        if isinstance(image_size, (list, tuple)):
            ih, iw = image_size
        else:
            ih = iw = image_size
        kh, kw = self.weight.size()[-2:]
        sh, sw = self.stride
        # Target output size under "SAME" semantics: ceil(input / stride)
        oh, ow = int(math.ceil(ih / sh)), int(math.ceil(iw / sw))
        # Total padding required per axis (clamped at zero)
        pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
        pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
        self.image_size = image_size
        self._pad = (pad_h, pad_w)
        self._pad_w = pad_w
        # BUG FIX: `_pad_h` was previously assigned `pad_w`
        self._pad_h = pad_h
        if pad_h > 0 or pad_w > 0:
            # Asymmetric split: left/top gets the smaller half, as in TF
            self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
        else:
            self.static_padding = layers.Identity()

    def forward(self, x):
        """Pad with the precomputed "SAME" padding, then convolve."""
        x = self.static_padding(x)
        x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
        return x

    def _analytic_forward(self, inputs, _OutputFor, _Output, _Hidden,
                          **kwargs):
        """
        Symbolic mirror of `forward` used for shape analysis by
        `layers.AnalyticModule`.

        Example:
            >>> # xdoctest: +REQUIRES(module:ndsampler)
            >>> from netharn.models.efficientnet import * # NOQA
            >>> import netharn as nh
            >>> kwargs = layers.AnalyticModule._analytic_shape_kw()
            >>> globals().update(kwargs)
            >>> inputs = (1, 3, 224, 224)
            >>> self = Conv2dStaticSamePadding(2, 3, 5, image_size=[512, 512])
            >>> outputs = self.output_shape_for(inputs)
            >>> import ubelt as ub
            >>> print(nh.util.align(ub.repr2(outputs.hidden, nl=-1), ':'))
        """
        hidden = _Hidden()
        x = inputs
        x = hidden['static_padding'] = _OutputFor(self.static_padding)(x)
        # NOTE(review): only the *presence* of the bias is forwarded,
        # which the analytic machinery presumably treats as a flag — confirm
        y = hidden['conv'] = _OutputFor(F.conv2d)(
            x, self.weight, self.bias is not None, self.stride, self.padding,
            self.dilation, self.groups)
        outputs = _Output.coerce(y, hidden)
        return outputs
##################
# Model definition
##################
class MBConvBlock(layers.AnalyticModule):
    """
    Mobile Inverted Residual Bottleneck Block
    Args:
        block_args (BlockArgs): see :class:`Details`
        global_params (GlobalParam): see :class:`Details`
    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
    """
    def __init__(self, block_args, global_params):
        super(MBConvBlock, self).__init__()
        self._block_args = block_args
        # PyTorch BatchNorm momentum is the complement of the TF convention
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        # Squeeze-and-excitation only applies for a valid ratio in (0, 1]
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip # skip connection and drop connect
        # Get static or dynamic convolution depending on image size
        Conv2d = Conv2dDynamicSamePadding.forsize(image_size=global_params.image_size)
        # Expansion phase: 1x1 conv widens channels by `expand_ratio`
        inp = self._block_args.input_filters # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
        if self._block_args.expand_ratio != 1:
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        # Depthwise convolution phase
        k = self._block_args.kernel_size
        s = self._block_args.stride
        # Note: it is important to set the weight decay to be very low for the
        # depthwise convolutions
        self._depthwise_conv = Conv2d(
            in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
            kernel_size=k, stride=s, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        # Squeeze and Excitation layer, if desired
        if self.has_se:
            # Squeeze channels relative to the block *input* width
            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
        # Output phase: 1x1 projection back down to `output_filters`
        final_oup = self._block_args.output_filters
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        # Note that the bn2 layer before the residual add, should be
        # initialized with gamma=0; `_residual_bn` marks it for that init
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        self._bn2._residual_bn = True
        noli = global_params.noli
        # Shared nonlinearity module built from the configured name
        self._noli = layers.rectify_nonlinearity(noli, dim=2)
    def forward(self, inputs, drop_connect_rate=None):
        """
        Runs expansion -> depthwise -> (SE) -> projection, with an optional
        drop-connected residual skip.
        :param inputs: input tensor
        :param drop_connect_rate: drop connect rate (float, between 0 and 1)
        :return: output of block
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = self._noli(self._bn0(self._expand_conv(inputs)))
        x = self._noli(self._bn1(self._depthwise_conv(x)))
        # Squeeze and Excitation: global pool -> bottleneck -> channel gate
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_expand(self._noli(self._se_reduce(x_squeezed)))
            x = torch.sigmoid(x_squeezed) * x
        # Projection (no nonlinearity after the residual-facing BN)
        x = self._bn2(self._project_conv(x))
        # Skip connection and drop connect; only valid when the block
        # preserves both resolution (stride 1) and channel count
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            if drop_connect_rate:
                x = self.drop_connect(x, p=drop_connect_rate)
            x = x + inputs # skip connection
        return x
    @classmethod
    def demo(MBConvBlock):
        # Build a block from the first layer's default EfficientNet params.
        # `Details` is presumably defined elsewhere in this module — verify.
        layer_block_args, global_params = Details.build_efficientnet_params()
        block_args = layer_block_args[0]
        self = MBConvBlock(block_args, global_params)
        return self
    def _analytic_forward(self, inputs, _OutputFor, _Output, _Hidden,
                          **kwargs):
        """
        Symbolic mirror of `forward` used for shape analysis by
        `layers.AnalyticModule`; records every intermediate in `hidden`.
        Example:
            >>> # xdoctest: +REQUIRES(module:ndsampler)
            >>> from netharn.models.efficientnet import * # NOQA
            >>> import netharn as nh
            >>> self = MBConvBlock.demo()
            >>> kwargs = self._analytic_shape_kw()
            >>> globals().update(kwargs)
            >>> input_shape = inputs = (1, 32, 224, 224)
            >>> outputs = self.output_shape_for(input_shape)
            >>> import ubelt as ub
            >>> print(nh.util.align(ub.repr2(outputs.hidden, nl=-1), ':'))
        """
        hidden = _Hidden()
        # Expansion and Depthwise Convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = hidden['expand_conv'] = _OutputFor(self._expand_conv)(inputs)
            x = hidden['_bn0'] = _OutputFor(self._bn0)(x)
            x = hidden['_noli0'] = _OutputFor(self._noli)(x)
        x = hidden['depthwise_conv'] = _OutputFor(self._depthwise_conv)(x)
        x = hidden['_bn1'] = _OutputFor(self._bn1)(x)
        x = hidden['_noli1'] = _OutputFor(self._noli)(x)
        # Squeeze and Excitation
        if self.has_se:
            x_squeezed = hidden['_se_pool'] = _OutputFor(F.adaptive_avg_pool2d)(x, 1)
            x_squeezed = hidden['_se_reduce'] = _OutputFor(self._se_reduce)(x_squeezed)
            x_squeezed = hidden['_se_noli'] = _OutputFor(self._noli)(x_squeezed)
            x_squeezed = hidden['_se_expand'] = _OutputFor(self._se_expand)(x_squeezed)
            x_squeezed = hidden['_se_sigmoid'] = _OutputFor(torch.sigmoid)(x_squeezed)
            x = hidden['_se_mul'] = _OutputFor.mul(x_squeezed, x)
        x = hidden['_project'] = _OutputFor(self._project_conv)(x)
        x = hidden['_bn2'] = _OutputFor(self._bn2)(x)
        # Skip connection and drop connect
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            drop_connect_rate = kwargs.get('drop_connect_rate', 0)
            if drop_connect_rate:
                try:
                    # Best-effort: symbolic inputs may not support the
                    # tensor ops inside drop_connect
                    x = self.drop_connect(x, p=drop_connect_rate)
                except Exception:
                    pass
                hidden['drop_connect'] = x
            # skip connection
            x = hidden['skip'] = _OutputFor.add(x, inputs)
        outputs = _Output.coerce(x, hidden)
        return outputs
    def drop_connect(self, inputs, p):
        """ Drop connect (stochastic depth): randomly zeroes whole samples
        in the batch with probability `p` and rescales survivors by
        1 / keep_prob. A no-op in eval mode. """
        if not self.training:
            return inputs
        batch_size = inputs.shape[0]
        keep_prob = 1 - p
        # floor(keep_prob + U[0, 1)) is 1 with probability keep_prob
        random_tensor = keep_prob
        random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
        binary_tensor = torch.floor(random_tensor)
        # Rescale so the expected activation magnitude is unchanged
        output = inputs / keep_prob * binary_tensor
        return output
class EfficientNet(layers.AnalyticModule):
"""
An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods
Args:
blocks_args (List[BlockArgs]): arguments for each block
| |
<reponame>zenwerk/Pyxif
"""EXIF is a set of several IFDs.
0th IFD can include "Make", "Model" and more...
Exif IFD can include "ExposureTime", "ISOSpeed" and more...
GPS IFD can include GPS information.
Pass dict(s), that shows several IFD, to "dump" function.
exifbytes = pyxif.dump(0th_dict, exif_dict, gps_dict) # second and third are optional.
To use dict as IFD data, it needs...
A tag number means which property? - 256: ImageWidth, 272: Model...
Appropriate type for property. - long for ImageWidth, str for Model...
zeroth_ifd = {pyxif.ImageGroup.Make: "Canon",
pyxif.ImageGroup.XResolution: (96, 1),
pyxif.ImageGroup.YResolution: (96, 1),
pyxif.ImageGroup.Software: "Photoshop x.x.x",
}
Property name and tag number
For 0th IFD - under "pyxif.ImageGroup"
For Exif IFD - under "pyxif.PhotoGroup"
For GPS IFD - under "pyxif.GPSInfoGroup"
Property and appropriate type
See the variable "TAGS" in this script.
"Byte": int
"Ascii": str
"Short": int
"Long": Long
"Rational": (long, long)
"Undefined": str
"SLong": long
"SRational": (long, long)
"""
import io
import struct
from ._common import *
TAGS = {
'Image': {11: {'group': 'ProcessingSoftware', 'type': 'Ascii'},
254: {'group': 'NewSubfileType', 'type': 'Long'},
255: {'group': 'SubfileType', 'type': 'Short'},
256: {'group': 'ImageWidth', 'type': 'Long'},
257: {'group': 'ImageLength', 'type': 'Long'},
258: {'group': 'BitsPerSample', 'type': 'Short'},
259: {'group': 'Compression', 'type': 'Short'},
262: {'group': 'PhotometricInterpretation', 'type': 'Short'},
263: {'group': 'Threshholding', 'type': 'Short'},
264: {'group': 'CellWidth', 'type': 'Short'},
265: {'group': 'CellLength', 'type': 'Short'},
266: {'group': 'FillOrder', 'type': 'Short'},
269: {'group': 'DocumentName', 'type': 'Ascii'},
270: {'group': 'ImageDescription', 'type': 'Ascii'},
271: {'group': 'Make', 'type': 'Ascii'},
272: {'group': 'Model', 'type': 'Ascii'},
273: {'group': 'StripOffsets', 'type': 'Long'},
274: {'group': 'Orientation', 'type': 'Short'},
277: {'group': 'SamplesPerPixel', 'type': 'Short'},
278: {'group': 'RowsPerStrip', 'type': 'Long'},
279: {'group': 'StripByteCounts', 'type': 'Long'},
282: {'group': 'XResolution', 'type': 'Rational'},
283: {'group': 'YResolution', 'type': 'Rational'},
284: {'group': 'PlanarConfiguration', 'type': 'Short'},
290: {'group': 'GrayResponseUnit', 'type': 'Short'},
291: {'group': 'GrayResponseCurve', 'type': 'Short'},
292: {'group': 'T4Options', 'type': 'Long'},
293: {'group': 'T6Options', 'type': 'Long'},
296: {'group': 'ResolutionUnit', 'type': 'Short'},
301: {'group': 'TransferFunction', 'type': 'Short'},
305: {'group': 'Software', 'type': 'Ascii'},
306: {'group': 'DateTime', 'type': 'Ascii'},
315: {'group': 'Artist', 'type': 'Ascii'},
316: {'group': 'HostComputer', 'type': 'Ascii'},
317: {'group': 'Predictor', 'type': 'Short'},
318: {'group': 'WhitePoint', 'type': 'Rational'},
319: {'group': 'PrimaryChromaticities', 'type': 'Rational'},
320: {'group': 'ColorMap', 'type': 'Short'},
321: {'group': 'HalftoneHints', 'type': 'Short'},
322: {'group': 'TileWidth', 'type': 'Short'},
323: {'group': 'TileLength', 'type': 'Short'},
324: {'group': 'TileOffsets', 'type': 'Short'},
325: {'group': 'TileByteCounts', 'type': 'Short'},
330: {'group': 'SubIFDs', 'type': 'Long'},
332: {'group': 'InkSet', 'type': 'Short'},
333: {'group': 'InkNames', 'type': 'Ascii'},
334: {'group': 'NumberOfInks', 'type': 'Short'},
336: {'group': 'DotRange', 'type': 'Byte'},
337: {'group': 'TargetPrinter', 'type': 'Ascii'},
338: {'group': 'ExtraSamples', 'type': 'Short'},
339: {'group': 'SampleFormat', 'type': 'Short'},
340: {'group': 'SMinSampleValue', 'type': 'Short'},
341: {'group': 'SMaxSampleValue', 'type': 'Short'},
342: {'group': 'TransferRange', 'type': 'Short'},
343: {'group': 'ClipPath', 'type': 'Byte'},
344: {'group': 'XClipPathUnits', 'type': 'Long'},
345: {'group': 'YClipPathUnits', 'type': 'Long'},
346: {'group': 'Indexed', 'type': 'Short'},
347: {'group': 'JPEGTables', 'type': 'Undefined'},
351: {'group': 'OPIProxy', 'type': 'Short'},
512: {'group': 'JPEGProc', 'type': 'Long'},
513: {'group': 'JPEGInterchangeFormat', 'type': 'Long'},
514: {'group': 'JPEGInterchangeFormatLength', 'type': 'Long'},
515: {'group': 'JPEGRestartInterval', 'type': 'Short'},
517: {'group': 'JPEGLosslessPredictors', 'type': 'Short'},
518: {'group': 'JPEGPointTransforms', 'type': 'Short'},
519: {'group': 'JPEGQTables', 'type': 'Long'},
520: {'group': 'JPEGDCTables', 'type': 'Long'},
521: {'group': 'JPEGACTables', 'type': 'Long'},
529: {'group': 'YCbCrCoefficients', 'type': 'Rational'},
530: {'group': 'YCbCrSubSampling', 'type': 'Short'},
531: {'group': 'YCbCrPositioning', 'type': 'Short'},
532: {'group': 'ReferenceBlackWhite', 'type': 'Rational'},
700: {'group': 'XMLPacket', 'type': 'Byte'},
18246: {'group': 'Rating', 'type': 'Short'},
18249: {'group': 'RatingPercent', 'type': 'Short'},
32781: {'group': 'ImageID', 'type': 'Ascii'},
33421: {'group': 'CFARepeatPatternDim', 'type': 'Short'},
33422: {'group': 'CFAPattern', 'type': 'Byte'},
33423: {'group': 'BatteryLevel', 'type': 'Rational'},
33432: {'group': 'Copyright', 'type': 'Ascii'},
33434: {'group': 'ExposureTime', 'type': 'Rational'},
34377: {'group': 'ImageResources', 'type': 'Byte'},
34665: {'group': 'ExifTag', 'type': 'Long'},
34675: {'group': 'InterColorProfile', 'type': 'Undefined'},
34853: {'group': 'GPSTag', 'type': 'Long'},
34857: {'group': 'Interlace', 'type': 'Short'},
34858: {'group': 'TimeZoneOffset', 'type': 'Long'},
34859: {'group': 'SelfTimerMode', 'type': 'Short'},
37387: {'group': 'FlashEnergy', 'type': 'Rational'},
37388: {'group': 'SpatialFrequencyResponse', 'type': 'Undefined'},
37389: {'group': 'Noise', 'type': 'Undefined'},
37390: {'group': 'FocalPlaneXResolution', 'type': 'Rational'},
37391: {'group': 'FocalPlaneYResolution', 'type': 'Rational'},
37392: {'group': 'FocalPlaneResolutionUnit', 'type': 'Short'},
37393: {'group': 'ImageNumber', 'type': 'Long'},
37394: {'group': 'SecurityClassification', 'type': 'Ascii'},
37395: {'group': 'ImageHistory', 'type': 'Ascii'},
37397: {'group': 'ExposureIndex', 'type': 'Rational'},
37398: {'group': 'TIFFEPStandardID', 'type': 'Byte'},
37399: {'group': 'SensingMethod', 'type': 'Short'},
40091: {'group': 'XPTitle', 'type': 'Byte'},
40092: {'group': 'XPComment', 'type': 'Byte'},
40093: {'group': 'XPAuthor', 'type': 'Byte'},
40094: {'group': 'XPKeywords', 'type': 'Byte'},
40095: {'group': 'XPSubject', 'type': 'Byte'},
50341: {'group': 'PrintImageMatching', 'type': 'Undefined'},
50706: {'group': 'DNGVersion', 'type': 'Byte'},
50707: {'group': 'DNGBackwardVersion', 'type': 'Byte'},
50708: {'group': 'UniqueCameraModel', 'type': 'Ascii'},
50709: {'group': 'LocalizedCameraModel', 'type': 'Byte'},
50710: {'group': 'CFAPlaneColor', 'type': 'Byte'},
50711: {'group': 'CFALayout', 'type': 'Short'},
50712: {'group': 'LinearizationTable', 'type': 'Short'},
50713: {'group': 'BlackLevelRepeatDim', 'type': 'Short'},
50714: {'group': 'BlackLevel', 'type': 'Rational'},
50715: {'group': 'BlackLevelDeltaH', 'type': 'SRational'},
50716: {'group': 'BlackLevelDeltaV', 'type': 'SRational'},
50717: {'group': 'WhiteLevel', 'type': 'Short'},
50718: {'group': 'DefaultScale', 'type': 'Rational'},
50719: {'group': 'DefaultCropOrigin', 'type': 'Short'},
50720: {'group': 'DefaultCropSize', 'type': 'Short'},
50721: {'group': 'ColorMatrix1', 'type': 'SRational'},
50722: {'group': 'ColorMatrix2', 'type': 'SRational'},
50723: {'group': 'CameraCalibration1', 'type': 'SRational'},
50724: {'group': 'CameraCalibration2', 'type': 'SRational'},
50725: {'group': 'ReductionMatrix1', 'type': 'SRational'},
50726: {'group': 'ReductionMatrix2', 'type': 'SRational'},
50727: {'group': 'AnalogBalance', 'type': 'Rational'},
50728: {'group': 'AsShotNeutral', 'type': 'Short'},
50729: {'group': 'AsShotWhiteXY', 'type': 'Rational'},
50730: {'group': 'BaselineExposure', 'type': 'SRational'},
50731: {'group': 'BaselineNoise', 'type': 'Rational'},
50732: {'group': 'BaselineSharpness', 'type': 'Rational'},
50733: {'group': 'BayerGreenSplit', 'type': 'Long'},
50734: {'group': 'LinearResponseLimit', 'type': 'Rational'},
50735: {'group': 'CameraSerialNumber', 'type': 'Ascii'},
50736: {'group': 'LensInfo', 'type': 'Rational'},
50737: {'group': 'ChromaBlurRadius', 'type': 'Rational'},
50738: {'group': 'AntiAliasStrength', 'type': 'Rational'},
50739: {'group': 'ShadowScale', 'type': 'SRational'},
50740: {'group': 'DNGPrivateData', 'type': 'Byte'},
50741: {'group': 'MakerNoteSafety', 'type': 'Short'},
50778: {'group': 'CalibrationIlluminant1', 'type': 'Short'},
50779: {'group': 'CalibrationIlluminant2', 'type': 'Short'},
50780: {'group': 'BestQualityScale', 'type': 'Rational'},
50781: {'group': 'RawDataUniqueID', 'type': 'Byte'},
50827: {'group': 'OriginalRawFileName', 'type': 'Byte'},
50828: {'group': 'OriginalRawFileData', 'type': 'Undefined'},
50829: {'group': 'ActiveArea', 'type': 'Short'},
50830: {'group': 'MaskedAreas', 'type': 'Short'},
50831: {'group': 'AsShotICCProfile', 'type': 'Undefined'},
50832: {'group': 'AsShotPreProfileMatrix', 'type': 'SRational'},
50833: {'group': 'CurrentICCProfile', 'type': 'Undefined'},
50834: {'group': 'CurrentPreProfileMatrix', 'type': 'SRational'},
50879: {'group': 'ColorimetricReference', 'type': 'Short'},
50931: {'group': 'CameraCalibrationSignature', 'type': 'Byte'},
50932: {'group': 'ProfileCalibrationSignature', 'type': 'Byte'},
50934: {'group': 'AsShotProfileName', 'type': 'Byte'},
50935: {'group': 'NoiseReductionApplied', 'type': 'Rational'},
50936: {'group': 'ProfileName', 'type': 'Byte'},
50937: {'group': 'ProfileHueSatMapDims', 'type': 'Long'},
50938: {'group': 'ProfileHueSatMapData1', 'type': 'Float'},
50939: {'group': 'ProfileHueSatMapData2', 'type': 'Float'},
50940: {'group': 'ProfileToneCurve', 'type': 'Float'},
50941: {'group': 'ProfileEmbedPolicy', 'type': 'Long'},
50942: {'group': 'ProfileCopyright', 'type': 'Byte'},
50964: {'group': 'ForwardMatrix1', 'type': 'SRational'},
50965: {'group': 'ForwardMatrix2', 'type': 'SRational'},
50966: {'group': 'PreviewApplicationName', 'type': 'Byte'},
50967: {'group': 'PreviewApplicationVersion', 'type': 'Byte'},
50968: {'group': 'PreviewSettingsName', 'type': 'Byte'},
50969: {'group': 'PreviewSettingsDigest', 'type': 'Byte'},
50970: {'group': 'PreviewColorSpace', 'type': 'Long'},
50971: {'group': 'PreviewDateTime', 'type': 'Ascii'},
50972: {'group': 'RawImageDigest', 'type': 'Undefined'},
50973: {'group': 'OriginalRawFileDigest', 'type': 'Undefined'},
50974: {'group': 'SubTileBlockSize', 'type': 'Long'},
50975: {'group': 'RowInterleaveFactor', 'type': 'Long'},
50981: {'group': 'ProfileLookTableDims', 'type': 'Long'},
50982: {'group': 'ProfileLookTableData', 'type': 'Float'},
51008: {'group': 'OpcodeList1', 'type': 'Undefined'},
51009: {'group': 'OpcodeList2', 'type': 'Undefined'},
51022: {'group': 'OpcodeList3', 'type': 'Undefined'}},
'Photo': {33434: {'group': 'ExposureTime', 'type': 'Rational'},
33437: {'group': 'FNumber', 'type': 'Rational'},
34850: {'group': 'ExposureProgram', 'type': 'Short'},
34852: {'group': 'SpectralSensitivity', 'type': 'Ascii'},
34855: {'group': 'ISOSpeedRatings', 'type': 'Short'},
34856: {'group': 'OECF', 'type': 'Undefined'},
34864: {'group': 'SensitivityType', 'type': 'Short'},
34865: {'group': 'StandardOutputSensitivity', 'type': 'Long'},
34866: {'group': 'RecommendedExposureIndex', 'type': 'Long'},
34867: {'group': 'ISOSpeed', 'type': 'Long'},
34868: {'group': 'ISOSpeedLatitudeyyy', 'type': 'Long'},
34869: {'group': 'ISOSpeedLatitudezzz', 'type': 'Long'},
36864: {'group': 'ExifVersion', 'type': 'Undefined'},
36867: {'group': 'DateTimeOriginal', 'type': 'Ascii'},
36868: {'group': 'DateTimeDigitized', 'type': 'Ascii'},
37121: {'group': 'ComponentsConfiguration', 'type': 'Undefined'},
37122: {'group': 'CompressedBitsPerPixel', 'type': 'Rational'},
37377: {'group': 'ShutterSpeedValue', 'type': 'SRational'},
37378: {'group': 'ApertureValue', 'type': 'Rational'},
37379: {'group': 'BrightnessValue', 'type': 'SRational'},
37380: {'group': 'ExposureBiasValue', 'type': 'SRational'},
37381: {'group': 'MaxApertureValue', 'type': 'Rational'},
37382: {'group': 'SubjectDistance', 'type': 'Rational'},
37383: {'group': 'MeteringMode', 'type': 'Short'},
37384: {'group': 'LightSource', 'type': 'Short'},
37385: {'group': 'Flash', 'type': 'Short'},
37386: {'group': 'FocalLength', 'type': 'Rational'},
37396: {'group': 'SubjectArea', 'type': 'Short'},
37500: {'group': 'MakerNote', 'type': 'Undefined'},
37510: {'group': 'UserComment', 'type': 'Ascii'},
37520: {'group': 'SubSecTime', 'type': 'Ascii'},
37521: {'group': 'SubSecTimeOriginal', 'type': 'Ascii'},
37522: {'group': 'SubSecTimeDigitized', 'type': 'Ascii'},
40960: {'group': 'FlashpixVersion', 'type': 'Undefined'},
40961: {'group': 'ColorSpace', 'type': 'Short'},
40962: {'group': 'PixelXDimension', 'type': 'Long'},
40963: {'group': 'PixelYDimension', 'type': 'Long'},
40964: {'group': 'RelatedSoundFile', 'type': 'Ascii'},
40965: {'group': 'InteroperabilityTag', 'type': 'Long'},
41483: {'group': 'FlashEnergy', 'type': 'Rational'},
41484: {'group': 'SpatialFrequencyResponse', 'type': 'Undefined'},
41486: {'group': 'FocalPlaneXResolution', 'type': 'Rational'},
41487: {'group': 'FocalPlaneYResolution', 'type': 'Rational'},
41488: {'group': 'FocalPlaneResolutionUnit', 'type': 'Short'},
41492: {'group': 'SubjectLocation', 'type': | |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Core iPOPO implementation
:author: <NAME>
:copyright: Copyright 2016, <NAME>
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import copy
import inspect
import logging
import threading
# Pelix
from pelix.constants import SERVICE_ID, BundleActivator
from pelix.framework import Bundle, BundleException
from pelix.internals.events import BundleEvent, ServiceEvent
from pelix.utilities import add_listener, remove_listener, is_string
# iPOPO constants
import pelix.ipopo.constants as constants
import pelix.ipopo.handlers.constants as handlers_const
# iPOPO beans
from pelix.ipopo.contexts import FactoryContext, ComponentContext
from pelix.ipopo.instance import StoredInstance
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Prepare the module logger
_logger = logging.getLogger("ipopo.core")
# Built-in handlers, automatically installed
BUILTIN_HANDLERS = ('pelix.ipopo.handlers.properties',
'pelix.ipopo.handlers.provides',
'pelix.ipopo.handlers.requires',
'pelix.ipopo.handlers.requiresbest',
'pelix.ipopo.handlers.requiresmap',
'pelix.ipopo.handlers.requiresvarfilter',
'pelix.ipopo.handlers.temporal')
# ------------------------------------------------------------------------------
def _set_factory_context(factory_class, bundle_context):
    """
    Transforms the context data dictionary into its FactoryContext
    object form.

    :param factory_class: A manipulated class
    :param bundle_context: The class bundle context
    :return: The factory context, None on error
    """
    try:
        # Classes manipulated by the iPOPO decorators carry their factory
        # context in a well-known attribute
        factory_context = getattr(
            factory_class, constants.IPOPO_FACTORY_CONTEXT)
    except AttributeError:
        # Not a manipulated class (or too badly manipulated)
        return None

    if factory_context.completed:
        # Bind the factory to the bundle that declared it
        factory_context.set_bundle_context(bundle_context)
        return factory_context

    # Partial context: the class manipulation did not complete
    return None
def _load_bundle_factories(bundle):
    """
    Retrieves a list of pairs (FactoryContext, factory class) with all
    readable manipulated classes found in the bundle.

    :param bundle: A Bundle object
    :return: The list of factories loaded from the bundle
    """
    module = bundle.get_module()
    bundle_context = bundle.get_bundle_context()

    factories = []
    for _, candidate_class in inspect.getmembers(module, inspect.isclass):
        if inspect.getmodule(candidate_class) is not module:
            # Skip classes merely imported into the module
            continue

        factory_context = _set_factory_context(candidate_class,
                                               bundle_context)
        if factory_context is not None:
            factories.append((factory_context, candidate_class))

    return factories
# ------------------------------------------------------------------------------
class _IPopoService(object):
"""
The iPOPO registry and service
"""
    def __init__(self, bundle_context):
        """
        Sets up the iPOPO registry
        :param bundle_context: The iPOPO bundle context
        """
        # Store the bundle context
        self.__context = bundle_context
        # Factories registry : name -> factory class
        self.__factories = {}
        # Instances registry : name -> StoredInstance object
        self.__instances = {}
        # Event listeners
        self.__listeners = []
        # Auto-restarted components (Bundle -> [(factory, name, properties)])
        self.__auto_restart = {}
        # Service state
        self.running = False
        # Registries locks
        self.__factories_lock = threading.RLock()
        self.__instances_lock = threading.RLock()
        self.__listeners_lock = threading.RLock()
        self.__handlers_lock = threading.RLock()
        # Handlers factories
        self._handlers_refs = set()
        self._handlers = {}
        # Instances waiting for a handler: Name -> (ComponentContext, instance)
        self.__waiting_handlers = {}
        # Register the service listener, so handler factories registered
        # later are also picked up
        bundle_context.add_service_listener(
            self, None, handlers_const.SERVICE_IPOPO_HANDLER_FACTORY)
        # Grab the handler factories that were registered before us
        self.__find_handler_factories()
def __find_handler_factories(self):
"""
Finds all registered handler factories and stores them
"""
# Get the references
svc_refs = self.__context.get_all_service_references(
handlers_const.SERVICE_IPOPO_HANDLER_FACTORY)
if svc_refs:
for svc_ref in svc_refs:
# Store each handler factory
self.__add_handler_factory(svc_ref)
def __add_handler_factory(self, svc_ref):
"""
Stores a new handler factory
:param svc_ref: ServiceReference of the new handler factory
"""
with self.__handlers_lock:
# Get the handler ID
handler_id = svc_ref.get_property(handlers_const.PROP_HANDLER_ID)
if handler_id in self._handlers:
# Duplicated ID
_logger.warning("Already registered handler ID: %s",
handler_id)
else:
# Store the service
self._handlers_refs.add(svc_ref)
self._handlers[handler_id] = \
self.__context.get_service(svc_ref)
# Try to instantiate waiting components
succeeded = set()
for name, (context, instance) \
in self.__waiting_handlers.items():
if self.__try_instantiate(context, instance):
succeeded.add(name)
# Remove instantiated component from the waiting list
for name in succeeded:
del self.__waiting_handlers[name]
    def __remove_handler_factory(self, svc_ref):
        """
        Removes a handler factory: kills every component that depends on
        the handler, queues them for re-instantiation, and tries to find a
        replacement factory providing the same handler ID.
        :param svc_ref: ServiceReference of the handler factory to remove
        """
        with self.__handlers_lock:
            # Get the handler ID
            handler_id = svc_ref.get_property(handlers_const.PROP_HANDLER_ID)
            # Check if this is the handler we use
            if svc_ref not in self._handlers_refs:
                return
            # Clean up: release the service and drop it from our registries
            self.__context.unget_service(svc_ref)
            self._handlers_refs.remove(svc_ref)
            del self._handlers[handler_id]
            # List the components using this handler
            to_stop = set()
            for factory_name in self.__factories:
                _, context = self.__get_factory_with_context(factory_name)
                if handler_id in context.get_handlers_ids():
                    to_stop.update(self.__get_stored_instances(factory_name))
            with self.__instances_lock:
                for stored_instance in to_stop:
                    # Extract information
                    context = stored_instance.context
                    name = context.name
                    instance = stored_instance.instance
                    # Clean up the stored instance (iPOPO side)
                    del self.__instances[name]
                    stored_instance.kill()
                    # Add the component to the waiting queue, so it restarts
                    # when an equivalent handler becomes available again
                    self.__waiting_handlers[name] = (context, instance)
            # Try to find a new handler factory providing the same ID
            new_ref = self.__context.get_service_reference(
                handlers_const.SERVICE_IPOPO_HANDLER_FACTORY,
                "({0}={1})".format(handlers_const.PROP_HANDLER_ID, handler_id))
            if new_ref is not None:
                self.__add_handler_factory(new_ref)
def __get_factory_with_context(self, factory_name):
"""
Retrieves the factory registered with the given and its factory context
:param factory_name: The name of the factory
:return: A (factory, context) tuple
:raise TypeError: Unknown factory, or factory not manipulated
"""
factory = self.__factories.get(factory_name)
if factory is None:
raise TypeError("Unknown factory '{0}'"
.format(factory_name))
# Get the factory context
factory_context = getattr(factory, constants.IPOPO_FACTORY_CONTEXT,
None)
if factory_context is None:
raise TypeError("Factory context missing in '{0}'"
.format(factory_name))
return factory, factory_context
def __get_handler_factories(self, handlers_ids):
"""
Returns the list of Handler Factories for the given Handlers IDs.
Raises a KeyError exception is a handler factory is missing.
:param handlers_ids: List of handlers IDs
:raise KeyError: A handler is missing
"""
# Look for the required handlers
return {self._handlers[handler_id] for handler_id in handlers_ids}
def __get_stored_instances(self, factory_name):
"""
Retrieves the list of all stored instances objects corresponding to
the given factory name
:param factory_name: A factory name
:return: All components instantiated from the given factory
"""
with self.__instances_lock:
return [stored_instance
for stored_instance in self.__instances.values()
if stored_instance.factory_name == factory_name]
    def __try_instantiate(self, component_context, instance):
        """
        Instantiates a component, if all of its handlers are there. Returns
        False if a handler is missing.

        :param component_context: A ComponentContext bean
        :param instance: The component instance
        :return: True if the component has started,
                 False if a handler is missing
        """
        # The whole sequence runs under the instances lock so that
        # registration, handler setup and the INSTANTIATED event are atomic
        # with respect to other registry operations.
        with self.__instances_lock:
            # Extract information about the component
            factory_context = component_context.factory_context
            handlers_ids = factory_context.get_handlers_ids()
            name = component_context.name
            factory_name = factory_context.name
            try:
                # Get handlers
                handler_factories = self.__get_handler_factories(handlers_ids)
            except KeyError:
                # A handler is missing, stop here; the caller is expected to
                # retry once the missing handler factory shows up
                return False
            # Instantiate the handlers
            all_handlers = set()
            for handler_factory in handler_factories:
                handlers = handler_factory.get_handlers(component_context,
                                                        instance)
                if handlers:
                    all_handlers.update(handlers)
            # Prepare the stored instance
            stored_instance = StoredInstance(self, component_context, instance,
                                             all_handlers)
            # Manipulate the properties
            for handler in all_handlers:
                handler.manipulate(stored_instance, instance)
            # Store the instance
            self.__instances[name] = stored_instance
            # Start the manager
            stored_instance.start()
            # Notify listeners now that every thing is ready to run.
            # NOTE: the event is deliberately fired before validation below.
            self._fire_ipopo_event(constants.IPopoEvent.INSTANTIATED,
                                   factory_name, name)
            # Try to validate it
            stored_instance.update_bindings()
            stored_instance.check_lifecycle()
            return True
def _autorestart_store_components(self, bundle):
"""
Stores the components of the given bundle with the auto-restart
property
:param bundle: A Bundle object
"""
with self.__instances_lock:
# Prepare the list of components
store = self.__auto_restart.setdefault(bundle, [])
for stored_instance in self.__instances.values():
# Get the factory name
factory = stored_instance.factory_name
if self.get_factory_bundle(factory) is bundle:
# Factory from this bundle
# Test component properties
properties = stored_instance.context.properties
if properties.get(constants.IPOPO_AUTO_RESTART):
# Auto-restart property found
store.append((factory, stored_instance.name,
properties))
def _autorestart_components(self, bundle):
"""
Restart the components of the given bundle
:param bundle: A Bundle object
"""
with self.__instances_lock:
instances = self.__auto_restart.get(bundle)
if not instances:
# Nothing to do
return
for factory, name, properties in instances:
try:
# Instantiate the given component
self.instantiate(factory, name, properties)
except Exception as ex:
# Log error, but continue to work
_logger.exception("Error restarting component '%s' ('%s')"
"from bundle %s (%d): %s", name, factory,
bundle.get_symbolic_name(),
bundle.get_bundle_id(), ex)
def _autorestart_clear_components(self, bundle):
"""
Clear the list of auto-restart components of the given bundle
:param bundle: A Bundle object
"""
with self.__instances_lock:
# Simply delete the entry, if any
try:
del self.__auto_restart[bundle]
except KeyError:
pass
def _fire_ipopo_event(self, kind, factory_name, instance_name=None):
"""
Triggers an iPOPO event
:param kind: Kind of event
:param factory_name: Name of the factory associated to the event
:param instance_name: Name of the component instance associated to | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
# Public output types generated for the Data Pipelines v1 API; wildcard
# imports of this module expose exactly these names.
__all__ = [
    'GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironmentResponse',
    'GoogleCloudDatapipelinesV1LaunchFlexTemplateParameterResponse',
    'GoogleCloudDatapipelinesV1LaunchFlexTemplateRequestResponse',
    'GoogleCloudDatapipelinesV1LaunchTemplateParametersResponse',
    'GoogleCloudDatapipelinesV1LaunchTemplateRequestResponse',
    'GoogleCloudDatapipelinesV1RuntimeEnvironmentResponse',
    'GoogleCloudDatapipelinesV1ScheduleSpecResponse',
    'GoogleCloudDatapipelinesV1WorkloadResponse',
]
@pulumi.output_type
class GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironmentResponse(dict):
    """
    The environment values to be set at runtime for a Flex Template.
    """
    # NOTE: this output type subclasses dict. Values may be read either via
    # the snake_case property getters below or via dict-style access with the
    # camelCase wire keys; the latter triggers a warning pointing the caller
    # at the preferred property getter (see __key_warning).
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase wire-format key to its snake_case property name so
        # the warning below can suggest the preferred accessor.
        suggest = None
        if key == "additionalExperiments":
            suggest = "additional_experiments"
        elif key == "additionalUserLabels":
            suggest = "additional_user_labels"
        elif key == "enableStreamingEngine":
            suggest = "enable_streaming_engine"
        elif key == "flexrsGoal":
            suggest = "flexrs_goal"
        elif key == "ipConfiguration":
            suggest = "ip_configuration"
        elif key == "kmsKeyName":
            suggest = "kms_key_name"
        elif key == "machineType":
            suggest = "machine_type"
        elif key == "maxWorkers":
            suggest = "max_workers"
        elif key == "numWorkers":
            suggest = "num_workers"
        elif key == "serviceAccountEmail":
            suggest = "service_account_email"
        elif key == "tempLocation":
            suggest = "temp_location"
        elif key == "workerRegion":
            suggest = "worker_region"
        elif key == "workerZone":
            suggest = "worker_zone"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironmentResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn on camelCase access, then defer to the plain dict lookup
        GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironmentResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn on camelCase access, then defer to the plain dict lookup
        GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironmentResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 additional_experiments: Sequence[str],
                 additional_user_labels: Mapping[str, str],
                 enable_streaming_engine: bool,
                 flexrs_goal: str,
                 ip_configuration: str,
                 kms_key_name: str,
                 machine_type: str,
                 max_workers: int,
                 network: str,
                 num_workers: int,
                 service_account_email: str,
                 subnetwork: str,
                 temp_location: str,
                 worker_region: str,
                 worker_zone: str,
                 zone: str):
        """
        The environment values to be set at runtime for a Flex Template.
        :param Sequence[str] additional_experiments: Additional experiment flags for the job.
        :param Mapping[str, str] additional_user_labels: Additional user labels to be specified for the job. Keys and values must follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions). An object containing a list of key/value pairs. Example: `{ "name": "wrench", "mass": "1kg", "count": "3" }`.
        :param bool enable_streaming_engine: Whether to enable Streaming Engine for the job.
        :param str flexrs_goal: Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
        :param str ip_configuration: Configuration for VM IPs.
        :param str kms_key_name: Name for the Cloud KMS key for the job. Key format is: projects//locations//keyRings//cryptoKeys/
        :param str machine_type: The machine type to use for the job. Defaults to the value from the template if not specified.
        :param int max_workers: The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
        :param str network: Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
        :param int num_workers: The initial number of Compute Engine instances for the job.
        :param str service_account_email: The email address of the service account to run the job as.
        :param str subnetwork: Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
        :param str temp_location: The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with `gs://`.
        :param str worker_region: The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, defaults to the control plane region.
        :param str worker_zone: The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane region is chosen based on available capacity. If both `worker_zone` and `zone` are set, `worker_zone` takes precedence.
        :param str zone: The Compute Engine [availability zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones) for launching worker instances to run your pipeline. In the future, worker_zone will take precedence.
        """
        pulumi.set(__self__, "additional_experiments", additional_experiments)
        pulumi.set(__self__, "additional_user_labels", additional_user_labels)
        pulumi.set(__self__, "enable_streaming_engine", enable_streaming_engine)
        pulumi.set(__self__, "flexrs_goal", flexrs_goal)
        pulumi.set(__self__, "ip_configuration", ip_configuration)
        pulumi.set(__self__, "kms_key_name", kms_key_name)
        pulumi.set(__self__, "machine_type", machine_type)
        pulumi.set(__self__, "max_workers", max_workers)
        pulumi.set(__self__, "network", network)
        pulumi.set(__self__, "num_workers", num_workers)
        pulumi.set(__self__, "service_account_email", service_account_email)
        pulumi.set(__self__, "subnetwork", subnetwork)
        pulumi.set(__self__, "temp_location", temp_location)
        pulumi.set(__self__, "worker_region", worker_region)
        pulumi.set(__self__, "worker_zone", worker_zone)
        pulumi.set(__self__, "zone", zone)

    @property
    @pulumi.getter(name="additionalExperiments")
    def additional_experiments(self) -> Sequence[str]:
        """
        Additional experiment flags for the job.
        """
        return pulumi.get(self, "additional_experiments")

    @property
    @pulumi.getter(name="additionalUserLabels")
    def additional_user_labels(self) -> Mapping[str, str]:
        """
        Additional user labels to be specified for the job. Keys and values must follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions). An object containing a list of key/value pairs. Example: `{ "name": "wrench", "mass": "1kg", "count": "3" }`.
        """
        return pulumi.get(self, "additional_user_labels")

    @property
    @pulumi.getter(name="enableStreamingEngine")
    def enable_streaming_engine(self) -> bool:
        """
        Whether to enable Streaming Engine for the job.
        """
        return pulumi.get(self, "enable_streaming_engine")

    @property
    @pulumi.getter(name="flexrsGoal")
    def flexrs_goal(self) -> str:
        """
        Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
        """
        return pulumi.get(self, "flexrs_goal")

    @property
    @pulumi.getter(name="ipConfiguration")
    def ip_configuration(self) -> str:
        """
        Configuration for VM IPs.
        """
        return pulumi.get(self, "ip_configuration")

    @property
    @pulumi.getter(name="kmsKeyName")
    def kms_key_name(self) -> str:
        """
        Name for the Cloud KMS key for the job. Key format is: projects//locations//keyRings//cryptoKeys/
        """
        return pulumi.get(self, "kms_key_name")

    @property
    @pulumi.getter(name="machineType")
    def machine_type(self) -> str:
        """
        The machine type to use for the job. Defaults to the value from the template if not specified.
        """
        return pulumi.get(self, "machine_type")

    @property
    @pulumi.getter(name="maxWorkers")
    def max_workers(self) -> int:
        """
        The maximum number of Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
        """
        return pulumi.get(self, "max_workers")

    @property
    @pulumi.getter
    def network(self) -> str:
        """
        Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
        """
        return pulumi.get(self, "network")

    @property
    @pulumi.getter(name="numWorkers")
    def num_workers(self) -> int:
        """
        The initial number of Compute Engine instances for the job.
        """
        return pulumi.get(self, "num_workers")

    @property
    @pulumi.getter(name="serviceAccountEmail")
    def service_account_email(self) -> str:
        """
        The email address of the service account to run the job as.
        """
        return pulumi.get(self, "service_account_email")

    @property
    @pulumi.getter
    def subnetwork(self) -> str:
        """
        Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
        """
        return pulumi.get(self, "subnetwork")

    @property
    @pulumi.getter(name="tempLocation")
    def temp_location(self) -> str:
        """
        The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with `gs://`.
        """
        return pulumi.get(self, "temp_location")

    @property
    @pulumi.getter(name="workerRegion")
    def worker_region(self) -> str:
        """
        The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, defaults to the control plane region.
        """
        return pulumi.get(self, "worker_region")

    @property
    @pulumi.getter(name="workerZone")
    def worker_zone(self) -> str:
        """
        The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane region is chosen based on available capacity. If both `worker_zone` and `zone` are set, `worker_zone` takes precedence.
        """
        return pulumi.get(self, "worker_zone")

    @property
    @pulumi.getter
    def zone(self) -> str:
        """
        The Compute Engine [availability zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones) for launching worker instances to run your pipeline. In the future, worker_zone will take precedence.
        """
        return pulumi.get(self, "zone")
@pulumi.output_type
class GoogleCloudDatapipelinesV1LaunchFlexTemplateParameterResponse(dict):
"""
Launch Flex Template parameter.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "containerSpecGcsPath":
suggest = "container_spec_gcs_path"
elif key == "jobName":
suggest = "job_name"
elif key == "launchOptions":
suggest = "launch_options"
elif key == "transformNameMappings":
suggest = "transform_name_mappings"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in GoogleCloudDatapipelinesV1LaunchFlexTemplateParameterResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
GoogleCloudDatapipelinesV1LaunchFlexTemplateParameterResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
GoogleCloudDatapipelinesV1LaunchFlexTemplateParameterResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
container_spec_gcs_path: str,
environment: 'outputs.GoogleCloudDatapipelinesV1FlexTemplateRuntimeEnvironmentResponse',
job_name: str,
launch_options: Mapping[str, str],
parameters: Mapping[str, str],
transform_name_mappings: Mapping[str, str],
update: bool):
"""
Launch Flex Template parameter.
:param str | |
# Repository: Fournierp/FPL
import pandas as pd
import numpy as np
import os
import json
def get_raw_data(rank, path='data/fpl_official/2020-21/season/raw/'):
    """Load the scraped manager data for one rank cutoff.

    Reads ``managers_<rank>.json`` (one record per manager, ``orient='index'``)
    and splits it into per-topic DataFrames. FIX: the original re-parsed the
    same JSON file once per output frame (~11 reads); it is now read once.

    Args:
        rank: Rank cutoff identifying the file to read.
        path: Directory containing the ``managers_<rank>.json`` file.

    Returns:
        Tuple of DataFrames ``(chips, teams, caps, vice, bench_pts,
        transfers)``. ``chips`` is indexed by manager id (int) with one
        column per chip; the other frames keep the raw index and have one
        column per gameweek ('1'..'38').
    """
    f = os.path.join(path, f"managers_{rank}.json")
    # Parse the JSON a single time; every frame below is a view of one column.
    raw = pd.read_json(f, orient='index')
    gw_columns = [str(gw) for gw in np.arange(1, 39)]

    def build(column, columns):
        # Expand a column of per-manager lists into its own DataFrame and
        # drop the junk '[]' row produced by failed scrapes (if present).
        frame = pd.DataFrame(list(raw[column]), columns=columns,
                             index=raw.index)
        frame.drop('[]', inplace=True, errors='ignore')
        return frame

    chips = build('chips',
                  ['wildcard_1', 'freehit', 'bboost', 'wildcard_2', 'threexc'])
    # Change index type & Sort index; unused chips are encoded as 0
    chips.index = chips.index.map(int)
    chips.sort_index(inplace=True)
    chips = chips.fillna(0)
    chips = chips.astype(int)
    # Handle the cases when managers have only used their second Wildcard:
    # a lone wildcard played after GW16 is really the second wildcard.
    chips.loc[(chips['wildcard_2'] == 0) & (chips['wildcard_1'] > 16), 'wildcard_2'] = chips[chips['wildcard_2'] == 0]['wildcard_1']
    chips.loc[(chips['wildcard_1'] == chips['wildcard_2']) & (chips['wildcard_1'] > 16), 'wildcard_1'] = 0

    teams = build('team', gw_columns)
    caps = build('cap', gw_columns)
    vice = build('vice', gw_columns)
    bench_pts = build('bench_pts', gw_columns)
    transfers = build('transfers', gw_columns)
    return chips, teams, caps, vice, bench_pts, transfers
def get_season_points():
    """Recompute every manager's gameweek points from the raw picks data.

    For each rank cutoff (5k..100k, step 5k) this cross-references the
    scraped manager teams with the merged per-player gameweek stats and
    writes ``points_<rank>.csv`` (one row per manager, one column per GW).
    The season number is read from ``info.json``.
    """
    with open('info.json') as f:
        season_data = json.load(f)
        season = season_data['season']
    for rank in np.arange(5000, 105000, 5000):
        print(rank)
        chips, teams, caps, vice, bench_pts, transfers = get_raw_data(rank, f'data/fpl_official/{season}-{season % 2000 + 1}/season/raw/')
        points = pd.DataFrame().reindex_like(bench_pts)
        # Free transfers available per manager, indexed by manager id
        free_transfer = np.zeros(105000)
        all_gw_data = pd.read_csv(os.path.join(f'data/fpl_official/vaastav/data/{season}-{season % 2000 + 1}/gws/merged_gw.csv'))[['GW', 'element', 'total_points', 'minutes']]
        for gw in np.arange(1, 39):
            gw_data = all_gw_data[all_gw_data['GW'] == gw]
            for player in points.index:
                # Not registered team
                if not len(teams.loc[player, str(gw)]):
                    points.loc[player, str(gw)] = 0
                    continue
                # FPL Team
                fpl_team = gw_data[gw_data['element'].isin(teams.loc[player, str(gw)])]
                # All player pts
                points.loc[player, str(gw)] = sum(fpl_team['total_points'])
                # Captain/Vice points
                try:
                    # Triple Captain chip doubles the captaincy bonus
                    if chips.loc[int(player), 'threexc'] != gw:
                        multiplier = 1
                    else:
                        multiplier = 2
                    # This lookup throws an exception if the player has a BGW
                    if sum(gw_data[gw_data['element'] == caps.loc[player, str(gw)]]['minutes']) == 0:
                        # Captain does not play
                        points.loc[player, str(gw)] += multiplier * sum(fpl_team[fpl_team['element'] == vice.loc[player, str(gw)]]['total_points'].values)
                    points.loc[player, str(gw)] += multiplier * sum(fpl_team[fpl_team['element'] == caps.loc[player, str(gw)]]['total_points'].values)
                except:
                    # BGW fallback: credit the vice-captain instead.
                    # NOTE(review): if the exception came from the 'threexc'
                    # lookup above, `multiplier` is unbound (NameError) or
                    # stale from a previous iteration — confirm intended.
                    points.loc[player, str(gw)] += multiplier * sum(fpl_team[fpl_team['element'] == vice.loc[player, str(gw)]]['total_points'].values)
                # Bench
                if chips.loc[int(player), 'bboost'] != gw:
                    points.loc[player, str(gw)] -= bench_pts.loc[player, str(gw)]
                # Hits
                try:
                    if not (chips.loc[int(player), 'freehit'] == gw or chips.loc[int(player), 'wildcard_1'] == gw or chips.loc[int(player), 'wildcard_2'] == gw) :
                        transfer = len(transfers.loc[player, str(gw)]['in'])
                        hit = free_transfer[int(player)] - transfer
                        if hit > 0:
                            # No transfer
                            free_transfer[int(player)] = 2
                        elif hit == 0 :
                            # Used all FT
                            free_transfer[int(player)] = 1
                        else:
                            # Hits: each extra transfer costs 4 pts (hit < 0)
                            points.loc[player, str(gw)] += 4 * hit
                            free_transfer[int(player)] = 1
                    else:
                        # Chip week: transfers are free.
                        # NOTE(review): indexes with `player` while the other
                        # branches use `int(player)` — verify both resolve to
                        # the same slot when the index is not already int.
                        free_transfer[player] = 1
                except:
                    # No transfer data for this GW: accrue a free transfer
                    free_transfer[int(player)] = min(2, free_transfer[int(player)] + 1)
        points = points.fillna(0)
        points = points.astype(int)
        points.to_csv(f'data/fpl_official/{season}-{season % 2000 + 1}/season/processed/points_{rank}.csv')
def get_season_value():
    """Rebuild managers' squad value, bank balance and bench value per GW.

    For each rank cutoff, approximates ``team_value``, ``in_the_bank`` and
    ``bench_value`` DataFrames (one row per manager, one column per GW)
    from player prices in the merged gameweek data, then writes each to
    CSV. Values are in FPL tenths (1000 == the initial 100.0 budget).
    """
    with open('info.json') as f:
        season_data = json.load(f)
        season = season_data['season']
    for rank in np.arange(5000, 105000, 5000):
        print(rank)
        chips, teams, caps, vice, bench_pts, transfers = get_raw_data(rank, f'data/fpl_official/{season}-{season % 2000 + 1}/season/raw/')
        team_value = pd.DataFrame().reindex_like(bench_pts)
        in_the_bank = pd.DataFrame().reindex_like(bench_pts)
        bench_value = pd.DataFrame().reindex_like(bench_pts)
        all_gw_data = pd.read_csv(os.path.join(f'data/fpl_official/vaastav/data/{season}-{season % 2000 + 1}/gws/merged_gw.csv'))[['GW', 'element', 'value']]
        for gw in np.arange(1, 39):
            gw_data = all_gw_data[all_gw_data['GW'] == gw]
            # Next GW's prices stand in for players absent in a blank GW
            next_gw_data = all_gw_data[all_gw_data['GW'] == gw+1]
            for player in team_value.index:
                # Not registered team
                if not len(teams.loc[player, str(gw)]):
                    team_value.loc[player, str(gw)] = 1000
                    continue
                # FPL Team (last 4 squad slots are the bench)
                fpl_team = gw_data[gw_data['element'].isin(teams.loc[player, str(gw)])].drop_duplicates(subset='element', keep="first")
                fpl_bench = gw_data[gw_data['element'].isin(teams.loc[player, str(gw)][-4:])].drop_duplicates(subset='element', keep="first")
                next_fpl_team = next_gw_data[next_gw_data['element'].isin([player_id for player_id in teams.loc[player, str(gw)] if player_id not in gw_data['element'].values])].drop_duplicates(subset='element', keep="first")
                next_fpl_bench = next_gw_data[next_gw_data['element'].isin([player_id for player_id in teams.loc[player, str(gw)][-4:] if player_id not in gw_data['element'].values])].drop_duplicates(subset='element', keep="first")
                # Team value
                # Handle missing players from DF due to BGW
                team_value.loc[player, str(gw)] = (
                    sum(fpl_team['value']) +
                    sum(next_fpl_team['value'])
                )
                bench_value.loc[player, str(gw)] = (
                    sum(fpl_bench['value']) +
                    sum(next_fpl_bench['value'])
                )
                if gw == 1 :
                    # Initial budget is 1000; whatever is not spent is banked
                    in_the_bank.loc[player, '1'] = 1000 - team_value.loc[player, '1']
                elif transfers.loc[player, str(gw)] == transfers.loc[player, str(gw)]: # NaN check (NaN != NaN)
                    in_the_bank.loc[player, str(gw)] = in_the_bank.loc[player, str(gw-1)]
                    if chips.loc[int(player), 'freehit'] != gw:
                        # Huge approximative because of mid-week price changes.
                        # Get the next player value in case of BGW
                        gw_value = (
                            in_the_bank.loc[player, str(gw-1)] +
                            sum(gw_data[gw_data['element'].isin(list(transfers.loc[player, str(gw)]['out'].values()))].drop_duplicates(subset='element', keep="first")['value']) -
                            sum(gw_data[gw_data['element'].isin(list(transfers.loc[player, str(gw)]['in'].values()))].drop_duplicates(subset='element', keep="first")['value'])
                        )
                        in_missing = [player_id for player_id in transfers.loc[player, str(gw)]['in'].values() if player_id not in gw_data['element'].values]
                        out_missing = [player_id for player_id in transfers.loc[player, str(gw)]['out'].values() if player_id not in gw_data['element'].values]
                        if len(out_missing):
                            gw_value += sum(next_gw_data[next_gw_data['element'].isin(out_missing)].drop_duplicates(subset='element', keep="first")['value'])
                        if len(in_missing) :
                            gw_value -= sum(next_gw_data[next_gw_data['element'].isin(in_missing)].drop_duplicates(subset='element', keep="first")['value'])
                        in_the_bank.loc[player, str(gw)] = max(0, gw_value)
                    else:
                        # Free Hit week: squad reverts, bank carries over
                        in_the_bank.loc[player, str(gw)] = in_the_bank.loc[player, str(gw-1)]
        team_value = team_value.fillna(0)
        team_value = team_value.astype(int)
        in_the_bank = in_the_bank.fillna(0)
        in_the_bank = in_the_bank.astype(int)
        bench_value = bench_value.fillna(0)
        bench_value = bench_value.astype(int)
        team_value.to_csv(f'data/fpl_official/{season}-{season % 2000 + 1}/season/processed/team_value_{rank}.csv')
        in_the_bank.to_csv(f'data/fpl_official/{season}-{season % 2000 + 1}/season/processed/in_the_bank_{rank}.csv')
        bench_value.to_csv(f'data/fpl_official/{season}-{season % 2000 + 1}/season/processed/bench_value_{rank}.csv')
def get_season_formation():
    """Derive each manager's formation and bench order per gameweek.

    The formation is encoded as ``DEF*100 + MID*10 + FWD`` (e.g. 442) from
    the 11 starters; the bench order encodes the positions of the last
    three bench slots as a 3-digit code (DEF=1, MID=2, FWD=3). One CSV per
    metric and rank cutoff is written.
    """
    with open('info.json') as f:
        season_data = json.load(f)
        season = season_data['season']
    for rank in np.arange(5000, 105000, 5000):
        print(rank)
        chips, teams, caps, vice, bench_pts, transfers = get_raw_data(rank, f'data/fpl_official/{season}-{season % 2000 + 1}/season/raw/')
        team_formation = pd.DataFrame().reindex_like(bench_pts)
        bench_order = pd.DataFrame().reindex_like(bench_pts)
        all_gw_data = pd.read_csv(os.path.join(f'data/fpl_official/vaastav/data/{season}-{season % 2000 + 1}/gws/merged_gw.csv'))
        # Keep one (position, element) row per player id
        all_gw_data = all_gw_data[['position', 'element']].drop_duplicates(subset='element', keep="first")
        for gw in np.arange(1, 39):
            for player in team_formation.index:
                # Formation (the last 4 squad slots are the bench)
                lineup = all_gw_data['element'].isin(teams.loc[player, str(gw)][:-4])
                team_formation.loc[player, str(gw)] = (
                    all_gw_data[(lineup) & (all_gw_data['position'] == 'FWD')].shape[0] +
                    all_gw_data[(lineup) & (all_gw_data['position'] == 'MID')].shape[0] * 10 +
                    all_gw_data[(lineup) & (all_gw_data['position'] == 'DEF')].shape[0] * 100
                )
                mapping = {
                    'DEF': 1,
                    'MID': 2,
                    'FWD': 3,
                }
                try:
                    # Last three bench slots (the first bench slot is the GK)
                    bench_order.loc[player, str(gw)] = (
                        100 * mapping[all_gw_data[all_gw_data['element'].isin([teams.loc[player, str(gw)][-3]])]['position'].values[0]] + \
                        10 * mapping[all_gw_data[all_gw_data['element'].isin([teams.loc[player, str(gw)][-2]])]['position'].values[0]] + \
                        mapping[all_gw_data[all_gw_data['element'].isin([teams.loc[player, str(gw)][-1]])]['position'].values[0]]
                    )
                except:
                    # Unregistered team or player missing from the stats file
                    bench_order.loc[player, str(gw)] = 0
        team_formation = team_formation.fillna(0)
        team_formation = team_formation.astype(int)
        bench_order = bench_order.fillna(0)
        bench_order = bench_order.astype(int)
        team_formation.to_csv(f'data/fpl_official/{season}-{season % 2000 + 1}/season/processed/team_formation_{rank}.csv')
        bench_order.to_csv(f'data/fpl_official/{season}-{season % 2000 + 1}/season/processed/bench_order_{rank}.csv')
def get_season_pos_values():
    """Split each manager's squad value by position (GK/DEF/MID/FWD).

    Player prices are summed per position over the squad; previous and
    next GW prices stand in for players missing from this GW's data. One
    CSV per position and rank cutoff is written.
    """
    with open('info.json') as f:
        season_data = json.load(f)
        season = season_data['season']
    for rank in np.arange(5000, 105000, 5000):
        print(rank)
        chips, teams, caps, vice, bench_pts, transfers = get_raw_data(rank, f'data/fpl_official/{season}-{season % 2000 + 1}/season/raw/')
        gk_value = pd.DataFrame().reindex_like(bench_pts)
        def_value = pd.DataFrame().reindex_like(bench_pts)
        mid_value = pd.DataFrame().reindex_like(bench_pts)
        fwd_value = pd.DataFrame().reindex_like(bench_pts)
        all_gw_data = pd.read_csv(os.path.join(f'data/fpl_official/vaastav/data/{season}-{season % 2000 + 1}/gws/merged_gw.csv'))[['GW', 'position', 'element', 'minutes', 'value']]
        for gw in np.arange(1, 39):
            # Neighbouring GWs provide prices for players absent this GW
            prev_gw_data = all_gw_data[all_gw_data['GW'] == gw-1]
            gw_data = all_gw_data[all_gw_data['GW'] == gw]
            next_gw_data = all_gw_data[all_gw_data['GW'] == gw+1]
            for player in gk_value.index:
                # FPL Team
                prev_fpl_team = prev_gw_data[prev_gw_data['element'].isin([player_id for player_id in teams.loc[player, str(gw)] if player_id not in gw_data['element'].values])].drop_duplicates(subset='element', keep="first")
                fpl_team = gw_data[gw_data['element'].isin(teams.loc[player, str(gw)])].drop_duplicates(subset='element', keep="first")
                next_fpl_team = next_gw_data[next_gw_data['element'].isin([player_id for player_id in teams.loc[player, str(gw)] if player_id not in gw_data['element'].values])].drop_duplicates(subset='element', keep="first")
                # Team value
                # Handle missing players from DF due to BGW.
                # NOTE(review): a player absent this GW appears in BOTH
                # prev_fpl_team and next_fpl_team, so their price can be
                # counted twice when priced in both neighbouring GWs —
                # confirm this is intended.
                gk_value.loc[player, str(gw)] = (
                    sum(prev_fpl_team[prev_fpl_team['position'] == 'GK']['value']) +
                    sum(fpl_team[fpl_team['position'] == 'GK']['value']) +
                    sum(next_fpl_team[next_fpl_team['position'] == 'GK']['value'])
                )
                def_value.loc[player, str(gw)] = (
                    sum(prev_fpl_team[prev_fpl_team['position'] == 'DEF']['value']) +
                    sum(fpl_team[fpl_team['position'] == 'DEF']['value']) +
                    sum(next_fpl_team[next_fpl_team['position'] == 'DEF']['value'])
                )
                mid_value.loc[player, str(gw)] = (
                    sum(prev_fpl_team[prev_fpl_team['position'] == 'MID']['value']) +
                    sum(fpl_team[fpl_team['position'] == 'MID']['value']) +
                    sum(next_fpl_team[next_fpl_team['position'] == 'MID']['value'])
                )
                fwd_value.loc[player, str(gw)] = (
                    sum(prev_fpl_team[prev_fpl_team['position'] == 'FWD']['value']) +
                    sum(fpl_team[fpl_team['position'] == 'FWD']['value']) +
                    sum(next_fpl_team[next_fpl_team['position'] == 'FWD']['value'])
                )
        gk_value = gk_value.fillna(0)
        gk_value = gk_value.astype(int)
        def_value = def_value.fillna(0)
        def_value = def_value.astype(int)
        mid_value = mid_value.fillna(0)
        mid_value = mid_value.astype(int)
        fwd_value = fwd_value.fillna(0)
        fwd_value = fwd_value.astype(int)
        gk_value.to_csv(f'data/fpl_official/{season}-{season % 2000 + 1}/season/processed/gk_value_{rank}.csv')
        def_value.to_csv(f'data/fpl_official/{season}-{season % 2000 + 1}/season/processed/def_value_{rank}.csv')
        mid_value.to_csv(f'data/fpl_official/{season}-{season % 2000 + 1}/season/processed/mid_value_{rank}.csv')
        fwd_value.to_csv(f'data/fpl_official/{season}-{season % 2000 + 1}/season/processed/fwd_value_{rank}.csv')
def get_season_assets():
with open('info.json') as f:
season_data = json.load(f)
season = season_data['season']
for rank in np.arange(5000, 105000, 5000):
print(rank)
chips, teams, caps, vice, bench_pts, transfers = get_raw_data(rank, f'data/fpl_official/{season}-{season % 2000 + 1}/season/raw/')
gk_premiums = pd.DataFrame().reindex_like(bench_pts)
def_premiums = pd.DataFrame().reindex_like(bench_pts)
mid_premiums = pd.DataFrame().reindex_like(bench_pts)
fwd_premiums = pd.DataFrame().reindex_like(bench_pts)
gk_cheap = pd.DataFrame().reindex_like(bench_pts)
def_cheap = pd.DataFrame().reindex_like(bench_pts)
mid_cheap = pd.DataFrame().reindex_like(bench_pts)
fwd_cheap = pd.DataFrame().reindex_like(bench_pts)
all_gw_data = pd.read_csv(os.path.join(f'data/fpl_official/vaastav/data/{season}-{season % 2000 + 1}/gws/merged_gw.csv'))[['GW', 'position', 'element', 'minutes', 'value']]
for gw in np.arange(1, 39):
prev_gw_data = all_gw_data[all_gw_data['GW'] == gw-1]
gw_data = all_gw_data[all_gw_data['GW'] == gw]
next_gw_data = all_gw_data[all_gw_data['GW'] == gw+1]
for player in gk_premiums.index:
# FPL Team
prev_fpl_team = prev_gw_data[prev_gw_data['element'].isin([player_id for player_id in teams.loc[player, str(gw)] if player_id not in gw_data['element'].values])].drop_duplicates(subset='element', keep="first")
fpl_team = gw_data[gw_data['element'].isin(teams.loc[player, str(gw)])].drop_duplicates(subset='element', keep="first")
next_fpl_team = next_gw_data[next_gw_data['element'].isin([player_id for player_id in teams.loc[player, str(gw)] if player_id not in gw_data['element'].values])].drop_duplicates(subset='element', keep="first")
# Team value
# Handle missing players from DF due to BGW
gk_premiums.loc[player, str(gw)] = (
sum(prev_fpl_team[prev_fpl_team['position'] == 'GK']['value'] > 55) +
sum(fpl_team[fpl_team['position'] == 'GK']['value'] > 55) +
sum(next_fpl_team[next_fpl_team['position'] == 'GK']['value'] > 55)
)
def_premiums.loc[player, str(gw)] = (
sum(prev_fpl_team[prev_fpl_team['position'] == 'DEF']['value'] >= 65) +
sum(fpl_team[fpl_team['position'] == 'DEF']['value'] | |
#!/usr/bin/python3
"""
poly MBA generation:
poly = linear * linear * linear ...
add 0-equality:
0-equality = 0 * linear
= (part-term) * (-1*linear)
poly = poly + 0-equality
"""
import argparse
import numpy as np
import os
import random
import re
import sys
sys.setrecursionlimit(30000)
sys.path.append("../tools")
import traceback
import z3
from lMBA_generate import complex_groundtruth
from mba_string_operation import verify_mba_unsat, expression_2_term, generate_coe_bit, addMBA, variable_list
class PolyMBAGenerator():
"""polynomial MBA expression generation, just for MBA * MBA.
Attributes:
vnumber1: the number of variables in the first file.
vnumber2: the number of variables in the second file.
MBAfile1: the file storing the MBA expression.
MBAfile2: the another file storing the MBA expression.
MBAdesfile: the file storing the generated MBA expression.
"""
def __init__(self, vnumber1, vnumber2, MBAfile1=None, MBAfile2=None, MBAdesfile=None):
if vnumber1 in [1, 2, 3, 4] and vnumber2 in [1, 2,3,4]:
self.vnumber1 = vnumber1
self.vnumber2 = vnumber2
else:
print("the value of vnumber is wrong!")
traceback.print_stack()
sys.exit(0)
if not MBAfile1:
self.MBAfile1 = "../dataset/lMBA_{vnumber}variable.dataset.sorted.txt".format(vnumber=vnumber1)
else:
self.MBAfile1 = MBAfile1
self.MBAList1 = self.get_MBA(self.MBAfile1)
if not MBAfile2:
self.MBAfile2 = "../dataset/lMBA_{vnumber}variable.dataset.sorted.txt".format(vnumber=vnumber2)
#self.MBAfile2 = r"pMBA_{vnumber}*{vnumber}variable.dataset.sorted.txt".format(vnumber=vnumber2)
else:
self.MBAfile2 = MBAfile2
self.MBAList2 = self.get_MBA(self.MBAfile2)
if not MBAdesfile:
self.MBAdesfile = "../dataset/pMBA_{vnumber1}_{vnumber2}variable.dataset.txt".format(vnumber1=self.vnumber1, vnumber2=self.vnumber2)
else:
self.MBAdesfile = MBAdesfile
return None
def get_MBA(self, fileread):
"""read the file storing linear MBA expression.
Arg:
fileread: the file storing linear MBA expression.
Return:
MBAList: the list of pair on complex MBA and related ground truth.
"""
MBAList = []
with open(fileread, "r") as fr:
linenum = 0
for line in fr:
if "#" not in line:
line = line.strip("\n")
itemList = re.split(",", line)
cmba = itemList[0]
gmba = itemList[1]
MBAList.append([cmba, gmba])
return MBAList
def generate_pmba_dataset(self, mbanumber):
"""generate the polynomial MBA expression dataset.
Args:
mbanumber: the nubmer of mba expression in the dataset.
"""
filewrite = self.MBAdesfile
fw = open(filewrite, "w")
print("#complex, groundtruth, z3flag, c_terms, g_terms", file=fw)
#linenum = 0
for i in range(mbanumber):
expreList1 = random.choice(self.MBAList1)
expreList2 = random.choice(self.MBAList2)
(cmbaexpreList, gmbaexpreList) = self.generate_one_pMBA(expreList1, expreList2)
#complex mba expression
cmbaexpre = cmbaexpreList[0]
cmbaterm = "{item1}*{item2}".format(item1=cmbaexpreList[1], item2=cmbaexpreList[2])
#ground truth
gmbaexpre = gmbaexpreList[0]
gmbaterm = "{item1}*{item2}".format(item1=gmbaexpreList[1], item2=gmbaexpreList[2])
print("z3 solving...")
z3res = verify_mba_unsat(cmbaexpre, gmbaexpre, 2)
print("z3 result: ", z3res)
print(cmbaexpre, gmbaexpre, z3res, cmbaterm, gmbaterm, sep=",", file=fw, flush=True)
fw.close()
return None
def generate_one_pMBA(self, expreList1, expreList2):
"""generate one poly MBA expression based on two linear MBA expressions.
Args:
expreList1: pair of one linear MBA expression, such as [complexMBA, groundtruth]
expreList2: pair of another one linear MBA expression, like [complexMBA, groundtruth]
Returns:
cmbaexpreList: poly MBA expression, the terms of one complex MBA expression and the other one.
gmbaexpreList: the related ground truth, the terms of one ground truth and the other one.
"""
cmbaexpre1 = expreList1[0]
gmbaexpre1 = expreList1[1]
cmbaexpre2 = expreList2[0]
gmbaexpre2 = expreList2[1]
cmbaexpreList = self.MBA_multiply(cmbaexpre1, cmbaexpre2)
gmbaexpreList = self.MBA_multiply(gmbaexpre1, gmbaexpre2)
return cmbaexpreList, gmbaexpreList
    def MBA_multiply(self, mbaexpre1, mbaexpre2):
        """one MBA expression multiply the other one, must be coded in recursive version.

        Expands (a1+a2+...)*(b1+b2+...) term by term: each product term keeps
        the multiplied coefficients and joins the two bitwise parts with "*".
        The expansion is verified against the unexpanded product with z3 and
        the process aborts if they disagree.

        Args:
            mbaexpre1: one MBA expression.
            mbaexpre2: another one MBA expression.
        Returns:
            mbaexpre: the expression of mbaexpre1 * mbaexpre2
            term1: the number of terms of one expression.
            term2: the number of terms of another one expression.
        """
        #split the expression into terms
        mbaexpre1List = expression_2_term(mbaexpre1)
        #mbaexpre1List = [item for l in mbaexpre1List for item in l ]
        mbaexpre2List = expression_2_term(mbaexpre2)
        #mbaexpre2List = [item for l in mbaexpre2List for item in l ]
        #split the mba term in the pair: coefficient, bitwise
        coeBitList1 = generate_coe_bit(mbaexpre1List)
        coeBitList2 = generate_coe_bit(mbaexpre2List)
        #construct the terms in the new MBA expression (cartesian product)
        mbaexpreList = []
        for item1 in coeBitList1:
            for item2 in coeBitList2:
                coe1 = item1[0]
                bit1 = item1[1]
                coe2 = item2[0]
                bit2 = item2[1]
                coe = int(coe1) * int(coe2)
                coe = str(coe)
                bit = "{bit1}*{bit2}".format(bit1=bit1, bit2=bit2)
                # non-negative coefficients need an explicit "+" separator
                # so that joined terms remain a valid expression
                if "-" in coe:
                    mbaexpre = "{coe}*{bit}".format(coe=coe, bit=bit)
                else:
                    mbaexpre = "+{coe}*{bit}".format(coe=coe, bit=bit)
                mbaexpreList.append(mbaexpre)
        #delete the start addition in the expression
        if "+" in mbaexpreList[0]:
            mbaexpreList[0] = mbaexpreList[0][1:]
        #construct the MBA expression
        mbaexpre = "".join(mbaexpreList)
        #check resulting expression against the unexpanded product
        oriExpre = "({expre1})*({expre2})".format(expre1=mbaexpre1, expre2=mbaexpre2)
        z3res = verify_mba_unsat(oriExpre, mbaexpre)
        if not z3res:
            # abort: the expansion is not equivalent to the original product
            print("error in function of MBA_multiply!")
            traceback.print_stack()
            sys.exit(0)
        return mbaexpre, len(mbaexpre1List), len(mbaexpre2List)
def generate_pmba_transformation_dataset(self, mbanumber):
"""generate the polynomial MBA expression that has been added 0-equality.
Args:
mbanumber: the nubmer of mba expression in the dataset.
"""
#filewrite = self.MBAdesfile + ".transformation.txt"
fw = open(self.MBAdesfile, "w")
print("#complex, groundtruth, z3flag, c_terms, g_terms", file=fw)
#linenum = 0
for i in range(mbanumber):
expreList1 = random.choice(self.MBAList1)
expreList2 = random.choice(self.MBAList2)
(cmbaexpreList, gmbaexpreList) = self.generate_one_transform_pMBA(expreList1, expreList2)
#complex mba expression
cmbaexpre = cmbaexpreList[0]
cmbaterm = "{item1}*{item2}".format(item1=cmbaexpreList[1], item2=cmbaexpreList[2])
#ground truth
gmbaexpre = gmbaexpreList[0]
gmbaterm = "{item1}*{item2}".format(item1=gmbaexpreList[1], item2=gmbaexpreList[2])
print("z3 solving...")
z3res = verify_mba_unsat(cmbaexpre, gmbaexpre, 2)
print("z3 result: ", z3res)
print(cmbaexpre, gmbaexpre, z3res, cmbaterm, gmbaterm, sep=",", file=fw, flush=True)
fw.close()
return None
def generate_one_transform_pMBA(self, expreList1, expreList2):
"""generate one poly MBA expression that has been added one 0-equality.
Algorithm:
originalPOly = expreStr1 * expreStr2
0-equality = (-1/1 * expreStr1) * 0
= (-1/1 * expreStr1) * (sub-expression of expreStr2, randomly reverse the coefficient)
newPoly = originalPoly + 0-equality
0-equality is suggested to be a linear MBA expression, non-linear MBA is difficulty to generate it.
Args:
expreList1: pair of one linear MBA expression, such as [complexMBA, groundtruth]
expreList2: pair of another one linear MBA expression, like [complexMBA, groundtruth]
Returns:
cmbaexpreList: poly MBA expression, the terms of one complex MBA expression and the other one.
gmbaexpreList: the related ground truth, the terms of one ground truth and the other one.
"""
cmbaExpre1 = expreList1[0]
gmbaExpre1 = expreList1[1]
cmbaExpre2 = expreList2[0]
gmbaExpre2 = expreList2[1]
#original poly MBA expression
originalcmbaExpreList = self.MBA_multiply(cmbaExpre1, cmbaExpre2)
originalcmbaExpre = originalcmbaExpreList[0]
cmbaExpreterm1 = expression_2_term(cmbaExpre1)
cmbaExpreterm2 = expression_2_term(cmbaExpre2)
#randomly reverse the sign of every term in cmbaexpre1
for (idx, term) in enumerate(cmbaExpreterm1):
r = random.randint(1,3)
#not reversed
if r % 2:
cmbaExpreterm1[idx] = term
continue
#reversed
if term[0] == "+":
cmbaExpreterm1[idx] = "-" + term[1:]
elif term[0] == "-":
cmbaExpreterm1[idx] = "+" + term[1:]
else:
cmbaExpreterm1[idx] = "-" + term
#construct the -1/1 * mbaexpre
cmbaExpre1 = "".join(cmbaExpreterm1)
if cmbaExpre1[0] == "+":
cmbaExpre1 = cmbaExpre1[1:]
#get part of the mbaexpre
partterm = cmbaExpreterm2[:len(cmbaExpreterm2) // 2 + 1]
#randomly reverse the sign of every term in cmbaexpre1
for (idx, term) in enumerate(partterm):
r = random.randint(1,3)
#not reversed
if r % 2:
partterm[idx] = term
continue
#reversed
if term[0] == "+":
partterm[idx] = "-" + term[1:]
elif term[0] == "-":
partterm[idx] = "+" + term[1:]
else:
partterm[idx] = "-" + term
#construct the part expression
partExpre ="".join(partterm)
if partExpre[0] == "+":
partExpre = partExpre[1:]
#construct a expression that equals to 0
groundtruth = str(0)
zeroEquality = complex_groundtruth(groundtruth, partExpre)
#0-equality = 0-expression * part_mbaexpre
zeroEqualityList = self.MBA_multiply(cmbaExpre1, zeroEquality)
zeroMBAexpre = zeroEqualityList[0]
#newmba = orimba + 0-equality
mbaExpre = addMBA(originalcmbaExpre, zeroMBAexpre)
#ground truth does not to be changed
gmbaExpreList = self.MBA_multiply(gmbaExpre1, gmbaExpre2)
#construct cmbaExpreList
cmbaExpreList = [mbaExpre, len(expression_2_term(mbaExpre)), 1]
return cmbaExpreList, gmbaExpreList
def generate_pMBA_from_zeroequality(self, groundtruth):
"""generate one poly MBA expression that has been added one 0-equality.
Algorithm:
originalPOly = complex(groundtruth) * complex(1)
0-equality = (-1/1 * complex(groundtruth)) * 0
= (-1/1 * complex(groundtruth)) * (sub-expression of complex(1), randomly reverse the coefficient)
newPoly = originalPoly + 0-equality
0-equality is suggested to be a linear MBA expression, non-linear MBA is difficulty to generate it.
Args:
groundtruth: a simplified expression.
Returns:
mbaexpre: the result poly mba expression.
"""
cmbaExpre1 = complex_groundtruth(groundtruth)
cmbaExpre2 = complex_groundtruth("1")
#original poly MBA expression
originalcmbaExpreList = self.MBA_multiply(cmbaExpre1, cmbaExpre2)
originalcmbaExpre = originalcmbaExpreList[0]
cmbaExpreterm1 = expression_2_term(cmbaExpre1)
cmbaExpreterm2 = expression_2_term(cmbaExpre2)
#randomly reverse the sign of every term in cmbaexpre1
for (idx, term) in enumerate(cmbaExpreterm1):
r = random.randint(1,3)
#not reversed
if r % 2:
cmbaExpreterm1[idx] = term
continue
#reversed
if term[0] == "+":
cmbaExpreterm1[idx] = "-" + term[1:]
elif term[0] == "-":
cmbaExpreterm1[idx] = "+" + term[1:]
else:
cmbaExpreterm1[idx] = "-" + term
#construct the -1/1 * mbaexpre
cmbaExpre1 = "".join(cmbaExpreterm1)
if cmbaExpre1[0] == "+":
cmbaExpre1 = cmbaExpre1[1:]
#get part of the mbaexpre
partterm = cmbaExpreterm2[:len(cmbaExpreterm2) // 2 + 1]
#randomly reverse the sign of every term in cmbaexpre1
for (idx, term) in enumerate(partterm):
r = | |
fp)
fp.write("end Material\n\n")
# Maps each boolean "use_map_*" flag of an MTex slot to the symbolic
# map-type name written into the export file.
MapToTypes = {
    'use_map_alpha' : 'ALPHA',
    'use_map_ambient' : 'AMBIENT',
    'use_map_color_diffuse' : 'COLOR',
    'use_map_color_emission' : 'COLOR_EMISSION',
    'use_map_color_reflection' : 'COLOR_REFLECTION',
    'use_map_color_spec' : 'COLOR_SPEC',
    'use_map_color_transmission' : 'COLOR_TRANSMISSION',
    'use_map_density' : 'DENSITY',
    'use_map_diffuse' : 'DIFFUSE',
    'use_map_displacement' : 'DISPLACEMENT',
    'use_map_emission' : 'EMISSION',
    'use_map_emit' : 'EMIT',
    'use_map_hardness' : 'HARDNESS',
    'use_map_mirror' : 'MIRROR',
    'use_map_normal' : 'NORMAL',
    'use_map_raymir' : 'RAYMIR',
    'use_map_reflect' : 'REFLECTION',
    'use_map_scatter' : 'SCATTERING',
    'use_map_specular' : 'SPECULAR_COLOR',
    'use_map_translucency' : 'TRANSLUCENCY',
    'use_map_warp' : 'WARP',
}
def exportMTex(index, mtex, use, fp):
    """Write one material texture slot (MTex) to the export file.

    Args:
        index: slot index written on the MTex header line.
        mtex: the texture slot to export.
        use: unused here; kept for call-site compatibility.
        fp: open output file.
    """
    tex = mtex.texture
    texname = tex.name.replace(' ','_')
    mapto = None
    prio = []
    for ext in MapToTypes.keys():
        # getattr instead of eval: same attribute lookup, no code execution
        if getattr(mtex, ext):
            if mapto is None:
                mapto = MapToTypes[ext]
            prio.append(ext)
            print("Mapto", ext, mapto)
    fp.write(" MTex %d %s %s %s\n" % (index, texname, mtex.texture_coords, mapto))
    writePrio(mtex, ['texture']+prio, " ", fp)
    print("MTEX", texname, list(MapToTypes.keys()) )
    writeDir(mtex, list(MapToTypes.keys()) + ['texture', 'type', 'texture_coords', 'offset'], " ", fp)
    print("DONE MTEX", texname)
    fp.write(" end MTex\n\n")
    return
def exportTexture(tex, fp):
    """Write one texture datablock to the export file.

    IMAGE textures are written in short form (name plus image reference);
    other types get their color ramp, node tree and remaining attributes.
    """
    if not tex:
        return
    fp.write("Texture %s %s\n" % (tex.name.replace(' ', '_'), tex.type))
    if tex.type == 'IMAGE' and tex.image:
        # image textures need only the image reference; nothing else is dumped
        fp.write(" Image %s ;\n" % tex.image.name.replace(' ', '_'))
        fp.write("end Texture\n\n")
        return
    exportRamp(tex.color_ramp, "color_ramp", fp)
    exportNodeTree(tex.node_tree, fp)
    writeDir(tex, ['color_ramp', 'node_tree', 'image_user', 'use_nodes', 'use_textures', 'type', 'users_material'], " ", fp)
    fp.write("end Texture\n\n")
def exportImage(img, fp):
    """Write one image datablock (name and file path) to the export file.

    The internal 'Render Result' image is never exported.
    """
    name = img.name.replace(' ', '_')
    if name == 'Render_Result':
        return
    fp.write("Image %s\n" % name)
    fp.write(" Filename %s ;\n" % img.filepath)
    # writeDir(img, [], "  ", fp)
    fp.write("end Image\n\n")
def exportRamp(ramp, name, fp):
    """Write a color ramp (header plus its color/position elements).

    Args:
        ramp: the color ramp, or None to write nothing.
        name: label written on the Ramp header line.
        fp: open output file.
    """
    if ramp is None:
        return
    # leftover debug print(ramp) removed
    fp.write(" Ramp %s\n" % name)
    for elt in ramp.elements:
        col = elt.color
        fp.write(" Element (%.3f,%.3f,%.3f,%.3f) %.3f ;\n" % (col[0], col[1], col[2], col[3], elt.position))
    writeDir(ramp, ['elements'], " ", fp)
    fp.write(" end Ramp\n")
#
# exportWorld(world, fp):
# exportScene(scn, fp):
#
def exportWorld(world, fp):
    """Write the world datablock: lighting, mist, stars, then remaining attrs."""
    fp.write("World %s\n" % world.name.replace(' ', '_'))
    sub_blocks = (("Lighting", world.lighting),
                  ("Mist", world.mist),
                  ("Stars", world.stars))
    for label, value in sub_blocks:
        exportDefault(label, value, [], [], [], [], ' ', fp)
    writeDir(world, ['lighting', 'mist', 'stars'], " ", fp)
    fp.write("end World\n\n")
def exportScene(scn, fp):
    """Write one scene: node tree, game data, keying sets, object bases,
    object references, render/tool/unit settings, then remaining attributes.
    """
    fp.write("Scene %s\n" % (scn.name.replace(' ', '_')))
    exportNodeTree(scn.nodetree, fp)
    exportGameData(scn.game_data, fp)
    for kset in scn.all_keying_sets:
        exportDefault("KeyingSet", kset, [kset.name], [], ['type_info'], [], ' ', fp)
    for obase in scn.bases:
        exportDefault("ObjectBase", obase, [], [], [], [], ' ', fp)
    for ob in scn.objects:
        # objects are written as name references only; their full data is
        # exported elsewhere
        fp.write(" Object %s ;\n" % (ob.name.replace(' ','_')))
    exportDefault("RenderSettings", scn.render, [], [], [], [('Layer', scn.render.layers)], ' ', fp)
    exportToolSettings(scn.tool_settings, fp)
    exportDefault("UnitSettings", scn.unit_settings, [], [], [], [], ' ', fp)
    writeDir(scn,
        ['bases', 'all_keying_sets', 'game_data', 'network_render', 'nodetree', 'objects', 'render',
        'pose_templates', 'tool_settings', 'unit_settings'], " ", fp)
    fp.write("end Scene\n\n")
def exportToolSettings(tset, fp):
    """Write the scene tool settings: the four paint/sculpt sub-blocks,
    then the remaining attributes."""
    fp.write(" ToolSettings\n")
    sub_blocks = (("ImagePaint", tset.image_paint),
                  ("Sculpt", tset.sculpt),
                  ("VertexPaint", tset.vertex_paint),
                  ("WeightPaint", tset.weight_paint))
    for label, value in sub_blocks:
        exportDefault(label, value, [], [], [], [], ' ', fp)
    writeDir(tset, ['image_paint', 'sculpt', 'vertex_paint', 'weight_paint'], ' ', fp)
    fp.write(" end ToolSettings\n")
def exportGameData(gdata, fp):
    """Write the game-engine data block; all attributes come from writeDir."""
    fp.write(" GameData\n")
    writeDir(gdata, [], " ", fp)
    fp.write(" end GameData\n")
#
# exportNodeTree(tree, fp)
# exportNode(node, fp)
#
def exportNodeTree(tree, fp):
    """Write a node tree: its animation data, then every node.

    Args:
        tree: the node tree, or None to write nothing.
        fp: open output file.
    """
    if tree is None:
        return
    # leftover debug print(tree) removed
    fp.write(" NodeTree %s\n" % tree.name.replace(' ', '_'))
    exportAnimationData(tree.animation_data, fp)
    for node in tree.nodes:
        exportNode(node, fp)
    writeDir(tree, ['nodes'], " ", fp)
    fp.write(" end NodeTree\n")
def exportNode(node, fp):
    """Write one node: its location, then its input and output sockets.

    Args:
        node: the node to export.
        fp: open output file.
    """
    fp.write(" Node %s\n" % node.name.replace(' ', '_'))
    loc = node.location
    # was "%3.f" (width 3, zero decimals) -- clearly meant three decimal
    # places, matching every other coordinate written by this exporter
    fp.write(" location (%.3f, %.3f) ;\n" % (loc[0], loc[1]))
    fp.write(" Inputs\n")
    for inp in node.inputs:
        exportNodeSocket(inp, fp)
    fp.write(" end Inputs\n")
    fp.write(" Outputs\n")
    for outp in node.outputs:
        exportNodeSocket(outp, fp)
    fp.write(" end Outputs\n")
    fp.write(" end Node\n")
def exportNodeSocket(socket, fp):
    """Write one node socket (name and RNA type name) plus its attributes."""
    # leftover debug print(dir(socket.rna_type)) removed
    fp.write(" Socket %s %s\n" % (socket.name.replace(' ', '_'), socket.rna_type.name.replace(' ', '_')))
    writeDir(socket, [], " ", fp)
    #fp.write(" default_value %s ; \n" %socket.default_value)
    #fp.write(" rna_type %s ; \n" %socket.rna_type)
    fp.write(" end Socket\n")
#
# exportObject(ob, fp):
#
def exportObject(ob, fp):
    """Write one object: first its datablock (mesh, armature, curve, ...),
    then the Object wrapper with layers, modifiers, constraints, particle
    systems and remaining attributes.

    In Quick mode only layers and modifiers are written.
    """
    global hairFile  # NOTE(review): declared but never assigned here -- confirm it is still needed
    fp.write("\n# ----------------------------- %s --------------------- # \n\n" % ob.type )
    # dispatch on object type to export the underlying datablock
    if ob.type == "MESH":
        exportMesh(ob, fp)
    elif ob.type == "ARMATURE":
        exportArmature(ob, fp)
    elif ob.type == "EMPTY":
        pass
    elif ob.type == "CURVE":
        exportCurve(ob, fp)
    elif ob.type == "SURFACE":
        exportSurface(ob, fp)
    elif ob.type == 'LATTICE':
        exportLattice(ob, fp)
    elif ob.type == 'TEXT':
        exportTextCurve(ob, fp)
    elif not expMsk & M_Obj:
        # remaining types (LAMP and defaults) are only exported when the
        # M_Obj bit is set in the export mask
        return
    elif ob.type == 'LAMP':
        exportLamp(ob, fp)
    else:
        exportDefaultObject(ob,fp)
    if ob.data:
        datName = ob.data.name.replace(' ','_')
    else:
        datName = 'None'
    fp.write("\nObject %s %s %s\n" % (ob.name.replace(' ', '_'), ob.type, datName))
    writeArray('layers', ob.layers, " ", 1, fp)
    for mod in ob.modifiers:
        exportModifier(mod, fp)
    if Quick:
        # quick export stops after layers and modifiers
        fp.write("end Object\n\n")
        return
    for cns in ob.constraints:
        exportConstraint(cns, fp)
    for psys in ob.particle_systems:
        exportParticleSystem(psys, " ", fp)
    exportAnimationData(ob.animation_data, fp)
    exportDefault("FieldSettings", ob.field, [], [], [], [], ' ', fp)
    writeDir(ob,
        ['data','parent_vertices', 'mode', 'scene_users', 'children', 'pose', 'field',
        'material_slots', 'modifiers', 'constraints', 'layers', 'bound_box', 'group_users',
        'animation_visualisation', 'animation_data', 'particle_systems', 'active_particle_system',
        'active_shape_key', 'vertex_groups', 'active_vertex_group', 'materials'], " ", fp)
    fp.write("end Object\n\n")
    return
#
# exportParticleSystem(psys, pad, fp):
# exportParticleSettings(settings, psys, pad, fp):
# exportParticle(par, nmax, pad, fp):
#
def exportParticleSystem(psys, pad, fp):
    """Write one particle system: its settings, attributes and, if the
    system was hand-edited, the particles themselves."""
    sys_name = psys.name.replace(' ', '_')
    fp.write("%sParticleSystem %s %s \n" % (pad, sys_name, psys.settings.type))
    createdLocal['ParticleSystem'].append(sys_name)
    exportParticleSettings(psys.settings, psys, pad, fp)
    skipped = ['settings', 'child_particles', 'particles', 'editable', 'edited', 'global_hair', 'multiple_caches']
    writeDir(psys, skipped, pad+" ", fp)
    if psys.edited:
        exportParticles(psys.particles, psys.settings.amount, pad+" ", fp)
    fp.write("%send ParticleSystem\n" % pad)
def exportParticleSettings(settings, psys, pad, fp):
    """Write a ParticleSettings struct: priority attributes first, then the rest."""
    fp.write("%ssettings Struct ParticleSettings %s \n" % (pad, settings.name.replace(' ','_')))
    priority = ['amount', 'hair_step', 'rendered_child_nbr', 'child_radius', 'child_random_size']
    writePrio(settings, priority, pad+" ", fp)
    writeDir(settings, priority, pad+" ", fp)
    fp.write("%send Struct\n" % pad)
def exportParticles(particles, nmax, pad, fp):
    """Write up to nmax particles, each with its hair keys and priority attrs.

    NOTE(review): particles[0] is indexed unconditionally at the end, so this
    assumes a non-empty collection -- confirm callers guarantee that.
    """
    fp.write("%sParticles\n" % pad)
    n = 0
    prio = ['location']
    for par in particles:
        if n < nmax:
            fp.write("%s Particle \n" % pad)
            for h in par.hair:
                # one hair key: location tuple, time, weight
                fp.write("%s h " % pad)
                writeTuple(h.location, fp)
                fp.write(" %d %.3f ;\n" % (h.time, h.weight))
            writePrio(par, prio, pad+" ", fp)
            fp.write("%s end Particle\n" % pad)
        n += 1
    writeDir(particles[0], prio+['hair'], pad+" ", fp)
    fp.write("%send Particles\n" % pad)
#
# exportMesh(ob, fp):
#
def exportMesh(ob, fp):
me = ob.data
meName = me.name.replace(' ', '_')
obName = ob.name.replace(' ', '_')
if verbosity > 0:
print( "Saving mesh "+meName )
fp.write("Mesh %s %s \n" % (meName, obName))
if me.vertices:
fp.write(" Verts\n")
for v in me.vertices:
fp.write(" v %.3f %.3f %.3f ;\n" %(v.co[0], v.co[1], v.co[2]))
v = me.vertices[0]
#writeDir(v, ['co', 'index', 'normal'], " ", fp)
fp.write(" end Verts\n")
if me.polygons:
fp.write(" Faces\n")
for f in me.polygons:
fp.write(" f ")
for v in f.vertices:
fp.write("%d " % v)
fp.write(";\n")
if len(me.materials) <= 1:
f = me.polygons[0]
fp.write(" ftall %d %d ;\n" % (f.material_index, f.use_smooth))
else:
"""
for f in me.polygons:
fp.write(" ft %d %d ;\n" % (f.material_index, f.use_smooth))
"""
mi = -1
us = -1
n = 0
for f in me.polygons:
if (f.material_index == mi) and (f.use_smooth == us):
n += 1
else:
if n > 1:
fp.write(" ftn %d %d %d ;\n" % (n, mi, us))
elif n > 0:
fp.write(" ft %d %d ;\n" % (mi, us))
mi = f.material_index
us = f.use_smooth
n = 1
if n > 1:
fp.write(" ftn %d %d %d ;\n" % (n, mi, us))
elif n > 0:
fp.write(" ft %d %d ;\n" % (mi, us))
fp.write(" end Faces\n")
elif me.edges:
fp.write(" Edges\n")
for e in me.edges:
fp.write(" e %d %d ;\n" % (e.vertices[0], e.vertices[1]))
e = me.edges[0]
#writeDir(e, ['vertices'], " ", fp)
fp.write(" end Edges\n")
if Quick:
fp.write("end Mesh\n")
return # exportMesh
for uvtex in me.uv_textures:
uvtexName = uvtex.name.replace(' ','_')
fp.write(" MeshTextureFaceLayer %s\n" % uvtexName)
fp.write(" Data \n")
for data in uvtex.data.values():
v = data.uv_raw
fp.write(" vt %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f ;\n" %
(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]))
writeDir(uvtex.data[0],
['uv1', 'uv2', 'uv3', 'uv4', 'uv', 'uv_raw', 'uv_pinned', 'uv_selected'], " ", fp)
fp.write(" end Data\n")
writeDir(uvtex, ['data'], " ", fp)
createdLocal['MeshTextureFaceLayer'].append(uvtexName)
fp.write(" end MeshTextureFaceLayer\n")
for vcol in me.vertex_colors:
vcolName = vcol.name.replace(' ','_')
fp.write(" MeshColorLayer %s\n" % vcolName)
if Optimize < 2:
fp.write(" Data \n")
for data in vcol.data.values():
fp.write(" cv ")
writeTuple(data.color1, fp)
writeTuple(data.color2, fp)
writeTuple(data.color3, fp)
writeTuple(data.color4, fp)
fp.write(" ;\n")
fp.write(" end Data\n")
writeDir(vcol, ['data'], " ", fp)
createdLocal['MeshColorLayer'].append(vcolName)
fp.write(" end MeshColorLayer\n")
"""
for v in me.sticky:
fp.write(" sticky %.3f %.3f\n" % (v.co[0], v.co[1]))
"""
for mat in me.materials:
if mat:
fp.write(" Material %s ;\n" % mat.name.replace(" | |
import unittest
import pandas as pd
import numpy as np
from borf.get_orfs import read_fasta
from borf.get_orfs import find_next_stop
from borf.get_orfs import find_max_orf_index
from borf.get_orfs import orf_start_stop_from_aa
from borf.get_orfs import find_longest_orfs
from borf.get_orfs import replace_last_stop
from borf.get_orfs import add_upstream_aas
from borf.get_orfs import filter_objects
from borf.get_orfs import translate_all_frames
from borf.get_orfs import convert_start_stop_to_nt
from borf.get_orfs import check_first_aa
from borf.get_orfs import unique_number_from_list
from borf.get_orfs import find_all_orfs
from borf.get_orfs import add_orf_classification
from borf.get_orfs import get_orfs
class TestReadFasta(unittest.TestCase):
    def test_read_fasta(self):
        # files are read into the expected format
        records = read_fasta('test_data/test_mutliple_frame_orfs.fa')
        seq_array = [str(record.seq) for record in records]
        first_seq = seq_array[0]
        # only check the first/last few nts and the total length
        starts_ok = first_seq[0:20] == 'GCTTCGGGTTGGTGTCATGG'
        ends_ok = first_seq[-1:-20:-1] == 'AGTTGTGTTACCGGGACGG'
        length_ok = len(first_seq) == 2757
        self.assertTrue(starts_ok and ends_ok and length_ok)
class TestFindNextStop(unittest.TestCase):
    def test_next_stop_not_longest(self):
        # finds the NEXT stop codon from the start of the sequence
        self.assertEqual(find_next_stop("AMEATBALL*", 0), 10)

    def test_next_stop_from_within(self):
        # finds the NEXT stop codon when starting past position 0/1
        peptide = "AMEATY*METABALL*"
        self.assertEqual(find_next_stop(peptide, 7), len(peptide))

    def test_next_stop_final(self):
        # returns the sequence length when no stop codon is found
        peptide = "AMEATBALL"
        self.assertEqual(find_next_stop(peptide, 0), len(peptide))
class TestFindMaxOrfIndex(unittest.TestCase):
    def test_find_max_orf_index(self):
        # basic usage: pick the start/end pair spanning the longest ORF
        result = find_max_orf_index(start_locs=[0, 100], end_locs=[1000, 200])
        self.assertEqual(result, (0, 1000))

    def test_find_max_orf_index_offby1(self):
        # the second ORF is longer by one
        result = find_max_orf_index(start_locs=[0, 100], end_locs=[999, 1100])
        self.assertEqual(result, (100, 1100))

    def test_find_max_orf_index_equal(self):
        # ties resolve to the first instance of the maximum
        result = find_max_orf_index(start_locs=[0, 100], end_locs=[1000, 1100])
        self.assertEqual(result, (0, 1000))
class TestOrfStartStopFromAA(unittest.TestCase):
    def test_correct_start_stop(self):
        # start/stop locations are reported 1-indexed (non-pythonic)
        self.assertEqual(orf_start_stop_from_aa('AMEATBALL*'), (1, 10))

    def test_start_stop_no_stop_codon(self):
        # without a stop codon the stop location is the final amino acid
        self.assertEqual(orf_start_stop_from_aa('AMEATBALL'), (1, 9))

    def test_start_stop_longest(self):
        # locations are reported for the LONGEST ORF
        self.assertEqual(orf_start_stop_from_aa('MAUL*AMEATBALL'), (6, 14))
class TestFindLongestORF(unittest.TestCase):
    def test_find_longest_orf_output_format(self):
        # a length-5 tuple output, each element a correctly-typed numpy array
        long_orf = find_longest_orfs(['AMEATBALL'])
        t_len = len(long_orf) == 5
        t0 = long_orf[0].dtype == '<U8'
        t1 = long_orf[1].dtype == 'int64'
        t2 = long_orf[2].dtype == 'int64'
        t3 = long_orf[3].dtype == 'int64'
        t4 = long_orf[4].dtype == 'bool'
        all_right_types = t0 and t1 and t2 and t3 and t4 and t_len
        self.assertTrue(all_right_types)

    def test_find_longest_orf_trimmed(self):
        # the trailing * is trimmed from the orf sequence
        self.assertEqual(find_longest_orfs(['AMEATBALL*'])[0], ['MEATBALL'])

    def test_find_longest_orf_multiple(self):
        # one ORF is returned per input sequence
        # (renamed from "input", which shadowed the builtin)
        sequences = ['AMEATBALL*', 'TWOMEATBALLS']
        result = find_longest_orfs(sequences)
        self.assertEqual(len(result[0]), len(sequences))

    def test_find_longest_orf_stopsites(self):
        # the stop site is the * when present, otherwise the last AA
        stop_loc_with_stop = find_longest_orfs(['AMEATBALL*'])[2]
        stop_loc_without_stop = find_longest_orfs(['AMEATBALL'])[2]
        self.assertEqual(stop_loc_with_stop, stop_loc_without_stop + 1)
class TestReplaceLastStop(unittest.TestCase):
    def test_replace_last_stop(self):
        # a trailing * is trimmed, so both spellings normalise identically
        with_stop = replace_last_stop('MEATBALL*')
        without_stop = replace_last_stop('MEATBALL')
        self.assertEqual(without_stop, with_stop)
class TestAddUpstreamAAs(unittest.TestCase):
    def _run_add_upstream(self, seqs, min_upstream_length):
        """Run find_longest_orfs then add_upstream_aas on seqs; return the output tuple."""
        aa_sequence = np.array(seqs)
        orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
            aa_sequence)
        return add_upstream_aas(aa_sequence, stop_sites, start_sites,
                                orf_sequence, orf_length,
                                min_upstream_length=min_upstream_length)

    def test_add_upstream_aa_output(self):
        # all outputs generated and of the correct numpy types
        output = self._run_add_upstream(['ALONGERUPSTREAMMEATBALL'], 5)
        t_len = len(output) == 3
        t0 = output[0].dtype.type == np.str_
        t1 = output[1].dtype == 'int64'
        t2 = output[2].dtype == 'int64'
        all_right_types = t0 and t1 and t2 and t_len
        self.assertTrue(all_right_types)

    def test_add_upstream_aa(self):
        # upstream AAs are prepended when long enough
        output = self._run_add_upstream(['ALONGERUPSTREAMMEATBALL'], 5)
        self.assertEqual(output[0], 'ALONGERUPSTREAMMEATBALL')

    def test_add_upstream_aa_multi(self):
        # works with multiple input sequences
        seqs = ['ALONGERUPSTREAMMEATBALL', 'TWODOZENMEATBALLS', 'BROWNBEARMAULSGIANTSQUID']
        output = self._run_add_upstream(seqs, 5)
        self.assertTrue(np.all(output[0] == np.array(seqs)))

    def test_add_upstream_aa_noupstream(self):
        # a stop codon upstream blocks any extension
        output = self._run_add_upstream(['BEAREATS*MEATBALLS'], 5)
        self.assertEqual(output[0], 'MEATBALLS')

    def test_add_upstream_aa_shortupstream(self):
        # upstream AAs shorter than min_upstream_length are not added
        output = self._run_add_upstream(['BEARMEATBALLS'], 5)
        self.assertEqual(output[0], 'MEATBALLS')

    def test_add_upstream_aa_exactupstream(self):
        # an upstream run of exactly min_upstream_length is added
        output = self._run_add_upstream(['BEARMEATBALLS'], 4)
        self.assertEqual(output[0], 'BEARMEATBALLS')
class TestFilterObjects(unittest.TestCase):
    def test_filter_objects(self):
        # input arrays are filtered in parallel by a boolean mask
        letters = np.array(['A', 'B', 'C', 'D', 'E', 'F', 'H', 'I', 'J'])
        values = np.array([1, 2, 3, 4, 5, 4, 3, 2, 1])
        # renamed from "filter", which shadowed the builtin
        mask = values < 3
        output = filter_objects(mask, letters, values)
        self.assertTrue(np.all(output[0] == np.array(['A', 'B', 'I', 'J'])) and
                        np.all(output[1] == np.array([1, 2, 2, 1])))
class TestTranslateAllFrames(unittest.TestCase):
    def _translate(self, both_strands=False):
        """Translate the shared fixture file in all reading frames."""
        sequences = read_fasta('test_data/test_trans_all_frames.fa')
        return translate_all_frames(sequences, both_strands=both_strands)

    def test_translate_output_format(self):
        # a length-6 tuple output, each element a correctly-typed numpy array
        output = self._translate()
        t_len = len(output) == 6
        t0 = output[0].dtype.type == np.str_
        t1 = output[1].dtype.type == np.str_
        t2 = output[2].dtype == 'int64'
        t3 = output[3].dtype.type == np.str_
        t4 = output[4].dtype == 'int64'
        t5 = output[5].dtype == 'int64'
        all_right_types = t0 and t1 and t2 and t3 and t4 and t5 and t_len
        self.assertTrue(all_right_types)

    def test_translate_allframes(self):
        ids, aa_frames, frame, strand, seq_length_nt, seq_length = self._translate()
        self.assertTrue(np.all(frame == np.array([1, 2, 3])))

    def test_translate_alltransframes(self):
        ids, aa_frames, frame, strand, seq_length_nt, seq_length = self._translate()
        self.assertTrue(np.all(aa_frames == np.array(
            ['MANATEE*', 'WRTRPKN', 'GERDRRI'])))

    def test_translate_posstrand(self):
        ids, aa_frames, frame, strand, seq_length_nt, seq_length = self._translate()
        self.assertTrue(np.all(strand == np.array(['+', '+', '+'])))

    def test_translate_seq_length_nt(self):
        ids, aa_frames, frame, strand, seq_length_nt, seq_length = self._translate()
        self.assertTrue(np.all(seq_length_nt == np.array([24, 24, 24])))

    def test_translate_seq_length(self):
        ids, aa_frames, frame, strand, seq_length_nt, seq_length = self._translate()
        self.assertTrue(np.all(seq_length == np.array([8, 7, 7])))

    def test_translate_bothstrands(self):
        ids, aa_frames, frame, strand, seq_length_nt, seq_length = self._translate(
            both_strands=True)
        frame_correct = np.all(frame == np.array([1, 1, 2, 2, 3, 3]))
        strand_correct = np.all(strand == np.array(
            ['+', '-', '+', '-', '+', '-']))
        trans_correct = np.all(aa_frames == np.array(
            ['MANATEE*', 'LFFGRVRH', 'WRTRPKN', 'YSSVAFA', 'GERDRRI', 'ILRSRSP']))
        self.assertTrue(frame_correct and strand_correct and trans_correct)
class TestConvertAANT(unittest.TestCase):
def test_convert_nt_output_format(self):
# tests that a length 3 tupple output, and each is the correct numpy
# array type
sequences = read_fasta('test_data/test_frames.fa')
ids, aa_frames, frame, strand, seq_length_nt, seq_length = translate_all_frames(
sequences, both_strands=False)
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_frames)
# filter data by minimum orf length
keep = orf_length >= 6
aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length = filter_objects(
keep, aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length)
output = convert_start_stop_to_nt(
start_sites,
stop_sites,
seq_length_nt,
orf_length,
frame,
last_aa_is_stop)
t_len = len(output) == 3
# test numpy types of all outputs
t0 = output[0].dtype == 'int64'
t1 = output[1].dtype == 'int64'
t2 = output[2].dtype == 'int64'
all_right_types = t0 and t1 and t2 and t_len
self.assertTrue(all_right_types)
def test_convert_start_nt(self):
sequences = read_fasta('test_data/test_frames.fa')
ids, aa_frames, frame, strand, seq_length_nt, seq_length = translate_all_frames(
sequences, both_strands=False)
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_frames)
# filter data by minimum orf length
keep = orf_length >= 6
aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length = filter_objects(
keep, aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length)
start_site_nt, stop_site_nt, utr3_length = convert_start_stop_to_nt(
start_sites, stop_sites, seq_length_nt, orf_length, frame, last_aa_is_stop)
self.assertTrue(np.all(start_site_nt == np.array([1, 2, 3])))
def test_convert_stop_nt(self):
sequences = read_fasta('test_data/test_frames.fa')
ids, aa_frames, frame, strand,seq_length_nt, seq_length = translate_all_frames(sequences, both_strands=False)
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(
aa_frames)
# filter data by minimum orf length
keep = orf_length >= 6
aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length = filter_objects(
keep, aa_frames, frame, strand, seq_length_nt, ids, seq_length, start_sites, stop_sites, orf_sequence, last_aa_is_stop, orf_length)
start_site_nt, stop_site_nt, utr3_length = convert_start_stop_to_nt(
start_sites, stop_sites, seq_length_nt, orf_length, frame, last_aa_is_stop)
self.assertTrue(np.all(stop_site_nt == np.array([21, 22, 23])))
def test_convert_stop_nt_3incomplete(self):
sequences = read_fasta('test_data/test_stopsitent.fa')
ids, aa_frames, frame, strand,seq_length_nt, seq_length = translate_all_frames(sequences, both_strands=False)
orf_sequence, start_sites, stop_sites, orf_length, last_aa_is_stop = find_longest_orfs(aa_frames)
# filter data by minimum orf length
keep = orf_length >= 6
aa_frames, frame, | |
3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
    def test_insert_index_float64(self):
        """Coercion rules for Index.insert on a Float64Index."""
        obj = pd.Float64Index([1., 2., 3., 4.])
        self.assertEqual(obj.dtype, np.float64)
        # float + int -> float (the inserted int 1 is coerced to 1.0)
        exp = pd.Index([1., 1., 2., 3., 4.])
        self._assert_insert_conversion(obj, 1, exp, np.float64)
        # float + float -> float
        exp = pd.Index([1., 1.1, 2., 3., 4.])
        self._assert_insert_conversion(obj, 1.1, exp, np.float64)
        # float + bool -> float (False becomes 0.0)
        exp = pd.Index([1., 0., 2., 3., 4.])
        self._assert_insert_conversion(obj, False, exp, np.float64)
        # float + object -> object
        exp = pd.Index([1., 'x', 2., 3., 4.])
        self._assert_insert_conversion(obj, 'x', exp, np.object)
    def test_insert_index_complex128(self):
        # TODO: complex128 Index.insert coercion is not exercised yet
        pass
    def test_insert_index_bool(self):
        # TODO: bool Index.insert coercion is not exercised yet
        pass
    def test_insert_index_datetime64(self):
        """Coercion rules for Index.insert on a tz-naive DatetimeIndex."""
        obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
                                '2011-01-04'])
        self.assertEqual(obj.dtype, 'datetime64[ns]')
        # datetime64 + datetime64 => datetime64
        exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
                                '2011-01-03', '2011-01-04'])
        self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
                                       exp, 'datetime64[ns]')
        # ToDo: must coerce to object
        # inserting a tz-aware timestamp currently raises instead of coercing
        msg = "Passed item and index have different timezone"
        with tm.assertRaisesRegexp(ValueError, msg):
            obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
        # ToDo: must coerce to object
        # inserting a plain int currently raises instead of coercing
        msg = "cannot insert DatetimeIndex with incompatible label"
        with tm.assertRaisesRegexp(TypeError, msg):
            obj.insert(1, 1)
    def test_insert_index_datetime64tz(self):
        """Coercion rules for Index.insert on a tz-aware DatetimeIndex."""
        obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
                                '2011-01-04'], tz='US/Eastern')
        self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
        # datetime64tz + datetime64tz => datetime64
        exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
                                '2011-01-03', '2011-01-04'], tz='US/Eastern')
        val = pd.Timestamp('2012-01-01', tz='US/Eastern')
        self._assert_insert_conversion(obj, val, exp,
                                       'datetime64[ns, US/Eastern]')
        # ToDo: must coerce to object
        # tz-naive timestamp into a tz-aware index currently raises
        msg = "Passed item and index have different timezone"
        with tm.assertRaisesRegexp(ValueError, msg):
            obj.insert(1, pd.Timestamp('2012-01-01'))
        # ToDo: must coerce to object
        # a different timezone currently raises as well
        msg = "Passed item and index have different timezone"
        with tm.assertRaisesRegexp(ValueError, msg):
            obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
        # ToDo: must coerce to object
        msg = "cannot insert DatetimeIndex with incompatible label"
        with tm.assertRaisesRegexp(TypeError, msg):
            obj.insert(1, 1)
    def test_insert_index_timedelta64(self):
        """Coercion rules for Index.insert on a TimedeltaIndex."""
        obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
        self.assertEqual(obj.dtype, 'timedelta64[ns]')
        # timedelta64 + timedelta64 => timedelta64
        exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
        self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
                                       exp, 'timedelta64[ns]')
        # ToDo: must coerce to object
        # inserting a Timestamp currently raises instead of coercing
        msg = "cannot insert TimedeltaIndex with incompatible label"
        with tm.assertRaisesRegexp(TypeError, msg):
            obj.insert(1, pd.Timestamp('2012-01-01'))
        # ToDo: must coerce to object
        msg = "cannot insert TimedeltaIndex with incompatible label"
        with tm.assertRaisesRegexp(TypeError, msg):
            obj.insert(1, 1)
    def test_insert_index_period(self):
        """Coercion rules for Index.insert on a PeriodIndex."""
        obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
                             freq='M')
        self.assertEqual(obj.dtype, 'period[M]')
        # period + period => period
        exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
                              '2011-03', '2011-04'], freq='M')
        self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
                                       exp, 'period[M]')
        # period + datetime64 => object
        # NOTE(review): freq='M' is passed to a plain pd.Index here and below;
        # presumably it is ignored for object dtype - confirm on the pinned
        # pandas version.
        exp = pd.Index([pd.Period('2011-01', freq='M'),
                        pd.Timestamp('2012-01-01'),
                        pd.Period('2011-02', freq='M'),
                        pd.Period('2011-03', freq='M'),
                        pd.Period('2011-04', freq='M')], freq='M')
        self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
                                       exp, np.object)
        # period + int => object
        exp = pd.Index([pd.Period('2011-01', freq='M'),
                        1,
                        pd.Period('2011-02', freq='M'),
                        pd.Period('2011-03', freq='M'),
                        pd.Period('2011-04', freq='M')], freq='M')
        self._assert_insert_conversion(obj, 1, exp, np.object)
        # period + object => object
        exp = pd.Index([pd.Period('2011-01', freq='M'),
                        'x',
                        pd.Period('2011-02', freq='M'),
                        pd.Period('2011-03', freq='M'),
                        pd.Period('2011-04', freq='M')], freq='M')
        self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
NotImplementedError
    def test_where_series_object(self):
        # where on an object-dtype Series follows the common object coercions
        self._where_object_common(pd.Series)
    def test_where_index_object(self):
        # where on an object-dtype Index follows the common object coercions
        self._where_object_common(pd.Index)
    def _where_int64_common(self, klass):
        """Shared ``where`` coercion checks for an int64 Series/Index."""
        obj = klass([1, 2, 3, 4])
        self.assertEqual(obj.dtype, np.int64)
        cond = klass([True, False, True, False])
        # int + int -> int
        exp = klass([1, 1, 3, 1])
        self._assert_where_conversion(obj, cond, 1, exp, np.int64)
        values = klass([5, 6, 7, 8])
        exp = klass([1, 6, 3, 8])
        self._assert_where_conversion(obj, cond, values, exp, np.int64)
        # int + float -> float
        exp = klass([1, 1.1, 3, 1.1])
        self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
        values = klass([5.5, 6.6, 7.7, 8.8])
        exp = klass([1, 6.6, 3, 8.8])
        self._assert_where_conversion(obj, cond, values, exp, np.float64)
        # int + complex -> complex (Series only; Index has no complex dtype)
        if klass is pd.Series:
            exp = klass([1, 1 + 1j, 3, 1 + 1j])
            self._assert_where_conversion(obj, cond, 1 + 1j, exp,
                                          np.complex128)
            values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
            exp = klass([1, 6 + 6j, 3, 8 + 8j])
            self._assert_where_conversion(obj, cond, values, exp,
                                          np.complex128)
        # int + bool -> int (True/False become 1/0)
        exp = klass([1, 1, 3, 1])
        self._assert_where_conversion(obj, cond, True, exp, np.int64)
        values = klass([True, False, True, True])
        exp = klass([1, 0, 3, 1])
        self._assert_where_conversion(obj, cond, values, exp, np.int64)
    def test_where_series_int64(self):
        # where on an int64 Series follows the common int64 coercions
        self._where_int64_common(pd.Series)
    def test_where_index_int64(self):
        # where on an int64 Index follows the common int64 coercions
        self._where_int64_common(pd.Index)
    def _where_float64_common(self, klass):
        """Shared ``where`` coercion checks for a float64 Series/Index."""
        obj = klass([1.1, 2.2, 3.3, 4.4])
        self.assertEqual(obj.dtype, np.float64)
        cond = klass([True, False, True, False])
        # float + int -> float
        exp = klass([1.1, 1.0, 3.3, 1.0])
        self._assert_where_conversion(obj, cond, 1, exp, np.float64)
        values = klass([5, 6, 7, 8])
        exp = klass([1.1, 6.0, 3.3, 8.0])
        self._assert_where_conversion(obj, cond, values, exp, np.float64)
        # float + float -> float
        exp = klass([1.1, 1.1, 3.3, 1.1])
        self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
        values = klass([5.5, 6.6, 7.7, 8.8])
        exp = klass([1.1, 6.6, 3.3, 8.8])
        self._assert_where_conversion(obj, cond, values, exp, np.float64)
        # float + complex -> complex (Series only; Index has no complex dtype)
        if klass is pd.Series:
            exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
            self._assert_where_conversion(obj, cond, 1 + 1j, exp,
                                          np.complex128)
            values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
            exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
            self._assert_where_conversion(obj, cond, values, exp,
                                          np.complex128)
        # float + bool -> float (True/False become 1.0/0.0)
        exp = klass([1.1, 1.0, 3.3, 1.0])
        self._assert_where_conversion(obj, cond, True, exp, np.float64)
        values = klass([True, False, True, True])
        exp = klass([1.1, 0.0, 3.3, 1.0])
        self._assert_where_conversion(obj, cond, values, exp, np.float64)
    def test_where_series_float64(self):
        # where on a float64 Series follows the common float64 coercions
        self._where_float64_common(pd.Series)
    def test_where_index_float64(self):
        # where on a float64 Index follows the common float64 coercions
        self._where_float64_common(pd.Index)
    def test_where_series_complex128(self):
        """Coercion rules for Series.where with a complex128 Series."""
        obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
        self.assertEqual(obj.dtype, np.complex128)
        cond = pd.Series([True, False, True, False])
        # complex + int -> complex
        exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
        self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
        values = pd.Series([5, 6, 7, 8])
        exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
        self._assert_where_conversion(obj, cond, values, exp, np.complex128)
        # complex + float -> complex
        exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
        self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
        values = pd.Series([5.5, 6.6, 7.7, 8.8])
        exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
        self._assert_where_conversion(obj, cond, values, exp, np.complex128)
        # complex + complex -> complex
        exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
        self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
        values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
        exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
        self._assert_where_conversion(obj, cond, values, exp, np.complex128)
        # complex + bool -> complex (True/False become 1/0)
        exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
        self._assert_where_conversion(obj, cond, True, exp, np.complex128)
        values = pd.Series([True, False, True, True])
        exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
        self._assert_where_conversion(obj, cond, values, exp, np.complex128)
    def test_where_index_complex128(self):
        # TODO: Index has no complex128 dtype, so nothing is exercised here
        pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = | |
<gh_stars>10-100
"""
Set of classes for dealing with a STAR files (Relion's format)
# Author: <NAME> (Max Planck Institute for Biochemistry)
# Date: 1.06.16
"""
__author__ = '<NAME>'
import gc
import sys
import csv
import copy
import pyto
import errno
from pyseg.globals import *
from pyseg import disperse_io
import numpy as np
import scipy as sp
import multiprocessing as mp
from pyseg.pexceptions import *
import itertools as it
from .variables import RadialAvg3D
###########################################################################################
# Global functionality
###########################################################################################
def relion_norm(tomo, mask=None, inv=True):
    """
    Relion tomogram normalization: zero mean and unit standard deviation.

    :param tomo: input tomogram (ndarray)
    :param mask: if None (default) the whole tomogram is used for computing the
                 statistics, otherwise only the region where mask > 0
    :param inv: if True (default) the density values are inverted first
    :return: the normalized tomogram; an all-zeros float32 array if the
             standard deviation of the masked region is not positive
    """
    # Input parsing
    if mask is None:
        # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool is the
        # supported spelling and yields the same dtype
        mask = np.ones(shape=tomo.shape, dtype=bool)
    # Inversion
    if inv:
        hold_tomo = -1. * tomo
    else:
        hold_tomo = tomo
    # Statistics restricted to the masked voxels
    stat_tomo = hold_tomo[mask > 0]
    mn, st = stat_tomo.mean(), stat_tomo.std()
    # Normalization (guard against a degenerate, constant masked region)
    tomo_out = np.zeros(shape=tomo.shape, dtype=np.float32)
    if st > 0:
        tomo_out = (hold_tomo - mn) / st
    else:
        print('WARNING (relion_norm): standard deviation=' + str(st))
    return tomo_out
###########################################################################################
# Parallel processes
###########################################################################################
# Bin subvolumes in a particles STAR file
# pr_id: process ID
# beg_ids: beginning STAR particle row indices
# end_ids: ending STAR particle row indices
# bin: binning factor
# sv_shape: original subvolume shape
# res: original resolution vx/nm
# cutoff: cutoff in nm for low pass filtering, if None not applied
# dstar: dictionary with the STAR file data
# out_svol_dir: output directory for the binned files
# tmp_csv_dir: directory for temporary output CSV files
def pr_bin_star_svols(pr_id, beg_ids, end_ids, bin, res, sv_shape, cutoff, dstar, out_svol_dir, tmp_csv_dir):
    """Parallel worker: bin (down-sample) the subvolumes of a particles STAR file.

    :param pr_id: process ID (< 0 means run in the caller's process and return)
    :param beg_ids: first STAR particle row index to process (inclusive)
    :param end_ids: last STAR particle row index to process (inclusive)
    :param bin: binning factor
    :param res: original resolution vx/nm
    :param sv_shape: original subvolume shape
    :param cutoff: cutoff in nm for low pass filtering; if None it is derived
                   from the Nyquist frequency and the binning factor
    :param dstar: dictionary with the STAR file data (updated in place)
    :param out_svol_dir: output directory for the binned files
    :param tmp_csv_dir: directory for this process' temporary CSV output
    """
    # Initialization
    zoom_f, nyquist = 1. / bin, 2. * res
    tmp_csv = tmp_csv_dir + '/process_' + str(pr_id) + '.csv'
    sv_shape_f = np.asarray(sv_shape, dtype=np.float32).min()
    # Low pass filter to estimation CTF correction
    if cutoff is None:
        cutoff = nyquist * float(bin)
    rad = (nyquist/cutoff) * np.min(sv_shape_f) * .5
    lpf = low_pass_fourier_3d(sv_shape, res, cutoff, rad*.1)
    clpf = lpf
    lpf = sp.fftpack.fftshift(lpf)
    sph_mask = sphere_mask(sv_shape, .4*sv_shape_f) > 0
    # Particles loop
    with open(tmp_csv, 'w') as tfile:
        writer = csv.DictWriter(tfile, fieldnames=list(dstar.keys()))
        writer.writeheader()
        for row_id in range(beg_ids, end_ids+1):
            svol = disperse_io.load_tomo(dstar['_rlnImageName'][row_id], mmap=False)
            # Low pass filtering
            svol_fft = sp.fftpack.fftn(svol)
            svol = np.real(sp.fftpack.ifftn(svol_fft * lpf))
            hold_svol = svol[sph_mask]
            svol -= hold_svol.mean()
            # BUG FIX: the expression 'svol/hold_svol.std()' discarded its
            # result, leaving the subvolume unscaled; divide in place instead
            svol /= hold_svol.std()
            # Rescaling
            svol_bin = sp.ndimage.interpolation.zoom(svol, zoom_f,
                                                     order=3, mode='constant', cval=0.0, prefilter=True)
            out_svol = out_svol_dir + '/particle_bin' + str(int(bin)) + '_id_' + str(row_id) + '.mrc'
            disperse_io.save_numpy(svol_bin, out_svol)
            dstar['_rlnImageName'][row_id] = out_svol
            try:
                cvol = disperse_io.load_tomo(dstar['_rlnCtfImage'][row_id], mmap=False)
                cvol *= clpf
                # Crop the filtered CTF volume to the binned subvolume shape
                lx_crop, ly_crop, lz_crop = int(math.floor(.5*svol.shape[0])), int(math.floor(.5*svol.shape[1])), \
                                            int(math.floor(.5*svol.shape[2]))
                lx_crop -= int(math.floor(0.5*svol_bin.shape[0]))
                ly_crop -= int(math.floor(0.5*svol_bin.shape[1]))
                lz_crop -= int(math.floor(0.5*svol_bin.shape[2]))
                # BUG FIX: the z-axis slice previously ended at -ly_crop,
                # producing a wrongly sized crop whenever ly_crop != lz_crop
                cvol_bin = cvol[lx_crop:-lx_crop, ly_crop:-ly_crop, lz_crop:-lz_crop]
                out_cvol = out_svol_dir + '/ctf_bin' + str(int(bin)) + '_id_' + str(row_id) + '.mrc'
                disperse_io.save_numpy(cvol_bin, out_cvol)
                dstar['_rlnCtfImage'][row_id] = out_cvol
            except ValueError:
                # NOTE(review): if dstar is a plain dict a missing
                # '_rlnCtfImage' key raises KeyError, not ValueError;
                # confirm which exception this best-effort skip should catch
                pass
            row = dict.fromkeys(dstar)
            for key in list(dstar.keys()):
                row[key] = dstar[key][row_id]
            writer.writerow(row)
    # Worker processes terminate reporting their ID as exit status
    if pr_id < 0:
        return
    else:
        sys.exit(pr_id)
# Process for template matching on STAR file particles
# pr_id: process ID
# temp: template subvolume (same size as particles)
# mask: smooth borders mask (same size as particles)
# row_ids: STAR files row IDs to process
# dstar: STAR file dictionary with its data
# csvol_paths: list of paths of CTF subvolumes
# angs_arr: array with the angular sampling
# max_shift_v: maximum shifting in vx from suvolume center to consider
# shared_ncc: shared array to store the computed NCC scores
def pr_star_tm(pr_id, row_ids, temp, mask, dstar, max_shift_v, angs_arr, shared_ncc):
    """Parallel worker: template matching on STAR file particles.

    :param pr_id: process ID (< 0 means run in the caller's process and return)
    :param row_ids: STAR file row IDs to process
    :param temp: template subvolume (same size as particles)
    :param mask: smooth borders mask (same size as particles)
    :param dstar: STAR file dictionary with its data
    :param max_shift_v: maximum shifting in vx from subvolume center to consider
    :param angs_arr: array with the angular sampling
    :param shared_ncc: shared array where the maximum NCC score per row is stored
    """
    # Initialization
    # NOTE(review): n_mask/cte appear unused below (mask_id_sum is used for
    # NCC normalization instead) - confirm before removing.
    n_mask = float(mask.sum())
    cte = 1. / n_mask
    # NOTE(review): np.int and np.bool (below) were removed in NumPy 1.24;
    # this code presumably runs against an older pinned NumPy.
    svol_sp = np.asarray(temp.shape, dtype=np.int)
    svol_sp2 = int(.5 * svol_sp[0])
    svol_cent = np.asarray((svol_sp2, svol_sp2, svol_sp2), dtype=np.float32)
    psvol_paths = dstar['_rlnImageName']
    # NOTE(review): dstar looks like a plain dict, whose missing keys raise
    # KeyError rather than ValueError; confirm these fallbacks ever trigger.
    try:
        csvol_paths = dstar['_rlnCtfImage']
    except ValueError:
        csvol_paths = None
    # Missing alignment columns default to zero offsets/angles
    try:
        offs_x = dstar['_rlnOriginX']
    except ValueError:
        offs_x = np.zeros(shape=len(psvol_paths), dtype=np.float32)
    try:
        offs_y = dstar['_rlnOriginY']
    except ValueError:
        offs_y = np.zeros(shape=len(psvol_paths), dtype=np.float32)
    try:
        offs_z = dstar['_rlnOriginZ']
    except ValueError:
        offs_z = np.zeros(shape=len(psvol_paths), dtype=np.float32)
    try:
        offs_rot = dstar['_rlnAngleRot']
    except ValueError:
        offs_rot = np.zeros(shape=len(psvol_paths), dtype=np.float32)
    try:
        offs_tilt = dstar['_rlnAngleTilt']
    except ValueError:
        offs_tilt = np.zeros(shape=len(psvol_paths), dtype=np.float32)
    try:
        offs_psi = dstar['_rlnAnglePsi']
    except ValueError:
        offs_psi = np.zeros(shape=len(psvol_paths), dtype=np.float32)
    # Template masking
    mtemp = temp * mask
    mask_id = mask > 0
    mask_id_sum = mask_id.sum()
    # Shifting mask
    mask_shift = sphere_mask(mask.shape, max_shift_v).astype(np.bool)
    # Particles loop
    count = 0
    for row_id in row_ids:
        # Loading subvolumes
        psvol = disperse_io.load_tomo(psvol_paths[row_id], mmap=False)
        off_shift = np.asarray((offs_y[row_id], offs_x[row_id], offs_z[row_id]))
        off_ang = np.asarray((offs_rot[row_id], offs_tilt[row_id], offs_psi[row_id]))
        # Rotate particle to reference system
        psvol = tomo_shift(psvol, off_shift)
        r3d_a = pyto.geometry.Rigid3D()
        r3d_a.q = r3d_a.make_r_euler(angles=np.radians(off_ang), mode='zyz_in_active')
        rpsvol = r3d_a.transformArray(psvol, origin=svol_cent, order=3, prefilter=True, mode='reflect')
        # Normalize particle density within the mask
        rpsvol_fg = rpsvol[mask_id]
        rpsvol_mn, rpsvol_std = rpsvol_fg.mean(), rpsvol_fg.std()
        nrpsvol = rpsvol - rpsvol_mn
        if rpsvol_std > 0:
            nrpsvol /= rpsvol_std
        # Rotate back the particles
        r3d_p = pyto.geometry.Rigid3D()
        r3d_p.q = r3d_p.make_r_euler(angles=np.radians(off_ang), mode='zyz_in_passive')
        nrpsvol = r3d_p.transformArray(nrpsvol, origin=svol_cent, order=3, prefilter=True, mode='reflect')
        # Apply CTF to reference
        if csvol_paths is not None:
            # Rotate template to particles system
            rmtemp = r3d_p.transformArray(mtemp, origin=svol_cent, order=3, prefilter=True, mode='reflect')
            # CTF
            csvol = disperse_io.load_tomo(csvol_paths[row_id], mmap=False)
            crmtemp = np.real(sp.fftpack.ifftn(sp.fftpack.fftn(rmtemp) * sp.fftpack.fftshift(csvol)))
            # Rotate back to template system
            ctemp = r3d_a.transformArray(crmtemp, origin=svol_cent, order=3, prefilter=True, mode='reflect')
        else:
            ctemp = mtemp
        # Mask corrected normalization
        ctemp_fg = ctemp[mask_id]
        ctemp_mn, ctemp_std = ctemp_fg.mean(), ctemp_fg.std()
        nctemp = ctemp - ctemp_mn
        if ctemp_std > 0:
            nctemp /= ctemp_std
        # nctemp *= mask
        print(sp.__version__)
        # SciPy < 0.19 has no correlate(..., method='fft'); pre-compute the
        # conjugated template once so fftconvolve can be used instead
        if (int(sp.__version__.split('.')[0]) < 1) and (int(sp.__version__.split('.')[1]) < 19):
            nctemp_conj = np.real(sp.fftpack.ifftn(np.conjugate(sp.fftpack.fftn(nctemp))))
        # if row_id == 10:
        #     print 'Jol'
        # else:
        #     continue
        # Rotating loop
        max_ncc = 0
        # max_rot, max_tilt, max_psi = off_ang[0], off_ang[1], off_ang[2]
        for i_ang, ang_val in enumerate(angs_arr):
            # Particles search rotation
            ang = off_ang + ang_val
            r3d = pyto.geometry.Rigid3D()
            r3d.q = r3d.make_r_euler(angles=np.radians(ang), mode='zyz_in_active')
            rnrpsvol = r3d.transformArray(nrpsvol, origin=svol_cent, order=3, prefilter=True, mode='reflect') # * mask
            # NCC
            # psvol_conv = np.real(sp.fftpack.ifftn(sp.fftpack.fftn(rnrpsvol) * nctemp_fft)) * mask_shift * mask
            # psvol_conv = sp.signal.fftconvolve(rnrpsvol, nctemp, mode='same') # * mask_shift * mask
            if (int(sp.__version__.split('.')[0]) < 1) and (int(sp.__version__.split('.')[1]) < 19):
                # psvol_conv = sp.signal.correlate(rnrpsvol*mask, nctemp*mask, mode='same') * mask_shift
                psvol_conv = sp.signal.fftconvolve(rnrpsvol*mask, nctemp_conj*mask, mode='same') * mask_shift
            else:
                psvol_conv = sp.signal.correlate(rnrpsvol*mask, nctemp*mask, mode='same', method='fft') * mask_shift # * mask
            # oi, oj, ok = np.unravel_index(np.argmax(psvol_conv), psvol_conv.shape)
            # ncc = psvol_conv[oi, oj, ok] / mask_id_sum
            ncc = psvol_conv.max() / mask_id_sum
            # if i_ang == 0:
            # disperse_io.save_numpy(rnrpsvol, '/fs/pool/pool-lucic2/antonio/ribo_johannes/lum_ext/stm/test3_ribo/svols_3000_low_bin2_lp6/particle_bin2_id_' + str(row_id) + '_tvol.mrc')
            # disperse_io.save_numpy(nctemp, '/fs/pool/pool-lucic2/antonio/ribo_johannes/lum_ext/stm/test3_ribo/svols_3000_low_bin2_lp6/particle_bin2_id_' + str(row_id) + '_rvol.mrc')
            # disperse_io.save_numpy(psvol_conv, '/fs/pool/pool-lucic2/antonio/ribo_johannes/lum_ext/stm/test3_ribo/svols_3000_low_bin2_lp6/particle_bin2_id_' + str(row_id) + '_cvol.mrc')
            # Updating maximum
            if ncc > max_ncc:
                max_ncc = ncc
                # FOR DEBUGGING
                # max_rot, max_tilt, max_psi = ang[0], ang[1], ang[2]
                # max_ox, max_oy, max_oz = oi-svol_cent[0], oj-svol_cent[1], ok-svol_cent[2]
                # rnrpsvol = tomo_shift(rnrpsvol, (max_ox, max_oy, max_oz))
                # rnrpsvol_sp = tomo_shift(np.copy(rnrpsvol), (max_oy, max_ox, max_oz)) * mask
                # st_part, st_part_sp, st_temp, st_conv = rnrpsvol, rnrpsvol_sp, nctemp, psvol_conv
            # print 'Ang: ' + str(ang_val)
        # Set the maximum NCC in the shared array
        shared_ncc[row_id] = max_ncc
        # FOR DEBUGGING
        # parts_dir = os.path.split(psvol_paths[row_id])[0]
        # disperse_io.save_numpy(st_part, parts_dir + '/tm_particle_' + str(row_id) + '.mrc')
        # disperse_io.save_numpy(st_part_sp, parts_dir + '/tm_particle_sp_' + str(row_id) + '.mrc')
        # disperse_io.save_numpy(st_temp, parts_dir + '/tm_model_' + str(row_id) + '.mrc')
        # disperse_io.save_numpy(st_conv, parts_dir + '/tm_conv_' + str(row_id) + '.mrc')
        print('Process: ' + str(os.path.split(psvol_paths[row_id])[1]) + ', Particle: ' + str(count) + ' of ' + str(len(row_ids)) + ', ncc: ' + str(max_ncc))
        count += 1
    # Worker processes terminate reporting their ID as exit status
    if pr_id < 0:
        return
    else:
        sys.exit(pr_id)
# Process for template matching by Z-axis averaging on STAR file particles
# pr_id: process ID
# temp: template subvolume (same size as particles)
# mask: smooth borders mask (same size as particles)
# row_ids: STAR files row IDs to process
# dstar: STAR file dictionary with its data
# csvol_paths: list of paths of CTF subvolumes
# shifts_arr: array with space shiftings
# shared_ncc: shared array to store the computed NCC scores
def pr_star_tm_za(pr_id, row_ids, temp, mask, dstar, shifts_arr, shared_ncc):
# Initialization
averager = RadialAvg3D(mask.shape, axis='z')
svol_sp = np.asarray(temp.shape, dtype=np.int)
svol_sp2 = int(.5 * svol_sp[0])
svol_cent = np.asarray((svol_sp2, svol_sp2, svol_sp2), dtype=np.float32)
psvol_paths = dstar['_rlnImageName']
try:
csvol_paths = dstar['_rlnCtfImage']
except ValueError:
csvol_paths = None
try:
offs_x = dstar['_rlnOriginX']
except ValueError:
offs_x = np.zeros(shape=len(psvol_paths), dtype=np.float32)
try:
offs_y = dstar['_rlnOriginY']
except ValueError:
offs_y = np.zeros(shape=len(psvol_paths), dtype=np.float32)
try:
offs_z = dstar['_rlnOriginZ']
except ValueError:
offs_z = np.zeros(shape=len(psvol_paths), dtype=np.float32)
try:
offs_rot = dstar['_rlnAngleRot']
except ValueError:
offs_rot | |
<filename>experiments/data_source.py
"""
Loading and preparing data for experiments. Some of the datasets are generated using the Matlab
code (see data/matlab_code_data), in order to ensure they are the same training and test points
that were used in the previous paper (Nguyen and Bonilla NIPS (2014)).
The Matlab code to generate data is ``load_data.m``.
"""
import _pickle as cPickle
import pickle
import gzip
import os
import subprocess
import GPy
import numpy as np
import pandas
from savigp.kernel import ExtRBF
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
WISC_DIR = os.path.join(DATA_DIR, 'wisconsin_cancer')
USPS_DIR = os.path.join(DATA_DIR, 'USPS')
MINING_DIR = os.path.join(DATA_DIR, 'mining')
BOSTON_DIR = os.path.join(DATA_DIR, 'boston_housing')
CREEP_DIR = os.path.join(DATA_DIR, 'creep')
SARCOS_DIR = os.path.join(DATA_DIR, 'sarcos')
MNIST_DIR = os.path.join(DATA_DIR, 'mnist')
MNIST8M_DIR = os.path.join(DATA_DIR, 'infimnist')
AIRLINE_DIR = os.path.join(DATA_DIR, 'airline')
SEISMIC_DIR = os.path.join(DATA_DIR, 'seismic')
def normal_generate_samples(n_samples, var, input_dim=3):
    """Draw synthetic regression samples from a GP prior (RBF + white kernel).

    Inputs are sampled uniformly from [-1, 1]^input_dim and sorted along axis 0;
    each of the ``var.shape[0]`` outputs is an independent draw from the prior.
    Returns ``(X, y, rbf_kernel)``.
    """
    inputs = np.random.uniform(low=-1.0, high=1.0, size=(n_samples, input_dim))
    inputs.sort(axis=0)
    lengthscales = np.array(np.random.uniform(low=0.1, high=3.0, size=input_dim))
    rbf = ExtRBF(input_dim, variance=0.5, lengthscale=lengthscales, ARD=True)
    white = GPy.kern.White(input_dim, variance=var[0, 0])
    combined = rbf + white
    cov = combined.K(inputs)
    outputs = np.empty((n_samples, var.shape[0]))
    zero_mean = np.zeros(n_samples)
    for out_idx in range(var.shape[0]):
        outputs[:, out_idx] = np.random.multivariate_normal(zero_mean, cov)
    return inputs, outputs, rbf
def wisconsin_breast_cancer_data():
    """
    Loads and returns data of Wisconsin breast cancer dataset. Note that ``X`` is standardized.

    Returns
    -------
    data : list
        a list of length = 5, where each element is a dictionary which contains ``train_outputs``,
        ``train_inputs``, ``test_outputs``, ``test_inputs``, and ``id``

    Notes
    -----
    Data is directly imported from the Matlab code for AVIGP paper.

    References
    ----------
    * Mangasarian OL, Street WN, Wolberg WH. Breast cancer diagnosis and prognosis via linear
      programming. Oper Res. 1995;43(4);570-7
    """
    data = []
    for i in range(1, 6):
        train = pandas.read_csv(os.path.join(WISC_DIR, 'train_' + str(i) + '.csv'), header=None)
        test = pandas.read_csv(os.path.join(WISC_DIR, 'test_' + str(i) + '.csv'), header=None)
        # BUG FIX: .ix was removed in pandas 1.0; with header=None the columns
        # are a positional integer RangeIndex, so .iloc is an exact replacement.
        data.append({
            'train_outputs': train.iloc[:, 0].values[:, np.newaxis],
            'train_inputs': train.iloc[:, 1:].values,
            'test_outputs': test.iloc[:, 0].values[:, np.newaxis],
            'test_inputs': test.iloc[:, 1:].values,
            'id': i
        })
    return data
def usps_data():
    """
    Loads and returns data of USPS dataset. Note that ``X`` is standardized. Only digits 4, 7,
    and 9 are included.

    Returns
    -------
    data : list
        A list of length = 5, where each element is a dictionary which contains ``train_outputs``,
        ``train_inputs``, ``test_outputs``, ``test_inputs``, and ``id``

    References
    ----------
    * <NAME>, <NAME>. {G}aussian processes for machine learning. The MIT Press; 2006.
    Data is imported from the Matlab code.
    """
    data = []
    for i in range(1, 6):
        train = pandas.read_csv(os.path.join(USPS_DIR, 'train_' + str(i) + '.csv'), header=None)
        test = pandas.read_csv(os.path.join(USPS_DIR, 'test_' + str(i) + '.csv'), header=None)
        # BUG FIX: .ix was removed in pandas 1.0. Its label slice 0:2 on an
        # integer RangeIndex was endpoint-INCLUSIVE (columns 0, 1, 2 - the
        # three one-hot label columns), so the .iloc equivalent is 0:3.
        data.append({
            'train_outputs': train.iloc[:, 0:3].values,
            'train_inputs': train.iloc[:, 3:].values,
            'test_outputs': test.iloc[:, 0:3].values,
            'test_inputs': test.iloc[:, 3:].values,
            'id': i
        })
    return data
def mining_data():
    """
    Loads and returns data of Coal-mining disasters dataset. See 'get_mine_data.m' to see how data
    is generated.

    Returns
    -------
    data : list
        A list of length = 1, where each element is a dictionary which contains ``train_outputs``,
        ``train_inputs``, ``test_outputs``, ``test_inputs``, and ``id``. Training and test points are the same.

    References
    ----------
    * <NAME>. A note on the intervals between coal-mining disasters. Biometrika.
    1979;66(1):191-3.
    """
    data = []
    train = pandas.read_csv(os.path.join(MINING_DIR, 'data.csv'), header=None)
    # BUG FIX: .ix was removed in pandas 1.0; the columns are positional
    # integers (header=None), so .iloc is an exact replacement.
    data.append({
        'train_outputs': train.iloc[:, 0].values[:, np.newaxis],
        'train_inputs': train.iloc[:, 1].values[:, np.newaxis],
        'test_outputs': train.iloc[:, 0].values[:, np.newaxis],
        'test_inputs': train.iloc[:, 1].values[:, np.newaxis],
        'id': 1
    })
    return data
def boston_data():
    """
    Loads and returns data of Boston housing dataset. Note data ``X`` is standardized.

    Returns
    -------
    data : list
        A list of length = 5, where each element is a dictionary which contains
        ``train_outputs``, ``train_inputs``, ``test_outputs``, ``test_inputs``, and ``id``

    References
    ----------
    * <NAME>, <NAME>. Hedonic housing prices and the demand for clean air.
    J Environ Econ Manage. 1978;5(1):81-102.
    """
    data = []
    for i in range(1, 6):
        train = pandas.read_csv(os.path.join(BOSTON_DIR, 'train_' + str(i) + '.csv'), header=None)
        test = pandas.read_csv(os.path.join(BOSTON_DIR, 'test_' + str(i) + '.csv'), header=None)
        # BUG FIX: .ix was removed in pandas 1.0; with header=None the columns
        # are a positional integer RangeIndex, so .iloc is an exact replacement.
        data.append({
            'train_outputs': train.iloc[:, 0].values[:, np.newaxis],
            'train_inputs': train.iloc[:, 1:].values,
            'test_outputs': test.iloc[:, 0].values[:, np.newaxis],
            'test_inputs': test.iloc[:, 1:].values,
            'id': i
        })
    return data
def abalone_data():
    """
    Loads and returns data of Abalone dataset.

    Returns
    -------
    data : list
        A list of length = 5, where each element is a dictionary which contains ``train_outputs``,
        ``train_inputs``, ``test_outputs``, ``test_inputs``, and ``id``

    References
    ----------
    * <NAME>, <NAME>. {UCI} Machine Learning Repository [Internet]. 2013.
    Available from: http://archive.ics.uci.edu/ml
    """
    # BUG FIX: ABALONE_DIR was never defined at module level (NameError at
    # call time); derive it from DATA_DIR like the other dataset directories.
    abalone_dir = os.path.join(DATA_DIR, 'abalone')
    data = []
    # NOTE(review): range(5, 11) yields six splits although the docstring says
    # five - confirm against the on-disk split files.
    for i in range(5, 11):
        train = pandas.read_csv(os.path.join(abalone_dir, 'train_' + str(i) + '.csv'), header=None)
        test = pandas.read_csv(os.path.join(abalone_dir, 'test_' + str(i) + '.csv'), header=None)
        # BUG FIX: .ix was removed in pandas 1.0; .iloc is the positional
        # replacement (header=None gives an integer RangeIndex).
        data.append({
            'train_outputs': train.iloc[:, 0].values[:, np.newaxis],
            'train_inputs': train.iloc[:, 1:].values,
            'test_outputs': test.iloc[:, 0].values[:, np.newaxis],
            'test_inputs': test.iloc[:, 1:].values,
            'id': i
        })
    return data
def creep_data():
    """
    Loads and returns data of Creep dataset.

    Returns
    -------
    data : list
        A list of length = 5, where each element is a dictionary which contains ``train_outputs``,
        ``train_inputs``, ``test_outputs``, ``test_inputs``, and ``id``

    References
    ----------
    * <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
    Modelling creep rupture strength of ferritic steel welds. Sci Technol Weld Join. 2000;5(2):81-9.
    """
    data = []
    for i in range(1, 6):
        train = pandas.read_csv(os.path.join(CREEP_DIR, 'train_' + str(i) + '.csv'), header=None)
        test = pandas.read_csv(os.path.join(CREEP_DIR, 'test_' + str(i) + '.csv'), header=None)
        # BUG FIX: .ix was removed in pandas 1.0; with header=None the columns
        # are a positional integer RangeIndex, so .iloc is an exact replacement.
        data.append({
            'train_outputs': train.iloc[:, 0].values[:, np.newaxis],
            'train_inputs': train.iloc[:, 1:].values,
            'test_outputs': test.iloc[:, 0].values[:, np.newaxis],
            'test_inputs': test.iloc[:, 1:].values,
            'id': i
        })
    return data
def mnist_data():
    """
    Loads and returns data of MNIST dataset for all digits.

    Returns
    -------
    data : list
        A list of length = 1, where each element is a dictionary which contains ``train_outputs``,
        ``train_inputs``, ``test_outputs``, ``test_inputs``, and ``id``

    References
    ----------
    * Data is imported from this project: http://deeplearning.net/tutorial/gettingstarted.html
    """
    dataset = os.path.join(MNIST_DIR, 'mnist.pkl.gz')
    # Download the MNIST dataset if it is not present. Checking for the file
    # itself (not only the directory) also recovers from a previously
    # interrupted download where the directory exists but the file does not.
    if not os.path.isfile(dataset):
        os.makedirs(MNIST_DIR, exist_ok=True)
        # BUG FIX: urllib.urlretrieve is Python 2 only; in Python 3 the
        # function lives in urllib.request.
        from urllib.request import urlretrieve
        origin = (
            'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
        )
        print('Downloading data from {}'.format(origin))
        urlretrieve(origin, dataset)
    print('... loading data')
    # Load the dataset (latin1 is required to read the Python 2 pickle)
    with gzip.open(dataset, 'rb') as data_file:
        train_set, valid_set, test_set = pickle.load(data_file, encoding='latin1')
    # One-hot encode the labels for the 10 digit classes
    test_outputs = np.zeros((test_set[1].shape[0], 10))
    test_outputs[np.arange(test_set[1].shape[0]), test_set[1]] = 1
    train_outputs = np.zeros((train_set[1].shape[0], 10))
    train_outputs[np.arange(train_set[1].shape[0]), train_set[1]] = 1
    validation_Y = np.zeros((valid_set[1].shape[0], 10))
    validation_Y[np.arange(valid_set[1].shape[0]), valid_set[1]] = 1
    data = []
    # The validation split is folded into the training set
    data.append({
        'train_outputs': np.vstack((train_outputs, validation_Y)),
        'train_inputs': np.vstack((train_set[0], valid_set[0])),
        'test_outputs': test_outputs,
        'test_inputs': test_set[0],
        'id': 0
    })
    return data
def mnist8m_data():
    """
    Loads and returns data of MNIST8M dataset for all digits.

    Returns
    -------
    data : list
        A list of length = 1, where each element is a dictionary which contains ``train_outputs``,
        ``train_inputs``, ``test_outputs``, ``test_inputs``, and ``id``

    References
    ----------
    * Data is imported from this project: http://deeplearning.net/tutorial/gettingstarted.html
    """
    # Delegate all file handling to the importer; just package its outputs.
    images_tr, labels_tr, images_te, labels_te = import_mnist8m()
    return [{
        'train_outputs': labels_tr,
        'train_inputs': images_tr,
        'test_outputs': labels_te,
        'test_inputs': images_te,
        'id': 0
    }]
def mnist_binary_data():
    """
    Loads and returns data of MNIST dataset for all digits.

    Labels are collapsed to a single binary column: -1 for even digits and
    +1 for odd digits.

    Returns
    -------
    data : list
        A list of length = 1, where each element is a dictionary which contains ``train_outputs``,
        ``train_inputs``, ``test_outputs``, ``test_inputs``, and ``id``

    References
    ----------
    * Data is imported from this project: http://deeplearning.net/tutorial/gettingstarted.html
    """
    data = mnist_data()
    # Dotting the one-hot rows with alternating -1/+1 weights yields
    # (sum of odd-class entries) - (sum of even-class entries), i.e. -1/+1.
    signs = np.tile([-1, 1], 5)
    for key in ('train_outputs', 'test_outputs'):
        data[0][key] = data[0][key].dot(signs).astype(int)[:, np.newaxis]
    return data
def sarcos_data():
    """
    Loads and returns data of SARCOS dataset for joints 4 and 7. Note that ``X`` is standardized.

    Returns
    -------
    data : list
        A list of length = 1, where each element is a dictionary which contains ``train_outputs``,
        ``train_inputs``, ``test_outputs``, ``test_inputs``, and ``id``

    References
    ----------
    * Data is originally from this website: http://www.gaussianprocess.org/gpml/data/.
      The data is directly imported from the Matlab code on Gaussian process networks.
      The Matlab code to generate data is 'data/matlab_code_data/sarcos.m'.
    """
    train = pandas.read_csv(os.path.join(SARCOS_DIR, 'train_.csv'), header=None)
    test = pandas.read_csv(os.path.join(SARCOS_DIR, 'test_.csv'), header=None)
    # ``.iloc`` replaces the deprecated ``.ix`` indexer (removed in pandas >= 1.0).
    # With header=None the columns are integer labels, so ``.ix[:, 0:1]`` was a
    # label-based *inclusive* slice (columns 0 and 1) -> ``.iloc[:, 0:2]`` here.
    # Columns 0-1 hold the two target joints; the remaining columns are inputs.
    data = []
    data.append({
        'train_outputs': train.iloc[:, 0:2].values,
        'train_inputs': train.iloc[:, 2:].values,
        'test_outputs': test.iloc[:, 0:2].values,
        'test_inputs': test.iloc[:, 2:].values,
        'id': 0
    })
    return data
def sarcos_all_joints_data():
"""
Loads and returns data of SARCOS dataset for all joints.
Returns
-------
data : list
A list of length = 1, where each element is a dictionary which contains ``train_outputs``,
``train_inputs``, ``test_outputs``, ``test_inputs``, and ``id``
References
----------
* Data is originally from this website: http://www.gaussianprocess.org/gpml/data/.
The data here is directly imported from the Matlab code on Gaussian process networks.
The Matlab code to generate data is 'data/matlab_code_data/sarcos.m'
"""
data = []
train = pandas.read_csv(os.path.join(SARCOS_DIR, 'train_all' | |
# -*- coding: utf-8 -*-
import sys, logging
import numpy as np
from math import ceil
from gseapy.stats import multiple_testing_correction
from joblib import delayed, Parallel
def enrichment_score(gene_list, correl_vector, gene_set, weighted_score_type=1,
                     nperm=1000, seed=None, single=False, scale=False):
    """This is the most important function of GSEApy. It has the same algorithm with GSEA and ssGSEA.

    :param gene_list: The ordered gene list gene_name_list, rank_metric.index.values
    :param gene_set: gene_sets in gmt file, please use gsea_gmt_parser to get gene_set.
    :param weighted_score_type: It's the same with gsea's weighted_score method. Weighting by the correlation
        is a very reasonable choice that allows significant gene sets with less than perfect coherence.
        options: 0(classic),1,1.5,2. default:1. if one is interested in penalizing sets for lack of
        coherence or to discover sets with any type of nonrandom distribution of tags, a value p < 1
        might be appropriate. On the other hand, if one uses sets with large number of genes and only
        a small subset of those is expected to be coherent, then one could consider using p > 1.
        Our recommendation is to use p = 1 and use other settings only if you are very experienced
        with the method and its behavior.
    :param correl_vector: A vector with the correlations (e.g. signal to noise scores) corresponding to the genes in
        the gene list. Or rankings, rank_metric.values
    :param nperm: Only use this parameter when computing esnull for statistical testing. Set the esnull value
        equal to the permutation number.
    :param seed: Random state for initializing gene list shuffling. Default: seed=None
    :param single: If True, use the ssGSEA algorithm (sum of the running score) instead of the GSEA extremum.
    :param scale: If True, divide the running score by the number of genes.

    :return:
        ES: Enrichment score (real number between -1 and +1)
        ESNULL: Enrichment score calculated from random permutations.
        Hits_Indices: Index of a gene in gene_list, if gene is included in gene_set.
        RES: Numerical vector containing the running enrichment score for all locations in the gene list .
    """
    N = len(gene_list)
    # Test whether each element of a 1-D array is also present in a second array
    # It's more intuitive here than original enrichment_score source code.
    # use .astype to covert bool to integer
    tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int)  # notice that the sign is 0 (no tag) or 1 (tag)

    if weighted_score_type == 0 :
        # Classic GSEA: unweighted, every gene contributes equally.
        correl_vector = np.repeat(1, N)
    else:
        correl_vector = np.abs(correl_vector)**weighted_score_type

    # get indices of tag_indicator (hit positions in the observed, unshuffled list)
    hit_ind = np.flatnonzero(tag_indicator).tolist()
    # if used for compute esnull, set esnull equal to permutation number, e.g. 1000
    # else just compute enrichment scores
    # set axis to 1, because we have 2D array
    axis = 1
    # Replicate the indicator/correlation rows nperm+1 times; rows 0..nperm-1 become
    # permutations, the last row stays in observed order.
    tag_indicator = np.tile(tag_indicator, (nperm+1,1))
    correl_vector = np.tile(correl_vector,(nperm+1,1))
    # gene list permutation
    rs = np.random.RandomState(seed)
    # Shuffle only the first nperm rows in place; the final row is the real (observed) data.
    for i in range(nperm): rs.shuffle(tag_indicator[i])
    # np.apply_along_axis(rs.shuffle, 1, tag_indicator)

    Nhint = tag_indicator.sum(axis=axis, keepdims=True)
    sum_correl_tag = np.sum(correl_vector*tag_indicator, axis=axis, keepdims=True)
    # compute ES score, the code below is identical to gsea enrichment_score method.
    no_tag_indicator = 1 - tag_indicator
    Nmiss = N - Nhint
    # Normalizers: hit increments sum to +1 over the list, miss decrements sum to -1.
    norm_tag = 1.0/sum_correl_tag
    norm_no_tag = 1.0/Nmiss

    # Running enrichment score: cumulative (weighted hits - uniform misses) per row.
    RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag, axis=axis)

    if scale: RES = RES / N
    if single:
        # ssGSEA statistic: area under the running score.
        es_vec = RES.sum(axis=axis)
    else:
        # GSEA statistic: the deviation from zero with the largest magnitude.
        max_ES, min_ES = RES.max(axis=axis), RES.min(axis=axis)
        es_vec = np.where(np.abs(max_ES) > np.abs(min_ES), max_ES, min_ES)
    # extract values: last row is the observed ES, preceding rows form the null distribution.
    es, esnull, RES = es_vec[-1], es_vec[:-1], RES[-1,:]

    return es, esnull, hit_ind, RES
def enrichment_score_tensor(gene_mat, cor_mat, gene_sets, weighted_score_type, nperm=1000,
                            seed=None, single=False, scale=False):
    """Next generation algorithm of GSEA and ssGSEA. Works for 3d array

    :param gene_mat: the ordered gene list(vector) with or without gene indices matrix.
    :param cor_mat: correlation vector or matrix  (e.g. signal to noise scores)
        corresponding to the genes in the gene list or matrix.
    :param dict gene_sets: gmt file dict.
    :param float weighted_score_type: weighting by the correlation.
        options: 0(classic), 1, 1.5, 2. default:1 for GSEA and 0.25 for ssGSEA.
    :param int nperm: permutation times.
    :param bool scale: If True, normalize the scores by number of genes_mat.
    :param bool single: If True, use ssGSEA algorithm, otherwise use GSEA.
    :param seed: Random state for initialize gene list shuffling.
        Default: seed=None

    :return: a tuple contains::

        | ES: Enrichment score (real number between -1 and +1), for ssGSEA, set scale eq to True.
        | ESNULL: Enrichment score calculated from random permutation.
        | Hits_Indices: Indices of genes if genes are included in gene_set.
        | RES: The running enrichment score for all locations in the gene list.
    """
    rs = np.random.RandomState(seed)
    # gene_mat -> 1d: prerank, ssSSEA or 2d: GSEA
    # Sort gene-set names so the output row order is deterministic.
    keys = sorted(gene_sets.keys())

    if weighted_score_type == 0:
        # don't bother doing calcuation, just set to 1
        cor_mat = np.ones(cor_mat.shape)
    elif weighted_score_type > 0:
        pass
    else:
        logging.error("Using negative values of weighted_score_type, not allowed")
        raise ValueError("weighted_score_type should be postive numerics")

    cor_mat = np.abs(cor_mat)
    if cor_mat.ndim ==1:
        # ssGSEA or Prerank
        # genestes->M, genes->N, perm-> axis=2
        N, M = len(gene_mat), len(keys)
        # generate gene hits matrix
        # for 1d ndarray of gene_mat, set assume_unique=True,
        # means the input arrays are both assumed to be unique,
        # which can speed up the calculation.
        tag_indicator = np.vstack([np.in1d(gene_mat, gene_sets[key], assume_unique=True) for key in keys])
        tag_indicator = tag_indicator.astype(int)
        # index of hits
        hit_ind = [ np.flatnonzero(tag).tolist() for tag in tag_indicator ]
        # generate permutated hits matrix
        perm_tag_tensor = np.repeat(tag_indicator, nperm+1).reshape((M,N,nperm+1))
        # shuffle matrix, last matrix is not shuffled when nperm > 0
        # (the unshuffled last slice carries the observed data).
        if nperm: np.apply_along_axis(lambda x: np.apply_along_axis(rs.shuffle,0,x),1, perm_tag_tensor[:,:,:-1])
        # missing hits
        no_tag_tensor = 1 - perm_tag_tensor
        # calculate numerator, denominator of each gene hits
        rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,np.newaxis])** weighted_score_type
    elif cor_mat.ndim == 2:
        # GSEA
        # 2d ndarray, gene_mat and cor_mat are shuffled already
        # reshape matrix
        cor_mat = cor_mat.T
        # gene_mat is a tuple contains (gene_name, permuate_gene_name_indices)
        genes, genes_ind = gene_mat
        # genestes->M, genes->N, perm-> axis=2
        # don't use assume_unique=True in 2d array when use np.isin().
        # elements in gene_mat are not unique, or will cause unwanted results
        # NOTE(review): the comment above warns against assume_unique=True here,
        # yet the call below passes it — confirm genes are guaranteed unique.
        tag_indicator = np.vstack([np.in1d(genes, gene_sets[key], assume_unique=True) for key in keys])
        tag_indicator = tag_indicator.astype(int)
        # Expand the per-gene indicator along the precomputed permutation indices.
        perm_tag_tensor = np.stack([tag.take(genes_ind).T for tag in tag_indicator], axis=0)
        #index of hits (taken from the unshuffled last slice)
        hit_ind = [ np.flatnonzero(tag).tolist() for tag in perm_tag_tensor[:,:,-1] ]
        # nohits
        no_tag_tensor = 1 - perm_tag_tensor
        # calculate numerator, denominator of each gene hits
        rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,:])** weighted_score_type
    else:
        logging.error("Program die because of unsupported input")
        raise ValueError("Correlation vector or matrix (cor_mat) is not supported")

    # Nhint = tag_indicator.sum(1)
    # Nmiss = N - Nhint
    # Accumulate the running score along the gene axis.
    axis=1
    P_GW_denominator = np.sum(rank_alpha, axis=axis, keepdims=True)
    P_NG_denominator = np.sum(no_tag_tensor, axis=axis, keepdims=True)
    REStensor = np.cumsum(rank_alpha / P_GW_denominator - no_tag_tensor / P_NG_denominator, axis=axis)
    # ssGSEA: scale es by gene numbers ?
    # https://gist.github.com/gaoce/39e0907146c752c127728ad74e123b33
    if scale: REStensor = REStensor / len(gene_mat)
    if single:
        #ssGSEA: area under the running enrichment score
        esmatrix = REStensor.sum(axis=axis)
    else:
        #GSEA: largest-magnitude deviation of the running score
        esmax, esmin = REStensor.max(axis=axis), REStensor.min(axis=axis)
        esmatrix = np.where(np.abs(esmax)>np.abs(esmin), esmax, esmin)

    # Last permutation slice is the observed statistic; the rest form the null.
    es, esnull, RES = esmatrix[:,-1], esmatrix[:,:-1], REStensor[:,:,-1]

    return es, esnull, hit_ind, RES
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes,
ascending, seed=None, skip_last=False):
"""Build shuffled ranking matrix when permutation_type eq to phenotype.
Works for 3d array.
:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
1. 'signal_to_noise' (s2n) or 'abs_signal_to_noise' (abs_s2n).
2. 't_test'.
3. 'ratio_of_classes' (also referred to as fold change).
4. 'diff_of_classes'.
5. 'log2_ratio_of_classes'.
:param int permuation_num: how many times of classes is being shuffled
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of
dataframe belongs to what class of phenotype.
:param bool ascending: bool. Sort ascending vs. descending.
:param seed: random_state seed
:param bool skip_last: (internal use only) whether to skip the permutation of the last rankings.
:return:
returns two 2d ndarray with shape (nperm, gene_num).
| cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix.
| cor_mat: sorted and permutated (exclude last row) ranking matrix.
"""
rs = np.random.RandomState(seed)
# S: samples, G: gene number
G, S = exprs.shape
# genes = exprs.index.values
expr_mat = exprs.values.T
perm_cor_tensor = np.tile(expr_mat, (permutation_num,1,1))
if skip_last:
# random shuffle on the first dim, the last matrix (expr_mat) is not shuffled
for arr in perm_cor_tensor[:-1]: rs.shuffle(arr)
else:
for arr in perm_cor_tensor: rs.shuffle(arr)
# metrics
classes = | |
<gh_stars>100-1000
from libsaas.services import base
from .resource import BitlyResource
class User(BitlyResource):
path = 'user'
@base.apimethod
def info(self, login=None, full_name=None):
"""
Return or update information about a user.
:var login: the bitly login of the user whose info to look up.
If not given, the authenticated user will be used.
:vartype login: str
:var full_name: set the users full name value (only available
for the authenticated user).
:vartype full_name: str
"""
params = base.get_params(None, locals())
return self._get('info', params)
@base.apimethod
def link_history(self, link=None, limit=None, offset=None,
created_before=None, created_after=None,
modified_after=None, expand_client_id=None,
archived=None, private=None, user=None):
"""
Returns entries from a user's link history
in reverse chronological order.
:var link the bitly link to return metadata for (when specified,
overrides all other options).
:vartype login: str
:var limit the max number of results to return.
:vartype login: int
:var offset the numbered result at which to start (for pagination).
:vartype offset: int
:var created_before timestamp as an integer unix epoch.
:vartype created_before: int
:var created_after timestamp as an integer unix epoch.
:vartype created_after: int
:var modified_after timestamp as an integer unix epoch.
:vartype modified_after: int
:var expand_client_id whether to provide additional information about
encoding application.
:vartype expand_client_id: bool
:var archived whether to include or exclude archived
history entries. Defaults to 'off'.
:vartype archived: str
:var private whether to include or exclude private
history entries. Defaults to 'both'.
:vartype private: str
:var user: the user for whom to retrieve history entries
(if different from authenticated user).
:vartype user: str
"""
params = base.get_params(None, locals())
return self._get('link_history', params)
@base.apimethod
def network_history(self, limit=None, offset=None,
expand_client_id=None, expand_user=None):
"""
Returns entries from a user's network history
in reverse chronogical order.
:var limit the max number of results to return.
:vartype login: int
:var offset the numbered result at which to start (for pagination).
:vartype offset: int
:var expand_client_id whether to provide additional information about
encoding application.
:vartype expand_client_id: bool
:var expand_user include extra user info in response.
:vartype expand_user: bool
"""
params = base.get_params(None, locals())
return self._get('network_history', params)
@base.apimethod
def tracking_domain_list(self):
"""
Returns a list of tracking domains a user has configured.
"""
params = base.get_params(None, locals())
return self._get('tracking_domain_list', params)
@base.apimethod
def clicks(self, unit=None, units=None, timezone=None,
rollup=None, limit=None, unit_reference_ts=None):
"""
Returns the aggregate number of clicks on all of the
authenticated user's bitly links.
:var unit: timspan: minute, hour, day, week or month.
When unit is minute the maximum value for units is 60.
if` not indicated, defaults to day.
:vartype unit: str
:var units: an integer representing the time units to query data for.
If -1 is passed, it will return all units of time.
:vartype units: int
:var timezone: an integer hour offset from UTC (-14..14) or a timezone
string. If not indicated, defaults to America/New_York.
:vartype timezone: str
:var rollup: returns data for multiple units rolled up to a single
result instead of a separate value for each period of time.
:vartype rollup: bool
:var limit: the number of rows it will return. Default is 100.
:vartype limit: int
:var unit_reference_ts: an epoch timestamp, indicating the most recent
time for which to pull metrics.
If not indicated, it defaults to now.
:vartype unit_reference_ts: int
"""
params = base.get_params(None, locals())
return self._get('clicks', params)
@base.apimethod
def countries(self, unit=None, units=None, timezone=None,
rollup=None, limit=None, unit_reference_ts=None):
"""
Returns aggregate metrics about the countries referring click traffic
to all of the authenticated user's bitly links.
:var unit: timspan: minute, hour, day, week or month.
When unit is minute the maximum value for units is 60.
if` not indicated, defaults to day.
:vartype unit: str
:var units: an integer representing the time units to query data for.
If -1 is passed, it will return all units of time.
:vartype units: int
:var timezone: an integer hour offset from UTC (-14..14) or a timezone
string. If not indicated, defaults to America/New_York.
:vartype timezone: str
:var rollup: returns data for multiple units rolled up to a single
result instead of a separate value for each period of time.
:vartype rollup: bool
:var limit: the number of rows it will return. Default is 100.
:vartype limit: int
:var unit_reference_ts: an epoch timestamp, indicating the most recent
time for which to pull metrics.
If not indicated, it defaults to now.
:vartype unit_reference_ts: int
"""
params = base.get_params(None, locals())
return self._get('countries', params)
@base.apimethod
def popular_links(self, unit=None, units=None, timezone=None,
limit=None, unit_reference_ts=None):
"""
Returns the authenticated user's most-clicked bitly links
(ordered by number of clicks) in a given time period.
:var unit: timspan: minute, hour, day, week or month.
When unit is minute the maximum value for units is 60.
if` not indicated, defaults to day.
:vartype unit: str
:var units: an integer representing the time units to query data for.
If -1 is passed, it will return all units of time.
:vartype units: int
:var timezone: an integer hour offset from UTC (-14..14) or a timezone
string. If not indicated, defaults to America/New_York.
:vartype timezone: str
:var limit: the number of rows it will return. Default is 100.
:vartype limit: int
:var unit_reference_ts: an epoch timestamp, indicating the most recent
time for which to pull metrics.
If not indicated, it defaults to now.
:vartype unit_reference_ts: int
"""
params = base.get_params(None, locals())
return self._get('popular_links', params)
@base.apimethod
def referrers(self, unit=None, units=None, timezone=None,
rollup=None, limit=None, unit_reference_ts=None):
"""
Returns aggregate metrics about the pages referring click traffic
to all of the authenticated user's bitly links.
:var unit: timspan: minute, hour, day, week or month.
When unit is minute the maximum value for units is 60.
if` not indicated, defaults to day.
:vartype unit: str
:var units: an integer representing the time units to query data for.
If -1 is passed, it will return all units of time.
:vartype units: int
:var timezone: an integer hour offset from UTC (-14..14) or a timezone
string. If not indicated, defaults to America/New_York.
:vartype timezone: str
:var rollup: returns data for multiple units rolled up to a single
result instead of a separate value for each period of time.
:vartype rollup: bool
:var limit: the number of rows it will return. Default is 100.
:vartype limit: int
:var unit_reference_ts: an epoch timestamp, indicating the most recent
time for which to pull metrics.
If not indicated, it defaults to now.
:vartype unit_reference_ts: int
"""
params = base.get_params(None, locals())
return self._get('referrers', params)
@base.apimethod
def referring_domains(self, unit=None, units=None, timezone=None,
rollup=None, limit=None, unit_reference_ts=None):
"""
Returns aggregate metrics about the domains referring click traffic
to all of the authenticated user's bitly links
:var unit: timspan: minute, hour, day, week or month.
When unit is minute the maximum value for units is 60.
if` not indicated, defaults to day.
:vartype unit: str
:var units: an integer representing the time units to query data for.
If -1 is passed, it will return all units of time.
:vartype units: int
:var timezone: an integer hour offset from UTC (-14..14) or a timezone
string. If not indicated, defaults to America/New_York.
:vartype timezone: str
:var rollup: returns data for multiple units rolled up to a single
result instead of a separate value for each period of time.
:vartype rollup: bool
:var limit: the number of rows it will return. Default is 100.
:vartype limit: int
:var unit_reference_ts: an epoch timestamp, indicating the most recent
time for which to pull metrics.
If not indicated, it defaults to now.
:vartype unit_reference_ts: int
"""
params = base.get_params(None, locals())
return self._get('referring_domains', params)
@base.apimethod
def share_counts(self, unit=None, units=None, timezone=None,
rollup=None, limit=None, unit_reference_ts=None):
"""
Returns the number of shares by the authenticated user
in a given time period.
:var unit: timspan: minute, hour, day, week or month.
When unit is minute the maximum value for units is 60.
if` not indicated, defaults to day.
:vartype unit: str
:var units: an integer representing the time units to query data for.
If -1 is passed, it will return all units | |
contain a dot.
assert '.' not in produce_method, produce_method
if produce_method not in produce_methods:
produce_methods.append(produce_method)
if self.phase == metadata_base.PipelineRunPhase.FIT:
fit_multi_produce_arguments = self._filter_arguments(step.primitive, 'fit_multi_produce', dict(arguments, produce_methods=produce_methods))
# We fit and produce once, without any limits on iterations/time.
multi_call_result = self._call_primitive_method(primitive.fit_multi_produce, fit_multi_produce_arguments)
if not multi_call_result.has_finished:
# Because we have not set any limits on iterations/time, the primitive should finish and not stop early.
# One should be able to control through a hyper-parameter or hyper-parameters stopping criteria for the primitive.
raise exceptions.InvalidReturnValueError(
"\"fit_multi_produce\" call result should have \"has_finished\" set to true because iterations/time limits were set and the primitive should finish and not stop early.",
)
outputs = multi_call_result.values
elif self.phase == metadata_base.PipelineRunPhase.PRODUCE:
multi_produce_arguments = self._filter_arguments(step.primitive, 'multi_produce', dict(arguments, produce_methods=produce_methods))
# We produce once, without any limits on iterations/time.
multi_call_result = self._call_primitive_method(primitive.multi_produce, multi_produce_arguments)
if not multi_call_result.has_finished:
# Because we have not set any limits on iterations/time, the primitive should finish and not stop early.
# One should be able to control through a hyper-parameter or hyper-parameters stopping criteria for the primitive.
raise exceptions.InvalidReturnValueError(
"\"multi_produce\" call result should have \"has_finished\" set to true because iterations/time limits were set and the primitive should finish and not stop early.",
)
outputs = multi_call_result.values
else:
# TODO: Allow dispatch to a general method so that subclasses of this class can handle them if necessary.
raise exceptions.UnexpectedValueError("Unknown phase: {phase}".format(phase=self.phase))
if self.phase == metadata_base.PipelineRunPhase.FIT:
assert self.steps_state[self.current_step] is None
self.steps_state[self.current_step] = primitive.get_params()
for output_id in produce_methods:
output_data_reference = 'steps.{i}.{output_id}'.format(i=step.index, output_id=output_id)
if output_id in outputs:
self.data_values[output_data_reference] = outputs[output_id]
else:
raise exceptions.InvalidReturnValueError("Missing declared output '{output_id}' in computed primitive's outputs.".format(output_id=output_id))
    def _call_primitive_method(self, method: typing.Callable, arguments: typing.Dict) -> typing.Any:
        """
        Calls a primitive method (or constructor). Records relevant information in pipeline run.

        All logging and stdout/stderr produced during the call is captured into the
        pipeline run via a callback handler; the root logger's configuration is
        temporarily altered for the duration of the call and restored afterwards.

        Parameters
        ----------
        method:
            Primitive's method or constructor to call.
        arguments:
            Arguments to pass to the method.

        Returns
        -------
        The result of calling the method. If method is a constructor,
        returns an instance.
        """
        assert self.pipeline_run is not None

        # A special case for the constructor.
        if inspect.isclass(method):
            method_name = '__init__'
        else:
            method_name = method.__name__

        pipeline_run_method_call_id = self.pipeline_run.add_method_call_to_primitive_step(self.current_step, method_name)

        # The callback routes captured log records into the pipeline run's method-call entry.
        callback = self.pipeline_run.get_method_call_logging_callback(pipeline_run_method_call_id)
        logging_handler = utils.CallbackHandler(callback)

        root = logging.getLogger()
        redirect_logger = logging.getLogger('redirect')

        # Snapshot logging state so it can be restored in the ``finally`` block.
        old_level = root.level
        old_handler_levels = [handler.level for handler in root.handlers]
        old_propagate = redirect_logger.propagate

        try:
            # We are just about to modify the root logger level, so we change levels
            # of all existing handlers to retain same configuration.
            for handler in root.handlers:
                # If existing handler has level already set to something more restrictive than what the
                # root logger has, we do not change that. Otherwise, we set it to the root logger's level.
                if handler.level < old_level:
                    handler.setLevel(old_level)

            # Record all logging which happens during the call.
            root.setLevel(logging.DEBUG)
            root.addHandler(logging_handler)

            # We do not want to print logging from "redirect_logger" because pass-through is enabled, so we
            # disable propagation from it to the root logger (by default there is a stream handler on the root
            # logger which prints all logging) and install our handler directly on the redirect logger.
            redirect_logger.propagate = False
            redirect_logger.addHandler(logging_handler)

            # TODO: All this redirection works in a single thread, what about multi-threaded or async?
            #       Reference engine is single threaded, but maybe a subclass would not be?
            # We redirect all stdout/stderr to logging, but pass it through to stdout/stderr as well.
            with utils.redirect_to_logging(logger=redirect_logger, pass_through=True):
                with utils.global_randomness_warning():
                    self.pipeline_run.method_call_started(pipeline_run_method_call_id)

                    try:
                        result = method(**arguments)
                    except Exception as error:
                        # Record the failure (with traceback) before propagating.
                        self.pipeline_run.method_call_failed(pipeline_run_method_call_id, traceback.format_exc())

                        raise error

                    self.pipeline_run.method_call_successful(pipeline_run_method_call_id)
        finally:
            # Restore original logging configuration.
            # NOTE(review): the index-based restore assumes ``root.handlers`` was not
            # mutated during the call — confirm no primitive adds/removes root handlers.
            root.removeHandler(logging_handler)
            root.setLevel(old_level)
            for i, level in enumerate(old_handler_levels):
                root.handlers[i].setLevel(level)
            # Just to be consistent, if somebody is doing something with the same logger.
            redirect_logger.propagate = old_propagate
            redirect_logger.removeHandler(logging_handler)

        self.pipeline_run.set_method_call_result_metadata(pipeline_run_method_call_id, result)

        return result
def _run_step(self, step: pipeline_module.StepBase) -> None:
if isinstance(step, pipeline_module.PlaceholderStep):
self._run_placeholder(step)
elif isinstance(step, pipeline_module.SubpipelineStep):
self._run_subpipeline(step)
elif isinstance(step, pipeline_module.PrimitiveStep):
self._run_primitive(step)
else:
# TODO: Allow dispatch to a general method so that subclasses of this class can handle them if necessary.
raise exceptions.UnexpectedValueError("Unknown step type: {step_type}".format(step_type=type(step)))
    def _do_run_step(self, step: pipeline_module.StepBase) -> None:
        """
        Runs one step with pipeline-run bookkeeping around it.

        Any exception raised by the step (or the before/after hooks) is recorded
        as a step failure and re-raised wrapped in ``StepFailedError``; otherwise
        the step is marked successful in the pipeline run.
        """
        assert self.pipeline_run is not None

        self.pipeline_run.step_started(self.current_step)
        try:
            self._before_step_run()
            self._run_step(step)
            self._after_step_run()
        except Exception as error:
            # Record the full traceback in the pipeline run before surfacing the error.
            self.pipeline_run.step_failed(self.current_step, traceback.format_exc())

            raise exceptions.StepFailedError(
                "Step {step_index} for pipeline {pipeline_id} failed.".format(
                    step_index=self.current_step, pipeline_id=self.pipeline.id,
                ),
            ) from error

        self.pipeline_run.step_successful(self.current_step)
def _do_run(self) -> None:
for step_index, step in enumerate(self.pipeline.steps):
self.current_step = step_index
self._do_run_step(step)
    def _run(
        self, inputs: typing.Sequence[typing.Any], phase: metadata_base.PipelineRunPhase,
        outputs_to_expose: typing.Optional[typing.Iterable[str]]
    ) -> Result:
        """
        Runs the pipeline for the given phase, returning a ``Result`` with the
        exposed output values, the pipeline run description, and any error.

        Run state is initialized before and cleared after every call; the run is
        recorded as failed or successful on the pipeline run accordingly.
        """
        if outputs_to_expose is None:
            outputs_to_expose = self._get_all_outputs()
        else:
            # We sort to have deterministic order.
            outputs_to_expose = sorted(set(outputs_to_expose))

        outputs_to_expose = self._check_pipeline(inputs, outputs_to_expose)

        self._initialize_run_state(inputs, phase, outputs_to_expose)

        assert self.pipeline_run is not None

        error: typing.Optional[Exception] = None
        values: typing.Dict = {}

        try:
            self.pipeline_run.run_started()

            try:
                self._do_run()
            except Exception as run_error:
                # Record the failure but do not re-raise here: the error is carried
                # to the caller inside the returned Result.
                self.pipeline_run.run_failed(traceback.format_exc())
                error = run_error

            if error is None:
                self.pipeline_run.run_successful()

                self._populate_output_values()

                if self.is_standard_pipeline:
                    self.pipeline_run.set_predictions(self.data_values['outputs.0'])

            # Collect whatever outputs are available, even after a failed run.
            values = self._get_exposed_outputs(error)
        finally:
            # Keep a reference to the pipeline run before clearing run state resets it.
            pipeline_run = self.pipeline_run
            self._clear_run_state()

        return Result(pipeline_run, values, error)
def _get_exposed_outputs(self, error: typing.Optional[Exception]) -> typing.Dict:
outputs = {}
for name in self.outputs_to_expose:
try:
outputs[name] = self.data_values[name]
except KeyError as value_error:
# We try to return whichever outputs we can, even in the case of an error.
if error is None:
raise value_error
return outputs
    def _before_step_run(self) -> None:
        """Hook invoked before each step; a no-op here — presumably an extension point for subclasses."""
        pass
    def _after_step_run(self) -> None:
        """Hook invoked after each step; frees computed values no longer needed by later steps."""
        self._delete_unnecessary_outputs()
def _delete_unnecessary_outputs(self) -> None:
outputs_needed = set()
# Which outputs are explicitly required to be kept until the end?
for output in self.outputs_to_expose:
outputs_needed.add(output)
# Pipeline outputs need step outputs.
for i, output_description in enumerate(self.pipeline.outputs):
if 'outputs.{i}'.format(i=i) in self.outputs_to_expose:
outputs_needed.add(output_description['data'])
# Future steps also need outputs.
for step in self.pipeline.steps[self.current_step + 1:]:
outputs_needed.update(step.get_input_data_references())
# Pipeline run for a standard pipeline needs predictions.
if self.is_standard_pipeline:
outputs_needed.add(self.pipeline.outputs[0]['data'])
# Delete any output which is not needed anymore.
# We iterate over a list so that we can change dict while iterating.
for data_reference in list(self.data_values.keys()):
if data_reference not in outputs_needed:
del self.data_values[data_reference]
@deprecate.arguments('return_values', message="use outputs_to_expose instead")
def fit(
self, inputs: typing.Sequence[typing.Any], *, outputs_to_expose: typing.Iterable[str] = None,
return_values: typing.Iterable[str] = None,
) -> Result:
"""
Does a "fit" phase of the pipeline.
Parameters
----------
inputs:
A list of inputs to the pipeline.
outputs_to_expose:
Data references of all outputs of all steps to return.
Requesting a data reference of an output which would otherwise not be produced
is allowed and it forces that output to be produced, but all inputs necessary
have to be provided to the primitive, otherwise an error is logged and output
is skipped. If ``None``, the outputs of the whole pipeline are returned.
return_values:
DEPRECATED: use ``outputs_to_expose`` instead.
Returns
-------
A result object with kept values, pipeline run description, and any exception.
"""
return self._run(inputs, metadata_base.PipelineRunPhase.FIT, outputs_to_expose or return_values)
@deprecate.arguments('return_values', message="use outputs_to_expose instead")
def produce(
    self, inputs: typing.Sequence[typing.Any], *, outputs_to_expose: typing.Iterable[str] = None,
    return_values: typing.Iterable[str] = None,
) -> Result:
    """
    Does a "produce" phase of the pipeline and returns outputs.

    Parameters
    ----------
    inputs:
        A list of inputs to the pipeline.
    outputs_to_expose:
        Data references of all outputs of all steps to return. Requesting an
        output which would otherwise not be produced forces its production,
        provided all inputs the primitive needs are available; otherwise an
        error is logged and the output is skipped. ``None`` means the outputs
        of the whole pipeline.
    return_values:
        DEPRECATED: use ``outputs_to_expose`` instead.

    Returns
    -------
    A result object with kept values, pipeline run description, and any exception.
    """
    # The deprecated argument is honored only when the new one is not given.
    exposed = outputs_to_expose or return_values
    return self._run(inputs, metadata_base.PipelineRunPhase.PRODUCE, exposed)
def get_params(self) -> typing.List[typing.Union[typing.Any, typing.List]]:
    """Return the per-step parameters captured during a run (nested lists for sub-pipelines)."""
    return self.steps_state
def set_params(self, params: typing.List[typing.Union[typing.Any, typing.List]]) -> None:
    """Restore per-step parameters, resetting any previous run state.

    Parameters
    ----------
    params:
        A list of per-step parameters (nested lists for sub-pipelines).

    Raises
    ------
    InvalidArgumentValueError:
        If ``params`` is not a list.
    """
    # ``isinstance`` against ``typing.List`` is deprecated (and rejected on
    # newer Python versions); check against the concrete ``list`` type.
    if not isinstance(params, list):
        raise exceptions.InvalidArgumentValueError("Parameters not a list.")
    self._clear_run_state()
    self.steps_state = params
def _populate_output_values(self) -> None:
    """Mirror available step outputs into their ``outputs.N`` data references."""
    for index, output_description in enumerate(self.pipeline.outputs):
        source = output_description['data']
        # An output may be absent when the run was not asked to expose it.
        if source in self.data_values:
            self.data_values['outputs.{i}'.format(i=index)] = self.data_values[source]
@classmethod
def _normalize_dataset_id(cls, dataset_id: str) -> str:
    # Strip the portion matched by the module-level DATASET_ID_REGEX so that
    # variants of the same dataset ID compare as equal.
    # NOTE(review): exact semantics depend on DATASET_ID_REGEX's pattern
    # (presumably a version/suffix marker) — confirm against its definition.
    return DATASET_ID_REGEX.sub('', dataset_id)
@classmethod
def _dataset_ids_match(cls, first_dataset_id: str, second_dataset_id: str) -> bool:
    """Return ``True`` when two dataset IDs are equal, directly or after normalization."""
    if first_dataset_id == second_dataset_id:
        return True
    # Fall back to comparing the normalized forms.
    return cls._normalize_dataset_id(first_dataset_id) == cls._normalize_dataset_id(second_dataset_id)
@classmethod
def _mark_columns(cls, problem_inputs: typing.Sequence[typing.Dict], | |
# Source file: ionoscloud/api/network_load_balancers_api.py (repo: ionos-cloud/ionos-cloud-sdk-python)
from __future__ import absolute_import
import re # noqa: F401
import six
from ionoscloud.api_client import ApiClient
from ionoscloud.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class NetworkLoadBalancersApi(object):
def __init__(self, api_client=None):
    """Bind the API client used for all requests, creating a default one if none is given."""
    self.api_client = api_client if api_client is not None else ApiClient()
def datacenters_networkloadbalancers_delete(self, datacenter_id, network_load_balancer_id, **kwargs):  # noqa: E501
    """Delete Network Load Balancers  # noqa: E501

    Remove the specified Network Load Balancer from the data center.
    Synchronous by default; pass ``async_req=True`` to get back a thread
    whose ``get()`` yields the result.

    >>> thread = api.datacenters_networkloadbalancers_delete(datacenter_id, network_load_balancer_id, async_req=True)
    >>> result = thread.get()

    :param datacenter_id: The unique ID of the data center. (required)
    :type datacenter_id: str
    :param network_load_balancer_id: The unique ID of the Network Load Balancer. (required)
    :type network_load_balancer_id: str
    :param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
    :type pretty: bool
    :param depth: Controls the detail depth of the response objects (0 = only direct properties, up to 10).
    :type depth: int
    :param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
    :type x_contract_number: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. One number is
                             the total timeout; a (connection, read) tuple
                             sets both individually.
    :return: Returns the result object, or the request thread when called
             asynchronously.
    :rtype: None
    """
    # This convenience wrapper only surfaces the response body.
    kwargs['_return_http_data_only'] = True
    return self.datacenters_networkloadbalancers_delete_with_http_info(datacenter_id, network_load_balancer_id, **kwargs)  # noqa: E501
def datacenters_networkloadbalancers_delete_with_http_info(self, datacenter_id, network_load_balancer_id, **kwargs):  # noqa: E501
    """Delete Network Load Balancers  # noqa: E501

    Remove the specified Network Load Balancer from the data center.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.datacenters_networkloadbalancers_delete_with_http_info(datacenter_id, network_load_balancer_id, async_req=True)
    >>> result = thread.get()

    :param datacenter_id: The unique ID of the data center. (required)
    :type datacenter_id: str
    :param network_load_balancer_id: The unique ID of the Network Load Balancer. (required)
    :type network_load_balancer_id: str
    :param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
    :type pretty: bool
    :param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
    :type depth: int
    :param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
    :type x_contract_number: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Snapshot the named arguments plus **kwargs for uniform validation below.
    local_var_params = locals()

    # Endpoint-specific parameters accepted by this operation.
    all_params = [
        'datacenter_id',
        'network_load_balancer_id',
        'pretty',
        'depth',
        'x_contract_number'
    ]
    # Generic client-level options accepted by every operation.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            'response_type',
            'query_params'
        ]
    )

    # Reject any keyword argument this endpoint does not recognize.
    for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
        if local_var_params_key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method datacenters_networkloadbalancers_delete" % local_var_params_key
            )
        local_var_params[local_var_params_key] = local_var_params_val
    del local_var_params['kwargs']
    # verify the required parameter 'datacenter_id' is set
    if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['datacenter_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_networkloadbalancers_delete`")  # noqa: E501
    # verify the required parameter 'network_load_balancer_id' is set
    if self.api_client.client_side_validation and ('network_load_balancer_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['network_load_balancer_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `network_load_balancer_id` when calling `datacenters_networkloadbalancers_delete`")  # noqa: E501

    # Client-side range check: depth must be within [0, 10].
    if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_networkloadbalancers_delete`, must be a value less than or equal to `10`")  # noqa: E501
    if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_networkloadbalancers_delete`, must be a value greater than or equal to `0`")  # noqa: E501
    collection_formats = {}

    # Path templating parameters ({datacenterId}, {networkLoadBalancerId}).
    path_params = {}
    if 'datacenter_id' in local_var_params:
        path_params['datacenterId'] = local_var_params['datacenter_id']  # noqa: E501
    if 'network_load_balancer_id' in local_var_params:
        path_params['networkLoadBalancerId'] = local_var_params['network_load_balancer_id']  # noqa: E501

    # Optional query-string parameters (caller-supplied extras come first).
    query_params = list(local_var_params.get('query_params', {}).items())
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'depth' in local_var_params and local_var_params['depth'] is not None:  # noqa: E501
        query_params.append(('depth', local_var_params['depth']))  # noqa: E501

    header_params = {}
    if 'x_contract_number' in local_var_params:
        header_params['X-Contract-Number'] = local_var_params['x_contract_number']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Basic Authentication', 'Token Authentication']  # noqa: E501

    # A DELETE normally has no response body; allow callers to override.
    response_type = None
    if 'response_type' in kwargs:
        response_type = kwargs['response_type']

    return self.api_client.call_api(
        '/datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=response_type,  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def datacenters_networkloadbalancers_find_by_network_load_balancer_id(self, datacenter_id, network_load_balancer_id, **kwargs):  # noqa: E501
    """Retrieve Network Load Balancers  # noqa: E501

    Retrieve the properties of the specified Network Load Balancer within
    the data center. Synchronous by default; pass ``async_req=True`` to get
    back a thread whose ``get()`` yields the result.

    >>> thread = api.datacenters_networkloadbalancers_find_by_network_load_balancer_id(datacenter_id, network_load_balancer_id, async_req=True)
    >>> result = thread.get()

    :param datacenter_id: The unique ID of the data center. (required)
    :type datacenter_id: str
    :param network_load_balancer_id: The unique ID of the Network Load Balancer. (required)
    :type network_load_balancer_id: str
    :param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
    :type pretty: bool
    :param depth: Controls the detail depth of the response objects (0 = only direct properties, up to 10).
    :type depth: int
    :param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
    :type x_contract_number: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. One number is
                             the total timeout; a (connection, read) tuple
                             sets both individually.
    :return: Returns the result object, or the request thread when called
             asynchronously.
    :rtype: NetworkLoadBalancer
    """
    # This convenience wrapper only surfaces the response body.
    kwargs['_return_http_data_only'] = True
    return self.datacenters_networkloadbalancers_find_by_network_load_balancer_id_with_http_info(datacenter_id, network_load_balancer_id, **kwargs)  # noqa: E501
def datacenters_networkloadbalancers_find_by_network_load_balancer_id_with_http_info(self, datacenter_id, network_load_balancer_id, **kwargs): # noqa: E501
"""Retrieve Network Load Balancers # noqa: E501
Retrieve the properties of the specified Network Load Balancer within the data center. # noqa: E501
This method makes a synchronous HTTP request by default. To make | |
# Copyright (c) Microsoft Corporation
# Copyright (c) Tsinghua University
# Copyright (c) Peng Cheng Laboratory
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import logging
import sys
import multiprocessing
import nni
from nni.networkmorphism_tuner.graph import json_to_graph
import json
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim as optim
import torchvision
from torch.utils.data.distributed import DistributedSampler
from distributed_utils import dist_init, average_gradients, DistModule
import utils
import time
import datetime
import zmq
from nni.env_vars import trial_env_vars
import json
import os
import random
import yaml
from hyperopt import fmin, tpe, hp
import nni.hyperopt_tuner.hyperopt_tuner as TPEtuner
# Shared log line format: timestamp followed by the message.
log_format = "%(asctime)s %(message)s"
# Append all INFO-and-above records to a per-working-directory log file.
logging.basicConfig(
    filename="networkmorphism.log",
    filemode="a",
    level=logging.INFO,
    format=log_format,
    datefmt="%m/%d %I:%M:%S %p",
)
# pylint: disable=W0603
# Module-level logger used throughout this trial script.
logger = logging.getLogger("cifar10-network-morphism-pytorch")
def get_args():
    """Parse and return the command-line arguments for a CIFAR-10 trial."""
    parser = argparse.ArgumentParser("cifar10")
    parser.add_argument("--batch_size", type=int, default=128, help="batch size")
    parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer")
    parser.add_argument("--epochs", type=int, default=5, help="epoch limit")
    parser.add_argument("--learning_rate", type=float, default=0.001, help="learning rate")
    parser.add_argument("--cutout", action="store_true", default=False, help="use cutout")
    parser.add_argument("--cutout_length", type=int, default=8, help="cutout length")
    parser.add_argument("--model_path", type=str, default="./", help="Path to save the destination model")
    parser.add_argument('--port', default='23456', type=str)
    parser.add_argument('-j', '--workers', default=2, type=int)
    parser.add_argument("--maxTPEsearchNum", type=int, default=20, help="maxTPEsearchNum")
    return parser.parse_args()
# Run-wide mutable state shared by parse_rev_args/train/test via ``global``.
trainloader = None
testloader = None
net = None
criterion = None
optimizer = None
# Prefer GPU when available; the distributed code paths below assume CUDA.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Best test accuracy observed so far for the current trial.
best_acc = 0.0
# Parsed command-line arguments (also re-parsed under __main__).
args = get_args()
def build_graph_from_json(ir_model_json):
    """Materialize a torch model from its JSON intermediate representation."""
    graph = json_to_graph(ir_model_json)
    # The operation history records the morphisms applied to reach this graph.
    logging.debug(graph.operation_history)
    return graph.produce_torch_model()
def parse_rev_args(receive_msg):
    """ parse reveive msgs to global variable

    Builds the CIFAR-10 test loader, constructs the network described by the
    received model JSON, wraps it for distributed training, and installs the
    loss criterion — all into module-level globals.
    """
    global trainloader
    global testloader
    global trainsampler
    global testsampler
    global net
    global criterion
    global optimizer
    global rank, world_size
    # Loading Data
    if rank == 0:
        logger.debug("Preparing data..")

    transform_train, transform_test = utils.data_transforms_cifar10(args)
    dataPath = os.environ["HOME"] + "/mountdir/data/"

    trainset = torchvision.datasets.CIFAR10(
        root=dataPath, train=True, download=True, transform=transform_train
    )
    # NOTE(review): the training loader is intentionally NOT built here —
    # ``estimate`` rebuilds it per hyperparameter trial with the explored
    # batch size; the commented code below is kept for reference.
    #
    # trainsampler = DistributedSampler(trainset)
    #
    # trainloader = torch.utils.data.DataLoader(
    #     trainset, batch_size=args.batch_size_per_gpu, shuffle=False, num_workers=args.workers,
    #     pin_memory=False, sampler=trainsampler
    # )

    testset = torchvision.datasets.CIFAR10(
        root=dataPath, train=False, download=True, transform=transform_test
    )
    # Shard the test set across ranks so each worker evaluates a slice.
    testsampler = DistributedSampler(testset)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=args.batch_size, shuffle=False, num_workers=0,
        pin_memory = False, sampler = testsampler
    )
    if rank == 0:
        print("len(trainset)=" + str(len(trainset)))
        print("len(testset)=" + str(len(testset)))

    # Model
    if rank == 0:
        logger.debug("Building model..")
    net = build_graph_from_json(receive_msg)

    net = net.to(device)
    # Wrap the model for synchronous distributed data-parallel training.
    net = DistModule(net)
    criterion = nn.CrossEntropyLoss()
    # NOTE(review): the optimizer is chosen per-trial in ``estimate``; the
    # generic selection below is retained commented-out for reference.
    # if args.optimizer == "SGD":
    #     optimizer = optim.SGD(
    #         net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4
    #     )
    # if args.optimizer == "Adadelta":
    #     optimizer = optim.Adadelta(net.parameters(), lr=args.learning_rate)
    # if args.optimizer == "Adagrad":
    #     optimizer = optim.Adagrad(net.parameters(), lr=args.learning_rate)
    # if args.optimizer == "Adam":
    #     optimizer = optim.Adam(net.parameters(), lr=args.learning_rate)
    # if args.optimizer == "Adamax":
    #     optimizer = optim.Adamax(net.parameters(), lr=args.learning_rate)
    # if args.optimizer == "RMSprop":
    #     optimizer = optim.RMSprop(net.parameters(), lr=args.learning_rate)
    # Let cuDNN pick the fastest algorithms for the (fixed) input shapes.
    cudnn.benchmark = True
    return 0
# Training
def train(epoch, op_explore):
    """Train the model for one epoch on the (distributed) training set.

    Args:
        epoch: Zero-based epoch index (used for logging only).
        op_explore: Optimizer instance to use for this epoch.

    Returns:
        The all-reduced training accuracy (percent) across all workers.
    """
    global trainloader
    global testloader
    global net
    global criterion
    global optimizer
    global rank, world_size
    if rank == 0:
        logger.debug("Epoch: %d", epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    optimizer = op_explore
    f11 = open('/root/log', 'a+')
    f11.write('### ready to train \n')
    f11.close()
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        f11 = open('/root/log', 'a+')
        f11.write('### loop to train \n')
        f11.close()
        # BUG FIX: ``async`` became a reserved keyword in Python 3.7, making
        # ``targets.cuda(async=True)`` a SyntaxError; the modern torch
        # spelling of this argument is ``non_blocking``.
        targets = targets.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(inputs.cuda())
        target_var = torch.autograd.Variable(targets)
        optimizer.zero_grad()
        outputs = net(input_var)
        # Scale the loss so that summing gradients across workers averages it.
        loss = criterion(outputs, target_var) / world_size
        loss.backward()
        average_gradients(net)
        optimizer.step()
        train_loss += loss.item()
        _, predicted = outputs.data.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
    # all_reduce is costly, so accuracy counts are aggregated once per epoch
    # rather than per batch.
    reduced_total = torch.Tensor([total]).cuda()
    reduced_correct = torch.Tensor([correct]).cuda()
    dist.all_reduce(reduced_total)
    dist.all_reduce(reduced_correct)
    tmp_total = int(reduced_total[0])
    tmp_correct = int(reduced_correct[0])
    acc = 100.0 * tmp_correct / tmp_total
    return acc
def test(epoch):
    """ eval model on each epoch in testset

    Computes loss/accuracy over this rank's shard of the test set, then
    all-reduces the counts so every rank sees the global accuracy. Updates
    the global ``best_acc`` and returns (accuracy, best accuracy so far).
    """
    global best_acc
    global trainloader
    global testloader
    global net
    global criterion
    global optimizer
    global rank, world_size
    if rank == 0:
        logger.debug("Eval on epoch: %d", epoch)
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    # Inference only — no gradients needed.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            # Divided by world_size so summing across workers averages it.
            loss = criterion(outputs, targets) / world_size
            test_loss += loss.item()
            _, predicted = outputs.data.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
    #As the cost of all_reduce, we don't use all_reduce every batch to calculate acc."
    """
    if rank == 0:
        logger.debug(
            "Loss: %.3f | Acc: %.3f%% (%d/%d)",
            test_loss / (batch_idx + 1),
            100.0 * tmp_correct / tmp_total,
            tmp_correct,
            tmp_total,
        )"""
    # Aggregate the per-rank counts into global totals.
    reduced_total = torch.Tensor([total])
    reduced_correct = torch.Tensor([correct])
    reduced_total = reduced_total.cuda()
    reduced_correct = reduced_correct.cuda()
    dist.all_reduce(reduced_total)
    dist.all_reduce(reduced_correct)
    tmp_total = int(reduced_total[0])
    tmp_correct = int(reduced_correct[0])
    acc = 100.0 * tmp_correct / tmp_total
    if acc > best_acc:
        best_acc = acc
    return acc, best_acc
# Per-trial history: best accuracies and [acc, batch_size, lr] records
# accumulated across TPE hyperparameter evaluations.
acclist=[]
reslist=[]
def estimate(esargs):
    """Evaluate one TPE-sampled hyperparameter set (learning rate, batch size).

    Rebuilds the distributed training loader with the sampled batch size,
    trains with SGD at the sampled learning rate for up to ``args.epochs``
    epochs with early stopping on test accuracy, and records the outcome in
    the module-level ``acclist``/``reslist``.

    Args:
        esargs: Dict with 'learning_rate' and 'batch_size' keys.

    Returns:
        Tuple of (best test accuracy, number of epochs actually run);
        (0, epoch) if training raised an exception.
    """
    global best_acc
    global trainloader
    global testloader
    global net
    global criterion
    global optimizer
    global rank
    # Reset the early-stopping tracker and best accuracy for this trial.
    early_stop = utils.EarlyStopping(mode="max")
    best_acc = 0
    lr_explore = esargs['learning_rate']
    bs_explore = int(esargs['batch_size'])
    transform_train, transform_test = utils.data_transforms_cifar10(args)
    trainset = torchvision.datasets.CIFAR10(root="/root/mountdir/data/", train=True, download=True, transform=transform_train)
    trainsampler = DistributedSampler(trainset)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=bs_explore, shuffle=False, num_workers=args.workers,
        pin_memory=False, sampler=trainsampler
    )
    op = optim.SGD(net.parameters(), lr=lr_explore, momentum=0.9, weight_decay=5e-4)
    for ep in range(args.epochs):
        current_ep = ep + 1
        if rank == 0:
            # Record progress ("epoch=N") in the trial's output log, replacing
            # any existing epoch line or appending one the first time.
            if os.popen("grep epoch " + experiment_path + "/trials/" + str(nni.get_trial_id()) + "/output.log").read():
                os.system("sed -i '/^epoch/cepoch=" + str(ep + 1) + "' " + experiment_path + "/trials/" + str(nni.get_trial_id()) + "/output.log")
            else:
                os.system("sed -i '$a\\epoch=" + str(ep + 1) + "' " + experiment_path + "/trials/" + str(nni.get_trial_id()) + "/output.log")
        try:
            train_acc = train(ep, op)
        except Exception as exception:
            # Best-effort: log the failure and report zero accuracy rather
            # than crashing the whole hyperparameter search.
            f11 = open('/root/log', 'a+')
            f11.write('###### training is error \n')
            f11.write(str(exception) + "\n")
            f11.close()
            acclist.append(0)
            return 0, current_ep
        test_acc, best_acc = test(ep)
        logger.debug(test_acc)
        if early_stop.step(test_acc):
            break
    # Renamed from ``list`` to avoid shadowing the builtin.
    record = [best_acc, bs_explore, str(lr_explore)[0:7]]
    reslist.append(record)
    acclist.append(best_acc)
    return best_acc, current_ep
if __name__ == "__main__":
args = get_args()
rank, world_size = dist_init(args.port)
if rank == 1:
f11=open('/root/rank'+str(rank),'a+')
f11.write('rank:'+str(rank)+"\n")
f11.write("world_size:"+str(world_size)+"\n")
f11.close()
example_start_time = time.time()
try:
real_model_file = os.path.join("/root", "real_model.json")
experiment_path = os.environ["HOME"] + "/mountdir/nni/experiments/" + str(nni.get_experiment_id())
assert(args.workers % world_size == 0)
args.workers = args.workers // world_size
#real_model_file = os.path.join(trial_env_vars.NNI_SYS_DIR, "real_model.json")
if rank == 0: # only works for single node
lock = multiprocessing.Lock()
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://172.17.0.10:800081")
# trial get next parameter from network morphism tuner
#path=os.environ["HOME"] + "/mountdir/nni/experiments/" + str(nni.get_experiment_id()) + "/trials/" + str(nni.get_trial_id())
os.makedirs(experiment_path + "/trials/" + str(nni.get_trial_id()))
get_next_parameter_start=time.time()
nni.get_next_parameter(socket)
get_next_parameter_end = time.time()
while True:
lock.acquire()
f1 = open(experiment_path + "/graph.txt","a+")
f1.seek(0)
lines = f1.readlines()
f1.close()
lock.release()
if lines:
break
json_and_id_str = lines[-1].replace("\n","") #逆序读取并记录,数据组成字典
json_and_id = dict((l.split('=') for l in json_and_id_str.split('+')))
if str(json_and_id['history']) == "True":
socket.send_pyobj({"type": "generated_parameter", "parameters": json_and_id['json_out'], "father_id": int(json_and_id['father_id']), "parameter_id": int(nni.get_sequence_id())})
f11=open('/root/log','a+')
f11.write('histtory is True so \nsend parameters')
f11.close()
message = socket.recv_pyobj()
f11=open('/root/log','a+')
f11.write('recv message: '+ str(message)+'\n')
f11.close()
elif str(json_and_id['history']) == "False":
socket.send_pyobj({"type": "generated_parameter"})
f11=open('/root/log','a+')
f11.write('history is false so \nsend generated_parameter\n')
f11.close()
message = socket.recv_pyobj()
f11=open('/root/log','a+')
f11.write('history is false so \nsend generated_parameter\n')
f11.close()
RCV_CONFIG = json_and_id['json_out']
parse_rev_args(RCV_CONFIG)
f11=open('/root/log','a+')
f11.write("RCV_CONFIG:"+str(RCV_CONFIG)+"\n")
f11.close()
with open(real_model_file, "w") as f:
json.dump(RCV_CONFIG, f)
#logger.info(RCV_CONFIG)
else:
while not os.path.isfile(real_model_file):
time.sleep(5)
with open(real_model_file, "r") as f:
RCV_CONFIG = json.load(f)
if rank ==0:
start_time = time.time()
f = open(experiment_path + "/trials/" + str(nni.get_trial_id()) + "/output.log", "a+")
f.write("sequence_id=" + str(nni.get_sequence_id()) + "\n")
f.close()
with open('search_space.json') as json_file:
search_space = json.load(json_file)
## 根据father_id读取相应的超参json文件
# 在起始时,不需要读取该件
if 'father_id' in json_and_id:
with | |
requirements.
ResourceRequirement keys that are not present in the dict are not modified.
Args:
name: The name of the resource to set the requirements of (e.g., "cpu" or "memory").
requirements: A dict mapping requirements to target values (e.g., `{ResourceRequirement.request: '500m', ResourceRequirement.limit: '2000m'})
"""
resources: kubernetes_asyncio.client.V1ResourceRequirements = copy.copy(
getattr(self, 'resources', kubernetes_asyncio.client.V1ResourceRequirements())
)
for requirement, value in requirements.items():
resource_to_values = getattr(resources, requirement.resources_key, {})
if not resource_to_values:
resource_to_values = {}
if value is not None:
# NOTE: Coerce to string as values are headed into Kubernetes resource model
resource_to_values[name] = str(value)
else:
resource_to_values.pop(name, None)
setattr(resources, requirement.resources_key, resource_to_values)
self.resources = resources
@property
def ports(self) -> List[kubernetes_asyncio.client.V1ContainerPort]:
    """
    Return the ports declared on the underlying Container.

    Returns:
        The Container ports (empty list when none are defined).
    """
    declared = self.obj.ports
    return declared if declared else []
def __str__(self) -> str:
    # Delegate the readable representation to the wrapped Kubernetes object.
    return str(self.obj)
def __repr__(self) -> str:
    # Mirror __str__; the wrapped API object's string form is descriptive
    # enough for debugging.
    return self.__str__()
class Pod(KubernetesModel):
    """Wrapper around a Kubernetes `Pod`_ API Object.

    The actual ``kubernetes.client.V1Pod`` instance that this
    wraps can be accessed via the ``obj`` instance member.

    This wrapper provides some convenient functionality around the
    API Object and provides some state management for the `Pod`_.

    .. _Pod:
        https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#pod-v1-core
    """

    # The underlying Kubernetes API object wrapped by this model.
    obj: kubernetes_asyncio.client.V1Pod

    # API client classes used to talk to the cluster, keyed by API version
    # ("preferred" is the default lookup used by the base class).
    api_clients: ClassVar[Dict[str, Type]] = {
        "preferred":kubernetes_asyncio.client.CoreV1Api,
        "v1":kubernetes_asyncio.client.CoreV1Api,
    }
@classmethod
async def read(cls, name: str, namespace: str) -> "Pod":
    """Fetch the named Pod from the cluster and wrap it.

    Args:
        name: The name of the Pod to read.
        namespace: The namespace to read the Pod from.
    """
    servo.logger.debug(f'reading pod "{name}" in namespace "{namespace}"')
    async with cls.preferred_client() as api_client:
        pod_obj = await api_client.read_namespaced_pod_status(name, namespace)
    return Pod(pod_obj)
async def create(self, namespace: str = None) -> None:
    """Create the Pod in the cluster.

    Args:
        namespace: The namespace to create the Pod under. When the Pod was
            loaded via the kubetest client the namespace is already set and
            this argument may be omitted.
    """
    target_namespace = self.namespace if namespace is None else namespace
    self.logger.info(f'creating pod "{self.name}" in namespace "{target_namespace}"')
    async with self.preferred_client() as api_client:
        self.obj = await api_client.create_namespaced_pod(
            namespace=target_namespace,
            body=self.obj,
        )
async def patch(self) -> None:
    """
    Patches a Pod, applying local spec changes to the cluster.
    """
    self.logger.info(f'patching pod "{self.name}"')
    async with self.api_client() as api_client:
        # Strategic-merge semantics let the server merge our spec into the
        # live object instead of replacing it wholesale.
        api_client.api_client.set_default_header('content-type', 'application/strategic-merge-patch+json')
        await api_client.patch_namespaced_pod(
            name=self.name,
            namespace=self.namespace,
            body=self.obj,
        )
async def delete(self, options:kubernetes_asyncio.client.V1DeleteOptions = None) ->kubernetes_asyncio.client.V1Status:
    """Delete the Pod from the cluster.

    This method expects the Pod to have been loaded or otherwise assigned a
    namespace already; if not, the namespace must be set manually first.

    Args:
        options: Options for Pod deletion.

    Return:
        The status of the delete operation.
    """
    delete_options = options if options is not None else kubernetes_asyncio.client.V1DeleteOptions()
    self.logger.info(f'deleting pod "{self.name}"')
    self.logger.trace(f"delete options: {delete_options}")
    async with self.api_client() as api_client:
        return await api_client.delete_namespaced_pod(
            name=self.name,
            namespace=self.namespace,
            body=delete_options,
        )
async def refresh(self) -> None:
    """Re-read this Pod's status from the cluster into ``self.obj``."""
    async with self.api_client() as api_client:
        self.obj = await api_client.read_namespaced_pod_status(
            name=self.name,
            namespace=self.namespace,
        )
async def is_ready(self) -> bool:
    """Check if the Pod is in the ready state.

    Refreshes the Pod's status from the cluster, then inspects its
    conditions for a 'Ready' condition with status True.

    Returns:
        True if in the ready state; False otherwise.
    """
    self.logger.trace("refreshing pod status to check is_ready")
    await self.refresh()

    # if there is no status, the pod is definitely not ready
    status = self.obj.status
    self.logger.trace(f"current pod status is {status}")
    if status is None:
        return False

    # BUG FIX: the original evaluated ``status.phase`` as a no-op expression
    # statement and then logged the whole status object; capture the phase
    # and log it as the message says.
    phase = status.phase
    self.logger.trace(f"current pod phase is {phase}")
    if not status.conditions:
        return False

    self.logger.trace(f"checking status conditions {status.conditions}")
    for cond in status.conditions:
        # An unschedulable pod can never become ready.
        if cond.reason == "Unschedulable":
            return False

        # we only care about the condition type 'ready'
        if cond.type.lower() != "ready":
            continue

        # check that the readiness condition is True
        return cond.status.lower() == "true"

    # Catchall: no 'Ready' condition found yet.
    self.logger.trace(f"unable to find ready=true, continuing to wait...")
    return False
async def raise_for_status(self, adjustments: List[servo.Adjustment]) -> None:
    """Raise an exception if the Pod status is not ready.

    Inspects the *current* in-memory status (``self.obj.status``) — it
    does NOT refresh first; callers are expected to have already called
    ``is_ready()`` or ``refresh()``.

    Args:
        adjustments: The adjustments in flight; used to report which
            ones made the Pod unschedulable.

    Raises:
        servo.AdjustmentFailedError: On container image pull failures.
        servo.AdjustmentRejectedError: On crash-restarts, unschedulable
            pods, or a failed Ready condition.
        RuntimeError: When no status is available, no conditions exist,
            or the status cannot be classified.
    """
    # NOTE: operate off of current state, assuming you have checked is_ready()
    status = self.obj.status
    self.logger.trace(f"current pod status is {status}")
    if status is None:
        raise RuntimeError(f'No such pod: {self.name}')
    # check the pod phase to make sure it is running. a pod in
    # the 'failed' or 'success' state will no longer be running,
    # so we only care if the pod is in the 'running' state.
    # phase = status.phase
    if not status.conditions:
        raise RuntimeError(f'Pod is not running: {self.name}')

    # Image-pull failures are reported via per-container waiting states.
    self.logger.trace(f"checking container statuses: {status.container_statuses}")
    if status.container_statuses:
        for cont_stat in status.container_statuses:
            if cont_stat.state and cont_stat.state.waiting and cont_stat.state.waiting.reason in ["ImagePullBackOff", "ErrImagePull"]:
                raise servo.AdjustmentFailedError("Container image pull failure detected", reason="image-pull-failed")

    # Any container restart is treated as instability and rejects the adjustment.
    restarted_container_statuses = list(filter(lambda cont_stat: cont_stat.restart_count > 0, (status.container_statuses or [])))
    if restarted_container_statuses:
        container_messages = list(map(lambda cont_stat: f"{cont_stat.name} x{cont_stat.restart_count}", restarted_container_statuses))
        raise servo.AdjustmentRejectedError(
            f"Tuning optimization {self.name} crash restart detected on container(s): {', '.join(container_messages)}",
            reason="unstable"
        )

    self.logger.trace(f"checking status conditions {status.conditions}")
    for cond in status.conditions:
        if cond.reason == "Unschedulable":
            # FIXME: The servo rejected error should be raised further out. This should be a generic scheduling error
            # Match adjustments by substring of the condition message to
            # report which settings caused the scheduling failure.
            unschedulable_adjustments = list(filter(lambda a: a.setting_name in cond.message, adjustments))
            raise servo.AdjustmentRejectedError(
                f"Requested adjustment(s) ({', '.join(map(str, unschedulable_adjustments))}) cannot be scheduled due to \"{cond.message}\"",
                reason="unschedulable"
            )

        if cond.type == "Ready" and cond.status == "False":
            raise servo.AdjustmentRejectedError(f"(reason {cond.reason}) {cond.message}", reason="start-failed")

        # we only care about the condition type 'ready'
        if cond.type.lower() != "ready":
            continue

        # check that the readiness condition is True
        if cond.status.lower() == "true":
            return

    # Catchall
    self.logger.trace(f"unable to find ready=true, continuing to wait...")
    raise RuntimeError(f"Unknown Pod status for '{self.name}': {status}")
async def get_status(self) -> kubernetes_asyncio.client.V1PodStatus:
    """Get the status of the Pod.

    Returns:
        The status of the Pod, freshly fetched from the cluster.
    """
    # first, refresh the pod state to ensure latest status
    await self.refresh()
    # return the status of the pod
    return cast(kubernetes_asyncio.client.V1PodStatus, self.obj.status)
@property
def containers(self) -> List[Container]:
    """Container wrappers for the underlying pod spec.

    Returns:
        One ``Container`` per entry in ``self.obj.spec.containers``.
    """
    return [Container(spec, self) for spec in self.obj.spec.containers]
async def get_containers(self) -> List[Container]:
    """Get the Pod's containers.

    Refreshes the Pod from the cluster first, so the returned wrappers
    reflect the latest spec.

    Returns:
        A list of containers that belong to the Pod.
    """
    self.logger.debug(f'getting containers for pod "{self.name}"')
    await self.refresh()
    return self.containers
def get_container(self, name: str) -> Union[Container, None]:
    """Look up one of the Pod's Containers by name.

    Args:
        name (str): The name of the Container.

    Returns:
        Container: The Pod's Container with the matching name, or
        ``None`` when no container with that name exists.
    """
    for container in self.containers:
        if container.name == name:
            return container
    return None
async def get_restart_count(self) -> int:
    """Get the total number of Container restarts for the Pod.

    Returns:
        The sum of restart counts across all containers; 0 when the
        Pod reports no container statuses.
    """
    status = await self.get_status()
    container_statuses = status.container_statuses or []
    return sum(cs.restart_count for cs in container_statuses)
async def containers_started(self) -> bool:
    """Check if the Pod's Containers have all started.

    A container counts as started when it has a running state with a
    ``started_at`` timestamp. When no container statuses are reported
    at all, this returns True (matching the historical behavior).

    Returns:
        True if all Containers have started; False otherwise.
    """
    status = await self.get_status()
    if status.container_statuses is None:
        return True

    for cs in status.container_statuses:
        state = cs.state
        started = (
            state is not None
            and state.running is not None
            and state.running.started_at is not None
        )
        if not started:
            # At least one container has not begun running yet.
            return False
    return True
def uid(self) -> str:
    """Get the UID for the Pod.

    UID is the unique-in-time-and-space value for this object. It is
    typically generated by the server on successful creation of a
    resource and is not allowed to change on PUT operations.
    Populated by the system; read-only.
    More info: http://kubernetes.io/docs/user-guide/identifiers#uids
    """
    return self.obj.metadata.uid
class Service(KubernetesModel):
"""Kubetest wrapper around a Kubernetes `Service`_ API Object.
The actual ``kubernetes.client.V1Service`` instance that this
wraps can be accessed via the ``obj`` instance member.
This wrapper provides some convenient functionality around the
API Object and provides some state management for the `Service`_.
.. | |
<gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import pandas as pd
import ray
import ray.dataframe as rdf
@pytest.fixture
def ray_df_equals_pandas(ray_df, pandas_df):
    # NOTE(review): despite the @pytest.fixture decorator, this helper (and
    # the test_* helpers below) is called directly as a plain function by the
    # test_*_dataframe drivers; calling a fixture function directly is an
    # error in modern pytest — confirm the intended pytest version.
    # Sorting both frames by index makes the comparison order-insensitive.
    return rdf.to_pandas(ray_df).sort_index().equals(pandas_df.sort_index())
# ---------------------------------------------------------------------------
# Assertion helpers: each checks that one DataFrame operation on the ray
# frame matches pandas. They are (mis)decorated as fixtures but invoked
# directly by the test_*_dataframe drivers below.
# ---------------------------------------------------------------------------

@pytest.fixture
def test_roundtrip(ray_df, pandas_df):
    # Converting to pandas and back must preserve the data.
    assert(ray_df_equals_pandas(ray_df, pandas_df))

@pytest.fixture
def test_index(ray_df, pandas_df):
    assert(ray_df.index.equals(pandas_df.index))

@pytest.fixture
def test_size(ray_df, pandas_df):
    assert(ray_df.size == pandas_df.size)

@pytest.fixture
def test_ndim(ray_df, pandas_df):
    assert(ray_df.ndim == pandas_df.ndim)

@pytest.fixture
def test_ftypes(ray_df, pandas_df):
    assert(ray_df.ftypes.equals(pandas_df.ftypes))

@pytest.fixture
def test_values(ray_df, pandas_df):
    assert(np.array_equal(ray_df.values, pandas_df.values))

@pytest.fixture
def test_axes(ray_df, pandas_df):
    # Compare row index and column index pairwise.
    for ray_axis, pd_axis in zip(ray_df.axes, pandas_df.axes):
        assert (np.array_equal(ray_axis, pd_axis))

@pytest.fixture
def test_shape(ray_df, pandas_df):
    assert(ray_df.shape == pandas_df.shape)

@pytest.fixture
def test_add_prefix(ray_df, pandas_df):
    test_prefix = "TEST"
    new_ray_df = ray_df.add_prefix(test_prefix)
    new_pandas_df = pandas_df.add_prefix(test_prefix)
    assert(new_ray_df.columns.equals(new_pandas_df.columns))

@pytest.fixture
def test_add_suffix(ray_df, pandas_df):
    test_suffix = "TEST"
    new_ray_df = ray_df.add_suffix(test_suffix)
    new_pandas_df = pandas_df.add_suffix(test_suffix)
    assert(new_ray_df.columns.equals(new_pandas_df.columns))

@pytest.fixture
def test_applymap(ray_df, pandas_df, testfunc):
    new_ray_df = ray_df.applymap(testfunc)
    new_pandas_df = pandas_df.applymap(testfunc)
    assert(ray_df_equals_pandas(new_ray_df, new_pandas_df))

@pytest.fixture
def test_copy(ray_df):
    new_ray_df = ray_df.copy()
    # A copy must be a distinct wrapper object...
    assert(new_ray_df is not ray_df)
    # ...but shares the underlying partition handle (_df).
    assert(new_ray_df._df == ray_df._df)

@pytest.fixture
def test_sum(ray_df, pandas_df):
    assert(ray_df_equals_pandas(ray_df.sum(), pandas_df.sum()))

@pytest.fixture
def test_abs(ray_df, pandas_df):
    assert(ray_df_equals_pandas(ray_df.abs(), pandas_df.abs()))

@pytest.fixture
def test_keys(ray_df, pandas_df):
    assert(ray_df.keys().equals(pandas_df.keys()))

@pytest.fixture
def test_transpose(ray_df, pandas_df):
    # Both the .T property and the transpose() method are checked.
    assert(ray_df_equals_pandas(ray_df.T, pandas_df.T))
    assert(ray_df_equals_pandas(ray_df.transpose(), pandas_df.transpose()))

@pytest.fixture
def test_get(ray_df, pandas_df, key):
    # With and without an explicit default value.
    assert(ray_df.get(key).equals(pandas_df.get(key)))
    assert ray_df.get(
        key, default='default').equals(
            pandas_df.get(key, default='default'))

@pytest.fixture
def test_get_dtype_counts(ray_df, pandas_df):
    assert(ray_df.get_dtype_counts().equals(pandas_df.get_dtype_counts()))

@pytest.fixture
def test_get_ftype_counts(ray_df, pandas_df):
    assert(ray_df.get_ftype_counts().equals(pandas_df.get_ftype_counts()))

@pytest.fixture
def create_test_dataframe():
    # Small fixed int frame split into 2 row partitions; used by the
    # NotImplementedError tests below.
    df = pd.DataFrame({'col1': [0, 1, 2, 3],
                       'col2': [4, 5, 6, 7],
                       'col3': [8, 9, 10, 11],
                       'col4': [12, 13, 14, 15],
                       'col5': [0, 0, 0, 0]})
    return rdf.from_pandas(df, 2)
def test_int_dataframe():
    # Drive every assertion helper against an all-integer frame.
    # NOTE(review): ray.init() is only called here; the other
    # test_*_dataframe functions appear to rely on this test having run
    # first in the same session — confirm intended ordering.
    ray.init()
    pandas_df = pd.DataFrame({'col1': [0, 1, 2, 3],
                              'col2': [4, 5, 6, 7],
                              'col3': [8, 9, 10, 11],
                              'col4': [12, 13, 14, 15],
                              'col5': [0, 0, 0, 0]})
    ray_df = rdf.from_pandas(pandas_df, 2)
    testfuncs = [lambda x: x + 1,
                 lambda x: str(x),
                 lambda x: x * x,
                 lambda x: x,
                 lambda x: False]
    keys = ['col1',
            'col2',
            'col3',
            'col4']
    test_roundtrip(ray_df, pandas_df)
    test_index(ray_df, pandas_df)
    test_size(ray_df, pandas_df)
    test_ndim(ray_df, pandas_df)
    test_ftypes(ray_df, pandas_df)
    test_values(ray_df, pandas_df)
    test_axes(ray_df, pandas_df)
    test_shape(ray_df, pandas_df)
    test_add_prefix(ray_df, pandas_df)
    test_add_suffix(ray_df, pandas_df)
    for testfunc in testfuncs:
        test_applymap(ray_df, pandas_df, testfunc)
    test_copy(ray_df)
    test_sum(ray_df, pandas_df)
    test_abs(ray_df, pandas_df)
    test_keys(ray_df, pandas_df)
    test_transpose(ray_df, pandas_df)
    # The helpers below (test_round, test_all, ...) are defined further
    # down in this file / past this chunk.
    test_round(ray_df, pandas_df)
    test_all(ray_df, pandas_df)
    test_any(ray_df, pandas_df)
    test___getitem__(ray_df, pandas_df)
    test___delitem__(ray_df, pandas_df)
    test___copy__(ray_df, pandas_df)
    test___deepcopy__(ray_df, pandas_df)
    test_bool(ray_df, pandas_df)
    test_count(ray_df, pandas_df)
    test_head(ray_df, pandas_df)
    test_tail(ray_df, pandas_df)
    test_idxmax(ray_df, pandas_df)
    test_idxmin(ray_df, pandas_df)
    test_pop(ray_df, pandas_df)
    for key in keys:
        test_get(ray_df, pandas_df, key)
    test_get_dtype_counts(ray_df, pandas_df)
    test_get_ftype_counts(ray_df, pandas_df)
def test_float_dataframe():
    # Same battery of checks as test_int_dataframe, on an all-float frame.
    # NOTE(review): no ray.init() here — presumably depends on
    # test_int_dataframe having initialized ray in this session.
    pandas_df = pd.DataFrame({'col1': [0.0, 1.0, 2.0, 3.0],
                              'col2': [4.0, 5.0, 6.0, 7.0],
                              'col3': [8.0, 9.0, 10.0, 11.0],
                              'col4': [12.0, 13.0, 14.0, 15.0],
                              'col5': [0.0, 0.0, 0.0, 0.0]})
    ray_df = rdf.from_pandas(pandas_df, 2)
    testfuncs = [lambda x: x + 1,
                 lambda x: str(x),
                 lambda x: x * x,
                 lambda x: x,
                 lambda x: False]
    keys = ['col1',
            'col2',
            'col3',
            'col4']
    test_roundtrip(ray_df, pandas_df)
    test_index(ray_df, pandas_df)
    test_size(ray_df, pandas_df)
    test_ndim(ray_df, pandas_df)
    test_ftypes(ray_df, pandas_df)
    test_values(ray_df, pandas_df)
    test_axes(ray_df, pandas_df)
    test_shape(ray_df, pandas_df)
    test_add_prefix(ray_df, pandas_df)
    test_add_suffix(ray_df, pandas_df)
    for testfunc in testfuncs:
        test_applymap(ray_df, pandas_df, testfunc)
    test_copy(ray_df)
    test_sum(ray_df, pandas_df)
    test_abs(ray_df, pandas_df)
    test_keys(ray_df, pandas_df)
    test_transpose(ray_df, pandas_df)
    test_round(ray_df, pandas_df)
    test_all(ray_df, pandas_df)
    test_any(ray_df, pandas_df)
    test___getitem__(ray_df, pandas_df)
    test___delitem__(ray_df, pandas_df)
    test___copy__(ray_df, pandas_df)
    test___deepcopy__(ray_df, pandas_df)
    test_bool(ray_df, pandas_df)
    test_count(ray_df, pandas_df)
    test_head(ray_df, pandas_df)
    test_tail(ray_df, pandas_df)
    test_idxmax(ray_df, pandas_df)
    test_idxmin(ray_df, pandas_df)
    test_pop(ray_df, pandas_df)
    for key in keys:
        test_get(ray_df, pandas_df, key)
    test_get_dtype_counts(ray_df, pandas_df)
    test_get_ftype_counts(ray_df, pandas_df)
def test_mixed_dtype_dataframe():
    # Reduced battery of checks for a frame mixing int, float and str
    # columns (helpers that require numeric-only frames are skipped).
    pandas_df = pd.DataFrame({
        'col1': [1, 2, 3, 4],
        'col2': [4, 5, 6, 7],
        'col3': [8.0, 9.4, 10.1, 11.3],
        'col4': ['a', 'b', 'c', 'd']})
    ray_df = rdf.from_pandas(pandas_df, 2)
    testfuncs = [lambda x: x + x,
                 lambda x: str(x),
                 lambda x: x,
                 lambda x: False]
    keys = ['col1',
            'col2',
            'col3',
            'col4']
    test_roundtrip(ray_df, pandas_df)
    test_index(ray_df, pandas_df)
    test_size(ray_df, pandas_df)
    test_ndim(ray_df, pandas_df)
    test_ftypes(ray_df, pandas_df)
    test_values(ray_df, pandas_df)
    test_axes(ray_df, pandas_df)
    test_shape(ray_df, pandas_df)
    test_add_prefix(ray_df, pandas_df)
    test_add_suffix(ray_df, pandas_df)
    for testfunc in testfuncs:
        test_applymap(ray_df, pandas_df, testfunc)
    test_copy(ray_df)
    test_sum(ray_df, pandas_df)
    test_keys(ray_df, pandas_df)
    test_transpose(ray_df, pandas_df)
    for key in keys:
        test_get(ray_df, pandas_df, key)
    test_get_dtype_counts(ray_df, pandas_df)
    test_get_ftype_counts(ray_df, pandas_df)
# Each test below asserts that the corresponding DataFrame method is still
# unimplemented in ray.dataframe and raises NotImplementedError.

def test_add():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.add(None)

def test_agg():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.agg(None)

def test_aggregate():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.aggregate(None)

def test_align():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.align(None)

# Assertion helpers (see note on ray_df_equals_pandas about fixture misuse).
@pytest.fixture
def test_all(ray_df, pd_df):
    # Column-wise (default) and row-wise (axis=1) reductions.
    assert pd_df.all().equals(ray_df.all())
    assert pd_df.all(axis=1).equals(ray_df.all(axis=1))

@pytest.fixture
def test_any(ray_df, pd_df):
    assert pd_df.any().equals(ray_df.any())
    assert pd_df.any(axis=1).equals(ray_df.any(axis=1))
# More not-yet-implemented methods: each must raise NotImplementedError.

def test_append():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.append(None)

def test_apply():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.apply(None)

def test_as_blocks():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.as_blocks()

def test_as_matrix():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.as_matrix()

def test_asfreq():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.asfreq(None)

def test_asof():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.asof(None)

def test_assign():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.assign()

def test_astype():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.astype(None)

def test_at_time():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.at_time(None)

def test_between_time():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.between_time(None, None)
def test_bfill():
    """bfill() is not implemented yet and must raise NotImplementedError."""
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        # BUG FIX: previously called between_time() (copy-paste from the
        # test above), so bfill() itself was never exercised.
        ray_df.bfill()
@pytest.fixture
def test_bool(ray_df, pd_df):
    # bool() must raise for a frame with more than one element.
    with pytest.raises(ValueError):
        ray_df.bool()
        # NOTE(review): this second call is unreachable when the first one
        # raises; it likely belongs in its own pytest.raises block — confirm.
        pd_df.bool()
    # For a single-element boolean frame, bool() must agree with pandas.
    single_bool_pd_df = pd.DataFrame([True])
    single_bool_ray_df = rdf.from_pandas(single_bool_pd_df, 1)
    assert single_bool_pd_df.bool() == single_bool_ray_df.bool()
# Not-yet-implemented methods: each must raise NotImplementedError.
# (test_count, in the middle, is an assertion helper like the ones above.)

def test_boxplot():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.boxplot()

def test_clip():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.clip()

def test_clip_lower():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.clip_lower(None)

def test_clip_upper():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.clip_upper(None)

def test_combine():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.combine(None, None)

def test_combine_first():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.combine_first(None)

def test_compound():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.compound()

def test_consolidate():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.consolidate()

def test_convert_objects():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.convert_objects()

def test_corr():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.corr()

def test_corrwith():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.corrwith(None)

@pytest.fixture
def test_count(ray_df, pd_df):
    # Helper: count() must agree with pandas on both axes.
    assert ray_df.count().equals(pd_df.count())
    assert ray_df.count(axis=1).equals(pd_df.count(axis=1))

def test_cov():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.cov()

def test_cummax():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.cummax()

def test_cummin():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.cummin()

def test_cumprod():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.cumprod()

def test_cumsum():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.cumsum()

def test_describe():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.describe()

def test_diff():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.diff()

def test_div():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.div(None)

def test_divide():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.divide(None)

def test_dot():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.dot(None)

def test_drop():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.drop()

def test_drop_duplicates():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.drop_duplicates()

def test_duplicated():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.duplicated()

def test_eq():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.eq(None)

def test_equals():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.equals(None)

def test_eval():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.eval(None)

def test_ewm():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.ewm()

def test_expanding():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.expanding()

def test_ffill():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.ffill()

def test_fillna():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.fillna()

def test_filter():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.filter()

def test_first():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.first(None)

def test_first_valid_index():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.first_valid_index()

def test_floordiv():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.floordiv(None)

# Classmethod constructors: also unimplemented.

def test_from_csv():
    with pytest.raises(NotImplementedError):
        rdf.DataFrame.from_csv(None)

def test_from_dict():
    with pytest.raises(NotImplementedError):
        rdf.DataFrame.from_dict(None)

def test_from_items():
    with pytest.raises(NotImplementedError):
        rdf.DataFrame.from_items(None)

def test_from_records():
    with pytest.raises(NotImplementedError):
        rdf.DataFrame.from_records(None)

def test_ge():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.ge(None)

def test_get_value():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.get_value(None, None)

def test_get_values():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.get_values()

def test_gt():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.gt(None)
@pytest.fixture
def test_head(ray_df, pandas_df):
    # BUG FIX: the comparison result was previously computed and discarded,
    # so a head() mismatch could never fail the test. Assert it.
    assert ray_df_equals_pandas(ray_df.head(), pandas_df.head())
def test_hist():
    # hist() is not implemented yet and must raise NotImplementedError.
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.hist(None)

# Assertion helpers: idxmax/idxmin must agree with pandas
# (sorted by index to be partition-order independent).
@pytest.fixture
def test_idxmax(ray_df, pandas_df):
    assert \
        ray_df.idxmax().sort_index().equals(pandas_df.idxmax().sort_index())

@pytest.fixture
def test_idxmin(ray_df, pandas_df):
    assert \
        ray_df.idxmin().sort_index().equals(pandas_df.idxmin().sort_index())
# Not-yet-implemented methods: each must raise NotImplementedError.

def test_infer_objects():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.infer_objects()

def test_info():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.info()

def test_insert():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.insert(None, None, None)

def test_interpolate():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.interpolate()

def test_items():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.items()

def test_iteritems():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.iteritems()

def test_iterrows():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.iterrows()

def test_itertuples():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.itertuples()

def test_join():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.join(None)

def test_kurt():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.kurt()

def test_kurtosis():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.kurtosis()

def test_last():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.last(None)

def test_last_valid_index():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.last_valid_index()

def test_le():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.le(None)

def test_lookup():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.lookup(None, None)

def test_lt():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.lt(None)

def test_mad():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.mad()

def test_mask():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.mask(None)

def test_max():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.max()

def test_mean():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.mean()

def test_median():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.median()

def test_melt():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.melt()

def test_memory_usage():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.memory_usage()

def test_merge():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.merge(None)

def test_min():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.min()

def test_mod():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.mod(None)

def test_mode():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.mode()

def test_mul():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.mul(None)

def test_multiply():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.multiply(None)

def test_ne():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.ne(None)

def test_nlargest():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.nlargest(None, None)

def test_notna():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.notna()

def test_notnull():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.notnull()

def test_nsmallest():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.nsmallest(None, None)

def test_nunique():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.nunique()

def test_pct_change():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.pct_change()

def test_pipe():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.pipe(None)

def test_pivot():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.pivot()

def test_pivot_table():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.pivot_table()

def test_plot():
    ray_df = create_test_dataframe()
    with pytest.raises(NotImplementedError):
        ray_df.plot()
@pytest.fixture
def test_pop(ray_df, pandas_df):
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 15 13:52:36 2018
@author: BallBlueMeercat
"""
import numpy as np
from datasim import magn
#def lnlike(theta, data, sigma, firstderivs_key, ndim):
# '''
# Finding matter density m, interaction gamma.
# '''
# mag = data['mag']
#
# params = {}
# if ndim == 1:
# params = {'m':theta}
# elif ndim == 2:
# params = {'m':theta[0],'gamma':theta[1]}
#
# model = magn(params, data, firstderivs_key)
# var = sigma**2
# return -0.5*np.sum((mag-model)**2 /var +0.5*np.log(2*np.pi*var))
#def lnlike(theta, data, sigma, firstderivs_key, ndim):
# '''
# Finding matter density m, corrected absolute mag M, interaction gamma.
# '''
# mag = data['mag']
#
# params = {}
# if ndim == 2:
# params = {'m':theta[0], 'M':theta[1]}
# elif ndim == 3:
# params = {'m':theta[0],'M':theta[1], 'gamma':theta[2]}
#
# model = magn(params, data, firstderivs_key)
# var = sigma**2
# return -0.5*np.sum((mag-model)**2 /var +0.5*np.log(2*np.pi*var))
def lnlike(theta, data, sigma, firstderivs_key, ndim):
    '''
    Gaussian log-likelihood for matter density m, absolute magnitude M,
    light-curve parameters alpha and beta, and (for ndim > 4) the
    interaction parameters gamma / zeta.
    '''
    mag = data['mag']

    # Parameter names per model dimensionality; an unrecognized ndim
    # leaves params empty (matching the historical behavior).
    names_by_ndim = {
        4: ('m', 'M', 'alpha', 'beta'),
        5: ('m', 'M', 'alpha', 'beta', 'gamma'),
        6: ('m', 'M', 'alpha', 'beta', 'gamma', 'zeta'),
    }
    names = names_by_ndim.get(ndim, ())
    params = {name: theta[i] for i, name in enumerate(names)}

    model = magn(params, data, firstderivs_key)
    var = sigma**2
    residual = mag - model
    return -0.5*np.sum(residual**2 /var +0.5*np.log(2*np.pi*var))
#def lnprior(theta, key):
# '''
# Finding matter density m, interaction gamma.
# '''
#
# if key == 'LCDM':
# m = theta
# if 0 < m < 1 or m == 1:
# return 0.0
# elif key == 'late_int' or 'heaviside_late_int' or 'late_intxde':
# m, gamma = theta
# if (0 < m < 1 or m == 1) and -1.45 < gamma < 0.2:
# return 0.0
# elif key == 'rdecay':
# m, gamma = theta
# if (0 < m < 1 or m == 1) and -10 < gamma < 0:
# return 0.0
# elif key == 'interacting':
# m, gamma = theta
# if (0 < m < 1 or m == 1) and abs(gamma) < 1.45:
# return 0.0
# elif key == 'expgamma':
# m, gamma = theta
# if (0 < m < 1 or m == 1) and abs(gamma) < 25:
# return 0.0
# elif key == 'zxxgamma' or '<KEY>':
# m, gamma = theta
# if (0 < m < 1 or m == 1) and 0 < gamma < 10:
# return 0.0
# else:
# m, gamma = theta
# if (0 < m < 1 or m == 1) and abs(gamma) < 10:
# return 0.0
#
# return -np.inf
#def lnprior(theta, key):
# '''
# Finding matter density m, corrected absolute mag M, interaction gamma.
# '''
#
# Mmin = -20
#
# Mmax = -18
#
# if key == 'LCDM':
# m, M = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax:
# return 0.0
# elif key == 'late_int' or 'heaviside_late_int' or 'late_intxde':
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and -1.45 < gamma < 0.2:
# return 0.0
# elif key == 'rdecay':
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and -10 < gamma < 0 :
# return 0.0
# elif key == 'interacting':
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(gamma) < 1.45:
# return 0.0
# elif key == 'expgamma':
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(gamma) < 25 :
# return 0.0
# elif key == 'zxxgamma' or 'gammaxxz':
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and 0 < gamma < 10:
# return 0.0
# else:
# m, M, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(gamma) < 10:
# return 0.0
#
# return -np.inf
#def lnprior(theta, key):
# '''
# Finding matter density m, absolute M, alpha, beta, interaction gamma.
# '''
#
# Mmin, Mmax = -20, -18
# amax = 5
# bmax = 5
#
# print('key ln prior gets is = ',key)
#
# if key == 'LCDM':
# m, M, alpha, beta = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax:
# return 0.0
# elif key == 'late_int' or key == 'heaviside_late_int' or key == 'late_intxde':
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and -1.45 < gamma < 0.2:
# return 0.0
# elif key == 'rdecay':
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and -10 < gamma < 0 :
# return 0.0
# elif key == 'interacting':
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and abs(gamma) < 1.45:
# return 0.0
# elif key == 'expgamma':
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and abs(gamma) < 25 :
# return 0.0
# elif key == '<KEY>' or key == '<KEY>':
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and 0 < gamma < 10:
# return 0.0
# elif key == 'exotic':
# m, M, alpha, beta, gamma, zeta = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and 0 < gamma < 10 and 0 < zeta < 10:
# return 0.0
# else:
# m, M, alpha, beta, gamma = theta
# if (0 < m < 1 or m == 1) and Mmin < M < Mmax and abs(alpha) < amax and abs(beta) < bmax and abs(gamma) < 10:
# return 0.0
#
# return -np.inf
def lnprior(theta, key):
    '''
    Log-prior for matter density m, absolute magnitude M, alpha, beta
    and the model-dependent interaction parameter(s). Returns 0.0 when
    theta lies inside the (flat) prior volume, -np.inf otherwise.
    '''
    m_min_excl, m_max_incl = 0, 1
    mag_min, mag_max = -20, -18
    alpha_max = 5
    beta_max = 5

    gamma = zeta = None
    if key == 'LCDM':
        m, M, alpha, beta = theta
    elif key == 'exotic':
        m, M, alpha, beta, gamma, zeta = theta
    else:
        m, M, alpha, beta, gamma = theta

    # Bounds shared by every model; fail fast when any is violated.
    if not (m_min_excl < m <= m_max_incl):
        return -np.inf
    if not (mag_min < M < mag_max):
        return -np.inf
    if abs(alpha) >= alpha_max or abs(beta) >= beta_max:
        return -np.inf

    if key == 'LCDM':
        return 0.0

    if key == 'exotic':
        # NOTE(review): the lower bound on abs(zeta) is vacuous since
        # abs() is non-negative; kept as-is to preserve behavior.
        if -2 < gamma < 0.1 and -1.5 < abs(zeta) < 3.5:
            return 0.0
        return -np.inf

    # Open intervals (low, high) for gamma, per model key. The original
    # elif-chain contained a second, unreachable branch for the duplicate
    # placeholder key '<KEY>'; only the first (reachable) bounds are kept.
    gamma_bounds = {
        'late_intxde': (-2, 0.1),
        'heaviside_late_int': (-1.45, 0.1),
        'late_int': (-15, 0.1),
        'expgamma': (-0.1, 1.5),
        'txgamma': (-0.5, 0.1),
        'zxgamma': (-10, 0.1),
        '<KEY>': (-0.1, 12),
        'rdecay': (-2, 0.1),
        'interacting': (-1.5, 0.1),
    }
    bounds = gamma_bounds.get(key)
    if bounds is None:
        # Unknown model: fall back to a generous symmetric prior.
        return 0.0 if abs(gamma) < 10 else -np.inf
    low, high = bounds
    return 0.0 if low < gamma < high else -np.inf
def lnprob(theta, data, sigma, firstderivs_key, ndim):
lp = lnprior(theta, firstderivs_key)
if not np.isfinite(lp):
| |
import os
import subprocess
from subprocess import PIPE
from multiprocessing import Pool, cpu_count
import shutil
import re
import pandas as pd
from multiprocessing import Lock
import hashlib
import math
import xarray as xr
import numpy as np
import time
import pickle
class SpiceAnalysis():
    """Placeholder for ngspice analysis helpers (not yet implemented)."""

    def __init__(self):
        # Intentionally empty: no state yet.
        pass
class RawFile():
    '''
    Read the given ngspice .raw file
    '''
    def __init__(self, file):
        # TODO: parsing of the .raw file is not implemented yet; the
        # *file* argument is currently ignored.
        pass
class NgSpiceFile():
'''
Reading and modifying an ngspice file
'''
def __init__(self, file):
    """Load *file* line by line and prepare empty substitution tables.

    :param file: Path to the ngspice netlist to read.
    """
    self.file = file
    self.paramPairs = {}
    self.commandBlPairs = {}
    # readlines() keeps the trailing newlines, matching line-wise append.
    with open(self.file, 'r') as handle:
        self.spice = handle.readlines()
def findReplaceable(self):
'''
Searches the file for replaceable parameters.
Replaceable parameters are characterized by a name enclosed in # without spaces. E.g. #Name#
Note: The name #OP# is reserved. It must not be used and will be ignored.
'''
result=[]
for line in self.spice:
candidate=line.split('#')
for i, c in enumerate(candidate):
# The first candidate in a line is not valid
if i == 0:
continue
# OP is not a valid candidate
if c == 'OP':
continue
if not ' ' in c and not '\n' in c:
result.append(c)
return result
def opExtraction(self, directory):
'''
Replaces the keyword *#OP# with a block saving information about operating point of all transistors including capacitances.
Make sure the have an OP statement before the keyword *#OP#. Also, *#OP# must be part of a control block.
:param directory: Directory into which the operating point information should be written.
'''
EXTRACT_PARAM=["vds", "gm", "vth", "vgs", "id", "gds", "cgg", "cgs", "cgd", "cgb", "cdg", "cds", "cdd", "cdb", "csg", "css", "csd", "csb", "cbg", "cbs", "cbd", "cbb"]
transistors = self._getTransistorModelNames()
# Generate the string for
string = ""
for name, model in transistors.items():
outfile=os.path.join(directory, f"op_{name}.csv")
subString="wrdata {} ".format(str(outfile))
for param in EXTRACT_PARAM:
subString+="@m.{}.m{}[{}] ".format(name.lower(), model, param)
string += f"{subString}\n"
self.commandBlPairs["*#OP#"] = string
def _getTransistorModelNames(self):
'''
Returns a list of the model names of transistors. Note: This only works for SK130A for now as we search for sky130 in the model name.
'''
result={}
for line in self.spice:
candidate=line.split(' ')
# Check if the name contains XM which stands for transistor
#print(candidate[0][:2])
if candidate[0][:2] == 'XM':
for c in candidate:
#print(c[:6])
# Model names contain "sky130" at the beginning. Find the model name.
if c[:6] == "sky130":
result[candidate[0]] = c
return result
def replace(self, pairs, keepOld=False):
'''
Replace the replaceable parameters
:param pairs: Dictionary, They key is the name of the replaceable parameters. The value the value to be written to the output file.
:type pairs: dict
'''
replaceable=self.findReplaceable()
#
if not keepOld: self.paramPairs={}
for k, v in pairs.items():
if not k in replaceable:
raise(RuntimeError(f'Cannot replace {k}. {k} not found in spice file'))
self.paramPairs[k] = v
def write(self, file):
'''
Writes out the modified spice file. The file is written to the rundir given when instatiating the class.
:param
'''
# Check if we have parameters for all replaceable strings in the spice file.
replaceable=self.findReplaceable()
for r in replaceable:
if not r in self.paramPairs.keys():
raise(RuntimeError(f'No value found for {r}. Please provide values for all replaceable strings.'))
self.outfile=file
#outfile=os.path.join(self.rundir, file)
outspice=[]
for line in self.spice:
# Replace the parameters
for k,v in self.paramPairs.items():
line=line.replace('#'+k+'#', str(v))
# Replace command blocks
for k,v in self.commandBlPairs.items():
line=line.replace(k, v)
outspice.append(line)
# Write to file.
with open(self.outfile, 'w') as outfile:
for line in outspice:
outfile.write(line)
def splitNgspiceFileName(fileName):
    '''
    Splits the file name returned by ngspice into it's parts.
    Please follow the following naming scheme:
    result_type_name_temperature_mcrunno.csv
    In detail:
    - "result": Constant. Must be present for :py:class:`Ngspice` to recognize the file.
    - "type": Currently allowed are "AC", "NOISE", "MEAS".
    - "name": You might want to have multiple simulations of the same type. You can distinguish them by name.
    - "temperature": The temperature followed by deg. E.g. 80deg.
    - "mcrunno": Number of the monte carlo run.
    :returns: type, name, temperature, mcrunno
    :raises: Re-raises the underlying parsing error (after logging the
        offending file name) when the name does not follow the scheme.
    '''
    try:
        # Strip the extension, then split the remaining stem on '_'.
        # The original fileName is kept untouched so the error message
        # below reports the full, unmodified name.
        stem = os.path.splitext(fileName)[0]
        parts = stem.split('_')
        simu_type = parts[1]
        simu_name = parts[2]
        # "80deg" -> 80. re.search keeps the original leniency (the digits
        # only need to appear immediately before 'deg' somewhere in the
        # field); a missing match raises AttributeError and is reported.
        simu_temp = int(re.search('([0-9]+)deg', parts[3]).group(1))
        mcrunno = int(parts[4])
    except Exception:
        # 'except Exception' instead of a bare 'except' so that
        # KeyboardInterrupt/SystemExit still propagate untouched.
        print('Failed to parse file name {}'.format(fileName))
        raise
    return simu_type, simu_name, simu_temp, mcrunno
class NgSpice():
'''
Enables investigation of a parameter space using parallel processes of ngspice.
Concept and usage:
* In the spice file mark parameters to be altered with #Name#.
* In the spice file mark the position at which the operating point should be saved with "*#OP#".
* In the spice file write results to .csv files. The filename has to comply with the convetions defined in :py:func:`splitNgspiceFileName`
* Instantiate this class for the given spice file. You can then call :py:meth:`setParameterSpace` to replace the parameters to be altered.
'''
def __init__(self, file, rundir, parallel=True, max_cpus=24):
'''
:param rundir: Directory used for temporary storage.
:param file: Spice file.
:param parallel: Optional. Defaults to True. Set to false to disable processing on multiple cpus.
:param max_cpus: Maximum number of CPUs to be used. Defaults to 24.
'''
self.rundir=rundir
self.file=file
#self.includeFiles=includeFiles
self.constantPairs={}
self.ngspiceFile = file
self.parallel=parallel
self.folderOffset=0
self.simulation_result=None
self.max_cpus=max_cpus
    # This is obsolete. The function is covered by spiceParam.
    # def setConstants(self, pairs, keepOld):
    #     '''
    #     Sets the parameters to be replaced by constants.
    #
    #     :param pairs: Dictionary, They key is the name of the replaceable parameters. The value the value to be written to the output file.
    #     :type pairs: dict
    #     '''
    #     if not keepOld: self.pairs={}
    #
    #     for k, v in pairs.items():
    #         self.constantPairs[k] = v
    #
def getFileHash(self):
'''
Returns a hash for the spice file.
'''
hash=hashlib.sha1()
with open(self.ngspiceFile) as file:
for line in file:
hash.update(line.encode('utf-8'))
return hash.hexdigest()
def getResultAsDataArray(self):
'''
Groups the results from :py:meth:`Ngspice.run` by simulation type and converts the results
to a big dataframe.
The function :py:meth:`Ngspice.run` returns a list of simulation results.
The results may stem from AC analysis, OP calculation, measurements, etc.
The results are also not organized in a very handy form.
This function will group the results by measurement type. Since there is no easy way to infer
the type of simulation from the results you must provide the type of data via the file
name generated by ngspice.
:returns: {"type": {"name": xarray}}
'''
# Our simulation result is multidimensional. We use xarrays to store the data.
# Uppon creation of the array we need to know the exact size of the array.
# Some dimensions are known from spiceParam. Others, in particular temperature need to be
# inferred from the simulation results. Therefore we iterate over our simulation results twice.
# Firstly we extract the parameters given to the simulation from within ngspice.
# Secondly we initialize fill the xarray, then we fill the xarray.
if self.simulation_result is None:
raise(RuntimeError('You must run a simulation first'))
simulationResult=self.simulation_result
spiceParam=self.spiceParam
# Fill params from simulation results
param={}
t=time.time()
simulationResultNext=[]
for res in simulationResult:
# Iterate over the results of a simulation run.
keepResult=True
for fileName, simu_result in zip(res['resultFiles'], res['results']) :
# Get the type and name of the simulation
simu_type, simu_name, simu_temp, mcrunno=splitNgspiceFileName(fileName)
# Check if the type has already been added if not initialize
# TODO: Consider doing some sanity check on simulation types.
# We might want to reduce the amount of data stored for measurements.
if not simu_type in param:
param[simu_type]={}
# Check if the name has already been added if not initialize
if not simu_name in param[simu_type]:
param[simu_type][simu_name]={'temp':[], 'mcrunno':[]}
# Add frequencies
columns=list(simu_result.columns)
if 'frequency' in columns:
param[simu_type][simu_name]['frequency']=simu_result['frequency'].values
columns.remove('frequency')
# Same with time
if 'time' in columns:
param[simu_type][simu_name]['time']=simu_result['time'].values
columns.remove('time')
# Add spice vectors
param[simu_type][simu_name]['spice_vector']=columns
# See if the temperature is already listed
| |
# coding: utf-8
"""
3Di API
3Di simulation API (latest stable version: v3) Framework release: 2.9.0 3Di core release: 2.2.2 deployed on: 11:01AM (UTC) on January 11, 2022 # noqa: E501
The version of the OpenAPI document: v3
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import logging
import pprint
import re # noqa: F401
import six
from threedi_api_client.openapi.configuration import Configuration
logger = logging.getLogger(__name__)
class Schematisation(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type, used by the generated
    # (de)serialisation machinery of the client.
    openapi_types = {
        'url': 'str',
        'id': 'int',
        'owner': 'str',
        'name': 'str',
        'slug': 'str',
        'tags': 'list[str]',
        'meta': 'object',
        'created_by': 'str',
        'created_by_first_name': 'str',
        'created_by_last_name': 'str',
        'created': 'datetime',
        'last_updated': 'datetime',
        'storage_usage': 'int'
    }
    # Attribute name -> JSON key in the API payload (identical here, but
    # kept for generator symmetry with models where they differ).
    attribute_map = {
        'url': 'url',
        'id': 'id',
        'owner': 'owner',
        'name': 'name',
        'slug': 'slug',
        'tags': 'tags',
        'meta': 'meta',
        'created_by': 'created_by',
        'created_by_first_name': 'created_by_first_name',
        'created_by_last_name': 'created_by_last_name',
        'created': 'created',
        'last_updated': 'last_updated',
        'storage_usage': 'storage_usage'
    }
    def __init__(self, url=None, id=None, owner=None, name=None, slug=None, tags=None, meta=None, created_by=None, created_by_first_name=None, created_by_last_name=None, created=None, last_updated=None, storage_usage=None, local_vars_configuration=None):  # noqa: E501
        """Schematisation - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to the default client configuration when none is given;
        # it controls whether the setters below run client-side validation.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields for the generated properties below.
        self._url = None
        self._id = None
        self._owner = None
        self._name = None
        self._slug = None
        self._tags = None
        self._meta = None
        self._created_by = None
        self._created_by_first_name = None
        self._created_by_last_name = None
        self._created = None
        self._last_updated = None
        self._storage_usage = None
        self.discriminator = None
        # Optional attributes are only assigned when explicitly provided...
        if url is not None:
            self.url = url
        if id is not None:
            self.id = id
        if owner is not None:
            self.owner = owner
        # ...while 'name' (and 'meta' further down) are required by the
        # schema and always routed through their setters, even when None.
        self.name = name
        if slug is not None:
            self.slug = slug
        if tags is not None:
            self.tags = tags
        self.meta = meta
        if created_by is not None:
            self.created_by = created_by
        if created_by_first_name is not None:
            self.created_by_first_name = created_by_first_name
        if created_by_last_name is not None:
            self.created_by_last_name = created_by_last_name
        if created is not None:
            self.created = created
        if last_updated is not None:
            self.last_updated = last_updated
        if storage_usage is not None:
            self.storage_usage = storage_usage
    @property
    def url(self):
        """Gets the url of this Schematisation.  # noqa: E501
        :return: The url of this Schematisation.  # noqa: E501
        :rtype: str
        """
        return self._url
    @url.setter
    def url(self, url):
        """Sets the url of this Schematisation.
        :param url: The url of this Schematisation.  # noqa: E501
        :type: str
        """
        # Plain pass-through: no client-side validation for this field.
        self._url = url
    @property
    def id(self):
        """Gets the id of this Schematisation.  # noqa: E501
        :return: The id of this Schematisation.  # noqa: E501
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this Schematisation.
        :param id: The id of this Schematisation.  # noqa: E501
        :type: int
        """
        # Plain pass-through: no client-side validation for this field.
        self._id = id
    @property
    def owner(self):
        """Gets the owner of this Schematisation.  # noqa: E501
        The unique_id of an organisation  # noqa: E501
        :return: The owner of this Schematisation.  # noqa: E501
        :rtype: str
        """
        return self._owner
    @owner.setter
    def owner(self, owner):
        """Sets the owner of this Schematisation.
        The unique_id of an organisation  # noqa: E501
        :param owner: The owner of this Schematisation.  # noqa: E501
        :type: str
        """
        # Plain pass-through: no client-side validation for this field.
        self._owner = owner
    @property
    def name(self):
        """Gets the name of this Schematisation.  # noqa: E501
        :return: The name of this Schematisation.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this Schematisation.
        :param name: The name of this Schematisation.  # noqa: E501
        :type: str
        """
        # Required field: when client-side validation is enabled, reject
        # None and enforce a length between 1 and 256 characters.
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                name is not None and len(name) > 256):
            raise ValueError("Invalid value for `name`, length must be less than or equal to `256`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                name is not None and len(name) < 1):
            raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`")  # noqa: E501
        self._name = name
    @property
    def slug(self):
        """Gets the slug of this Schematisation.  # noqa: E501
        The internal name (only superusers can modify)  # noqa: E501
        :return: The slug of this Schematisation.  # noqa: E501
        :rtype: str
        """
        return self._slug
    @slug.setter
    def slug(self, slug):
        """Sets the slug of this Schematisation.
        The internal name (only superusers can modify)  # noqa: E501
        :param slug: The slug of this Schematisation.  # noqa: E501
        :type: str
        """
        # When client-side validation is enabled: length 1..256 and only
        # URL-safe characters (letters, digits, '-' and '_') are allowed.
        if (self.local_vars_configuration.client_side_validation and
                slug is not None and len(slug) > 256):
            raise ValueError("Invalid value for `slug`, length must be less than or equal to `256`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                slug is not None and len(slug) < 1):
            raise ValueError("Invalid value for `slug`, length must be greater than or equal to `1`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                slug is not None and not re.search(r'^[-a-zA-Z0-9_]+$', slug)):  # noqa: E501
            raise ValueError(r"Invalid value for `slug`, must be a follow pattern or equal to `/^[-a-zA-Z0-9_]+$/`")  # noqa: E501
        self._slug = slug
    @property
    def tags(self):
        """Gets the tags of this Schematisation.  # noqa: E501
        tags provided as a list of strings  # noqa: E501
        :return: The tags of this Schematisation.  # noqa: E501
        :rtype: list[str]
        """
        return self._tags
    @tags.setter
    def tags(self, tags):
        """Sets the tags of this Schematisation.
        tags provided as a list of strings  # noqa: E501
        :param tags: The tags of this Schematisation.  # noqa: E501
        :type: list[str]
        """
        # Plain pass-through: no client-side validation for this field.
        self._tags = tags
    @property
    def meta(self):
        """Gets the meta of this Schematisation.  # noqa: E501
        :return: The meta of this Schematisation.  # noqa: E501
        :rtype: object
        """
        return self._meta
    @meta.setter
    def meta(self, meta):
        """Sets the meta of this Schematisation.
        :param meta: The meta of this Schematisation.  # noqa: E501
        :type: object
        """
        # Required-but-nullable field: assigned unconditionally in __init__.
        self._meta = meta
    @property
    def created_by(self):
        """Gets the created_by of this Schematisation.  # noqa: E501
        The username of a user  # noqa: E501
        :return: The created_by of this Schematisation.  # noqa: E501
        :rtype: str
        """
        return self._created_by
    @created_by.setter
    def created_by(self, created_by):
        """Sets the created_by of this Schematisation.
        The username of a user  # noqa: E501
        :param created_by: The created_by of this Schematisation.  # noqa: E501
        :type: str
        """
        # When client-side validation is enabled, the username must match
        # the Django username pattern (word chars plus '.', '@', '+', '-').
        if (self.local_vars_configuration.client_side_validation and
                created_by is not None and not re.search(r'^[\w.@+-]+$', created_by)):  # noqa: E501
            raise ValueError(r"Invalid value for `created_by`, must be a follow pattern or equal to `/^[\w.@+-]+$/`")  # noqa: E501
        self._created_by = created_by
    @property
    def created_by_first_name(self):
        """Gets the created_by_first_name of this Schematisation.  # noqa: E501
        :return: The created_by_first_name of this Schematisation.  # noqa: E501
        :rtype: str
        """
        return self._created_by_first_name
    @created_by_first_name.setter
    def created_by_first_name(self, created_by_first_name):
        """Sets the created_by_first_name of this Schematisation.
        :param created_by_first_name: The created_by_first_name of this Schematisation.  # noqa: E501
        :type: str
        """
        # Plain pass-through: no client-side validation for this field.
        self._created_by_first_name = created_by_first_name
    @property
    def created_by_last_name(self):
        """Gets the created_by_last_name of this Schematisation.  # noqa: E501
        :return: The created_by_last_name of this Schematisation.  # noqa: E501
        :rtype: str
        """
        return self._created_by_last_name
    @created_by_last_name.setter
    def created_by_last_name(self, created_by_last_name):
        """Sets the created_by_last_name of this Schematisation.
        :param created_by_last_name: The created_by_last_name of this Schematisation.  # noqa: E501
        :type: str
        """
        # Plain pass-through: no client-side validation for this field.
        self._created_by_last_name = created_by_last_name
    @property
    def created(self):
        """Gets the created of this Schematisation.  # noqa: E501
        The creation date and time (only superusers can modify)  # noqa: E501
        :return: The created of this Schematisation.  # noqa: E501
        :rtype: datetime
        """
        return self._created
    @created.setter
    def created(self, created):
        """Sets the created of this Schematisation.
        The creation date and time (only superusers can modify)  # noqa: E501
        :param created: The created of this Schematisation.  # noqa: E501
        :type: datetime
        """
        # Plain pass-through: no client-side validation for this field.
        self._created = created
    @property
    def last_updated(self):
        """Gets the last_updated of this Schematisation.  # noqa: E501
        :return: The last_updated of this Schematisation.  # noqa: E501
        :rtype: datetime
        """
        return self._last_updated
    @last_updated.setter
    def last_updated(self, last_updated):
        """Sets the last_updated of this Schematisation.
        :param last_updated: The last_updated of this Schematisation.  # noqa: E501
        :type: datetime
        """
        # Plain pass-through: no client-side validation for this field.
        self._last_updated = last_updated
@property
def storage_usage(self):
"""Gets the storage_usage of this Schematisation. # noqa: E501
Automatically calculated. # noqa: E501
:return: The storage_usage of this Schematisation. # noqa: E501
| |
== state['bots']
assert test_first_round['food'] == list(state['food'][0]) + list(state['food'][1])
assert state['score'] == [0, 0]
for i in range(4):
state = game.play_turn(state)
test_second_round = layout.parse_layout(
""" ######
# . #
#. #
# #
######
######
# 0 #
#21 #
# 3#
###### """)
assert test_second_round['bots'] == state['bots']
assert test_second_round['food'] == list(state['food'][0]) + list(state['food'][1])
assert state['score'] == [0, 1]
for i in range(4):
state = game.play_turn(state)
test_third_round = layout.parse_layout(
""" ######
#2 . #
#.0 1#
# 3#
###### """)
assert test_third_round['bots'] == state['bots']
assert test_third_round['food'] == list(state['food'][0]) + list(state['food'][1])
assert state['score'] == [game.KILL_POINTS, 1]
for i in range(4):
state = game.play_turn(state)
test_fourth_round = layout.parse_layout(
""" ######
# . #
#. #
# #
######
######
#2 #
#0 1 #
# 3#
###### """)
assert test_fourth_round['bots'] == state['bots']
assert test_fourth_round['food'] == list(state['food'][0]) + list(state['food'][1])
assert state['score'] == [game.KILL_POINTS, game.KILL_POINTS + 1]
for i in range(4):
state = game.play_turn(state)
test_fifth_round = layout.parse_layout(
""" ######
# . #
#. #
# #
######
######
# 2 #
# 0 1#
# 3#
###### """)
assert test_fifth_round['bots'] == state['bots']
assert test_fifth_round['food'] == list(state['food'][0]) + list(state['food'][1])
assert state['score'] == [game.KILL_POINTS * 2, game.KILL_POINTS + 1]
for i in range(4):
state = game.play_turn(state)
test_sixth_round = layout.parse_layout(
""" ######
# . #
#. #
# #
######
######
# 2 #
#0 1 #
# 3#
###### """)
assert test_sixth_round['bots'] == state['bots']
assert test_sixth_round['food'] == list(state['food'][0]) + list(state['food'][1])
assert state['score'] == [game.KILL_POINTS * 2, game.KILL_POINTS * 2+ 1]
for i in range(3): # !! Only move three bots
state = game.play_turn(state)
test_seventh_round = layout.parse_layout(
""" ######
# #
#. #
# #
######
######
# 2 #
#0 1 #
# 3#
###### """)
assert test_seventh_round['bots'] == state['bots']
assert test_seventh_round['food'] == list(state['food'][0]) + list(state['food'][1])
assert state['score'] == [game.KILL_POINTS * 2 + 1, game.KILL_POINTS * 2 + 1]
assert state['gameover'] == True
assert state['whowins'] == 2
with pytest.raises(ValueError):
state = game.play_turn(state)
@pytest.mark.parametrize('score', ([[3, 3], 2], [[1, 13], 1], [[13, 1], 0]))
def test_play_turn_maxrounds(score):
    """Check that game quits at maxrounds and choses correct winner"""
    # this works for ties as well, because there are no points to be gained at init positions
    final_score, expected_winner = score
    state = setup_random_basic_gamestate()
    state["round"] = 301
    state["score"] = final_score
    result = game.play_turn(state)
    assert result["gameover"]
    assert result["whowins"] == expected_winner
def test_play_turn_move():
    """Checks that bot is moved to intended space"""
    turn = 0
    parsed = layout.parse_layout(layout.get_layout_by_name("small_100"))
    # Minimal hand-built game state around the parsed layout.
    state = {
        "food": parsed["food"],
        "walls": parsed["walls"],
        "bots": parsed["bots"],
        "max_rounds": 300,
        "team_names": ("a", "b"),
        "turn": turn,
        "round": 0,
        "timeout": [],
        "gameover": False,
        "whowins": None,
        "team_say": "bla",
        "score": 0,
        "kills": [0] * 4,
        "deaths": [0] * 4,
        "bot_was_killed": [False] * 4,
        "errors": [[], []],
        "fatal_errors": [{}, {}],
        "rnd": random.Random()
    }
    target = get_legal_positions(state["walls"], state["bots"][turn])[0]
    new_state = apply_move(state, target)
    assert new_state["bots"][turn] == target
def setup_random_basic_gamestate(*, round=1, turn=0):
    """helper function for testing play turn"""
    parsed = layout.parse_layout(layout.get_layout_by_name("small_100"))
    # Both teams use a no-op move function that stays in place.
    def stopping(bot, s):
        return (bot.position, s)
    state = setup_game([stopping, stopping], layout_dict=parsed)
    state['round'] = round
    state['turn'] = turn
    return state
def setup_specific_basic_gamestate(round=0, turn=0):
    """helper function for testing play turn"""
    # NOTE(review): all maze rows must share one width for parse_layout --
    # verify the inner spacing of this layout string survived reformatting.
    l = """
    ##################
    #. ... .##. 3#
    # # # . .### #1#
    # # ##. . #
    # . .## # #
    #0# ###. . # # #
    #2 .##. ... .#
    ##################
    """
    parsed_l = layout.parse_layout(l)
    # Both teams use a no-op move function that stays in place.
    stopping = lambda bot, s: (bot.position, s)
    game_state = setup_game([stopping, stopping], layout_dict=parsed_l)
    game_state['round'] = round
    game_state['turn'] = turn
    return game_state
def test_max_rounds():
    # NOTE(review): maze rows must all be 8 characters wide for
    # parse_layout -- verify the spacing survived reformatting.
    l = """
    ########
    #20..13#
    # #
    ########
    """
    def move(bot, s):
        # in the first round (round #1),
        # all bots move to the south
        if bot.round == 1:
            # go one step down (y + 1)
            return (bot.position[0], bot.position[1] + 1)
        else:
            # There should not be more then one round in this test
            raise RuntimeError("We should not be here in this test")
    l = layout.parse_layout(l)
    # Initial positions as defined by the layout string above.
    assert l['bots'][0] == (2, 1)
    assert l['bots'][1] == (5, 1)
    assert l['bots'][2] == (1, 1)
    assert l['bots'][3] == (6, 1)
    # max_rounds == 1 should call move just once
    final_state = run_game([move, move], layout_dict=l, max_rounds=1)
    assert final_state['round'] == 1
    assert final_state['bots'][0] == (2, 2)
    assert final_state['bots'][1] == (5, 2)
    assert final_state['bots'][2] == (1, 2)
    assert final_state['bots'][3] == (6, 2)
    # max_rounds == 2 should finish and have the first team lose
    final_state = run_game([move, move], layout_dict=l, max_rounds=2)
    assert final_state['round'] == 2
    assert final_state['turn'] == 0
    assert final_state['bots'][0] == (2, 2)
    assert final_state['bots'][1] == (5, 2)
    assert final_state['bots'][2] == (1, 2)
    assert final_state['bots'][3] == (6, 2)
    assert final_state['gameover']
    assert final_state['whowins'] == 1
    # Bot 0's RuntimeError in round 2 is recorded as a fatal error,
    # which decides the game in favour of team 1.
    assert final_state['fatal_errors'][0][0] == {
        'type': 'FatalException',
        'description': 'Exception in client (RuntimeError): We should not be here in this test',
        'round': 2,
        'turn': 0,
    }
def test_update_round_counter():
    # (round, turn) before -> (round, turn) after one advance.
    transitions = {
        (None, None): (1, 0),
        (1, 0): (1, 1),
        (1, 1): (1, 2),
        (1, 2): (1, 3),
        (1, 3): (2, 0),
        (2, 3): (3, 0)
    }
    # While the game is running, next_round_turn advances (round, turn).
    for (round0, turn0), (round1, turn1) in transitions.items():
        res = game.next_round_turn({'turn': turn0, 'round': round0, 'gameover': False})
        expected = {'turn': turn1, 'round': round1}
        assert all(res.get(k) == v for k, v in expected.items())
    # Once the game is over, advancing the counter must fail.
    for (round0, turn0), _ in transitions.items():
        with pytest.raises(ValueError):
            game.next_round_turn({'turn': turn0, 'round': round0, 'gameover': True})
def test_last_round_check():
    # (max_rounds, current_round, turn): gameover
    cases = {
        (1, None, None): False,
        (1, 1, 0): False,
        (1, 1, 3): True,
    }
    for (max_rounds, current_round, current_turn), expected in cases.items():
        state = {
            'max_rounds': max_rounds,
            'round': current_round,
            'turn': current_turn,
            'fatal_errors': [[], []],
            'errors': [[], []],
            'gameover': False,
            'score': [0, 0],
            'food': [{(1, 1)}, {(1, 1)}]  # dummy food
        }
        res = game.check_gameover(state, detect_final_move=True)
        assert res['gameover'] == expected
@pytest.mark.parametrize(
    'team_errors, team_wins', [
        (((0, 0), (0, 0)), False),
        (((0, 1), (0, 0)), False),
        (((0, 0), (0, 1)), False),
        (((0, 2), (0, 2)), False),
        (((0, 4), (0, 0)), False),
        (((0, 0), (0, 4)), False),
        (((0, 4), (0, 4)), False),
        (((0, 5), (0, 0)), 1),
        (((0, 0), (0, 5)), 0),
        (((0, 5), (0, 5)), 2),
        (((1, 0), (0, 0)), 1),
        (((0, 0), (1, 0)), 0),
        (((1, 0), (1, 0)), 2),
        (((1, 1), (1, 0)), 2),
        (((1, 0), (0, 5)), 1),
        (((0, 5), (1, 0)), 0),
    ]
)
def test_error_finishes_game(team_errors, team_wins):
    # the mapping is as follows:
    # [(num_fatal_0, num_errors_0), (num_fatal_1, num_errors_1), result_flag]
    # the result flag: 0/1: team 0/1 wins, 2: draw, False: no winner yet
    assert game.MAX_ALLOWED_ERRORS == 4, "Test assumes MAX_ALLOWED_ERRORS is 4"
    # Fake the recorded error lists in a minimal game state.
    state = {
        "fatal_errors": [[None] * fatal for (fatal, _) in team_errors],
        "errors": [[None] * errs for (_, errs) in team_errors],
    }
    res = game.check_gameover(state)
    if team_wins is False:
        assert res["whowins"] is None
        assert res["gameover"] is False
    else:
        assert res["whowins"] == team_wins
        assert res["gameover"] is True
@pytest.mark.parametrize('bot_to_move', [0, 1, 2, 3])
def test_finished_when_no_food(bot_to_move):
    """ Test that the game is over when a team has eaten its food. """
    # NOTE(review): maze rows must all be 8 characters wide for
    # parse_layout -- verify the spacing survived reformatting.
    l = """
    ########
    # 0.2 #
    # 3.1 #
    ########
    """
    # Which of the team's two bots moves, and which team it belongs to.
    bot_turn = bot_to_move // 2
    team_to_move = bot_to_move % 2
    def move(bot, s):
        # eat the food between 0 and 2
        if team_to_move == 0 and bot.is_blue and bot_turn == bot._bot_turn:
            return (4, 1)
        # eat the food between 3 and 1
        if team_to_move == 1 and (not bot.is_blue) and bot_turn == bot._bot_turn:
            return (3, 2)
        return bot.position
    l = layout.parse_layout(l)
    final_state = run_game([move, move], layout_dict=l, max_rounds=20)
    # The game must end in round 1, on the turn of the bot that ate.
    assert final_state['round'] == 1
    assert final_state['turn'] == bot_to_move
def test_minimal_game():
    # Both teams simply stay in place for the whole game.
    def stop(bot, state):
        return bot.position
    _, layout_string = layout.get_random_layout()
    final_state = run_game([stop, stop], max_rounds=20,
                           layout_dict=layout.parse_layout(layout_string))
    assert final_state['gameover'] is True
    assert final_state['score'] == [0, 0]
    assert final_state['round'] == 20
def test_minimal_losing_game_has_one_error():
    def bad_first_move(b, s):
        # bot 0 attempts an illegal move in the first round,
        # otherwise everybody stays put
        if b.round == 1 and b._bot_index == 0:
            return (0, 0)
        return b.position
    def stay(b, s):
        return b.position
    _, layout_string = layout.get_random_layout()
    final_state = run_game([bad_first_move, stay], max_rounds=20,
                           layout_dict=layout.parse_layout(layout_string))
    assert final_state['gameover'] is True
    assert final_state['score'] == [0, 0]
    # Exactly one (non-fatal) error for team 0, none for team 1.
    assert len(final_state['errors'][0]) == 1
    assert len(final_state['errors'][1]) == 0
    assert final_state['round'] == 20
def test_minimal_remote_game():
def move(b, s):
return b.position
layout_name, layout_string = layout.get_random_layout()
l = layout.parse_layout(layout_string)
final_state = run_game(["test/demo01_stopping.py", move], max_rounds=20, layout_dict=l)
final_state | |
+ 0.00793062*m.x249 + 0.00225214*m.x250 + 0.00173381*m.x251
+ 0.0025893*m.x252 + 0.00146414*m.x253 + 0.00619079*m.x254 - 0.000522178*m.x255
+ 0.00121952*m.x256 + 0.000349511*m.x257 + 0.000972283*m.x258 + 0.00145449*m.x259
- 0.000489457*m.x260 + 2.38348E-5*m.x261 + 0.000570035*m.x262 + 0.00228208*m.x263
+ 0.00212064*m.x264 + 0.00363109*m.x265 + 0.000619034*m.x266 - 0.000860656*m.x267
+ 0.00340102*m.x268 - 0.00197937*m.x269 + 0.000621111*m.x270 + 0.00199323*m.x271
+ 0.00314636*m.x272 - 0.000650527*m.x273 + 0.00194261*m.x274 - 0.00321407*m.x275
+ 0.000171403*m.x276 + 0.00364979*m.x277 + 0.00485946*m.x278 + 0.00796772*m.x279
+ 0.00166603*m.x280 + 0.00349167*m.x281 - 0.000466954*m.x282 - 0.00100195*m.x283
+ 0.00342181*m.x284 + 0.00300422*m.x285 - 0.00124986*m.x286 - 5.94885E-5*m.x287
+ 0.00328713*m.x288 + 0.00273221*m.x289 + 0.00345779*m.x290 + 0.00419154*m.x291
+ 0.00399493*m.x292 + 0.00448916*m.x293 + 0.00118132*m.x294 - 0.00106048*m.x295
+ 0.00275687*m.x296 - 0.00193635*m.x297 + 0.0035958*m.x298 - 0.000537039*m.x299
- 0.000271199*m.x300 - 0.00288422*m.x301 + 0.00223441*m.x302 + 0.00280782*m.x303 == 0)
# Auto-generated linear equality: defines m.x146 as a fixed linear
# combination of m.x204..m.x303 (coefficients produced by a model generator).
m.c251 = Constraint(expr= - m.x146 + 0.0126161*m.x204 + 0.02189*m.x205 + 2.7433E-5*m.x206 + 0.00711934*m.x207
    + 0.00440483*m.x208 + 0.00205128*m.x209 + 0.00522821*m.x210 - 0.000872693*m.x211
    + 0.0174386*m.x212 + 0.00793697*m.x213 - 0.00874611*m.x214 - 0.00283528*m.x215
    + 0.012225*m.x216 + 0.0115852*m.x217 + 0.0027102*m.x218 + 0.00606924*m.x219
    + 0.00917394*m.x220 + 0.0156491*m.x221 + 0.0187115*m.x222 + 0.0109855*m.x223
    + 0.000118632*m.x224 + 0.0223632*m.x225 - 0.000683767*m.x226 - 0.00385233*m.x227
    + 0.00106856*m.x228 + 0.00327571*m.x229 + 0.00913902*m.x230 + 0.00142627*m.x231
    + 0.000596885*m.x232 + 0.00136868*m.x233 + 0.00938261*m.x234 + 0.00193995*m.x235
    - 0.00127938*m.x236 + 0.00363406*m.x237 + 0.0163399*m.x238 + 0.00413739*m.x239
    + 0.00442326*m.x240 - 0.00109864*m.x241 + 0.0175505*m.x242 + 0.00339227*m.x243
    + 0.0166665*m.x244 + 0.00780343*m.x245 + 0.00475321*m.x246 + 0.00257258*m.x247
    + 0.333543*m.x248 + 0.00602288*m.x249 + 0.00486385*m.x250 + 0.0409265*m.x251
    + 0.00666557*m.x252 + 0.00172096*m.x253 + 0.00148516*m.x254 + 0.00818338*m.x255
    + 0.00375999*m.x256 + 0.00892913*m.x257 + 0.00635127*m.x258 + 0.00310111*m.x259
    + 0.0023572*m.x260 - 0.0124136*m.x261 - 0.00282737*m.x262 + 0.00109311*m.x263
    + 0.0171055*m.x264 - 0.000546877*m.x265 - 0.00127115*m.x266 - 0.00882959*m.x267
    + 0.00370779*m.x268 + 0.00220942*m.x269 - 0.0007257*m.x270 + 0.000362512*m.x271
    + 0.000666346*m.x272 + 0.00537778*m.x273 + 0.00435506*m.x274 + 0.0289633*m.x275
    - 0.00793056*m.x276 - 0.00124997*m.x277 + 0.00600917*m.x278 + 0.00654889*m.x279
    + 0.0013566*m.x280 + 0.0145382*m.x281 + 0.00753084*m.x282 + 0.000994392*m.x283
    - 0.0010613*m.x284 + 0.00748896*m.x285 + 0.0112013*m.x286 + 0.00952063*m.x287
    - 0.00220101*m.x288 + 0.00611291*m.x289 - 0.00609654*m.x290 + 0.000316901*m.x291
    + 0.00160516*m.x292 + 0.00470546*m.x293 - 0.00380484*m.x294 - 0.00603313*m.x295
    + 0.00178999*m.x296 + 0.017123*m.x297 + 0.00424638*m.x298 + 0.0127695*m.x299
    + 0.00599775*m.x300 + 0.0107649*m.x301 + 0.00273511*m.x302 + 0.00174594*m.x303 == 0)
# Auto-generated linear equality: defines m.x147 as a fixed linear
# combination of m.x204..m.x303.
m.c252 = Constraint(expr= - m.x147 + 0.000539029*m.x204 - 0.00142123*m.x205 - 0.00324286*m.x206 + 0.000965714*m.x207
    - 0.000705455*m.x208 + 0.0034346*m.x209 + 0.000518417*m.x210 + 0.000448292*m.x211
    + 0.00274013*m.x212 - 0.00236638*m.x213 - 0.00176148*m.x214 + 0.00355332*m.x215
    + 0.00282004*m.x216 + 0.00378529*m.x217 + 0.00285381*m.x218 + 0.0066067*m.x219
    + 0.0035766*m.x220 + 0.00559342*m.x221 + 0.000813106*m.x222 - 2.44696E-5*m.x223
    + 0.00252467*m.x224 + 0.00169098*m.x225 + 0.00438943*m.x226 + 0.00138166*m.x227
    + 0.00241905*m.x228 - 0.00357096*m.x229 + 0.000648204*m.x230 + 0.00217757*m.x231
    - 0.000173936*m.x232 + 3.95557E-5*m.x233 + 0.00048899*m.x234 + 0.00582795*m.x235
    - 0.000380482*m.x236 + 0.0025062*m.x237 + 0.00412922*m.x238 + 0.00695501*m.x239
    + 0.00224652*m.x240 + 0.0042909*m.x241 - 0.00564367*m.x242 + 0.00216846*m.x243
    - 0.000715736*m.x244 + 0.00217772*m.x245 + 0.00490204*m.x246 + 0.00793062*m.x247
    + 0.00602288*m.x248 + 0.0286408*m.x249 + 0.00321708*m.x250 - 0.000649104*m.x251
    + 0.00220265*m.x252 + 0.000192602*m.x253 + 0.0056319*m.x254 + 0.00468117*m.x255
    - 0.000284323*m.x256 + 0.00172826*m.x257 + 0.00393731*m.x258 + 0.00193608*m.x259
    + 0.00419589*m.x260 + 0.000305111*m.x261 - 0.000836327*m.x262 + 0.00132171*m.x263
    + 0.00254184*m.x264 + 0.00434758*m.x265 - 0.000143504*m.x266 - 0.00324401*m.x267
    + 0.00241411*m.x268 - 0.000256307*m.x269 + 0.00117147*m.x270 + 0.000577468*m.x271
    + 0.00154811*m.x272 - 0.000768089*m.x273 + 0.00319599*m.x274 + 0.00303778*m.x275
    + 0.00128681*m.x276 + 0.000718747*m.x277 - 0.000691613*m.x278 + 0.0060368*m.x279
    + 0.000899485*m.x280 + 0.00663248*m.x281 + 0.0010729*m.x282 + 0.006249*m.x283
    + 0.00265281*m.x284 + 0.00400057*m.x285 - 0.00122283*m.x286 + 0.000588542*m.x287
    + 0.00328499*m.x288 + 0.000580561*m.x289 + 0.000227831*m.x290 + 0.0066509*m.x291
    + 0.00019791*m.x292 + 0.00589171*m.x293 - 0.000623163*m.x294 - 7.87584E-5*m.x295
    + 0.00213016*m.x296 - 0.00403433*m.x297 + 0.00423746*m.x298 + 0.00337871*m.x299
    + 0.00204417*m.x300 + 0.00449024*m.x301 + 0.00140987*m.x302 + 0.00259129*m.x303 == 0)
# Auto-generated linear equality: defines m.x148 as a fixed linear
# combination of m.x204..m.x303.
m.c253 = Constraint(expr= - m.x148 + 0.00171572*m.x204 + 0.000952018*m.x205 + 0.0057922*m.x206 + 0.00724404*m.x207
    + 0.00591159*m.x208 + 0.00340814*m.x209 + 0.00229506*m.x210 - 0.00207556*m.x211
    + 0.00199163*m.x212 + 0.00351492*m.x213 + 0.00162467*m.x214 + 0.00303711*m.x215
    + 0.0010868*m.x216 + 0.00266115*m.x217 + 0.00231984*m.x218 + 0.00683528*m.x219
    + 0.00328833*m.x220 + 0.000136349*m.x221 + 0.0030329*m.x222 - 0.00162389*m.x223
    + 0.00317859*m.x224 + 0.00143324*m.x225 + 0.00306698*m.x226 + 0.001371*m.x227
    + 0.00317936*m.x228 - 0.0014774*m.x229 + 0.00369376*m.x230 + 0.00191957*m.x231
    + 0.00262989*m.x232 + 0.0072304*m.x233 + 0.00273794*m.x234 + 0.00221714*m.x235
    + 0.0022927*m.x236 + 0.00352758*m.x237 + 0.00247137*m.x238 + 0.0028489*m.x239
    + 0.000464028*m.x240 + 0.0039924*m.x241 + 0.00110388*m.x242 + 0.0060672*m.x243
    + 0.00244895*m.x244 + 0.0059976*m.x245 + 0.00645328*m.x246 + 0.00225214*m.x247
    + 0.00486385*m.x248 + 0.00321708*m.x249 + 0.0326124*m.x250 + 0.00473313*m.x251
    + 0.00461718*m.x252 + 0.0068865*m.x253 + 0.00250704*m.x254 + 0.00606702*m.x255
    + 0.000219255*m.x256 + 0.00287223*m.x257 + 0.00250699*m.x258 + 0.00176811*m.x259
    + 0.00192979*m.x260 + 2.4776E-5*m.x261 - 0.000659325*m.x262 + 0.00468844*m.x263
    + 0.00480861*m.x264 + 0.00487248*m.x265 - 0.00220491*m.x266 - 0.00289289*m.x267
    + 0.00758561*m.x268 + 0.00350318*m.x269 + 0.0034932*m.x270 + 0.00414106*m.x271
    - 0.000776536*m.x272 - 0.00326844*m.x273 + 0.00321975*m.x274 + 0.00511975*m.x275
    + 0.0018167*m.x276 + 0.0015802*m.x277 + 0.00532914*m.x278 + 0.0052332*m.x279
    + 0.00121475*m.x280 + 0.00220381*m.x281 + 0.00145566*m.x282 - 0.000197051*m.x283
    + 0.00302341*m.x284 + 0.00322867*m.x285 + 0.0042178*m.x286 - 0.00044828*m.x287
    + 0.00548425*m.x288 + 0.00168605*m.x289 - 0.000888688*m.x290 + 0.004772*m.x291
    + 0.000505019*m.x292 + 0.00313289*m.x293 + 0.00119693*m.x294 + 0.00445284*m.x295
    + 0.00521004*m.x296 - 0.00282173*m.x297 + 0.00291792*m.x298 - 0.000120553*m.x299
    + 0.000967824*m.x300 + 0.000698762*m.x301 + 0.00432741*m.x302 + 0.00166067*m.x303 == 0)
# Auto-generated linear equality: defines m.x149 as a fixed linear
# combination of m.x204..m.x303.
m.c254 = Constraint(expr= - m.x149 + 0.00871066*m.x204 - 0.000699597*m.x205 + 0.0166603*m.x206 + 0.00718655*m.x207
    + 0.00637504*m.x208 + 0.00910415*m.x209 - 0.000868046*m.x210 - 0.00135891*m.x211
    + 0.0121972*m.x212 + 0.0269972*m.x213 + 0.00683673*m.x214 + 0.00993462*m.x215
    + 0.00141579*m.x216 + 0.0140797*m.x217 + 0.00363014*m.x218 + 0.000969236*m.x219
    + 0.00340781*m.x220 + 0.0130438*m.x221 + 0.0177681*m.x222 + 0.00351454*m.x223
    - 0.00620603*m.x224 + 0.0060925*m.x225 + 0.0140371*m.x226 + 0.00194706*m.x227
    + 0.00580385*m.x228 - 0.000131234*m.x229 - 0.00141307*m.x230 + 0.0115101*m.x231
    + 0.00163586*m.x232 + 0.0127117*m.x233 + 0.0081173*m.x234 + 0.00143776*m.x235
    + 0.0147001*m.x236 + 0.00268179*m.x237 + 0.00232468*m.x238 + 0.00397424*m.x239
    + 0.00563646*m.x240 + 0.000223728*m.x241 + 0.00135794*m.x242 + 0.00195896*m.x243
    + 0.00980088*m.x244 + 0.00294803*m.x245 + 0.0235477*m.x246 + 0.00173381*m.x247
    + 0.0409265*m.x248 - 0.000649104*m.x249 + 0.00473313*m.x250 + 0.212813*m.x251
    - 0.00571772*m.x252 - 9.80108E-5*m.x253 + 0.000713594*m.x254 - 0.000291411*m.x255
    - 0.0028928*m.x256 + 0.00849788*m.x257 - 0.00393003*m.x258 + 0.0144913*m.x259
    - 0.00133849*m.x260 - 0.00262538*m.x261 + 0.0025511*m.x262 + 0.0304485*m.x263
    + 0.00172909*m.x264 - 0.00455718*m.x265 - 0.000349801*m.x266 - 0.010277*m.x267
    - 0.00203947*m.x268 + 0.00647039*m.x269 - 0.000334806*m.x270 - 0.000679271*m.x271
    + 0.0102058*m.x272 + 0.0132501*m.x273 + 0.00894547*m.x274 + 0.0211536*m.x275
    - 0.000443774*m.x276 + 0.00327277*m.x277 + 0.00786226*m.x278 + 0.00909273*m.x279
    + 0.0049413*m.x280 + 0.00400317*m.x281 - 0.00680418*m.x282 + 0.0137323*m.x283
    + 0.00633139*m.x284 - 5.13418E-5*m.x285 + 0.0162123*m.x286 + 0.00880031*m.x287
    - 0.00234105*m.x288 + 0.00247115*m.x289 + 0.00278431*m.x290 + 0.00463181*m.x291
    + 0.00075518*m.x292 - 0.00287996*m.x293 - 0.00533256*m.x294 - 0.00290057*m.x295
    - 0.000529075*m.x296 + 0.000979263*m.x297 + 0.00309003*m.x298 + 0.00415618*m.x299
    + 0.00182144*m.x300 + 0.00336642*m.x301 + 0.000262547*m.x302 - 0.000847169*m.x303 == 0)
# Auto-generated linear equality: defines m.x150 as a fixed linear
# combination of m.x204..m.x303.
m.c255 = Constraint(expr= - m.x150 - 0.000658519*m.x204 + 0.00166555*m.x205 - 0.000651147*m.x206 + 0.00872243*m.x207
    + 0.00572145*m.x208 + 0.00203269*m.x209 + 0.00175605*m.x210 - 0.00277333*m.x211
    + 5.78285E-5*m.x212 + 0.0035179*m.x213 + 0.000473999*m.x214 + 0.00342858*m.x215
    + 0.00220267*m.x216 + 0.00578554*m.x217 + 0.00842352*m.x218 + 0.00589067*m.x219
    + 0.00725086*m.x220 + 0.00351709*m.x221 + 0.0056051*m.x222 - 0.00101164*m.x223
    + 4.22193E-5*m.x224 - 0.00143233*m.x225 + 0.00404018*m.x226 + 0.000567011*m.x227
    + 0.00417651*m.x228 + 0.00076644*m.x229 - 0.00209444*m.x230 - 0.000945371*m.x231
    + 0.000879524*m.x232 + 0.00781209*m.x233 + 0.00159047*m.x234 + 0.00796665*m.x235
    - 0.000690034*m.x236 + 0.0017263*m.x237 + 0.00329108*m.x238 - 0.00183251*m.x239
    + 0.0023205*m.x240 + 0.00665241*m.x241 + 0.00288393*m.x242 + 0.00250112*m.x243
    + 0.00710031*m.x244 + 0.00552272*m.x245 + 0.00185295*m.x246 + 0.0025893*m.x247
    + 0.00666557*m.x248 + 0.00220265*m.x249 + 0.00461718*m.x250 - 0.00571772*m.x251
    + 0.0297052*m.x252 + 0.0133063*m.x253 + 0.00512875*m.x254 + 0.00511319*m.x255
    + 0.00128142*m.x256 + 0.0011758*m.x257 + 0.000185703*m.x258 + 0.000807728*m.x259
    + 0.00482243*m.x260 + 0.00671943*m.x261 + 0.0024249*m.x262 - 0.00576356*m.x263
    + 0.00615053*m.x264 + 0.00457997*m.x265 + 0.00116129*m.x266 - 0.00334468*m.x267
    - 0.00177309*m.x268 + 0.00135912*m.x269 + 0.0055357*m.x270 + 0.00211564*m.x271
    + 0.0010794*m.x272 + 0.00192198*m.x273 + 0.00189277*m.x274 + 0.00643706*m.x275
    + 0.00398105*m.x276 + 0.000745003*m.x277 + 0.00524394*m.x278 + 0.00240223*m.x279
    + 0.00282459*m.x280 + 0.00472739*m.x281 + 0.000794467*m.x282 + 0.00387197*m.x283
    + 0.00198803*m.x284 + 0.00165035*m.x285 + 0.00321866*m.x286 + 0.000171898*m.x287
    + 0.00933104*m.x288 + 0.00315924*m.x289 + 0.00241389*m.x290 + 0.00336441*m.x291
    + 4.30958E-5*m.x292 + 0.00271535*m.x293 + 0.00462087*m.x294 + 0.00710323*m.x295
    + 0.000902586*m.x296 + 0.00233291*m.x297 + 0.00342174*m.x298 + 0.00618718*m.x299
    + 0.00291333*m.x300 - 0.00120359*m.x301 + 0.00135234*m.x302 - 0.000351985*m.x303 == 0)
m.c256 = Constraint(expr= - m.x151 + 0.00632557*m.x204 + 0.00413697*m.x205 + 0.0025737*m.x206 + 0.00911811*m.x207
+ 0.00369485*m.x208 - 0.000620515*m.x209 + 0.00453298*m.x210 + 0.00257709*m.x211
- 0.003034*m.x212 + 0.00199006*m.x213 + 0.00469908*m.x214 + 0.00462163*m.x215
+ 0.00354208*m.x216 + 0.00521587*m.x217 + 0.00670307*m.x218 + 0.0039247*m.x219
+ 0.00486144*m.x220 + 0.00107063*m.x221 - 0.0012575*m.x222 + 0.00584599*m.x223
+ 0.000472764*m.x224 + 0.00043714*m.x225 + 0.00262736*m.x226 + 0.0025858*m.x227
+ 0.00240856*m.x228 + 0.0033869*m.x229 + 0.000886721*m.x230 - 0.0071917*m.x231
+ 0.00125037*m.x232 + 0.00489662*m.x233 + 0.00666147*m.x234 + 0.00687732*m.x235
- 0.00340535*m.x236 + 0.0110796*m.x237 - 0.00197335*m.x238 + 0.00181348*m.x239
+ 0.00368711*m.x240 + 0.0109749*m.x241 + 0.000894389*m.x242 + 0.00342576*m.x243
+ 0.00623048*m.x244 + 0.00800356*m.x245 - 0.00270753*m.x246 + 0.00146414*m.x247
+ 0.00172096*m.x248 + 0.000192602*m.x249 + 0.0068865*m.x250 - 9.80108E-5*m.x251
+ 0.0133063*m.x252 + 0.0509447*m.x253 - 0.000905573*m.x254 + 0.00584973*m.x255
+ 0.00164159*m.x256 + 0.00170257*m.x257 + 0.00491697*m.x258 + 0.00333082*m.x259
+ 0.00312286*m.x260 + 0.00513272*m.x261 + 0.00511337*m.x262 + 0.00111281*m.x263
+ 0.00524869*m.x264 + 0.00756756*m.x265 + 0.00467689*m.x266 - 0.00212043*m.x267
- 0.000146116*m.x268 + 0.00271306*m.x269 + 0.00797013*m.x270 | |
<filename>src/smexperiments/tracker.py<gh_stars>1-10
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Contains the SageMaker Experiments Tracker class."""
import datetime
import os
import mimetypes
import urllib.parse
import urllib.request
import logging
import botocore
import json
from smexperiments._utils import get_module
from os.path import join
import dateutil
from smexperiments import api_types, metrics, trial_component, _utils, _environment
class Tracker(object):
"""A SageMaker Experiments Tracker.
Use a tracker object to record experiment information to a SageMaker trial component.
A new tracker can be created in two ways:
- By loading an existing trial component with :meth:`~smexperiments.tracker.Tracker.load`
- By creating a tracker for a new trial component with :meth:`~smexperiments.tracker.Tracker.create`.
When creating a tracker within a SageMaker training or processing job, use the ``load`` method with
no arguments to track artifacts to the trial component automatically created for your job. When tracking
within a Jupyter notebook running in SageMaker, use the ``create`` method to automatically
create a new
trial component.
Trackers are Python context managers and you can use them using the Python ``with`` keyword. Exceptions
thrown within the with block will cause the tracker's trial component to be marked as failed. Start and
end times are automatically set when using the with statement and the trial component is saved to
SageMaker at the end of the block.
Note that only parameters, input artifacts, and output artifacts are saved to SageMaker. Metrics are saved to file.
Attributes:
trial_component (TrialComponent): The trial component tracked.
"""
trial_component = None
_metrics_writer = None
_in_sagemaker_job = False
_artifact_uploader = None
    def __init__(self, trial_component, metrics_writer, artifact_uploader, lineage_artifact_tracker):
        """Wire up a Tracker around an existing trial component.

        Prefer the ``load`` and ``create`` classmethods over constructing directly.

        Args:
            trial_component: The trial component this tracker records to.
            metrics_writer: Writer used for metric logging; ``None`` when metric
                logging is not supported in the current environment (e.g. trackers
                built via ``create``).
            artifact_uploader: Uploads local files to S3 for artifact logging.
            lineage_artifact_tracker: Records lineage artifacts against the trial
                component's ARN.
        """
        self.trial_component = trial_component
        # Normalize None collections to empty dicts so the log_* methods can
        # index/update them without guards.
        self.trial_component.parameters = self.trial_component.parameters or {}
        self.trial_component.input_artifacts = self.trial_component.input_artifacts or {}
        self.trial_component.output_artifacts = self.trial_component.output_artifacts or {}
        self._artifact_uploader = artifact_uploader
        self._metrics_writer = metrics_writer
        # NOTE(review): presumably a "warn once" flag for unavailable metric
        # logging; its consumer is outside this view -- confirm before relying on it.
        self._warned_on_metrics = False
        self._lineage_artifact_tracker = lineage_artifact_tracker
    @classmethod
    def load(
        cls,
        trial_component_name=None,
        artifact_bucket=None,
        artifact_prefix=None,
        boto3_session=None,
        sagemaker_boto_client=None,
        training_job_name=None,
        processing_job_name=None,
    ):
        """Create a new ``Tracker`` by loading an existing trial component.

        Note that `log_metric` will only work from a training job host.

        Examples:
            .. code-block:: python

                from smexperiments import tracker

                # load tracker from already existing trial component
                my_tracker = tracker.Tracker.load(trial_component_name='xgboost')

                # load tracker from a training job name
                my_tracker = tracker.Tracker.load(
                    training_job_name=estimator.latest_training_job.name)

                # load tracker from a processing job name
                my_tracker = tracker.Tracker.load(
                    processing_job_name=my_processing_job.name)

        Args:
            trial_component_name: (str, optional). The name of the trial component to track. If specified, this
                trial component must exist in SageMaker. If you invoke this method in a running SageMaker training
                or processing job, then trial_component_name can be left empty. In this case, the Tracker will
                resolve the trial component automatically created for your SageMaker Job.
            artifact_bucket: (str, optional) The name of the S3 bucket to store artifacts to.
            artifact_prefix: (str, optional) The prefix to write artifacts to within ``artifact_bucket``
            boto3_session: (boto3.Session, optional) The boto3.Session to use to interact with AWS services.
                If not specified a new default boto3 session will be created.
            sagemaker_boto_client: (boto3.Client, optional) The SageMaker AWS service client to use. If not
                specified a new client will be created from the specified ``boto3_session`` or default
                boto3.Session.
            training_job_name: (str, optional). The name of the training job to track via trial component.
            processing_job_name: (str, optional). The name of the processing job to track via trial component.

        Returns:
            Tracker: The tracker for the given trial component.

        Raises:
            ValueError: If the trial component failed to load.
        """
        boto3_session = boto3_session or _utils.boto_session()
        sagemaker_boto_client = sagemaker_boto_client or _utils.sagemaker_client()
        tce = _environment.TrialComponentEnvironment.load()
        # SageMaker auto-creates a trial component per job with a deterministic
        # "<job-name>-aws-<job-type>" name; derive it when a job name was given
        # but no explicit trial component name.
        if training_job_name and not trial_component_name:
            trial_component_name = training_job_name + "-aws-training-job"
        elif processing_job_name and not trial_component_name:
            trial_component_name = processing_job_name + "-aws-processing-job"
        # Resolve the trial component for this tracker to track: If a trial component name was passed in, then load
        # and track that trial component. Otherwise, try to find a trial component given the current environment,
        # failing if we're unable to load one.
        if trial_component_name:
            tc = trial_component.TrialComponent.load(
                trial_component_name=trial_component_name, sagemaker_boto_client=sagemaker_boto_client
            )
        elif tce:
            tc = tce.get_trial_component(sagemaker_boto_client)
        else:
            raise ValueError('Could not load TrialComponent. Specify a trial_component_name or invoke "create"')
        # metrics require the metrics agent running on training job hosts
        # (note: when trial_component_name is falsy here, tce is guaranteed
        # non-None, otherwise the ValueError above would have been raised)
        if not trial_component_name and tce.environment_type == _environment.EnvironmentType.SageMakerTrainingJob:
            metrics_writer = metrics.SageMakerFileMetricsWriter()
        else:
            metrics_writer = None
        tracker = cls(
            tc,
            metrics_writer,
            _ArtifactUploader(tc.trial_component_name, artifact_bucket, artifact_prefix, boto3_session),
            _LineageArtifactTracker(tc.trial_component_arn, sagemaker_boto_client),
        )
        tracker._in_sagemaker_job = True if tce else False
        return tracker
@classmethod
def create(
cls,
display_name=None,
artifact_bucket=None,
artifact_prefix=None,
boto3_session=None,
sagemaker_boto_client=None,
):
"""Create a new ``Tracker`` by creating a new trial component.
Note that `log_metric` will _not_ work when tracker is created this way.
Examples
.. code-block:: python
from smexperiments import tracker
my_tracker = tracker.Tracker.create()
Args:
display_name: (str, optional). The display name of the trial component to track.
artifact_bucket: (str, optional) The name of the S3 bucket to store artifacts to.
artifact_prefix: (str, optional) The prefix to write artifacts to within ``artifact_bucket``
boto3_session: (boto3.Session, optional) The boto3.Session to use to interact with AWS services.
If not specified a new default boto3 session will be created.
sagemaker_boto_client: (boto3.Client, optional) The SageMaker AWS service client to use. If not
specified a new client will be created from the specified ``boto3_session`` or default
boto3.Session.
Returns:
Tracker: The tracker for the new trial component.
"""
boto3_session = boto3_session or _utils.boto_session()
sagemaker_boto_client = sagemaker_boto_client or _utils.sagemaker_client()
tc = trial_component.TrialComponent.create(
trial_component_name=_utils.name("TrialComponent"),
display_name=display_name,
sagemaker_boto_client=sagemaker_boto_client,
)
# metrics require the metrics agent running on training job hosts and in which case the load
# method should be used because it loads the trial component associated with the currently
# running training job
metrics_writer = None
return cls(
tc,
metrics_writer,
_ArtifactUploader(tc.trial_component_name, artifact_bucket, artifact_prefix, boto3_session),
_LineageArtifactTracker(tc.trial_component_arn, sagemaker_boto_client),
)
def log_parameter(self, name, value):
"""Record a single parameter value for this trial component.
Overwrites any previous value recorded for the specified parameter name.
Examples
.. code-block:: python
# log hyper parameter of learning rate
my_tracker.log_parameter('learning_rate', 0.01)
Args:
name (str): The name of the parameter
value (str or numbers.Number): The value of the parameter
"""
self.trial_component.parameters[name] = value
def log_parameters(self, parameters):
"""Record a collection of parameter values for this trial component.
Examples
.. code-block:: python
# log multiple hyper parameters used in training
my_tracker.log_parameters({"learning_rate": 1.0, "gamma": 0.9, "dropout": 0.5})
Args:
parameters (dict[str, str or numbers.Number]): The parameters to record.
"""
self.trial_component.parameters.update(parameters)
def log_input(self, name, value, media_type=None):
"""Record a single input artifact for this trial component.
Overwrites any previous value recorded for the specified input name.
Examples
.. code-block:: python
# log input dataset s3 location
my_tracker.log_input(name='input', value='s3://inputs/path')
Args:
name (str): The name of the input value.
value (str): The value.
media_type (str, optional): The MediaType (MIME type) of the value
"""
if len(self.trial_component.input_artifacts) >= 30:
raise ValueError("Cannot add more than 30 input_artifacts under tracker trial_component.")
self.trial_component.input_artifacts[name] = api_types.TrialComponentArtifact(value, media_type=media_type)
def log_output(self, name, value, media_type=None):
"""Record a single output artifact for this trial component.
Overwrites any previous value recorded for the specified output name.
Examples
.. code-block:: python
# log output dataset s3 location
my_tracker.log_output(name='prediction', value='s3://outputs/path')
Args:
name (str): The name of the output value.
value (str): The value.
media_type (str, optional): The MediaType (MIME type) of the value.
"""
if len(self.trial_component.output_artifacts) >= 30:
raise ValueError("Cannot add more than 30 output_artifacts under tracker trial_component")
self.trial_component.output_artifacts[name] = api_types.TrialComponentArtifact(value, media_type=media_type)
def log_artifacts(self, directory, media_type=None):
"""Upload all the files under the directory to s3 and store it as artifacts in this trial component. The file
name is used as the artifact name
Examples
.. code-block:: python
# log local artifact
my_tracker.log_artifact(directory='/local/path)
Args:
directory (str): The directory of the local files to upload.
media_type (str, optional): The MediaType (MIME type) of the file. If not specified, this library
will attempt to infer the media type from the file extension of ``file_path``.
| |
<gh_stars>0
#!/usr/bin/env python
from optparse import OptionParser, OptionGroup, SUPPRESS_HELP
import os
import sys
from mapdamage.version import __version__
from mapdamage.rscript import check_R_lib
def file_exist(filename):
    """Return True if ``filename`` is an existing non-directory path, or "-".

    "-" is accepted as a pseudo-filename meaning stdin/stdout. On failure an
    error message is written to stderr and False is returned (the original
    returned None; False is equivalent in truth value but clearer).

    Args:
        filename (str): Path to check, or "-".

    Returns:
        bool: True when usable as an input file, False otherwise.
    """
    if filename == "-":
        # "-" conventionally means "read from stdin" -- always acceptable.
        return True
    if os.path.exists(filename) and not os.path.isdir(filename):
        return True
    sys.stderr.write("Error: '%s' is not a valid file\n" % (filename))
    return False
def whereis(program):
    """Return the full path of ``program`` found on $PATH, or None.

    Searches each $PATH entry in order and returns the first existing,
    non-directory match. Uses ``os.pathsep`` instead of a hard-coded ':' so
    the lookup also works on platforms with a different PATH separator.

    Args:
        program (str): Executable name to look up.

    Returns:
        str or None: Full path of the first match, or None if not found.
    """
    for path in os.environ.get('PATH', '').split(os.pathsep):
        candidate = os.path.join(path, program)
        if os.path.exists(candidate) and not os.path.isdir(candidate):
            return candidate
    return None
def check_py_version():
    """Return True if the running interpreter is at least Python 2.6, else False.

    On failure a message is written to stderr and False is returned (the
    original returned None; False is equivalent in truth value but clearer).

    Returns:
        bool: True when the interpreter version is sufficient.
    """
    req_version = (2, 6)
    cur_version = sys.version_info
    if cur_version >= req_version:
        return True
    # The original concatenated message lacked the space between sentences
    # ("too old.Please"); fixed here.
    sys.stderr.write("Your Python interpreter is too old. "
                     "Please consider upgrading to at least %d.%d\n" % (req_version[0], req_version[1]))
    return False
def options():
parser = OptionParser("%prog [options] -i BAMfile -r reference.fasta\n\nUse option -h or --help for help", version=__version__, \
epilog="report bugs to <EMAIL>, <EMAIL> or <EMAIL>")
args = OptionGroup(parser, "Input files")
args.add_option("-i", "--input", help="SAM/BAM file, must contain a valid header, use '-' for reading a BAM from stdin", \
action="store", type="string", dest="filename")
args.add_option("-r", "--reference", help="Reference file in FASTA format", \
action="store", dest="ref")
parser.add_option_group(args)
group = OptionGroup(parser, "General options")
group.add_option("-n", "--downsample", help = "Downsample to a randomly selected fraction of the reads (if 0 < DOWNSAMPLE < 1), or " \
"a fixed number of randomly selected reads (if DOWNSAMPLE >= 1). By default, no downsampling is performed.",
type = float, default = None)
group.add_option("--downsample-seed", help = "Seed value to use for downsampling. See documentation for py module 'random' for default behavior.",
type = int, default = None)
group.add_option("--merge-reference-sequences", help = "Ignore referece sequence names when tabulating reads (using '*' instead). "
"Useful for alignments with a large number of reference sequnces, which may otherwise result in excessive "
"memory or disk usage due to the number of tables generated.",
default = False, action = "store_true")
group.add_option("-l", "--length", dest="length", help="read length, in nucleotides to consider [%default]", \
type = int, default=70,action="store")
group.add_option("-a", "--around", dest="around", help="nucleotides to retrieve before/after reads [%default]", \
type = int, default=10,action="store")
group.add_option("-Q", "--min-basequal", dest="minqual", help="minimun base quality Phred score considered, Phred-33 assumed [%default]", \
type = int, default=0, action="store")
group.add_option("-d", "--folder", help="folder name to store results [results_FILENAME]", \
action="store", type="string", dest="folder")
group.add_option("-f", "--fasta", dest="fasta", help="Write alignments in a FASTA file", \
default=False,action="store_true")
group.add_option("--plot-only", dest="plot_only", help="Run only plotting from a valid result folder", \
default=False,action="store_true")
group.add_option("-q", "--quiet", dest="quiet", help="Disable any output to stdout", \
default=False,action="store_true")
group.add_option("-v", "--verbose", dest="verbose", help="Display progression information during parsing", \
default=False,action="store_true")
group.add_option("--mapdamage-modules", dest="mapdamage_modules", help="Override the system wide installed mapDamage module", \
default=None)
group.add_option("--no-plot", dest="no_r", help=SUPPRESS_HELP, default=False, action="store_true")
parser.add_option_group(group)
# options for plotting damage patterns
group2 = OptionGroup(parser, "Options for graphics")
group2.add_option("-y", "--ymax", dest="ymax", \
help="graphical y-axis limit for nucleotide misincorporation frequencies [%default]", type = float, \
default=0.3,action="store")
group2.add_option("-m", "--readplot", dest="readplot", \
help="read length, in nucleotides, considered for plotting nucleotide misincorporations [%default]", \
type = int, default=25, action="store")
group2.add_option("-b", "--refplot", dest="refplot", \
help="the number of reference nucleotides to consider for ploting base composition in the region located upstream "
"and downstream of every read [%default]", type= int, default=10, action="store")
group2.add_option("-t", "--title", dest="title", \
help="title used for plots [%default]", \
type="string", default="",action="store")
parser.add_option_group(group2)
# Then the plethora of optional options for the statistical estimation ..
group3 = OptionGroup(parser,"Options for the statistical estimation")
group3.add_option("", "--rand", dest="rand", \
help="Number of random starting points for the likelihood optimization [%default]", type = int, default=30, action="store")
group3.add_option("", "--burn", dest="burn", \
help="Number of burnin iterations [%default]", type = int, default=10000,action="store")
group3.add_option("", "--adjust", dest="adjust", \
help="Number of adjust proposal variance parameters iterations [%default]", type = int, default=10, action="store")
group3.add_option("", "--iter", dest="iter", \
help="Number of final MCMC iterations [%default]", type = int, default=50000, action="store")
group3.add_option("", "--forward", dest="forward", \
help="Using only the 5' end of the seqs [%default]", default=False, action="store_true")
group3.add_option("", "--reverse", dest="reverse", \
help="Using only the 3' end of the seqs [%default]", default=False, action="store_true")
group3.add_option("", "--var-disp", dest="var_disp", \
help="Variable dispersion in the overhangs [%default]", default=False,action="store_true")
group3.add_option("", "--jukes-cantor", dest="jukes_cantor", \
help="Use Jukes Cantor instead of HKY85 [%default]", default=False,action="store_true")
group3.add_option("", "--diff-hangs", dest="diff_hangs", \
help="The overhangs are different for 5' and 3' [%default]", default=False, action="store_true")
group3.add_option("", "--fix-nicks" , dest="fix_nicks", \
help="Fix the nick frequency vector (Only C.T from the 5' end and G.A from the 3' end) [%default]", default=False, action="store_true")
group3.add_option("", "--use-raw-nick-freq" , dest="use_raw_nick_freq", \
help="Use the raw nick frequency vector without smoothing [%default]", default=False, action="store_true")
group3.add_option("", "--single-stranded", dest="single_stranded", \
help="Single stranded protocol [%default]", default=False, action="store_true")
group3.add_option("", "--theme-bw", dest="theme_bw", \
help="Use black and white theme in post. pred. plot [%default]", default=False, action="store_true")
group3.add_option("", "--seq-length", dest="seq_length", \
help="How long sequence to use from each side [%default]", type = int, default=12, action="store")
group3.add_option("--stats-only", dest="stats_only", help="Run only statistical estimation from a valid result folder", \
default=False, action="store_true")
group3.add_option("--no-stats", help="Disabled statistical estimation, active by default", default=False, action="store_true")
group3.add_option("--check-R-packages", help="Check if the R modules are working", default=False, action="store_true")
parser.add_option_group(group3)
group4 = OptionGroup(parser,"Options for rescaling of BAM files")
group4.add_option("--rescale", dest="rescale", help="Rescale the quality scores in the BAM file using the output from the statistical estimation",
default=False, action="store_true")
group4.add_option("--rescale-only", dest="rescale_only", help="Run only rescaling from a valid result folder",
default=False, action="store_true")
group4.add_option("--rescale-out", dest="rescale_out", help="Write the rescaled BAM to this file",
default=None, action="store")
group4.add_option("--rescale-length-5p", dest="rescale_length_5p",
help="How many bases to rescale at the 5' termini; defaults to --seq-length.", type=int, action="store")
group4.add_option("--rescale-length-3p", dest="rescale_length_3p",
help="How many bases to rescale at the 5' termini; defaults to --seq-length.", type=int, action="store")
parser.add_option_group(group4)
#Parse the arguments
(options, args) = parser.parse_args()
# check python version
if not check_py_version():
return None
# if the user wants to check the R packages then do that before the option parsing
if options.check_R_packages:
if check_R_lib():
sys.exit(1)
else:
print("All R packages are present")
sys.exit(0)
# check general arguments
if not (options.plot_only or options.stats_only) and not options.filename:
parser.error('SAM/BAM file not given (-i)')
if not (options.plot_only or options.ref):
parser.error('Reference file not given (-r)')
if not options.plot_only and not options.stats_only:
if not file_exist(options.filename) or not file_exist(options.ref):
return None
if options.downsample is not None:
if options.downsample <= 0:
parser.error("-n/--downsample must be a positive value")
elif options.downsample >= 1:
options.downsample = int(options.downsample)
if options.plot_only and not options.folder:
parser.error('Folder not provided, required with --plot-only')
if options.stats_only and not options.folder:
parser.error('Folder not provided, required with --stats-only')
if options.rescale_only and not options.folder:
parser.error('Folder not provided, required with --rescale-only')
if options.rescale_only and not options.filename:
parser.error('Input bam not provided, required with --rescale-only')
if options.rescale_only and not options.ref:
parser.error('Reference not provided, required with --rescale-only')
if options.verbose and options.quiet:
parser.error('Cannot use verbose and quiet option at the same time')
# check options
if options.length < 0:
parser.error('length (-l) must be a positive integrer')
if options.around < 0:
parser.error('around (-a) must be a positive integrer')
if options.ymax <= 0 or options.ymax > 1:
parser.error('ymax (-b) must be an real number beetween 0 and 1')
if options.readplot < 0:
parser.error('readplot (-m) must be a positive integrer')
if options.refplot < 0:
parser.error('refplot (-b) must be a positive integrer')
if options.refplot > options.around and not options.plot_only:
parser.error('refplot (-b) must be inferior to around (-a)')
if options.readplot > options.length:
parser.error('readplot (-m) must be inferior to length (-l)')
if options.minqual < 0 or options.minqual > 41:
parser.error('minimal base quality, Phred score, must be within this range: 0 - 41')
# check statistic options
if options.forward and options.reverse:
parser.error('Cannot use only forward end and only reverse end for the statistics')
# use filename as default for plot titles if not set
if options.title == "" and options.filename:
options.title = os.path.splitext(os.path.basename(options.filename))[0]
# for --plot-only, use the folder name, without results_ as title
if options.title == "" and not options.filename and options.folder:
options.title = os.path.splitext(os.path.basename(options.folder))[0].replace("results_", "")
# check folder
if not options.folder and options.filename:
options.folder = "results_"+os.path.splitext(os.path.basename(options.filename))[0]
# check destination for rescaled bam
if not options.rescale_out and (options.rescale or options.rescale_only):
# if there are mulitiple bam files to rescale then pick first one as
# the name of the rescaled file
if isinstance(options.filename,list):
basename = os.path.basename(options.filename[0])
else:
basename = os.path.basename(options.filename)
with_ext = os.path.splitext(basename)[0] + ".rescaled.bam"
options.rescale_out = os.path.join(options.folder, with_ext)
if os.path.isdir(options.folder):
if not options.quiet and not options.plot_only:
print("Warning, %s already exists" % options.folder)
if options.plot_only:
if | |
<filename>tests/blackbox/challenge/test_bb_challenge.py
#!/usr/bin/env python3
'''Test scopes with custom rules'''
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
import subprocess
import os
import json
import time
import re
import requests
import pytest
try:
from html.parser import HTMLParser
except ImportError:
# python2 fallback
from HTMLParser import HTMLParser
# ------------------------------------------------------------------------------
# Constants
# ------------------------------------------------------------------------------
G_TEST_HOST = 'http://127.0.0.1:12345'
# ------------------------------------------------------------------------------
# run_command
# ------------------------------------------------------------------------------
def run_command(command):
    """Run *command* through the shell and capture its output.

    Returns a ``(returncode, stdout, stderr)`` tuple; stdout/stderr are
    raw ``bytes``.
    """
    l_proc = subprocess.run(command,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    return (l_proc.returncode, l_proc.stdout, l_proc.stderr)
# ------------------------------------------------------------------------------
# setup scopez server in event mode
# ------------------------------------------------------------------------------
@pytest.fixture()
def setup_waflz_server():
    """Start waflz_server in event mode for the duration of one test.

    Config, scopes, ruleset and GeoIP database paths are resolved
    relative to this test file; the server binary is expected under
    ``build/util/waflz_server``.  Sleeps briefly so the server can bind
    before the test runs, and SIGKILLs it on teardown.
    """
    # ------------------------------------------------------
    # setup
    # ------------------------------------------------------
    l_file_path = os.path.dirname(os.path.abspath(__file__))
    l_scopes_dir = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/scopes'))
    l_conf_dir = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf'))
    l_ruleset_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/ruleset'))
    l_geoip2city_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/db/GeoLite2-City.mmdb'))
    l_geoip2ISP_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/db/GeoLite2-ASN.mmdb'))
    l_waflz_server_path = os.path.abspath(os.path.join(l_file_path, '../../../build/util/waflz_server/waflz_server'))
    l_subproc = subprocess.Popen([l_waflz_server_path,
                                  '-d', l_conf_dir,
                                  '-b', l_scopes_dir,
                                  '-r', l_ruleset_path,
                                  '-g', l_geoip2city_path,
                                  '-s', l_geoip2ISP_path])
    # give the server a moment to bind its port
    time.sleep(1)
    # ------------------------------------------------------
    # yield...
    # ------------------------------------------------------
    yield setup_waflz_server
    # ------------------------------------------------------
    # tear down: SIGKILL through the Popen handle instead of shelling
    # out to ``kill -9`` so the child is reaped and cannot linger as a
    # zombie process.
    # ------------------------------------------------------
    l_subproc.kill()
    l_subproc.wait()
    time.sleep(0.5)
# ------------------------------------------------------------------------------
# setup scopez server in action mode
# ------------------------------------------------------------------------------
@pytest.fixture()
def setup_waflz_server_action():
    """Start waflz_server in action (enforcement) mode for one test.

    Same layout as :func:`setup_waflz_server`, plus the bot-challenges
    config (``-c``) and the JSON-output flag (``-j``).  SIGKILLs the
    server on teardown.
    """
    # ------------------------------------------------------
    # setup
    # ------------------------------------------------------
    l_file_path = os.path.dirname(os.path.abspath(__file__))
    l_geoip2city_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/db/GeoLite2-City.mmdb'))
    l_geoip2ISP_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/db/GeoLite2-ASN.mmdb'))
    l_conf_dir = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf'))
    l_challenge = os.path.realpath(os.path.join(l_file_path, '../../data/bot/bot-challenges.json'))
    l_ruleset_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/ruleset'))
    l_scopes_dir = os.path.realpath(os.path.join(l_file_path, '../../data/waf/conf/scopes'))
    l_waflz_server_path = os.path.abspath(os.path.join(l_file_path, '../../../build/util/waflz_server/waflz_server'))
    # build the argument vector once so the launch and the log line can
    # never drift apart
    l_cmd = [l_waflz_server_path,
             '-d', l_conf_dir,
             '-b', l_scopes_dir,
             '-r', l_ruleset_path,
             '-g', l_geoip2city_path,
             '-s', l_geoip2ISP_path,
             '-c', l_challenge,
             '-j']
    l_subproc = subprocess.Popen(l_cmd)
    print('cmd: \n{}\n'.format(' '.join(l_cmd)))
    # give the server a moment to bind its port
    time.sleep(1)
    # ------------------------------------------------------
    # yield...
    # ------------------------------------------------------
    yield setup_waflz_server_action
    # ------------------------------------------------------
    # tear down: SIGKILL through the Popen handle instead of shelling
    # out to ``kill -9`` so the child is reaped and cannot linger as a
    # zombie process.
    # ------------------------------------------------------
    l_subproc.kill()
    l_subproc.wait()
    time.sleep(0.5)
# ------------------------------------------------------------------------------
# parse html
# ------------------------------------------------------------------------------
class html_parse(HTMLParser):
    """HTML parser that captures the challenge JavaScript snippet.

    The challenge page embeds its puzzle in a text node beginning with
    ``function``; the most recently seen such node is kept in ``m_data``.
    """
    # last captured challenge script (empty string until a match is seen)
    m_data = ""

    def handle_data(self, data):
        # ignore every text node except the challenge script itself
        if not data.startswith('function'):
            return
        self.m_data = data
# ------------------------------------------------------------------------------
# Solve browser challenge
# TODO: This is based on assumption that the problem will be a simple addition
# operation in js. If problem changes in data file, this needs to be updated
# ------------------------------------------------------------------------------
def solve_challenge(a_html):
    """Solve the arithmetic browser challenge embedded in *a_html*.

    Extracts the ``val = NNN+NNN`` addition from the challenge script,
    computes the sum, and pairs it with the ``__ecbmchid`` token found
    in the page.  Returns a cookie string of the form
    ``'__eccha = <sum>;__ecbmchid=<token>'``.

    Raises AttributeError (search returning None) if either pattern is
    absent, as before.
    """
    # NOTE: raw strings are required -- '\+' in a plain literal is an
    # invalid escape sequence (SyntaxWarning since Python 3.12).
    l_problem_p = re.search(r'val =.[0-9]{3}\+[0-9]{3}', a_html)
    l_problem_vars = l_problem_p.group(0).split("=")[-1].split('+')
    l_solution = int(l_problem_vars[0]) + int(l_problem_vars[1])
    l_ectoken_p = re.search(r'__ecbmchid=(.*?)"', a_html)
    # group(0) includes the trailing double quote; stripped via [:-1]
    l_ectoken = l_ectoken_p.group(0)
    return '__eccha = ' + str(l_solution) + ';' + l_ectoken[:-1]
# ------------------------------------------------------------------------------
# test bot challenge events
# ------------------------------------------------------------------------------
def test_challenge_events(setup_waflz_server):
    """Event mode: challenge status is reported in JSON, not enforced.

    Both a token-less request and one carrying a corrupted token come
    back HTTP 200, with the challenge status recorded in the profile
    event instead of a 401 challenge page.
    """
    # ------------------------------------------------------
    # test for receiving a bot challenge
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
    l_r_json = l_r.json()
    assert 'prod_profile' in l_r_json
    # no cookie was sent, so the server reports a missing token
    assert l_r_json['prod_profile']['challenge_status'] == "CHAL_STATUS_NO_TOKEN"
    assert l_r_json['prod_profile']['token_duration_sec'] == 3
    # ------------------------------------------------------
    # send random corrupted token
    # ------------------------------------------------------
    l_solution_cookies = '__ecbmchid=d3JvbmdfdG9rZW4K;__eccha=300'
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'Cookie': l_solution_cookies,
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
    l_r_json = l_r.json()
    assert 'prod_profile' in l_r_json
    # a bogus token is flagged as corrupted rather than missing
    assert l_r_json['prod_profile']['challenge_status'] == "CHAL_STATUS_TOKEN_CORRUPTED"
    assert l_r_json['prod_profile']['token_duration_sec'] == 3
# ------------------------------------------------------------------------------
# test bot challenge in bot config
# ------------------------------------------------------------------------------
def test_challenge_in_bot_config(setup_waflz_server_action):
    """Action mode: a challenge is served, solvable, and expires.

    Flow: first request receives a 401 challenge page; solving it
    yields cookies that let the request through; after the 3-second
    token lifetime a fresh challenge is issued again.
    """
    # ------------------------------------------------------
    # test for receiving a bot challenge
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 401
    # ------------------------------------------------------
    # solve challenge
    # ------------------------------------------------------
    l_parser = html_parse()
    l_parser.feed(l_r.text)
    assert 'function' in l_parser.m_data
    l_solution_cookies = solve_challenge(l_parser.m_data)
    # ------------------------------------------------------
    # test again with solved challenge and cookies
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'Cookie': l_solution_cookies,
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
    l_r_json = l_r.json()
    #-------------------------------------------------------
    # check no event is returned
    # ------------------------------------------------------
    assert l_r_json['errors'][0]['message'] == 'OK'
    #-------------------------------------------------------
    # sleep for 3 seconds for challenge to expire
    # ------------------------------------------------------
    time.sleep(3)
    # ------------------------------------------------------
    # test with previous solved challenge, new challenge
    # should be returned
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'Cookie': l_solution_cookies,
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 401
    l_parser = html_parse()
    l_parser.feed(l_r.text)
    assert 'function' in l_parser.m_data
# ------------------------------------------------------------------------------
# test bot challenge with limits
# ------------------------------------------------------------------------------
def test_challenge_with_limits(setup_waflz_server_action):
    """Rate limits still apply to requests carrying a solved challenge.

    The third consecutive request with valid challenge cookies trips
    the bot-config rate limit (403 with the ddos enforcement body);
    once the enforcement window and the 3-second token lifetime expire,
    a new challenge is served.
    """
    # ------------------------------------------------------
    # test for receiving a bot challenge
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 401
    # ------------------------------------------------------
    # solve challenge
    # ------------------------------------------------------
    l_parser = html_parse()
    l_parser.feed(l_r.text)
    assert 'function' in l_parser.m_data
    l_solution_cookies = solve_challenge(l_parser.m_data)
    # ------------------------------------------------------
    # send the solved challenge thrice
    # rate limiting should block the third request
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'Cookie': l_solution_cookies,
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == "ddos enforcement from bot config\n"
    # ------------------------------------------------------
    # sleep for 3 seconds for challenge and rate limiting
    # enforcement to expire
    # ------------------------------------------------------
    time.sleep(3)
    # ------------------------------------------------------
    # test with previous solved challenge, new challenge
    # should be returned
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'Cookie': l_solution_cookies,
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 401
    l_parser = html_parse()
    l_parser.feed(l_r.text)
    assert 'function' in l_parser.m_data
# ------------------------------------------------------------------------------
# test bot challenge with profile
# ------------------------------------------------------------------------------
def test_challenge_with_profile(setup_waflz_server_action):
    """WAF profile inspection still runs after a challenge is solved.

    A solved challenge does not bypass the WAF: a request with an SQLi
    query string receives the profile's custom 403 response, while a
    clean request passes; once the token expires a new challenge is
    issued.
    """
    # ------------------------------------------------------
    # test for receiving a bot challenge with attack vector
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html?a=%27select%20*%20from%20testing%27'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 401
    # ------------------------------------------------------
    # solve challenge
    # ------------------------------------------------------
    l_parser = html_parse()
    l_parser.feed(l_r.text)
    assert 'function' in l_parser.m_data
    l_solution_cookies = solve_challenge(l_parser.m_data)
    # ------------------------------------------------------
    # send the solved challenge with attack vector
    # should get custom response from profile
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html?a=%27select%20*%20from%20testing%27'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'Cookie': l_solution_cookies,
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    assert l_r.text == 'This is profile custom response\n'
    # ------------------------------------------------------
    # send the solved challenge without attack vector
    # request should go through
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'Cookie': l_solution_cookies,
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
    l_r_json = l_r.json()
    #-------------------------------------------------------
    # check no event is returned
    # ------------------------------------------------------
    assert l_r_json['errors'][0]['message'] == 'OK'
    #-------------------------------------------------------
    # sleep for 3 seconds for challenge to expire
    # ------------------------------------------------------
    time.sleep(3)
    # ------------------------------------------------------
    # test with previous solved challenge, new challenge
    # should be returned
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'bot-testing',
                 'Cookie': l_solution_cookies,
                 'waf-scopes-id': '0052'}
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 401
    l_parser = html_parse()
    l_parser.feed(l_r.text)
    assert 'function' in l_parser.m_data
# ------------------------------------------------------------------------------
# test bot rules in reputation db for audit mode
# ------------------------------------------------------------------------------
def test_bot_rules_with_reputation_db_audit(setup_waflz_server_action):
    """An IP on the bots audit list is logged but not blocked.

    The request passes (200) while the prod profile records rule
    70000001 ('Client IP in bots audit list') and all request headers.
    """
    # ------------------------------------------------------
    # pass an IP which is set for audit mode in bots
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'monkey',
                 'waf-scopes-id': '0052',
                 'x-waflz-ip': '172.16.17.32'
                 }
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
    l_r_json = l_r.json()
    assert l_r_json['audit_profile'] == None
    assert l_r_json['prod_profile']['sub_event'][0]['rule_id'] == 70000001
    assert l_r_json['prod_profile']['sub_event'][0]['rule_msg'] == 'Client IP in bots audit list'
    # test we are logging all headers (5 sent + 1 added in transit)
    assert 'request_headers' in l_r_json['prod_profile']['req_info']
    assert len(l_r_json['prod_profile']['req_info']['request_headers']) == 6
    #assert l_r.text = '"'
# ------------------------------------------------------------------------------
# test bot rules in reputation db for audit mode
# ------------------------------------------------------------------------------
def test_bot_rules_with_reputation_db_block(setup_waflz_server_action):
    """An IP on the bots block list is rejected with a 403.

    The prod profile records rule 70000002 ('Client IP in bots block
    list') and all request headers.
    """
    # ------------------------------------------------------
    # pass an IP which is set for block mode in bots
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'user-agent': 'monkey',
                 'waf-scopes-id': '0052',
                 'x-waflz-ip': '192.168.127.12'
                 }
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 403
    l_r_json = l_r.json()
    assert l_r_json['audit_profile'] == None
    assert l_r_json['prod_profile']['sub_event'][0]['rule_id'] == 70000002
    assert l_r_json['prod_profile']['sub_event'][0]['rule_msg'] == 'Client IP in bots block list'
    # test we are logging all headers (5 sent + 1 added in transit)
    assert 'request_headers' in l_r_json['prod_profile']['req_info']
    assert len(l_r_json['prod_profile']['req_info']['request_headers']) == 6
# ------------------------------------------------------------------------------
# test bot rules in reputation db but matched browser challenge rule
# ------------------------------------------------------------------------------
def test_bot_rules_challenge_takes_precedence(setup_waflz_server):
    """Browser-challenge rule wins over a reputation-db block entry.

    Event mode: the IP is on the block list, but the user-agent matches
    the browser-challenge rule (77000101), which takes precedence, so
    the event reports a missing challenge token.
    """
    # ------------------------------------------------------
    # pass an IP which is set for block mode in reputation db
    # and a user-agent which matches the browser challenge rule
    # ------------------------------------------------------
    l_uri = G_TEST_HOST+'/test.html'
    l_headers = {'host': 'mybot.com',
                 'waf-scopes-id': '0052',
                 'user-agent': 'bot-testing',
                 'x-waflz-ip': '192.168.127.12'
                 }
    l_r = requests.get(l_uri, headers=l_headers)
    assert l_r.status_code == 200
    l_r_json = l_r.json()
    assert 'prod_profile' in l_r_json
    # The rule for throwing browser challenge takes precedence
    assert l_r_json['prod_profile']['sub_event'][0]['rule_id'] == 77000101
    assert l_r_json['prod_profile']['challenge_status'] == "CHAL_STATUS_NO_TOKEN"
    assert l_r_json['prod_profile']['token_duration_sec'] == 3
# ------------------------------------------------------------------------------
# test bot rules in reputation db for audit mode
# ------------------------------------------------------------------------------
def test_bot_rules_audit_rdb_takes_precedence(setup_waflz_server_action):
# ------------------------------------------------------
# pass a IP which is present in both reputation db
# ------------------------------------------------------
l_uri = G_TEST_HOST+'/test.html'
l_headers = {'host': | |
as writer:
for Ev, Egrp in sgrp.groupby("E_RHE"):
# sheet_name = Ev
EmV = f"{1E3*Ev:.0f}"
Egrp[["Frequency(Hz)"] + cols].to_excel(
writer, sheet_name=EmV
)
# === plotting
fig, ax = plt.subplots()
Egrp.plot(
x=cols[0],
y=cols[1],
kind="scatter",
ax=ax,
label=cols[1],
)
Egrp.plot(x=cols[2], y=cols[3], c="r", ax=ax, label=cols[3])
plt.legend()
ax.set_xlabel(ax_type[0])
ax.set_ylabel(ax_type[1])
ax.set_title(f"{gas} {sID} {EmV}")
ax.grid(True)
plt.savefig(
specdir.joinpath(f"{ax_type[0][0]}_{gas}_{sID}_{EmV}"),
bbox_inches="tight",
)
plt.close()
# ===
def save_load_AST_pars(func):
    """Decorator that logs every call to *func* with its bound arguments.

    Prints ``module.qualname ( arg = value, ... )`` before delegating to
    *func* and returning its result.

    BUG FIX: the wrapper previously returned ``args`` instead of calling
    ``func``, so any decorated function silently returned its own
    positional-argument tuple.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        # bind() maps positionals/keywords onto parameter names for logging
        func_args = inspect.signature(func).bind(*args, **kwargs).arguments
        func_args_str = ", ".join(
            "{} = {!r}".format(*item) for item in func_args.items()
        )
        print(f"{func.__module__}.{func.__qualname__} ( {func_args_str} )")
        return func(*args, **kwargs)

    return wrapper
# # @save_load_AST_pars
# def mergedEC( _reloadset = False):
# _pkl_EC_merged = 'EC_merged_dict'
# # EC_merged_dict = EC_PorphSiO2.mergedEC(_reloadset=True)
# if _reloadset == True:
# # EC_merged_dict_bak = EC_merged_dict.copy()
# # EC_merged_dict = EC_PorphSiO2.take_selection_of_EC_merged(EC_merged_dict)
# mcols = [i for i in Load_from_Indexes.EC_label_cols if i not in ['PAR_file']]+['Sweep_Type']
# _mcols = [i for i in mcols if not i in ['Gas','E_RHE']]
# LC_fls, AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
# EC_merged_dict = {}
# # _reloadset = True
# template = PorphSiO2_template()
# HPRR = EC_PorphSiO2.HPRR()
# N2CV = EC_PorphSiO2().N2cv(reload= False, use_daily = True)
# # N2_pltqry = EC_merged_dict.get('N2CV')
# N2_AST = EC_PorphSiO2.get_AST_matches(N2CV)
# N2_AST_diff = EC_PorphSiO2.compare_AST_pars(N2CV, N2_AST, reload = False)
# # _DFtype = EC_PorphSiO2.sense_DF_type(N2CV)
# # EC_merged_dict.update({'N2CV' : N2_AST_diff})
# EC_merged_dict.update({'N2CV' : {'PARS' : N2CV, 'AST_matches' : N2_AST, 'AST_diff' : N2_AST_diff}})
# # list(N2CV.columns)
# # _renameN2 = {c : c.split('_')[-1] for c in [i for i in N2CV.columns if any([i.split('_')[-1] in mcols])]}
# # N2CV = N2CV.rename(columns = _renameN2)
# ORR = EC_PorphSiO2().ORR_pars()
# ORR_AST = EC_PorphSiO2.get_AST_matches(ORR)
# ORR_AST_diff = EC_PorphSiO2.compare_AST_pars(ORR, ORR_AST, reload = _reloadset)
# ttpars = ORR.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
# tt_AST = EC_PorphSiO2.get_AST_matches(ttpars)
# tt = ORR_AST.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
# tt_diff = EC_PorphSiO2.compare_AST_pars(ORR, tt, reload = _reloadset, save_pkl = False)
# # ttpfs = ORR.loc[ORR.ORR_Jkin_max_700 > 0].PAR_file.unique()
# # ttpfs = ORR.query('Sweep_Type == "mean"').loc[ORR.ORR_E_onset > 0.85].PAR_file.unique()
# # ORR.loc[(ORR.ORR_E_onset > 0.85) & (ORR.Sweep_Type == 'cathodic')].PAR_file.unique()
# # EC_merged_dict.update({'ORR' : ORR_AST_diff})
# EC_merged_dict.update({'ORR' : {'PARS' : ORR, 'AST_matches' : ORR_AST, 'AST_diff' : ORR_AST_diff}})
# # _renameO2 = {c : c.split('_')[-1] for c in [i for i in ORR.columns if any([i.split('_')[-1] in mcols]) and not '_Ring' in i]}
# # ORR = ORR.rename(columns = _renameO2)
# KL = EC_PorphSiO2().KL_pars()
# KL = KL.assign(**{'RPM_DAC' : 0})
# KL_AST = EC_PorphSiO2.get_AST_matches(KL)
# KL_AST_diff = EC_PorphSiO2.compare_AST_pars(KL, KL_AST, reload = _reloadset)
# # EC_merged_dict.update({'KL' : KL_AST_diff})
# EC_merged_dict.update({'KL' : {'PARS' : KL, 'AST_matches' : KL_AST, 'AST_diff' : KL_AST_diff}})
# # _KLdatacols = ['ORR_KL_data_file_post','ORR_KL_data_x_post', 'ORR_KL_data_y_post', 'ORR_KL_fit_y_post', 'ORR_KL_fit_y_2e_post', 'ORR_KL_fit_y_4e_post']
# # _renameKL = {c : c.split('_')[-1] for c in [i for i in KL.columns if any([i.split('_')[-1] in mcols]) and not '_Ring' in i]}
# # KL = KL.rename(columns = _renameKL)
# EIS = EC_PorphSiO2.EIS_pars()
# EIS_AST = EC_PorphSiO2.get_AST_matches(EIS)
# EIS_AST_diff = EC_PorphSiO2.compare_AST_pars(EIS, EIS_AST, reload = _reloadset)
# # EC_merged_dict.update({'EIS' : EIS_AST_diff})
# EC_merged_dict.update({'EIS' : {'PARS' : EIS, 'AST_matches' : EIS_AST, 'AST_diff' : EIS_AST_diff}})
# # _renameEIS = {c : c.split('_')[-1] for c in [i for i in EIS.columns if any([i.split('_')[-1] in mcols]) and not '_Ring' in i]}
# # EIS = EIS.rename(columns = _renameEIS)
# HER = EC_PorphSiO2().HER_pars(reload= False, use_daily = True)
# HER_type_grp = HER.groupby('HER_type')
# HER.HER_at_E_slice = HER.HER_at_E_slice.round(3)
# HER_AST = EC_PorphSiO2.get_AST_matches(HER)
# for Htype, Hgrp in HER_type_grp:
# # Htype, Hgrp = 'E_slice', HER.loc[HER.groupby('HER_type').groups['E_slice']]
# HER_AST_diff = EC_PorphSiO2.compare_AST_pars(Hgrp, HER_AST, reload = _reloadset,extra= Htype)
# try:
# if not HER_AST_diff.empty:
# EC_merged_dict.update({f'HER_{Htype}' : {'PARS' : Hgrp, 'AST_matches' : HER_AST, 'AST_diff' : HER_AST_diff}})
# except Exception as e:
# print(f'HER {Htype} fail, {e}')
# # EC_merged_dict.update({f'HER_{Htype}' : HER_AST_diff})
# EC_merged_dict = EC_PorphSiO2.add_filter_selection_of_EC_merged(EC_merged_dict)
# save_dict_pkl(_pkl_EC_merged, EC_merged_dict)
# else:
# EC_merged_dict = load_dict_pkl(_pkl_EC_merged)
# return EC_merged_dict
# ECmerged = pd.merge(ORR,pd.merge(N2CV, EIS,on=_mcols),on=_mcols)
# EC_EIS = pd.merge(ECmerged,EIS,on=mcols)
# EC_OHN_merged = pd.merge(template, EC_EIS, on='SampleID')
# EC_PorphSiO2.export_to_xls(EC_OHN_merged)
# return EC_OHN_merged
def corr_plots():
    """Exploratory correlation scatter plots for the EC_OHC dataset.

    NOTE(review): relies on a module-level ``EC_OHC`` DataFrame that is
    not defined in this module's visible code -- confirm it is in scope
    before calling.  Sample "JOS5" is excluded from the correlations,
    presumably as an outlier -- TODO confirm.
    """
    # full correlation matrix (computed but not captured -- exploratory)
    EC_OHC.query('SampleID != "JOS5"').corr()
    # stacked pairwise correlations; currently computed but unused
    corrstk = EC_OHC.query('SampleID != "JOS5"').corr().stack()
    EC_OHC.plot(x="E_onset", y="HPRR_E_onset", kind="scatter")
    EC_OHC.plot(x="FracH2O2_050", y="HPRR_E_onset", kind="scatter")
    EC_OHC.plot(x="N2_Cdl_mFcm-2_0.5", y="HPRR_dj/dE", kind="scatter")
    EC_OHC.plot(x="N2_Cdl_mFcm-2_0.5", y="E_half", kind="scatter")
    EC_OHC.corr(method="pearson")
def _check_eis_plots():
    """Visual sanity check of one EIS parameter vs potential, per model.

    NOTE(review): depends on module-level ``EIS_pars_all`` and
    ``eisplot`` which are not defined in this module's visible code --
    confirm they are in scope before calling.
    """
    # pick one EIS parameter to inspect; currently the last ('Aw')
    _par = ["Cdlp", "Rorr", "Rct", "Qad", "Aw"][-1]
    # sample descriptor used to color the scatter points
    _checky = ["N_content", "BET_cat_agg"][0]
    for modn, mgrp in EIS_pars_all.loc[EIS_pars_all.pH < 3].groupby(
        ["pH", "postAST", "Model_EEC"]
    ):
        _ps = eisplot(_par)
        # skip groups with too few points to plot meaningfully
        if len(mgrp[_par].dropna()) > 3:
            mgrp.plot(
                x="E_RHE",
                y=_par,
                yerr=f"{_par}_stderr",
                kind="scatter",
                ylim=_ps.ylim,
                logy=_ps.logy,
                title=f"{modn}",
                c=_checky,
                cmap="rainbow",
            )
# def EC_PorphSio():
## folder = Path('F:\EKTS_CloudStation\CloudStation\Preparation-Thesis\SiO2_projects\SiO2_Me_EC+Struc\EC_Porph_SiO2_0.1MH2SO4\Compare_parameters')
## folder = Path('G:\CloudStation\Preparation-Thesis\SiO2_projects\SiO2_Me_EC+Struc\EC_Porph_SiO2_0.1MH2SO4\Compare_parameters')
## HPRR = pd.concat([pd.read_excel(i)['file'] for i in hprr_files])
# EC_ORR_HPRR = pd.merge(ORR_pars_origin,HPRR_pars_origin)
# HPRR_pars_origin.join(N2_orig, on='SampleID')
# EC_OHC = pd.merge(ORR_pars_origin,pd.merge(HPRR_pars_origin, N2_orig),on='SampleID')
## orr_raw.query('RPM > 1400')
## orrfs.append(orr_raw.query('RPM > 1400'))
# EC_OHC.to_excel(folder.joinpath('EC_ORR_HPRR.xlsx'))
def _testing_():
    """Ad-hoc interactive driver for EC_post_plotting.

    NOTE(review): the ``self = ...`` rebinds are console-debugging
    helpers (so method bodies can be stepped through interactively);
    ``EC_prepare_EC_merged`` and the ``N2cv`` method are defined
    elsewhere -- confirm they exist before running.
    """
    tt = EC_prepare_EC_merged()
    self = tt
    _pp = EC_post_plotting(tt)
    self = _pp
    N2CV = self.N2cv(reload=False, use_daily=True)
#%% == EC_post_plotting == testing
class EC_post_plotting:
    def __init__(self, _EC_prepare_EC_merged):
        # Keep a reference to the prepared merge object and expose its
        # merged-results dict (if any) as ``self.EC_merged``.
        self._EC_prepare_EC_merged = _EC_prepare_EC_merged
        self.add_attrs()
def add_attrs(self):
if hasattr(self._EC_prepare_EC_merged, "EC_merged_dict"):
self.EC_merged = self._EC_prepare_EC_merged.EC_merged_dict
else:
self.EC_merged = {} # self._EC_prepare_EC_merged
    def ORR_get_experiments(self):
        """Export an overview of matched ORR AST experiments to Excel.

        Filters the ORR AST matches to mean sweeps above 1000 RPM and
        writes them to ``ORR_AST_exp_overview.xlsx``.  NOTE(review):
        ``EC_folder`` is a module-level path not defined in the visible
        code -- confirm it is in scope.
        """
        ORR_AST = self.EC_merged["ORR"]["AST_matches"]
        ORR_AST_mean1500 = ORR_AST.loc[
            (ORR_AST.Sweep_Type == "mean") & (ORR_AST.RPM_DAC_uni > 1000)
        ]
        ORR_AST_mean1500.to_excel(EC_folder.joinpath("ORR_AST_exp_overview.xlsx"))
        # N2_scan_index = EC_index.loc[(EC_index.SampleID.isin(_smpls)) & (EC_index.PAR_exp.str.contains('N2_act'))]
        # N2_scan_index.to_excel(EC_folder.joinpath('N2_scan_exp_overview.xlsx'))
    def N2_repr_Cdl(self):
        """Plot per-sample N2 Cdl reproducibility across repeated measurements.

        For each (pH, loading, sample, sweep) group: plots every
        PAR-file's Cdl-vs-potential trace against the group mean (top
        axis) and its deviation from that mean (bottom axis), saving one
        PNG per group under the ``N2_reproducibility`` folder.

        NOTE(review): ``EC_folder``, ``mkfolder``, ``PorphSiO2_template``
        and ``EvRHE`` are module-level names not defined in the visible
        code -- confirm they are in scope.
        """
        # ECname = 'N2'
        # Cdl_pars_all = Load_from_Indexes.N2_pars_OVV()
        _DF = self.EC_merged["N2CV"]["PARS"]
        # _DF = Cdl_pars_all
        ECname = "N2"
        _raw_data_folder = mkfolder(EC_folder.joinpath(f"{ECname}_reproducibility"))
        _grpcols = ["pH", "Loading_cm2", "SampleID"]
        _swpcol = [i for i in _DF.columns if "Sweep_Type" in i]
        _grpcols += _swpcol
        # restrict to the template samples in acidic conditions
        _sIDgrps = _DF.loc[
            _DF.SampleID.isin(PorphSiO2_template().SampleID.values) & (_DF.pH < 2)
        ]
        # .query('postAST == "no"')
        _lst = []
        for sID, sgrp in _sIDgrps.groupby(_grpcols):
            # NOTE(review): bare ``sID,`` is a no-op tuple expression,
            # presumably left over from interactive debugging
            sID,
            # group mean of Cdl vs applied potential, used as reference
            _sgpr_Cdl_mean = (
                sgrp.groupby("E_AppV_RHE").Cdl.mean().rename("Cdl_grp_mean")
            )
            _Cdl_cols = [i for i in sgrp.columns if i.startswith("N2_Cdl_F")]
            fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 10), sharex=True)
            _sgpr_Cdl_mean.plot(
                c="grey", alpha=0.5, ls="--", lw=5, label="mean Cdl", ax=ax1
            )
            pfgrps = sgrp.groupby("PAR_file")
            for pf, pfgrp in pfgrps:
                pfgrp = pd.merge(pfgrp, _sgpr_Cdl_mean, on=EvRHE)
                # solid line for untreated ('no' postAST), dashed otherwise
                ls = "-" if "no" in pfgrp.postAST.unique() else "--"
                pfgrp = pfgrp.assign(
                    **{"Cdl_mean_diff": pfgrp.Cdl - pfgrp.Cdl_grp_mean}
                )
                _lst.append(pfgrp)
                pfgrp.plot(
                    x="E_AppV_RHE", y="Cdl_mean_diff", ax=ax2, legend=False, ls=ls
                )
                _dt = pfgrp.PAR_date_day.unique()[0]
                _lbl = f"{_dt}, {Path(pf).stem}"
                pfgrp.plot(x="E_AppV_RHE", y="Cdl", ax=ax1, label=_lbl, ls=ls)
            _title = ", ".join([f"{k} : {str(val)}" for k, val in (zip(_grpcols, sID))])
            _stem = "_".join([str(i) for i in sID]) + f"_{len(pfgrps)}"
            ax1.set_ylabel("Cdl")
            ax1.set_title(_title)
            ax1.legend(
                fontsize=15, bbox_to_anchor=(1.02, 1), loc="upper left", fancybox=True
            )
            ax2.set_ylabel("Cdl - Cdl_mean")
            # ax2.legend(False)
            plt.savefig(
                _raw_data_folder.joinpath(_stem + ".png"), bbox_inches="tight", dpi=200
            )
            plt.close()
        N2_Cdl_pars_mean = pd.concat(_lst)

        def select_sID_N2(_sIDgrps):
            """Overlay pre/post-AST raw N2 CVs for the hard-coded JOS4 group.

            Nested helper: reads ``_grpcols``, ``ECname`` and
            ``EC_folder`` from the enclosing scope.  NOTE(review): the
            column is spelled both ``sourceFilename`` and
            ``SourceFilename`` below -- one of them is likely a typo;
            verify against the DataFrame's actual columns.
            """
            # hard-coded (pH, loading, sample, sweep) selection
            _grp_select = (1.0, 0.379, "JOS4", "cathodic")
            _jos4 = _sIDgrps.groupby(_grpcols).get_group(_grp_select)
            _raw_data_folder = mkfolder(
                EC_folder.joinpath(
                    f"{ECname}_reproducibility", "_".join([str(i) for i in _grp_select])
                )
            )
            # raw CV data measured after the LC accelerated stress test
            _j4lc = _jos4.loc[_jos4.postAST == "postAST_LC"]
            j4post = pd.concat(
                [
                    pd.read_excel(_j4lc.sourceFilename.unique()[0].parent.joinpath(i))
                    for i in _j4lc.N2_CV_datafilenames.unique()[0].split(", ")
                ]
            )
            # raw CV data from the untreated measurement on a fixed day
            _j4no = _jos4.loc[
                (_jos4.PAR_date_day == "2019-05-06") & (_jos4.postAST == "no")
            ]
            j4no = pd.concat(
                [
                    pd.read_excel(_j4no.SourceFilename.unique()[0].parent.joinpath(i))
                    for i in _j4no.N2_CV_datafilenames.unique()[0].split(", ")
                ]
            )
            _j4no_pfgrps = _jos4.loc[(_jos4.postAST == "no")].groupby("PAR_file")
            for pf, pgr in _j4no_pfgrps:
                j4no_grps = pd.concat(
                    [
                        pd.read_excel(pgr.SourceFilename.unique()[0].parent.joinpath(i))
                        for i in pgr.N2_CV_datafilenames.unique()[0].split(", ")
                    ]
                ).groupby("ScanRate_mVs")
                # one overlay figure per scan rate: pre vs postAST_LC
                for sr, sgrp in j4post.groupby("ScanRate_mVs"):
                    fig, ax = plt.subplots()
                    j4no_grps.get_group(sr).plot(
                        x="E_AppV_RHE",
                        y="jmAcm-2",
                        ax=ax,
                        label=f"pre,{pgr.PAR_date_day.unique()[0]} / {Path(pgr.PAR_file.unique()[0]).stem}",
                    )
                    sgrp.plot(
                        x="E_AppV_RHE", y="jmAcm-2", ax=ax, label="postAST_LC", title=sr
                    )
                    ax.legend(
                        fontsize=15,
                        bbox_to_anchor=(1.02, 1),
                        loc="upper left",
                        fancybox=True,
                    )
                    _stem = f"{sr}_{pgr.PAR_date_day.unique()[0]}_{Path(pgr.PAR_file.unique()[0]).stem}"
                    plt.savefig(
                        _raw_data_folder.joinpath(_stem + ".png"),
                        bbox_inches="tight",
                        dpi=200,
                    )
def reproducibility_check_samples(_DF, ECname):
ECname = "EIS"
if ECname == "EIS":
EIS = EC_PorphSiO2.EIS_pars()
_DF = EIS
_grpcols = ["pH", "Loading_cm2", "SampleID"]
_eisqry = '(postAST == "no") &'
_sIDgrps = _DF.loc[
_DF.SampleID.isin(PorphSiO2_template().SampleID.values) & (_DF.pH < 2)
].query('(Sweep_Type == "cathodic")')
_lst = []
for sID, sgrp in _sIDgrps.groupby(_grpcols):
sID
pars, vars = eisplot.read_varnames(sgrp)
for gas in ["O2", "N2"]:
# gas ='N2'
_vars_gas = [i for i in vars if i.endswith(gas)]
sgrp_gas = sgrp.copy()
sgrp_gas.dropna(subset=_vars_gas, axis=0, inplace=True)
sgrp_gas.dropna(axis=1, how="all", inplace=True)
sgrp_gas = sgrp_gas.loc[
sgrp_gas[[i for i in _vars_gas if f"Rct_{gas}" in i][0]] < 2000
]
for var in _vars_gas:
_raw_data_folder = mkfolder(
EC_folder.joinpath(f"{ECname}_reproducibility/{var}")
)
# var = f'EIS_{_var}_{gas}'
_sgpr_var_mean = (
sgrp_gas.groupby("E_RHE")[var].mean().rename(f"{var}_mean")
)
fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 10), sharex=True)
_sgpr_var_mean.plot(
c="grey",
alpha=0.5,
ls="--",
lw=5,
label=f"{var}_mean",
ax=ax1,
)
pfgrps | |
"""
Compare with the previous dialog_reader, this dialog_reader removes the token type id
but add padding token and other corresponding special tokens instead.
"""
import os
import json
import pickle
import random
import pprint
import logging
import numpy as np
from tqdm import tqdm
from operator import itemgetter
from typing import Any, Dict, List, Optional, Tuple, Union, Iterator
from parlai.core.dict import DictionaryAgent
from parlai.core.worlds import create_task
import torch
from torch.utils.data import Dataset, DataLoader
import transformers
from transformers import AutoTokenizer
from src.data_utils.utils import pad_sents, get_mask, pad_list_of_sents, get_list_of_mask
from src.data_utils.data_reader import getDataLoader
class DialogReader(Dataset):
    """Torch Dataset for knowledge-grounded dialog.

    Compared with the previous dialog_reader, this reader drops token type
    ids and instead relies on a padding token plus extra special tokens
    (``<eos_u>``, ``<eos_r>``, ``<eos_k>``) to mark utterance and knowledge
    boundaries.  Items yielded by __getitem__ contain "context", "response",
    "chosen_sentence" and a "<key>_mask" LongTensor for each.
    """

    def __init__(self,
                 tokenizer,
                 mode: str = "train",
                 max_length: int = 128,
                 max_context_length: int = 128,
                 max_kn_length: int = 128,
                 max_episode_length: int = 1,
                 data_dir: str = "./data",
                 history_in_context: bool = False,
                 kn_in_context: bool = False,
                 model_type: str = "decoder_only",
                 debug: bool = False,
                 inference: bool = False,
                 kn_mode: str = "oracle",
                 ):
        # `tokenizer` must provide bos/eos tokens, a pad token and the
        # special tokens <eos_u>, <eos_r>, <eos_k> used below.
        self._max_length = max_length
        self._max_context_length = max_context_length
        self._max_kn_length = max_kn_length
        self._max_episode_length = max_episode_length
        self._data_dir = data_dir
        self._model_type = model_type
        self._kn_mode = kn_mode
        self._tokenizer = tokenizer
        self._debug = debug
        self._inference = inference
        # Eagerly load, tokenize and pad the whole split.
        self.data = self.read(mode, history_in_context, kn_in_context)

    def __getitem__(self, idx):
        """Returns one data pair (source and target) as LongTensors."""
        item = {}
        for key in ["context", "response", "chosen_sentence"]:
            item[key] = torch.LongTensor(self.data[key][idx])
            item[f"{key}_mask"] = torch.LongTensor(self.data[f"{key}_mask"][idx])
        return item

    def __len__(self):
        # BUGFIX: read() never produces an "episode_id" column (see _gen
        # below), so len(self.data["episode_id"]) raised KeyError; "context"
        # is present for every model type.
        return len(self.data["context"])

    def _load_and_preprocess_all(self, mode: str, history_in_context: bool, max_episode_length: int):
        """Abstract hook: return the list of preprocessed episodes for `mode`.

        BUGFIX: signature now matches the call in read() and the
        WowDialogReader override (it previously declared only `mode`).
        """
        raise NotImplementedError

    def read(self, mode: str, history_in_context: bool, kn_in_context: bool):
        """Load split `mode` and convert it into padded id sequences."""

        def _gen(episodes, sos_token, eos_token):
            """
            Convert examples into samples for training and testing:
            1. truncate the knowledge
            2. split the episode into training samples
            """
            samples = {
                "context": [],          # token-id lists
                "response": [],         # token ids (seq2seq) or position mask (decoder_only)
                "chosen_sentence": [],  # token ids of the gold knowledge sentence
            }
            if self._model_type == "decoder_only":
                samples.update({"token_type": []})
            for _id, episode in enumerate(tqdm(episodes, desc="Generate samples", ncols=100)):
                contexts = episode["context"]                # list of utterance strings
                response = episode["response"]               # response string
                checked_sentence = episode["checked_sentence"]  # gold knowledge string
                episode_num = episode["episode_num"]         # int
                episode_length = len(episode["context"])
                token_type_ids = []
                # knowledge tokens followed by the <eos_k> separator
                kn = self._tokenizer.encode(checked_sentence.strip(), add_special_tokens=False) + self._tokenizer.encode("<eos_k>", add_special_tokens=False)
                if kn_in_context:
                    token_type_ids += [0] * len(kn)
                # history layout:
                # USR_{t-1} <eos_u> SYS_{t-1} <eos_r> USR_{t} <eos_u>
                history = []
                if history_in_context:
                    for num in range(len(contexts)):
                        temp = self._tokenizer.encode(contexts[num].strip(), add_special_tokens=False)
                        history += temp
                        if num % 2 == 0:
                            # even positions are user turns
                            history += self._tokenizer.encode("<eos_u>", add_special_tokens=False)
                            token_type_ids += [1] * len(temp)
                        else:
                            history += self._tokenizer.encode("<eos_r>", add_special_tokens=False)
                            token_type_ids += [0] * len(temp)
                    if kn_in_context:
                        context = kn + history
                    else:
                        context = history
                else:
                    context = self._tokenizer.encode(contexts[-1].strip() + "<eos_u>", add_special_tokens=False)
                if self._model_type == "seq2seq":  # [bos] seq1 [sep] seq2 [eos]
                    samples["context"].append(self._tokenizer.build_inputs_with_special_tokens(context))
                    samples["response"].append(self._tokenizer.encode(response.strip()))
                elif self._model_type == "decoder_only":  # GPT2 model without pre-defined special tokens
                    # NOTE(review): `context` is already a list of ids here, so
                    # this second encode() relies on the tokenizer accepting
                    # pre-tokenized input — confirm for the tokenizer in use.
                    context = self._tokenizer.encode(context)
                    response = self._tokenizer.encode(sos_token + response.strip() + eos_token)
                    # 1s over context (+sos) positions, 0s over the response
                    response_mask = [1] * (len(context) + 1) + [0] * (len(response) - 1)
                    # NOTE(review): overwrites the token types accumulated
                    # above (`=` rather than `+=`) — confirm intended.
                    token_type_ids = [1] * len(response)
                    context += response
                    samples["context"].append(context)
                    samples["response"].append(response_mask)
                else:
                    raise ValueError(f"Model type '{self._model_type}' is invalid.")
                samples["chosen_sentence"].append(self._tokenizer.encode(checked_sentence.strip()))
                if self._model_type == "decoder_only":
                    samples["token_type"].append(token_type_ids)
                if self._debug:
                    # keep only a handful of samples in debug runs
                    if len(samples["context"]) >= 10:
                        break
            return samples

        def _uniform(samples, history_in_context):
            """
            1. pad the sents in the same sample to the maximum length
            2. get the length of the inputs
            3. get the mask of the inputs
            """
            uniformed_samples = {}
            for key in samples:
                sample = samples[key]
                # NOTE(review): uses the private `_pad_token` attribute of the
                # tokenizer — fragile across transformers versions.
                pad_id = self._tokenizer.convert_tokens_to_ids(self._tokenizer._pad_token)
                if key == "checked_sentence":
                    # NOTE(review): samples use the key "chosen_sentence", so
                    # this branch never fires — kept for safety.
                    mask = get_mask(sample, max_len=self._max_kn_length)
                    padded_sample, _ = pad_sents(sample, pad_token=pad_id, max_len=self._max_kn_length)
                elif key == "context" or key == "token_type":
                    mask = get_mask(sample, max_len=self._max_context_length)
                    padded_sample, _ = pad_sents(sample, pad_token=pad_id, max_len=self._max_context_length)
                elif "kn" in key:
                    mask = get_mask(sample, max_len=1024)
                    padded_sample, _ = pad_sents(sample, pad_token=pad_id, max_len=1024)
                else:
                    mask = get_mask(sample, max_len=self._max_length)
                    padded_sample, _ = pad_sents(sample, pad_token=pad_id, max_len=self._max_length)
                uniformed_samples[key] = padded_sample
                uniformed_samples[f"{key}_mask"] = mask
            return uniformed_samples

        # read the datasets and do preprocessing (tokenize, set up KN)
        episodes = self._load_and_preprocess_all(mode, history_in_context, self._max_episode_length)
        # formulate the samples
        sos_token = self._tokenizer.bos_token
        eos_token = self._tokenizer.eos_token
        samples = _gen(episodes, sos_token, eos_token)
        uniformed_samples = _uniform(samples, history_in_context)
        return uniformed_samples

    @property
    def vocab(self):
        """Tokenizer vocabulary mapping."""
        return self._tokenizer.vocab

    def _preprocess_episodes(self, episodes, mode, history_in_context, max_episode_length=1):
        """
        Tokenize all the fields in Wizard-of-Wikipedia.
        Return List[Dict[samples](episodes)]
        Output Example:
        [
            { # one episode
                'context': [],        # history turns + current user turn
                'response': '',
                'checked_sentence': '',
                'episode_num': int,
            }
            ...
            {
                # another episode
            }
        ]
        """
        new_episodes = []
        for episode_num, episode in enumerate(tqdm(episodes, desc="Preprocess episodes", ncols=100)):
            for example_num, example in enumerate(episode):
                new_examples = {'context': [],
                                'response': '',
                                'checked_sentence': '',
                                # BUGFIX: _gen() reads episode["episode_num"],
                                # which the docstring promised but was never
                                # actually stored.
                                'episode_num': episode_num}
                history = []
                if example_num != 0 and history_in_context:
                    # include up to max_episode_length previous exchanges
                    start_idx = max(0, example_num - max_episode_length)
                    for num in range(start_idx, example_num):
                        history.append(episode[num]['text'].lower().strip())
                        history.append(episode[num]['labels'][0].lower().strip() if mode == "train" else episode[num]['eval_labels'][0].lower().strip())
                context = history + [example['text'].lower().strip()]
                if mode == "train":
                    response = example['labels'][0]
                else:
                    response = example['eval_labels'][0]
                checked_sentence = example['checked_sentence']
                new_examples['context'] = context
                new_examples['response'] = response.lower()
                new_examples['checked_sentence'] = checked_sentence.lower()
                new_episodes.append(new_examples)
        return new_episodes
class WowDialogReader(DialogReader):
    """DialogReader backed by the ParlAI Wizard-of-Wikipedia task."""

    def __init__(self, *args, **kwargs):
        # NOTE(review): super().__init__ already triggers the full data read,
        # so self._task is only assigned afterwards and is not used while
        # reading.
        super().__init__(*args, **kwargs)
        self._task = "wizard_of_wikipedia"

    def _load_and_preprocess_all(self, mode: str, history_in_context: bool, max_episode_length: int):
        """
        As default, it returns the following action dict:
        {
            'id': 'wizard_of_wikipedia'
            'text': chosen_topic\n # if first example in episode
                    last_apprentice_message\n # if possible
                    wizard_message # if --label-type is 'chosen_sent'
            'knowledge': title_1 sentence_1\n
                                .
                                .
                                .
                         title_m sentence_n # all knowledge available to wizard
            'labels': [title_checked sentence_checked] # default
                                OR
                      [wizard_response] # if --label-type set to 'response'
            'label_candidates': knowledge + [no_passages_used no_passages_used]
                                OR
                                100 response candidates  # if 'validation' or 'test'
            'chosen_topic': chosen_topic as untokenized string
            'checked_sentence': checked sentence if wizard, else None # if --include_checked_sentence
            'title': title of checked sentence # if --include_checked_sentence
            --> if not exists, then checked_sentence = title = 'no_passages_used'
            'episode_done': (Boolean) whether episode is done or not
        }
        """
        # "unseen" modes use the topic split; the datatype string keeps only
        # the base mode name ('valid_unseen' -> 'valid:stream').
        parlai_opt = self._get_parlai_opt([
            '--task', 'wizard_of_wikipedia:generator:topic_split' if 'unseen' in mode else 'wizard_of_wikipedia:generator:random_split',
            '--datatype', '{}:stream'.format(mode.split('_')[0]) if 'unseen' in mode else f'{mode}:stream', # 'train' for shuffled data and 'train:stream' for unshuffled data
            '--datapath', self._data_dir,
            '--include_knowledge_separator', 'True', # include speical __knowledge__ token between title and passage
            '--include_checked_sentence', 'True',
            '--label_type', 'response', # choices = ['response', 'chosen_sent']
        ])
        # As a default, world use "WizardDialogKnowledgeTeacher"
        agent = DictionaryAgent(parlai_opt)
        world = create_task(parlai_opt, agent)
        num_examples = world.num_examples()
        num_episodes = world.num_episodes()
        # Drain the world one episode at a time; each episode is a list of
        # action dicts as documented above.
        episodes = []
        for _ in range(num_episodes):
            examples = []
            while True:
                world.parley()
                example = world.acts[0]
                examples.append(example)
                if world.episode_done():
                    episodes.append(examples)
                    break
        return self._preprocess_episodes(episodes, mode, history_in_context, max_episode_length=max_episode_length)

    def _get_parlai_opt(self, options: List[str] = []):
        # Build a ParlAI opt dict from CLI-style arguments.
        # NOTE(review): mutable default argument — safe here because the list
        # is only read, but worth cleaning up.
        from parlai.scripts.build_dict import setup_args
        parser = setup_args()
        opt = parser.parse_args(options)
        return opt
def get_wow_dataloader(args, tokenizer, train=True, shuffle_train=True):
    """Build Wizard-of-Wikipedia dataloaders.

    Args:
        args: namespace with max_length, max_context_length, max_kn_length,
            max_episode_length, data_dir, history_in_context, kn_in_context,
            model_type, bsz, eval_bsz and optionally debug / inference.
        tokenizer: tokenizer passed through to WowDialogReader.
        train: when False, the "train" entry of the result is None.
        shuffle_train: whether the training loader shuffles.

    Returns:
        dict with keys "train", "valid", "valid_unseen" mapping to loaders.
    """
    def _reader(mode):
        # One place for the (previously triplicated) reader construction.
        return WowDialogReader(
            tokenizer,
            mode=mode,
            max_length=args.max_length,
            max_context_length=args.max_context_length,
            max_kn_length=args.max_kn_length,
            max_episode_length=args.max_episode_length,
            data_dir=args.data_dir,
            history_in_context=args.history_in_context,
            kn_in_context=args.kn_in_context,
            model_type=args.model_type,
            debug=getattr(args, 'debug', False),
            inference=getattr(args, 'inference', False),
        )

    if train:
        # getDataLoader's `test` flag disables shuffling.
        train_loader = getDataLoader(_reader("train"), args.bsz, test=not shuffle_train)
    else:
        train_loader = None
    valid_loader = getDataLoader(_reader("valid"), args.eval_bsz, test=True)
    valid_unseen_loader = getDataLoader(_reader("valid_unseen"), args.eval_bsz, test=True)
    return {
        "train": train_loader,
        "valid": valid_loader,
        "valid_unseen": valid_unseen_loader,
    }
def get_data_from_batch(batch, model_type="decoder_only"):
    """Unpack a collated batch into model inputs and targets.

    Args:
        batch: dict of LongTensors from DialogReader ("context",
            "context_mask", "response", "response_mask", "chosen_sentence",
            "chosen_sentence_mask").
        model_type: "seq2seq" for encoder-decoder, anything else uses
            decoder-only next-token shifting.

    Returns:
        (inputs, masks, kn_sent, kn_mask, topic, topic_masks, labels,
        label_masks, response_masks, label_starts, label_idxs, None).
        topic/topic_masks are always None: this reader has no topic field.
        BUGFIX: they were previously returned without ever being assigned,
        so every call raised NameError.
    """
    kn_sent = batch["chosen_sentence"]
    kn_mask = batch["chosen_sentence_mask"]
    # Placeholder slots kept for API compatibility with callers that unpack
    # twelve values.
    topic = None
    topic_masks = None
    if model_type == "seq2seq":
        inputs = batch["context"]
        masks = batch["context_mask"]
        labels = batch["response"]
        label_masks = batch["response_mask"]
        response_masks = None
        label_starts = torch.Tensor([0] * inputs.size(0))
        label_idxs = torch.sum(label_masks, 1)
    else:
        # Decoder-only: shift the concatenated sequence by one position.
        seqlen = batch["context"].size(1)
        inputs = batch["context"].narrow(1, 0, seqlen - 1).clone()
        masks = batch["context_mask"].narrow(1, 0, seqlen - 1).clone()
        labels = batch["context"].narrow(1, 1, seqlen - 1).clone()
        label_masks = batch["context_mask"].narrow(1, 1, seqlen - 1).clone()
        # "response" holds the response-position mask built by DialogReader.
        response_masks = batch["response"].narrow(1, 1, seqlen - 1).clone()
        label_starts = torch.sum(response_masks, 1)
        label_idxs = torch.sum(label_masks, 1)
    return inputs, masks, kn_sent, kn_mask, topic, topic_masks, \
        labels, label_masks, response_masks, label_starts, label_idxs, None
if __name__ == "__main__":
args = {
"max_length" : 128,
"max_context_length" : 128, # 256
"max_kn_length" : 128,
"max_episode_length" : 1,
"data_dir" | |
<gh_stars>0
from _learning import *
from _learning import _lunarySharedFeatFunctionsGen,_lpottsFunctionsGen
import numpy
import struct
from opengm import index_type,value_type, label_type, graphicalModel,gridVis
from opengm import configuration as opengmConfig, LUnaryFunction
from opengm import to_native_boost_python_enum_converter
from opengm import Tribool
#from progressbar import *
from functools import partial
def _extendedGetLoss(self, model_idx, infCls, parameter = None):
    """Return the loss of model `model_idx` evaluated with `infCls`.

    Builds a default opengm.InfParam when none is given and converts it to
    the native C++ parameter before delegating to the bound _getLoss.
    """
    if parameter is None:
        import opengm
        parameter = opengm.InfParam()
    native_param = infCls.get_cpp_parameter(operator='adder', accumulator='minimizer', parameter=parameter)
    return self._getLoss(native_param, model_idx)
def _extendedGetTotalLoss(self, infCls, parameter = None):
    """Return the summed loss over all models, evaluated with `infCls`."""
    if parameter is None:
        import opengm
        parameter = opengm.InfParam()
    native_param = infCls.get_cpp_parameter(operator='adder', accumulator='minimizer', parameter=parameter)
    return self._getTotalLoss(native_param)
# Tag datasets of this type so the learner factories below can assert the
# expected loss kind.
DatasetWithFlexibleLoss.lossType = 'flexible'
class LossParameter(FlexibleLossParameter):
    """Convenience wrapper around FlexibleLossParameter.

    Converts the Python-side loss type to the native boost::python enum and
    installs the optional loss multipliers that are valid for that loss.
    """
    def __init__(self, lossType, labelMult=None, nodeMult=None, factorMult=None):
        super(LossParameter, self).__init__()
        # translate the given name/enum into the C++ enum type of lossType
        self.lossType = to_native_boost_python_enum_converter(lossType,self.lossType.__class__)

        if labelMult is not None:
            # per-label multipliers are only accepted for the hamming loss
            assert self.lossType == LossType.hamming
            self.setLabelLossMultiplier(labelMult)
        if nodeMult is not None:
            # per-node multipliers make no sense for the partition loss
            assert self.lossType != LossType.partition
            self.setNodeLossMultiplier(nodeMult)
        if factorMult is not None:
            # per-factor multipliers only exist for the partition loss
            assert self.lossType == LossType.partition
            self.setFactorLossMultiplier(factorMult)
def extend_learn():
    """Attach `learn*` convenience methods to every exported learner class.

    Each wrapper builds the native C++ inference parameter from `infCls`
    and forwards to the corresponding bound learning entry point,
    translating boost::python signature mismatches into a readable
    RuntimeError.
    """
    _SIGNATURE_MISMATCH = "did not match C++ signature"

    def _cpp_param(infCls, parameter):
        # Default-construct an InfParam when the caller passed none.
        if parameter is None:
            import opengm
            parameter = opengm.InfParam()
        return infCls.get_cpp_parameter(operator='adder', accumulator='minimizer', parameter=parameter)

    def learner_learn_normal(self, infCls, parameter=None):
        try:
            self._learn(_cpp_param(infCls, parameter))
        except Exception as e:
            # BUGFIX: the old code tested `str(e).find(...)` for truthiness;
            # find() returns -1 (truthy) when absent and 0 (falsy) when the
            # match is at position 0, so the check was inverted both ways.
            if _SIGNATURE_MISMATCH in str(e):
                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning" % str(infCls))
            raise  # anything else was previously mislabeled/swallowed

    def learner_learn_reduced_inf(self, infCls, parameter=None, persistency=True, tentacles=False, connectedComponents=False):
        try:
            self._learnReducedInf(_cpp_param(infCls, parameter), bool(persistency), bool(tentacles), bool(connectedComponents))
        except Exception as e:
            if _SIGNATURE_MISMATCH in str(e):
                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with reduced inference" % str(infCls))
            raise

    def learner_learn_reduced_inf_self_fusion(self, infCls, parameter=None, persistency=True, tentacles=False, connectedComponents=False):
        # NOTE(review): forwards to _learnReducedInf exactly like
        # learner_learn_reduced_inf above — possibly a copy-paste slip for a
        # dedicated self-fusion binding; behavior preserved as-is.
        try:
            self._learnReducedInf(_cpp_param(infCls, parameter), bool(persistency), bool(tentacles), bool(connectedComponents))
        except Exception as e:
            if _SIGNATURE_MISMATCH in str(e):
                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with reduced inference" % str(infCls))
            raise

    def learner_learn_self_fusion(self, infCls, parameter=None, fuseNth=1, fusionSolver="qpbo", maxSubgraphSize=2,
                                  redInf=True, connectedComponents=False, fusionTimeLimit=100.9, numStopIt=10):
        try:
            self._learnSelfFusion(_cpp_param(infCls, parameter), int(fuseNth), str(fusionSolver), int(maxSubgraphSize),
                                  bool(redInf), bool(connectedComponents), float(fusionTimeLimit), int(numStopIt))
        except Exception as e:
            if _SIGNATURE_MISMATCH in str(e):
                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with self fusion inference" % str(infCls))
            raise

    def learner_learn(self, infCls, parameter=None, infMode='normal', **kwargs):
        """Dispatch on the (possibly abbreviated) inference-mode name."""
        assert infMode in ['normal', 'n', 'selfFusion', 'sf', 'reducedInference', 'ri', 'reducedInferenceSelfFusion', 'risf']
        if infMode in ['normal', 'n']:
            self.learnNormal(infCls=infCls, parameter=parameter)
        elif infMode in ['selfFusion', 'sf']:
            self.learnSelfFusion(infCls=infCls, parameter=parameter, **kwargs)
        elif infMode in ['reducedInference', 'ri']:
            self.learnReducedInf(infCls=infCls, parameter=parameter, **kwargs)
        elif infMode in ['reducedInferenceSelfFusion', 'risf']:
            self.learnReducedInfSelfFusion(infCls=infCls, parameter=parameter, **kwargs)

    # all learner classes exported by the C++ extension
    learnerClss = [GridSearch_FlexibleLoss, StructPerceptron_FlexibleLoss,
                   SubgradientSSVM_FlexibleLoss, Rws_FlexibleLoss]
    if opengmConfig.withCplex or opengmConfig.withGurobi:
        learnerClss.append(StructMaxMargin_Bundle_FlexibleLoss)

    for learnerCls in learnerClss:
        learnerCls.learn = learner_learn
        learnerCls.learnNormal = learner_learn_normal
        learnerCls.learnReducedInf = learner_learn_reduced_inf
        learnerCls.learnSelfFusion = learner_learn_self_fusion
        learnerCls.learnReducedInfSelfFusion = learner_learn_reduced_inf_self_fusion
extend_learn()
del extend_learn
# Attach the inference-based loss evaluation helpers defined above.
DatasetWithFlexibleLoss.getLoss = _extendedGetLoss
DatasetWithFlexibleLoss.getTotalLoss = _extendedGetTotalLoss
def createDataset(numWeights, numInstances=0):
    """Create a DatasetWithFlexibleLoss holding `numInstances` models and a
    shared, zero-initialised weight vector of length `numWeights`."""
    weight_vector = Weights(numWeights)
    dataset = DatasetWithFlexibleLoss(numInstances)
    dataset.setWeights(weight_vector)
    # zero out the weights actually stored inside the dataset
    shared_weights = dataset.getWeights()
    for index in range(numWeights):
        shared_weights[index] = 0.0
    return dataset
def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
    """Create a grid-search learner over the weight box [lower, upper]
    sampled at `nTestPoints` points per dimension."""
    assert dataset.__class__.lossType == 'flexible'
    as_array = numpy.require
    # the size_t dtype must match the pointer width of this interpreter build
    index_dtype = 'uint32' if struct.calcsize("P") * 8 == 32 else 'uint64'
    param = GridSearch_FlexibleLossParameter(
        as_array(lowerBounds, dtype='float64'),
        as_array(upperBounds, dtype='float64'),
        as_array(nTestPoints, dtype=index_dtype))
    return GridSearch_FlexibleLoss(dataset, param)
def structPerceptron(dataset, learningMode='online',eps=1e-5, maxIterations=10000, stopLoss=0.0, decayExponent=0.0, decayT0=0.0):
    """Create a structured perceptron learner for `dataset`.

    `learningMode` selects between per-sample ('online') and full-dataset
    ('batch') weight updates.
    """
    assert dataset.__class__.lossType == 'flexible'
    if learningMode not in ('online', 'batch'):
        raise RuntimeError("wrong learning mode, must be 'online' or 'batch' ")
    mode_enum = StructPerceptron_FlexibleLossParameter_LearningMode
    chosen_mode = mode_enum.online if learningMode == 'online' else mode_enum.batch
    config = StructPerceptron_FlexibleLossParameter()
    config.eps = float(eps)
    config.maxIterations = int(maxIterations)
    config.stopLoss = float(stopLoss)
    config.decayExponent = float(decayExponent)
    config.decayT0 = float(decayT0)
    config.learningMode = chosen_mode
    return StructPerceptron_FlexibleLoss(dataset, config)
def rws(dataset,eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0, sigma=1.0, p=10):
    """Create a random-walk / sampling (RWS) learner for `dataset`."""
    assert dataset.__class__.lossType == 'flexible'
    config = Rws_FlexibleLossParameter()
    config.eps = float(eps)
    config.maxIterations = int(maxIterations)
    config.stopLoss = float(stopLoss)
    config.learningRate = float(learningRate)
    config.C = float(C)
    config.p = int(p)
    config.sigma = float(sigma)
    return Rws_FlexibleLoss(dataset, config)
def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0, averaging=-1, nConf=0):
    """Create a subgradient SSVM learner for `dataset`.

    `learningMode` selects between per-sample ('online') and full-dataset
    ('batch') subgradient updates.
    """
    assert dataset.__class__.lossType == 'flexible'
    if learningMode not in ('online', 'batch'):
        raise RuntimeError("wrong learning mode, must be 'online' or 'batch' ")
    mode_enum = SubgradientSSVM_FlexibleLossParameter_LearningMode
    chosen_mode = mode_enum.online if learningMode == 'online' else mode_enum.batch
    config = SubgradientSSVM_FlexibleLossParameter()
    config.eps = float(eps)
    config.maxIterations = int(maxIterations)
    config.stopLoss = float(stopLoss)
    config.learningRate = float(learningRate)
    config.C = float(C)
    config.learningMode = chosen_mode
    config.averaging = int(averaging)
    config.nConf = int(nConf)
    return SubgradientSSVM_FlexibleLoss(dataset, config)
def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0, epsStrategy='change', optimizer='bundle'):
    """Create a struct-max-margin (bundle) learner for `dataset`.

    Requires an LP backend (CPLEX or Gurobi) to be compiled in.
    `epsStrategy` 'gap' derives eps from the duality gap; any other value
    uses the change-based criterion.
    """
    if not (opengmConfig.withCplex or opengmConfig.withGurobi):
        raise RuntimeError("this learner needs withCplex or withGurobi")
    if optimizer != 'bundle':
        raise RuntimeError("Optimizer type must be 'bundle' for now!")
    assert dataset.__class__.lossType == 'flexible'
    eps_from_gap = (epsStrategy == 'gap')
    config = StructMaxMargin_Bundle_FlexibleLossParameter(regularizerWeight, minEps, nSteps, eps_from_gap)
    return StructMaxMargin_Bundle_FlexibleLoss(dataset, config)
def maxLikelihoodLearner(
        dataset,
        maximumNumberOfIterations = 100,
        gradientStepSize = 0.1,
        weightStoppingCriteria = 0.00000001,
        gradientStoppingCriteria = 0.00000000001,
        infoFlag = True,
        infoEveryStep = False,
        weightRegularizer = 1.0,
        beliefPropagationMaximumNumberOfIterations = 40,
        beliefPropagationConvergenceBound = 0.0001,
        beliefPropagationDamping = 0.5,
        beliefPropagationReg = 1.0,
        beliefPropagationTemperature = 1.0,
        beliefPropagationIsAcyclic = Tribool(0)
        ):
    """Create a maximum-likelihood learner (gradient ascent with belief
    propagation marginals) for `dataset`.

    NOTE(review): `beliefPropagationReg` is accepted but never forwarded to
    the parameter constructor below (the commented-out `param.reg` hints it
    once was) — confirm against the C++ signature of
    MaxLikelihood_FlexibleLossParameter.
    """
    learnerCls = MaxLikelihood_FlexibleLoss
    learnerParamCls = MaxLikelihood_FlexibleLossParameter

    # Positional construction of the native parameter object; argument order
    # must match the C++ constructor exactly.
    param = learnerParamCls(
        maximumNumberOfIterations,
        gradientStepSize,
        weightStoppingCriteria,
        gradientStoppingCriteria,
        infoFlag,
        infoEveryStep,
        weightRegularizer,
        beliefPropagationMaximumNumberOfIterations,
        beliefPropagationConvergenceBound,
        beliefPropagationDamping,
        beliefPropagationTemperature,
        beliefPropagationIsAcyclic
    )
    #param.maxIterations = int(maxIterations)
    #param.reg = float(reg)
    #param.temperature = float(temp)
    learner = learnerCls(dataset, param)
    return learner
def lUnaryFunction(weights, numberOfLabels, features, weightIds):
    """Build a single learnable unary function.

    Accepts 2-D feature/weight-id arrays with numberOfLabels or
    numberOfLabels-1 rows, or 1-D arrays for the binary-label case, which
    are promoted to a single row.
    """
    assert numberOfLabels >= 2
    features = numpy.require(features, dtype=value_type)
    weightIds = numpy.require(weightIds, dtype=index_type)
    assert features.ndim == weightIds.ndim
    if features.ndim == 1 or weightIds.ndim == 1:
        # 1-D input is only meaningful with two labels: one weighted row
        assert numberOfLabels == 2
        assert features.shape[0] == weightIds.shape[0]
        features = features.reshape(1, -1)
        weightIds = weightIds.reshape(1, -1)
    assert features.shape[0] in (numberOfLabels, numberOfLabels - 1)
    assert weightIds.shape[0] in (numberOfLabels, numberOfLabels - 1)
    assert features.shape[1] == weightIds.shape[1]
    return LUnaryFunction(weights=weights, numberOfLabels=int(numberOfLabels),
                          features=features, weightIds=weightIds)
class FeaturePolicy(object):
    """Enumeration of feature-sharing policies for lUnaryFunctions."""
    # all labels share one feature vector (only the weights differ per label)
    sharedBetweenLabels = 0
def lUnaryFunctions(weights,numberOfLabels, features, weightIds,
                    featurePolicy = FeaturePolicy.sharedBetweenLabels,
                    **kwargs):
    """Generate a batch of learnable unary functions.

    Args:
        weights: shared Weights object.
        numberOfLabels: labels per variable.
        features: (numFunctions, numFeatures) array-like.
        weightIds: (numLabels[-1], numFeatures) weight indices.
        featurePolicy: only FeaturePolicy.sharedBetweenLabels is implemented.
        **kwargs: makeFirstEntryConst, addConstFeature (both default False).

    Raises:
        RuntimeError: for any other feature policy.
    """
    if (featurePolicy == FeaturePolicy.sharedBetweenLabels ):
        makeFirstEntryConst = kwargs.get('makeFirstEntryConst',False)
        addConstFeature = kwargs.get('addConstFeature',False)
        ff = numpy.require(features, dtype=value_type)
        wid = numpy.require(weightIds, dtype=index_type)
        # BUGFIX: validate the converted arrays, not the raw inputs — plain
        # Python lists have no .ndim and used to raise AttributeError here.
        assert ff.ndim == 2
        assert wid.ndim == 2
        res = _lunarySharedFeatFunctionsGen(
            weights = weights,
            numFunctions = int(ff.shape[0]),
            numLabels = int(numberOfLabels),
            features = ff,
            weightIds = wid,
            makeFirstEntryConst = bool(makeFirstEntryConst),
            addConstFeature = bool(addConstFeature)
        )
        # Keep Python-side references alive so the C++ generator's views
        # cannot outlive their backing buffers.
        res.__dict__['_features_'] =features
        res.__dict__['_ff_'] = ff
        res.__dict__['_weights_'] = weights
        return res
    else :
        # BUGFIX: message typo ("noy") corrected
        raise RuntimeError("not yet implemented")
def lPottsFunctions(weights, numberOfLabels, features, weightIds,
                    addConstFeature = False):
    """Generate a batch of learnable Potts functions.

    Args:
        weights: shared Weights object.
        numberOfLabels: labels per variable.
        features: (numFunctions, numFeatures) array.
        weightIds: (numFeatures [+1 with const feature],) weight indices.
        addConstFeature: append a constant-1 feature column.

    Raises:
        RuntimeError: on wrong feature/weightId dimensionality.
    """
    # check that features has the correct shape
    if features.ndim != 2:
        raise RuntimeError("feature must be two-dimensional")
    # check that weights has the correct shape
    if weightIds.ndim != 1:
        raise RuntimeError("weightIds must be one-dimensional")
    if weightIds.shape[0] != features.shape[1] + int(addConstFeature) :
        raise RuntimeError("weightIds.shape[0] must be equal to features.shape[1]")

    # do the c++ call here, which generates a function generator
    ff = numpy.require(features, dtype=value_type)
    wid = numpy.require(weightIds, dtype=index_type)
    res = _lpottsFunctionsGen(
        weights=weights,
        numFunctions=long(features.shape[0]),  # `long`: module targets Python 2
        numLabels=long(numberOfLabels),
        features=ff,
        weightIds=wid,
        addConstFeature=bool(addConstFeature)
    )
    # BUGFIX: the keep-alive stash previously stored wid under '_features_'
    # and ff under '_weights_'; store the right objects (cf. lUnaryFunctions)
    # so the C++ views keep their actual backing buffers alive.
    res.__dict__['_features_'] = ff
    res.__dict__['_weightIds_'] = wid
    res.__dict__['_weights_'] = weights
    return res
# def getPbar(size, name):
# widgets = ['%s: '%name, Percentage(), ' ', Bar(marker='0',left='[',right=']'),
# ' ', ETA(), ' ', FileTransferSpeed()] #see docs for other options
# pbar = ProgressBar(widgets=widgets, maxval=size)
# return pbar
def secondOrderImageDataset(imgs, gts, numberOfLabels, fUnary, fBinary, addConstFeature, trainFraction=0.75):
#try:
# import vigra
# from progressbar import *
#except:
# pass
# train test
nImg = len(imgs)
nTrain = int(float(nImg)*trainFraction+0.5)
nTest = (nImg-nTrain)
def getFeat(fComp, im):
res = []
for f in fComp:
r = f(im)
if r.ndim == 2:
r = r[:,:, None]
res.append(r)
return res
# compute features for a single image
tImg = imgs[0]
unaryFeat = getFeat(fUnary, tImg)
unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
nUnaryFeat = unaryFeat.shape[-1] + int(addConstFeature)
nUnaryFeat *= numberOfLabels - int(numberOfLabels==2)
if len(fBinary)>0:
binaryFeat = getFeat(fBinary, tImg)
binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
nBinaryFeat = binaryFeat.shape[-1] + int(addConstFeature)
nWeights = nUnaryFeat + nBinaryFeat
else:
nBinaryFeat = 0
print "------------------------------------------------"
print "nTrain",nTrain,"nTest",nTest
print "nWeights",nWeights,"(",nUnaryFeat,nBinaryFeat,")"
print "------------------------------------------------"
train_set = []
tentative_test_set = []
for i,(img,gt) in enumerate(zip(imgs,gts)):
if(i<nTrain):
train_set.append((img,gt))
else:
tentative_test_set.append((img,gt))
dataset = createDataset(numWeights=nWeights)
weights = dataset.getWeights()
uWeightIds | |
dtype=dtype)
# We are testing correct canonicalization behavior here, so we turn off the
# permissive canonicalization logic in the test harness.
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
canonicalize_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
  def testArrayUnsupportedDtypeError(self):
    """Structured (record) dtypes are rejected by jnp.array with TypeError."""
    with self.assertRaisesRegex(TypeError,
                                "JAX only supports number and bool dtypes.*"):
      jnp.array(3, [('a','<i4'),('b','<i4')])
  def testArrayFromInteger(self):
    """jnp.array on Python ints: canonical dtype at the extremes, OverflowError beyond."""
    int_dtype = dtypes.canonicalize_dtype(jnp.int64)
    int_max = jnp.iinfo(int_dtype).max
    int_min = jnp.iinfo(int_dtype).min
    # Values at extremes are converted correctly.
    for val in [int_min, 0, int_max]:
      self.assertEqual(jnp.array(val).dtype, int_dtype)
    # out of bounds leads to an OverflowError
    val = int_max + 1
    with self.assertRaisesRegex(OverflowError, f"Python int {val} too large to convert to {int_dtype.name}"):
      jnp.array(val)
    # explicit uint64 should work (int_max + 1 still fits in uint64)
    if config.x64_enabled:
      self.assertEqual(val, jnp.array(val, dtype='uint64'))
# TODO(jakevdp): fix list inputs to jnp.array and enable the following test
# def testArrayFromList(self):
# int_max = jnp.iinfo(jnp.int64).max
# int_min = jnp.iinfo(jnp.int64).min
#
# # Values at extremes are converted correctly.
# for val in [int_min, 0, int_max]:
# self.assertEqual(jnp.array([val]).dtype, dtypes.canonicalize_dtype('int64'))
#
# # list of values results in promoted type.
# self.assertEqual(jnp.array([0, np.float16(1)]).dtype, jnp.result_type('int64', 'float16'))
#
# # out of bounds leads to an OverflowError
# val = int_min - 1
# with self.assertRaisesRegex(OverflowError, f"Python int {val} too large to convert to int64"):
# jnp.array([0, val])
  def testIssue121(self):
    """Regression test: jnp.array(3) must not be a NumPy scalar."""
    assert not np.isscalar(jnp.array(3))
  def testArrayOutputsDeviceArrays(self):
    """jnp.array returns device arrays for empty inputs and __array__ objects."""
    assert xla.type_is_device_array(jnp.array([]))
    assert xla.type_is_device_array(jnp.array(np.array([])))

    # objects implementing the __array__ protocol are converted too
    class NDArrayLike:
      def __array__(self, dtype=None):
        return np.array([], dtype=dtype)
    assert xla.type_is_device_array(jnp.array(NDArrayLike()))

    # NOTE(mattjj): disabled b/c __array__ must produce ndarrays
    # class DeviceArrayLike:
    #     def __array__(self, dtype=None):
    #         return jnp.array([], dtype=dtype)
    # assert  xla.type_is_device_array(jnp.array(DeviceArrayLike()))
def testArrayMethod(self):
class arraylike(object):
dtype = np.float32
def __array__(self, dtype=None):
return np.array(3., dtype=dtype)
a = arraylike()
ans = jnp.array(a)
assert ans == 3.
def testMemoryView(self):
    """jnp.array accepts buffer-protocol objects such as bytearray."""
    result = jnp.array(bytearray(b'\x2a'))
    expected = np.array([0x2a], dtype=np.uint8)
    self.assertAllClose(result, expected)
def testIsClose(self):
    """jnp.isclose agrees with np.isclose over finite, inf and nan inputs, eager and jitted."""
    c_isclose = api.jit(jnp.isclose)
    c_isclose_nan = api.jit(partial(jnp.isclose, equal_nan=True))
    n = 2

    rng = np.random.RandomState(0)
    x = rng.randn(n, 1)
    y = rng.randn(n, 1)
    inf = np.asarray(n * [np.inf]).reshape([n, 1])
    nan = np.asarray(n * [np.nan]).reshape([n, 1])
    args = [x, y, inf, -inf, nan]

    # Exercise every pairwise combination, with and without equal_nan.
    for arg0 in args:
        for arg1 in args:
            result_np = np.isclose(arg0, arg1)
            result_jax = jnp.isclose(arg0, arg1)
            result_jit = c_isclose(arg0, arg1)
            self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
            self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
            result_np = np.isclose(arg0, arg1, equal_nan=True)
            result_jax = jnp.isclose(arg0, arg1, equal_nan=True)
            result_jit = c_isclose_nan(arg0, arg1)
            self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
            self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_x={}_y={}_equal_nan={}".format(x, y, equal_nan),
     "x": x, "y": y, "equal_nan": equal_nan}
    for x, y in itertools.product([
        1, [1], [1, 1 + 1E-4], [1, np.nan]], repeat=2)
    for equal_nan in [True, False]))
def testAllClose(self, x, y, equal_nan):
    """jnp.allclose matches np.allclose for scalars, lists, near-equal and nan inputs."""
    jnp_fun = partial(jnp.allclose, equal_nan=equal_nan, rtol=1E-3)
    np_fun = partial(np.allclose, equal_nan=equal_nan, rtol=1E-3)
    args_maker = lambda: [x, y]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
def testZeroStridesConstantHandler(self):
    """A closed-over constant with zero strides (from broadcast_to) lowers correctly under jit."""
    raw_const = np.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
    # broadcast_to produces a view with stride 0 along the expanded axes.
    const = np.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))

    def fun(x):
        return x * const

    fun = api.jit(fun)
    out_val = fun(3.)
    self.assertAllClose(out_val, 3. * const, check_dtypes=False)
def testIsInstanceNdarrayDuringTracing(self):
    """During jit tracing, traced values still satisfy isinstance(x, jnp.ndarray)."""
    arr = np.ones(3)

    @api.jit
    def f(x):
        # The assertion runs at trace time, on the tracer standing in for x.
        self.assertIsInstance(x, jnp.ndarray)
        return jnp.sum(x)

    f(arr)
def testNonArrayErrorMessage(self):
    """Raw Python lists are rejected by jnp.add/jnp.dot, both eagerly and under jit."""
    x = [1., 2.]
    y = np.array([3., 4.])

    def g(x, y):
        return jnp.add(x, y)

    def f(x, y):
        return jnp.dot(x, y)

    self.assertRaises(TypeError, lambda: g(x, y))
    self.assertRaises(TypeError, lambda: f(x, y))
    self.assertRaises(TypeError, lambda: api.jit(g)(x, y))
    self.assertRaises(TypeError, lambda: api.jit(f)(x, y))
def testAbstractionErrorMessage(self):
    """Using traced values where concrete Python values are required raises targeted errors."""

    @api.jit
    def f(x, n):
        # range(n) forces the traced int into a concrete Python int.
        for _ in range(n):
            x = x * x
        return x

    self.assertRaises(jax.errors.TracerIntegerConversionError, lambda: f(3., 3))

    @api.jit
    def g(x):
        # A Python `if` on a traced value requires concretization.
        if x > 0.:
            return x * 2
        else:
            return x + 2

    self.assertRaises(jax.errors.ConcretizationTypeError, lambda: g(3.))
def testTracingPrimitiveWithNoTranslationErrorMessage(self):
    """A primitive lacking a translation rule should fail only when traced, not eagerly."""
    # TODO(mattjj): update this for jax3
    self.skipTest("test needs jax3 update")
    foo = jnp._not_implemented(lambda x: x)

    # No error if there's no tracing.
    foo(np.arange(3))

    cfoo = api.jit(foo)
    self.assertRaises(NotImplementedError, lambda: cfoo(np.arange(3)))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in [(3,), (2, 3)]
    for dtype in default_dtypes
    for axis in list(range(-len(shape), len(shape))) + [None] + [tuple(range(len(shape)))]  # Test negative axes and tuples
))
def testFlip(self, shape, dtype, axis):
    """jnp.flip matches np.flip for scalar, negative, tuple and None axes."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    jnp_op = lambda x: jnp.flip(x, axis)
    np_op = lambda x: np.flip(x, axis)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in [(3,), (2, 3), (3, 2, 4)]
    for dtype in default_dtypes))
def testFlipud(self, shape, dtype):
    """jnp.flipud matches np.flipud across 1D/2D/3D shapes and default dtypes."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    # flipud is unary, so the functions can be passed through directly.
    self._CheckAgainstNumpy(np.flipud, jnp.flipud, args_maker)
    self._CompileAndCheck(jnp.flipud, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in [(3, 2), (2, 3), (3, 2, 4)]
    for dtype in default_dtypes))
def testFliplr(self, shape, dtype):
    """jnp.fliplr matches np.fliplr across 2D/3D shapes and default dtypes."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    # fliplr is unary, so the functions can be passed through directly.
    self._CheckAgainstNumpy(np.fliplr, jnp.fliplr, args_maker)
    self._CompileAndCheck(jnp.fliplr, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_k={}_axes={}".format(
        jtu.format_shape_dtype_string(shape, dtype), k, axes),
     "shape": shape, "dtype": dtype, "k": k, "axes": axes}
    for shape, axes in [
        [(2, 3), (0, 1)],
        [(2, 3), (1, 0)],
        [(4, 3, 2), (0, 2)],
        [(4, 3, 2), (2, 1)],
    ]
    for k in range(-3, 4)  # negative and positive rotation counts
    for dtype in default_dtypes))
def testRot90(self, shape, dtype, k, axes):
    """jnp.rot90 matches np.rot90 for all k in [-3, 3] and several axis pairs."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    jnp_op = lambda x: jnp.rot90(x, k, axes)
    np_op = lambda x: np.rot90(x, k, axes)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
    """The .ravel() method compiles under jit and matches eager execution."""
    rng = np.random.RandomState(0)

    def args_maker():
        return [rng.randn(3, 4).astype("float32")]

    self._CompileAndCheck(lambda x: x.ravel(), args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_order={}_mode={}".format(
        shape, order, mode),
     "shape": shape, "order": order, "mode": mode}
    for shape in nonempty_nonscalar_array_shapes
    for order in ['C', 'F']
    for mode in ['wrap', 'clip', 'raise']))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testRavelMultiIndex(self, shape, order, mode):
    """jnp.ravel_multi_index matches NumPy, including the error path for mode='raise'."""
    # generate indices in each dimension with a few out of bounds.
    rngs = [jtu.rand_int(self.rng(), low=-1, high=dim + 1)
            for dim in shape]
    # generate multi_indices of different dimensions that broadcast.
    args_maker = lambda: [tuple(rng(ndim * (3,), jnp.int_)
                                for ndim, rng in enumerate(rngs))]

    def np_fun(x):
        try:
            return np.ravel_multi_index(x, shape, order=order, mode=mode)
        except ValueError as err:
            if str(err).startswith('invalid entry'):
                # sentinel indicating expected error.
                return -999
            else:
                raise

    def jnp_fun(x):
        try:
            return jnp.ravel_multi_index(x, shape, order=order, mode=mode)
        except ValueError as err:
            if str(err).startswith('invalid entry'):
                # sentinel indicating expected error.
                return -999
            else:
                raise

    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    if mode == 'raise':
        # Under jit the index values are abstract, so a ConcretizationTypeError
        # carrying this hint is expected instead of the NumPy ValueError.
        msg = ("The error occurred because ravel_multi_index was jit-compiled "
               "with mode='raise'. Use mode='wrap' or mode='clip' instead.")
        with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
            jax.jit(jnp_fun)(*args_maker())
    else:
        self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_ashape={}{}_cshapes={}{}_mode={}".format(
        adtype.__name__, ashape, cdtype.__name__, cshapes, mode),
     "ashape": ashape, "adtype": adtype, "cshapes": cshapes, "cdtype": cdtype, "mode": mode}
    for ashape in ((), (4,), (3, 4))
    for cshapes in [
        [(), (4,)],
        [(3, 4), (4,), (3, 1)]
    ]
    for adtype in int_dtypes
    for cdtype in default_dtypes
    for mode in ['wrap', 'clip', 'raise']))
def testChoose(self, ashape, adtype, cshapes, cdtype, mode):
    """jnp.choose matches np.choose, including the jit error path for mode='raise'."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(ashape, adtype), [rng(s, cdtype) for s in cshapes]]

    def np_fun(a, c):
        try:
            return np.choose(a, c, mode=mode)
        except ValueError as err:
            if mode == 'raise' and str(err).startswith('invalid entry'):
                return -999  # sentinel indicating expected error.
            else:
                raise

    def jnp_fun(a, c):
        try:
            return jnp.choose(a, c, mode=mode)
        except ValueError as err:
            if mode == 'raise' and str(err).startswith('invalid entry'):
                return -999  # sentinel indicating expected error.
            else:
                raise

    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    if mode == 'raise':
        # Index validation needs concrete values, so jit raises with this hint.
        msg = ("The error occurred because jnp.choose was jit-compiled"
               " with mode='raise'. Use mode='wrap' or mode='clip' instead.")
        with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
            jax.jit(jnp_fun)(*args_maker())
    else:
        self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.parameters(
    (0, (2, 1, 3)),
    (5, (2, 1, 3)),
    (0, ()),
    ([0, 1, 2], (2, 2)),
    ([[[0, 1], [2, 3]]], (2, 2)))
def testUnravelIndex(self, flat_index, shape):
    """jnp.unravel_index matches np.unravel_index for scalar and array indices."""
    def args_maker():
        return (flat_index, shape)

    self._CheckAgainstNumpy(np.unravel_index, jnp.unravel_index, args_maker)
    self._CompileAndCheck(jnp.unravel_index, args_maker)
def testUnravelIndexOOB(self):
self.assertEqual(jnp.unravel_index(2, (2,)), (1,))
self.assertEqual(jnp.unravel_index(-2, (2, 1, 3,)), (1, 0, 1))
self.assertEqual(jnp.unravel_index(-3, (2,)), (0,))
def testAstype(self):
    """The .astype cast on jax arrays matches NumPy's behavior, eager and jitted."""
    rng = np.random.RandomState(0)
    args_maker = lambda: [rng.randn(3, 4).astype("float32")]

    def np_op(x):
        return np.asarray(x).astype(jnp.int32)

    def jnp_op(x):
        return jnp.asarray(x).astype(jnp.int32)

    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in array_shapes
    for dtype in all_dtypes))
def testNbytes(self, shape, dtype):
    """The .nbytes attribute on jax arrays matches NumPy for every shape/dtype."""
    rng = jtu.rand_default(self.rng())
    np_op = lambda x: np.asarray(x).nbytes
    jnp_op = lambda x: jnp.asarray(x).nbytes
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_dtype={}".format(
jtu.format_shape_dtype_string(shape, a_dtype), dtype),
| |
<filename>airflow/configuration.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import json
import logging
import multiprocessing
import os
import pathlib
import re
import shlex
import subprocess
import sys
import warnings
from base64 import b64encode
from collections import OrderedDict
# Ignored Mypy on configparser because it thinks the configparser module has no _UNSET attribute
from configparser import _UNSET, ConfigParser, NoOptionError, NoSectionError # type: ignore
from json.decoder import JSONDecodeError
from typing import Any, Dict, List, Optional, Tuple, Union
from airflow.exceptions import AirflowConfigException
from airflow.secrets import DEFAULT_SECRETS_SEARCH_PATH, BaseSecretsBackend
from airflow.utils import yaml
from airflow.utils.module_loading import import_string
from airflow.utils.weight_rule import WeightRule
log = logging.getLogger(__name__)

# show Airflow's deprecation warnings
if not sys.warnoptions:
    # Only touch the filters when the user has not configured -W themselves.
    warnings.filterwarnings(action='default', category=DeprecationWarning, module='airflow')
    warnings.filterwarnings(action='default', category=PendingDeprecationWarning, module='airflow')
_SQLITE3_VERSION_PATTERN = re.compile(r"(?P<version>^\d+(?:\.\d+)*)\D?.*$")
def _parse_sqlite_version(s: str) -> Tuple[int, ...]:
match = _SQLITE3_VERSION_PATTERN.match(s)
if match is None:
return ()
return tuple(int(p) for p in match.group("version").split("."))
def expand_env_var(env_var):
    """
    Expand (potentially nested) environment variable references.

    Repeatedly applies ``os.path.expandvars`` and ``os.path.expanduser``
    until the value stops changing, so a variable that expands to another
    reference is fully resolved. Falsy input (None, '') is returned as-is.
    """
    if not env_var:
        return env_var
    current = str(env_var)
    while True:
        expanded = os.path.expanduser(os.path.expandvars(current))
        if expanded == current:
            return expanded
        current = expanded
def run_command(command):
    """Run *command* in a subprocess and return its decoded stdout.

    Raises AirflowConfigException when the command exits non-zero, with the
    captured stdout/stderr included in the message.
    """
    process = subprocess.Popen(
        shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
    )
    raw_out, raw_err = process.communicate()
    encoding = sys.getdefaultencoding()
    output = raw_out.decode(encoding, 'ignore')
    stderr = raw_err.decode(encoding, 'ignore')

    if process.returncode != 0:
        raise AirflowConfigException(
            f"Cannot execute {command}. Error code is: {process.returncode}. "
            f"Output: {output}, Stderr: {stderr}"
        )

    return output
def _get_config_value_from_secret_backend(config_key):
    """Fetch a config option's value from the custom secrets backend.

    Returns None when no custom backend is configured; any backend failure is
    re-raised as AirflowConfigException so misconfiguration surfaces clearly.
    """
    try:
        backend = get_custom_secret_backend()
        if not backend:
            return None
        return backend.get_config(config_key)
    except Exception as e:
        raise AirflowConfigException(
            'Cannot retrieve config from alternative secrets backend. '
            'Make sure it is configured properly and that the Backend '
            'is accessible.\n'
            f'{e}'
        )
def _default_config_file_path(file_name: str):
templates_dir = os.path.join(os.path.dirname(__file__), 'config_templates')
return os.path.join(templates_dir, file_name)
def default_config_yaml() -> List[Dict[str, Any]]:
    """
    Read Airflow configs from YAML file

    :return: Python dictionary containing configs & their info
    """
    config_path = _default_config_file_path('config.yml')
    with open(config_path) as config_file:
        return yaml.safe_load(config_file)
class AirflowConfigParser(ConfigParser):
    """Custom Airflow Configparser supporting defaults and deprecated options.

    Extends :class:`configparser.ConfigParser` with extra lookup layers:
    environment variables, ``{key}_cmd`` command options and ``{key}_secret``
    secrets-backend options for sensitive values, plus deprecation shims that
    map old section/option names and old default values to their replacements.
    """

    # These configuration elements can be fetched as the stdout of commands
    # following the "{section}__{name}__cmd" pattern, the idea behind this
    # is to not store password on boxes in text files.
    # These configs can also be fetched from Secrets backend
    # following the "{section}__{name}__secret" pattern
    sensitive_config_values = {
        ('core', 'sql_alchemy_conn'),
        ('core', 'fernet_key'),
        ('celery', 'broker_url'),
        ('celery', 'flower_basic_auth'),
        ('celery', 'result_backend'),
        ('atlas', 'password'),
        ('smtp', 'smtp_password'),
        ('webserver', 'secret_key'),
    }
# A mapping of (new section, new option) -> (old section, old option, since_version).
# When reading new option, the old option will be checked to see if it exists. If it does a
# DeprecationWarning will be issued and the old option will be used instead
deprecated_options = {
('celery', 'worker_precheck'): ('core', 'worker_precheck', '2.0.0'),
('logging', 'base_log_folder'): ('core', 'base_log_folder', '2.0.0'),
('logging', 'remote_logging'): ('core', 'remote_logging', '2.0.0'),
('logging', 'remote_log_conn_id'): ('core', 'remote_log_conn_id', '2.0.0'),
('logging', 'remote_base_log_folder'): ('core', 'remote_base_log_folder', '2.0.0'),
('logging', 'encrypt_s3_logs'): ('core', 'encrypt_s3_logs', '2.0.0'),
('logging', 'logging_level'): ('core', 'logging_level', '2.0.0'),
('logging', 'fab_logging_level'): ('core', 'fab_logging_level', '2.0.0'),
('logging', 'logging_config_class'): ('core', 'logging_config_class', '2.0.0'),
('logging', 'colored_console_log'): ('core', 'colored_console_log', '2.0.0'),
('logging', 'colored_log_format'): ('core', 'colored_log_format', '2.0.0'),
('logging', 'colored_formatter_class'): ('core', 'colored_formatter_class', '2.0.0'),
('logging', 'log_format'): ('core', 'log_format', '2.0.0'),
('logging', 'simple_log_format'): ('core', 'simple_log_format', '2.0.0'),
('logging', 'task_log_prefix_template'): ('core', 'task_log_prefix_template', '2.0.0'),
('logging', 'log_filename_template'): ('core', 'log_filename_template', '2.0.0'),
('logging', 'log_processor_filename_template'): ('core', 'log_processor_filename_template', '2.0.0'),
('logging', 'dag_processor_manager_log_location'): (
'core',
'dag_processor_manager_log_location',
'2.0.0',
),
('logging', 'task_log_reader'): ('core', 'task_log_reader', '2.0.0'),
('metrics', 'statsd_on'): ('scheduler', 'statsd_on', '2.0.0'),
('metrics', 'statsd_host'): ('scheduler', 'statsd_host', '2.0.0'),
('metrics', 'statsd_port'): ('scheduler', 'statsd_port', '2.0.0'),
('metrics', 'statsd_prefix'): ('scheduler', 'statsd_prefix', '2.0.0'),
('metrics', 'statsd_allow_list'): ('scheduler', 'statsd_allow_list', '2.0.0'),
('metrics', 'stat_name_handler'): ('scheduler', 'stat_name_handler', '2.0.0'),
('metrics', 'statsd_datadog_enabled'): ('scheduler', 'statsd_datadog_enabled', '2.0.0'),
('metrics', 'statsd_datadog_tags'): ('scheduler', 'statsd_datadog_tags', '2.0.0'),
('metrics', 'statsd_custom_client_path'): ('scheduler', 'statsd_custom_client_path', '2.0.0'),
('scheduler', 'parsing_processes'): ('scheduler', 'max_threads', '1.10.14'),
('scheduler', 'scheduler_idle_sleep_time'): ('scheduler', 'processor_poll_interval', '2.2.0'),
('operators', 'default_queue'): ('celery', 'default_queue', '2.1.0'),
('core', 'hide_sensitive_var_conn_fields'): ('admin', 'hide_sensitive_variable_fields', '2.1.0'),
('core', 'sensitive_var_conn_names'): ('admin', 'sensitive_variable_fields', '2.1.0'),
('core', 'default_pool_task_slot_count'): ('core', 'non_pooled_task_slot_count', '1.10.4'),
('core', 'max_active_tasks_per_dag'): ('core', 'dag_concurrency', '2.2.0'),
('logging', 'worker_log_server_port'): ('celery', 'worker_log_server_port', '2.2.0'),
('api', 'access_control_allow_origins'): ('api', 'access_control_allow_origin', '2.2.0'),
}
# A mapping of old default values that we want to change and warn the user
# about. Mapping of section -> setting -> { old, replace, by_version }
deprecated_values = {
'core': {
'hostname_callable': (re.compile(r':'), r'.', '2.1'),
},
'webserver': {
'navbar_color': (re.compile(r'\A#007A87\Z', re.IGNORECASE), '#fff', '2.1'),
},
'email': {
'email_backend': (
re.compile(r'^airflow\.contrib\.utils\.sendgrid\.send_email$'),
r'airflow.providers.sendgrid.utils.emailer.send_email',
'2.1',
),
},
}
# Log levels accepted by the logging options listed in enums_options below.
_available_logging_levels = ['CRITICAL', 'FATAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG']

# Options restricted to a closed set of values; enforced by _validate_enums().
enums_options = {
    ("core", "default_task_weight_rule"): sorted(WeightRule.all_weight_rules()),
    ('core', 'mp_start_method'): multiprocessing.get_all_start_methods(),
    ("scheduler", "file_parsing_sort_mode"): ["modified_time", "random_seeded_by_host", "alphabetical"],
    ("logging", "logging_level"): _available_logging_levels,
    ("logging", "fab_logging_level"): _available_logging_levels,
}
def optionxform(self, optionstr: str) -> str:
    """Return option names unchanged on read/get/set.

    ConfigParser lowercases option names by default; Airflow overrides that
    to be case-preserving.
    """
    return optionstr
def __init__(self, default_config=None, *args, **kwargs):
    """Create the parser; *default_config* is the raw text of the shipped defaults."""
    super().__init__(*args, **kwargs)
    # Defaults are kept in a separate plain ConfigParser so user-provided
    # values never overwrite them.
    self.airflow_defaults = ConfigParser(*args, **kwargs)
    if default_config is not None:
        self.airflow_defaults.read_string(default_config)

    # Flipped to True by validate().
    self.is_validated = False
def validate(self):
    """Run all config validations and rewrite deprecated values in place.

    For every entry in ``deprecated_values`` whose pattern still matches the
    current setting, substitute the replacement, push it into the running
    config (clearing any env-var override) and emit a FutureWarning.
    """
    self._validate_config_dependencies()
    self._validate_enums()

    for section, replacement in self.deprecated_values.items():
        for name, info in replacement.items():
            old, new, version = info
            current_value = self.get(section, name, fallback="")
            if self._using_old_value(old, current_value):
                new_value = old.sub(new, current_value)
                self._update_env_var(section=section, name=name, new_value=new_value)
                self._create_future_warning(
                    name=name,
                    section=section,
                    current_value=current_value,
                    new_value=new_value,
                    version=version,
                )

    self.is_validated = True
def _validate_enums(self):
    """Raise AirflowConfigException when an enum-typed option holds an unlisted value."""
    for (section_key, option_key), allowed_values in self.enums_options.items():
        if not self.has_option(section_key, option_key):
            continue
        value = self.get(section_key, option_key)
        if value in allowed_values:
            continue
        raise AirflowConfigException(
            f"`[{section_key}] {option_key}` should not be "
            + f"{value!r}. Possible values: {', '.join(allowed_values)}."
        )
def _validate_config_dependencies(self):
    """
    Validate that config values aren't invalid given other config values
    or system-level limitations and requirements.

    Currently checked: sqlite may only back the single-process executors,
    and the sqlite C library must be new enough for rendered-field storage.
    """
    is_executor_without_sqlite_support = self.get("core", "executor") not in (
        'DebugExecutor',
        'SequentialExecutor',
    )
    # Substring check: matches any SQLAlchemy URL whose scheme mentions sqlite.
    is_sqlite = "sqlite" in self.get('core', 'sql_alchemy_conn')
    if is_sqlite and is_executor_without_sqlite_support:
        raise AirflowConfigException(f"error: cannot use sqlite with the {self.get('core', 'executor')}")
    if is_sqlite:
        import sqlite3

        from airflow.utils.docs import get_docs_url

        # Some of the features in storing rendered fields require sqlite version >= 3.15.0
        min_sqlite_version = (3, 15, 0)
        if _parse_sqlite_version(sqlite3.sqlite_version) < min_sqlite_version:
            min_sqlite_version_str = ".".join(str(s) for s in min_sqlite_version)
            raise AirflowConfigException(
                f"error: sqlite C library version too old (< {min_sqlite_version_str}). "
                f"See {get_docs_url('howto/set-up-database.html#setting-up-a-sqlite-database')}"
            )
def _using_old_value(self, old, current_value):
return old.search(current_value) is not None
def _update_env_var(self, section, name, new_value):
    """Write *new_value* into the running config for (section, name).

    Any matching AIRFLOW__... environment variable is removed first, since
    it would otherwise shadow the value being set here.
    """
    os.environ.pop(self._env_var_name(section, name), None)
    if not self.has_section(section):
        self.add_section(section)
    self.set(section, name, new_value)
@staticmethod
def _create_future_warning(name, section, current_value, new_value, version):
warnings.warn(
f'The {name!r} setting in [{section}] has the old default value of {current_value!r}. '
f'This value has been changed to {new_value!r} in the running config, but '
f'please update your config before Apache Airflow {version}.',
FutureWarning,
)
# Prefix shared by all Airflow environment-variable overrides.
ENV_VAR_PREFIX = 'AIRFLOW__'

def _env_var_name(self, section: str, key: str) -> str:
    """Build the AIRFLOW__{SECTION}__{KEY} environment variable name for an option."""
    return f'{self.ENV_VAR_PREFIX}{section.upper()}__{key.upper()}'
def _get_env_var_option(self, section, key):
    """Resolve an option from environment variables, or return None.

    Checked in order: AIRFLOW__{S}__{K} (literal value, env-var expanded),
    AIRFLOW__{S}__{K}_CMD (stdout of a command; sensitive options only) and
    AIRFLOW__{S}__{K}_SECRET (secrets backend; sensitive options only).
    """
    # must have format AIRFLOW__{SECTION}__{KEY} (note double underscore)
    env_var = self._env_var_name(section, key)
    if env_var in os.environ:
        return expand_env_var(os.environ[env_var])
    # alternatively AIRFLOW__{SECTION}__{KEY}_CMD (for a command)
    env_var_cmd = env_var + '_CMD'
    if env_var_cmd in os.environ:
        # if this is a valid command key...
        if (section, key) in self.sensitive_config_values:
            return run_command(os.environ[env_var_cmd])
    # alternatively AIRFLOW__{SECTION}__{KEY}_SECRET (to get from Secrets Backend)
    env_var_secret_path = env_var + '_SECRET'
    if env_var_secret_path in os.environ:
        # if this is a valid secret path...
        if (section, key) in self.sensitive_config_values:
            return _get_config_value_from_secret_backend(os.environ[env_var_secret_path])
    return None
def _get_cmd_option(self, section, key):
    """Resolve a sensitive option via its "{key}_cmd" companion option.

    Returns the stdout of the configured command, or None when the option
    is not sensitive or no "_cmd" fallback is configured.
    """
    fallback_key = key + '_cmd'
    if (section, key) not in self.sensitive_config_values:
        return None
    if not super().has_option(section, fallback_key):
        return None
    return run_command(super().get(section, fallback_key))
def _get_secret_option(self, section, key):
"""Get Config option values from Secret Backend"""
fallback_key = key + '_secret'
# if this is a valid secret key...
| |
<filename>_build/jupyter_execute/01_linda_soln.py<gh_stars>0
# Bite Size Bayes
Copyright 2020 <NAME>
License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
# Load utils.py
import os
if not os.path.exists('utils.py'):
!wget https://github.com/AllenDowney/BiteSizeBayes/raw/master/utils.py
# Load the data file
if not os.path.exists('gss_bayes.csv'):
!wget https://github.com/AllenDowney/BiteSizeBayes/raw/master/gss_bayes.csv
import pandas as pd
import numpy as np
from utils import values
## Introduction
This notebook takes a computational approach to understanding probability. We'll use data from the General Social Survey to compute the probability of propositions like:
* If I choose a random survey respondent, what is the probability they are female?
* If I choose a random survey respondent, what is the probability they work in banking?
From there, we will explore two related concepts:
* Conjunction, which is the probability that two propositions are both true; for example, what is the probability of choosing a female banker?
* Conditional probability, which is the probability that one proposition is true, given that another is true; for example, given that a respondent is female, what is the probability that she is a banker?
I chose these examples because they are related to a famous experiment by Tversky and Kahneman, who posed the following question:
> Linda is 31 years old, single, outspoken, and very bright. She majored in philosophy. As a student, she was deeply concerned with issues of discrimination and social justice, and also participated in anti-nuclear demonstrations. Which is more probable?
1. Linda is a bank teller.
2. Linda is a bank teller and is active in the feminist movement.
Many people choose the second answer, presumably because it seems more consistent with the description. It seems unlikely that Linda would be *just* a bank teller; if she is a bank teller, it seems likely that she would also be a feminist.
But the second answer cannot be "more probable", as the question asks. Suppose we find 1000 people who fit Linda's description and 10 of them work as bank tellers. How many of them are also feminists? At most, all 10 of them are; in that case, the two options are *equally* likely. More likely, only some of them are; in that case the second option is *less* likely. But there can't be more than 10 out of 10, so the second option cannot be more likely.
The error people make if they choose the second option is called the [conjunction fallacy](https://en.wikipedia.org/wiki/Conjunction_fallacy). It's called a [fallacy](https://en.wikipedia.org/wiki/Fallacy) because it's a logical error and "conjunction" because "bank teller AND feminist" is a [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction).
If this example makes you uncomfortable, you are in good company. The biologist [<NAME> wrote](https://sci-hub.tw/https://doi.org/10.1080/09332480.1989.10554932) :
> I am particularly fond of this example because I know that the [second] statement is least probable, yet a little [homunculus](https://en.wikipedia.org/wiki/Homunculus_argument) in my head continues to jump up and down, shouting at me, "but she can't just be a bank teller; read the description."
If the little person in your head is still unhappy, maybe this notebook will help.
## Probability
At this point I should define probability, but that [turns out to be surprisingly difficult](https://en.wikipedia.org/wiki/Probability_interpretations). To avoid getting bogged down before we get started, I'll start with a simple definition: a **probability** is a **fraction** of a dataset.
For example, if we survey 1000 people, and 20 of them are bank tellers, the fraction that work as bank tellers is 0.02 or 2\%. If we choose a person from this population at random, the probability that they are a bank teller is 2\%.
(By "at random" I mean that every person in the dataset has the same chance of being chosen, and by "they" I mean the [singular, gender-neutral pronoun](https://en.wikipedia.org/wiki/Singular_they), which is a correct and useful feature of English.)
With this definition and an appropriate dataset, we can compute probabilities by counting.
To demonstrate, I'll use a data set from the [General Social Survey](http://gss.norc.org/) or GSS. The following cell reads the data.
gss = pd.read_csv('gss_bayes.csv', index_col=0)
The result is a Pandas DataFrame with one row for each person surveyed and one column for each variable I selected.
Here are the number of rows and columns:
gss.shape
And here are the first few rows:
gss.head()
The columns are
* `caseid`: Respondent id (which is the index of the table).
* `year`: Year when the respondent was surveyed.
* `age`: Respondent's age when surveyed.
* `sex`: Male or female.
* `polviews`: Political views on a range from liberal to conservative.
* `partyid`: Political party affiliation, Democrat, Independent, or Republican.
* `indus10`: [Code](https://www.census.gov/cgi-bin/sssd/naics/naicsrch?chart=2007) for the industry the respondent works in.
Let's look at these variables in more detail, starting with `indus10`.
## Banking
The code for "Banking and related activities" is 6870, so we can select bankers like this:
banker = (gss['indus10'] == 6870)
The result is a Boolean series, which is a Pandas Series that contains the values `True` and `False`. Here are the first few entries:
banker.head()
We can use `values` to see how many times each value appears.
values(banker)
In this dataset, there are 728 bankers.
If we use the `sum` function on this Series, it treats `True` as 1 and `False` as 0, so the total is the number of bankers.
banker.sum()
To compute the *fraction* of bankers, we can divide by the number of people in the dataset:
banker.sum() / banker.size
But we can also use the `mean` function, which computes the fraction of `True` values in the Series:
banker.mean()
About 1.5% of the respondents work in banking.
That means if we choose a random person from the dataset, the probability they are a banker is about 1.5%.
**Exercise**: The values of the column `sex` are encoded like this:
```
1 Male
2 Female
```
The following cell creates a Boolean series that is `True` for female respondents and `False` otherwise.
female = (gss['sex'] == 2)
* Use `values` to display the number of `True` and `False` values in `female`.
* Use `sum` to count the number of female respondents.
* Use `mean` to compute the fraction of female respondents.
# Solution
values(gss['sex'])
# Solution
female.sum()
# Solution
female.mean()
The fraction of women in this dataset is higher than in the adult U.S. population because [the GSS does not include people living in institutions](https://gss.norc.org/faq), including prisons and military housing, and those populations are more likely to be male.
**Exercise:** The designers of the General Social Survey chose to represent sex as a binary variable. What alternatives might they have considered? What are the advantages and disadvantages of their choice?
For more on this topic, you might be interested in this article: Westbrook and Saperstein, [New categories are not enough: rethinking the measurement of sex and gender in social surveys](https://sci-hub.tw/10.1177/0891243215584758)
## Political views
The values of `polviews` are on a seven-point scale:
```
1 Extremely liberal
2 Liberal
3 Slightly liberal
4 Moderate
5 Slightly conservative
6 Conservative
7 Extremely conservative
```
Here are the number of people who gave each response:
values(gss['polviews'])
I'll define `liberal` to be `True` for anyone whose response is "Extremely liberal", "Liberal", or "Slightly liberal".
liberal = (gss['polviews'] < 4)
Here are the number of `True` and `False` values:
values(liberal)
And the fraction of respondents who are "liberal".
liberal.mean()
If we choose a random person in this dataset, the probability they are liberal is about 27%.
## The probability function
To summarize what we have done so far:
* To represent a logical proposition like "this respondent is liberal", we are using a Boolean series, which contains the values `True` and `False`.
* To compute the probability that a proposition is true, we are using the `mean` function, which computes the fraction of `True` values in a series.
To make this computation more explicit, I'll define a function that takes a Boolean series and returns a probability:
def prob(A):
    """Return the probability of the proposition encoded by `A`.

    A: Boolean series whose `True` entries mark the outcomes where the
       proposition holds.

    returns: the fraction of `True` values, as a probability
    """
    assert isinstance(A, pd.Series)
    assert A.dtype == 'bool'
    return A.mean()
The `assert` statements check whether `A` is a Boolean series. If it is not, they raise an `AssertionError`.
Using this function to compute probabilities makes the code more readable. Here are the probabilities for the propositions we have computed so far.
prob(banker)
prob(female)
prob(liberal)
**Exercise**: The values of `partyid` are encoded like this:
```
0 Strong democrat
1 Not str democrat
2 Ind,near dem
3 Independent
4 Ind,near rep
5 Not str republican
6 Strong republican
7 Other party
```
I'll define `democrat` to include respondents who chose "Strong democrat" or "Not str democrat":
democrat = (gss['partyid'] <= 1)
* Use `mean` to compute the fraction of Democrats in this dataset.
* Use `prob` to compute | |
enumeration that specifies
whether the search operation should include only the current directory or
should include all subdirectories.The default value is
System.IO.SearchOption.TopDirectoryOnly.
Returns: An enumerable collection of directory names in the directory specified by path
and that match searchPattern and searchOption.
EnumerateDirectories(path: str, searchPattern: str) -> IEnumerable[str]
Returns an enumerable collection of directory names that match a search pattern
in a specified path.
path: The directory to search.
searchPattern: The search string to match against the names of directories in path.
Returns: An enumerable collection of directory names in the directory specified by path
and that match searchPattern.
EnumerateDirectories(path: str) -> IEnumerable[str]
Returns an enumerable collection of directory names in a specified path.
path: The directory to search.
Returns: An enumerable collection of directory names in the directory specified by path.
"""
pass
@staticmethod
def EnumerateFiles(path, searchPattern=None, searchOption=None):
    """
    EnumerateFiles(path: str) -> IEnumerable[str]
    EnumerateFiles(path: str, searchPattern: str) -> IEnumerable[str]
    EnumerateFiles(path: str, searchPattern: str, searchOption: SearchOption) -> IEnumerable[str]

    Returns an enumerable collection of the file names in the directory
    *path* that match *searchPattern*. *searchOption* (a
    System.IO.SearchOption value, default TopDirectoryOnly) controls whether
    the search also descends into subdirectories.
    """
    pass
@staticmethod
def EnumerateFileSystemEntries(path, searchPattern=None, searchOption=None):
    """
    EnumerateFileSystemEntries(path: str) -> IEnumerable[str]
    EnumerateFileSystemEntries(path: str, searchPattern: str) -> IEnumerable[str]
    EnumerateFileSystemEntries(path: str, searchPattern: str, searchOption: SearchOption) -> IEnumerable[str]

    Returns an enumerable collection of file-system entries (both file and
    directory names) in the directory *path* that match *searchPattern*.
    *searchOption* (a System.IO.SearchOption value, default
    TopDirectoryOnly) controls whether subdirectories are searched as well.
    """
    pass
@staticmethod
def Exists(path):
    """
    Exists(path: str) -> bool

    Determines whether *path* refers to an existing directory on disk,
    returning True when it does and False otherwise.
    """
    pass
@staticmethod
def GetAccessControl(path, includeSections=None):
    """
    GetAccessControl(path: str) -> DirectorySecurity
    GetAccessControl(path: str, includeSections: AccessControlSections) -> DirectorySecurity

    Returns a System.Security.AccessControl.DirectorySecurity object that
    encapsulates the access control list (ACL) entries for the directory at
    *path*. When *includeSections* (an AccessControlSections value) is
    given, only the specified type of ACL information is retrieved.
    """
    pass
@staticmethod
def GetCreationTime(path):
    """
    GetCreationTime(path: str) -> DateTime

    Returns a System.DateTime set to the creation date and time of the
    directory at *path*, expressed in local time.
    """
    pass
@staticmethod
def GetCreationTimeUtc(path):
    """
    GetCreationTimeUtc(path: str) -> DateTime

    Returns a System.DateTime set to the creation date and time of the
    directory at *path*, expressed in Coordinated Universal Time (UTC).
    """
    pass
@staticmethod
def GetCurrentDirectory():
    """
    GetCurrentDirectory() -> str

    Returns the path of the application's current working directory; the
    returned string does not end with a backslash.
    """
    pass
@staticmethod
def GetDirectories(path, searchPattern=None, searchOption=None):
    """
    GetDirectories(path: str) -> Array[str]
    GetDirectories(path: str, searchPattern: str) -> Array[str]
    GetDirectories(path: str, searchPattern: str, searchOption: SearchOption) -> Array[str]

    Returns a string array of subdirectory names (including their paths)
    under *path* that match *searchPattern*. The pattern cannot end in two
    periods ("..") or contain ".." followed by a directory separator, nor
    contain any character from System.IO.Path.InvalidPathChars.
    *searchOption* (a System.IO.SearchOption value) selects whether all
    subdirectories or only the current directory are searched.
    """
    pass
@staticmethod
def GetDirectoryRoot(path):
    """
    GetDirectoryRoot(path: str) -> str

    Returns a string containing the volume information, root information,
    or both for the file or directory at *path*.
    """
    pass
@staticmethod
def GetFiles(path, searchPattern=None, searchOption=None):
"""
GetFiles(path: str, searchPattern: str, searchOption: SearchOption) -> Array[str]
Returns the names of files (including their paths) that match the specified
search pattern in the specified directory, using a value to determine whether
to search subdirectories.
| |
<gh_stars>1000+
import logging
import uuid
from typing import Any
from typing import Dict
from typing import Generator
from typing import List
from typing import Tuple
import neo4j
from azure.core.exceptions import ClientAuthenticationError
from azure.core.exceptions import HttpResponseError
from azure.core.exceptions import ResourceNotFoundError
from azure.mgmt.cosmosdb import CosmosDBManagementClient
from .util.credentials import Credentials
from cartography.util import run_cleanup_job
from cartography.util import timeit
logger = logging.getLogger(__name__)
@timeit
def get_client(credentials: Credentials, subscription_id: str) -> CosmosDBManagementClient:
    """Build a CosmosDB management client scoped to the given subscription."""
    return CosmosDBManagementClient(credentials, subscription_id)
@timeit
def get_database_account_list(credentials: Credentials, subscription_id: str) -> List[Dict]:
    """Fetch every CosmosDB database account in the subscription as a plain
    dict, tagging each with its 'resourceGroup'; returns [] on API errors."""
    try:
        client = get_client(credentials, subscription_id)
        accounts = [account.as_dict() for account in client.database_accounts.list()]
    # ClientAuthenticationError and ResourceNotFoundError are subclasses under HttpResponseError
    except ClientAuthenticationError:
        logger.warning('Client Authentication Error while retrieving database accounts', exc_info=True)
        return []
    except ResourceNotFoundError:
        logger.warning('Database Account not found error', exc_info=True)
        return []
    except HttpResponseError:
        logger.warning('Error while retrieving database accounts', exc_info=True)
        return []

    # The resource group name is the path segment right after 'resourceGroups'
    # in the Azure resource id.
    for account in accounts:
        segments = account['id'].split('/')
        account['resourceGroup'] = segments[segments.index('resourceGroups') + 1]
    return accounts
@timeit
def transform_database_account_data(database_account_list: List[Dict]) -> List[Dict]:
    """Flatten each account's nested 'ip_rules' and 'capabilities' structures
    into plain string lists ('ipruleslist' / 'list_of_capabilities') so the
    neo4j ingestion query can store them as node properties."""
    for account in database_account_list:
        account['ipruleslist'] = [
            rule['ip_address_or_range'] for rule in account.get('ip_rules') or []
        ]
        account['list_of_capabilities'] = [
            capability['name'] for capability in account.get('capabilities') or []
        ]
    return database_account_list
@timeit
def load_database_account_data(
    neo4j_session: neo4j.Session, subscription_id: str, database_account_list: List[Dict], azure_update_tag: int,
) -> None:
    """
    Ingest data of all database accounts into neo4j.

    MERGEs one AzureCosmosDBAccount node per account (keyed on the Azure
    resource id) and attaches it to its AzureSubscription node via a
    RESOURCE relationship. Immutable facts are set ON CREATE; mutable
    properties are refreshed on every sync run via `azure_update_tag`.
    """
    # NOTE(review): the {param} placeholder style is neo4j driver 1.x syntax.
    # The consistency_policy.* accesses assume the API returned that nested
    # dict; missing sub-fields resolve to null in Cypher rather than erroring.
    ingest_database_account = """
    UNWIND {database_accounts_list} AS da
    MERGE (d:AzureCosmosDBAccount{id: da.id})
    ON CREATE SET d.firstseen = timestamp(),
    d.type = da.type, d.resourcegroup = da.resourceGroup,
    d.location = da.location
    SET d.lastupdated = {azure_update_tag},
    d.kind = da.kind,
    d.name = da.name,
    d.ipranges = da.ipruleslist,
    d.capabilities = da.list_of_capabilities,
    d.documentendpoint = da.document_endpoint,
    d.virtualnetworkfilterenabled = da.is_virtual_network_filter_enabled,
    d.enableautomaticfailover = da.enable_automatic_failover,
    d.provisioningstate = da.provisioning_state,
    d.multiplewritelocations = da.enable_multiple_write_locations,
    d.accountoffertype = da.database_account_offer_type,
    d.publicnetworkaccess = da.public_network_access,
    d.enablecassandraconnector = da.enable_cassandra_connector,
    d.connectoroffer = da.connector_offer,
    d.disablekeybasedmetadatawriteaccess = da.disable_key_based_metadata_write_access,
    d.keyvaulturi = da.key_vault_key_uri,
    d.enablefreetier = da.enable_free_tier,
    d.enableanalyticalstorage = da.enable_analytical_storage,
    d.defaultconsistencylevel = da.consistency_policy.default_consistency_level,
    d.maxstalenessprefix = da.consistency_policy.max_staleness_prefix,
    d.maxintervalinseconds = da.consistency_policy.max_interval_in_seconds
    WITH d
    MATCH (owner:AzureSubscription{id: {AZURE_SUBSCRIPTION_ID}})
    MERGE (owner)-[r:RESOURCE]->(d)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = {azure_update_tag}
    """
    neo4j_session.run(
        ingest_database_account,
        database_accounts_list=database_account_list,
        AZURE_SUBSCRIPTION_ID=subscription_id,
        azure_update_tag=azure_update_tag,
    )
@timeit
def sync_database_account_data_resources(
    neo4j_session: neo4j.Session, subscription_id: str, database_account_list: List[Dict], azure_update_tag: int,
) -> None:
    """Ingest the sub-resources embedded in each database account response:
    CORS policy, failover policies, private endpoint connections, virtual
    network rules, and the write/read/associated locations."""
    loaders = (
        _load_cosmosdb_cors_policy,
        _load_cosmosdb_failover_policies,
        _load_cosmosdb_private_endpoint_connections,
        _load_cosmosdb_virtual_network_rules,
        _load_database_account_write_locations,
        _load_database_account_read_locations,
        _load_database_account_associated_locations,
    )
    for database_account in database_account_list:
        for load in loaders:
            load(neo4j_session, database_account, azure_update_tag)
@timeit
def _load_database_account_write_locations(
    neo4j_session: neo4j.Session, database_account: Dict, azure_update_tag: int,
) -> None:
    """
    Ingest the details of location with write permission enabled.

    Creates one AzureCosmosDBLocation node per entry in 'write_locations'
    and links the account to it with CAN_WRITE_FROM. No-op when the account
    response carries no write locations.
    """
    if 'write_locations' in database_account and len(database_account['write_locations']) > 0:
        database_account_id = database_account['id']
        write_locations = database_account['write_locations']
        # MERGE on the location id keeps nodes unique across accounts and runs.
        ingest_write_location = """
        UNWIND {write_locations_list} as wl
        MERGE (loc:AzureCosmosDBLocation{id: wl.id})
        ON CREATE SET loc.firstseen = timestamp()
        SET loc.lastupdated = {azure_update_tag},
        loc.locationname = wl.location_name,
        loc.documentendpoint = wl.document_endpoint,
        loc.provisioningstate = wl.provisioning_state,
        loc.failoverpriority = wl.failover_priority,
        loc.iszoneredundant = wl.is_zone_redundant
        WITH loc
        MATCH (d:AzureCosmosDBAccount{id: {DatabaseAccountId}})
        MERGE (d)-[r:CAN_WRITE_FROM]->(loc)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {azure_update_tag}
        """
        neo4j_session.run(
            ingest_write_location,
            write_locations_list=write_locations,
            DatabaseAccountId=database_account_id,
            azure_update_tag=azure_update_tag,
        )
@timeit
def _load_database_account_read_locations(
    neo4j_session: neo4j.Session, database_account: Dict, azure_update_tag: int,
) -> None:
    """
    Ingest the details of location with read permission enabled.

    Creates one AzureCosmosDBLocation node per entry in 'read_locations'
    and links the account to it with CAN_READ_FROM. No-op when the account
    response carries no read locations.
    """
    if 'read_locations' in database_account and len(database_account['read_locations']) > 0:
        database_account_id = database_account['id']
        read_locations = database_account['read_locations']
        # Same node label as the write/associated loaders, so a location seen
        # by several loaders is merged into a single node.
        ingest_read_location = """
        UNWIND {read_locations_list} as rl
        MERGE (loc:AzureCosmosDBLocation{id: rl.id})
        ON CREATE SET loc.firstseen = timestamp()
        SET loc.lastupdated = {azure_update_tag},
        loc.locationname = rl.location_name,
        loc.documentendpoint = rl.document_endpoint,
        loc.provisioningstate = rl.provisioning_state,
        loc.failoverpriority = rl.failover_priority,
        loc.iszoneredundant = rl.is_zone_redundant
        WITH loc
        MATCH (d:AzureCosmosDBAccount{id: {DatabaseAccountId}})
        MERGE (d)-[r:CAN_READ_FROM]->(loc)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {azure_update_tag}
        """
        neo4j_session.run(
            ingest_read_location,
            read_locations_list=read_locations,
            DatabaseAccountId=database_account_id,
            azure_update_tag=azure_update_tag,
        )
@timeit
def _load_database_account_associated_locations(
    neo4j_session: neo4j.Session, database_account: Dict, azure_update_tag: int,
) -> None:
    """
    Ingest the details of enabled location for the database account.

    Creates one AzureCosmosDBLocation node per entry in 'locations' and
    links the account to it with ASSOCIATED_WITH. No-op when the account
    response carries no locations.
    """
    if 'locations' in database_account and len(database_account['locations']) > 0:
        database_account_id = database_account['id']
        associated_locations = database_account['locations']
        # Same node label as the read/write loaders, so shared locations
        # collapse into a single node with multiple relationships.
        ingest_associated_location = """
        UNWIND {associated_locations_list} as al
        MERGE (loc:AzureCosmosDBLocation{id: al.id})
        ON CREATE SET loc.firstseen = timestamp()
        SET loc.lastupdated = {azure_update_tag},
        loc.locationname = al.location_name,
        loc.documentendpoint = al.document_endpoint,
        loc.provisioningstate = al.provisioning_state,
        loc.failoverpriority = al.failover_priority,
        loc.iszoneredundant = al.is_zone_redundant
        WITH loc
        MATCH (d:AzureCosmosDBAccount{id: {DatabaseAccountId}})
        MERGE (d)-[r:ASSOCIATED_WITH]->(loc)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {azure_update_tag}
        """
        neo4j_session.run(
            ingest_associated_location,
            associated_locations_list=associated_locations,
            DatabaseAccountId=database_account_id,
            azure_update_tag=azure_update_tag,
        )
@timeit
def transform_cosmosdb_cors_policy(database_account: Dict) -> Dict:
    """Ensure every CORS policy entry of the account carries a
    'cors_policy_unique_id' key; entries lacking one get a freshly
    generated UUID string (the Azure API returns no id for CORS policies,
    and the graph needs a unique key to MERGE on)."""
    for cors_policy in database_account['cors']:
        cors_policy.setdefault('cors_policy_unique_id', str(uuid.uuid4()))
    return database_account
@timeit
def _load_cosmosdb_cors_policy(
    neo4j_session: neo4j.Session, database_account: Dict, azure_update_tag: int,
) -> None:
    """
    Ingest the details of the Cors Policy of the database account.

    Runs transform_cosmosdb_cors_policy first so every policy has a
    synthetic unique id to MERGE on, then links each policy node to the
    account with CONTAINS. No-op when the account has no 'cors' entries.
    """
    if 'cors' in database_account and len(database_account['cors']) > 0:
        database_account = transform_cosmosdb_cors_policy(database_account)
        database_account_id = database_account['id']
        cors_policies = database_account['cors']
        ingest_cors_policy = """
        UNWIND {cors_policies_list} AS cp
        MERGE (corspolicy:AzureCosmosDBCorsPolicy{id: cp.cors_policy_unique_id})
        ON CREATE SET corspolicy.firstseen = timestamp(),
        corspolicy.allowedorigins = cp.allowed_origins
        SET corspolicy.lastupdated = {azure_update_tag},
        corspolicy.allowedmethods = cp.allowed_methods,
        corspolicy.allowedheaders = cp.allowed_headers,
        corspolicy.exposedheaders = cp.exposed_headers,
        corspolicy.maxageinseconds = cp.max_age_in_seconds
        WITH corspolicy
        MATCH (d:AzureCosmosDBAccount{id: {DatabaseAccountId}})
        MERGE (d)-[r:CONTAINS]->(corspolicy)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {azure_update_tag}
        """
        neo4j_session.run(
            ingest_cors_policy,
            cors_policies_list=cors_policies,
            DatabaseAccountId=database_account_id,
            azure_update_tag=azure_update_tag,
        )
@timeit
def _load_cosmosdb_failover_policies(
    neo4j_session: neo4j.Session, database_account: Dict, azure_update_tag: int,
) -> None:
    """
    Ingest the details of the Failover Policies of the database account.

    Creates one AzureCosmosDBAccountFailoverPolicy node per entry in
    'failover_policies' and links it to the account with CONTAINS. No-op
    when the account has no failover policies.
    """
    if 'failover_policies' in database_account and len(database_account['failover_policies']) > 0:
        database_account_id = database_account['id']
        failover_policies = database_account['failover_policies']
        ingest_failover_policies = """
        UNWIND {failover_policies_list} AS fp
        MERGE (fpolicy:AzureCosmosDBAccountFailoverPolicy{id: fp.id})
        ON CREATE SET fpolicy.firstseen = timestamp()
        SET fpolicy.lastupdated = {azure_update_tag},
        fpolicy.locationname = fp.location_name,
        fpolicy.failoverpriority = fp.failover_priority
        WITH fpolicy
        MATCH (d:AzureCosmosDBAccount{id: {DatabaseAccountId}})
        MERGE (d)-[r:CONTAINS]->(fpolicy)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {azure_update_tag}
        """
        neo4j_session.run(
            ingest_failover_policies,
            failover_policies_list=failover_policies,
            DatabaseAccountId=database_account_id,
            azure_update_tag=azure_update_tag,
        )
@timeit
def _load_cosmosdb_private_endpoint_connections(
    neo4j_session: neo4j.Session, database_account: Dict, azure_update_tag: int,
) -> None:
    """
    Ingest the details of the Private Endpoint Connections of the database account.

    Creates one AzureCDBPrivateEndpointConnection node per entry in
    'private_endpoint_connections' and links it to the account with
    CONFIGURED_WITH. No-op when the account has no such connections.
    """
    if 'private_endpoint_connections' in database_account and len(
        database_account['private_endpoint_connections'],
    ) > 0:
        database_account_id = database_account['id']
        private_endpoint_connections = database_account['private_endpoint_connections']
        # Nested fields (private_endpoint.id, connection state) resolve to
        # null in Cypher when absent from the API response.
        ingest_private_endpoint_connections = """
        UNWIND {private_endpoint_connections_list} AS connection
        MERGE (pec:AzureCDBPrivateEndpointConnection{id: connection.id})
        ON CREATE SET pec.firstseen = timestamp()
        SET pec.lastupdated = {azure_update_tag},
        pec.name = connection.name,
        pec.privateendpointid = connection.private_endpoint.id,
        pec.status = connection.private_link_service_connection_state.status,
        pec.actionrequired = connection.private_link_service_connection_state.actions_required
        WITH pec
        MATCH (d:AzureCosmosDBAccount{id: {DatabaseAccountId}})
        MERGE (d)-[r:CONFIGURED_WITH]->(pec)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {azure_update_tag}
        """
        neo4j_session.run(
            ingest_private_endpoint_connections,
            private_endpoint_connections_list=private_endpoint_connections,
            DatabaseAccountId=database_account_id,
            azure_update_tag=azure_update_tag,
        )
@timeit
def _load_cosmosdb_virtual_network_rules(
    neo4j_session: neo4j.Session, database_account: Dict, azure_update_tag: int,
) -> None:
    """
    Ingest the details of the Virtual Network Rules of the database account.

    Creates one AzureCosmosDBVirtualNetworkRule node per entry in
    'virtual_network_rules' and links it to the account with
    CONFIGURED_WITH. No-op when the account has no such rules.
    """
    if 'virtual_network_rules' in database_account and len(database_account['virtual_network_rules']) > 0:
        database_account_id = database_account['id']
        virtual_network_rules = database_account['virtual_network_rules']
        ingest_virtual_network_rules = """
        UNWIND {virtual_network_rules_list} AS vnr
        MERGE (rules:AzureCosmosDBVirtualNetworkRule{id: vnr.id})
        ON CREATE SET rules.firstseen = timestamp()
        SET rules.lastupdated = {azure_update_tag},
        rules.ignoremissingvnetserviceendpoint = vnr.ignore_missing_v_net_service_endpoint
        WITH rules
        MATCH (d:AzureCosmosDBAccount{id: {DatabaseAccountId}})
        MERGE (d)-[r:CONFIGURED_WITH]->(rules)
        ON CREATE SET r.firstseen = timestamp()
        SET r.lastupdated = {azure_update_tag}
        """
        neo4j_session.run(
            ingest_virtual_network_rules,
            virtual_network_rules_list=virtual_network_rules,
            DatabaseAccountId=database_account_id,
            azure_update_tag=azure_update_tag,
        )
@timeit
def sync_database_account_details(
    neo4j_session: neo4j.Session, credentials: Credentials, subscription_id: str,
    database_account_list: List[Dict], sync_tag: int, common_job_parameters: Dict,
) -> None:
    """Fetch each account's child resources (SQL/Mongo databases, Cassandra
    keyspaces, table resources) and ingest them into the graph."""
    account_details = get_database_account_details(credentials, subscription_id, database_account_list)
    load_database_account_details(
        neo4j_session, credentials, subscription_id, account_details, sync_tag, common_job_parameters,
    )
@timeit
def get_database_account_details(
    credentials: Credentials, subscription_id: str, database_account_list: List[Dict],
) -> Generator[Any, Any, Any]:
    """Yield one tuple per database account: its id, name and resource group
    followed by the SQL databases, Cassandra keyspaces, MongoDB databases
    and table resources contained in that account."""
    for account in database_account_list:
        yield (
            account['id'],
            account['name'],
            account['resourceGroup'],
            get_sql_databases(credentials, subscription_id, account),
            get_cassandra_keyspaces(credentials, subscription_id, account),
            get_mongodb_databases(credentials, subscription_id, account),
            get_table_resources(credentials, subscription_id, account),
        )
@timeit
def get_sql_databases(credentials: Credentials, subscription_id: str, database_account: Dict) -> List[Dict]:
    """Return the SQL databases of a database account as plain dicts, or an
    empty list when the API call fails."""
    try:
        client = get_client(credentials, subscription_id)
        databases = client.sql_resources.list_sql_databases(
            database_account['resourceGroup'],
            database_account['name'],
        )
        # The paged result is consumed inside the try block so that errors
        # raised during iteration are caught as well.
        return [database.as_dict() for database in databases]
    except ClientAuthenticationError:
        logger.warning('Client Authentication Error while retrieving SQL databases', exc_info=True)
        return []
    except ResourceNotFoundError:
        logger.warning('SQL databases resource not found error', exc_info=True)
        return []
    except HttpResponseError:
        logger.warning('Error while retrieving SQL Database list', exc_info=True)
        return []
@timeit
def get_cassandra_keyspaces(credentials: Credentials, subscription_id: str, database_account: Dict) -> List[Dict]:
"""
Return the list of Cassandra Keyspaces in a database account.
"""
try:
client = get_client(credentials, subscription_id)
cassandra_keyspace_list = list(
map(
| |
with open(filename[0], "w") as log_file:
log_file.write("Indicator Variables\n===========================\n")
for key, val in self.rev_id_conn.iteritems():
log_file.write(val+" = "+key+"\n")
log_file.write("===========================\n\nComposite Index Formula"
"\n===========================\nCI('Region','Year'): [")
log_file.write(string_formula.replace("self.indicator_var_eval", "IndValue") + "]")
log_file.write("\n===========================\n\nFailed Calculations"
"\n===========================\n")
for Region in self.iry_iteration["r"][1:]:
region_formula = string_formula.replace('Region', Region)
for Year in self.iry_iteration["y"][1:]:
year_formula = region_formula.replace('Year', Year)
try:
# If evaluation of year_formula results a float, add that in self.cim.
self.cim[self.rev_country_dict[Region]].append(
float(eval(year_formula)))
except Exception as fe:
# If no float was returned, add the formula and error message in log.
self.cim[self.rev_country_dict[Region]].append("-")
log_file.write("CI" +
str((self.rev_country_dict[Region], Year)) + ": ")
for item in formula:
if "indicator_var_eval" in item:
log_file.write(str(eval(item.replace(
'Region', Region).replace(
'Year', Year))))
else:
log_file.write(item)
log_file.write(" <"+str(fe)+">\n")
log_file.write("===========================")
# Something really unexpected just happened.
except Exception as l_er:
e["my_index.log"] = l_er
try:
# Start building the calculation file.
with open(filename[1], "w") as calc_file:
for y in self.iry_iteration["y"][1:]:
calc_file.write(";"+y)
for r in self.iry_iteration["r"][1:]:
calc_file.write("\n"+self.rev_country_dict[r])
for val in self.cim[self.rev_country_dict[r]]:
calc_file.write(";"+str(val).replace('.', ","))
# Something really unexpected just happened.
except Exception as c_er:
e["my_index.csv"] = c_er
# If error dict is not empty, a problem has occurred.
if e != {}:
self.popuper("Could not prepare files properly:\n" +
str([k + " >> " + v.__doc__ for k, v in e.iteritems()]) +
"\n\nMake sure file/s above are not already opened"
"\nand you have permission to write"
"\nto the selected folder!",
'Unexpected Error!')
# If not, no errors found.
else:
self.popuper("Two files have been saved\nin selected directory.\n\n"
"Check 'my_index.csv' for results and\n"
"'my_index.log' for calculation logs.",
"Calculations done!")
# Prepare Thematic Screen.
self.prepare_thematic()
# Evaluate Indicator value function.
def indicator_var_eval(self, ind, reg, year):
    # Looks up the stored value of indicator `ind` for region `reg` in `year`.
    # Returns the value as a float, or a "NoData[...]" marker string that the
    # calculation log consumes when the lookup key is missing.
    # NOTE(review): Python 2 code (print statement, e.message).
    try:
        # If there is any data, return it.
        return float(self.all_indicators_data[ind][self.rev_country_dict[reg]][year])
    except KeyError:
        # If no Data, return notice for logging.
        return "NoData["+str(ind)+", "+str(self.rev_country_dict[reg])+", "+str(year)+"]"
    # Something really unexpected just happened.
    # Any other failure (e.g. a non-numeric value passed to float()) is
    # printed and the method implicitly returns None.
    except Exception as e:
        print "def indicator_var_eval(self, ind, reg, year):", type(e), e.__doc__, e.message
# This function prepares the Thematic Designer Screen.
def prepare_thematic(self):
    # Reveal the Thematic button on the MainWindow.
    self.ic_thematic_btn.size = (100, 70)
    self.ic_thematic_btn_box.opacity = 1
    # Drop any year buttons and data-table rows left from a previous run.
    for container in (self.ic_th_designer.th_years_stack,
                      self.ic_th_designer.th_data_table_regions,
                      self.ic_th_designer.th_data_table_values):
        container.clear_widgets()
    # One selector button per available year, wired to rebuild the data table.
    for year in self.iry_iteration["y"][1:]:
        selector = Factory.TH_YearSelector(text=str(year), group='year')
        self.ic_th_designer.th_years_stack.add_widget(selector)
        selector.bind(on_release=self.ic_th_designer.th_data_table_init)
class LegendClassContainer(BoxLayout):
    # One row of the thematic-map legend: a colour swatch plus the
    # minimum/maximum value labels of the class it represents.
    cc_color = ListProperty([1, 1, 1, 1])  # RGBA swatch colour (defaults to white)
    text_min = StringProperty("")  # label for the class's lower bound
    text_max = StringProperty("")  # label for the class's upper bound
class ThematicValues(Label):
    # Data-table cell holding one region's computed index value.
    calc_number = ObjectProperty()  # numeric result, or "-" when the calculation failed
    region = StringProperty("")  # region key the value belongs to
class SvgWidget(FloatLayout, StencilView):
    # Stencil-clipped container for the SVG world map; adds no behaviour of
    # its own (presumably configured in the kv file — TODO confirm).
    pass
class WorldMapSVG(Scatter):
    # Scatter (pan/zoom) widget that renders the world-map SVG.
    # Prepare kivy properties that will handle labels and borders opacity.
    show_labels = BooleanProperty(True)
    show_borders = BooleanProperty(True)

    def __init__(self, **kwargs):
        super(WorldMapSVG, self).__init__(**kwargs)
        # Draw the SVG on canvas.before so it sits beneath any child widgets.
        with self.canvas.before:
            Svg("./DB/TH_WMap.svg")
class MapDesigner(MouseScreen):
# Link to IndexCreation.
md_index_creation = ObjectProperty()
# Format number to be more friendly.
@staticmethod
def number_formatter(number):
    # Render with 5 significant digits ("%.5G"), a decimal comma, and any
    # exponent wrapped in a small-font BBCode " E..." suffix.
    mantissa, _, exponent = str("%.5G" % number).partition('E')
    formatted = '[size=12] E'.join((mantissa, exponent)) + "[/size]"
    # Strip the empty exponent markup when the number had no exponent part.
    formatted = formatted.replace("[size=12] E[/size]", "")
    return formatted.replace(".", ",")
@mainthread
def popuper(self, message, title):
    # Open a small fixed-size modal popup showing *message*; @mainthread
    # defers the call onto the UI thread.
    content = Label(
        text=message,
        font_size=15,
        halign="center",
        italic=True,
    )
    Popup(title=title, content=content,
          size_hint=(None, None), size=(350, 180)).open()
# "#FFFFFF" -> [255,255,255].
@staticmethod
def hex_to_rgb(hex_c):
# Pass 16 to the integer function for change of base.
return [int(hex_c[i:i+2], 16) for i in range(1,6,2)]
# [255,255,255] -> "#FFFFFF".
@staticmethod
def rgb_to_hex(rgb_c):
# Components need to be integers for hex to make sense.
rgb_c = [int(x) for x in rgb_c]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else "{0:x}".format(v) for v in rgb_c])
# Creates thematic legend based on given number of classes.
def calc_linear_gradient(self, start_hex, finish_hex, n):
# Starting and ending colors in RGB form
s = self.hex_to_rgb(start_hex)
f = self.hex_to_rgb(finish_hex)
# Initialize a list of the output colors with the starting color.
rgb_list = [s]
# Calculate a color at each evenly spaced value of t from 1 to n.
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [int(s[j] + (float(t)/(n-1))*(f[j]-s[j])) for j in range(3)]
# Add it to our list of output colors
rgb_list.append(curr_vector)
# Prepare a temp data_set {Region: Value}, to be used for the legend's creation.
data_set = {p.region: p.calc_number for p in self.th_data_table_values.children
if p.calc_number != "-"}
# Takes in the RGB sub-lists and returns dictionary of colors in RGB and hex form.
color_dict = {"hex": [self.rgb_to_hex(rgb_c) for rgb_c in rgb_list],
"r": [rgb_c[0] for rgb_c in rgb_list],
"g": [rgb_c[1] for rgb_c in rgb_list],
"b": [rgb_c[2] for rgb_c in rgb_list]}
# If there is at least 1 region with a value in data table, proceed with the calculations.
if len(data_set) > 0:
min_v = min(data_set.values())
max_v = max(data_set.values())
range_v = max_v - min_v
interval_v = range_v/float(n)
# If we have only one interval, use current min and max values.
if n == 1 or len(data_set.values()) == 1:
classification = [(min_v, max_v)]
# If we selected more than one intervals for a single value, pop a notice msg.
if len(data_set.values()) == 1 and n != 1:
self.popuper("Only one class was generated,\n"
"because only one value was found\n"
"in Data Table!",
"Warning!")
# If we have more than one intervals..
else:
# This list will hold the range groups.
classification = []
r1 = min_v
r2 = r1 + interval_v
for i in range(1, n):
classification.append((r1, r2))
r1 = r2
r2 = r1 + interval_v
classification.append((r1, max_v))
# Create classified legend.
self.build_legend(classification, color_dict['r'], color_dict['g'], color_dict['b'])
# Replace data value with the equivalent color.
for k, v in data_set.iteritems():
for g in range(len(classification)):
if classification[g][0] <= v <= classification[g][1]:
break
data_set[k] = color_dict['hex'][g]
# Prepare and load the SVG, according to calculated color classes.
self.prepare_svg(data_set)
# If data table has no values pop a notice.
else:
self.popuper("There should be at least one region\n"
"with a numeric value in the Data Table.",
"Warning!")
# This function creates the legend with all color classes.
def build_legend(self, classes, r, g, b):
# Clear legend
self.legend.clear_widgets()
for i in range(len(classes)):
cc = Factory.LegendClassContainer(cc_color=[r[i]/255., g[i]/255., b[i]/255., 1],
text_min=self.number_formatter(classes[i][0]),
text_max=self.number_formatter(classes[i][1]))
self.legend.add_widget(cc)
# This function prepares and loads the SVG thematic colors.
def prepare_svg(self, d_set):
# Try to generate the temp SVG with the thematic colors applied.
try:
orig_svg = open("./Sources/WorldMap.svg", "r")
temp_svg = open("./DB/TH_WMap.svg", "w")
for line in orig_svg:
try:
start = line.index('<path class="') + 13
end = line.index('" fill="#', start)
region = line[start:end]
if region in d_set:
temp_svg.write(line.replace('fill="#383838"', 'fill="'+d_set[region]+'"'))
else:
temp_svg.write(line)
except ValueError:
temp_svg.write(line)
temp_svg.close()
orig_svg.close()
# Load the temp SVG.
self.map_canvas.clear_widgets()
svg = WorldMapSVG()
self.map_canvas.add_widget(svg)
svg.center_x = self.width/2
svg.center_y = self.height/2
# Something really unexpected just happened.
except Exception as e:
print "def prepare_svg(d_set):", type(e), e.__doc__, e.message
# Prepare list that will be used to build the Data Table.
def th_data_table_init(self, year_btn):
# Ref the index of year button pressed.
year_index = list(reversed(self.th_years_stack.children)).index(year_btn)
data_set = sorted([(v[year_index], k) for k, v in self.md_index_creation.cim.iteritems()],
key=operator.itemgetter(1))
# Build the data table using the generated data_set.
self.build_th_data_table(data_set)
# Prepare list that will be used to sort the Data Table.
def sort_data(self, direction_btn):
# Set reverse option based on button pressed.
rev = False if direction_btn == "ascending" else True
data_set = sorted([(i.calc_number, i.region) for i in self.th_data_table_values.children],
key=operator.itemgetter(0),
reverse=rev)
# Build the data table using the generated data_set.
self.build_th_data_table(data_set)
# Build Data Table.
def build_th_data_table(self, data_set):
# Clear Data Tables.
self.th_data_table_regions.clear_widgets()
self.th_data_table_values.clear_widgets()
# For each item in our data_set.
for i in range(len(data_set)):
# Ref float and region to handle str/float items.
data_value = data_set[i][0]
r = data_set[i][1]
# Create the region label.
region = Factory.TH_Regions(text=r)
self.th_data_table_regions.add_widget(region)
if isinstance(data_value, float):
# Format number to be more friendly.
val = self.number_formatter(data_value)
else:
# Handle cases where item is not a number.
val = data_value
data_value = "-"
year_value = Factory.ThematicValues(text=val, calc_number=data_value, region=r)
self.th_data_table_values.add_widget(year_value)
class SaveDialog(FloatLayout):
    """Content widget for the save popup; callbacks are injected by the opener."""
    # Callback invoked when the user confirms (see Saver.save).
    save = ObjectProperty(None)
    # Callback invoked to close the dialog without saving.
    cancel = ObjectProperty(None)
    # Suggested file name shown in the dialog.
    file = StringProperty("")
class Saver(Button):
caller = ObjectProperty(None)
def dismiss_popup(self):
self._popup.dismiss()
def show_save(self, fn):
self._popup = Popup(title="Choose Save Destination.\n(CIM overwrites any previously "
"created project files, so they must not be in use!)",
content=SaveDialog(save=self.save, cancel=self.dismiss_popup, file=fn),
size_hint=(None, None),
size=(600, 400),
auto_dismiss=False)
self._popup.open()
def save(self, path, fn):
try:
# If we are exporting to png.
if fn == "File: TH_Map.png":
# Take a screen shot and save the img.
self.cnv.parent.export_to_png(os.path.join(path, "TH_Map.png"))
elif fn == "File: TH_Map.svg":
# Generate the user | |
group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
asset_value = nb.asset_value_grouped_nb(asset_value, group_lens)
else:
asset_value = nb.asset_value_nb(close, assets)
return self.wrapper.wrap(asset_value, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def gross_exposure(self, direction: str = 'all', group_by: tp.GroupByLike = None,
                   wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get gross exposure."""
    # Relate (directional) asset value to free cash, element-wise.
    asset_value_2d = to_2d(self.asset_value(group_by=group_by, direction=direction), raw=True)
    free_cash_2d = to_2d(self.cash(group_by=group_by, free=True), raw=True)
    out = nb.gross_exposure_nb(asset_value_2d, free_cash_2d)
    return self.wrapper.wrap(out, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def net_exposure(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get net exposure."""
    # Net exposure = long-only gross exposure minus short-only gross exposure.
    long_exp = to_2d(self.gross_exposure(direction='longonly', group_by=group_by), raw=True)
    short_exp = to_2d(self.gross_exposure(direction='shortonly', group_by=group_by), raw=True)
    return self.wrapper.wrap(long_exp - short_exp, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def value(self, group_by: tp.GroupByLike = None, in_sim_order: bool = False,
          wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get portfolio value series per column/group.

    By default, will generate portfolio value for each asset based on cash flows and thus
    independent from other assets, with the initial cash balance and position being that of the
    entire group. Useful for generating returns and comparing assets within the same group.

    When `group_by` is False and `in_sim_order` is True, returns value generated in
    simulation order (see [row-major order](https://en.wikipedia.org/wiki/Row-_and_column-major_order)).
    This value cannot be used for generating returns as-is. Useful to analyze how value
    evolved throughout simulation."""
    cash_2d = to_2d(self.cash(group_by=group_by, in_sim_order=in_sim_order), raw=True)
    asset_value_2d = to_2d(self.asset_value(group_by=group_by), raw=True)
    if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:
        # Replay the per-group call sequence to get value in simulation order.
        group_lens = self.wrapper.grouper.get_group_lens()
        call_seq_2d = to_2d(self.call_seq, raw=True)
        out = nb.value_in_sim_order_nb(cash_2d, asset_value_2d, group_lens, call_seq_2d)
        # price of NaN is already addressed by ungrouped_value_nb
    else:
        out = nb.value_nb(cash_2d, asset_value_2d)
    return self.wrapper.wrap(out, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def total_profit(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Get total profit per column/group.

    Calculated directly from order records (fast)."""
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        # Aggregate the per-column profits within each group.
        per_col = to_1d(self.total_profit(group_by=False), raw=True)
        group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
        out = nb.total_profit_grouped_nb(per_col, group_lens)
    else:
        if self.fillna_close:
            close = to_2d(self.get_fillna_close(), raw=True)
        else:
            close = to_2d(self.close, raw=True)
        out = nb.total_profit_nb(
            self.wrapper.shape_2d,
            close,
            self.orders.values,
            self.orders.col_mapper.col_map
        )
    wrap_kwargs = merge_dicts(dict(name_or_index='total_profit'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, group_by=group_by, **wrap_kwargs)
@cached_method
def final_value(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Get final portfolio value per column/group."""
    # NOTE: the original docstring said "Get total profit" — copy-paste error.
    init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
    total_profit = to_1d(self.total_profit(group_by=group_by), raw=True)
    final_value = nb.final_value_nb(total_profit, init_cash)
    wrap_kwargs = merge_dicts(dict(name_or_index='final_value'), wrap_kwargs)
    return self.wrapper.wrap_reduced(final_value, group_by=group_by, **wrap_kwargs)
@cached_method
def total_return(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Get total return per column/group."""
    # NOTE: the original docstring said "Get total profit" — copy-paste error.
    init_cash = to_1d(self.get_init_cash(group_by=group_by), raw=True)
    total_profit = to_1d(self.total_profit(group_by=group_by), raw=True)
    total_return = nb.total_return_nb(total_profit, init_cash)
    wrap_kwargs = merge_dicts(dict(name_or_index='total_return'), wrap_kwargs)
    return self.wrapper.wrap_reduced(total_return, group_by=group_by, **wrap_kwargs)
@cached_method
def returns(self, group_by: tp.GroupByLike = None, in_sim_order=False,
            wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get return series per column/group based on portfolio value."""
    value_2d = to_2d(self.value(group_by=group_by, in_sim_order=in_sim_order), raw=True)
    if self.wrapper.grouper.is_grouping_disabled(group_by=group_by) and in_sim_order:
        # Returns must follow the same simulation order as the value series.
        group_lens = self.wrapper.grouper.get_group_lens()
        init_cash_grouped = to_1d(self.init_cash, raw=True)
        call_seq_2d = to_2d(self.call_seq, raw=True)
        out = nb.returns_in_sim_order_nb(value_2d, group_lens, init_cash_grouped, call_seq_2d)
    else:
        init_cash_1d = to_1d(self.get_init_cash(group_by=group_by), raw=True)
        out = nb.returns_nb(value_2d, init_cash_1d)
    return self.wrapper.wrap(out, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def active_returns(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get active return series per column/group.

    This type of returns is based solely on cash flows and asset value rather than portfolio
    value. It ignores passive cash and thus it will return the same numbers irrespective of the amount of
    cash currently available, even `np.inf`. The scale of returns is comparable to that of going
    all in and keeping available cash at zero."""
    flows_2d = to_2d(self.cash_flow(group_by=group_by), raw=True)
    held_value_2d = to_2d(self.asset_value(group_by=group_by), raw=True)
    out = nb.active_returns_nb(flows_2d, held_value_2d)
    return self.wrapper.wrap(out, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def market_value(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get market (benchmark) value series per column/group.

    If grouped, evenly distributes the initial cash among assets in the group.

    !!! note
        Does not take into account fees and slippage. For this, create a separate portfolio."""
    # Use forward-filled close prices when configured to do so.
    close = to_2d(self.get_fillna_close() if self.fillna_close else self.close, raw=True)
    if self.wrapper.grouper.is_grouped(group_by=group_by):
        group_lens = self.wrapper.grouper.get_group_lens(group_by=group_by)
        init_cash_grouped = to_1d(self.get_init_cash(group_by=group_by), raw=True)
        out = nb.market_value_grouped_nb(close, group_lens, init_cash_grouped)
    else:
        init_cash_1d = to_1d(self.get_init_cash(group_by=False), raw=True)
        out = nb.market_value_nb(close, init_cash_1d)
    return self.wrapper.wrap(out, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def market_returns(self, group_by: tp.GroupByLike = None, wrap_kwargs: tp.KwargsLike = None) -> tp.SeriesFrame:
    """Get return series per column/group based on market (benchmark) value."""
    bench_value_2d = to_2d(self.market_value(group_by=group_by), raw=True)
    init_cash_1d = to_1d(self.get_init_cash(group_by=group_by), raw=True)
    out = nb.returns_nb(bench_value_2d, init_cash_1d)
    return self.wrapper.wrap(out, group_by=group_by, **merge_dicts({}, wrap_kwargs))
@cached_method
def total_market_return(self, group_by: tp.GroupByLike = None,
                        wrap_kwargs: tp.KwargsLike = None) -> tp.MaybeSeries:
    """Get total market (benchmark) return."""
    bench_value_2d = to_2d(self.market_value(group_by=group_by), raw=True)
    out = nb.total_market_return_nb(bench_value_2d)
    wrap_kwargs = merge_dicts(dict(name_or_index='total_market_return'), wrap_kwargs)
    return self.wrapper.wrap_reduced(out, group_by=group_by, **wrap_kwargs)
@cached_method
def stats(self,
          column: tp.Optional[tp.Label] = None,
          group_by: tp.GroupByLike = None,
          incl_unrealized: tp.Optional[bool] = None,
          active_returns: bool = False,
          in_sim_order: bool = False,
          agg_func: tp.Optional[tp.Callable] = _mean_agg_func,
          wrap_kwargs: tp.KwargsLike = None,
          **kwargs) -> tp.SeriesFrame:
    """Compute various statistics on this portfolio.

    `kwargs` will be passed to each `vectorbt.returns.accessors.ReturnsAccessor` method.

    Can either return aggregated statistics by reducing metrics of all columns with
    `agg_func` (mean by default) or return statistics for a single column if `column`
    was specified or portfolio contains only one column of data. To display rich data types
    such as durations correctly, use an aggregation function that can be applied on `pd.Series`.

    !!! note
        Use `column` only if caching is enabled, otherwise it may re-compute the same
        objects multiple times."""
    # Duration-based metrics below require a known index frequency.
    if self.wrapper.freq is None:
        raise ValueError("Couldn't parse the frequency of index. You must set `freq`.")
    # Pre-calculate
    trades = self.get_trades(group_by=group_by)
    if incl_unrealized is None:
        incl_unrealized = self.incl_unrealized
    if not incl_unrealized:
        # Restrict trade metrics to closed trades only.
        trades = trades.closed
    drawdowns = self.get_drawdowns(group_by=group_by)
    if active_returns:
        returns = self.active_returns(group_by=group_by)
    else:
        returns = self.returns(group_by=group_by, in_sim_order=in_sim_order)
    # Run stats
    stats_df = pd.DataFrame({
        'Start': self.wrapper.index[0],
        'End': self.wrapper.index[-1],
        'Duration': self.wrapper.shape[0] * self.wrapper.freq,
        'Init. Cash': self.get_init_cash(group_by=group_by),
        'Total Profit': self.total_profit(group_by=group_by),
        'Total Return [%]': self.total_return(group_by=group_by) * 100,
        'Benchmark Return [%]': self.total_market_return(group_by=group_by) * 100,
        'Position Coverage [%]': self.position_coverage(group_by=group_by) * 100,
        'Max. Drawdown [%]': -drawdowns.max_drawdown() * 100,
        'Avg. Drawdown [%]': -drawdowns.avg_drawdown() * 100,
        'Max. Drawdown Duration': drawdowns.max_duration(),
        'Avg. Drawdown Duration': drawdowns.avg_duration(),
        'Num. Trades': trades.count(),
        'Win Rate [%]': trades.win_rate() * 100,
        'Best Trade [%]': trades.returns.max() * 100,
        'Worst Trade [%]': trades.returns.min() * 100,
        'Avg. Trade [%]': trades.returns.mean() * 100,
        'Max. Trade Duration': trades.duration.max(wrap_kwargs=dict(time_units=True)),
        'Avg. Trade Duration': trades.duration.mean(wrap_kwargs=dict(time_units=True)),
        'Expectancy': trades.expectancy(),
        'SQN': trades.sqn(),
        'Gross Exposure': self.gross_exposure(group_by=group_by).mean(),
        'Sharpe Ratio': self.sharpe_ratio(reuse_returns=returns, **kwargs),
        'Sortino Ratio': self.sortino_ratio(reuse_returns=returns, **kwargs),
        'Calmar Ratio': self.calmar_ratio(reuse_returns=returns, **kwargs)
    }, index=self.wrapper.grouper.get_columns(group_by=group_by))
    # Select columns or reduce
    if self.wrapper.get_ndim(group_by=group_by) == 1:
        # Single column/group: reduce the one-row frame to a Series.
        wrap_kwargs = merge_dicts(dict(name_or_index=stats_df.columns), wrap_kwargs)
        return self.wrapper.wrap_reduced(stats_df.iloc[0], group_by=group_by, **wrap_kwargs)
    if column is not None:
        return stats_df.loc[column]
    if agg_func is not None:
        if agg_func == _mean_agg_func:
            warnings.warn("Taking mean across columns. To return a DataFrame, pass agg_func=None.", stacklevel=2)
            func_name = 'stats_mean'
        else:
            func_name = 'stats_' + agg_func.__name__
        agg_stats_sr = pd.Series(index=stats_df.columns, name=func_name)
        # Start/End/Duration are identical across columns, so copy them from
        # the first row instead of aggregating.
        agg_stats_sr.iloc[:3] = stats_df.iloc[0, :3]
        agg_stats_sr.iloc[3:] = agg_func(stats_df.iloc[:, 3:])
        return agg_stats_sr
    return stats_df
def returns_stats(self,
                  column: tp.Optional[tp.Label] = None,
                  group_by: tp.GroupByLike = None,
                  active_returns: bool = False,
                  in_sim_order: bool = False,
                  agg_func: tp.Optional[tp.Callable] = _mean_agg_func,
                  year_freq: tp.Optional[tp.FrequencyLike] = None,
                  **kwargs) -> tp.SeriesFrame:
    """Compute various statistics on returns of this portfolio.

    For keyword arguments and notes, see `Portfolio.stats`.

    `kwargs` will be passed to `vectorbt.returns.accessors.ReturnsAccessor.stats` method.
    If `benchmark_rets` is not set, uses `Portfolio.market_returns`."""
    # Pre-calculate
    if active_returns:
        returns = self.active_returns(group_by=group_by)
    else:
        returns = self.returns(group_by=group_by, in_sim_order=in_sim_order)
    # Run stats
    if 'benchmark_rets' not in kwargs:
        # Default benchmark: the market (buy-and-hold) returns.
        kwargs['benchmark_rets'] = self.market_returns(group_by=group_by)
    stats_obj = returns.vbt.returns(freq=self.wrapper.freq, year_freq=year_freq).stats(**kwargs)
    # Select columns or reduce
    if checks.is_series(stats_obj):
        # Already reduced to a single column of stats.
        return stats_obj
    if column is not None:
        return stats_obj.loc[column]
    if agg_func is not None:
        if agg_func == _mean_agg_func:
            warnings.warn("Taking mean across columns. To return a DataFrame, pass agg_func=None.", stacklevel=2)
            func_name = 'stats_mean'
        else:
            func_name = 'stats_' + agg_func.__name__
        agg_stats_sr = pd.Series(index=stats_obj.columns, name=func_name)
        # First three stats are copied from the first row rather than
        # aggregated (assumed identical across columns — TODO confirm).
        agg_stats_sr.iloc[:3] = stats_obj.iloc[0, :3]
        agg_stats_sr.iloc[3:] = agg_func(stats_obj.iloc[:, 3:])
        return agg_stats_sr
    return stats_obj
# ############# Plotting ############# #
def plot_asset_flow(self,
column: tp.Optional[tp.Label] = None,
direction: str = 'all',
xref: str = 'x',
yref: str = 'y',
hline_shape_kwargs: tp.KwargsLike = None,
**kwargs) -> tp.BaseFigure:
"""Plot one column of asset flow.
Args:
column (str): Name of the column to plot.
direction (Direction): See `vectorbt.portfolio.enums.Direction`.
xref (str): X coordinate axis.
yref (str): Y coordinate axis.
hline_shape_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Figure.add_shape` for zeroline.
**kwargs: Keyword arguments passed to `vectorbt.generic.accessors.GenericAccessor.plot`.
"""
from vectorbt._settings import settings
plotting_cfg = settings['plotting']
kwargs = merge_dicts(dict(
trace_kwargs=dict(
line=dict(
color=plotting_cfg['color_schema']['brown']
),
| |
# Matrix archiver script.
# Copyright (c) <NAME>, 2020. All rights reserved.
# Released under the MIT License (See LICENSE)
# Some portions (namely event retrieval as a batching generator) are taken
# from the MIT Licensed "matrix-archive" project by <NAME>.
import os
import sys
import argparse
import json
import sqlite3
from datetime import datetime
from itertools import islice
from matrix_client.client import MatrixClient
import requests
from pony.orm import *
# ----------------------------------------------------------------------------
# Globals
# ----------------------------------------------------------------------------
# Credentials and connection settings come from the environment.
MATRIX_USER = os.environ['MATRIX_USER']
MATRIX_PASSWORD = os.environ['MATRIX_PASSWORD']
MATRIX_HOST = os.environ.get('MATRIX_HOST', "https://matrix.org")
MATRIX_ROOM_IDS = os.environ['MATRIX_ROOM_IDS'].split(',')
# Optional comma-separated list of room IDs to skip entirely.
EXCLUDED_ROOM_IDS = os.environ.get('EXCLUDED_MATRIX_ROOM_IDS')
if EXCLUDED_ROOM_IDS is None:
    EXCLUDED_ROOM_IDS = []
else:
    EXCLUDED_ROOM_IDS = EXCLUDED_ROOM_IDS.split(',')
# 1 TB default max filesize. int() is required: environment values are
# strings, and add_rooms() compares `int(content-length) < MAX_FILESIZE`,
# which raises TypeError whenever MAX_FILESIZE came from the environment.
MAX_FILESIZE = int(os.environ.get('MAX_FILESIZE', 1099511627776))
# ----------------------------------------------------------------------------
# DB Models
# ----------------------------------------------------------------------------
# Single Pony ORM database object; the entities below register themselves on it.
db = Database()
class Room(db.Entity):
    """A Matrix room whose metadata, members and events are archived."""
    id = PrimaryKey(int, auto=True)
    # Matrix room ID; unique key used to find an existing archive of the room.
    room_id = Required(str, unique=True)
    display_name = Required(str)
    # Room topic as a JSON dump; None when topic retrieval failed (e.g. 404).
    topic = Optional(str, nullable=True)
    members = Set('Member')
    events = Set('Event')
    # When this row was written; lambda so the default is evaluated per insert.
    retrieval_ts = Required(datetime, default=lambda: datetime.utcnow())
class Member(db.Entity):
    """A user who was joined to an archived room at retrieval time."""
    id = PrimaryKey(int, auto=True)
    room = Required(Room)
    display_name = Required(str)
    user_id = Required(str)
    # Denormalized copy of the room's Matrix ID for easier querying.
    room_id = Required(str)
    avatar_url = Optional(str, nullable=True)
    # When this row was written; lambda so the default is evaluated per insert.
    retrieval_ts = Required(datetime, default=lambda: datetime.utcnow())
class Device(db.Entity):
    """A device associated with the archiving user's account."""
    id = PrimaryKey(int, auto=True)
    user_id = Required(str)
    device_id = Required(str, unique=True)
    display_name = Optional(str, nullable=True)
    # ISO8601 string (converted from the Matrix millisecond timestamp).
    last_seen_ts = Optional(str, nullable=True)
    last_seen_ip = Optional(str, nullable=True)
    # When this row was written; lambda so the default is evaluated per insert.
    retrieval_ts = Required(datetime, default=lambda: datetime.utcnow())
class Event(db.Entity):
    """A single room event (message, state change, ...) as returned by the API."""
    id = PrimaryKey(int, auto=True)
    room = Required(Room)
    content = Required(Json)
    sender = Required(str)
    # NOTE(review): shadows the builtin name `type` inside the class body;
    # kept as-is for schema compatibility.
    type = Required(str)
    event_id = Required(str, unique=True)
    # Denormalized copy of the room's Matrix ID for easier querying.
    room_id = Required(str)
    origin_server_ts = Required(datetime)
    # Full original event payload, for lossless re-processing later.
    raw_json = Required(Json)
    # When this row was written; lambda so the default is evaluated per insert.
    retrieval_ts = Required(datetime, default=lambda: datetime.utcnow())
class File(db.Entity):
    """A file or image attachment referenced by an archived event."""
    id = PrimaryKey(int, auto=True)
    filename = Required(str)
    size = Required(int)  # Size of file in bytes.
    mime_type = Optional(str, nullable=True)
    is_image = Required(bool, default=False)  # Flag to make queries easier.
    is_cached = Required(bool, default=False)  # Flag to make queries easier.
    # Raw file bytes; None when the download failed or exceeded MAX_FILESIZE.
    data = Optional(bytes, nullable=True)
    fetch_url_http = Required(str, unique=True)  # Resolved HTTP URL for the file.
    fetch_url_matrix = Required(str, unique=True)  # Original mxc:// style URL.
    # Status of the last download attempt (e.g. "200 OK" or "Fail").
    last_fetch_status = Required(str)
    last_fetch_ts = Required(datetime, default=lambda: datetime.utcnow())
    # When this row was written; lambda so the default is evaluated per insert.
    retrieval_ts = Required(datetime, default=lambda: datetime.utcnow())
# ----------------------------------------------------------------------------
# ORM Startup jazz
# ----------------------------------------------------------------------------
# Default setting. Useful for testing.
db_provider = os.environ.get('DB_PROVIDER', 'sqlite')

# Avoid running configuration stuff when generating Sphinx docs.
# Cite: https://stackoverflow.com/a/45441490
if 'sphinx' not in sys.modules:
    if db_provider == "postgres":
        # Cite: https://stackoverflow.com/a/23331896
        pwd = os.environ.get('DB_PASSWORD')
        port = os.environ.get('DB_PORT')
        # Connect to DB and auto-gen tables as needed.
        # Fix: pass the password read above (`pwd`); the file contained the
        # literal placeholder `<PASSWORD>`, which is a syntax error.
        db.bind(provider='postgres',
                user=os.environ['DB_USER'],
                password=pwd,
                host=os.environ['DB_HOST'],
                port=port,
                database=os.environ['DB_NAME'])
        db.generate_mapping(create_tables=True)
        print("Connected to database: {}".format(os.environ['DB_NAME']))
    elif db_provider == "sqlite":
        # Connect to DB and auto-gen tables as needed.
        db.bind(provider='sqlite',
                filename='db.sqlite',
                create_db=True)
        db.generate_mapping(create_tables=True)
        print("Connected to database: {}".format('db.sqlite'))
# Borrowed straight from osteele/matrix-archive.
def get_room_events(client, room_id):
    """Iterate room events, starting at the cursor."""
    room = client.get_rooms()[room_id]
    print(f" |---- Reading events from room {room.display_name!r}…")
    # First drain whatever the client already has cached for the room.
    yield from room.events
    batch_size = 1000  # empirically, this is the largest honored value
    cursor = room.prev_batch
    # Then page backwards ('b') through history until the server runs dry.
    while True:
        res = room.client.api.get_room_messages(room.room_id, cursor, 'b',
                                                limit=batch_size)
        batch = res['chunk']
        if not batch:
            return
        print(f" |---- Read {len(batch)} events...")
        yield from batch
        cursor = res['end']
# Convert matrix timestamps to ISO8601 timestamps at highest resolution.
def convert_to_iso8601(ts):
    """Turn a Matrix millisecond timestamp into an ISO8601 string (UTC)."""
    seconds = ts / 1000
    return datetime.utcfromtimestamp(seconds).isoformat(timespec='milliseconds')
@db_session
def add_devices(devices):
    """Archive the user's device list, skipping already-archived devices."""
    print("Archiving Device list for user.")
    for entry in devices["devices"]:
        user_id = entry["user_id"]
        device_id = entry["device_id"]
        display_name = entry["display_name"]
        last_seen_ts = entry["last_seen_ts"]
        last_seen_ip = entry["last_seen_ip"]
        existing = Device.get(user_id=user_id, device_id=device_id)
        if existing is not None:
            # We've seen this device before.
            print(" |-- Skipping Device: '{}' (Device ID: '{}') because it has already been archived.".format(display_name, device_id))
            continue
        # Fix up timestamp if it is present.
        if last_seen_ts is not None:
            last_seen_ts = convert_to_iso8601(last_seen_ts)
        Device(user_id=user_id,
               device_id=device_id,
               display_name=display_name,
               last_seen_ts=last_seen_ts,
               last_seen_ip=last_seen_ip).flush()
    commit()
@db_session
def add_rooms(rooms):
    """Archive metadata, members, events and attached files for each room.

    ``rooms`` maps room_id -> room object (as returned by client.get_rooms()).
    Relies on module-level ``client``, ``EXCLUDED_ROOM_IDS`` and ``MAX_FILESIZE``.
    """
    # ------------------------------------------------
    # Back up room metadata first, then members, then events.
    for room_id in rooms:
        room = rooms[room_id]
        display_name = room.display_name
        print("Archiving Room: '{}' (Room ID: '{}')".format(display_name, room_id))
        # Skip rooms the user specifically wants to exclude.
        if room_id in EXCLUDED_ROOM_IDS:
            print(" |-- Skipping Room: '{}' (Room ID: '{}') because it is on the EXCLUDED list.".format(room.display_name, room_id))
            continue
        # Topic retrieval can fail with a 404 sometimes.
        try:
            topic = json.dumps(client.api.get_room_topic(room_id))
        except Exception as e:
            topic = None
        # See if the room already exists in the DB.
        print(" | Backing up room metadata...")
        r = Room.get(room_id=room_id)
        if r is None:
            # Room hasn't been archived before.
            item = Room(room_id=room_id,
                        display_name=display_name,
                        topic=topic)
            item.flush()
            r = item
        else:
            # We've seen this room before.
            print(" |-- Skipping metadata for Room: '{}' (Room ID: '{}') because it has already been archived.".format(display_name, room_id))
        # --------------------------------------------
        # Back up room members.
        print(" | Backing up list of room members...")
        for member in room.get_joined_members():
            display_name = member.displayname
            user_id = member.user_id
            avatar_url = member.get_avatar_url()
            # See if the member already exists in the DB.
            item = Member.get(room=r, user_id=user_id)
            if item is None:
                # Member hasn't been archived before.
                item = Member(room=r,
                              user_id=user_id,
                              room_id=r.room_id,
                              display_name=display_name,
                              avatar_url=avatar_url)
                item.flush()
            else:
                # We've seen this member before.
                print(" |-- Skipping Member: '{}' (User ID: '{}') because it has already been archived.".format(display_name, user_id))
        # --------------------------------------------
        # Back up room events.
        print(" | Backing up list of room events...")
        events = get_room_events(client, room_id)
        # Last 1000 archived events (newest first), used to detect overlap
        # between the incoming stream and what is already stored.
        last_events = select(e for e in Event
                             if e.room == r).order_by(desc(Event.origin_server_ts))[:1000]
        last_event_ids = set()
        if last_events is None or last_events == []:
            # No existing backup. Let's make a new one.
            print(" |-- No existing events backup for this room. Creating a new one...")
        else:
            # We've got an existing backup, let's add to it.
            print(" |-- Checking to see if new events have occurred since the last backup...")
            last_event_ids = set([e.event_id for e in last_events])
            #print("Last event ID: {} timestamp: {}".format(last_event_id, last_event.origin_server_ts))
        new_events_saved = 0
        # Events will be pulled down in batches.
        # Note: Insertion order will be off globally, but correct within a batch.
        # Users will need to ORDER BY `origin_server_ts` to get a globally correct ordering.
        stop_on_this_batch = False
        event_batch = list(islice(events, 0, 1000))
        while len(event_batch) > 0:
            incoming_event_ids = set([e["event_id"] for e in event_batch])
            # Set difference of incoming versus last 1k events in DB.
            diff = incoming_event_ids.difference(last_event_ids)
            for event in event_batch:
                event_id = event["event_id"]
                origin_server_ts = datetime.utcfromtimestamp(event["origin_server_ts"]/1000).isoformat(timespec='milliseconds')
                #print("Current event ID: {} timestamp: {}".format(event_id, origin_server_ts))
                # If we run into something we've already archived we'll be done after this batch.
                if event_id not in diff:
                    stop_on_this_batch = True
                    continue
                # Otherwise, archive this event.
                new_events_saved += 1
                content = event["content"]
                sender = event["sender"]
                type = event["type"]
                # NOTE(review): origin_server_ts is an ISO8601 *string* here,
                # assigned to a datetime column — presumably Pony converts it;
                # confirm.
                origin_server_ts = datetime.utcfromtimestamp(event["origin_server_ts"]/1000).isoformat(timespec='milliseconds')
                raw_json = json.dumps(event)
                item = Event(room=r,
                             event_id=event_id,
                             room_id=r.room_id,
                             content=content,
                             sender=sender,
                             type=type,
                             origin_server_ts=origin_server_ts,
                             raw_json=raw_json)
                item.flush()
                # Download files if message.content['msgtype'] == 'm.file'
                if "msgtype" in item.content.keys() and item.content["msgtype"] in ["m.file", "m.image"]:
                    print(" |---- Attempting to archive file: '{}'".format(item.content["body"]))
                    filename = item.content["body"]
                    file_size = item.content["info"]["size"]
                    is_image = (item.content["msgtype"] == "m.image")
                    matrix_download_url = item.content["url"]
                    http_download_url = client.api.get_download_url(matrix_download_url)
                    data = None
                    is_cached = False
                    last_fetch_status = "Fail"
                    file_entry = File.get(fetch_url_matrix=matrix_download_url)
                    # If not cached, or last fetch failed, try fetching the file.
                    if file_entry is None or file_entry.is_cached == False:
                        try:
                            req = requests.get(http_download_url, stream=True)
                            # Enforce the configured size limit before caching.
                            if int(req.headers["content-length"]) < MAX_FILESIZE:
                                data = req.content
                                is_cached = True
                                last_fetch_status = "{} {}".format(req.status_code, req.reason)
                            else:
                                print(" | File: '{}' of size {} bytes was not archived due to size in excess of limit ({} bytes).".format(filename, file_size, MAX_FILESIZE))
                        except Exception as e:
                            print(" Could not fetch file. Traceback:\n {}".format(e))
                            is_cached = False
                    else:
                        print(" |------ Skipping because file is already archived!")
                    if file_entry is None:
                        # First time we see this URL: create the File row.
                        file_entry = File(filename=filename,
                                          size=file_size,
                                          mime_type=item.content["info"].get("mimetype"),
                                          is_image=is_image,
                                          is_cached=is_cached,
                                          data=data,
                                          fetch_url_http=http_download_url,
                                          fetch_url_matrix=matrix_download_url,
                                          last_fetch_status=last_fetch_status)
                    else:
                        # Update data field if we had a successful fetch.
                        if data is not None:
                            file_entry.data = data
                        file_entry.last_fetch_status = last_fetch_status
                        file_entry.last_fetch_ts = datetime.utcnow().isoformat()
                    file_entry.flush()
            # Terminate if we hit known event IDs in this batch.
            if stop_on_this_batch:
                break
            # Fetch next batch.
            event_batch = list(islice(events, 0, 1000))
        commit()
        print(" | Archived {} new events for room '{}'".format(new_events_saved, room.display_name))
# ----------------------------------------------------------------------------
# Main function
# ----------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Matrix Room Archiver Client')
parser.add_argument('-u', '--user', type=str, help="Username to use for logging in.")
parser.add_argument('-p', '--password', type=str, help="Password to use for logging in.")
parser.add_argument('--db', type=str, default="archive.sqlite", help="Name of the database file to export to. (default: 'archive.sqlite')")
parser.add_argument('--room', action="append", help="Name of the Matrix room to export. Applying this argument multiple times will export multiple rooms, in sequence.")
parser.add_argument('--host', type=str, help="Matrix host address. (default: 'https://matrix.org')")
args = parser.parse_args()
| |
<reponame>lverdier1/CEASIOMpy
"""
CEASIOMpy: Conceptual Aircraft Design Software
Developed by CFS ENGINEERING, 1015 Lausanne, Switzerland
This program stores all functions needed for stability analysis (dynamic and static)
Python version: >=3.6
| Author: <NAME>
| Creation: 2020-02-24
| Last modification: 2020-03-24 (AJ)
TODO:
* ...
"""
#=============================================================================
# IMPORTS
#=============================================================================
import os
import sys
from math import sqrt
import numpy as np
from numpy import log as ln
from numpy import linalg # For eigen values and aigen voectors
import matplotlib.patheffects
import matplotlib.pyplot as plt
from matplotlib import rcParams, cycler
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
from matplotlib.ticker import ScalarFormatter
from ceasiompy.utils.ceasiomlogger import get_logger
# Module-wide logger, named after this file (without its extension).
log = get_logger(__file__.split('.')[0])

# Absolute path of the directory containing this module; used below to
# locate the 'ToolOutput' folder when saving figures.
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
#=============================================================================
# CLASSES
#=============================================================================
#=============================================================================
# FUNCTIONS
#=============================================================================
### -------------------- 'MIL-F-8785C ShortPeriod natural frequency requirements for flight phase A' ---------
def plot_sp_level_a(x_axis, y_axis, legend, show_plots, save_plots):
    """Plot short-period natural frequency requirements (flight phase A).

    Plots the given (n/alpha, omega_N) points on a log-log grid together
    with the MIL-F-8785C Level 1/2/3 boundary lines for flight phase A.

    Args:
        x_axis (list): x coordinate sequences (n/alpha [g's/rad]), one per data set.
        y_axis (list): y coordinate sequences (omega_N [rad/s]), one per data set.
        legend (list): Legend entries, one per data set.
        show_plots (bool): If True, display the figure on screen.
        save_plots (bool): If True, save the figure as an SVG in 'ToolOutput'.
    """
    fig, ax = plt.subplots()

    # Plot the data sets as unconnected markers.
    for x, y in zip(x_axis, y_axis):
        plt.plot(x, y, marker='o', markersize=4, linestyle='None')

    # NOTE: the title text is also used to build the saved file name below,
    # so it is kept byte-identical to the historical one.
    plot_title = r'MILF-8785C ShortPeriod natual frequency $\omega_{N}$ requirements for flight phase A'
    plt.title(plot_title, fontdict=None, loc='center')
    ax.legend(legend, loc='upper right')
    plt.xlabel(r"n/$\alpha \backsim$ g's/rad")
    plt.ylabel(r'$\omega_{N}$ [rad/s]')

    # Log-log axes with plain (non-scientific) tick labels.
    ax.loglog()
    for axis in [ax.xaxis, ax.yaxis]:
        formatter = ScalarFormatter()
        formatter.set_scientific(False)
        axis.set_major_formatter(formatter)
    axes = plt.gca()
    (x_min, x_max, y_min, y_max) = (1, 100, 0.1, 100)
    axes.set_xlim([x_min, x_max])
    axes.set_ylim([y_min, y_max])
    plt.grid(True, which="both", ls="-")

    # Stability level boundary lines (coordinates read off the MIL-F-8785C
    # charts).  The previous revision also called
    # transform_angles() before each label but discarded the result, so the
    # labels were always drawn with the hard-coded rotation; the dead calls
    # have been removed without changing the rendered output.
    # Level 1 upper boundary
    plt.plot([1, 100], [1.9, 20], color='green', linewidth=1.5)
    plt.text(11, 5.2, 'Level 1', fontsize=6, rotation=18, rotation_mode='anchor')
    # Level 2 upper boundary
    plt.plot([1, 100], [3.2, 33], color='orange', linewidth=1.5)
    plt.text(11, 8.7, 'Level 2', fontsize=6, rotation=18, rotation_mode='anchor')
    # Level 1 lower boundary: horizontal segment, then rising segment
    plt.plot([1, 3.7], [1, 1], color='green', linewidth=1.5)
    plt.plot([3.7, 100], [1, 5.6], color='green', linewidth=1.5)
    plt.text(11, 1.9, 'Level 1', fontsize=6, rotation=18, rotation_mode='anchor')
    # Level 2 lower boundary: horizontal segment, then rising segment
    plt.plot([1, 2.4], [0.6, 0.6], color='orange', linewidth=1.5)
    plt.text(1.1, 0.62, 'Level 2', fontsize=6, rotation=0, rotation_mode='anchor')
    plt.plot([2.4, 100], [0.6, 4.1], color='orange', linewidth=1.5)
    plt.text(11, 1.35, 'Level 2 & 3', fontsize=6, rotation=18, rotation_mode='anchor')
    # Level 3 lower boundary
    plt.plot([1, 2.4], [0.39, 0.6], color='red', linewidth=1.5)
    plt.text(1.1, 0.33, 'Level 3', fontsize=6, rotation=18, rotation_mode='anchor')

    if save_plots:
        fig_title = plot_title.replace(' ', '_')
        fig_path = os.path.join(MODULE_DIR, 'ToolOutput', fig_title) + '.svg'
        plt.savefig(fig_path)
    if show_plots:
        plt.show()
### -------------------- 'MIL-F-8785C ShortPeriod natural frequency requirements for flight phase B' ---------
def plot_sp_level_b(x_axis, y_axis, legend, show_plots, save_plots):
    """Plot short-period natural frequency requirements (flight phase B).

    Plots the given (n/alpha, omega_N) points on a log-log grid together
    with the MIL-F-8785C Level 1/2/3 boundary lines for flight phase B.

    Args:
        x_axis (list): x coordinate sequences (n/alpha [g's/rad]), one per data set.
        y_axis (list): y coordinate sequences (omega_N [rad/s]), one per data set.
        legend (list): Legend entries, one per data set.
        show_plots (bool): If True, display the figure on screen.
        save_plots (bool): If True, save the figure as an SVG in 'ToolOutput'.
    """
    fig, ax = plt.subplots()

    # Plot the data sets as unconnected markers.
    for x, y in zip(x_axis, y_axis):
        plt.plot(x, y, marker='o', markersize=4, linestyle='None')

    # NOTE: the title text is also used to build the saved file name below,
    # so it is kept byte-identical to the historical one.
    plot_title = r'MILF-8785C ShortPeriod natual frequency $\omega_{N}$ requirements for flight phase B'
    plt.title(plot_title, fontdict=None, loc='center')
    ax.legend(legend, loc='upper right')
    plt.xlabel(r"n/$\alpha \backsim$ g's/rad")
    plt.ylabel(r'$\omega_{N} \backsim$ rad/s')

    # Log-log axes with plain (non-scientific) tick labels.
    ax.loglog()
    for axis in [ax.xaxis, ax.yaxis]:
        formatter = ScalarFormatter()
        formatter.set_scientific(False)
        axis.set_major_formatter(formatter)
    axes = plt.gca()
    (x_min, x_max, y_min, y_max) = (1, 100, 0.1, 100)
    axes.set_xlim([x_min, x_max])
    axes.set_ylim([y_min, y_max])
    plt.grid(True, which="both", ls="-")

    # Stability level boundary lines (coordinates read off the MIL-F-8785C
    # charts).  As in plot_sp_level_a, the former transform_angles() calls
    # discarded their result (dead code) and have been removed; the labels
    # keep the same hard-coded rotation as before.
    # Level 1 upper boundary
    plt.plot([1, 100], [1.9, 18], color='green', linewidth=1.5)
    plt.text(11, 5.1, 'Level 1', fontsize=6, rotation=18, rotation_mode='anchor')
    # Level 2 upper boundary
    plt.plot([1, 100], [3.1, 28], color='orange', linewidth=1.5)
    plt.text(11, 8.4, 'Level 2', fontsize=6, rotation=18, rotation_mode='anchor')
    # Level 1 lower boundary
    plt.plot([1, 100], [0.3, 2.8], color='green', linewidth=1.5)
    plt.text(11, 1, 'Level 1', fontsize=6, rotation=18, rotation_mode='anchor')
    # Level 2 lower boundary (shared with Level 3); 01.9 normalized to 1.9
    plt.plot([1, 100], [0.2, 1.9], color='orange', linewidth=1.5)
    plt.text(11, 0.68, 'Level 2 & 3', fontsize=6, rotation=18, rotation_mode='anchor')

    if save_plots:
        fig_title = plot_title.replace(' ', '_')
        fig_path = os.path.join(MODULE_DIR, 'ToolOutput', fig_title) + '.svg'
        plt.savefig(fig_path)
    # Show Plots
    if show_plots:
        plt.show()
### -------------------- 'MILF-8785C ShortPeriod natual frequency requirements for flight phase C ---------
def plot_sp_level_c(x_axis, y_axis, legend, show_plots, save_plots):
# Create figure
fig, ax = plt.subplots()
# Plot data:
for n in range(len(x_axis)) :
x = x_axis[n]
y = y_axis[n]
plt.plot(x, y, marker='o', markersize=4, linestyle = 'None')
plot_title = r'MILF-8785C ShortPeriod natual frequency $\omega_{N}$ requirements for flight phase C'
# Graphic Sttelement
# Title
plt.title(plot_title, fontdict=None, loc='center')
# Legend
ax.legend(legend, loc='upper right')
# Axes labels
plt.xlabel(r"n/$\alpha \backsim$ g's/rad")
plt.ylabel(r'$\omega_{N} \backsim$ rad/s')
# Axes format and Limit:
ax.loglog() # Loglog axes
for axis in [ax.xaxis, ax.yaxis]: # Ful number and not scientific notaion on axes
formatter = ScalarFormatter()
formatter.set_scientific(False)
axis.set_major_formatter(formatter)
axes = plt.gca()
( x_min,x_max, y_min, y_max ) = (1,100, 0.1,100)
axes.set_xlim([x_min , x_max])
axes.set_ylim([y_min,y_max])
# grid
plt.grid(True,which="both",ls="-")
# Stability Levels
#Level 1 sup.
x_level_one_sup = [2,100]
y_level_one_sup = [2.7,18]
plt.plot(x_level_one_sup, y_level_one_sup, color='green', linewidth=1.5)
# Plot text : "Level1" on garpah
l1_sup = np.array((11, 5.3)) #Location
angle = 18 # Rotate angle
plt.gca().transData.transform_angles(np.array((45,)), l1_sup.reshape((1, 2)))[0] # Position+ Rotation
plt.text(l1_sup[0], l1_sup[1], 'Level 1', fontsize=6, rotation=angle, rotation_mode='anchor') # Plot text
#Level 2 sup.
x_level_two_sup = [1,100]
y_level_two_sup = [3.1,32]
plt.plot(x_level_two_sup, y_level_two_sup, color='orange', linewidth=1.5)
# Plot text : "Level1" on garpah
l2_sup = np.array((11, 8.8)) #Location
angle = 18 # Rotate angle
plt.gca().transData.transform_angles(np.array((45,)), l2_sup.reshape((1, 2)))[0] # Position+ Rotation
plt.text(l2_sup[0], l2_sup[1], 'Level 2', fontsize=6, rotation=angle, rotation_mode='anchor') # Plot text
#Level 1 inf.
x_level_one_inf1 = [2.9,100]
y_level_one_inf1 = [0.7,3.9]
plt.plot(x_level_one_inf1, y_level_one_inf1, color='green', linewidth=1.5)
# Plot text : "Level1" on garpah
l1_inf= np.array((11, 1.4)) #Location
angle = 18 # Rotate angle
plt.gca().transData.transform_angles(np.array((45,)), l1_inf.reshape((1, 2)))[0] # Position+ Rotation
plt.text(l1_inf[0], l1_inf[1], 'Level 1', fontsize=6, rotation=angle, rotation_mode='anchor') # Plot text
#Level 1 inf2
x_level_one_inf2 = [2.6,4.5]
y_level_one_inf2 = [0.88,0.88]
plt.plot(x_level_one_inf2, y_level_one_inf2, color='green', linewidth=1.5)
#Level 1 inf3
x_level_one_inf3 = [2,2.9]
y_level_one_inf3 = [0.7,0.7]
plt.plot(x_level_one_inf3, y_level_one_inf3, color='green', linewidth=1.5)
#Level 1 Vertical 1
x_level_one_vert1 = [2.6,2.6]
y_level_one_vert1 = [0.88,3]
plt.plot(x_level_one_vert1, y_level_one_vert1, color='green', linewidth=1.5)
# Plot text : "Classes I & IV" on garpah
l1_vert1 = np.array((3, 0.93)) #Location
angle = 90 # Rotate angle
plt.gca().transData.transform_angles(np.array((45,)), l1_vert1.reshape((1, 2)))[0] # Position+ Rotation
plt.text(l1_vert1[0], l1_vert1[1], 'Classes I & IV', fontsize=6, rotation=angle, rotation_mode='anchor') # Plot text
#Level 1 Vertical 2
x_level_one_vert2 = [2,2]
y_level_one_vert2 = [0.7,2.7]
plt.plot(x_level_one_vert2, y_level_one_vert2, color='green', linewidth=1.5)
# Plot text : "Classes II & III" on garpah
l1_vert2 = np.array((2.2, 0.8)) #Location
angle = 90 # Rotate angle
plt.gca().transData.transform_angles(np.array((45,)), l1_vert1.reshape((1, 2)))[0] # Position+ Rotation
plt.text(l1_vert2[0], l1_vert2[1], 'Classes II & III', fontsize=6, rotation=angle, rotation_mode='anchor') # Plot text
#Level 2 inf.
x_level_two_inf1 = [1.4,100]
y_level_two_inf1 = [0.39,3]
plt.plot(x_level_two_inf1, y_level_two_inf1, color='orange', linewidth=1.5)
# Plot text : "Level2 & 3" on garpah
l2_inf = np.array((11, 1.08)) #Location
| |
import distutils.dir_util #for multiple file copy.
import getpass #for username.
import glob #for globbing paths.
import os #fs stat and path check.
import re #regex.
import shutil #move and copy files.
import string #for string replacal.
import StringIO #for redirecting stdout and stderr.
import subprocess #run a subprocess.
import sys #various system functions.
import tempfile #for temporary directories.
import time #for time.
#Run relative to the directory containing this script, regardless of the
#caller's current working directory.
os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])));

#Enable color support if the optional 'colorama' package is available.
#FIX: the bare 'except:' also swallowed SystemExit/KeyboardInterrupt; we
#only want to fall back when the import or init itself fails.
try:
#{
    import colorama;
    colorama.init();

    #Setup color shortcuts.
    BLACK   = colorama.Style.BRIGHT + colorama.Fore.BLACK;
    RED     = colorama.Style.BRIGHT + colorama.Fore.RED;
    GREEN   = colorama.Style.BRIGHT + colorama.Fore.GREEN;
    YELLOW  = colorama.Style.BRIGHT + colorama.Fore.YELLOW;
    BLUE    = colorama.Style.BRIGHT + colorama.Fore.BLUE;
    MAGENTA = colorama.Style.BRIGHT + colorama.Fore.MAGENTA;
    CYAN    = colorama.Style.BRIGHT + colorama.Fore.CYAN;
    WHITE   = colorama.Style.BRIGHT + colorama.Fore.WHITE;
    RESET   = colorama.Style.RESET_ALL;
#}
except Exception:
#{
    #Fallback to non-colored mode: empty escape strings.
    BLACK   = '';
    RED     = '';
    GREEN   = '';
    YELLOW  = '';
    BLUE    = '';
    MAGENTA = '';
    CYAN    = '';
    WHITE   = '';
    RESET   = '';
#}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~ Returns the temporary directory with the specified file appended. ~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def tempdir(filename=None):
#{
    """Return the path of a lazily-created temporary directory.

    On first call a temporary directory is created inside the current
    working directory and cached on the function object; later calls
    reuse the same directory.

    filename -- optional file name to append to the directory path.

    Returns the directory path (or the joined path when a filename is
    given), or None if the temporary directory could not be created.
    """
    #FIX: removed dead 'local = lambda: 0' (never used); narrowed the bare
    #'except:' so SystemExit/KeyboardInterrupt propagate; 'is None' instead
    #of '== None'.
    #Create and cache the temporary directory on first use.
    if (not hasattr(tempdir, "location")):
    #{
        try:
        #{
            tempdir.location = tempfile.mkdtemp(dir=os.getcwd());
        #}
        except Exception:
        #{
            #Creation failed (e.g. permissions) - return nothing.
            return None;
        #}
    #}
    #Check if a filename was provided.
    if (filename is None):
    #{
        #Return just the temporary directory.
        return tempdir.location;
    #}
    else:
    #{
        #Append filename.
        return os.path.join(tempdir.location, filename);
    #}
#}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~ Class to handle general patching related activities. ~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class autodoc:
#{
    """Namespace class bundling the patcher's shared state and helper
    routines: binary discovery, backup/restore, privilege escalation and
    startup-task scheduling.  All attributes below are class-level, i.e.
    shared process-wide."""
    enableService    = False;  #Whether to ask to install as a service.
    enableSilence    = False;  #Whether we are to execute silently or not.
    enableSimulation = False;  #Whether we are testing the hack.
    reversing        = False;  #Whether to reverse backups.
    found            = False;  #If any binaries are found.
    error            = False;  #If any modifications had errors.
    backupList       = [];     #List of backed up binaries (see add_backup).
    helpInfo         = [];     #List of user help arguments.
    arguments        = [];     #List of user arguments and their functions.
    preSetupCode     = [];     #List of functions to be run on pre-setup.
    postSetupCode    = [];     #List of functions to be run on post-setup.
    preTeardownCode  = [];     #List of functions to be run on pre-teardown.
    postTeardownCode = [];     #List of functions to be run on post-teardown.
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~ Overridden Exception Handler with Color and More. ~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def exception_handler(type, value, traceback):
    #{
        #Top-level exception hook: undoes any recorded binary backups,
        #then re-prints the standard traceback with ANSI colors added.
        #Reverse any backups we did before crashing.
        autodoc.reverse_backups();
        #Capture the traceback text by temporarily redirecting stderr to an
        #in-memory buffer while the default hook prints into it.
        orig_stderr = sys.stderr;
        sys.stderr = str_stderr = StringIO.StringIO();
        #Call the original exception handler.
        sys.__excepthook__(type, value, traceback);
        #Swap standard error back.
        sys.stderr = orig_stderr;
        #Grab the trace.
        trace = str_stderr.getvalue();
        #Color line numbers ("line NN,") yellow.
        regex = re.compile("line [0-9]+,");
        for match in regex.finditer(trace):
            replacal = match.group(0).rstrip(",");
            trace = trace.replace(replacal, YELLOW + replacal + RESET);
        #Color quoted file names cyan.
        regex = re.compile("File \\\".+\\\"");
        for match in regex.finditer(trace):
            replacal = match.group(0).lstrip("File \"").rstrip("\"");
            trace = trace.replace(replacal, CYAN + replacal + RESET);
        #Color function/module names (", in foo") green.
        regex = re.compile(", in.+\\\n");
        for match in regex.finditer(trace):
            replacal = match.group(0).lstrip(", in ");
            trace = trace.replace(replacal, GREEN + replacal + RESET);
        #Color the indented source lines red.
        regex = re.compile("    .+");
        for match in regex.finditer(trace):
            replacal = match.group(0);
            trace = trace.replace(replacal, RED + match.group(0) + RESET);
        #Print colored trace.
        print "\nUnhandled exception:"
        print trace;
        #Call our exit method (defined elsewhere in this file).
        autodoc.exit();
    #}
    #Install as the global exception hook.
    #NOTE(review): the chained assignment also binds a class attribute named
    #'autodoc' to this function (so autodoc.autodoc is the handler).  Confirm
    #this is intentional and not meant to be 'sys.excepthook = exception_handler'.
    sys.excepthook = autodoc = exception_handler;
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~ Tries to re-execute as admin upon error. ~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    @staticmethod
    def escalate_admin():
    #{
        #Re-execute the current command with root privileges via sudo.
        #POSIX only; a no-op on Windows or when already running as root.
        #On non-windows platforms try to execute as root.
        if os.name != "nt" and os.geteuid() != 0:
        #{
            #NOTE(review): 'display' is defined elsewhere in this file -
            #presumably a console-output helper; verify.
            display.info("Please re-execute with administrator privileges by providing your password below...");
            display.info("Please note, no characters will be shown when typing...");
            # os.execvp() replaces the running process, rather than launching a child
            # process, so there's no need to exit afterwards. The extra "sudo" in the
            # second parameter is required because Python doesn't automatically set $0
            # in the new process.
            os.execvp("sudo", ["sudo"] + sys.argv);
        #}
    #}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~ Locates the specified binaries by searching. ~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@staticmethod
def find_binaries(directories, list):
#{
expandedSet = set();
for directory in directories: #Iterate over specified directories.
for path, subdirs, files in os.walk(directory): #Walk recursively through each directory.
for file in files: #Iterate over files in the directory.
for binary in list: #Iterate over binaries.
if (file == binary): #Check if binary found.
expandedSet.add(os.path.join(path, binary));
#return valid binary list.
return sorted(expandedSet);
#}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~ Locates the specified binaries by globbing. ~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@staticmethod
def glob_binaries(list):
#{
expandedSet = set();
for item in list:
#{
#expand entry -- empty if it doesn't match any valid binaries.
expandedItem = glob.glob(item);
#Iterate over matches and add them to our list of valid binaries.
for entry in expandedItem:
#{
expandedSet.add(entry);
#}
#}
#return valid binary list.
return sorted(expandedSet);
#}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~ Locates the specified resource (bundled or relative). ~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    @staticmethod
    def find_resource(resource):
    #{
        #Locate a bundled resource file and return its path.  Looks under
        #'<base>/tools/<resource>' first, then '<base>/bundled/<resource>',
        #where <base> is the PyInstaller unpack directory when frozen, else
        #the current directory.  On failure sets autodoc.error and returns
        #None (implicitly).
        #Get the resource path for pyinstaller or fallback to cwd.
        #sys._MEIPASS only exists when running from a PyInstaller bundle.
        try:
            basePath = sys._MEIPASS;
        except:
            basePath = os.path.abspath(".");
        path = os.path.join(basePath, "tools", resource);
        #Alternative bundled path.
        if(os.path.exists(path) == False):
        #{
            path = os.path.join(basePath, "bundled", resource);
        #}
        #Check for the resources existence.
        sys.stdout.write("Checking for resource '"+ CYAN + resource + RESET + "'... ");
        if (os.path.exists(path)):
        #{
            #Exists.
            print GREEN + "found" + RESET + ".";
            return path;
        #}
        else:
        #{
            #Doesn't exist - flag the failure for the caller.
            autodoc.error = True;
            print RED + "NOT found" + RESET + ".";
        #}
    #}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~ Backs up a file and adds it to a list for later reversal. ~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    @staticmethod
    def add_backup(filename):
    #{
        #Copy 'filename' to 'filename.bak' (replacing any stale backup) and
        #record it in autodoc.backupList for later reversal.  Returns True
        #on success, False on failure (after attempting escalation).
        try:
        #{
            #Check if an older backup exists and remove.
            if (os.path.exists(filename+".bak")):
            #{
                os.remove(filename+".bak");
            #}
            #Copy (not move) so the original stays in place for patching.
            shutil.copy(filename, filename+".bak");
            autodoc.backupList += [filename];
            return True;
        #}
        except Exception as e:
        #{
            #Most likely a permissions problem - report and retry as admin.
            print CYAN + filename + RED + ":" + RED + " Failed to backup binary (" + e.strerror + ")!";
            autodoc.escalate_admin();
            return False;
        #}
    #}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~ Moves the specified backup back to its original location. ~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    @staticmethod
    def reverse_backup(filename):
    #{
        #Restore 'filename' from 'filename.bak' and discard the modified
        #binary.  Returns True on success (including when deletion was
        #scheduled for the next reboot on Windows), False otherwise.
        try:
        #{
            #Rename the file.
            #This is a fix for in use files and also race conditions in deletion
            #introduced by the "FILE_SHARE_DELETE" state on windows.
            tempFilename = filename + "." + str(time.time());
            shutil.move(filename, tempFilename);
            #Move backup back in place.
            shutil.move(filename + ".bak", filename);
            #Remove modified binary.
            os.remove(tempFilename);
            return True;
        #}
        except Exception as e:
        #{
            if os.name == "nt":
            #{
                try:
                #{
                    #Work around for in-use windows files.
                    import win32file
                    import win32api
                    #Schedule it for deletion on reboot.
                    #NOTE(review): this assumes the failure above happened at
                    #os.remove() (backup already restored).  If one of the
                    #moves failed instead, tempFilename may still hold the
                    #renamed original binary - confirm this path cannot lose
                    #the original.
                    win32file.MoveFileEx(tempFilename, None, win32file.MOVEFILE_DELAY_UNTIL_REBOOT);
                    return True;
                #}
                except Exception as e:
                #{
                    print CYAN + filename + RED + ":" + RED + " Failed to move backup in place (" + e.strerror + ")!";
                    return False;
                #}
            #}
            else:
            #{
                print CYAN + filename + RED + ":" + RED + " Failed to move backup in place (" + e.strerror + ")!";
                #escalate_admin() exec-replaces the process when it can
                #escalate; we only get past it when escalation did not run.
                autodoc.escalate_admin();
                #Set the path to the current directory.
                os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])));
                return False;
            #}
        #}
    #}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~ Moves all backups back to their original location. ~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    @staticmethod
    def reverse_backups(bPrintInfo=False):
    #{
        #Restore every recorded backup.  Entries that fail to restore stay
        #queued in autodoc.backupList so a later attempt can retry them.
        #Grab the length of the list (only used for the final separator).
        length = len(autodoc.backupList);
        #Shallow copy so we can remove entries while iterating the original.
        tempBackupList = list(autodoc.backupList);
        for filename in autodoc.backupList:
        #{
            #Print information if specified.
            if(bPrintInfo):
                print CYAN + filename + RED + ":" + RESET + " Reversing backup...";
            #Reverse the backup.
            if(autodoc.reverse_backup(filename)):
            #{
                #Remove from backup list on success.
                tempBackupList.remove(filename);
            #}
        #}
        #Print information if specified.
        if(bPrintInfo and length > 0):
        #{
            display.separator();
        #}
        #Move over the new list (should be empty unless failure occurred).
        autodoc.backupList = tempBackupList;
    #}
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~ Creates an autodoc task to run on system startup. ~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@staticmethod
def create_task():
#{
print "Scheduling task...";
#Get executable path and name combination.
if(hasattr(sys, "frozen")):
#Running from an executable.
executable = os.path.join(os.getcwd(), sys.executable);
else:
#Not running from an executable.
executable = os.path.join(os.getcwd(), __file__);
#Make taskname.
taskName = "Auto-Doc Healer [" + os.path.basename(os.getcwd()) + "]";
print
#Schedule task.
retCode = subprocess.Popen("schtasks /Create /F /TN \""+ taskName + "\" /TR \"'" + executable + "' --silent\" /SC ONSTART /RU " + getpass.getuser() + " /RP").wait();
#Check for error creating | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import division
import collections
import numpy as np
from stingray import Lightcurve
from stingray.events import EventList
import stingray.utils as utils
__all__ = ['Covariancespectrum', 'AveragedCovariancespectrum']
class Covariancespectrum(object):
"""
Compute a covariance spectrum for the data. The input data can be
either in event data or pre-made light curves. Event data can either
be in the form of a ``numpy.ndarray`` with ``(time stamp, energy)`` pairs or
a :class:`stingray.events.EventList` object. If light curves are formed ahead
of time, then a list of :class:`stingray.Lightcurve` objects should be passed to the
object, ideally one light curve for each band of interest.
For the case where the data is input as a list of :class:`stingray.Lightcurve` objects,
the reference band(s) should either be
1. a single :class:`stingray.Lightcurve` object,
2. a list of :class:`stingray.Lightcurve` objects with the reference band for each band
of interest pre-made, or
    3. ``None``, in which case reference bands will be
    formed by combining all light curves *except* for the band of interest.
In the case of event data, ``band_interest`` and ``ref_band_interest`` can
be (multiple) pairs of energies, and the light curves for the bands of
interest and reference bands will be produced dynamically.
Parameters
----------
data : {``numpy.ndarray`` | :class:`stingray.events.EventList` object | list of :class:`stingray.Lightcurve` objects}
``data`` contains the time series data, either in the form of a
2-D array of ``(time stamp, energy)`` pairs for event data, or as a
list of light curves.
Note : The event list must be in sorted order with respect to the
times of arrivals.
dt : float
The time resolution of the :class:`stingray.Lightcurve` formed from the energy bin.
Only used if ``data`` is an event list.
band_interest : {``None``, iterable of tuples}
If ``None``, all possible energy values will be assumed to be of
interest, and a covariance spectrum in the highest resolution
will be produced.
Note: if the input is a list of :class:`stingray.Lightcurve` objects, then the user may
supply their energy values here, for construction of a
reference band.
ref_band_interest : {``None``, tuple, :class:`stingray.Lightcurve`, list of :class:`stingray.Lightcurve` objects}
Defines the reference band to be used for comparison with the
bands of interest. If ``None``, all bands *except* the band of
interest will be used for each band of interest, respectively.
Alternatively, a tuple can be given for event list data, which will
extract the reference band (always excluding the band of interest),
or one may put in a single :class:`stingray.Lightcurve` object to be used (the same
for each band of interest) or a list of :class:`stingray.Lightcurve` objects, one for
each band of interest.
std : float or np.array or list of numbers
The term ``std`` is used to calculate the excess variance of a band.
If ``std`` is set to ``None``, default Poisson case is taken and the
std is calculated as ``mean(lc)**0.5``. In the case of a single
float as input, the same is used as the standard deviation which
is also used as the std. And if the std is an iterable of
numbers, their mean is used for the same purpose.
Attributes
----------
unnorm_covar : np.ndarray
An array of arrays with mid point ``band_interest`` and their
covariance. It is the array-form of the dictionary ``energy_covar``.
The covariance values are unnormalized.
covar : np.ndarray
Normalized covariance spectrum.
covar_error : np.ndarray
Errors of the normalized covariance spectrum.
References
----------
[1] <NAME>. and <NAME>. (2009), Accretion disc variability\
in the hard state of black hole X-ray binaries. Monthly Notices\
of the Royal Astronomical Society, 397: 666–676.\
doi: 10.1111/j.1365-2966.2009.15008.x
Examples
--------
See the `notebooks repository <https://github.com/StingraySoftware/notebooks>`_ for
detailed notebooks on the code.
"""
    def __init__(self, data, dt=None, band_interest=None,
                 ref_band_interest=None, std=None):
        """Normalize the inputs into per-band light curves plus reference
        band(s), then compute the covariance spectrum (see class docstring
        for parameter descriptions)."""
        self.dt = dt
        self.std = std

        # check whether data is an EventList object; if so, flatten it into
        # an (N, 2) array of (arrival time, energy) rows
        if isinstance(data, EventList):
            data = np.vstack([data.time, data.energy]).T

        # check whether the data contains a list of Lightcurve objects
        if isinstance(data[0], Lightcurve):
            self.use_lc = True
            self.lcs = data
        else:
            self.use_lc = False

        # if band_interest is None, extract the energy bins and make an array
        # with the lower and upper bounds of the energy bins
        if not band_interest:
            if not self.use_lc:
                # helper defined elsewhere in this class: derives
                # self.band_interest from the event energies
                self._create_band_interest(data)
            else:
                # one unit-width pseudo-band per input light curve:
                # [[0, 1], [1, 2], ...]
                self.band_interest = np.vstack([np.arange(len(data)),
                                                np.arange(1, len(data)+1, 1)]).T
        else:
            if np.size(band_interest) < 2:
                raise ValueError('band_interest must contain at least 2 values '
                                 '(minimum and maximum values for each band) '
                                 'and be a 2D array!')

            # ensure shape (n_bands, 2) even if a single band was given
            self.band_interest = np.atleast_2d(band_interest)

        if self.use_lc is False and not dt:
            raise ValueError("If the input data is event data, the dt keyword "
                             "must be set and supply a time resolution for "
                             "creating light curves!")

        # if we don't have light curves already, make them:
        if not self.use_lc:
            # warn (via stingray's simon) and sort when arrival times are
            # not monotonically non-decreasing
            if not np.all(np.diff(data, axis=0).T[0] >= 0):
                utils.simon("The event list must be sorted with respect to "
                            "times of arrivals.")
                data = data[data[:, 0].argsort()]

            # helper defined elsewhere in this class
            self.lcs = self._make_lightcurves(data)

        # check whether band of interest contains a Lightcurve object:
        if np.size(ref_band_interest) == 1 or isinstance(ref_band_interest,
                                                         Lightcurve):
            if isinstance(ref_band_interest, Lightcurve):
                self.ref_band_lcs = ref_band_interest

            # ref_band_interest must either be a Lightcurve, or must have
            # multiple entries
            elif ref_band_interest is None:
                if self.use_lc:
                    # passes None through, i.e. default bounds
                    self.ref_band_lcs = \
                        self._make_reference_bands_from_lightcurves(ref_band_interest)
                else:
                    self.ref_band_lcs = \
                        self._make_reference_bands_from_event_data(data)
            else:
                raise ValueError("ref_band_interest must contain either "
                                 "a Lightcurve object, a list of Lightcurve "
                                 "objects or a tuple of length 2.")
        else:
            # check whether ref_band_interest is a list of light curves
            if isinstance(ref_band_interest[0], Lightcurve):
                self.ref_band_lcs = ref_band_interest
                assert len(ref_band_interest) == len(self.lcs), "The list of " \
                                                                "reference light " \
                                                                "curves must have " \
                                                                "the same length as " \
                                                                "the list of light curves" \
                                                                "of interest."
            # if not, it must be a tuple, so we're going to make a list of light
            # curves
            else:
                if self.use_lc:
                    self.ref_band_lcs = \
                        self._make_reference_bands_from_lightcurves(bounds=
                                                                    ref_band_interest)
                else:
                    self.ref_band_lcs = \
                        self._make_reference_bands_from_event_data(data)

        # helper defined elsewhere in this class; fills the unnorm_covar,
        # covar and covar_error attributes documented on the class
        self._construct_covar()
    def _make_reference_bands_from_event_data(self, data, bounds=None):
        """
        Helper method constructing reference bands for each band of interest, and constructing
        light curves from these reference bands. This operates only if the data given to
        :class:`Covariancespectrum` is event list data (i.e. photon arrival times and energies).

        Parameters
        ----------
        data : numpy.ndarray
            Array of shape ``(N, 2)``, where N is the number of photons. First column contains the
            times of arrivals, second column the corresponding photon energies.

        bounds : iterable
            The energy bounds to use for the reference band. Must be of type ``(elow, ehigh)``.
            Defaults to the full energy range present in ``data``.

        Returns
        -------
        lc_all: list of :class:`stingray.Lightcurve` objects.
            The list of `:class:`stingray.Lightcurve` objects containing all reference
            bands, between the values given in ``bounds``.
            NOTE(review): when the reference range does not overlap any band
            of interest (first branch below), a *single* Lightcurve is
            returned instead of a list - confirm callers handle both.
        """
        # NOTE(review): self.tstart and self.tseg are not assigned anywhere
        # in the code shown here - presumably set elsewhere; verify before
        # relying on this method.
        if not bounds:
            bounds = [np.min(data[:, 1]), np.max(data[:, 1])]

        # reference range lies entirely below or entirely above every band
        # of interest: a single common reference band serves all bands
        if bounds[1] <= np.min(self.band_interest[:, 0]) or \
                bounds[0] >= np.max(self.band_interest[:, 1]):
            elow = bounds[0]
            ehigh = bounds[1]

            # select all events whose energy falls inside the bounds
            toa = data[np.logical_and(
                data[:, 1] >= elow,
                data[:, 1] <= ehigh)]

            lc_all = Lightcurve.make_lightcurve(toa, self.dt,
                                                tstart=self.tstart,
                                                tseg=self.tseg)
        else:
            # otherwise, build one reference band per band of interest,
            # using the events inside the bounds but *outside* that band
            lc_all = []
            for i, b in enumerate(self.band_interest):
                elow = b[0]
                ehigh = b[1]

                # events below the band of interest (down to bounds[0]) ...
                emask1 = data[np.logical_and(
                    data[:, 1] <= elow,
                    data[:, 1] >= bounds[0])]

                # ... and above it (up to bounds[1])
                emask2 = data[np.logical_and(
                    data[:, 1] <= bounds[1],
                    data[:, 1] >= ehigh)]

                toa = np.vstack([emask1, emask2])
                lc = Lightcurve.make_lightcurve(toa, self.dt,
                                                tstart=self.tstart,
                                                tseg=self.tseg)
                lc_all.append(lc)

        return lc_all
def _make_reference_bands_from_lightcurves(self, bounds=None):
'''
Helper class to construct reference bands for all light curves in ``band_interest``, assuming the
data is given to the class :class:`Covariancespectrum` as a (set of) lightcurve(s). Generally
sums up all other light curves within ``bounds`` that are *not* the band of interest.
Parameters
----------
bounds : iterable
The energy bounds to use for the reference band. Must be of type ``(elow, ehigh)``.
Returns
-------
lc_all: list of :class:`stingray.Lightcurve` objects.
The list of :class:`stingray.Lightcurve` objects containing all reference bands,
between the values given in ``bounds``.
'''
if not bounds:
bounds_idx = [0, len(self.band_interest)]
else:
low_bound = self.band_interest.searchsorted(bounds[0])
high_bound = self.band_interest.searchsorted(bounds[1])
bounds_idx = [low_bound, high_bound]
lc_all = []
for i, b in enumerate(self.band_interest):
# initialize empty counts array
counts = np.zeros_like(self.lcs[0].counts)
for j in range(bounds_idx[0], bounds_idx[1], 1):
if i == j:
continue
else:
counts += self.lcs[j].counts
# make a | |
"v6.0.11": True
}
},
{
"value": "dialup",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "peer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "peergrp",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ipv6_dns_server2": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"wizard_type": {
"type": "string",
"options": [
{
"value": "custom",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "dialup-forticlient",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "dialup-ios",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "dialup-android",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "dialup-windows",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "dialup-cisco",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "static-fortigate",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "dialup-fortigate",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "static-cisco",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "dialup-cisco-fw",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "simplified-static-fortigate",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "hub-fortigate-auto-discovery",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "spoke-fortigate-auto-discovery",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"network_overlay": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"ipv6_exclude_range": {
"type": "list",
"children": {
"start_ip": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"end_ip": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"mode_cfg": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"send_cert_chain": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"fec_egress": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"fec_send_timeout": {
"type": "integer",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"add_gw_route": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"usrgrp": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ppk_identity": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"idle_timeout": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": | |
import json
import math
from typing import Union, Optional, List
from .core import pack, data as _data, Data, Ref, Expando, expando_to_dict
# TODO add formal parameters for shape functions, including presentation attributes:
# https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/Presentation
def stage(**kwargs) -> str:
    """
    Create a stage. A stage holds static graphics elements that are rendered
    behind the scene as part of the background. Assign the return value to
    the `stage` property of a `h2o_wave.types.GraphicsCard`.

    Args:
        kwargs: Graphical elements to render as part of the stage.
    Returns:
        Packed data.
    """
    elements = []
    for element in kwargs.values():
        elements.append(expando_to_dict(element))
    return pack(elements)
def scene(**kwargs) -> Data:
    """
    Create a scene. A scene holds graphic elements whose attributes need to be
    changed dynamically (causing a re-render). Assign the return value to the
    `scene` property of a `h2o_wave.types.GraphicsCard`.

    Args:
        kwargs: Graphical elements to render as part of the scene.
    Returns:
        A `h2o_wave.core.Data` instance.
    """
    rows = {}
    for name, element in kwargs.items():
        rows[name] = [json.dumps(expando_to_dict(element)), '']
    return _data(fields='d o', rows=rows)
def draw(element: Ref, **kwargs) -> Ref:
    """
    Schedule a redraw of the specified graphical element using the provided
    attributes.

    Args:
        element: A reference to a graphical element.
        kwargs: Attributes to use while performing a redraw.
    Returns:
        The element reference, without change.
    """
    serialized = json.dumps(kwargs)
    element['o'] = serialized
    return element
def reset(element: Ref) -> Ref:
    """
    Schedule a redraw of the specified graphical element using its original
    attributes, clearing any changes made via `h2o_wave.graphics.draw`.

    Args:
        element: A reference to a graphical element.
    Returns:
        The element reference, without change.
    """
    element['o'] = ''
    return element
def _el(t: str, d: dict) -> Expando:
    """Tag an attribute dict with its element type and wrap it as an Expando."""
    d.update(_t=t)
    return Expando(d)
# Map from the internal single/double-letter element tag to a readable name.
_element_types = {
    'a': 'arc',
    'c': 'circle',
    'e': 'ellipse',
    'i': 'image',
    'l': 'line',
    'p': 'path',
    'pg': 'polygon',
    'pl': 'polyline',
    's': 'spline',
    'r': 'rect',
    't': 'text',
}


def type_of(element: Expando) -> Optional[str]:
    """
    Get the type of the graphical element.

    Args:
        element: A graphical element.
    Returns:
        A string indicating the type of the element, e.g. 'circle', 'line',
        etc., or None if the tag is unknown.
    """
    tag = element['_t']
    return _element_types.get(tag, None)
def arc(r1: float, r2: float, a1: float, a2: float, **kwargs) -> Expando:
    """
    Draw a circular or annular sector, as in a pie or donut chart, centered
    at (0, 0).

    Args:
        r1: inner radius.
        r2: outer radius.
        a1: start angle, in degrees.
        a2: end angle, in degrees.
        kwargs: Attributes to use for the initial render. SVG attributes, snake-cased.
    Returns:
        Data for the graphical element.
    """
    params = dict(r1=r1, r2=r2, a1=a1, a2=a2, **kwargs)
    return _el('a', params)
def circle(**kwargs) -> Expando:
    """
    A circle element (SVG ``<circle>``).
    See https://developer.mozilla.org/en-US/docs/Web/SVG/Element/circle

    Args:
        kwargs: Attributes to use for the initial render. SVG attributes, snake-cased.
    Returns:
        Data for the graphical element.
    """
    return _el('c', kwargs)
def ellipse(**kwargs) -> Expando:
    """
    An ellipse element (SVG ``<ellipse>``).
    See https://developer.mozilla.org/en-US/docs/Web/SVG/Element/ellipse

    Args:
        kwargs: Attributes to use for the initial render. SVG attributes, snake-cased.
    Returns:
        Data for the graphical element.
    """
    return _el('e', kwargs)
def image(**kwargs) -> Expando:
    """
    An image element (SVG ``<image>``).
    See https://developer.mozilla.org/en-US/docs/Web/SVG/Element/image

    Args:
        kwargs: Attributes to use for the initial render. SVG attributes, snake-cased.
    Returns:
        Data for the graphical element.
    """
    return _el('i', kwargs)
def line(**kwargs) -> Expando:
    """
    A line element (SVG ``<line>``).
    See https://developer.mozilla.org/en-US/docs/Web/SVG/Element/line

    Args:
        kwargs: Attributes to use for the initial render. SVG attributes, snake-cased.
    Returns:
        Data for the graphical element.
    """
    return _el('l', kwargs)
def path(**kwargs) -> Expando:
    """
    A path element (SVG ``<path>``).
    See https://developer.mozilla.org/en-US/docs/Web/SVG/Element/path

    Args:
        kwargs: Attributes to use for the initial render. SVG attributes, snake-cased.
    Returns:
        Data for the graphical element.
    """
    return _el('p', kwargs)
def polygon(**kwargs) -> Expando:
    """
    A polygon element (SVG ``<polygon>``).
    See https://developer.mozilla.org/en-US/docs/Web/SVG/Element/polygon

    Args:
        kwargs: Attributes to use for the initial render. SVG attributes, snake-cased.
    Returns:
        Data for the graphical element.
    """
    return _el('pg', kwargs)
def polyline(**kwargs) -> Expando:
    """
    A polyline element (SVG ``<polyline>``).
    See https://developer.mozilla.org/en-US/docs/Web/SVG/Element/polyline

    Args:
        kwargs: Attributes to use for the initial render. SVG attributes, snake-cased.
    Returns:
        Data for the graphical element.
    """
    return _el('pl', kwargs)
Floats = Optional[List[Optional[float]]]
def _str(fs: Floats) -> Optional[str]:
if fs is None:
return None
return ' '.join(['' if f is None else str(round(f, 2)) for f in fs])
def spline(x: Floats = None, y: Floats = None,
           x0: Floats = None, y0: Floats = None,
           curve: Optional[str] = None, radial: Optional[bool] = None, **kwargs) -> Expando:
    """
    Draw a spline.

    If x, y are specified, draws a regular spline.
    If x, y, y0 are specified, draws a horizontal area spline. Sets baseline to zero if y0 is an empty list.
    If x, x0, y are specified, draws a vertical area spline. Sets baseline to zero if x0 is an empty list.
    Missing information is rendered as gaps in the spline.

    Args:
        x: x-coordinates.
        y: y-coordinates.
        x0: base x-coordinates.
        y0: base y-coordinates.
        curve: Interpolation. One of basis, basis-closed, basis-open, cardinal, cardinal-closed, cardinal-open, smooth, smooth-closed, smooth-open, linear, linear-closed, monotone-x, monotone-y, natural, step, step-after, step-before. Defaults to linear.
        radial: Whether (x, y) should be treated as (angle,radius) or (x0, x, y0, y) should be treated as (start-angle, end-angle, inner-radius, outer-radius).
        kwargs: Attributes to use for the initial render. SVG attributes, snake-cased.
    Returns:
        Data for the graphical element.
    """
    raw = {
        'x': _str(x), 'y': _str(y),
        'x0': _str(x0), 'y0': _str(y0),
        'curve': curve, 'radial': radial,
    }
    # Drop unset attributes so they don't appear in the serialized element.
    attrs = {k: v for k, v in raw.items() if v is not None}
    return _el('s', dict(**attrs, **kwargs))
def rect(**kwargs) -> Expando:
    """
    A rectangle element (SVG ``<rect>``).
    See https://developer.mozilla.org/en-US/docs/Web/SVG/Element/rect

    Args:
        kwargs: Attributes to use for the initial render. SVG attributes, snake-cased.
    Returns:
        Data for the graphical element.
    """
    return _el('r', kwargs)
def text(text: str, **kwargs) -> Expando:
    """
    A text element (SVG ``<text>``).
    See https://developer.mozilla.org/en-US/docs/Web/SVG/Element/text

    Args:
        text: The text content.
        kwargs: Attributes to use for the initial render. SVG attributes, snake-cased.
    Returns:
        Data for the graphical element.
    """
    attrs = dict(text=text, **kwargs)
    return _el('t', attrs)
class Path:
"""
A convenience class for drawing SVG paths.
"""
def __init__(self):
self.__d = []
def _d(self, command: str, *args) -> 'Path':
self.__d.append(command)
for arg in args:
self.__d.append(str(round(arg, 2) if isinstance(arg, float) else arg))
return self
def d(self) -> str:
"""
Serialize this path's commands into SVG path data.
Returns:
The ``d`` attribute for a SVG path.
"""
return ' '.join(self.__d)
def path(self, **kwargs) -> Expando:
"""
A SVG path element representing the commands in this ``Path`` instance.
Same as calling ``h2o_wave.graphics.path(d=path.d())``
Args:
kwargs: Additional attributes for the SVG path element.
Returns:
A SVG path element.
"""
return path(d=self.d(), **kwargs)
def M(self, x: float, y: float) -> 'Path':
"""
Start a new sub-path at the given (x,y) coordinates.
In absolute coordinates.
See https://www.w3.org/TR/SVG/paths.html#PathDataMovetoCommands
Args:
x: x-coordinate
y: y-coordinate
Returns:
The current ``Path`` instance.
"""
return self._d('M', x, y)
def m(self, x: float, y: float) -> 'Path':
"""
Start a new sub-path at the given (x,y) coordinates.
In relative coordinates.
See https://www.w3.org/TR/SVG/paths.html#PathDataMovetoCommands
Args:
x: x-coordinate
y: y-coordinate
Returns:
The current ``Path`` instance.
"""
return self._d('m', x, y)
def Z(self) -> 'Path':
"""
Close the current subpath by connecting it back to the current subpath's initial point.
See https://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand
Returns:
The current ``Path`` instance.
"""
return self._d('Z')
def z(self) -> 'Path':
"""
Close the current subpath by connecting it back to the current subpath's initial point.
See https://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand
Returns:
The current ``Path`` instance.
"""
return self._d('z')
def L(self, x: float, y: float) -> 'Path':
"""
Draw a line from the current point to the given (x,y) coordinate which becomes the new current point.
In absolute coordinates.
See https://www.w3.org/TR/SVG/paths.html#PathDataLinetoCommands
Args:
x: x-coordinate
y: y-coordinate
Returns:
The current ``Path`` instance.
"""
return self._d('L', x, y)
def l(self, x: float, y: float) -> 'Path':
"""
Draw a line from the current point to the given (x,y) coordinate which becomes the new current point.
In relative coordinates.
See https://www.w3.org/TR/SVG/paths.html#PathDataLinetoCommands
Args:
x: x-coordinate
y: y-coordinate
Returns:
The current ``Path`` instance.
"""
return self._d('l', x, y)
def H(self, x: float) -> 'Path':
"""
Draws a horizontal line from the current point.
In absolute coordinates.
See https://www.w3.org/TR/SVG/paths.html#PathDataLinetoCommands
Args:
x: x-coordinate
Returns:
The current ``Path`` instance.
"""
return self._d('H', x)
def h(self, x: float) -> 'Path':
"""
Draws a horizontal line from the current point.
In relative coordinates.
See https://www.w3.org/TR/SVG/paths.html#PathDataLinetoCommands
Args:
x: x-coordinate
Returns:
The current ``Path`` instance.
"""
return self._d('h', x)
def V(self, y: float) -> 'Path':
"""
Draws a vertical line from the current point.
In absolute | |
<filename>ChrisFuncs/ChrisFuncs.py<gh_stars>0
# Import smorgasbord
import pdb
import pdb
"""from IPython import get_ipython
get_ipython().run_line_magic('pdb','on')"""
import sys
import os
#sys.path.append( os.path.split( os.path.realpath(__file__) )[:-1][0] )
#sys.path.append( os.path.split( os.path.split( os.path.realpath(__file__) )[:-1][0] )[:-1][0] )
#sys.path.insert(0, '../')
import numpy as np
import scipy.stats
import scipy.ndimage
import scipy.ndimage.measurements
import scipy.spatial
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm
import matplotlib.patches
import astropy
astropy.log.setLevel('ERROR')
import astropy.io.fits
import astropy.wcs
import astropy.convolution
import astropy.coordinates
import astropy.units
import astroquery.irsa_dust
import shutil
import wget
import glob
import time
import re
import copy
# A Python 2/3 compatibility hack for string type handling
try:
basestring
except NameError:
basestring = str
def EllipseSum(array, rad, axial_ratio, angle, i_centre, j_centre):
    """Sum all elements of ``array`` inside the given ellipse.

    Args: array, semi-major axis (pix), axial ratio, position angle (deg),
    i & j coords of the ellipse centre.
    Returns: sum of the pixel values in the ellipse, the number of pixels
    counted, and an array of the pixel values.
    """
    from . import Photom
    return Photom.EllipseSum(array, rad, axial_ratio, angle, i_centre, j_centre)


def AnnulusSum(array, rad_inner, width, axial_ratio, angle, i_centre, j_centre):
    """Sum all elements of ``array`` inside the given elliptical annulus.

    Args: array, semi-major axis of the annulus inner edge (pix), annulus
    width (pix), axial ratio, position angle (deg), i & j coords of centre.
    Returns: sum of the pixel values in the annulus, the number of pixels
    counted, and an array of the pixel values.
    """
    from . import Photom
    return Photom.AnnulusSum(array, rad_inner, width, axial_ratio, angle, i_centre, j_centre)


def AnnulusQuickPrepare(array, angle, i_centre, j_centre):
    """Pre-compute transposed coordinate arrays that repeated annular
    photometry calls can reuse.

    Returns: list containing the i & j transposed coordinate arrays.
    """
    from . import Photom
    return Photom.AnnulusQuickPrepare(array, angle, i_centre, j_centre)


def AnnulusQuickSum(array, rad_inner, width, axial_ratio, angle, i_centre, j_centre, i_trans, j_trans):
    """Annulus sum using pre-prepared transposed coordinate arrays
    (from AnnulusQuickPrepare).

    Returns: sum of the pixel values in the annulus, the number of pixels
    counted, and an array of the pixel values.
    """
    from . import Photom
    return Photom.AnnulusQuickSum(array, rad_inner, width, axial_ratio, angle, i_centre, j_centre, i_trans, j_trans)


def EllipseQuickSum(array, rad, axial_ratio, angle, i_centre, j_centre, i_trans, j_trans):
    """Ellipse sum using pre-prepared transposed coordinate arrays
    (from AnnulusQuickPrepare).

    Returns: sum of the pixel values in the ellipse, the number of pixels
    counted, and an array of the pixel values.
    """
    from . import Photom
    return Photom.EllipseQuickSum(array, rad, axial_ratio, angle, i_centre, j_centre, i_trans, j_trans)


def EllipseMask(array, rad, axial_ratio, angle, i_centre, j_centre):
    """Return a mask of the same shape as ``array`` in which pixels lying
    within the given ellipse have value 1."""
    from . import Photom
    return Photom.EllipseMask(array, rad, axial_ratio, angle, i_centre, j_centre)


def CircleSum(fits, i_centre, j_centre, r):
    """Sum all pixel elements inside the given circle.

    Returns: sum of elements within the circle, and the pixel count.
    """
    from . import Photom
    return Photom.CircleSum(fits, i_centre, j_centre, r)


def CircleAnnulusSum(fits, i_centre, j_centre, r, width):
    """Sum all pixel elements inside the given circular annulus.

    Returns: sum of elements within the annulus, and the pixel count.
    """
    from . import Photom
    return Photom.CircleAnnulusSum(fits, i_centre, j_centre, r, width)
# Function to sum all elements in an ellipse centred on the middle of an array that has been resized to allow better pixel sampling
# Args: Array, semi-major axis (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse, upscaling factor
# Returns: Numpy array containing the sum of the pixel values in the ellipse, the total number of pixels counted, and an array containing the pixel values
def EllipseSumUpscale(cutout, rad, axial_ratio, angle, i_centre, j_centre, upscale=1):
    from . import Photom
    # Bug fix: the caller's upscale value was previously discarded because a
    # hard-coded upscale=1 was always forwarded, so upsampling never happened.
    return Photom.EllipseSumUpscale(cutout, rad, axial_ratio, angle, i_centre,
                                    j_centre, upscale=upscale)
# Function to sum all elements in an annulus centred upon the middle of an array that has been resized to allow better pixel sampling
# Args: Array, semi-major axis of inside edge of annulus (pix), width of annulus (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse, upscaling factor
# Returns: Numpy array containing the sum of the pixel values in the annulus, the total number of pixels counted, and an array containing the pixel values
def AnnulusSumUpscale(cutout, rad_inner, width, axial_ratio, angle, i_centre, j_centre, upscale=1):
    from . import Photom
    # Bug fix: the caller's upscale value was previously discarded because a
    # hard-coded upscale=1 was always forwarded, so upsampling never happened.
    return Photom.AnnulusSumUpscale(cutout, rad_inner, width, axial_ratio, angle,
                                    i_centre, j_centre, upscale=upscale)
# Function to iteratively calculate SPIRE aperture noise of photometry cutout using randomly-placed (annular-background-subtracted) circular aperture
# Args: Map, area of aperture (pix), boolean of whether or not to sky-subtract the noise apertures, relative radius of inner edge of annulus, relative width of annulus, angle of source ellipse, axial ratio of source ellipse, number of apertures
# Returns: Aperture standard deviation, and list of mean background values, list of aperture sum values
def CircularApertureStandardDeviationFinder(fits, area, ann=True, ann_inner=1.5, ann_width=1.0, angle=0.0, axial_ratio=1.0, apertures=100):
    from . import Photom
    # Bug fix: every keyword was previously re-hard-coded to its default when
    # forwarding, silently ignoring whatever values the caller supplied.
    return Photom.CircularApertureStandardDeviationFinder(fits, area, ann=ann,
                                                          ann_inner=ann_inner,
                                                          ann_width=ann_width,
                                                          angle=angle,
                                                          axial_ratio=axial_ratio,
                                                          apertures=apertures)
def ContiguousPixels(cutout, rad_initial, i_centre, j_centre, cutoff):
    """Find all contiguous pixels lying above the given flux limit.

    Args: array, radius of guess region (pix), i & j coords of the guess
    region centre, cutoff value for pixel selection.
    Returns: array of ones and zeros indicating the contiguous region.
    """
    from . import Photom
    return Photom.ContiguousPixels(cutout, rad_initial, i_centre, j_centre, cutoff)


def EllipseFit(x, y):
    """Fit an ellipse to the given x & y coordinates (convex hull, fit, then
    extraction of the ellipse's properties).

    Returns: array of the ellipse centre's x & y coordinates, array of the
    ellipse's major & minor axes, and the ellipse's position angle.
    """
    from . import Photom
    return Photom.EllipseFit(x, y)


def EllipseCentre(a):
    """Return the x & y coordinates of the centre of an ellipse produced by
    EllipseFit."""
    from . import Photom
    return Photom.EllipseCentre(a)


def EllipseAxes(a):
    """Return the lengths of the major & minor axes of an ellipse produced by
    EllipseFit."""
    from . import Photom
    return Photom.EllipseAxes(a)


def EllipseAngle(a):
    """Return the position angle of an ellipse produced by EllipseFit."""
    from . import Photom
    return Photom.EllipseAngle(a)


def FitsCutout(pathname, ra, dec, rad_arcsec, pix_width_arcsec=None, exten=0, reproj=False, variable=False, outfile=False, parallel=True, fast=True):
    """Create a cutout of a FITS file (wrapper around an Astropy function).

    Args: input FITS path, cutout central RA (deg), central Dec (deg), cutout
    radius (arcsec), pixel width (arcsec), FITS extension, whether to
    reproject, whether to return a variable, output path if required.
    Returns: HDU of the new file.
    """
    from . import Fits
    return Fits.FitsCutout(pathname, ra, dec, rad_arcsec, pix_width_arcsec=pix_width_arcsec, exten=exten, reproj=reproj, variable=variable, outfile=outfile, parallel=parallel, fast=fast)
# Function to embed a fits file in a larger array of NaNs (for APLpy or the like)
# Args: Input fits pathname, margin to place around array, fits extension of interest, boolean stating if | |
from datetime import time, date, datetime, timedelta
import pytest
import hallo.modules.dailys.dailys_field
from hallo.events import EventMessage, RawDataTelegram, EventMinute
from hallo.modules.dailys.field_mood import DailysMoodField, MoodTime
from hallo.test.modules.dailys.dailys_spreadsheet_mock import DailysSpreadsheetMock
class Obj:
    """Bare attribute container used to fake Telegram API objects in tests."""
def get_telegram_time(date_time_val):
    """Build a fake Telegram update whose message carries the given datetime
    and is not a reply to anything."""
    update = Obj()
    update.message = Obj()
    update.message.date = date_time_val
    update.message.reply_to_message = None
    return update
def get_telegram_time_reply(date_time_val, message_id):
    """Build a fake Telegram update whose message carries the given datetime
    and replies to the message with the given id."""
    update = Obj()
    update.message = Obj()
    update.message.date = date_time_val
    update.message.reply_to_message = Obj()
    update.message.reply_to_message.message_id = message_id
    return update
def test_create_from_input(hallo_getter, requests_mock):
    """Field setup should read mood times and mood names from the dailys
    static data endpoint."""
    # Configuration the mocked dailys API will report for the mood stat.
    dailys_times = ["WakeUpTime", "12:00:00", "SleepTime"]
    dailys_moods = ["happiness", "anger", "tiredness", "boisterousness"]
    # Setup stuff
    command_name = "setup dailys field"
    command_args = "mood"
    test_hallo = hallo_getter({"dailys"})
    evt = EventMessage(
        test_hallo.test_server,
        test_hallo.test_chan,
        test_hallo.test_user,
        "{} {}".format(command_name, command_args),
    )
    evt.split_command_text(command_name, command_args)
    spreadsheet = DailysSpreadsheetMock(test_hallo.test_user, test_hallo.test_chan)
    # Mock the static-data endpoint so field creation can fetch the
    # configured mood times and mood names.
    requests_mock.get(
        "{}/stats/mood/static/".format(spreadsheet.dailys_url),
        json=[
            {
                "date": "static",
                "source": "Mock test data",
                "stat_name": "mood",
                "data": {
                    "moods": dailys_moods,
                    "times": dailys_times
                }
            }
        ]
    )
    # Try and create dailys field
    field = DailysMoodField.create_from_input(evt, spreadsheet)
    assert field.spreadsheet == spreadsheet
    # The special names map to MoodTime sentinels; "12:00:00" to a time value.
    assert isinstance(field.times, list)
    assert len(field.times) == 3
    assert MoodTime(MoodTime.WAKE) in field.times
    assert MoodTime(MoodTime.SLEEP) in field.times
    assert MoodTime(time(12, 0, 0)) in field.times
    # The mood names should be passed through unchanged.
    assert isinstance(field.moods, list)
    assert len(field.moods) == 4
    assert field.moods == dailys_moods
def test_create_from_input__no_static_data(hallo_getter, requests_mock):
    """Field creation should fail cleanly when no static mood data exists."""
    cmd_name = "setup dailys field"
    cmd_args = "mood"
    test_hallo = hallo_getter({"dailys"})
    evt = EventMessage(
        test_hallo.test_server,
        test_hallo.test_chan,
        test_hallo.test_user,
        f"{cmd_name} {cmd_args}",
    )
    evt.split_command_text(cmd_name, cmd_args)
    spreadsheet = DailysSpreadsheetMock(test_hallo.test_user, test_hallo.test_chan)
    # The static-data endpoint returns an empty list
    requests_mock.get(f"{spreadsheet.dailys_url}/stats/mood/static/", json=[])
    # Creation should raise a DailysException explaining the missing setup
    with pytest.raises(hallo.modules.dailys.dailys_field.DailysException) as e:
        DailysMoodField.create_from_input(evt, spreadsheet)
    assert "mood field static data has not been set up on dailys system" in str(e.value).lower()
def test_trigger_morning_query(hallo_getter):
    """A 'morning' message should trigger the wake-time mood query."""
    test_hallo = hallo_getter({"dailys"})
    spreadsheet = DailysSpreadsheetMock(test_hallo.test_user, test_hallo.test_chan)
    mood_names = ["Happiness", "Anger", "Tiredness"]
    field = DailysMoodField(
        spreadsheet,
        [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0))],
        mood_names,
    )
    # Trigger with a wake-up message
    evt_wake = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "morning")
    field.passive_trigger(evt_wake)
    # A wake entry with a message id should be recorded for today
    day_data = spreadsheet.saved_data["mood"][evt_wake.get_send_time().date()]
    assert MoodTime.WAKE in day_data
    assert "message_id" in day_data[MoodTime.WAKE]
    # The query message should mention the time and list every configured mood
    sent = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
    assert "how are you feeling" in sent[0].text.lower()
    assert MoodTime.WAKE in sent[0].text
    assert all(mood in sent[0].text for mood in mood_names)
def test_trigger_sleep_query(hallo_getter):
    """A 'night' message should trigger the sleep-time mood query."""
    test_hallo = hallo_getter({"dailys"})
    mood_names = ["Happiness", "Anger", "Tiredness"]
    evt_sleep = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "night")
    # Earlier mood entries for today are already answered
    day_data = {
        MoodTime.WAKE: {"message_id": 1232, **{mood: 3 for mood in mood_names}},
        str(time(14, 0, 0)): {"message_id": 1234, **{mood: 2 for mood in mood_names}},
    }
    spreadsheet = DailysSpreadsheetMock(
        test_hallo.test_user,
        test_hallo.test_chan,
        saved_data={"mood": {evt_sleep.get_send_time().date(): day_data}},
    )
    field = DailysMoodField(
        spreadsheet,
        [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0)), MoodTime(MoodTime.SLEEP)],
        mood_names,
    )
    field.passive_trigger(evt_sleep)
    # A sleep entry with a message id should be recorded for today
    notif = spreadsheet.saved_data["mood"][evt_sleep.get_send_time().date()]
    assert MoodTime.SLEEP in notif
    assert "message_id" in notif[MoodTime.SLEEP]
    # The query message should mention the time and list every configured mood
    sent = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
    assert "how are you feeling" in sent[0].text.lower()
    assert MoodTime.SLEEP in sent[0].text
    assert all(mood in sent[0].text for mood in mood_names)
def test_trigger_morning_no_query_if_not_in_times(hallo_getter):
    """No wake query should be sent when wake is not a configured mood time."""
    test_hallo = hallo_getter({"dailys"})
    spreadsheet = DailysSpreadsheetMock(
        test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {}}
    )
    # Field configured without a wake time
    field = DailysMoodField(
        spreadsheet,
        [MoodTime(time(14, 0)), MoodTime(MoodTime.SLEEP)],
        ["Happiness", "Anger", "Tiredness"],
    )
    evt_wake = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "morning")
    field.passive_trigger(evt_wake)
    # Nothing should be stored for today and no message sent
    assert evt_wake.get_send_time().date() not in spreadsheet.saved_data["mood"]
    test_hallo.test_server.get_send_data(0)
def test_trigger_sleep_no_query_if_not_in_times(hallo_getter):
    """No sleep query should be sent when sleep is not a configured mood time."""
    test_hallo = hallo_getter({"dailys"})
    spreadsheet = DailysSpreadsheetMock(
        test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {}}
    )
    # Field configured without a sleep time
    field = DailysMoodField(
        spreadsheet,
        [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0))],
        ["Happiness", "Anger", "Tiredness"],
    )
    evt_sleep = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "night")
    field.passive_trigger(evt_sleep)
    # Nothing should be stored for today and no message sent
    assert evt_sleep.get_send_time().date() not in spreadsheet.saved_data["mood"]
    test_hallo.test_server.get_send_data(0)
def test_trigger_sleep_no_query_if_already_given(hallo_getter):
    """A second 'night' message must not overwrite an answered sleep query.

    Fix: the original test created ``evt_sleep1`` twice, and the second
    creation shadowed the instance whose send date was used to key the
    pre-seeded saved data.  If the two creations straddled midnight, the
    data key and the trigger date would diverge and the test would fail
    spuriously.  The first event is now reused for the first trigger.
    """
    test_hallo = hallo_getter({"dailys"})
    moods = ["Happiness", "Anger", "Tiredness"]
    evt_sleep1 = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "night")
    # Earlier mood entries for today are already answered
    saved_data = {
        MoodTime.WAKE: {"message_id": 1232, **{mood: 3 for mood in moods}},
        str(time(14, 0, 0)): {"message_id": 1234, **{mood: 2 for mood in moods}},
    }
    spreadsheet = DailysSpreadsheetMock(
        test_hallo.test_user,
        test_hallo.test_chan,
        saved_data={"mood": {evt_sleep1.get_send_time().date(): saved_data}},
    )
    # Setup field
    times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0)), MoodTime(MoodTime.SLEEP)]
    field = DailysMoodField(spreadsheet, times, moods)
    # First night message: the sleep query should be sent and recorded
    field.passive_trigger(evt_sleep1)
    notif_dict = spreadsheet.saved_data["mood"][evt_sleep1.get_send_time().date()]
    assert MoodTime.SLEEP in notif_dict
    assert "message_id" in notif_dict[MoodTime.SLEEP]
    data_sleep = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
    assert "how are you feeling" in data_sleep[0].text.lower()
    assert MoodTime.SLEEP in data_sleep[0].text
    assert all(mood in data_sleep[0].text for mood in moods)
    # Mark the sleep query as answered by setting a known message id
    msg_id = "test_message_id"
    notif_dict[MoodTime.SLEEP]["message_id"] = msg_id
    spreadsheet.saved_data["mood"][evt_sleep1.get_send_time().date()] = notif_dict
    # Second night message: no new query should be sent, id must be untouched
    evt_sleep2 = EventMessage(test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "night")
    field.passive_trigger(evt_sleep2)
    notif_dict = spreadsheet.saved_data["mood"][evt_sleep1.get_send_time().date()]
    assert notif_dict[MoodTime.SLEEP]["message_id"] == msg_id
    test_hallo.test_server.get_send_data(0)
def test_trigger_sleep_after_midnight(hallo_getter):
    """A night message shortly after midnight logs against the previous day."""
    test_hallo = hallo_getter({"dailys"})
    mood_date = date(2019, 1, 15)
    sleep_time = datetime(2019, 1, 16, 0, 34, 15)  # 00:34 the *next* calendar day
    mood_names = ["Happiness", "Anger", "Tiredness"]
    # Earlier mood entries for mood_date are already answered
    day_data = {
        MoodTime.WAKE: {"message_id": 1232, **{mood: 3 for mood in mood_names}},
        str(time(14, 0, 0)): {"message_id": 1234, **{mood: 2 for mood in mood_names}},
    }
    spreadsheet = DailysSpreadsheetMock(
        test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {mood_date: day_data}}
    )
    field = DailysMoodField(
        spreadsheet,
        [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0)), MoodTime(MoodTime.SLEEP)],
        mood_names,
    )
    # Trigger with a telegram-timestamped message from just after midnight
    evt_sleep = EventMessage(
        test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "night"
    ).with_raw_data(RawDataTelegram(get_telegram_time(sleep_time)))
    field.passive_trigger(evt_sleep)
    # The sleep query should be keyed to the previous day's date
    notif = spreadsheet.saved_data["mood"][mood_date]
    assert MoodTime.SLEEP in notif
    assert "message_id" in notif[MoodTime.SLEEP]
    # The query message should mention the time and list every configured mood
    sent = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
    assert "how are you feeling" in sent[0].text.lower()
    assert MoodTime.SLEEP in sent[0].text
    assert all(mood in sent[0].text for mood in mood_names)
def test_trigger_time_exactly_once(hallo_getter):
    """A timed mood query fires on the first minute past its time, and only once."""
    test_hallo = hallo_getter({"dailys"})
    mood_date = date(2019, 1, 18)
    spreadsheet = DailysSpreadsheetMock(
        test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {}}
    )
    mood_names = ["Happiness", "Anger", "Tiredness"]
    field = DailysMoodField(
        spreadsheet,
        [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0)), MoodTime(MoodTime.SLEEP)],
        mood_names,
    )
    # Minute events straddling the 14:00 trigger time
    evt_before, evt_at, evt_after = EventMinute(), EventMinute(), EventMinute()
    evt_before.send_time = datetime(2019, 1, 18, 13, 59, 11)
    evt_at.send_time = datetime(2019, 1, 18, 14, 0, 11)
    evt_after.send_time = datetime(2019, 1, 18, 14, 1, 11)
    # Before the trigger time: no data stored, no message sent
    field.passive_trigger(evt_before)
    assert mood_date not in spreadsheet.saved_data["mood"]
    test_hallo.test_server.get_send_data(0)
    # First minute past the trigger time: query is sent and recorded
    field.passive_trigger(evt_at)
    notif = spreadsheet.saved_data["mood"][mood_date]
    assert str(time(14, 0, 0)) in notif
    assert "message_id" in notif[str(time(14, 0, 0))]
    sent = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
    assert "how are you feeling" in sent[0].text.lower()
    assert str(time(14, 0, 0)) in sent[0].text
    assert all(mood in sent[0].text for mood in mood_names)
    # Mark the query as answered with a known message id
    msg_id = "test_message_id"
    notif[str(time(14, 0, 0))]["message_id"] = msg_id
    spreadsheet.saved_data["mood"][mood_date] = notif
    # A later minute must not re-send the query or overwrite the answer
    field.passive_trigger(evt_after)
    notif = spreadsheet.saved_data["mood"][mood_date]
    assert notif[str(time(14, 0, 0))]["message_id"] == msg_id
    test_hallo.test_server.get_send_data(0)
def test_process_reply_to_query(hallo_getter):
    """Replying to a mood query message should record the given mood digits."""
    test_hallo = hallo_getter({"dailys"})
    mood_date = date(2019, 1, 18)
    mood_datetime = datetime.combine(mood_date, time(8, 13, 6))
    msg_id = 41212
    # A wake query has been sent but not yet answered
    spreadsheet = DailysSpreadsheetMock(
        test_hallo.test_user,
        test_hallo.test_chan,
        saved_data={"mood": {mood_date: {MoodTime.WAKE: {"message_id": msg_id}}}},
    )
    mood_names = ["Happiness", "Anger", "Tiredness"]
    field = DailysMoodField(
        spreadsheet, [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0))], mood_names
    )
    # Reply "413" directly to the query message
    evt_mood = EventMessage(
        test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "413"
    ).with_raw_data(
        RawDataTelegram(get_telegram_time_reply(mood_datetime, msg_id))
    )
    field.passive_trigger(evt_mood)
    # Each digit maps to one mood, in configured order
    notif = spreadsheet.saved_data["mood"][mood_date]
    assert MoodTime.WAKE in notif
    assert "message_id" in notif[MoodTime.WAKE]
    assert notif[MoodTime.WAKE]["message_id"] == msg_id
    assert notif[MoodTime.WAKE]["Happiness"] == 4
    assert notif[MoodTime.WAKE]["Anger"] == 1
    assert notif[MoodTime.WAKE]["Tiredness"] == 3
    # A confirmation message should be sent back
    sent = test_hallo.test_server.get_send_data(1, test_hallo.test_chan, EventMessage)
    assert "added" in sent[0].text.lower()
    assert MoodTime.WAKE in sent[0].text
    assert mood_date.isoformat() in sent[0].text
    assert "413" in sent[0].text
def test_process_most_recent_query(hallo_getter):
test_hallo = hallo_getter({"dailys"})
# Setup
mood_date = date(2019, 1, 18)
mood_datetime = datetime.combine(mood_date, time(8, 13, 6))
msg_id = 41212
mood_data = dict()
mood_data[MoodTime.WAKE] = dict()
mood_data[MoodTime.WAKE]["message_id"] = msg_id
spreadsheet = DailysSpreadsheetMock(
test_hallo.test_user, test_hallo.test_chan, saved_data={"mood": {mood_date: mood_data}}
)
# Setup field
times = [MoodTime(MoodTime.WAKE), MoodTime(time(14, 0, 0))]
moods = ["Happiness", "Anger", "Tiredness"]
field = DailysMoodField(spreadsheet, times, moods)
# Send message
evt_mood = EventMessage(
test_hallo.test_server, test_hallo.test_chan, test_hallo.test_user, "413"
).with_raw_data(RawDataTelegram(get_telegram_time(mood_datetime)))
field.passive_trigger(evt_mood)
# Check mood response is logged
notif_dict = spreadsheet.saved_data["mood"][mood_date]
assert MoodTime.WAKE in notif_dict
| |
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module dealing with command line parsing"""
import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from io import StringIO
from optparse import (OptionParser, TitledHelpFormatter)
from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
import ganeti.rpc.errors as rpcerr
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
import ganeti.cli_opts
# Import constants
from ganeti.cli_opts import * # pylint: disable=W0401,W0614
from ganeti.runtime import (GetClient)
# Public API of this module; the command line option names exported by
# ganeti.cli_opts are appended at the end, so "from ganeti.cli import *"
# brings in both the helpers below and all shared options.
__all__ = [
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "GetNodeUUIDs",
  "JobExecutor",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "RunWhileDaemonsStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  # Formatting functions
  "ToStderr", "ToStdout",
  "ToStdoutAndLoginfo",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_MANY_FILTERS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ARGS_ONE_FILTER",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgFilter",
  "ArgSuggest",
  "ArgUnknown",
  "FixHvParams",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  ] + ganeti.cli_opts.__all__  # Command line options
# Query result status for clients (per-field markers attached to query
# result rows: normal value / unknown field / value could not be retrieved)
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary; they map each ispec
# category to the value type expected for its entries
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

# Cluster-level policies additionally carry "std" (standard) values
TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types, used when describing fields
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_NUMBER_FLOAT: "Floating-point number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
class _Argument(object):
def __init__(self, min=0, max=None): # pylint: disable=W0622
self.min = min
self.max = max
def __repr__(self):
return ("<%s min=%s max=%s>" %
(self.__class__.__name__, self.min, self.max))
class ArgSuggest(_Argument):
  """Argument carrying a list of suggested values.

  The suggestions in C{choices} are advisory only; the value itself is
  not restricted (see L{ArgChoice} for the restricted variant).

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    cls_name = self.__class__.__name__
    return ("<%s min=%s max=%s choices=%r>" %
            (cls_name, self.min, self.max, self.choices))
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  Marker subclass of L{_Argument}; carries no extra behaviour.

  """
class ArgInstance(_Argument):
  """Instances argument.

  Marker subclass of L{_Argument} identifying the expected value kind.

  """
class ArgNode(_Argument):
  """Node argument.

  Marker subclass of L{_Argument} identifying the expected value kind.

  """
class ArgNetwork(_Argument):
  """Network argument.

  Marker subclass of L{_Argument} identifying the expected value kind.

  """
class ArgGroup(_Argument):
  """Node group argument.

  Marker subclass of L{_Argument} identifying the expected value kind.

  """
class ArgJobId(_Argument):
  """Job ID argument.

  Marker subclass of L{_Argument} identifying the expected value kind.

  """
class ArgFile(_Argument):
  """File path argument.

  Marker subclass of L{_Argument} identifying the expected value kind.

  """
class ArgCommand(_Argument):
  """Command argument.

  Marker subclass of L{_Argument} identifying the expected value kind.

  """
class ArgHost(_Argument):
  """Host argument.

  Marker subclass of L{_Argument} identifying the expected value kind.

  """
class ArgOs(_Argument):
  """OS argument.

  Marker subclass of L{_Argument} identifying the expected value kind.

  """
class ArgExtStorage(_Argument):
  """ExtStorage argument.

  Marker subclass of L{_Argument} identifying the expected value kind.

  """
class ArgFilter(_Argument):
  """Filter UUID argument.

  Marker subclass of L{_Argument} identifying the expected value kind.

  """
# Pre-built argument descriptor lists for the common command signatures:
# "no arguments", "zero or more of X" and "exactly one X"
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_MANY_FILTERS = [ArgFilter()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
ARGS_ONE_FILTER = [ArgFilter(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Determine the tag target as a (kind, name) tuple.

  Note that this function will modify its args parameter: for per-object
  tag kinds the object name is popped from the front of C{args}.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # The cluster is a singleton, so no name is needed
    return kind, ""
  if kind in (constants.TAG_NODEGROUP,
              constants.TAG_NODE,
              constants.TAG_NETWORK,
              constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    # Consume the object name from the argument list
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
"""Extend the args if a source file has been given.
This function will extend the tags with the contents of the file
passed in the 'tags_source' attribute of the opts parameter. A file
named '-' will be replaced by stdin.
"""
fname = opts.tags_source
if fname is None:
return
if fname == "-":
new_fh = sys.stdin
else:
new_fh = open(fname, "r")
new_data = []
try:
# we don't use the nice 'new_data = [line.strip() for line in fh]'
# because of python bug 1633941
while True:
line = new_fh.readline()
if not line:
break
new_data.append(line.strip())
finally:
new_fh.close()
args.extend(new_data)
def ListTags(opts, args):
  """Print the tags of a given object, sorted, one per line.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # Optionally pull additional tags from opts.tags_source
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsSet(kind=kind, name=name, tags=args), opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # Optionally pull additional tags from opts.tags_source
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsDel(kind=kind, name=name, tags=args), opts)
class _ShowUsage(Exception):
"""Exception class for L{_ParseArgs}.
"""
def __init__(self, exit_error):
"""Initializes instances of this class.
@type exit_error: bool
@param exit_error: Whether to report failure on exit
"""
Exception.__init__(self)
self.exit_error = exit_error
class _ShowVersion(Exception):
"""Exception class for L{_ParseArgs}.
"""
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  # Every env_override entry must name a known command, and aliases must
  # not shadow real command names
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  # --version/--help are checked before alias resolution so they always win
  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    # Default arguments may come from an environment variable named after
    # the binary and command (e.g. "gnt-node list" -> GNT_NODE_LIST); they
    # are inserted right after the command name, before the real arguments
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  # Argument-count validation against the command's argument definition;
  # _CheckArguments reports the problem itself, so just bail out here
  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _FormatUsage(binary, commands):
"""Generates a nice description of all commands.
@param binary: Script name
@param commands: Dictionary containing command definitions
"""
# compute the max line length for cmd + usage
mlen = min(60, max(map(len, commands)))
yield "Usage: %s {command} | |
-2): (0, 1),
(9, 33, 3, -1): (0, 1),
(9, 33, 3, 0): (0, 1),
(9, 33, 3, 1): (0, 1),
(9, 33, 3, 2): (0, 1),
(9, 33, 3, 3): (0, 1),
(9, 33, 3, 4): (0, 1),
(9, 33, 3, 5): (0, 1),
(9, 33, 4, -5): (0, 1),
(9, 33, 4, -4): (0, 0),
(9, 33, 4, -3): (-1, -1),
(9, 33, 4, -2): (0, 1),
(9, 33, 4, -1): (0, 1),
(9, 33, 4, 0): (0, 1),
(9, 33, 4, 1): (0, 1),
(9, 33, 4, 2): (0, 1),
(9, 33, 4, 3): (0, 1),
(9, 33, 4, 4): (0, 1),
(9, 33, 4, 5): (0, 1),
(9, 33, 5, -5): (0, 1),
(9, 33, 5, -4): (0, 0),
(9, 33, 5, -3): (-1, -1),
(9, 33, 5, -2): (0, 1),
(9, 33, 5, -1): (0, 1),
(9, 33, 5, 0): (0, 1),
(9, 33, 5, 1): (0, 1),
(9, 33, 5, 2): (0, 1),
(9, 33, 5, 3): (0, 1),
(9, 33, 5, 4): (0, 1),
(9, 33, 5, 5): (0, 1),
(9, 34, -5, -5): (0, 1),
(9, 34, -5, -4): (0, 1),
(9, 34, -5, -3): (0, 1),
(9, 34, -5, -2): (0, 1),
(9, 34, -5, -1): (0, 1),
(9, 34, -5, 0): (0, 0),
(9, 34, -5, 1): (-1, -1),
(9, 34, -5, 2): (-1, -1),
(9, 34, -5, 3): (0, 1),
(9, 34, -5, 4): (0, 1),
(9, 34, -5, 5): (0, 1),
(9, 34, -4, -5): (0, 1),
(9, 34, -4, -4): (-1, 1),
(9, 34, -4, -3): (-1, 1),
(9, 34, -4, -2): (0, 1),
(9, 34, -4, -1): (0, 1),
(9, 34, -4, 0): (0, 0),
(9, 34, -4, 1): (-1, -1),
(9, 34, -4, 2): (-1, -1),
(9, 34, -4, 3): (-1, 1),
(9, 34, -4, 4): (-1, 1),
(9, 34, -4, 5): (-1, 1),
(9, 34, -3, -5): (-1, 1),
(9, 34, -3, -4): (0, 1),
(9, 34, -3, -3): (0, 1),
(9, 34, -3, -2): (-1, 1),
(9, 34, -3, -1): (-1, 1),
(9, 34, -3, 0): (-1, 0),
(9, 34, -3, 1): (-1, -1),
(9, 34, -3, 2): (-1, -1),
(9, 34, -3, 3): (-1, 1),
(9, 34, -3, 4): (-1, 1),
(9, 34, -3, 5): (-1, 1),
(9, 34, -2, -5): (0, 1),
(9, 34, -2, -4): (-1, 1),
(9, 34, -2, -3): (-1, 1),
(9, 34, -2, -2): (-1, 1),
(9, 34, -2, -1): (-1, 1),
(9, 34, -2, 0): (-1, 0),
(9, 34, -2, 1): (-1, -1),
(9, 34, -2, 2): (-1, -1),
(9, 34, -2, 3): (-1, 1),
(9, 34, -2, 4): (-1, 1),
(9, 34, -2, 5): (-1, 1),
(9, 34, -1, -5): (1, 1),
(9, 34, -1, -4): (1, 1),
(9, 34, -1, -3): (-1, 1),
(9, 34, -1, -2): (-1, 1),
(9, 34, -1, -1): (-1, 1),
(9, 34, -1, 0): (-1, 0),
(9, 34, -1, 1): (-1, -1),
(9, 34, -1, 2): (-1, -1),
(9, 34, -1, 3): (-1, 1),
(9, 34, -1, 4): (-1, 1),
(9, 34, -1, 5): (-1, 1),
(9, 34, 0, -5): (1, 0),
(9, 34, 0, -4): (1, -1),
(9, 34, 0, -3): (1, 1),
(9, 34, 0, -2): (-1, 1),
(9, 34, 0, -1): (-1, 1),
(9, 34, 0, 0): (-1, 0),
(9, 34, 0, 1): (-1, -1),
(9, 34, 0, 2): (-1, -1),
(9, 34, 0, 3): (-1, 1),
(9, 34, 0, 4): (-1, 1),
(9, 34, 0, 5): (-1, 1),
(9, 34, 1, -5): (0, 0),
(9, 34, 1, -4): (0, -1),
(9, 34, 1, -3): (0, 1),
(9, 34, 1, -2): (0, 1),
(9, 34, 1, -1): (0, 1),
(9, 34, 1, 0): (0, 1),
(9, 34, 1, 1): (0, 1),
(9, 34, 1, 2): (0, 1),
(9, 34, 1, 3): (0, 1),
(9, 34, 1, 4): (0, 1),
(9, 34, 1, 5): (0, 1),
(9, 34, 2, -5): (0, 0),
(9, 34, 2, -4): (-1, -1),
(9, 34, 2, -3): (0, 1),
(9, 34, 2, -2): (0, 1),
(9, 34, 2, -1): (0, 1),
(9, 34, 2, 0): (0, 1),
(9, 34, 2, 1): (0, 1),
(9, 34, 2, 2): (0, 1),
(9, 34, 2, 3): (0, 1),
(9, 34, 2, 4): (0, 1),
(9, 34, 2, 5): (0, 1),
(9, 34, 3, -5): (0, 0),
(9, 34, 3, -4): (-1, -1),
(9, 34, 3, -3): (0, 1),
(9, 34, 3, -2): (0, 1),
(9, 34, 3, -1): (0, 1),
(9, 34, 3, 0): (0, 1),
(9, 34, 3, 1): (0, 1),
(9, 34, 3, 2): (0, 1),
(9, 34, 3, 3): (0, 1),
(9, 34, 3, 4): (0, 1),
(9, 34, 3, 5): (0, 1),
(9, 34, 4, -5): (0, 0),
(9, 34, 4, -4): (-1, -1),
(9, 34, 4, -3): (0, 1),
(9, 34, 4, -2): (0, 1),
(9, 34, 4, -1): (0, 1),
(9, 34, 4, 0): (0, 1),
(9, 34, 4, 1): (0, 1),
(9, 34, 4, 2): (0, 1),
(9, 34, 4, 3): (0, 1),
(9, 34, 4, 4): (0, 1),
(9, 34, 4, 5): (0, 1),
(9, 34, 5, -5): (0, 0),
(9, 34, 5, -4): (-1, -1),
(9, 34, 5, -3): (0, 1),
(9, 34, 5, -2): (0, 1),
(9, 34, 5, -1): (0, 1),
(9, 34, 5, 0): (0, 1),
(9, 34, 5, 1): (0, 1),
(9, 34, 5, 2): (0, 1),
(9, 34, 5, 3): (0, 1),
(9, 34, 5, 4): (0, 1),
(9, 34, 5, 5): (0, 1),
(9, 35, -5, -5): (0, 1),
(9, 35, -5, -4): (0, 1),
(9, 35, -5, -3): (0, 1),
(9, 35, -5, -2): (0, 1),
(9, 35, -5, -1): (0, 1),
(9, 35, -5, 0): (0, 0),
(9, 35, -5, 1): (-1, -1),
(9, 35, -5, 2): (0, 1),
(9, 35, -5, 3): (0, 1),
(9, 35, -5, 4): (0, 1),
(9, 35, -5, 5): (0, 1),
(9, 35, -4, -5): (-1, 1),
(9, 35, -4, -4): (-1, 1),
(9, 35, -4, -3): (0, 1),
(9, 35, -4, -2): (0, 1),
(9, 35, -4, -1): (0, 1),
(9, 35, -4, 0): (0, 0),
(9, 35, -4, 1): (-1, -1),
(9, 35, -4, 2): (-1, 1),
(9, 35, -4, 3): (-1, 1),
(9, 35, -4, 4): (-1, 1),
(9, 35, -4, 5): (-1, 1),
(9, 35, -3, -5): (-1, 1),
(9, 35, -3, -4): (-1, 1),
(9, 35, -3, -3): (-1, 1),
(9, 35, -3, -2): (-1, 1),
(9, 35, -3, -1): (-1, 1),
(9, 35, -3, 0): (-1, 0),
(9, 35, -3, 1): (-1, -1),
(9, 35, -3, 2): (-1, 1),
(9, 35, -3, 3): (-1, 1),
(9, 35, -3, 4): (-1, 1),
(9, 35, -3, 5): (-1, 1),
(9, 35, -2, -5): (0, 1),
(9, 35, -2, -4): (-1, 1),
(9, 35, -2, -3): (-1, 1),
(9, 35, -2, -2): (-1, 1),
(9, 35, -2, -1): (-1, 1),
(9, 35, -2, 0): (-1, 0),
(9, 35, -2, 1): (-1, -1),
(9, 35, -2, 2): (-1, 1),
(9, 35, -2, 3): (-1, 1),
(9, 35, -2, 4): (-1, 1),
(9, 35, -2, 5): (-1, 1),
(9, 35, -1, -5): (1, 1),
(9, 35, -1, -4): (-1, 1),
(9, 35, -1, -3): (-1, 1),
(9, 35, -1, -2): (-1, 1),
(9, 35, -1, -1): (-1, 1),
(9, 35, -1, 0): (-1, 0),
(9, 35, -1, 1): (-1, -1),
(9, 35, -1, 2): (-1, 1),
(9, 35, -1, 3): (-1, 1),
(9, 35, -1, 4): (-1, 1),
(9, 35, -1, 5): (-1, 1),
(9, 35, 0, -5): (1, 0),
(9, 35, 0, -4): (1, 1),
(9, 35, 0, -3): (-1, 1),
(9, 35, 0, -2): (-1, 1),
(9, 35, 0, -1): (-1, 1),
(9, 35, 0, 0): (-1, 0),
(9, 35, 0, 1): (-1, -1),
(9, 35, 0, 2): (-1, | |
lock in locks:
loc = self.route.index(origin)
for r in self.route[loc:]:
if 'Line-up area' in self.env.FG.nodes[r].keys():
locks2 = self.env.FG.nodes[r]["Line-up area"]
for r2 in self.route[loc:]:
if 'Lock' in self.env.FG.nodes[r2].keys():
locks3 = self.env.FG.nodes[r2]["Lock"]
break
self.lock_name = []
for lock3 in locks3:
if lock3.water_level == self.route[self.route.index(r2)-1]:
for lock2 in locks2:
if lock2.name == lock3.name:
if lock2.lock_queue_length == 0:
self.lock_name = lock3.name
break
lock_queue_length = [];
if self.lock_name == []:
for lock2 in locks2:
lock_queue_length.append(lock2.lock_queue_length)
self.lock_name = locks2[lock_queue_length.index(min(lock_queue_length))].name
for lock2 in locks2:
if lock2.name == self.lock_name:
lock2.lock_queue_length += 1
for lock2 in locks2:
if lock2.name == self.lock_name:
self.v = 0.5*speed
break
wait_for_lineup_area = self.env.now
lock.waiting_area[origin].release(access_waiting_area)
if self.route[self.route.index(r2)-1] == lock3.node_1:
if lock3.doors_2[lock3.node_3].users != [] and lock3.doors_2[lock3.node_3].users[0].priority == -1:
if self.L < lock2.length.level + lock3.length.level:
access_lineup_length = lock2.length.get(self.L)
elif self.L < lock2.length.level:
if lock2.length.level == lock2.length.capacity:
access_lineup_length = lock2.length.get(self.L)
elif lock2.line_up_area[r].users != [] and lock3.length.level < lock2.line_up_area[r].users[0].length:
access_lineup_length = lock2.length.get(self.L)
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
else:
if lock2.length.level == lock2.length.capacity:
access_lineup_length = lock2.length.get(self.L)
elif lock2.line_up_area[r].users != [] and self.L < lock2.line_up_area[r].users[-1].lineup_dist-0.5*lock2.line_up_area[r].users[-1].length:
access_lineup_length = lock2.length.get(self.L)
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
elif self.route[self.route.index(r2)-1] == lock3.node_3:
if lock3.doors_1[lock3.node_1].users != [] and lock3.doors_1[lock3.node_1].users[0].priority == -1:
if self.L < lock2.length.level + lock3.length.level:
access_lineup_length = lock2.length.get(self.L)
elif self.L < lock2.length.level:
if lock2.length.level == lock2.length.capacity:
access_lineup_length = lock2.length.get(self.L)
elif lock2.line_up_area[r].users != [] and lock3.length.level < lock2.line_up_area[r].users[0].length:
access_lineup_length = lock2.length.get(self.L)
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
yield correct_lineup_length
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
else:
if lock2.length.level == lock2.length.capacity:
access_lineup_length = lock2.length.get(self.L)
elif lock2.line_up_area[r].users != [] and self.L < lock2.line_up_area[r].users[-1].lineup_dist-0.5*lock2.line_up_area[r].users[-1].length:
access_lineup_length = lock2.length.get(self.L)
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
if len(lock2.line_up_area[r].users) != 0:
self.lineup_dist = lock2.line_up_area[r].users[-1].lineup_dist - 0.5*lock2.line_up_area[r].users[-1].length - 0.5*self.L
else:
self.lineup_dist = lock2.length.capacity - 0.5*self.L
self.wgs84 = pyproj.Geod(ellps="WGS84")
[lineup_area_start_lat, lineup_area_start_lon, lineup_area_stop_lat, lineup_area_stop_lon] = [self.env.FG.nodes[self.route[self.route.index(r)]]['geometry'].x, self.env.FG.nodes[self.route[self.route.index(r)]]['geometry'].y,
self.env.FG.nodes[self.route[self.route.index(r)+1]]['geometry'].x, self.env.FG.nodes[self.route[self.route.index(r)+1]]['geometry'].y]
fwd_azimuth,_,_ = self.wgs84.inv(lineup_area_start_lat, lineup_area_start_lon, lineup_area_stop_lat, lineup_area_stop_lon)
[self.lineup_pos_lat,self.lineup_pos_lon,_] = self.wgs84.fwd(self.env.FG.nodes[self.route[self.route.index(r)]]['geometry'].x,
self.env.FG.nodes[self.route[self.route.index(r)]]['geometry'].y,
fwd_azimuth,self.lineup_dist)
access_lineup_area = lock2.line_up_area[r].request()
lock2.line_up_area[r].users[-1].length = self.L
lock2.line_up_area[r].users[-1].id = self.id
lock2.line_up_area[r].users[-1].lineup_pos_lat = self.lineup_pos_lat
lock2.line_up_area[r].users[-1].lineup_pos_lon = self.lineup_pos_lon
lock2.line_up_area[r].users[-1].lineup_dist = self.lineup_dist
lock2.line_up_area[r].users[-1].n = len(lock2.line_up_area[r].users)
lock2.line_up_area[r].users[-1].v = 0.25*speed
lock2.line_up_area[r].users[-1].wait_for_next_cycle = False
yield access_lineup_area
enter_lineup_length = lock2.enter_line_up_area[r].request()
yield enter_lineup_length
lock2.enter_line_up_area[r].users[0].id = self.id
if wait_for_lineup_area != self.env.now:
self.v = 0.25*speed
waiting = self.env.now - wait_for_lineup_area
self.log_entry("Waiting in waiting area start", wait_for_lineup_area, 0, nx.get_node_attributes(self.env.FG, "geometry")[origin])
self.log_entry("Waiting in waiting area stop", self.env.now, waiting, nx.get_node_attributes(self.env.FG, "geometry")[origin])
break
if "Line-up area" in self.env.FG.nodes[destination].keys():
locks = self.env.FG.nodes[destination]["Line-up area"]
for lock in locks:
if lock.name == self.lock_name:
loc = self.route.index(destination)
orig = shapely.geometry.Point(self.lineup_pos_lat,self.lineup_pos_lon)
for r in self.route[loc:]:
if 'Lock' in self.env.FG.nodes[r].keys():
locks = self.env.FG.nodes[r]["Lock"]
for lock2 in locks:
for q in range(len(lock.line_up_area[destination].users)):
if lock.line_up_area[destination].users[q].id == self.id:
if self.route[self.route.index(r)-1] == lock2.node_1:
if lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == -1:
if q <= 1 and lock.line_up_area[destination].users[q].n != lock.line_up_area[destination].users[q].n-len(lock2.resource.users):
self.lineup_dist = lock.length.capacity - 0.5*self.L
elif self.route[self.route.index(r)-1] == lock2.node_3:
if lock2.doors_1[lock2.node_1].users != [] and lock2.doors_1[lock2.node_1].users[0].priority == -1:
if q <= 1 and lock.line_up_area[destination].users[q].n != lock.line_up_area[destination].users[q].n-len(lock2.resource.users):
self.lineup_dist = lock.length.capacity - 0.5*self.L
[self.lineup_pos_lat,self.lineup_pos_lon,_] = self.wgs84.fwd(self.env.FG.nodes[self.route[self.route.index(destination)]]['geometry'].x,
self.env.FG.nodes[self.route[self.route.index(destination)]]['geometry'].y,
fwd_azimuth,self.lineup_dist)
lock.line_up_area[destination].users[q].lineup_pos_lat = self.lineup_pos_lat
lock.line_up_area[destination].users[q].lineup_pos_lon = self.lineup_pos_lon
lock.line_up_area[destination].users[q].lineup_dist = self.lineup_dist
break
if "Line-up area" in self.env.FG.nodes[origin].keys():
locks = self.env.FG.nodes[origin]["Line-up area"]
for lock in locks:
if lock.name == self.lock_name:
loc = self.route.index(origin)
orig = shapely.geometry.Point(self.lineup_pos_lat,self.lineup_pos_lon)
for r in self.route[loc:]:
if 'Lock' in self.env.FG.nodes[r].keys():
locks = self.env.FG.nodes[r]["Lock"]
lock.enter_line_up_area[origin].release(enter_lineup_length)
for q in range(len(lock.line_up_area[origin].users)):
if lock.line_up_area[origin].users[q].id == self.id:
if q > 0:
_,_,distance = self.wgs84.inv(orig.x,
orig.y,
lock.line_up_area[origin].users[0].lineup_pos_lat,
lock.line_up_area[origin].users[0].lineup_pos_lon)
yield self.env.timeout(distance/self.v)
break
for lock2 in locks:
if lock2.name == self.lock_name:
self.v = 0.25*speed
wait_for_lock_entry = self.env.now
for r2 in self.route[(loc+1):]:
if 'Line-up area' in self.env.FG.nodes[r2].keys():
locks = self.env.FG.nodes[r2]["Line-up area"]
for lock3 in locks:
if lock3.name == self.lock_name:
break
break
if self.route[self.route.index(r)-1] == lock2.node_1:
if len(lock2.doors_2[lock2.node_3].users) != 0:
if lock2.doors_2[lock2.node_3].users[0].priority == -1:
if self.L > (lock2.resource.users[-1].lock_dist-0.5*lock2.resource.users[-1].length) or lock2.resource.users[-1].converting == True:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
yield access_lock_door2
lock2.doors_2[lock2.node_3].release(access_lock_door2)
wait_for_next_cycle = lock3.pass_line_up_area[r2].request()
yield wait_for_next_cycle
lock3.pass_line_up_area[r2].release(wait_for_next_cycle)
if lock.converting_while_in_line_up_area[origin].users != []:
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request(priority = -1)
yield waiting_during_converting
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
elif (len(lock2.doors_1[lock2.node_1].users) == 0 or (len(lock2.doors_1[lock2.node_1].users) != 0 and lock2.doors_1[lock2.node_1].users[0].priority != -1)) and self.route[self.route.index(r)-1] != lock2.water_level:
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request()
yield waiting_during_converting
yield from lock2.convert_chamber(self.env, self.route[self.route.index(r)-1], 0)
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
if lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == -1:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
lock2.doors_2[lock2.node_3].release(lock2.doors_2[lock2.node_3].users[0])
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
if lock3.converting_while_in_line_up_area[r2].users != []:
waiting_during_converting = lock3.converting_while_in_line_up_area[r2].request()
yield waiting_during_converting
lock3.converting_while_in_line_up_area[r2].release(waiting_during_converting)
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
if lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == -1:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
lock2.doors_2[lock2.node_3].release(lock2.doors_2[lock2.node_3].users[0])
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
if lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == -1:
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
lock2.doors_2[lock2.node_3].release(lock2.doors_2[lock2.node_3].users[0])
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
elif lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == 0:
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
if lock.converting_while_in_line_up_area[origin].users != []:
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request(priority = -1)
yield waiting_during_converting
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
elif (len(lock2.doors_1[lock2.node_1].users) == 0 or (len(lock2.doors_1[lock2.node_1].users) != 0 and lock2.doors_1[lock2.node_1].users[0].priority != -1)) and self.route[self.route.index(r)-1] != lock2.water_level:
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request()
yield waiting_during_converting
yield from lock2.convert_chamber(self.env, self.route[self.route.index(r)-1], 0)
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
elif len(lock2.doors_1[lock2.node_1].users) != 0 and lock2.doors_1[lock2.node_1].users[0].priority == -1:
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
else:
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
if lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == -1:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
lock2.doors_2[lock2.node_3].release(lock2.doors_2[lock2.node_3].users[0])
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
elif self.route[self.route.index(r)-1] == lock2.node_3:
if len(lock2.doors_1[lock2.node_1].users) != 0:
if lock2.doors_1[lock2.node_1].users[0].priority == -1:
if self.L > (lock2.resource.users[-1].lock_dist-0.5*lock2.resource.users[-1].length) or lock2.resource.users[-1].converting == True:
access_lock_door1 = lock2.doors_1[lock2.node_1].request(priority = -1)
yield access_lock_door1
lock2.doors_1[lock2.node_1].release(access_lock_door1)
wait_for_next_cycle = lock3.pass_line_up_area[r2].request()
yield wait_for_next_cycle
lock3.pass_line_up_area[r2].release(wait_for_next_cycle)
if lock.converting_while_in_line_up_area[origin].users != []:
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request(priority = -1)
yield waiting_during_converting
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
elif (len(lock2.doors_2[lock2.node_3].users) == 0 or (len(lock2.doors_2[lock2.node_3].users) != 0 and lock2.doors_2[lock2.node_3].users[0].priority != -1)) and self.route[self.route.index(r)-1] != lock2.water_level:
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request()
yield waiting_during_converting
yield from lock2.convert_chamber(self.env, self.route[self.route.index(r)-1], 0)
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
access_lock_door2 = lock2.doors_2[lock2.node_3].request()
| |
#!/usr/bin/env python
##
## video.py - recoding VNC to FLV.
##
## Copyright (c) 2009-2010 by <NAME>
##
import sys, zlib, re
from struct import pack, unpack
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from flvscreen import FlvScreen
def str2clip(s):
    """Parse a geometry-style clipping spec 'WxH{+-}X{+-}Y'.

    Returns ((xsign, xoff), (ysign, yoff), width, height), where the
    signs are the literal '+' or '-' characters from the spec.
    """
    match = re.match(r'^(\d+)x(\d+)([\-\+])(\d+)([\-\+])(\d+)$', s)
    if match is None:
        raise ValueError('Invalid clipping spec: %r' % s)
    w, h, xsign, xoff, ysign, yoff = match.groups()
    return ((xsign, int(xoff)), (ysign, int(yoff)), int(w), int(h))
def str2size(s):
    """Parse a 'WIDTHxHEIGHT' size spec into an (int, int) tuple."""
    m = re.match(r'^(\d+)x(\d+)$', s)
    if not m:
        raise ValueError('Invalid size spec: %r' % s)
    # Tuple-unpack instead of indexing: on Python 3 map() returns an
    # iterator, so the original f[0]/f[1] would raise TypeError.
    w, h = map(int, m.groups())
    return (w, h)
class MultipleRange(object):
    """A sorted list of frame-index ranges with cumulative output offsets.

    Accepts either a comma-separated spec string like '10-20,50-' (a
    missing start means 0, a missing end means "until the end") or a list
    of (start, end) pairs.  Each entry is stored as (offset, start, end)
    where offset is the combined length of all preceding ranges, so an
    input index can be remapped onto the concatenated output.
    """

    def __init__(self, s):
        self.ranges = []
        if isinstance(s, basestring):
            offset = 0
            for spec in s.split(','):
                m = re.match(r'(\d+)?-(\d+)?', spec)
                if not m:
                    raise ValueError('Invalid range spec: %r' % spec)
                start = int(m.group(1)) if m.group(1) else 0
                # An open upper bound means "to the end of the input".
                end = int(m.group(2)) if m.group(2) else sys.maxint
                self.ranges.append((offset, start, end))
                offset += (end - start)
        elif isinstance(s, list):
            offset = 0
            for (start, end) in s:
                self.ranges.append((offset, start, end))
                offset += (end - start)
        self.ranges.sort()
        # Cursor for seekandmap(); assumes queries arrive in ascending order.
        self.pos = 0

    def __iter__(self):
        """Iterate over the (offset, start, end) triples."""
        return iter(self.ranges)

    def get_total(self, tmax):
        """Return the combined length of all ranges, capping open ends at tmax."""
        total = 0
        for (_, start, end) in self.ranges:
            if end == sys.maxint:
                end = tmax
            total += (end - start)
        return total

    def seekandmap(self, i):
        """Map input index i to its output position, or -1 if i is excluded.

        Advances an internal cursor, so callers must query indices in
        non-decreasing order.
        """
        while self.pos < len(self.ranges):
            (offset, start, end) = self.ranges[self.pos]
            if i < start:
                return -1
            if i <= end:
                return (i - start + offset)
            self.pos += 1
        return -1
## VideoSink
##
## edit by luoxiao:
## x, y : offset within the VNC screen at which the recording is clipped
##
## width, height : size of the recorded region
##
class VideoSink(object):
    """Base sink for decoded VNC framebuffer updates.

    Subclasses override the update_* callbacks; this base class only
    computes the clipped recording geometry and prints debug traces.
    """

    def __init__(self, clipping=None, debug=0):
        # clipping: ((xsign, x), (ysign, y), w, h) as produced by
        # str2clip, or None to record the whole screen.
        self.debug = debug
        self.clipping = clipping
        self.initialized = False
        return

    def init_screen(self, width, height, name=None):
        """Compute the effective recording geometry.

        width/height are the VNC server's screen size; the values from a
        -C clipping spec, when given, take precedence.  Returns the
        effective (x, y, width, height).
        """
        if self.debug:
            sys.stderr.write('init_screen: %dx%d, name=%r\n' % (width, height, name))
        if self.clipping:
            ((xs, x), (ys, y), w, h) = self.clipping
            # A '-' sign anchors the offset to the right/bottom edge.
            if xs == '-':
                (x, width) = (width - w - x, w)
            else:
                (x, width) = (x, w)
            if ys == '-':
                # BUGFIX: the bottom-anchored offset must use y, not x.
                (y, height) = (height - h - y, h)
            else:
                (y, height) = (y, h)
        else:
            (x, y) = (0, 0)
        self.initialized = True
        return (x, y, width, height)

    # data is given as ARGB
    def convert_pixels(self, data):
        return data

    def convert_color1(self, data):
        # Unpack the first three bytes of a 4-byte pixel, dropping the
        # fourth (presumably padding/alpha -- see convert_pixels above).
        return unpack('BBBx', data)

    def update_cursor_image(self, width, height, data):
        if self.debug:
            sys.stderr.write('update_cursor_image: %dx%d\n' % (width, height))
        return

    def update_cursor_pos(self, x, y):
        if self.debug:
            sys.stderr.write('update_cursor_pos: (%d,%d)\n' % (x, y))
        return

    def update_screen_rgbabits(self, pos, size, data):
        # Explicit unpacking keeps the call signature identical while
        # avoiding Python-2-only tuple parameters.
        (x, y) = pos
        (width, height) = size
        if self.debug:
            sys.stderr.write('update_screen_rgbabits: %dx%d at (%d,%d)\n' % (width, height, x, y))
        return

    def update_screen_solidrect(self, pos, size, data):
        (x, y) = pos
        (w, h) = size
        if self.debug:
            # BUGFIX: the original message referenced undefined names
            # (width, height, color) and raised NameError in debug mode.
            sys.stderr.write('update_screen_solidrect: %dx%d at (%d,%d), color=%r\n' % (w, h, x, y, data))
        return

    def flush(self, t):
        if self.debug:
            sys.stderr.write('flush %s\n' % (t,))
        return

    def close(self):
        if self.debug:
            sys.stderr.write('close\n')
        return
## FLVVideoSink
##
## add by luoxiao:
## width, height - size of the recorded region
## x, y - offset within the VNC screen at which the recording is clipped
class FLVVideoSink(VideoSink):
    """VideoSink that encodes framebuffer updates as FLV Screen Video.

    The screen is divided into blocksize x blocksize tiles; only tiles
    that changed since the previous frame are compressed and written.
    """

    def __init__(self, writer, blocksize=32, framerate=15, keyframe=0,
                 clipping=None, panwindow=None, panspeed=0, debug=0):
        # writer: FLV writer object (must provide set_screen_size and
        # write_video_frame).  keyframe: emit a full keyframe every N
        # frames (0 = never).  panwindow/panspeed: optional auto-panning
        # window (currently disabled -- see get_update_frame).
        VideoSink.__init__(self, clipping=clipping, debug=debug)
        self.writer = writer
        self.blocksize = blocksize
        self.framerate = framerate
        self.keyframe = keyframe
        self.panwindow = panwindow
        self.panspeed = panspeed
        self.screen = None
        self.screenpos = (0, 0)
        self.screensize = None
        self.windowpos = (0, 0)
        self.windowsize = None
        self.curframe = 0
        self.changes = []
        self.newFBSizeChange = False  # set when the remote framebuffer was resized
        self.vnc_width = 0   # actual VNC screen width (may differ from recording size)
        self.vnc_height = 0  # actual VNC screen height
        return

    def init_screen(self, width, height, name=None):
        """Set up the tile buffer for the recording geometry.

        width/height are the VNC server's values; VideoSink.init_screen
        lets a -C clipping spec override them.
        """
        self.vnc_width = width
        self.vnc_height = height
        (x, y, width, height) = VideoSink.init_screen(self, width, height, name=name)
        # Number of tiles in each direction, rounded up.
        bw = (width + self.blocksize - 1) / self.blocksize
        bh = (height + self.blocksize - 1) / self.blocksize
        self.screenpos = (x, y)
        self.screensize = (bw, bh)
        # Initialize the flvscreen tile buffer; e.g. with blocksize=32 a
        # 1024x768 screen gives bw=32, bh=24 tiles.
        self.screen = FlvScreen(self.blocksize, bw, bh)
        if self.panwindow:
            (w, h) = self.panwindow
            self.windowsize = ((w + self.blocksize - 1) / self.blocksize,
                               (h + self.blocksize - 1) / self.blocksize)
        else:
            self.windowsize = (bw, bh)
        if self.debug:
            print >> sys.stderr, 'start: %d,%d (%dx%d)' % (x, y, width, height)
        self.writer.set_screen_size(width, height)
        return (x, y, width, height)

    # Blit an updated rectangle into the flvscreen buffer.
    def update_screen_rgbabits(self, (x, y), (w, h), data):
        (x0, y0) = self.screenpos
        (screenwidth, screenheight) = self.screensize
        # Center the image when the actual VNC screen is smaller than the
        # recording size.
        if screenwidth * self.blocksize > self.vnc_width:
            x = x + (screenwidth * self.blocksize - self.vnc_width) / 2
        if screenheight * self.blocksize > self.vnc_height:
            y = y + (screenheight * self.blocksize - self.vnc_height) / 2
        self.screen.blit_rgba(x - x0, y - y0, w, h, data)
        return

    # Write buffered frames from the flvscreen buffer to the FLV file.
    def flush(self, t):
        # t must be >= 0
        if not self.screen: return
        # Emit frames at the fixed framerate until we catch up with time t.
        while 1:
            timestamp = self.curframe * 1000 / self.framerate
            if t < timestamp: break
            # Collect the changed tiles and write them as one video frame.
            self.writer.write_video_frame(timestamp, self.get_update_frame())
            self.curframe += 1
        return

    def onNewFBSize(self, x, y, width, height):
        """Handle a remote framebuffer resize."""
        self.screen.reset(1)  # drop cached frame pixels and tile state
        self.newFBSizeChange = True  # force a full refresh on the next frame
        self.vnc_width = width
        self.vnc_height = height
        return

    # Build one SCREENVIDEOPACKET tag body from the changed tiles.
    def get_update_frame(self):
        changes = self.screen.changed()
        # reset(0) appears to clear only the change list, keeping the
        # cached pixels (reset(1) also drops them -- see onNewFBSize).
        # TODO confirm against the flvscreen module.
        self.screen.reset(0)
        (bw, bh) = self.windowsize
        # Auto-panning is disabled for performance (it caused high CPU
        # usage after ~20 minutes of recording):
        # (bx,by) = self.do_autopan(self.windowpos, changes)
        bx = 0;
        by = 0;
        key = ((bx, by) != self.windowpos or
               (self.keyframe and (self.curframe % self.keyframe) == 0))
        if key or self.newFBSizeChange:  # a framebuffer resize also forces a keyframe
            # update the entire screen if necessary.
            self.windowpos = (bx, by)
            changes = set((bx + x, by + y) for y in xrange(bh) for x in xrange(bw))
            self.newFBSizeChange = False
        else:
            changes = set(changes)
        if self.debug:
            print >> sys.stderr, 'update(%d): changes=%r' % (self.curframe, len(changes)), sorted(changes)
        flags = 3 # screenvideo codec
        if key:
            flags |= 0x10  # keyframe
        else:
            flags |= 0x20  # interframe
        data = chr(flags)
        w = bw * self.blocksize
        h = bh * self.blocksize
        # 4-bit (blocksize/16 - 1) packed with the 12-bit pixel dimensions.
        data += chr((self.blocksize / 16 - 1) << 4 | w >> 8) + chr(w & 0xff)
        data += chr((self.blocksize / 16 - 1) << 4 | h >> 8) + chr(h & 0xff)
        n = 0
        # Tiles are emitted bottom-up, left-to-right.
        for y in xrange(bh, 0, -1):
            y = by + y - 1
            for x in xrange(bw):
                x += bx
                if (x, y) in changes:
                    # changed block: zlib data prefixed with its 16-bit length
                    block = zlib.compress(self.screen.get(x, y))
                    data += pack('>H', len(block)) + block
                else:
                    # unchanged block
                    data += pack('>H', 0)
        return data

    # Compute a new pan-window origin that follows recent changes.
    # (Unused while auto-panning is disabled in get_update_frame.)
    def do_autopan(self, (wx, wy), changes):
        if changes:
            # Bounding box of this frame's changed tiles.
            r = (min(x for (x, y) in changes),
                 min(y for (x, y) in changes),
                 max(x for (x, y) in changes) + 1,
                 max(y for (x, y) in changes) + 1)
            self.changes.append(r)
        elif self.changes:
            self.changes.append(self.changes[-1])
        # Keep only the last panspeed boxes and average them to smooth motion.
        self.changes = self.changes[-self.panspeed:]
        cx0 = sum(x0 for (x0, y0, x1, y1) in self.changes) / len(self.changes)
        cy0 = sum(y0 for (x0, y0, x1, y1) in self.changes) / len(self.changes)
        cx1 = sum(x1 for (x0, y0, x1, y1) in self.changes) / len(self.changes)
        cy1 = sum(y1 for (x0, y0, x1, y1) in self.changes) / len(self.changes)
        (w, h) = self.windowsize
        (bw, bh) = self.screensize
        # Clamp the window so the averaged box stays in view.
        if w < cx1 - cx0:
            wx = min(max(0, (cx0 + cx1 - w) / 2), bw - w)
        elif cx0 < wx:
            wx = cx0
        elif wx < cx1 - w:
            wx = cx1 - w
        if h <= cy1 - cy0:
            wy = min(max(0, (cy0 + cy1 - h) / 2), bh - h)
        elif cy0 < wy:
            wy = cy0
        elif wy < cy1 - h:
            wy = cy1 - h
        return (wx, wy)
##
## FLVMovieProcessor
##
## NOTE: flvrec.py does not use this class.
##
class FLVMovieProcessor(object):
def __init__(self, writer=None, debug=0):
self.debug = debug
self.writer = writer
self.basetime = 0
return
    def process_audio_tag(self, audiosink, data):
        """Decode an FLV AUDIODATA tag header and feed the payload to audiosink.

        data[0] is the flags byte: upper nibble = codec id (0x2 = MP3),
        bits 2-3 = sample rate index, bit 1 = sample size, bit 0 = stereo.
        Non-MP3 packets are silently ignored.
        """
        flags = ord(data[0])
        # must be mp3 packet
        if (flags & 0xf0) != 0x20: return
        samplerate = (flags & 0x0c) >> 2
        samplerate = [5500, 11025, 22050, 44100][samplerate]
        samplesize = 8
        if flags & 2:
            samplesize = 16
        samplestereo = flags & 1
        # NOTE(review): samplerate/samplesize/samplestereo are decoded but
        # never used here -- presumably the sink is configured elsewhere.
        audiosink.load(data[1:])
        return
def process_video_tag(self, videosink, data):
import flvscreen, zlib
(frametype, codecid) = ord(data[0]) >> 4, ord(data[0]) & 0xf
# must be ScreenVideo
if codecid | |
72335,
-1,
82511906,
72341,
82707708,
81825990,
-1,
82773243,
82672720,
-1,
82838770,
82743422,
-1,
-64271,
82800460,
72281,
-1,
82857207,
72282,
83035380,
82806935,
72275,
-1,
82979679,
72274,
83166454,
82993253,
72277,
-1,
83110751,
72278,
83297529,
83103505,
-1,
-64264,
83265687,
72280,
-1,
83312733,
72279,
-64262,
83247197,
72273,
-1,
83438431,
72276,
-1,
82727305,
72283,
83690753,
82663240,
-1,
-64258,
83658593,
72346,
-64257,
83720949,
72347,
-64256,
83782662,
72349,
-1,
83831203,
72348,
84018439,
83632563,
-1,
84083973,
83982370,
-1,
-64252,
84032213,
72328,
-1,
84086545,
72329,
-64250,
84045721,
72326,
-1,
84233545,
72327,
84411660,
83982370,
-1,
84477195,
84361974,
-1,
-64246,
84447322,
72343,
-1,
84481161,
72342,
-1,
84445572,
72345,
84739344,
84360348,
72352,
84869119,
84672543,
-1,
-64241,
84772662,
72350,
-1,
84817216,
72351,
85001491,
84706027,
-1,
-64238,
84937202,
72354,
-1,
85002582,
72353,
-1,
84949193,
72344,
85263693,
78067846,
-1,
85329199,
85217705,
-1,
85394715,
85293090,
-1,
85460250,
85331729,
-1,
-64231,
85416067,
69436,
-1,
85469090,
69439,
-1,
85409325,
69441,
85722398,
85347575,
-1,
-64227,
85668167,
69444,
-1,
85725943,
69433,
85919008,
85671051,
69427,
-1,
85885725,
69430,
86050083,
85856017,
-1,
-64222,
86022080,
69437,
-1,
86069661,
69424,
-64220,
86022391,
69429,
-64219,
86218841,
69431,
-64218,
86282724,
69428,
-64217,
86344305,
69442,
-64216,
86405747,
69440,
-64215,
86469196,
69438,
-64214,
86532153,
69435,
-64213,
86595726,
69434,
-64212,
86657858,
69432,
-64211,
86718896,
69426,
-64210,
86782958,
69443,
-1,
86841768,
69425,
87033151,
85271049,
-1,
87098675,
87001163,
-1,
-64206,
87038316,
69447,
-1,
87101383,
69449,
87295286,
87047880,
-1,
-64203,
87234924,
69453,
-1,
87297991,
69452,
87491897,
87238795,
-1,
-64200,
87431532,
69446,
-1,
87494599,
69448,
87688508,
87434539,
-1,
-64197,
87628140,
69451,
-1,
87691207,
69450,
-64195,
87654460,
69456,
-64194,
87847528,
69455,
-1,
87905399,
69454,
88081734,
86994332,
-1,
88147268,
88049725,
-1,
88212803,
88117046,
69461,
-1,
88147504,
69462,
-1,
88154309,
69464,
-64187,
88095592,
69465,
-1,
88350829,
69463,
88540492,
88039412,
-1,
88606026,
88506702,
-1,
-64183,
88576614,
69459,
-1,
88616686,
69458,
88866815,
88564484,
69457,
-1,
88736302,
69460,
-1,
88491044,
69445,
88999289,
85225376,
-1,
89064811,
88953257,
-1,
89130323,
89021271,
-1,
-64175,
89102095,
69857,
-64174,
89143320,
69847,
-1,
89198943,
69849,
89392470,
89085534,
-1,
-64171,
89340615,
69846,
-1,
89402207,
69864,
89589081,
89336671,
-1,
-64168,
89537223,
69863,
-1,
89599443,
69859,
-64166,
89560847,
69852,
-64165,
89755218,
69850,
-64164,
89819775,
69861,
-64163,
89883143,
69841,
-64162,
89946295,
69840,
-64161,
90009559,
69853,
-64160,
90073401,
69851,
-64159,
90136981,
69862,
-64158,
90197425,
69848,
-64157,
90262316,
69855,
-64156,
90327590,
69856,
-64155,
90391373,
69860,
-64154,
90454768,
69854,
-64153,
90519576,
69845,
-64152,
90580500,
69844,
-64151,
90643828,
69843,
-64150,
90708115,
69842,
-1,
90771807,
69858,
91029503,
89008121,
-1,
91030895,
90931534,
-1,
-64146,
91002070,
69874,
-1,
91045714,
69875,
91227506,
90994722,
-1,
-64143,
91179389,
69878,
-1,
91238966,
69879,
91424117,
91173670,
-1,
-64140,
91383562,
69876,
-1,
91441375,
69877,
-64138,
91396359,
69872,
-64137,
91579140,
69873,
-64136,
91643330,
69881,
-1,
91696640,
69880,
91882890,
88968697,
-1,
91948418,
91853447,
-1,
92013951,
91909169,
-1,
-64131,
91983939,
128601,
-64130,
92032911,
128593,
-1,
92085444,
128609,
92276097,
91981515,
11113,
-1,
92210515,
11129,
-1,
92240148,
129111,
92536831,
91892784,
-1,
92538247,
92433457,
-1,
-64123,
92508227,
128603,
-64122,
92557199,
128595,
-1,
92609732,
128611,
92800393,
92505803,
11112,
-1,
92734803,
11128,
-1,
92764436,
129110,
92997007,
91830117,
-1,
93062542,
92930048,
-1,
-64115,
93027146,
128428,
-1,
93077859,
127846,
-1,
93001895,
129358,
93324689,
92951249,
128618,
-1,
93258301,
128619,
-64110,
93288482,
983181,
-64109,
93414117,
128284,
-64108,
93469383,
983043,
-1,
93528415,
129510,
93718072,
78002887,
-1,
93783578,
93654801,
-1,
93849112,
93744972,
-1,
93914647,
93786251,
-1,
93980121,
93868457,
-1,
94045601,
93949493,
-1,
94111136,
94003403,
-1,
94176670,
94072652,
70025,
-1,
94138188,
70026,
94371839,
94129399,
70027,
-1,
94260471,
70028,
-1,
94048017,
70062,
94504359,
94011726,
-1,
94569893,
94470478,
-1,
-64092,
94517973,
70044,
-1,
94572305,
70043,
-64090,
94517973,
70049,
-1,
94703377,
70048,
94897580,
94460759,
-1,
-64087,
94869256,
70042,
-64086,
94919600,
70047,
-64085,
94975970,
70037,
-1,
95031057,
70052,
95225266,
94840159,
-1,
95290800,
95167839,
-1,
-64081,
95238869,
70046,
-1,
95293201,
70045,
-64079,
95238869,
70051,
-1,
95424273,
70050,
95618486,
95162129,
70019,
-64076,
95586455,
70032,
-64075,
95633501,
70030,
-1,
95686417,
70020,
95880634,
95582242,
-1,
-64072,
95844510,
70064,
-64071,
95894229,
70063,
-1,
95948561,
70065,
96142780,
95848599,
70023,
-1,
96110743,
70024,
96273855,
96102609,
-1,
-64066,
96221909,
70054,
-1,
96276241,
70053,
96470466,
96226551,
-1,
-64063,
96423241,
70061,
-1,
96472849,
70060,
96667077,
96422595,
-1,
-64060,
96615125,
70034,
-1,
96669457,
70033,
96863688,
96618997,
-1,
-64057,
96811733,
70041,
-1,
96866065,
70040,
97060298,
96813149,
70021,
-1,
97009757,
70022,
97191373,
97007529,
-1,
-64052,
97139413,
70036,
-1,
97193745,
70035,
97387984,
97131831,
-1,
-64049,
97336021,
70039,
-1,
97390353,
70038,
97584595,
97327138,
-1,
-64046,
97532629,
70056,
-1,
97586961,
70055,
-64044,
97556232,
70058,
-64043,
97742745,
70059,
-64042,
97804389,
70031,
-64041,
97867358,
70057,
-64040,
97925845,
70066,
-1,
97987423,
70029,
98174443,
93950150,
-1,
98239978,
98139216,
-1,
98305504,
98209918,
-1,
98371038,
98266956,
70072,
-1,
98332492,
70073,
98566143,
98323703,
70074,
-1,
98454775,
70075,
98633188,
98242321,
-1,
-64030,
98601111,
70079,
-64029,
98648157,
70077,
-1,
98701073,
70067,
98895334,
98601111,
70070,
-1,
98863255,
70071,
99026408,
98844765,
70068,
-1,
98975837,
70069,
-64023,
98984037,
70078,
-1,
99101535,
70076,
-1,
98196069,
70091,
99354111,
98138146,
-1,
99419640,
99304182,
-1,
99485168,
99389441,
-1,
-64017,
99449154,
70018,
-1,
99512383,
70080,
99681779,
99421969,
-1,
-64014,
99651146,
70081,
-1,
99704893,
70017,
-64012,
99650608,
70083,
-64011,
99843091,
70107,
-64010,
99901422,
70090,
-64009,
99961453,
70082,
-1,
100015566,
70016,
100206077,
99363679,
-1,
100271612,
100148386,
-1,
-64005,
100207090,
70111,
-1,
100272470,
70110,
-1,
100231626,
70088,
-64002,
100175381,
70093,
-1,
100471744,
70089,
100664848,
99296607,
-1,
100730382,
100614889,
-1,
100795908,
100696398,
-1,
-63997,
100766934,
70098,
-1,
100810578,
70099,
100992519,
100759586,
-1,
-63994,
100944253,
70102,
-1,
101003830,
70103,
101189130,
100938534,
-1,
-63991,
101148426,
70100,
-1,
101206239,
70101,
-63989,
101161223,
70096,
-63988,
101344004,
70097,
-63987,
101408194,
70105,
-1,
101461504,
70104,
-63985,
100689552,
70086,
-1,
101585851,
70085,
101778963,
100608863,
-1,
-63982,
101750413,
70092,
-1,
101796670,
70106,
-63980,
101737013,
70084,
-63979,
101924055,
70108,
-63978,
101983029,
70109,
-1,
102043567,
70087,
-1,
93866691,
129416,
-63975,
93818517,
127847,
-1,
102257705,
129368,
102434349,
93741157,
-1,
102499879,
102397896,
-1,
102565409,
102448076,
-1,
-63970,
102534149,
113827,
-63969,
102584818,
113824,
-63968,
102640186,
113826,
-1,
102703943,
113825,
102893094,
102498304,
-1,
102958629,
102832162,
-1,
-63964,
102903718,
11103,
-1,
102961209,
11087,
-1,
102923513,
11086,
-1,
102833532,
127856,
103286314,
102460666,
-1,
-63959,
103253843,
128722,
-1,
103291022,
128717,
-63957,
103256684,
128703,
-63956,
103441980,
127776,
-1,
103489820,
129327,
103679539,
102383709,
-1,
103745073,
103626600,
-1,
-63952,
103704475,
983075,
-1,
103760861,
983078,
-63950,
103704785,
128674,
-1,
103886435,
128737,
104072758,
103640908,
-1,
-63947,
104041054,
129335,
-1,
104088525,
129424,
-63945,
104044205,
983197,
-1,
104213854,
128017,
104400533,
93679207,
-1,
104466068,
104338766,
-1,
104531601,
104408417,
-1,
104597081,
104484087,
-1,
104662612,
104535862,
-1,
104728147,
104603126,
-1,
-63937,
104700024,
127305,
-63936,
104765101,
127304,
-63935,
104830450,
127303,
-63934,
104894005,
127301,
-63933,
104958103,
127300,
-63932,
105021774,
127299,
-63931,
105082700,
127297,
-63930,
105147949,
127296,
-63929,
105209957,
127294,
-63928,
105272852,
127292,
-63927,
105336055,
127291,
-63926,
105401027,
127290,
-63925,
105466357,
127289,
-63924,
105529437,
127288,
-63923,
105593543,
127287,
-63922,
105658281,
127286,
-63921,
105722662,
127285,
-63920,
105786207,
127284,
-63919,
105850207,
127283,
-63918,
105913655,
127282,
-1,
105975569,
127280,
-1,
104693072,
127397,
106299391,
104620133,
-1,
106301016,
106183067,
-1,
-63913,
106259767,
10191,
-1,
106304363,
10190,
-1,
106266401,
127401,
106563179,
104537399,
-1,
106628713,
106515101,
-1,
106694239,
106564961,
-1,
-63907,
106631011,
127539,
-63906,
106696461,
127538,
-1,
106761405,
127544,
106956388,
106630345,
-1,
107021923,
106892780,
-1,
-63902,
106958470,
127542,
-1,
107023879,
127543,
-1,
106965995,
127541,
107284072,
106892319,
-1,
-63898,
107220165,
127546,
-63897,
107285522,
127540,
-1,
107350726,
127545,
-1,
107220741,
127547,
-63894,
106587254,
127378,
-1,
107564279,
127377,
107742834,
106526754,
-1,
107808367,
107686751,
-1,
-63890,
107777749,
127393,
-1,
107815571,
127388,
-63888,
107767251,
127384,
-63887,
107956680,
127395,
-1,
108019777,
127402,
108201591,
107708750,
-1,
108267126,
108171545,
-1,
-63883,
108224631,
127389,
-1,
108277736,
127394,
-1,
108216151,
127387,
108529275,
108149447,
-1,
-63879,
108478694,
127400,
-63878,
108538597,
127399,
-1,
108600631,
127398,
108791423,
108475174,
-1,
-63875,
108753346,
127379,
-63874,
108816176,
127390,
-1,
108874011,
127392,
109053570,
108760629,
-1,
-63871,
109017122,
127386,
-1,
109076757,
127404,
109250181,
109021335,
-1,
-63868,
109209843,
127385,
-1,
109264008,
127403,
109446792,
109207653,
-1,
-63865,
109403310,
127396,
-1,
109464259,
127383,
109643403,
109402967,
-1,
-63862,
109590441,
127382,
-1,
109654703,
127381,
109840014,
109595537,
-1,
-63859,
109803678,
127490,
-1,
109857913,
127489,
-63857,
109810215,
127311,
-63856,
109986202,
127380,
-1,
110046733,
127391,
110297087,
104464384,
-1,
-63853,
110199395,
128918,
-1,
110259367,
11216,
-1,
104415642,
129425,
110495420,
104368279,
-1,
110560947,
| |
import copy
import json
from collections.abc import Iterable
from falcon.testing import create_environ
from falcon import Request
from falcon import errors
import falcon
import pytest
from unittest.mock import Mock
from graceful.errors import ValidationError
from graceful.resources.base import BaseResource
from graceful.resources.generic import Resource
from graceful.resources import mixins
from graceful.parameters import StringParam, BaseParam, IntParam
from graceful.serializers import BaseSerializer
from graceful.fields import StringField
from graceful.validators import min_validator, max_validator
class TestResource(Resource):
    """Minimal concrete Resource used as a shared fixture in these tests."""

    def retrieve(self, params, meta, **kwargs):
        # Returning no content is enough to exercise the GET machinery.
        return None
def _retrieve_header(response, header):
    """Little compatibility utility for the response.get_header() method.

    response.get_header() was introduced in falcon 1.0 but we want to
    retrieve response header values in all versions in a consistent manner.

    Args:
        response (falcon.Response): response object instance
        header (str): case-insensitive header name

    Returns:
        The header's value.
    """
    try:
        return response.get_header(header)
    except AttributeError:
        # compat: on falcon<1.0 there is no get_header() method so we must
        # access the _headers dictionary directly.
        # note: _headers stores header names lower-cased while get_header()
        # is case-insensitive, so lowercase the name here too to ensure
        # consistency across versions.
        return response._headers[header.lower()]
def test_base_resource_get(req, resp):
    """GET on a minimal resource yields a 200 OK response with a JSON body.

    Args:
        req (falcon.Request): request instance from the ``req`` fixture.
        resp (falcon.Response): response instance from the ``resp`` fixture.
    """
    TestResource().on_get(req, resp)

    assert resp.status == falcon.HTTP_200
    assert resp.content_type == "application/json"
    assert resp.body
def test_resource_indent(req, resp):
    """JSON output is compact by default and pretty-printed on request."""
    resource = TestResource()

    # Default rendering: compact JSON without whitespace or newlines.
    resource.on_get(req, resp)
    assert " " not in resp.body
    assert "\n" not in resp.body

    # Asking for indent=4 must produce indented output.
    req.params['indent'] = '4'
    resource.on_get(req, resp)
    assert " " in resp.body
def test_resource_meta(req, resp):
    """The 'meta' section of GET output has the expected structure.

    Args:
        req (falcon.Request): request instance from the ``req`` fixture.
        resp (falcon.Response): response instance from the ``resp`` fixture.
    """
    TestResource().on_get(req, resp)

    payload = json.loads(resp.body)
    assert 'meta' in payload
    assert 'params' in payload['meta']
def test_required_params(req, resp):
    """A missing required param raises the specific falcon exception so the
    proper status code is returned; supplying it makes the request succeed.

    Args:
        req (falcon.Request): request instance object provided by ``req``
            pytest fixture
        resp (falcon.Response): response instance provided by ``resp`` pytest
            fixture
    """
    class ParametrizedResource(TestResource):
        foo = StringParam(details="required foo!", required=True)

    resource = ParametrizedResource()
    with pytest.raises(errors.HTTPMissingParam):
        resource.on_get(req, resp)

    param_req = copy.copy(req)
    param_req.params['foo'] = 'bar'
    # BUGFIX: the request carrying the param must be the one dispatched.
    # The original called on_get(req, ...), which only passed because
    # copy.copy() shares the params dict between req and param_req.
    resource.on_get(param_req, resp)
    assert resp.status == falcon.HTTP_OK
def test_resource_accepts_kwargs(req, resp):
    """on_get must tolerate extra keyword arguments.

    This matters because falcon passes URL-template variables through as
    keyword arguments to the responder.

    Args:
        req (falcon.Request): request instance from the ``req`` fixture.
        resp (falcon.Response): response instance from the ``resp`` fixture.
    """
    TestResource().on_get(req, resp, foo='bar')
def test_describe(req, resp):
    """resource.describe() exposes the default keys and extends via kwargs.

    Args:
        req (falcon.Request): request instance from the ``req`` fixture.
        resp (falcon.Response): response instance from the ``resp`` fixture.
    """
    resource = Resource()

    # All default description keys must be present.
    description = resource.describe(req, resp)
    for key in ('path', 'name', 'details', 'params', 'methods'):
        assert key in description

    # Extra kwargs passed to describe() are merged into the output.
    assert 'foo' not in description
    description = resource.describe(req, resp, foo='bar')
    assert 'foo' in description
    assert description['foo'] == 'bar'
def test_options(resp):
    """OPTIONS returns resource.describe() serialized as JSON plus headers.

    Args:
        resp (falcon.Response): response instance provided by the ``resp``
            pytest fixture (the request is built by hand below because we
            bypass falcon's routing and dispatching entirely).
    """
    req = Request(create_environ(method="OPTIONS"))  # noqa
    resource = Resource()

    resource.on_options(req, resp)

    allow = _retrieve_header(resp, 'allow')
    assert 'OPTIONS' in allow
    assert 'GET' in allow
    assert resp.status == falcon.HTTP_200
    body = json.loads(resp.body)
    assert body
    # The OPTIONS body is exactly the resource's description.
    assert resource.describe(req, resp) == body
def test_options_with_additional_args(req, resp):
    """OPTIONS succeeds even when unexpected extra kwargs are passed.

    Note: this is the case when OPTIONS is requested on a resource that is
    routed with a URL template.
    """
    # Build the request manually; falcon routing/dispatch is bypassed here.
    options_req = Request(create_environ(method="OPTIONS"))  # noqa
    Resource().on_options(options_req, resp, additionnal_kwarg="foo")
def test_declarative_parameters(req, resp):
    """Params declared as class attributes surface in .params and describe()."""
    class SomeResource(Resource):
        required_param = StringParam(details="some param", required=True)
        optional_param = StringParam(details="some param", required=False)

    resource = SomeResource()

    # Both declared params are registered on the resource...
    for name in ('required_param', 'optional_param'):
        assert name in resource.params

    # ...and each describes itself in the resource description.
    description = resource.describe(req, resp)
    for name in ('required_param', 'optional_param'):
        assert name in description['params']

    # The 'required' flag is reflected in each param's description.
    assert description['params']['required_param']['required'] is True
    assert description['params']['optional_param']['required'] is False
def test_parameter_inheritance():
    """Derived resource classes inherit params and may override them."""
    class SomeResource(Resource):
        foo = StringParam(details="give me foo", required=False)
        bar = StringParam(details="give me bar", required=False)

    class DerivedResource(SomeResource):
        # 'required' is toggled here so the override is observable.
        bar = StringParam(details="overridden parameter", required=True)

    resource = DerivedResource()

    # Both the inherited and the redefined params are available.
    assert 'foo' in resource.params
    assert 'bar' in resource.params
    # The subclass definition of 'bar' wins.
    assert resource.params['bar'].required is True
def test_parameter_with_many_and_required():
    """A required many=True param collects every query-string occurrence."""
    class SomeResource(Resource):
        foo = IntParam(details="give me foo", required=True, many=True)

    request = Request(create_environ(query_string="foo=1&foo=2"))
    params = SomeResource().require_params(request)

    assert isinstance(params['foo'], Iterable)
    assert set(params['foo']) == {1, 2}
@pytest.mark.parametrize(
    'query_string', ['number=10', 'number=15', 'number=20']
)
def test_parameter_with_validation_enabled_passes(query_string):
    """Values on or inside the min/max bounds pass validation."""
    class SomeResource(Resource):
        number = IntParam(
            details="number with min/max bounds",
            validators=[min_validator(10), max_validator(20)]
        )

    request = Request(create_environ(query_string=query_string))
    params = SomeResource().require_params(request)

    assert isinstance(params['number'], int)
@pytest.mark.parametrize(
    'query_string', ['number=5', 'number=100']
)
def test_parameter_with_validation_raises_bad_request(query_string):
    """Values outside the min/max bounds raise HTTPBadRequest."""
    class SomeResource(Resource):
        number = IntParam(
            details="number with min/max bounds",
            validators=[min_validator(10), max_validator(20)]
        )

    request = Request(create_environ(query_string=query_string))
    with pytest.raises(errors.HTTPBadRequest):
        SomeResource().require_params(request)
def test_parameter_with_many_and_default(req):
    """many=True with a default yields [default] when nothing is passed."""
    class SomeResource(Resource):
        foo = StringParam(details="give me foo", default='baz', many=True)

    resource = SomeResource()

    # No value supplied: the default is wrapped in the container.
    params = resource.require_params(req)
    assert isinstance(params['foo'], Iterable)
    assert params['foo'] == ['baz']

    # An explicit value replaces the default.
    request = Request(create_environ(query_string="foo=bar"))
    params = resource.require_params(request)
    assert isinstance(params['foo'], Iterable)
    assert params['foo'] == ['bar']
def test_parameter_with_many_unspecified(req):
    """A many=True param without a default is absent when not supplied."""
    class SomeResource(Resource):
        foo = StringParam(details="give me foo", many=True)

    params = SomeResource().require_params(req)
    assert 'foo' not in params
def test_parameter_with_many_and_custom_container_type_object():
    """container may be a plain type object rather than a descriptor."""
    class StringSetParam(StringParam):
        # A bare type object is not a descriptor, so it is called with the
        # values only, never receiving 'self'.
        container = set

    class SomeResource(Resource):
        foo = StringSetParam(details="give me foo", many=True)

    request = Request(create_environ(query_string="foo=bar&foo=baz"))
    params = SomeResource().require_params(request)

    assert isinstance(params['foo'], set)
    assert {'bar', 'baz'} <= params['foo']
def test_parameter_with_many_and_custom_container_method():
    """container may be a custom method bound to the param instance."""
    class StringSetParam(StringParam):
        def container(self, values):
            # Called as a bound method: receives self plus the values.
            return set(values)

    class SomeResource(Resource):
        foo = StringSetParam(details="give me foo", many=True)

    request = Request(create_environ(query_string="foo=bar&foo=baz"))
    params = SomeResource().require_params(request)

    assert isinstance(params['foo'], set)
    assert {'bar', 'baz'} <= params['foo']
def test_parameter_value_errors_translated_to_http_errors(req, resp):
    """ValueError raised in a param's .value() surfaces as HTTPBadRequest."""
    class InvalidParam(BaseParam):
        def value(self, raw_value):
            raise ValueError("some error")

    class InvalidResource(TestResource):
        foo = InvalidParam("invalid", "having this will always raise error")

    resource = InvalidResource()

    # Without the offending param the GET works fine...
    resource.on_get(req, resp)

    # ...but once it is supplied the ValueError must become a 400.
    req.params['foo'] = 'bar'
    with pytest.raises(errors.HTTPBadRequest):
        resource.on_get(req, resp)
def test_default_parameters(req):
    """Defaults fill in missing params; params without defaults stay absent."""
    class ResourceWithDefaults(Resource):
        foo = StringParam(details="foo with defaults", default="default")
        bar = StringParam(details="bar w/o default")

    params = ResourceWithDefaults().require_params(req)

    assert 'foo' in params
    assert params['foo'] == 'default'
    assert 'bar' not in params
def test_whole_serializer_validation_as_hhtp_bad_request(req):
    """Serializer-level ValidationError surfaces as HTTPBadRequest."""
    class TestSerializer(BaseSerializer):
        one = StringField("one different than two")
        two = StringField("two different than one")

        def validate(self, object_dict, partial=False):
            super().validate(object_dict, partial)
            # Example cross-field rule (a kind of uniqueness relationship):
            # the two values must differ.
            if object_dict['one'] == object_dict['two']:
                raise ValidationError("one must be different than two")

    class TestResource(Resource):
        serializer = TestSerializer()

    env = create_environ(
        body=json.dumps({'one': 'foo', 'two': 'foo'}),
        headers={'Content-Type': 'application/json'},
    )
    with pytest.raises(errors.HTTPBadRequest):
        TestResource().require_validated(Request(env))
def test_require_representation_application_json():
    """require_representation parses application/json with/without charset."""
    resource = TestResource()

    for content_type in ('application/json',
                         'application/json; charset=UTF-8'):
        env = create_environ(
            body=json.dumps({'one': 'foo', 'two': 'foo'}),
            headers={'Content-Type': content_type},
        )
        representation = resource.require_representation(Request(env))
        assert isinstance(representation, dict)
def test_require_representation_unsupported_media_type():
    """Invalid or unsupported content types raise HTTPUnsupportedMediaType."""
    resource = TestResource()

    unsupported_types = (
        'foo bar',                              # invalid content-type format
        'matter-transport/sentient-life-form',  # valid format, surely
                                                # unsupported (RFC-1437)
    )
    for content_type in unsupported_types:
        env = create_environ(
            body=json.dumps({'one': 'foo', 'two': 'foo'}),
            headers={'Content-Type': content_type},
        )
        with pytest.raises(falcon.HTTPUnsupportedMediaType):
            resource.require_representation(Request(env))
@pytest.mark.parametrize("mixin,method,http_handler", [
(mixins.RetrieveMixin, 'retrieve', 'on_get'),
(mixins.ListMixin, 'list', 'on_get'),
(mixins.CreateMixin, 'create', 'on_post'),
(mixins.CreateBulkMixin, 'create_bulk', 'on_patch'),
(mixins.UpdateMixin, 'update', | |
try:
created_at = iso8601.parse_date(created_at)
except iso8601.ParseError as exc:
log.error(str(exc))
return created_at
created_at = pytz.UTC.normalize(created_at)
return created_at
def _list_machines__postparse_machine(self, machine, node_dict):
updated = False
os_type = 'linux'
if 'windows' in str(
node_dict['extra'].get('image', '')).lower():
os_type = 'windows'
if os_type != machine.os_type:
machine.os_type = os_type
updated = True
# Get number of vCPUs for bare metal and cloud servers, respectively.
if 'cpu' in node_dict['extra'] and \
node_dict['extra'].get('cpu') != machine.extra.get(
'cpus'):
machine.extra['cpus'] = node_dict['extra'].get['cpu']
updated = True
elif 'maxCpu' in node_dict.extra and \
machine.extra['cpus'] != node_dict['extra']['maxCpu']:
machine.extra['cpus'] = node_dict['extra']['maxCpu']
updated = True
return updated
def _list_machines__cost_machine(self, machine, node_dict):
# SoftLayer includes recurringFee on the VM metadata but
# this is only for the compute - CPU pricing.
# Other costs (ram, bandwidth, image) are included
# on billingItemChildren.
extra_fee = 0
if not node_dict['extra'].get('hourlyRecurringFee'):
cpu_fee = float(node_dict['extra'].get('recurringFee'))
for item in node_dict['extra'].get('billingItemChildren',
()):
# don't calculate billing that is cancelled
if not item.get('cancellationDate'):
extra_fee += float(item.get('recurringFee'))
return 0, cpu_fee + extra_fee
else:
# node_dict['extra'].get('recurringFee')
# here will show what it has cost for the current month, up to now.
cpu_fee = float(
node_dict['extra'].get('hourlyRecurringFee'))
for item in node_dict['extra'].get('billingItemChildren',
()):
# don't calculate billing that is cancelled
if not item.get('cancellationDate'):
extra_fee += float(item.get('hourlyRecurringFee'))
return cpu_fee + extra_fee, 0
def _list_machines__get_location(self, node):
return node['extra'].get('datacenter')
def _reboot_machine(self, machine, node):
self.connection.reboot_node(node)
return True
def _destroy_machine(self, machine, node):
self.connection.destroy_node(node)
def _parse_networks_from_request(self, auth_context, networks_dict):
ret_networks = {}
vlan = networks_dict.get('vlan')
if vlan:
ret_networks['vlan'] = vlan
return ret_networks
def _parse_extra_from_request(self, extra, plan):
plan['metal'] = extra.get('metal', False)
plan['hourly'] = extra.get('hourly', False)
def _post_parse_plan(self, plan):
machine_name = plan.get('machine_name')
if '.' in machine_name:
plan['domain'] = '.'.join(machine_name.split('.')[1:])
plan['machine_name'] = machine_name.split('.')[0]
class AzureComputeController(BaseComputeController):
    """Compute controller for classic (ASM) Azure clouds."""

    def _connect(self, **kwargs):
        # The libcloud Azure driver expects the management certificate as a
        # file on disk, so dump the stored PEM to a temp file first.
        tmp_cert_file = tempfile.NamedTemporaryFile(delete=False)
        tmp_cert_file.write(self.cloud.certificate.encode())
        tmp_cert_file.close()
        return get_driver(Provider.AZURE)(self.cloud.subscription_id,
                                          tmp_cert_file.name)

    def _list_machines__postparse_machine(self, machine, node_dict):
        # Returns True when the machine document was modified.
        updated = False
        os_type = node_dict['extra'].get('os_type', 'linux')
        if machine.os_type != os_type:
            machine.os_type = os_type
            updated = True
        return updated

    def _list_machines__cost_machine(self, machine, node_dict):
        # Only running/paused machines accrue cost; (hourly, monthly) tuple.
        if node_dict['state'] not in [NodeState.RUNNING.value,
                                      NodeState.PAUSED.value]:
            return 0, 0
        return node_dict['extra'].get('cost_per_hour', 0), 0

    def _list_images__fetch_images(self, search=None):
        images = self.connection.list_images()
        # NOTE(review): 'Barracude' looks like a typo for 'Barracuda' --
        # left as-is because fixing it would change which images are
        # filtered out; confirm intent before changing.
        images = [image for image in images
                  if 'RightImage' not in image.name and
                  'Barracude' not in image.name and
                  'BizTalk' not in image.name]
        # There are many builds for some images eg Ubuntu.
        # All have the same name! Keep only the first build of each name.
        images_dict = {}
        for image in images:
            if image.name not in images_dict:
                images_dict[image.name] = image
        return list(images_dict.values())

    def _cloud_service(self, node_id):
        """
        Azure libcloud driver needs the cloud service
        specified as well as the node
        """
        cloud_service = self.connection.get_cloud_service_from_node_id(
            node_id)
        return cloud_service

    def _get_libcloud_node(self, machine, no_fail=False):
        """Find the libcloud Node for `machine`; stub or raise when missing."""
        cloud_service = self._cloud_service(machine.machine_id)
        for node in self.connection.list_nodes(
                ex_cloud_service_name=cloud_service):
            if node.id == machine.machine_id:
                return node
        if no_fail:
            # Return a placeholder node so callers can proceed gracefully.
            return Node(machine.machine_id, name=machine.machine_id,
                        state=0, public_ips=[], private_ips=[],
                        driver=self.connection)
        raise MachineNotFoundError("Machine with id '%s'." %
                                   machine.machine_id)

    def _start_machine(self, machine, node):
        cloud_service = self._cloud_service(machine.machine_id)
        return self.connection.ex_start_node(
            node, ex_cloud_service_name=cloud_service)

    def _stop_machine(self, machine, node):
        cloud_service = self._cloud_service(machine.machine_id)
        return self.connection.ex_stop_node(
            node, ex_cloud_service_name=cloud_service)

    def _reboot_machine(self, machine, node):
        cloud_service = self._cloud_service(machine.machine_id)
        return self.connection.reboot_node(
            node, ex_cloud_service_name=cloud_service)

    def _destroy_machine(self, machine, node):
        cloud_service = self._cloud_service(machine.machine_id)
        return self.connection.destroy_node(
            node, ex_cloud_service_name=cloud_service)

    def _list_machines__machine_actions(self, machine, node_dict):
        super(AzureComputeController, self)._list_machines__machine_actions(
            machine, node_dict)
        # BUGFIX: compare the enum *value* with '==', not 'is' -- identity
        # comparison against .value is unreliable for arbitrary objects.
        if node_dict['state'] == NodeState.PAUSED.value:
            machine.actions.start = True
class AzureArmComputeController(BaseComputeController):
def _connect(self, **kwargs):
    """Instantiate the libcloud Azure ARM driver from stored credentials."""
    driver_cls = get_driver(Provider.AZURE_ARM)
    return driver_cls(self.cloud.tenant_id,
                      self.cloud.subscription_id,
                      self.cloud.key,
                      self.cloud.secret)
def _list_machines__postparse_machine(self, machine, node_dict):
    """Sync os_type and network references from freshly-listed node data.

    Returns:
        bool: True if any field on `machine` was modified.
    """
    updated = False
    os_type = node_dict['extra'].get('os_type', 'linux')
    if os_type != machine.os_type:
        machine.os_type = os_type
        updated = True
    # Azure ARM reports the NIC's subnet as an ARM resource path; the
    # portion before '/subnets' identifies the parent virtual network.
    subnet = node_dict['extra'].get('subnet')
    if subnet:
        network_id = subnet.split('/subnets')[0]
        from mist.api.networks.models import Network
        try:
            network = Network.objects.get(cloud=self.cloud,
                                          network_id=network_id,
                                          missing_since=None)
            if network != machine.network:
                machine.network = network
                updated = True
        except me.DoesNotExist:
            # Network not (or no longer) listed; keep the machine's
            # existing network reference.
            pass
    # Mirror the resolved network id into machine.extra for API consumers.
    network_id = machine.network.network_id if machine.network else ''
    if machine.extra.get('network') != network_id:
        machine.extra['network'] = network_id
        updated = True
    return updated
def _list_machines__cost_machine(self, machine, node_dict):
    """Hourly cost reported by Azure; zero unless running or paused."""
    billable_states = (NodeState.RUNNING.value, NodeState.PAUSED.value)
    if node_dict['state'] in billable_states:
        return node_dict['extra'].get('cost_per_hour', 0), 0
    return 0, 0
def _list_machines__machine_actions(self, machine, node_dict):
    """Enable the 'start' action for paused Azure ARM machines."""
    super(AzureArmComputeController, self)._list_machines__machine_actions(
        machine, node_dict)
    # BUGFIX: compare the enum *value* with '==', not 'is' -- identity
    # comparison against .value is unreliable for arbitrary objects.
    if node_dict['state'] == NodeState.PAUSED.value:
        machine.actions.start = True
def _list_machines__get_location(self, node):
return node['extra'].get('location')
def _list_machines__get_size(self, node):
return node['extra'].get('size')
def _list_images__fetch_images(self, search=None):
    """Load the static Azure image catalog shipped with the configuration."""
    catalog_path = os.path.join(config.MIST_API_DIR,
                                config.AZURE_IMAGES_FILE)
    with open(catalog_path, 'r') as catalog_file:
        default_images = json.load(catalog_file)
    # The catalog maps image id -> human readable name.
    return [NodeImage(id=image_id, name=image_name,
                      driver=self.connection, extra={})
            for image_id, image_name in default_images.items()]
def _reboot_machine(self, machine, node):
self.connection.reboot_node(node)
def _destroy_machine(self, machine, node):
self.connection.destroy_node(node)
def _list_sizes__fetch_sizes(self):
location = self.connection.list_locations()[0]
return self.connection.list_sizes(location)
def _list_sizes__get_cpu(self, size):
return size.extra.get('numberOfCores')
def _list_sizes__get_name(self, size):
return size.name + ' ' + str(size.extra['numberOfCores']) \
+ ' cpus/' + str(size.ram / 1024) + 'GB RAM/ ' \
+ str(size.disk) + 'GB SSD'
def _list_locations__get_available_sizes(self, location):
    """CloudSize documents whose external ids are offered at `location`."""
    from mist.api.clouds.models import CloudSize
    offered = self.connection.list_sizes(location=location)
    external_ids = [size.id for size in offered]
    return CloudSize.objects(cloud=self.cloud,
                             external_id__in=external_ids)
def _list_machines__machine_creation_date(self, machine, node_dict):
    """Preserve the first recorded creation time.

    Workaround: Azure refreshes the node's timestamp when a machine
    stops, reboots etc., so a creation date that is already known must
    never be overwritten.
    """
    if machine.created is None:
        return super()._list_machines__machine_creation_date(machine,
                                                             node_dict)
    return machine.created
def _generate_plan__parse_networks(self, auth_context, networks_dict,
location):
return networks_dict.get('network')
def _generate_plan__parse_custom_volume(self, volume_dict):
try:
size = int(volume_dict['size'])
except KeyError:
raise BadRequestError('Volume size parameter is required')
except (TypeError, ValueError):
raise BadRequestError('Invalid volume size type')
if size < 1:
raise BadRequestError('Volume size should be at least 1 GB')
try:
name = volume_dict['name']
except KeyError:
raise BadRequestError('Volume name parameter is required')
storage_account_type = volume_dict.get('storage_account_type',
'StandardSSD_LRS')
# https://docs.microsoft.com/en-us/rest/api/compute/virtual-machines/create-or-update#storageaccounttypes # noqa
if storage_account_type not in {'Premium_LRS',
'Premium_ZRS',
'StandardSSD_LRS',
'Standard_LRS',
'StandardSSD_ZRS',
'UltraSSD_LRS'}:
raise BadRequestError('Invalid storage account type for volume')
caching_type = volume_dict.get('caching_type', 'None')
if caching_type not in {'None',
'ReadOnly',
'ReadWrite',
}:
raise BadRequestError('Invalid caching type')
return {
'name': name,
'size': size,
'storage_account_type': storage_account_type,
'caching_type': caching_type,
}
def _generate_plan__parse_extra(self, extra, plan):
    """Validate Azure-specific creation options and record them in `plan`.

    Handles the resource group, the OS-disk storage account type, the
    admin username and (optionally) the admin password.

    Raises:
        BadRequestError: on any invalid option value.
    """
    from mist.api.clouds.models import CloudLocation
    # NOTE(review): `location` is never used below. Presumably the lookup
    # doubles as validation (objects.get raises when the id is unknown) --
    # confirm before removing this dead assignment.
    location = CloudLocation.objects.get(
        id=plan['location']['id'], cloud=self.cloud)
    # Resource group defaults to 'mist'; the name must match Azure's
    # allowed character set.
    resource_group_name = extra.get('resource_group') or 'mist'
    if not re.match(r'^[-\w\._\(\)]+$', resource_group_name):
        raise BadRequestError('Invalid resource group name')
    resource_group_exists = self.connection.ex_resource_group_exists(
        resource_group_name)
    plan['resource_group'] = {
        'name': resource_group_name,
        'exists': resource_group_exists
    }
    storage_account_type = extra.get('storage_account_type',
                                     'StandardSSD_LRS')
    # https://docs.microsoft.com/en-us/rest/api/compute/virtual-machines/create-or-update#storageaccounttypes # noqa
    if storage_account_type not in {'Premium_LRS',
                                    'Premium_ZRS',
                                    'StandardSSD_LRS',
                                    'StandardSSD_ZRS',
                                    'Standard_LRS'}:
        raise BadRequestError('Invalid storage account type for OS disk')
    plan['storage_account_type'] = storage_account_type
    plan['user'] = extra.get('user') or 'azureuser'
    # The password is optional; when given it must satisfy Azure's
    # complexity rules.
    if extra.get('password'):
        if validate_password(extra['password']) is False:
            raise BadRequestError(
                'Password must be between 8-123 characters long and '
                'contain: an uppercase character, a lowercase character'
                ' and a numeric digit')
        plan['password'] = extra['password']
def _generate_plan__post_parse_plan(self, plan):
    """Finalize an Azure ARM plan: auth method and target virtual network.

    Enforces the image-appropriate credential (password on Windows, SSH
    key on Linux) and resolves the virtual network to use, deciding
    whether it must be created.

    Raises:
        BadRequestError: when the required credential is missing, the
            network lookup fails, or the network is in another location.
    """
    from mist.api.images.models import CloudImage
    from mist.api.clouds.models import CloudLocation
    location = CloudLocation.objects.get(
        id=plan['location']['id'], cloud=self.cloud)
    image = CloudImage.objects.get(
        id=plan['image']['id'], cloud=self.cloud)
    if image.os_type == 'windows':
        # Windows deployments authenticate with a password only.
        plan.pop('key', None)
        if plan.get('password') is None:
            raise BadRequestError('Password is required on Windows images')
    if image.os_type == 'linux':
        # we don't use password in linux images
        # so don't return it in plan
        plan.pop('password', None)
        if plan.get('key') is None:
            raise BadRequestError('Key is required on Unix-like images')
    try:
        network_name = plan.pop('networks')
    except KeyError:
        # No network requested: derive a default name from the resource
        # group and location.
        if plan['resource_group']['name'] == 'mist':
            network_name = (f'mist-{location.external_id}')
        else:
            network_name = (f"mist-{plan['resource_group']['name']}"
                            f"-{location.external_id}")
    if plan['resource_group']['exists'] is True:
        try:
            network = self.connection.ex_get_network(
                network_name,
                plan['resource_group']['name'])
        except BaseHTTPError as exc:
            if exc.code == 404:
                # network doesn't exist so we'll have to create it
                network_exists = False
            else:
                # TODO Consider what to raise on other status codes
                raise BadRequestError(exc)
        else:
            # make sure network is in the same location
            if network.location != location.external_id:
                raise BadRequestError(
                    'Network is in a different location'
                    ' from the one given')
            network_exists = True
    else:
        # A brand-new resource group cannot contain the network yet.
        network_exists = False
    plan['networks'] = {
        'name': network_name,
        'exists': network_exists
    }
def _create_machine__get_image_object(self, image):
    """Translate a stored CloudImage id into a libcloud AzureImage."""
    from mist.api.images.models import CloudImage
    from libcloud.compute.drivers.azure_arm import AzureImage
    cloud_image = CloudImage.objects.get(id=image)
    # external_id is stored as "publisher:offer:sku:version".
    publisher, offer, sku, version = cloud_image.external_id.split(':')
    return AzureImage(version, sku, offer, publisher, None, None)
def _create_machine__compute_kwargs(self, plan):
kwargs = super()._create_machine__compute_kwargs(plan)
kwargs['ex_user_name'] = plan['user']
kwargs['ex_use_managed_disks'] = True
kwargs['ex_storage_account_type'] = plan['storage_account_type']
kwargs['ex_customdata'] = plan.get('cloudinit', '')
key = kwargs.pop('auth', None)
if key:
kwargs['auth'] = NodeAuthSSHKey(key.public)
else:
kwargs['auth'] = NodeAuthPassword(plan['password'])
if plan['resource_group']['exists'] is False:
try:
self.connection.ex_create_resource_group(
plan['resource_group']['name'], kwargs['location'])
except BaseHTTPError as exc:
raise MachineCreationError(
'Could not create resource group: %s' % exc)
# add delay because sometimes the resource group is not yet ready
time.sleep(5)
kwargs['ex_resource_group'] = plan['resource_group']['name']
if plan['networks']['exists'] is False:
try:
security_group = self.connection.ex_create_network_security_group( # noqa
plan['networks']['name'],
kwargs['ex_resource_group'],
location=kwargs['location'],
securityRules=config.AZURE_SECURITY_RULES
)
except BaseHTTPError as exc:
raise MachineCreationError(
'Could not create security group: %s' % exc)
# add delay because sometimes the security group is not yet ready
time.sleep(3)
try:
network = self.connection.ex_create_network(
plan['networks']['name'],
kwargs['ex_resource_group'],
location=kwargs['location'],
networkSecurityGroup=security_group.id)
except | |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Collection metadata consumed by Ansible's doc/QA tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_hotspot20_hsprofile
short_description: Configure hotspot profile.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- <NAME> (@chillancezen)
- <NAME> (@JieX19)
- <NAME> (@fshen01)
- <NAME> (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overridden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
hotspot20_hsprofile:
description: the top level parameters set
required: false
type: dict
suboptions:
3gpp-plmn:
type: str
description: '3GPP PLMN name.'
access-network-asra:
type: str
description: 'Enable/disable additional step required for access (ASRA).'
choices:
- 'disable'
- 'enable'
access-network-esr:
type: str
description: 'Enable/disable emergency services reachable (ESR).'
choices:
- 'disable'
- 'enable'
access-network-internet:
type: str
description: 'Enable/disable connectivity to the Internet.'
choices:
- 'disable'
- 'enable'
access-network-type:
type: str
description: 'Access network type.'
choices:
- 'private-network'
- 'private-network-with-guest-access'
- 'chargeable-public-network'
- 'free-public-network'
- 'personal-device-network'
- 'emergency-services-only-network'
- 'test-or-experimental'
- 'wildcard'
access-network-uesa:
type: str
description: 'Enable/disable unauthenticated emergency service accessible (UESA).'
choices:
- 'disable'
- 'enable'
anqp-domain-id:
type: int
description: 'ANQP Domain ID (0-65535).'
bss-transition:
type: str
description: 'Enable/disable basic service set (BSS) transition Support.'
choices:
- 'disable'
- 'enable'
conn-cap:
type: str
description: 'Connection capability name.'
deauth-request-timeout:
type: int
description: 'Deauthentication request timeout (in seconds).'
dgaf:
type: str
description: 'Enable/disable downstream group-addressed forwarding (DGAF).'
choices:
- 'disable'
- 'enable'
domain-name:
type: str
description: 'Domain name.'
gas-comeback-delay:
type: int
description: 'GAS comeback delay (0 or 100 - 4000 milliseconds, default = 500).'
gas-fragmentation-limit:
type: int
description: 'GAS fragmentation limit (512 - 4096, default = 1024).'
hessid:
type: str
description: 'Homogeneous extended service set identifier (HESSID).'
ip-addr-type:
type: str
description: 'IP address type name.'
l2tif:
type: str
description: 'Enable/disable Layer 2 traffic inspection and filtering.'
choices:
- 'disable'
- 'enable'
nai-realm:
type: str
description: 'NAI realm list name.'
name:
type: str
description: 'Hotspot profile name.'
network-auth:
type: str
description: 'Network authentication name.'
oper-friendly-name:
type: str
description: 'Operator friendly name.'
osu-provider:
type: str
description: 'Manually selected list of OSU provider(s).'
osu-ssid:
type: str
description: 'Online sign up (OSU) SSID.'
pame-bi:
type: str
description: 'Enable/disable Pre-Association Message Exchange BSSID Independent (PAME-BI).'
choices:
- 'disable'
- 'enable'
proxy-arp:
type: str
description: 'Enable/disable Proxy ARP.'
choices:
- 'disable'
- 'enable'
qos-map:
type: str
description: 'QoS MAP set ID.'
roaming-consortium:
type: str
description: 'Roaming consortium list name.'
venue-group:
type: str
description: 'Venue group.'
choices:
- 'unspecified'
- 'assembly'
- 'business'
- 'educational'
- 'factory'
- 'institutional'
- 'mercantile'
- 'residential'
- 'storage'
- 'utility'
- 'vehicular'
- 'outdoor'
venue-name:
type: str
description: 'Venue name.'
venue-type:
type: str
description: 'Venue type.'
choices:
- 'unspecified'
- 'arena'
- 'stadium'
- 'passenger-terminal'
- 'amphitheater'
- 'amusement-park'
- 'place-of-worship'
- 'convention-center'
- 'library'
- 'museum'
- 'restaurant'
- 'theater'
- 'bar'
- 'coffee-shop'
- 'zoo-or-aquarium'
- 'emergency-center'
- 'doctor-office'
- 'bank'
- 'fire-station'
- 'police-station'
- 'post-office'
- 'professional-office'
- 'research-facility'
- 'attorney-office'
- 'primary-school'
- 'secondary-school'
- 'university-or-college'
- 'factory'
- 'hospital'
- 'long-term-care-facility'
- 'rehab-center'
- 'group-home'
- 'prison-or-jail'
- 'retail-store'
- 'grocery-market'
- 'auto-service-station'
- 'shopping-mall'
- 'gas-station'
- 'private'
- 'hotel-or-motel'
- 'dormitory'
- 'boarding-house'
- 'automobile'
- 'airplane'
- 'bus'
- 'ferry'
- 'ship-or-boat'
- 'train'
- 'motor-bike'
- 'muni-mesh-network'
- 'city-park'
- 'rest-area'
- 'traffic-control'
- 'bus-stop'
- 'kiosk'
wan-metrics:
type: str
description: 'WAN metric name.'
wnm-sleep-mode:
type: str
description: 'Enable/disable wireless network management (WNM) sleep mode.'
choices:
- 'disable'
- 'enable'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Configure hotspot profile.
fmgr_hotspot20_hsprofile:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
state: <value in [present, absent]>
hotspot20_hsprofile:
3gpp-plmn: <value of string>
access-network-asra: <value in [disable, enable]>
access-network-esr: <value in [disable, enable]>
access-network-internet: <value in [disable, enable]>
access-network-type: <value in [private-network, private-network-with-guest-access, chargeable-public-network, ...]>
access-network-uesa: <value in [disable, enable]>
anqp-domain-id: <value of integer>
bss-transition: <value in [disable, enable]>
conn-cap: <value of string>
deauth-request-timeout: <value of integer>
dgaf: <value in [disable, enable]>
domain-name: <value of string>
gas-comeback-delay: <value of integer>
gas-fragmentation-limit: <value of integer>
hessid: <value of string>
ip-addr-type: <value of string>
l2tif: <value in [disable, enable]>
nai-realm: <value of string>
name: <value of string>
network-auth: <value of string>
oper-friendly-name: <value of string>
osu-provider: <value of string>
osu-ssid: <value of string>
pame-bi: <value in [disable, enable]>
proxy-arp: <value in [disable, enable]>
qos-map: <value of string>
roaming-consortium: <value of string>
venue-group: <value in [unspecified, assembly, business, ...]>
venue-name: <value of string>
venue-type: <value in [unspecified, arena, stadium, ...]>
wan-metrics: <value of string>
wnm-sleep-mode: <value in [disable, enable]>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/wireless-controller/hotspot20/hs-profile',
'/pm/config/global/obj/wireless-controller/hotspot20/hs-profile'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/wireless-controller/hotspot20/hs-profile/{hs-profile}',
'/pm/config/global/obj/wireless-controller/hotspot20/hs-profile/{hs-profile}'
]
url_params = ['adom']
module_primary_key = 'name'
module_arg_spec = {
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'hotspot20_hsprofile': {
'required': False,
'type': 'dict',
'options': {
'3gpp-plmn': {
'required': False,
'type': 'str'
},
'access-network-asra': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'access-network-esr': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'access-network-internet': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'access-network-type': {
'required': False,
'choices': [
'private-network',
'private-network-with-guest-access',
'chargeable-public-network',
'free-public-network',
'personal-device-network',
'emergency-services-only-network',
'test-or-experimental',
'wildcard'
],
'type': 'str'
},
'access-network-uesa': | |
import gc
import os, sys
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.models import Model
import tensorflow.keras.layers as kl
import tensorflow.keras.models as km
import tensorflow.keras.callbacks as kc
import tensorflow.keras.backend as kb
import tensorflow.keras.regularizers as kr
import tensorflow.keras.optimizers as ko
from tensorflow.keras.utils import plot_model
import utils
import logging
import time
import glob
from sklearn import metrics
# Quiet TensorFlow's own loggers before any graph work happens
# (tf.logging is the TF1.x API; this script is written against TF1).
tf.get_logger().setLevel('INFO')
tf.logging.set_verbosity(tf.logging.ERROR)
# Timestamped INFO-level logging for this training script.
logging.basicConfig(level=logging.INFO,
                    format='[%(asctime)s] %(levelname)s: %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')
class Config:
    """Hyper-parameters and filesystem locations for one training run."""

    def __init__(self):
        # Optimisation settings.
        self.batch_size = 32
        self.epochs = 5
        self.deep_layers = [32, 32]
        # Embedding / data settings.
        self.embedding_method = 'random'
        self.is_debug = True
        self.data_dir = 'dataset'
        # Checkpointing.
        self.ckpt_dir = 'ckpt'
        self.load_weights = False
def Attention(input_dim):
    """Return a callable applying a dense softmax gate over its input.

    The returned function maps ``inputs`` to ``(gated, probs)`` where
    ``probs`` is a softmax weighting of width ``input_dim`` and ``gated``
    is the element-wise product of ``inputs`` with those weights.
    """
    def apply(inputs):
        probs = kl.Dense(input_dim, activation='softmax')(inputs)
        gated = kl.Multiply()([inputs, probs])
        return gated, probs
    return apply
def MLP(shapes, drop_rate, activation='relu', name='MLP'):
    """Return a callable stacking one Dense layer per entry in ``shapes``.

    Each Dense layer is followed by Dropout when ``drop_rate`` is
    meaningfully positive. An empty ``shapes`` yields the identity.
    """
    def apply(x):
        for idx, width in enumerate(shapes):
            x = kl.Dense(width, activation=activation,
                         name='{}-{}'.format(name, idx))(x)
            # Skip dropout layers entirely for (near-)zero rates.
            if drop_rate > 1e-4:
                x = kl.Dropout(rate=drop_rate)(x)
        return x
    return apply
def Classifier(shapes, drop_rate=0, activation='relu', name='Classifier'):
    """Return a callable: an MLP over shapes[:-1] plus a prediction head.

    The head activation is sigmoid for a single output unit and softmax
    otherwise.
    """
    def apply(x):
        hidden = MLP(shapes[:-1], drop_rate, activation=activation,
                     name='%s_MLP' % name)(x)
        head_act = 'sigmoid' if shapes[-1] == 1 else 'softmax'
        return kl.Dense(shapes[-1], activation=head_act,
                        name='{}_output'.format(name))(hidden)
    return apply
def Discriminator(**args):
    """Classifier factory that defaults its layer-name prefix to 'Discriminator'."""
    args.setdefault('name', 'Discriminator')
    return Classifier(**args)
class Model:
    """Adversarial DeepFM: an FM + deep tower trained jointly with a
    domain discriminator that tries to tell labelled (train) rows from
    unlabeled test rows.

    NOTE(review): this class shadows the ``Model`` imported from
    tensorflow.keras.models above; the keras class remains reachable
    as ``km.Model``. Written against the TF1.x graph/placeholder API.
    """

    def __init__(self, feat_dim, input_dim, deep_layers=None):
        """Build the TF1-style computation graph.

        Args:
            feat_dim: number of distinct feature indices (embedding rows).
            input_dim: number of feature fields per example.
            deep_layers: hidden-layer widths of the deep component.
        """
        # FIX: avoid a mutable default argument; [32, 32] mirrors
        # Config.deep_layers and is never mutated, so behavior is unchanged.
        if deep_layers is None:
            deep_layers = [32, 32]
        print(f'Building deepfm model with deep layers: {deep_layers} ...')
        # Separate placeholders for negative / positive labelled rows and
        # for unlabeled test rows (the latter feed only the discriminator).
        self.ph_x_neg_i = tf.placeholder(tf.float32, shape=[None, input_dim], name='PH_x_neg_i')
        self.ph_x_neg_v = tf.placeholder(tf.float32, shape=[None, input_dim], name='PH_x_neg_v')
        self.ph_x_pos_i = tf.placeholder(tf.float32, shape=[None, input_dim], name='PH_x_pos_i')
        self.ph_x_pos_v = tf.placeholder(tf.float32, shape=[None, input_dim], name='PH_x_pos_v')
        self.ph_x_test_i = tf.placeholder(tf.float32, shape=[None, input_dim], name='PH_test_i')
        self.ph_x_test_v = tf.placeholder(tf.float32, shape=[None, input_dim], name='PH_test_v')
        self.ph_y = tf.placeholder(tf.int32, shape=[None, 1], name='PH_y')
        self.ph_size = tf.placeholder(tf.int32, name='PH_size')
        self.drop_rate = tf.placeholder(tf.float32, name='PH_drop_rate')
        # Rows are stacked neg | pos | test along the batch axis.
        self.xi = tf.concat([self.ph_x_neg_i, self.ph_x_pos_i, self.ph_x_test_i], axis=0, name='Concat_i')
        self.xv = tf.concat([self.ph_x_neg_v, self.ph_x_pos_v, self.ph_x_test_v], axis=0, name='Concat_v')
        # Embedding width grows logarithmically with vocabulary size.
        embedding_size = int(np.log2(feat_dim)) + 1
        initializer1 = keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=2020)
        initializer2 = keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=2021)
        Embedding = kl.Embedding(feat_dim, embedding_size,
                                 embeddings_initializer=initializer1)
        Embedding_bias = kl.Embedding(feat_dim, 1,
                                      embeddings_initializer=initializer2)
        input_idx = kl.Input(tensor=self.xi, name='Input_idx')
        input_val = kl.Input(tensor=self.xv, name='Input_val')
        embeddings = Embedding(input_idx)
        feat_val = kl.Lambda(lambda x: tf.expand_dims(x, axis=-1))(input_val)
        embeddings = kl.Multiply()([embeddings, feat_val])
        # ---------- first order term ----------
        y_first_order = Embedding_bias(input_idx)
        y_first_order = kl.Multiply()([y_first_order, feat_val])
        y_first_order = kl.Lambda(lambda x: tf.reduce_sum(x, 2))(y_first_order)  # None * F
        y_first_order = kl.Dropout(rate=self.drop_rate)(y_first_order)
        # ---------- second order term ----------
        # FM identity: 0.5 * ((sum e)^2 - sum(e^2)) = sum of pairwise products.
        summed_features_emb = kl.Lambda(lambda x: tf.reduce_sum(x, 1))(embeddings)  # None * K
        summed_features_emb_square = kl.Lambda(tf.square)(summed_features_emb)  # None * K
        squared_features_emb = kl.Lambda(tf.square)(embeddings)
        squared_sum_features_emb = kl.Lambda(lambda x: tf.reduce_sum(x, 1))(squared_features_emb)  # None * K
        y_second_order = kl.Lambda(lambda x: 0.5 * tf.subtract(*x))([summed_features_emb_square, squared_sum_features_emb])  # None * K
        y_second_order = kl.Dropout(rate=self.drop_rate)(y_second_order)  # None * K
        # ---------- Deep component ----------
        y_deep = kl.Flatten()(embeddings)
        y_deep = kl.Dropout(rate=self.drop_rate)(y_deep)
        for units in deep_layers:
            y_deep = kl.Dense(units)(y_deep)
            y_deep = kl.BatchNormalization()(y_deep)
            y_deep = kl.Activation(activation='relu')(y_deep)
            y_deep = kl.Dropout(rate=self.drop_rate)(y_deep)
        # ---------- DeepFM ----------
        concat_input = kl.Concatenate(axis=1)([y_first_order, y_second_order, y_deep])
        concat_input = kl.Dropout(rate=self.drop_rate)(concat_input)
        # The discriminator sees labelled AND test rows ...
        y_pred_dis = Discriminator(shapes=[8, 1], drop_rate=0, activation='relu', name='Disc')(concat_input)
        # ... while the task classifier only sees the first ph_size labelled rows.
        concat_input = kl.Lambda(lambda x: x[:self.ph_size])(concat_input)
        self.y_pred = Classifier(shapes=[8, 1], activation='relu', name='Output')(concat_input)
        self.loss = tf.losses.log_loss(labels=self.ph_y, predictions=self.y_pred)
        # Domain labels: 0 for the labelled half, 1 for the test half.
        y_dis = tf.concat([tf.zeros([self.ph_size, 1], dtype='int32'), tf.ones([self.ph_size, 1], dtype='int32')], axis=0)
        self.loss_dis = tf.losses.log_loss(labels=y_dis, predictions=y_pred_dis)
        # Adversarial objective: minimise task loss while *maximising*
        # discriminator loss (hence the minus sign).
        alpha = 0.4
        self.loss_total = alpha * self.loss - (1 - alpha) * self.loss_dis
        self.model = km.Model([input_val, input_idx], [self.y_pred, y_pred_dis])
        opt = tf.train.AdamOptimizer(name='Adam')
        # Optimise feature extractor + classifier with Disc frozen ...
        self.train_class()
        self.op_optimize = opt.minimize(self.loss_total, var_list=self.model.trainable_weights)
        # ... and the discriminator with everything else frozen.
        self.train_disc()
        self.op_optimize_dis = opt.minimize(self.loss_dis, var_list=self.model.trainable_weights)
        self.acc = self.binary_acc(self.ph_y, self.y_pred)
        self.acc_dis = self.binary_acc(y_dis, y_pred_dis)

    def train_class(self):
        """Freeze discriminator layers; make everything else trainable."""
        for layer in self.model.layers:
            layer.trainable = 'Disc' not in layer.name

    def train_disc(self):
        """Make only discriminator layers trainable."""
        for layer in self.model.layers:
            layer.trainable = 'Disc' in layer.name

    def binary_acc(self, y_true, y_pred):
        """Accuracy of sigmoid scores against int labels at threshold 0.5."""
        y_pred_cmp = tf.math.greater(y_pred, tf.constant(0.5))
        y_pred_lb = tf.cast(y_pred_cmp, tf.int32)
        acc_cnt = tf.equal(y_pred_lb, y_true)
        return tf.reduce_mean(tf.cast(acc_cnt, tf.float32))

    def best_f1_thr(self, y_true, y_score):
        """Grid-search the decision threshold that maximises F1.

        Scans thresholds 0.05, 0.052, ... and stops early after 15
        consecutive steps without improvement.

        Returns:
            (best_f1, best_threshold)
        """
        t0 = 0.05
        v = 0.002
        best_t = t0
        best_f1 = 0
        cnt_no_inc = 0
        for step in range(201):
            curr_t = t0 + step * v
            y = [1 if x >= curr_t else 0 for x in y_score]
            # BUG FIX: F1 must be computed on the thresholded labels `y`,
            # not on the raw scores. The original passed y_score, so every
            # iteration produced the same value and the search was a no-op.
            curr_f1 = metrics.f1_score(y_true, y)
            if curr_f1 > best_f1:
                best_t = curr_t
                best_f1 = curr_f1
                cnt_no_inc = 0
            else:
                cnt_no_inc += 1
                if cnt_no_inc > 15:
                    break
        return best_f1, best_t

    def predict(self, sess, xdata, batch_size=64):
        """Score ``xdata`` in batches and return an array of sigmoid scores."""
        y_score = []
        for x_batch in utils.gen_batch2(xdata, batch_size, drop_last=False, one_epoch=True, shuffle=False):
            n = len(x_batch[0])
            # Test placeholders still have to be fed; zero rows are harmless
            # because ph_size truncates predictions to the first n rows.
            zeros = np.zeros((n,) + xdata[0].shape[1:], dtype='float32')
            feed_dict = {self.ph_x_neg_i: x_batch[0][:n // 2],
                         self.ph_x_pos_i: x_batch[0][n // 2:],
                         self.ph_x_neg_v: x_batch[1][:n // 2],  # fixed stray '[:n//2:]' slice
                         self.ph_x_pos_v: x_batch[1][n // 2:],
                         self.ph_x_test_i: zeros,
                         self.ph_x_test_v: zeros,
                         self.ph_size: n,
                         self.drop_rate: 0
                         }
            pred = sess.run(self.y_pred, feed_dict)
            y_score.extend(pred)
        return np.array(y_score)

    def evaluate(self, sess, xdata, y_true, batch_size=64, threshold=0.5):
        """Print and return (accuracy, f1, auc) of the model on ``xdata``."""
        print(' Evaluating: pos {}, neg {}'.format(y_true.sum(), len(y_true) - y_true.sum()))
        y_score = self.predict(sess, xdata, batch_size)
        fpr, tpr, thr = metrics.roc_curve(y_true, y_score)
        auc = metrics.auc(fpr, tpr)
        y_pred = np.zeros_like(y_score)
        y_pred[y_score > threshold] = 1
        acc = metrics.accuracy_score(y_true, y_pred)
        f1 = metrics.f1_score(y_true, y_pred)
        print(' acc: {:.4f}, auc: {:.4f}, f1: {:.4f}'.format(acc, auc, f1))
        t = metrics.confusion_matrix(y_true, y_pred, labels=[0, 1])
        print(' TP {} FN {} FP {} TN {}'.format(t[1, 1], t[1, 0], t[0, 1], t[0, 0]), end='')
        # Guard precision against zero predicted positives.
        if (t[1, 1] + t[0, 1]) > 0:
            p = t[1, 1] / (t[1, 1] + t[0, 1])
        else:
            p = 0
        r = t[1, 1] / (t[1, 1] + t[1, 0])
        # FIX: also guard the harmonic mean against p + r == 0, which
        # previously raised ZeroDivisionError when no positives were found.
        f1_pr = 2 * p * r / (p + r) if (p + r) > 0 else 0.0
        print(' P={:.4f}, R={:.4f}, F1={:.4f}'.format(p, r, f1_pr))
        return acc, f1, auc

    def run_batch(self, sess, x_neg_batch, x_pos_batch, x_test_batch, y, config):
        """One adversarial step: update the discriminator, then the classifier."""
        feed_dict = {self.ph_x_neg_i: x_neg_batch[0],
                     self.ph_x_neg_v: x_neg_batch[1],
                     self.ph_x_pos_i: x_pos_batch[0],
                     self.ph_x_pos_v: x_pos_batch[1],
                     self.ph_x_test_i: x_test_batch[0],
                     self.ph_x_test_v: x_test_batch[1],
                     self.ph_size: config.batch_size,
                     self.ph_y: y,
                     self.drop_rate: 0.5}
        self.train_disc()
        sess.run([self.op_optimize_dis], feed_dict=feed_dict)
        self.train_class()
        _, acc, loss, acc_dis, loss_dis = sess.run([self.op_optimize, self.acc, self.loss, self.acc_dis, self.loss_dis], feed_dict=feed_dict)
        return acc, loss, acc_dis, loss_dis

    def run(self, sess, x_neg, x_pos, x_val, y_val, x_test, config, epoch_offset=0):
        """Full training loop with per-epoch validation and checkpointing."""
        print(f'Training with batch size {config.batch_size}, epochs {config.epochs}')
        timer_epoch = utils.Clock(config.epochs)
        exp_acc = 0
        exp_loss = 0
        alpha = 0.2  # smoothing factor for the exponential running stats
        p_neg = len(x_neg[0]) / (len(x_neg[0]) + len(x_pos[0]))
        print('p_neg:', p_neg)
        p_inc = np.power(p_neg / 0.5, 1.0 / 5)  # kept for the (disabled) p_neg schedule below
        tmp_p_neg = 0.5
        pre_acc, pre_f1 = 0, 0
        cnt_converge = 0
        for epoch in range(config.epochs):
            print(f'\nEpoch {epoch+1}/{config.epochs} p_neg {round(tmp_p_neg, 3)} [{time.ctime()}]')
            tmp_num_neg = int(config.batch_size * tmp_p_neg)
            # tmp_p_neg = min(tmp_p_neg*p_inc, p_neg)
            y = np.zeros([config.batch_size, 1], dtype='int32')
            y[tmp_num_neg:, 0] = 1
            num_batchs = x_neg[0].shape[0] // config.batch_size
            gen_x_neg = utils.gen_batch2(x_neg, tmp_num_neg)
            gen_x_pos = utils.gen_batch2(x_pos, config.batch_size - tmp_num_neg)
            gen_x_test = utils.gen_batch2(x_test, config.batch_size)
            timer_batch = utils.Clock(num_batchs)
            avg_acc, avg_loss = 0, 0
            # BUG FIX: num_batchs // 10 is 0 when there are fewer than 10
            # batches, which crashed the progress modulo below; clamp to 1.
            log_every = max(1, num_batchs // 10)
            for batch in range(num_batchs):
                x_neg_batch, x_pos_batch = next(gen_x_neg), next(gen_x_pos)
                x_test_batch = next(gen_x_test)
                acc, loss, acc_dis, loss_dis = self.run_batch(sess, x_neg_batch, x_pos_batch, x_test_batch, y, config)
                exp_acc = (1 - alpha) * exp_acc + alpha * acc
                exp_loss = (1 - alpha) * exp_loss + alpha * loss
                avg_acc = (avg_acc * batch + acc) / (batch + 1)
                avg_loss = (avg_loss * batch + loss) / (batch + 1)
                toc = timer_batch.toc(batch)
                if (batch + 1) % log_every == 0:
                    stat = [avg_acc, avg_loss, exp_acc, exp_loss]
                    print(' Batch {}/{}: acc {:.4f} | loss {:.4f} | d_acc {:.4f} | d_loss {:.4f} | Elapsed {} | ETA {}'.format(batch + 1, num_batchs, acc, loss, acc_dis, loss_dis, *toc))
                    print(' avg_acc {:.4f} | avg_loss {:.4f} | exp_acc {:.4f} | exp_loss {:.4f}'.format(*stat))
            scores = self.evaluate(sess, x_val, y_val, config.batch_size)
            toc = timer_epoch.toc(epoch)
            ckpt_path = '{:04d}_acc{:.4f}_f{:.4f}_auc{:.4f}.h5'.format(epoch_offset + epoch, *scores)
            ckpt_path = os.path.join(config.ckpt_dir, ckpt_path)
            print(' Saving model to {}'.format(ckpt_path))
            self.model.save_weights(ckpt_path)
            print('Elapsed {} | ETA {}'.format(*toc))
            # Early stop once validation acc and f1 repeat twice in a row.
            if scores[0] == pre_acc and scores[1] == pre_f1:
                cnt_converge += 1
            else:
                pre_acc, pre_f1 = scores[:2]
                cnt_converge = 0
            if cnt_converge >= 2:
                print('Converged and exit.')  # FIX: message typo, was 'exist'
                break
        print('\nEvaluating on train dataset ...')
        x_train_i = np.concatenate([x_neg[0], x_pos[0]], axis=0)
        x_train_v = np.concatenate([x_neg[1], x_pos[1]], axis=0)
        x_train = (x_train_i, x_train_v)
        y_train = np.zeros([len(x_train_i), 1], dtype='int32')
        y_train[len(x_neg[0]):, 0] = 1
        self.evaluate(sess, x_train, y_train, config.batch_size)
        print('done.')
def get_data(config):
df, sparse_cols, num_train = utils.get_dataset_df(config.data_dir, debug=config.is_debug)
print('cols:', ', '.join(df.columns))
print('Sparse_cols:', sparse_cols)
ignore_cols = ['target']
dense_cols = [col for col in df.columns if col not in sparse_cols+ignore_cols]
sparse_cols.sort()
dense_cols.sort()
# print('Get embeddings ....')
# # embedding = {col:utils.binary_embedding(df[col].max()+1) for col in sparse_cols}
# embedding = utils.load_embedding(df, sparse_cols, config.data_dir, method=config.embedding_method)
# print('Processing embeddings ....')
# x = utils.process_embedding(df, dense_cols, embedding)
feat_dict, feat_dim = utils.gen_feat_dict(df, sparse_cols, ignore_cols)
Xi, Xv = utils.parse_dataset(df, feat_dict, sparse_cols, ignore_cols)
idx0 = df[df['target']==0].index.values
idx1 = df[df['target']==1].index.values
del df
gc.collect()
x_neg_train_i = Xi[idx0,:]
x_pos_train_i = Xi[idx1,:]
x_neg_train_v = Xv[idx0,:]
x_pos_train_v = Xv[idx1,:]
x_test_i = Xi[num_train:]
x_test_v = Xv[num_train:]
x_neg_train = (x_neg_train_i, x_neg_train_v)
x_pos_train = (x_pos_train_i, | |
torch.save(local_rref, fname)
@dist_init
def test_remote_throw(self):
    """A remote call whose UDF raises must surface the error on to_here()."""
    peer = worker_name((self.rank + 1) % self.world_size)
    remote_ref = rpc.remote(peer, raise_or_inc, args=(torch.ones(2),))
    # Fetching the value locally re-raises the remote exception.
    with self.assertRaisesRegex(Exception, ".*Expected error.*"):
        remote_ref.to_here()
@dist_init
def test_non_cont_tensors(self):
    """Non-contiguous tensors survive an RPC round trip unchanged."""
    if self.rank == 0:
        # Build a non-contiguous view plus a contiguous copy of it.
        base = torch.rand(5, 5)
        view = base.narrow(1, 2, 2)
        self.assertFalse(view.is_contiguous())
        cont = view.contiguous()
        self.assertTrue(cont.is_contiguous())
        self.assertEqual(view, cont)

        # Ship the non-contiguous view over RPC.
        peer = worker_name((self.rank + 1) % self.world_size)
        returned = rpc.rpc_sync(peer, non_cont_test, args=(view, cont))

        # The reply matches the view and stays non-contiguous.
        self.assertEqual(view, returned)
        self.assertFalse(returned.is_contiguous())
@dist_init
def test_callback_simple(self):
    """A then-callback observes the RPC result and can publish its own value."""
    published = concurrent.futures.Future()
    n = self.rank + 1

    def on_done(fut):
        value = fut.wait()
        self.assertEqual(value, torch.ones(n, n) * 2)
        published.set_result(value.clone() + 1)

    fut = rpc.rpc_async(
        worker_name(n % self.world_size),
        torch.add,
        args=(torch.ones(n, n), torch.ones(n, n))
    )
    fut.then(on_done)

    self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    self.assertEqual(published.result(), torch.ones(n, n) * 2 + 1)
    # wait() stays repeatable after the callback has fired.
    self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
    """A then-callback of the wrong arity fails only the chained future."""
    set_by_cb = concurrent.futures.Future()
    n = self.rank + 1
    fut = rpc.rpc_async(
        worker_name(n % self.world_size),
        torch.add,
        args=(torch.ones(n, n), torch.ones(n, n))
    )
    chained = fut.then(my_function)
    # The original future is unaffected by the bad callback.
    self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    with self.assertRaisesRegex(
        RuntimeError,
        "my\\_function\\(\\) missing 2 required positional arguments"
    ):
        chained.wait()
@dist_init
def test_callback_wrong_arg_type(self):
    """A callback that misuses its Future argument fails the chained future."""
    peer = worker_name((self.rank + 1) % self.world_size)
    first = rpc.rpc_async(peer, torch.add, args=(torch.ones(2, 2), 1))
    # The lambda treats the future itself as a number -> unsupported '+'.
    second = first.then(lambda x: x + 1)
    with self.assertRaisesRegex(
        RuntimeError,
        "unsupported operand type\\(s\\) for \\+"
    ):
        second.wait()
@dist_init
def test_callback_multi(self):
    """Many callbacks chained off one future each see the same base result."""
    num_cbs = 10
    n = self.rank + 1

    def on_done(idx, fut):
        value = fut.wait()
        self.assertEqual(value, torch.ones(n, n) * 2)
        return value + idx

    fut = rpc.rpc_async(
        worker_name(n % self.world_size),
        torch.add,
        args=(torch.ones(n, n), torch.ones(n, n))
    )
    chained = [fut.then(partial(on_done, idx)) for idx in range(num_cbs)]

    self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    for idx, cb_fut in enumerate(chained):
        self.assertEqual(cb_fut.wait(), torch.ones(n, n) * 2 + idx)
    # The base future's value is untouched by its callbacks.
    self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
    """A long chain of then() callbacks accumulates increments in order."""
    n = self.rank + 1
    dst = worker_name(n % self.world_size)

    def add_one(fut):
        return fut.wait() + 1

    fut = rpc.rpc_async(
        worker_name(n % self.world_size),
        torch.add,
        args=(torch.ones(n, n), 1)
    )
    num_cbs = 20
    for _ in range(num_cbs):
        fut = fut.then(add_one)

    self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_callback_in_rpc(self):
    """A remote function may itself chain futures via then()."""
    first_hop = worker_name((self.rank + 1) % self.world_size)
    second_hop = worker_name((self.rank + 2) % self.world_size)
    result = rpc.rpc_sync(
        first_hop,
        add_use_future_cb,
        args=(second_hop, torch.ones(2, 2), 1, 2)
    )
    self.assertEqual(result, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_callback_with_ret(self):
    """A callback may issue nested RPCs and block on their chained result."""
    peer = worker_name((self.rank + 1) % self.world_size)

    def on_done(fut0):
        inner = rpc.rpc_async(
            peer,
            torch.add,
            args=(fut0.wait(), 1)
        ).then(lambda fut1: fut1.wait() + 1)
        return inner.wait()

    final = rpc.rpc_async(
        peer,
        torch.add,
        args=(torch.ones(2, 2), 1)
    ).then(on_done)

    self.assertEqual(final.wait(), torch.ones(2, 2) + 3)
@dist_init
def test_callback_with_error(self):
    """An error raised inside a callback becomes the chained future's error."""
    peer = worker_name((self.rank + 1) % self.world_size)

    def on_done(fut0):
        # The upstream failure is observable inside the callback ...
        with self.assertRaisesRegex(ValueError, "Expected error"):
            fut0.wait()
        # ... and the callback's own error replaces it downstream.
        raise RuntimeError("Another expected error")

    chained = rpc.rpc_async(peer, raise_func).then(on_done)
    with self.assertRaisesRegex(RuntimeError, "Another expected error"):
        chained.wait()
@dist_init
def test_callback_none(self):
    """Passing None to then() is rejected outright."""
    peer = worker_name((self.rank + 1) % self.world_size)
    with self.assertRaisesRegex(
        TypeError,
        "incompatible function arguments."
    ):
        rpc.rpc_async(peer, raise_func).then(None)
@dist_init
def test_add_done_callback(self):
    """add_done_callback fires for a completed future (flushed via a then)."""
    set_by_cb = False
    n = self.rank + 1

    def on_done(fut):
        nonlocal set_by_cb
        fut.wait()
        set_by_cb = True

    fut = rpc.rpc_async(
        worker_name(n % self.world_size),
        torch.add,
        args=(torch.ones(n, n), torch.ones(n, n))
    )
    fut.add_done_callback(on_done)
    # add_done_callback gives no ordering guarantee relative to wait(),
    # so chain a then() afterwards and wait on it to ensure on_done ran.
    barrier = fut.then(lambda _: True)

    self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    barrier.wait()
    self.assertTrue(set_by_cb)
    self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_mark_future_twice(self):
    """A future completed by RPC cannot be set_result() a second time."""
    fut = rpc.rpc_async(
        worker_name((self.rank + 1) % self.world_size),
        torch.add,
        args=(torch.zeros(2, 2), 1)
    )
    self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
    with self.assertRaisesRegex(
        RuntimeError,
        "Future can only be marked completed once"
    ):
        fut.set_result(1)
@dist_init
def test_pickle_future(self):
    """Futures are not picklable as RPC arguments in any call mode."""
    fut = torch.futures.Future()
    errMsg = "Can not pickle torch.futures.Future"
    peer = worker_name((self.rank + 1) % self.world_size)
    with TemporaryFileName() as fname:
        with self.assertRaisesRegex(RuntimeError, errMsg):
            rpc.rpc_sync(peer, fail_on_fut, args=(fut,))
    with TemporaryFileName() as fname:
        with self.assertRaisesRegex(RuntimeError, errMsg):
            rpc.rpc_async(peer, fail_on_fut, args=(fut,))
    with TemporaryFileName() as fname:
        with self.assertRaisesRegex(RuntimeError, errMsg):
            rpc.remote(peer, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
    """done() reports True once the future has been waited on."""
    peer = worker_name((self.rank + 1) % self.world_size)
    fut = rpc.rpc_async(peer, torch.add, args=(torch.zeros(2), 1))
    fut.wait()
    self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
    """done() is True even when the future completed with an error."""
    peer = worker_name((self.rank + 1) % self.world_size)
    fut = rpc.rpc_async(peer, raise_func)
    with self.assertRaisesRegex(ValueError, "Expected error"):
        fut.wait()
    self.assertTrue(fut.done())
def _test_future_cb(self, func):
    """Drive a two-hop future helper ``func`` and verify the summed result."""
    first_hop = worker_name((self.rank + 1) % self.world_size)
    second_hop = worker_name((self.rank + 2) % self.world_size)
    result = rpc.rpc_sync(
        first_hop,
        func,
        args=(second_hop, torch.ones(2, 2), 1, 2)
    )
    self.assertEqual(result, torch.ones(2, 2) + 1 + 2)
@dist_init
def test_future_in_rpc(self):
    # Two-hop helper that fulfils a Future via set_result on the remote side.
    self._test_future_cb(add_use_future_set_result)

@dist_init
def test_future_nested_callback(self):
    # Two-hop helper that nests a then() callback on the remote side.
    self._test_future_cb(add_use_future_nested_cb)
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
    """Invoke ``fn`` on worker ``to`` via the given RPCExecMode, returning its value."""
    if mode == RPCExecMode.SYNC:
        return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
    if mode == RPCExecMode.ASYNC:
        return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
    if mode == RPCExecMode.REMOTE:
        return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
def _test_async_function_raise(self, mode):
    """Async-decorated UDFs propagate their exception in every call mode."""
    peer = worker_name((self.rank + 1) % self.world_size)
    with self.assertRaisesRegex(RuntimeError, "Expected error"):
        self._run_func_in_mode(peer, async_raise_func, mode)
@dist_init
def test_async_function_raise(self):
    # Exception propagation from an async UDF over rpc_sync.
    self._test_async_function_raise(RPCExecMode.SYNC)

@dist_init
def test_async_function_raise_async(self):
    # Same check over rpc_async.
    self._test_async_function_raise(RPCExecMode.ASYNC)

@dist_init
def test_async_function_raise_remote(self):
    # Same check over rpc.remote / to_here.
    self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
@dist_init
def test_async_function_wrong_return_type(self):
self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
@dist_init
def test_async_function_wrong_return_type_async(self):
self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
@dist_init
def test_async_function_wrong_return_type_remote(self):
self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
    """One async add routed through a second worker returns ones + 1."""
    caller_dst = worker_name((self.rank + 1) % self.world_size)
    helper_dst = worker_name((self.rank + 2) % self.world_size)
    result = rpc.rpc_sync(caller_dst, async_add, args=(helper_dst, torch.ones(2, 2), 1))
    self.assertEqual(result, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
    """Run an async add helper that applies +1 then +2 through a second
    worker and check the summed result (ones + 3).
    """
    dst1 = worker_name((self.rank + 1) % self.world_size)
    dst2 = worker_name((self.rank + 2) % self.world_size)
    args = (dst2, torch.ones(2, 2), 1, 2)
    ret = self._run_func_in_mode(dst1, fn, mode, args=args)
    self.assertEqual(ret, torch.ones(2, 2) + 3)

# The wrappers below exercise the same helper against different async-add
# implementations and execution modes.

@dist_init
def test_async_function_with_future_ctor(self):
    self._test_async_function(async_add_with_future_ctor)

@dist_init
def test_async_function_with_future_ctor_remote(self):
    self._test_async_function(
        async_add_with_future_ctor,
        RPCExecMode.REMOTE
    )

@dist_init
def test_async_function_chained(self):
    self._test_async_function(async_add_chained)

@dist_init
def test_async_function_chained_remote(self):
    self._test_async_function(async_add_chained, RPCExecMode.REMOTE)

@dist_init
def test_async_function_nested(self):
    self._test_async_function(async_add_nested)

@dist_init
def test_async_function_nested_remote(self):
    self._test_async_function(async_add_nested, RPCExecMode.REMOTE)

@dist_init
def test_async_static_method(self):
    self._test_async_function(AsyncExecutionClass.static_async_add)

@dist_init
def test_async_static_method_remote(self):
    self._test_async_function(
        AsyncExecutionClass.static_async_add,
        RPCExecMode.REMOTE
    )

@dist_init
def test_async_class_method(self):
    self._test_async_function(AsyncExecutionClass.class_async_add)

@dist_init
def test_async_class_method_remote(self):
    self._test_async_function(
        AsyncExecutionClass.class_async_add,
        RPCExecMode.REMOTE
    )
def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
    """Exercise static, class and bound async methods through an RRef proxy
    in the given execution mode.  Each call presumably sums its tensor args
    (x + x + y = 4 * x here, since y = x + 1 and x is all-ones), so three
    calls accumulate to 3 * 4 * x — TODO confirm against AsyncExecutionClass.
    """
    # NOTE(review): the doubled "test_test" in the name looks like a typo,
    # but renaming would change the interface used by the three tests below.
    dst1 = worker_name((self.rank + 1) % self.world_size)
    dst2 = worker_name((self.rank + 2) % self.world_size)
    rref = rpc.remote(dst1, AsyncExecutionClass)

    x = torch.ones(2, 2)
    y = torch.ones(2, 2) + 1
    if mode == RPCExecMode.SYNC:
        ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
        ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
        ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
    elif mode == RPCExecMode.ASYNC:
        ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
        ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
        ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
    elif mode == RPCExecMode.REMOTE:
        ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
        ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
        ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()

    self.assertEqual(ret, 3 * 4 * x)

@dist_init
def test_async_class_rref_proxy(self):
    self._test_test_async_class_rref_proxy()

@dist_init
def test_async_class_rref_proxy_async(self):
    self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)

@dist_init
def test_async_class_rref_proxy_remote(self):
    self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
    """Chain or fan out ``num`` async additions of ``step`` each and verify
    the accumulated result equals ones + num * step.
    """
    dst1 = worker_name((self.rank + 1) % self.world_size)
    dst2 = worker_name((self.rank + 2) % self.world_size)
    num = 20
    step = 3
    args = (dst2, torch.ones(2, 2), num, step)
    ret = self._run_func_in_mode(dst1, fn, mode, args=args)
    self.assertEqual(ret, torch.ones(2, 2) + num * step)

@dist_init
def test_async_function_multi_chained(self):
    self._test_async_function_multi(async_add_chained_multi)

@dist_init
def test_async_function_multi_chained_async(self):
    self._test_async_function_multi(
        async_add_chained_multi,
        RPCExecMode.ASYNC
    )

@dist_init
def test_async_function_multi_chained_remote(self):
    self._test_async_function_multi(
        async_add_chained_multi,
        RPCExecMode.REMOTE
    )

@dist_init
def test_async_function_multi_fanout(self):
    self._test_async_function_multi(async_add_multi_fanout)

@dist_init
def test_async_function_multi_fanout_async(self):
    self._test_async_function_multi(
        async_add_multi_fanout,
        RPCExecMode.ASYNC
    )

@dist_init
def test_async_function_multi_fanout_remote(self):
    self._test_async_function_multi(
        async_add_multi_fanout,
        RPCExecMode.REMOTE
    )
def _test_return_future(self, mode):
    """Returning a raw Future from an ordinary (non-async) RPC is unsupported
    and must fail with the pickling error below, in every execution mode.
    """
    with self.assertRaisesRegex(
        RuntimeError,
        "Can not pickle torch.futures.Future"
    ):
        self._run_func_in_mode(
            worker_name((self.rank + 1) % self.world_size),
            return_future,
            mode
        )

@dist_init
def test_return_future(self):
    self._test_return_future(RPCExecMode.SYNC)

@dist_init
def test_return_future_async(self):
    self._test_return_future(RPCExecMode.ASYNC)

@dist_init
def test_return_future_remote(self):
    self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
    """A remote() call with a tiny timeout fails its creation future, and the
    resulting RRef is unusable afterwards.
    """
    # This test is similar to ones in FaultyProcessGroupTest, but is meant to be
    # run with other backends besides ProcessGroup.
    if self.rank != 0:
        return
    dst_rank = (self.rank + 1) % self.world_size
    dst_worker = "worker{}".format(dst_rank)
    # 10 ms timeout
    rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
    # Future corresponding to the remote creation should time out.
    expected_error = self.get_timeout_error_regex()
    with self.assertRaisesRegex(RuntimeError, expected_error):
        rref._get_future().wait()
    # Call to ensure pending callbacks are run.
    wait_until_pending_futures_and_users_flushed()
    # Using the timed-out RRef afterwards must also fail.
    with self.assertRaisesRegex(RuntimeError, "RRef creation"):
        rref.to_here()

    # Wait for owner/fork bookkeeping on rank 1 to settle before exiting.
    wait_until_owners_and_forks_on_rank(1, 1, rank=1)
@dist_init(setup_rpc=False)
@unittest.skipIf(
    os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
    "init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
)
def test_init_pg_then_rpc(self):
    """Initializing a gloo process group first and RPC second must leave both
    stacks usable (one rpc_sync round-trip plus one barrier).
    """
    dist.init_process_group(
        backend="gloo",
        init_method=self.init_method,
        rank=self.rank,
        world_size=self.world_size,
    )

    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=self.rpc_backend_options,
    )

    # Test RPC.
    next_rank = (self.rank + 1) % self.world_size
    ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
    self.assertEqual(ret, torch.ones(2, 2) + 1)

    # Test PG
    dist.barrier()

    rpc.shutdown()
@dist_init(setup_rpc=False)
@unittest.skipIf(
| |
#!/usr/bin/env python
import csv
import json
from os.path import isfile
from os.path import basename
from os.path import join as pjoin
from pprint import pprint as pp
from typing import Dict, Tuple, List, Union, Any, Optional, IO
import datagristle.common as comm
from datagristle.common import abort
import datagristle.csvhelper as csvhelper
# Categories of delta output files produced by FileDelta, one file per category.
OUTPUT_TYPES = ['insert', 'delete', 'same', 'chgnew', 'chgold']

# Type aliases: field positions are 0-offset column numbers; a record is one csv row.
FieldPositionsType = List[int]
RecordType = List[str]
class FileDelta:
    """ Compares two sorted csv files based on a common key, writing results
    to multiple output files - one per delta category in OUTPUT_TYPES
    (insert, delete, same, chgnew, chgold).

    Args:
        out_dir: the output file directory
        dialect: a csv dialect object
    Raises:
        ValueError: if field_type is invalid
        ValueError: if the same field is referenced by ignore_fields and key_fields
    """

    def __init__(self,
                 out_dir: str,
                 dialect: csvhelper.Dialect) -> None:
        self.out_dir = out_dir
        self.dialect = dialect
        # 0-offset column positions used to join, compare and ignore fields:
        self.join_fields: FieldPositionsType = []
        self.compare_fields: FieldPositionsType = []
        self.ignore_fields: FieldPositionsType = []
        self.dry_run: bool = False
        # Current input records; None before priming and after EOF
        # (fix: annotated Optional - these really do hold None at the boundaries).
        self.old_rec: Optional[List[str]] = None
        self.new_rec: Optional[List[str]] = None
        self.new_read_cnt = 0
        self.old_read_cnt = 0
        # Per-category output handles, fully-qualified names, writers and counts:
        self.out_file: Dict[str, IO[str]] = {}
        self.out_fqfn: Dict[str, str] = {}
        self.out_writer: Dict[str, Any] = {}
        self.out_counts: Dict[str, int] = {}
        self.dass = DeltaAssignments()

    def set_fields(self,
                   field_type: str,
                   *fields: Any) -> None:
        """ Assign fields to joins, compares and ignores lists.
        Args:
            field_type - must be 'join', 'compare' or 'ignore'
            *fields - the list of 0-offset field numbers.  Multiple
                      representations are accepted:
                      - string: '0,1,3'
                      - list of numbers: 0,1,3
                      - list of strings: '0', '1', '3'
                      - single number: 0
                      - single string: '0'
        Returns:
            nothing
        Raises:
            ValueError: if field_type is invalid
        """
        # A single comma-delimited string is split into its parts first.
        if len(fields) == 1 and ',' in str(fields[0]):
            split_fields = fields[0].split(',')
        else:
            split_fields = fields

        if field_type == 'join':
            self.join_fields.extend([int(x) for x in split_fields if x is not None])
        elif field_type == 'compare':
            self.compare_fields.extend([int(x) for x in split_fields if x is not None])
        elif field_type == 'ignore':
            self.ignore_fields.extend([int(x) for x in split_fields if x is not None])
        else:
            raise ValueError('Invalid field_type value: %s' % field_type)

    def _validate_fields(self) -> None:
        """ Ensure join fields exist and do not overlap the ignore list.
        Raises:
            ValueError: if join fields are missing, or an ignored field is
                        also a join field.
        """
        if not self.join_fields:
            raise ValueError('key (join) fields are missing')
        # todo: should add compare_fields to this check
        for field in self.ignore_fields:
            if field in self.join_fields:
                raise ValueError('invalid ignore_fields value: %s' % field)
        # todo: should also confirm that compare_fields or ignore_fields is
        # populated - not both

    def compare_files(self,
                      old_fqfn: str,
                      new_fqfn: str,
                      dry_run: bool = False) -> None:
        """ Reads two sorted csv files, compares them based on a key, and
        writes results out to five output files.

        Args:
            old_fqfn: the fully-qualified file name of the old file
            new_fqfn: the fully-qualified file name of the new file
            dry_run:  a boolean, if True will not write output.  Defaults to
                      False
        Returns:
            nothing
        Raises:
            AssertionError: if either input file does not exist
        """
        self.dry_run = dry_run
        self.new_fqfn = new_fqfn
        self.old_fqfn = old_fqfn
        assert isfile(old_fqfn)
        assert isfile(new_fqfn)
        compare_fields = self.compare_fields
        ignore_fields = self.ignore_fields
        self._validate_fields()

        # set up input csv readers
        old_f = open(old_fqfn, 'r')
        new_f = open(new_fqfn, 'r')
        try:
            self.old_csv = csv.reader(old_f, dialect=self.dialect)
            self.new_csv = csv.reader(new_f, dialect=self.dialect)

            # set up output files, counts and writers
            for outtype in OUTPUT_TYPES:
                self.out_fqfn[outtype] = pjoin(self.out_dir, self._get_name(new_fqfn, outtype))
                self.out_file[outtype] = open(self.out_fqfn[outtype], 'w')
                self.out_writer[outtype] = csv.writer(self.out_file[outtype],
                                                      dialect=self.dialect)

            # prime the main loop with the first record from each file
            self._read_old_csv()
            self._read_new_csv()

            # classic sorted-merge walk while both files still have records
            while self.old_rec and self.new_rec:
                key_match_result = self._key_match()
                if key_match_result == 'new-greater':
                    self._writer('delete', self.old_rec)
                    self._read_old_csv()
                elif key_match_result == 'old-greater':
                    self._writer('insert', self.new_rec)
                    self._read_new_csv()
                else:
                    if self._data_match(ignore_fields, compare_fields):
                        self._writer('same', self.old_rec)
                    else:
                        self._writer('chgold', self.old_rec)
                        self._writer('chgnew', self.new_rec)
                    self._read_new_csv()
                    self._read_old_csv()

            # drain whichever file still has records
            while self.new_rec:
                self._writer('insert', self.new_rec)
                self._read_new_csv()
            while self.old_rec:
                self._writer('delete', self.old_rec)
                self._read_old_csv()
        finally:
            # Fix: close all handles even if an abort/exception fires
            # mid-compare - previously the files leaked on any error.
            old_f.close()
            new_f.close()
            for filename in self.out_file:
                self.out_file[filename].close()

    @staticmethod
    def _get_name(in_fn: str,
                  out_type: str) -> str:
        """ Gets the formatted name of an output file.
        Args:
            in_fn: input file name
            out_type: one of insert, delete, same, chgnew, chgold
        Returns:
            out_fn: the basename of in_fn, with .uniq / .sorted / .sorted.uniq
                    removed, then with out_type added
        Notes:
            parsing is fragile - should improve (todo)
        """
        fn = basename(in_fn)
        if fn.endswith('.sorted.uniq'):
            out_fn = fn[:-12]    # todo: make less fragile
        elif fn.endswith('.uniq'):
            out_fn = fn[:-5]     # todo: make less fragile
        elif fn.endswith('.sorted'):
            out_fn = fn[:-7]     # todo: make less fragile
        else:
            out_fn = fn
        return out_fn + '.' + out_type

    def _key_match(self) -> str:
        """ Determines if an old-file record matches a new-file record based
        on all the join-keys, comparing key columns left-to-right as strings.
        Returns:
            result: 'equal' if all keys match, 'new-greater' if the new file's
                    key is greater than the old file's, 'old-greater' if the
                    opposite.
        """
        for key in self.join_fields:
            if self.new_rec[key] > self.old_rec[key]:
                return 'new-greater'
            elif self.new_rec[key] < self.old_rec[key]:
                return 'old-greater'
        return 'equal'

    def _data_match(self,
                    ignore_fields: FieldPositionsType,
                    compare_fields: FieldPositionsType) -> bool:
        """ Determines if an old-file record matches a new-file record based
        on the non-join-keys.
        Args:
            ignore_fields: a list of fields to ignore represented as a list of
                           positions given a zero-offset.  If provided the
                           compare_fields cannot be provided - since all fields
                           not in this list will be compared.
            compare_fields: a list of fields to compare represented as a list
                            of positions given a zero-offset.  If provided the
                            ignore_fields cannot be provided - since all fields
                            not in this list will be ignored.
        Returns:
            result: a boolean, True if every compared column matched
        """
        # todo: pre-calc the list of fields to actually compare
        for index, _ in enumerate(self.new_rec):
            if index in self.join_fields:
                continue
            if index in ignore_fields:
                continue
            if compare_fields and index not in compare_fields:
                continue
            if self.new_rec[index] != self.old_rec[index]:
                return False
        # Fix: only declare a match after *every* compared field has been
        # checked.  As written, the first matching field returned True
        # immediately (missing later differences) and a record with no
        # comparable fields fell through returning None.
        return True

    def _read_new_csv(self) -> None:
        """ Reads next rec from new file into self.new_rec
        Args:    None
        Returns: Nothing
        Notes:
            - Confirms sort order of file
            - Will assign None to self.new_rec at eof
        """
        try:
            last_rec = self.new_rec
            self.new_rec = next(self.new_csv)
            if last_rec is None:    # first read priming
                last_rec = self.new_rec
            if len(last_rec) != len(self.new_rec):
                abort('new file has inconsistent number of fields', f'new_rec = {self.new_rec}')
            for key in self.join_fields:
                if self.new_rec[key] > last_rec[key]:
                    # NOTE(review): the counter only advances when a key column
                    # increases, so duplicate-key rows are never counted -
                    # confirm whether that is intended.
                    self.new_read_cnt += 1
                    break    # good
                if self.new_rec[key] < last_rec[key]:
                    abort('ERROR: new file is not sorted correctly',
                          f'This refers to file {self.new_fqfn}, and key: {key}, and record: {self.new_rec} and last rec: {last_rec}')
        except StopIteration:
            self.new_rec = None

    def _read_old_csv(self) -> None:
        """ Reads next rec from old file into self.old_rec
        Args:    None
        Returns: Nothing
        Notes:
            - Confirms sort order of file
            - Will assign None to self.old_rec at eof
        """
        try:
            last_rec = self.old_rec
            self.old_rec = next(self.old_csv)
            if last_rec is None:    # first read priming
                last_rec = self.old_rec
            if len(last_rec) != len(self.old_rec):
                # Fix: the message previously interpolated self.new_rec here.
                abort('old file has inconsistent number of fields', f'old_rec = {self.old_rec}')
            for key in self.join_fields:
                if self.old_rec[key] > last_rec[key]:
                    # NOTE(review): same duplicate-key counting caveat as
                    # _read_new_csv - confirm intent.
                    self.old_read_cnt += 1
                    break    # good
                if self.old_rec[key] < last_rec[key]:
                    abort('ERROR: old file is not sorted correctly',
                          f'This refers to file {self.old_fqfn}, and key: {key}, and record: {self.old_rec} and last rec: {last_rec}')
        except StopIteration:
            self.old_rec = None

    def _writer(self,
                outtype: str,
                outrec: RecordType) -> None:
        """ Run post-delta assignment then write the record to the category file.
        Args:
            outtype - one of insert, delete, chgnew, chgold, same
            outrec  - output list
        Returns:  nothing
        """
        # Idiom: dict.get replaces the try/except KeyError counter increment.
        self.out_counts[outtype] = self.out_counts.get(outtype, 0) + 1
        if not self.dry_run:
            adj_rec = self.dass.assign(outtype, outrec, self.old_rec, self.new_rec)
            self.out_writer[outtype].writerow(adj_rec)
class DeltaAssignments:
""" Manages the post-delta transformations (aka assignments).
"""
def __init__(self) -> None:
    """Initialize empty registries for assignments, special values and sequences."""
    self.assignments: Dict[str, Dict] = {}   # supports minor transformations
    self.special_values: Dict[str, str] = {}  # presumably name -> substitution value; verify against set_special_values callers
    self.seq: Dict[int, Dict[str, Any]] = {}  # sequence state keyed by field position — TODO confirm
def set_assignment(self,
dest_file: str,
dest_field: int,
src_type: str,
src_val: str = None,
src_file: str = None,
src_field: int = None) -> None:
""" Write instructions for the assignment of a csv field in an output file.
Args:
dest_file: one of insert, delete, chgold or chgnew
dest_field: the field position, given a zero-offset
src_type: one of literal, copy, sequence, or special
src_val: used by literal, lookup and sequence
src_file: one of old, new or None
src_field: the field position, given a zero-offset
Returns:
nothing
Raises:
ValueError if args are invalid
sys.exit if sequence assignment is invalid
"""
if dest_field:
assert int(dest_field)
if src_field:
assert int(src_field)
if dest_file not in ['insert', 'delete', 'chgold', 'chgnew', 'same']:
raise ValueError('Invalid dest_file: %s' % dest_file)
if not comm.isnumeric(dest_field):
raise ValueError('Invalid dest_field: %s' % dest_field)
if src_type not in ['literal', 'copy', 'sequence', 'special']:
raise ValueError('Invalid src_type of %s' | |
<reponame>YudinYury/Question_by_HackerRank
''' Python question by HackerRank
TODO 1: "Introduction to Regex Module"
You are given a string N.
Your task is to verify that N is a floating point number.
In this task, a valid float number must satisfy all of the following requirements:
Number can start with +, - or . symbol.
For example:
✔
+4.50
✔
-1.0
✔
.5
✔
-.7
✔
+.4
✖
-+4.5
Number must contain at least 1 decimal value.
For example:
✖
12.
✔
12.0
Number must have exactly one . symbol.
Number must not give any exceptions when converted using float(N).
Input Format:
The first line contains an integer T, the number of test cases.
The next T line(s) contains a string N.
Constraints: 0 < T < 10
Output Format: Output True or False for each test case.
TODO 2: "Re.split()"
You are given a string S, containing , and/or . and 0-9 digits.
Your task is to re.split() all of the , and . symbols.
Input Format: A single line of input containing the string S.
Output Format: Print the numbers obtained after splitting the string on separate lines.
TODO 3: "Group(), Groups() & Groupdict()"
You are given a string S.
Your task is to find the first occurrence of an alphanumeric character in S(read from left to right) that has consecutive repetitions.
Input Format: A single line of input containing the string S.
Constraints: 0 < len(S) < 100
Output Format: Print the first occurrence of the repeating character. If there are no repeating characters, print -1.
TODO 4: "Re.findall() & Re.finditer()"
You are given a string S. It consists of alphanumeric characters, spaces and symbols(+,-).
Your task is to find all the substrings of S that contains 2 or more vowels.
Also, these substrings must lie in between 2 consonants and should contain vowels only.
Note :
Vowels are defined as: AEIOU and aeiou.
Consonants are defined as: QWRTYPSDFGHJKLZXCVBNM and qwrtypsdfghjklzxcvbnm.
Input Format: A single line of input containing string S.
Constraints: 0 < len(S) < 100
Output Format: Print the matched substrings in their order of occurrence on separate lines.
If no match is found, print -1.
TODO 5: "Re.start() & Re.end()"
You are given a string S.
Your task is to find the indices of the start and end of string K in S.
Input Format:
The first line contains the string S.
The second line contains the string K.
Constraints: 0 < len(s) < 100, 0 < len(k) < len(s)
Output Format:
Print the tuple in this format: (start _index, end _index).
If no match is found, print (-1, -1).
TODO 6: "Regex Substitution"
You are given a text of lines. The text contains && and || symbols.
Your task is to modify those symbols to the following:
&& → and
|| → or
Both && and || should have a space " " on both sides.
Input Format: The first line contains the integer, N.
The next N lines each contain a line of the text.
Constraints: 0 < N < 100
Neither && nor || occur in the start or end of each line.
Output Format: Output the modified text.
# TODO 7: "Validating Roman Numerals"
You are given a string, and you have to validate whether it's a valid Roman numeral. If it is valid, print True.
Otherwise, print False. Try to create a regular expression for a valid Roman numeral.
Input Format: A single line of input containing a string of Roman characters.
Output Format: Output a single line containing True or False according to the instructions above.
Constraints: The number will be between 1 and 3999 (both included).
# TODO 8: "Validating phone numbers"
Let's dive into the interesting topic of regular expressions! You are given some input, and you are required to check
whether they are valid mobile numbers.
A valid mobile number is a ten digit number starting with a 7,8 or 9.
Concept
A valid mobile number is a ten digit number starting with a 7,8 or 9.
Regular expressions are a key concept in any programming language. A quick explanation with Python examples is available
here. You could also go through the link below to read more about regular expressions in Python.
Input Format:
The first line contains an integer N, the number of inputs.
N lines follow, each containing some string.
Output Format:
For every string listed, print "YES" if it is a valid mobile number and "NO" if it is not on separate lines.
Do not print the quotes.
# TODO 9: "Validating and Parsing Email Addresses"
A valid email address meets the following criteria:
It's composed of a username, domain name, and extension assembled in this format: username@domain.extension
The username starts with an English alphabetical character, and any subsequent characters consist of one or more of the following: alphanumeric characters, -,., and _.
The domain and extension contain only English alphabetical characters.
The extension is 1, 2, or 3 characters in length.
Given T pairs of names and email addresses as input, print each name and email address pair having a valid email address on a new line.
Hint: Try using Email.utils() to complete this challenge. For example, this code:
import email.utils
print email.utils.parseaddr('DOSHI <<EMAIL>>')
print email.utils.formataddr(('DOSHI', '<EMAIL>'))
produces this output:
('DOSHI', '<EMAIL>')
DOSHI <<EMAIL>>
Input Format
The first line contains a single integer, N, denoting the number of email address.
Each line i of the N subsequent lines contains a name and an email address as two space-separated values following this format:
name <<EMAIL>>
Output Format:
Print the space-separated name and email address pairs containing valid email addresses only. Each pair must be printed on a new line in the following format:
name <<EMAIL>>
# TODO 10: "Validating UID"
ABCXYZ company has up to 100 employees.
The company decides to create a unique identification number (UID) for each of its employees.
The company has assigned you the task of validating all the randomly generated UIDs.
A valid UID must follow the rules below:
It must contain at least 2 uppercase English alphabet characters.
It must contain at least 3 digits (0 - 9).
It should only contain alphanumeric characters (a - z,A - Z & 0 - 9).
No character should repeat.
There must be exactly 10 characters in a valid UID.
Input Format: The first line contains an integer T, the number of test cases.
The next T lines contains an employee's UID.
Output Format:
For each test case, print 'Valid' if the UID is valid. Otherwise, print 'Invalid', on separate lines. Do not print the quotation marks.
# TODO 11: "Validating Credit Card Numbers"
You and Fredrick are good friends. Yesterday, Fredrick received N credit cards from ABCD Bank. He wants to verify whether his credit card numbers are valid or not. You happen to be great at regex so he is asking for your help!
A valid credit card from ABCD Bank has the following characteristics:
► It must start with a 4, 5 or 6.
► It must contain exactly 16 digits.
► It must only consist of digits (0-9).
► It may have digits in groups of 4, separated by one hyphen "-".
► It must NOT use any other separator like ' ' , '_', etc.
► It must NOT have 4 or more consecutive repeated digits.
Input Format
The first line of input contains an integer N.
The next N lines contain credit card numbers.
Output Format
Print 'Valid' if the credit card number is valid. Otherwise, print 'Invalid'. Do not print the quotes.
# TODO 12: "Validating Postal Codes"
A postal code P must be a number in the range of (100000-999999).
A postal code P must not contain more than one alternating repetitive digit pair.
Alternating repetitive digits are digits which repeat immediately after the next digit. In other words, an alternating repetitive digit pair is formed by two equal digits that have just a single digit between them.
For example:
121426 # Here, 1 is an alternating repetitive digit.
523563 # Here, NO digit is an alternating repetitive digit.
552523 # Here, both 2 and 5 are alternating repetitive digits.
Your task is to validate whether is a valid postal code or not.
Input Format
One single line of input containing the string P.
Output Format
Print "True" if is valid. Otherwise, print "False". Do not print the quotation marks.
Sample Input 0
110000
Sample Output 0
False
Explanation 0
110000 : (0, 0) and (0, 0) are two alternating repetitive digit pairs (the 0s at positions 2 and 4, and at positions 3 and 5). Hence, it is an invalid postal code.
Note:
A score of 0 will be awarded for using 'if' | |
<reponame>tdiprima/code
# Config - reads a configuration file.
#
# This module parses a configuration file using a restricted Python syntax.
# The Python tokenizer/parser is used to read the file, unwanted expressions
# are removed from the parser output before the result is compiled and
# executed. The initialised configuration settings are returned in a dict.
import parser
import compiler
import symbol
import token
def _get_forbidden_symbols():
    """
    Returns a list of symbol codes representing statements that are *not*
    wanted in configuration files.

    The Python 2.5 grammar added several symbols (with-statements, yield
    expressions, old_lambdef); touching one of those attributes on 2.4
    raises AttributeError, which selects the shorter 2.4 list instead.
    NOTE(review): the stdlib ``symbol`` module this relies on was removed
    in Python 3.10 - this module only works on legacy interpreters.
    """
    try:
        # Python 2.5:
        symlst = [symbol.break_stmt, symbol.classdef, symbol.continue_stmt,
                  symbol.decorator, symbol.decorators, symbol.eval_input,
                  symbol.except_clause, symbol.exec_stmt, symbol.flow_stmt,
                  symbol.for_stmt, symbol.fpdef, symbol.fplist, symbol.funcdef,
                  symbol.global_stmt, symbol.import_as_name, symbol.import_as_names,
                  symbol.import_from, symbol.import_name, symbol.import_stmt,
                  symbol.lambdef, symbol.old_lambdef, symbol.print_stmt,
                  symbol.raise_stmt, symbol.try_stmt, symbol.while_stmt,
                  symbol.with_stmt, symbol.with_var, symbol.yield_stmt,
                  symbol.yield_expr]
    except AttributeError:
        # Python 2.4 (no with/yield-expr/old_lambdef symbols):
        symlst = [symbol.break_stmt, symbol.classdef, symbol.continue_stmt,
                  symbol.decorator, symbol.decorators, symbol.eval_input,
                  symbol.except_clause, symbol.exec_stmt, symbol.flow_stmt,
                  symbol.for_stmt, symbol.fpdef, symbol.fplist, symbol.funcdef,
                  symbol.global_stmt, symbol.import_as_name, symbol.import_as_names,
                  symbol.import_from, symbol.import_name, symbol.import_stmt,
                  symbol.lambdef, symbol.print_stmt, symbol.raise_stmt,
                  symbol.try_stmt, symbol.while_stmt]
    return symlst
def _parseconf(confstr):
    """
    Parse the configuration *confstr* string and remove anything else
    than the supported constructs, which are:

    Assignments, bool, dict, list, string, float, and, or, xor,
    arithmetics, string expressions and if..then..else.

    The *entire* statement containing the unsupported statement is removed
    from the parser; the effect is that the whole expression is ignored
    from the 'root' down.

    The modified AST object is returned to the Python parser for evaluation.

    Raises:
        SyntaxError: if *confstr* is not valid Python source.
    """
    # Initialise the parse tree, convert to list format and get a list of
    # the symbol ID's for the unwanted statements.  Might raise SyntaxError.
    ast = parser.suite(confstr)
    stmts = parser.ast2list(ast)
    rmsym = _get_forbidden_symbols()

    # The parse tree must begin with 256: 'single_input', 257: 'file_input'
    # or 258: 'eval_input' to compile back to an AST object - keep it.
    result = [stmts[0]]

    # Censor the parse tree produced from parsing the configuration.
    for stmt in stmts[1:]:
        try:
            keep = _check_ast(stmt, rmsym)
        except UserWarning as warn:
            # Fix: _check_ast signals a forbidden construct by *raising*
            # UserWarning; previously the exception propagated and aborted
            # the whole parse instead of just dropping this one statement,
            # contradicting the documented "ignored" behaviour.
            import warnings
            warnings.warn(str(warn))
            keep = False
        if keep:
            result.append(stmt)

    return parser.sequence2ast(result)
def _check_ast(astlist, forbidden):
"""
Recursively check a branch of the AST tree (in list form) for "forbidden"
symbols. A token terminates the search.
Returns False if any "forbidden symbols" are present, True otherwise.
"""
# check for token - tokens are always allowed.
if astlist[0] in token.tok_name.keys():
return True
elif astlist[0] in forbidden:
raise UserWarning('Statement containing '+symbol.sym_name[astlist[0]]
+' not allowed, ignored!')
return False
else:
return _check_ast(astlist[1], forbidden)
def parse_config(confstr):
    """
    Use *eval(...)* to execute a filtered AST tree formed by parsing a
    configuration file, removing unwanted expressions (if any) and then
    compiling the filtered output to Python byte code.  This approach
    allows the use of Python expressions and comments in config files,
    avoids the use of modules which is not particularily pretty (IMO).

    The following expressions (and combinations of) are allowed:

        Assignments, Arithmetic, Strings, Lists, if...then...else and
        Comments.

    Returns a dict containing the configuration values, if successful.

    SECURITY NOTE(review): despite the statement filtering, the config
    string is ultimately executed with eval() - never feed this function
    untrusted input.
    """
    dummy = dict()
    result = dict()
    # Parse the python source code into a filtered AST-tree.
    cast = _parseconf(confstr)
    # Compile AST to bytecode, this also detects syntax errors.
    cobj = parser.compileast(cast)
    # Run the bytecode.  The dicts *dummy* and *results* are placeholders
    # for the *globals* *locals* environments used by *eval(...)*.  The
    # variables declared and initialised in the config file will end up
    # in *locals* together with the *__globals__* environment while the
    # *locals* will contain only the values created by *eval(...)*.  This
    # is good, because we can protect *__globals__* and return only the
    # configuration values by doing this:
    eval(cobj, dummy, result)
    return result
# EOF
# <--------- split here ----------->
#!/usr/bin/env python
"""
Unit test for configuration handling. This is intended to be
run through the *tests* package, but should work stand-alone.
"""
import unittest
import sys, os
import tempfile
import string
# Adjust the import path to get the module we want to test.
# We are sitting in '<module>\\test' so one directory up
# will do the trick.
# Make the package under test importable: one directory up from the test dir.
# (Assumes the CWD is '<module>/test' when the suite runs - TODO confirm.)
mod_path = os.path.normpath(os.getcwd()+'/..')
sys.path.append(mod_path)
import config
# import modules used internally by config.py.
import parser
import compiler
import symbol
import token
# Test: Assignments, bool, dict list, string, float, bool, and,
# or, xor,arithmetics, string expressions and if..then..else.
#
# The test code is mostly ripped from 'test_grammar.py', available
# from the Python source tree.
test_backslash_1 = r"""
x = 1 \
+ 1
"""
test_backslash_2 = r"""
# Backslash does not means continuation in comments :\
x = 0
"""
test_integers_1 = r"""
a = 0xff
b = 0377
c = 2147483647
"""
test_long_ints_1 = r"""
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
"""
test_string_literals_1 = r"""
x = ''; y = ""
"""
test_string_literals_2 = r"""
x = '\''; y = "'"
"""
test_string_literals_3 = r"""
x = '"'; y = "\""
"""
test_string_literals_4 = r"""
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
"""
test_string_literals_5 = r"""
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
"""
test_string_literals_6 = r'''
x = r"""
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
'''
test_string_literals_7 = r"""
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
"""
test_string_literals_8 = r'''
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
'''
test_string_literals_9 = r"""
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
"""
test_if_stmt_1 = r"""
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
"""
test_and_or_not_1 = r"""
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
"""
test_comparison_1 = r"""
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
"""
test_binary_ops_1 = r"""
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
"""
test_shift_ops_1 = r"""
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
"""
test_additive_ops_1 = r"""
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
"""
test_multiplicative_ops_1 = r"""
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
"""
test_unary_ops_1 = r"""
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
"""
test_stmt_suite_1 = r"""
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
"""
test_atoms_1 = r"""
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
x = x
x = 'x'
x = 123
"""
class test_config_internals(unittest.TestCase):
    """
    Verify the internal functions in the config module.
    """

    def test_get_forbidden_symbols(self):
        """Forbidden-symbol IDs must be non-empty and valid grammar symbols."""
        # verify that we can get the symbol IDs
        res = config._get_forbidden_symbols()
        # FIX: failUnless/failUnlessRaises are deprecated unittest aliases
        # (removed in Python 3.12); use assertTrue/assertIn instead.
        self.assertTrue(len(res) > 0)
        # verify that every forbidden symbol is a valid grammar symbol ID
        for sym_id in res:
            self.assertIn(sym_id, symbol.sym_name)

    ### BUG? Unittest flags expected exception as Error!
    ###
    ## def test_warning_on_banned_statement(self):
    ##     d = config._get_forbidden_symbols()
    ##     self.failUnlessRaises(UserWarning,
    ##                           config._check_ast([d[0],[d[1]]], d))

    def test_parseconf(self):
        # TODO: no coverage yet for config.parseconf()
        pass

    def test_check_ast(self):
        # TODO: no coverage yet for config._check_ast()
        pass
class test_config(unittest.TestCase):
"""
Verify the functionality of the configuration parser for
a range of different data types and statements.
"""
# Only use setUp() and tearDown() if necessary
def setUp(self):
pass
def tearDown(self):
pass
## def _remove_rc_files(self, root):
## # wipe out the temp files we created for the file load
## # tests.
## | |
None,
count: Optional[int] = 20,
limit: int = 20,
page_token: Optional[str] = None,
return_json: bool = False,
):
"""
Retrieve authorized user's activities.
Note:
This need you do authorize first.
Args:
parts ((str,list,tuple,set) optional):
The resource parts for activities you want to retrieve.
If not provide, use default public parts.
You can pass this with single part str, comma-separated parts str or a list,tuple,set of parts.
before (str, optional):
Set this will only return the activities occurred before this timestamp.
This need specified in ISO 8601 (YYYY-MM-DDThh:mm:ss.sZ) format.
after (str, optional):
Set this will only return the activities occurred after this timestamp.
This need specified in ISO 8601 (YYYY-MM-DDThh:mm:ss.sZ) format.
region_code (str, optional):
Set this will only return the activities for the specified country.
This need specified with an ISO 3166-1 alpha-2 country code.
count (int, optional):
The count will retrieve activities data.
Default is 20.
If provide this with None, will retrieve all activities.
limit (int, optional):
The maximum number of items each request retrieve.
For activities, this should not be more than 50.
Default is 20.
page_token (str, optional):
The token of the page of activities result to retrieve.
You can use this retrieve point result page directly.
And you should know about the page result set for YouTube.
return_json(bool, optional):
The return data type. If you set True JSON data will be returned.
False will return a pyyoutube.ActivityListResponse instance.
Returns:
ActivityListResponse or original data.
"""
if count is None:
limit = 50 # for activities the max limit for per request is 50
else:
limit = min(count, limit)
args = {
"mine": True,
"part": enf_parts(resource="activities", value=parts),
"maxResults": limit,
}
if before:
args["publishedBefore"] = before
if after:
args["publishedAfter"] = after
if region_code:
args["regionCode"] = region_code
if page_token is not None:
args["pageToken"] = page_token
res_data = self.paged_by_page_token(
resource="activities", args=args, count=count
)
if return_json:
return res_data
else:
return ActivityListResponse.from_dict(res_data)
def get_captions_by_video(
    self,
    *,
    video_id: str,
    parts: Optional[Union[str, list, tuple, set]] = None,
    caption_id: Optional[Union[str, list, tuple, set]] = None,
    return_json: bool = False,
):
    """
    Retrieve the caption data of one of the authorized user's videos.

    Note:
        This requires you to authorize first.

    Args:
        video_id (str):
            The id for video which you want to get caption.
        parts ((str,list,tuple,set) optional):
            Caption resource parts to retrieve; defaults to the public
            parts. Accepts a single part str, a comma-separated parts str,
            or a list/tuple/set of parts.
        caption_id ((str,list,tuple,set)):
            Caption id(s) to retrieve. Accepts a single id str, a
            comma-separated id str, or a list/tuple/set of id strs.
        return_json (bool, optional):
            When True, return the raw JSON data; otherwise return a
            pyyoutube.CaptionListResponse instance.

    Returns:
        CaptionListResponse or original data.
    """
    request_args = {
        "videoId": video_id,
        "part": enf_parts("captions", parts),
    }
    if caption_id is not None:
        request_args["id"] = enf_comma_separated("caption_id", caption_id)
    response = self._request(resource="captions", method="GET", args=request_args)
    payload = self._parse_response(response)
    return payload if return_json else CaptionListResponse.from_dict(payload)
def get_channel_sections_by_id(
    self,
    *,
    section_id: Union[str, list, tuple, set],
    parts: Optional[Union[str, list, tuple, set]] = None,
    return_json: Optional[bool] = False,
) -> Union[ChannelSectionResponse, dict]:
    """
    Retrieve channel section info by its id(s).

    Args:
        section_id:
            The id(s) for the channel sections. Accepts a single id str,
            a comma-separated id str, or a list/tuple/set of ids.
        parts:
            Channel-section resource parts to retrieve; defaults to the
            public parts. Accepts a single part str, a comma-separated
            parts str, or a list/tuple/set of parts.
        return_json:
            When True, return the raw JSON data; otherwise return a
            pyyoutube.ChannelSectionResponse instance.

    Returns:
        ChannelSectionResponse or original data.
    """
    query = {
        "id": enf_comma_separated(field="section_id", value=section_id),
        "part": enf_parts(resource="channelSections", value=parts),
    }
    response = self._request(resource="channelSections", args=query)
    payload = self._parse_response(response)
    if return_json:
        return payload
    return ChannelSectionResponse.from_dict(payload)
def get_channel_sections_by_channel(
    self,
    *,
    channel_id: Optional[str] = None,
    mine: bool = False,
    parts: Optional[Union[str, list, tuple, set]] = None,
    return_json: Optional[bool] = False,
) -> Union[ChannelSectionResponse, dict]:
    """
    Retrieve channel sections by channel id (or for the authorized user).

    Args:
        channel_id:
            The id of the channel whose sections are requested.
        mine:
            Set True to fetch your own channel's sections instead
            (requires authorization).
        parts:
            Channel-section resource parts to retrieve; defaults to the
            public parts. Accepts a single part str, a comma-separated
            parts str, or a list/tuple/set of parts.
        return_json:
            When True, return the raw JSON data; otherwise return a
            pyyoutube.ChannelSectionResponse instance.

    Returns:
        ChannelSectionResponse or original data.
    """
    query = {"part": enf_parts(resource="channelSections", value=parts)}
    # 'mine' takes precedence over an explicit channel id
    if mine:
        query["mine"] = mine
    else:
        query["channelId"] = channel_id
    response = self._request(resource="channelSections", args=query)
    payload = self._parse_response(response)
    return payload if return_json else ChannelSectionResponse.from_dict(payload)
def get_i18n_regions(
    self,
    *,
    parts: Optional[Union[str, list, tuple, set]] = None,
    hl: Optional[str] = "en_US",
    return_json: Optional[bool] = False,
) -> Union[I18nRegionListResponse, dict]:
    """
    Retrieve all available regions.

    Args:
        parts:
            i18n-region resource parts to retrieve; defaults to the public
            parts. Accepts a single part str, a comma-separated parts str,
            or a list/tuple/set of parts.
        hl:
            Language code used to localize the region info. See
            https://developers.google.com/youtube/v3/docs/i18nLanguages.
        return_json:
            When True, return the raw JSON data; otherwise return a
            pyyoutube.I18nRegionListResponse instance.

    Returns:
        I18nRegionListResponse or origin data
    """
    query = {
        "hl": hl,
        "part": enf_parts(resource="i18nRegions", value=parts),
    }
    payload = self._parse_response(
        self._request(resource="i18nRegions", args=query)
    )
    if return_json:
        return payload
    return I18nRegionListResponse.from_dict(payload)
def get_i18n_languages(
    self,
    *,
    parts: Optional[Union[str, list, tuple, set]] = None,
    hl: Optional[str] = "en_US",
    return_json: Optional[bool] = False,
) -> Union[I18nLanguageListResponse, dict]:
    """
    Return the list of application languages the YouTube website supports.

    Args:
        parts:
            i18n-language resource parts to retrieve; defaults to the
            public parts. Accepts a single part str, a comma-separated
            parts str, or a list/tuple/set of parts.
        hl:
            Language code used to localize the language info. See
            https://developers.google.com/youtube/v3/docs/i18nLanguages.
        return_json:
            When True, return the raw JSON data; otherwise return a
            pyyoutube.I18nLanguageListResponse instance.

    Returns:
        I18nLanguageListResponse or original data.
    """
    query = {
        "hl": hl,
        "part": enf_parts(resource="i18nLanguages", value=parts),
    }
    payload = self._parse_response(
        self._request(resource="i18nLanguages", args=query)
    )
    if return_json:
        return payload
    return I18nLanguageListResponse.from_dict(payload)
def get_members(
self,
*,
parts: Optional[Union[str, list, tuple, set]] = None,
mode: Optional[str] = "all_current",
count: Optional[int] = 5,
limit: Optional[int] = 5,
page_token: Optional[str] = None,
has_access_to_level: Optional[str] = None,
filter_by_member_channel_id: Optional[Union[str, list, tuple, set]] = None,
return_json: Optional[bool] = False,
) -> Union[MemberListResponse, dict]:
"""
Retrieve a list of members for a channel.
Args:
parts ((str,list,tuple,set) optional):
The resource parts for member you want to retrieve.
If not provide, use default public parts.
You can pass this with single part str, comma-separated parts str or a list,tuple,set of parts.
mode:
The mode parameter indicates which members will be included in the API response.
Set the parameter value to one of the following values:
- all_current (default): List current members, from newest to oldest. When this value is used,
the end of the list is reached when the API response does not contain a nextPageToken.
- updates : List only members that joined or upgraded since the previous API call.
Note that the first call starts a new stream of updates but does not actually return any members.
To start retrieving the membership updates, you need to poll the endpoint using the
nextPageToken at your desired frequency.
Note that when this value is used, the API response always contains a nextPageToken.
count (int, optional):
The count will retrieve videos data.
Default is 5.
limit (int, optional):
The maximum number of items each request retrieve.
For members, this should not be more than 1000.
| |
<reponame>dylanirion/wildbook-ia
# -*- coding: utf-8 -*-
"""
Dependencies: flask, tornado
"""
import logging
from flask import current_app
from wbia.control import controller_inject
from wbia.web import appfuncs as appf
import utool as ut
from wbia.web import routes
(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')
register_route = controller_inject.get_wbia_flask_route(__name__)
def get_associations_dict(ibs, desired_species=None, **kwargs):
"""Build ``{name1: {name2: [imageset labels]}}`` of name co-occurrences.

For every imageset, records each name seen in it (diagonal entries,
name paired with itself) and every pair of names seen together. Pairs
are stored canonically with name1 <= name2. When ``desired_species``
is given, names are first filtered to that species ('zebra' on the
ZEBRA_Kaia database matches both plains and Grevy's zebras).
kwargs are forwarded to the Kaia imageset filter on ZEBRA_Kaia.
"""
import itertools
# ZEBRA_Kaia uses its own imageset filter; other databases use all
# non-special imagesets
if ibs.dbname == 'ZEBRA_Kaia':
valid_aid_set = set(ibs.get_valid_aids())
imageset_list = ibs._princeton_kaia_imageset_filtering(**kwargs)
else:
valid_aid_set = set(ibs.get_valid_aids())
imageset_list = ibs.get_valid_imgsetids(is_special=False)
# keep only real names (nid > 0)
valid_nid_set = ibs.get_annot_nids(valid_aid_set)
valid_nid_set = set([nid for nid in valid_nid_set if nid > 0])
imageset_text_list = ibs.get_imageset_text(imageset_list)
time_list = ibs.get_imageset_start_time_posix(imageset_list)
nids_list = ibs.get_imageset_nids(imageset_list)
ibs.delete_empty_nids()
def _associate(dict_, name1, name2, label):
# record `label` against the (name1, name2) pair, stored canonically
# with name1 <= name2
if name2 < name1:
name1, name2 = name2, name1
if name1 not in dict_:
dict_[name1] = {}
if name2 not in dict_[name1]:
dict_[name1][name2] = []
dict_[name1][name2].append('%s' % (label,))
assoc_dict = {}
for imageset_text, time_, nid_list in zip(imageset_text_list, time_list, nids_list):
if desired_species is not None:
# keep a name only if the majority species among its valid
# annotations matches the desired species
aids_list = ibs.get_name_aids(nid_list)
flag_list = []
for nid, aid_list in zip(nid_list, aids_list):
aid_list = list(set(aid_list) & set(valid_aid_set))
if len(aid_list) == 0:
flag = False
else:
species_list = ibs.get_annot_species(aid_list)
species = max(set(species_list), key=species_list.count)
if ibs.dbname == 'ZEBRA_Kaia' and desired_species == 'zebra':
flag = species in ['zebra_plains', 'zebra_grevys']
else:
flag = species == desired_species
flag_list.append(flag)
nid_list = ut.compress(nid_list, flag_list)
nid_list = list(set(nid_list) & valid_nid_set)
name_list = ibs.get_name_texts(nid_list)
# Add singles
for name in name_list:
_associate(assoc_dict, name, name, imageset_text)
# _associate(assoc_dict, name, name, time_)
# Add pairs
comb_list = itertools.combinations(name_list, 2)
for name1, name2 in sorted(list(comb_list)):
_associate(assoc_dict, name1, name2, imageset_text)
# _associate(assoc_dict, name, name, time_)
return assoc_dict
@register_route('/csv/princeton/associations/list/', methods=['GET'])
def download_associations_list(**kwargs):
    """Serve a CSV listing name pairs and the imagesets where they co-occur.

    Each row is ``NAME1,NAME2,<count>,<imageset labels>``. Commas inside
    imageset labels are escaped as ``:COMMA:`` so the CSV stays parseable.
    Query-string kwargs are forwarded to get_associations_dict() and are
    also folded into the download filename.
    """
    ibs = current_app.ibs
    # encode the request kwargs into the filename so different downloads
    # are distinguishable, e.g. associations.list.tier=1.csv
    key_str_list = []
    for key in sorted(kwargs.keys()):
        key_str = '%s=%s' % (key, kwargs[key])
        key_str_list.append(key_str)
    key_str = '.'.join(key_str_list)
    if len(key_str) > 0:
        key_str += '.'
    filename = 'associations.list.%scsv' % (key_str,)
    assoc_dict = get_associations_dict(ibs, **kwargs)
    combined_list = []
    max_length = 0
    for name1 in sorted(assoc_dict.keys()):
        for name2 in sorted(assoc_dict[name1].keys()):
            id_list = sorted(set(assoc_dict[name1][name2]))
            # escape commas so imageset labels cannot break the CSV columns
            id_list = [id_.replace(',', ':COMMA:') for id_ in id_list]
            max_length = max(max_length, len(id_list))
            args = (
                name1,
                name2,
                len(id_list),
                ','.join(id_list),
            )
            combined_str = '%s,%s,%s,%s' % args
            combined_list.append(combined_str)
    if max_length == 1:
        # BUG FIX: header previously read 'ENCOUTNER'; spell it to match
        # the multi-column 'ENCOUNTER%d' branch below.
        name_header_str = 'ENCOUNTER'
    else:
        name_header_str = ','.join(['ENCOUNTER%d' % (i + 1,) for i in range(max_length)])
    combined_str = '\n'.join(combined_list)
    combined_str = 'NAME1,NAME2,ASSOCIATIONS,%s\n' % (name_header_str,) + combined_str
    return appf.send_csv_file(combined_str, filename)
@register_route('/csv/princeton/associations/matrix/', methods=['GET'])
def download_associations_matrix(**kwargs):
    """Serve the name-by-name association-count matrix as a CSV download."""
    ibs = current_app.ibs
    # fold the request kwargs into the download filename
    key_fragments = ['%s=%s' % (key, kwargs[key]) for key in sorted(kwargs.keys())]
    key_str = '.'.join(key_fragments)
    if len(key_str) > 0:
        key_str += '.'
    filename = 'associations.matrix.%scsv' % (key_str,)
    assoc_dict = get_associations_dict(ibs, **kwargs)
    assoc_list = sorted(assoc_dict.keys())
    combined_list = []
    for name1 in assoc_list:
        row = [name1]
        for name2 in assoc_list:
            associations = assoc_dict[name1].get(name2, [])
            # blank cell when there is no association, otherwise the count
            cell = '' if len(associations) == 0 else len(associations)
            row.append('%s' % (cell,))
        combined_list.append(','.join(row))
    name_header_str = ','.join(assoc_list)
    combined_str = 'MATRIX,%s\n' % (name_header_str,) + '\n'.join(combined_list)
    return appf.send_csv_file(combined_str, filename)
@register_route('/csv/princeton/sightings/', methods=['GET'])
def download_sightings(**kwargs):
    """Serve the sightings report as a CSV download."""
    ibs = current_app.ibs
    # the Kaia database gets its dedicated report variant
    is_kaia = ibs.dbname == 'ZEBRA_Kaia'
    csv_text = routes.sightings(html_encode=False, kaia=is_kaia, **kwargs)
    return appf.send_csv_file(csv_text, 'sightings.csv')
@register_route('/csv/princeton/images/', methods=['GET'])
def get_image_info(**kwargs):
    """Serve per-image metadata plus the names seen in each image as a CSV.

    Columns: GID, FILENAME, TIMESTAMP, GPSLAT, GPSLON, PARTY, CONTRIBUTOR,
    NOTES, then NAME (or NAME1..NAMEn when some image contains several
    annotations). On the ZEBRA_Kaia database, images without any valid
    annotation are omitted.
    """
    import datetime

    ibs = current_app.ibs
    filename = 'images.csv'
    valid_aid_set = set(ibs.get_valid_aids())
    gid_list = sorted(ibs.get_valid_gids())
    gname_list = ibs.get_image_gnames(gid_list)
    datetime_list = ibs.get_image_unixtime(gid_list)
    # render POSIX timestamps in a human-readable format
    datetime_list_ = [
        datetime.datetime.fromtimestamp(datetime_).strftime('%Y-%m-%d %H:%M:%S')
        for datetime_ in datetime_list
    ]
    lat_list = ibs.get_image_lat(gid_list)
    lon_list = ibs.get_image_lon(gid_list)
    note_list = ibs.get_image_notes(gid_list)
    party_list = []
    contributor_list = []
    for note in note_list:
        # image notes are expected to look like 'party,contributor[,...]';
        # anything else falls back to UNKNOWN/UNKNOWN
        try:
            note = note.split(',')
            party, contributor = note[:2]
            party_list.append(party)
            contributor_list.append(contributor)
        except Exception:
            party_list.append('UNKNOWN')
            contributor_list.append('UNKNOWN')
    zipped_list = zip(
        gid_list,
        gname_list,
        datetime_list_,
        lat_list,
        lon_list,
        party_list,
        contributor_list,
        note_list,
    )
    # restrict each image's annotations to the still-valid ones
    aids_list = ibs.get_image_aids(gid_list)
    aids_list = [
        [aid_ for aid_ in aid_list_ if aid_ in valid_aid_set] for aid_list_ in aids_list
    ]
    names_list = [ibs.get_annot_name_texts(aid_list) for aid_list in aids_list]
    combined_list = [
        ','.join(map(str, list(zipped) + name_list))
        for zipped, aid_list, name_list in zip(zipped_list, aids_list, names_list)
        # BUG FIX: was `ibs.dbdir != 'ZEBRA_Kaia'` — dbdir is a filesystem
        # path, so that comparison was always True and the Kaia-only filter
        # never applied; every other Kaia check in this module uses dbname.
        if ibs.dbname != 'ZEBRA_Kaia' or len(aid_list) > 0
    ]
    max_length = 0
    for name_list in names_list:
        max_length = max(max_length, len(name_list))
    if max_length == 1:
        name_header_str = 'NAME'
    else:
        name_header_str = ','.join(['NAME%d' % (i + 1,) for i in range(max_length)])
    combined_str = '\n'.join(combined_list)
    combined_str = (
        'GID,FILENAME,TIMESTAMP,GPSLAT,GPSLON,PARTY,CONTRIBUTOR,NOTES,%s\n'
        % (name_header_str,)
        + combined_str
    )
    return appf.send_csv_file(combined_str, filename)
@register_route('/csv/princeton/demographics/', methods=['GET'])
def get_demographic_info(**kwargs):
    """Serve per-name demographics (sex and age bin) as a CSV download.

    Columns: NID, NAME, SEX, AGE. The age bin is derived from the per-name
    min/max estimated-age months; conflicting estimates across a name's
    annotations yield 'AMBIGUOUS', and unreviewed ages (missing or -1/-1)
    yield 'UNREVIEWED'.
    """
    ibs = current_app.ibs
    filename = 'demographics.csv'
    nid_list = sorted(ibs.get_valid_nids())
    name_list = ibs.get_name_texts(nid_list)
    sex_list = ibs.get_name_sex_text(nid_list)
    min_ages_list = ibs.get_name_age_months_est_min(nid_list)
    max_ages_list = ibs.get_name_age_months_est_max(nid_list)
    age_list = []
    for min_ages, max_ages in zip(min_ages_list, max_ages_list):
        # conflicting age estimates across a name's annotations -> ambiguous
        if len(set(min_ages)) > 1 or len(set(max_ages)) > 1:
            age_list.append('AMBIGUOUS')
            continue
        min_age = min_ages[0] if len(min_ages) > 0 else None
        max_age = max_ages[0] if len(max_ages) > 0 else None
        # BUG FIX: was `min_age is -1 and max_age is -1` — identity
        # comparison against an int literal is implementation-dependent;
        # use equality.
        if (min_age is None and max_age is None) or (min_age == -1 and max_age == -1):
            age_list.append('UNREVIEWED')
            continue
        # map the (min, max) month estimates onto the reporting bins;
        # anything unrecognized falls through to 'Unknown'
        if min_age is None and max_age == 2:
            age = '0-3 Months'
        elif min_age == 3 and max_age == 5:
            age = '3-6 Months'
        elif min_age == 6 and max_age == 11:
            age = '6-12 Months'
        elif min_age == 12 and max_age == 23:
            age = 'Yearling'
        elif min_age == 24 and max_age == 35:
            age = '2-Year-Old'
        elif min_age == 36 and max_age is None:
            age = 'Adult'
        else:
            age = 'Unknown'
        age_list.append(age)
    zipped_list = zip(nid_list, name_list, sex_list, age_list)
    combined_list = [','.join(map(str, list(zipped))) for zipped in zipped_list]
    combined_str = 'NID,NAME,SEX,AGE\n' + '\n'.join(combined_list)
    return appf.send_csv_file(combined_str, filename)
@register_route('/csv/princeton/special/kaia-dung-samples/', methods=['GET'])
def get_annotation_special_kaia_dung_samples(**kwargs):
    """Serve the Kaia dung-sample report as a CSV download.

    One row per annotation with a non-empty note (the dung-sample label),
    carrying the name, demographic attributes, body condition, and the
    tier-1 zebra encounters the name was seen in.
    Columns: NAME, NID, AID, DUNGSAMPLE, AGE, SEX, CONDITION, ENCOUNTER(s).
    """
    ibs = current_app.ibs
    filename = 'dungsamples.csv'
    # keep only annotations whose notes field (the dung-sample label) is set
    aid_list = ibs.get_valid_aids()
    dungsample_list = ibs.get_annot_notes(aid_list)
    flag_list = [len(dungsample) > 0 for dungsample in dungsample_list]
    aid_list = ut.compress(aid_list, flag_list)
    dungsample_list = ut.compress(dungsample_list, flag_list)
    nid_list = ibs.get_annot_nids(aid_list)
    name_list = ibs.get_annot_name_texts(aid_list)
    assoc_dict = get_associations_dict(ibs, desired_species='zebra', tier=1)
    encounter_str_list = []
    max_length = 0
    # BUG FIX: iterate name_list in annotation order (was sorted(name_list)),
    # so each encounter string stays aligned with its own name/nid/aid row
    # when everything is zipped together below.
    for name in name_list:
        id_list = sorted(set(assoc_dict[name][name]))
        # escape commas so encounter labels cannot break the CSV columns
        id_list = [id_.replace(',', ':COMMA:') for id_ in id_list]
        max_length = max(max_length, len(id_list))
        encounter_str = ','.join(id_list)
        encounter_str_list.append(encounter_str)
    sex_list = ibs.get_annot_sex(aid_list)
    age_min_list = ibs.get_annot_age_months_est_min(aid_list)
    age_max_list = ibs.get_annot_age_months_est_max(aid_list)
    condition_list = ibs.get_annot_qualities(aid_list)
    sex_list_ = []
    age_list_ = []
    condition_list_ = []
    zipped = zip(sex_list, age_min_list, age_max_list, condition_list)
    for sex_, age_min, age_max, condition_ in zipped:
        # normalize the sex code (1 / 0 / other) to a canonical token
        if sex_ == 1:
            sex_ = 'male'
        elif sex_ == 0:
            sex_ = 'female'
        else:
            sex_ = 'unknown'
        # bucket the (min, max) month estimates into bins age1..age6
        if age_min is None and age_max == 2:
            age_ = 'age1'
        elif age_min == 3 and age_max == 5:
            age_ = 'age2'
        elif age_min == 6 and age_max == 11:
            age_ = 'age3'
        elif age_min == 12 and age_max == 23:
            age_ = 'age4'
        elif age_min == 24 and age_max == 35:
            age_ = 'age5'
        elif age_min == 36 and age_max is None:
            age_ = 'age6'
        else:
            age_ = 'unknown'
        if condition_ is None:
            condition_ = 0
        # sanity-check the canonical tokens before mapping to display labels
        assert age_ in ['age1', 'age2', 'age3', 'age4', 'age5', 'age6', 'unknown']
        assert sex_ in ['male', 'female', 'unknown']
        assert 0 <= condition_ and condition_ <= 5
        sex_ = {'male': 'Male', 'female': 'Female'}.get(sex_, 'Unknown')
        age_ = {
            'age1': '0-3 Months',
            'age2': '3-6 Months',
            'age3': '6-12 Months',
            'age4': 'Yearling',
            'age5': '2-Year-Old',
            'age6': 'Adult',
        }.get(age_, 'Unknown')
        sex_list_.append(sex_)
        age_list_.append(age_)
        condition_list_.append(condition_)
    if max_length == 1:
        # BUG FIX: header previously read 'ENCOUTNER'; spell it to match
        # the multi-column 'ENCOUNTER%d' branch below.
        name_header_str = 'ENCOUNTER'
    else:
        name_header_str = ','.join(['ENCOUNTER%d' % (i + 1,) for i in range(max_length)])
    zipped_list = sorted(
        zip(
            name_list,
            nid_list,
            aid_list,
            dungsample_list,
            age_list_,
            sex_list_,
            condition_list_,
            encounter_str_list,
        )
    )
    combined_list = [','.join(map(str, list(zipped_))) for zipped_ in zipped_list]
    combined_str = '\n'.join(combined_list)
    combined_str = (
        'NAME,NID,AID,DUNGSAMPLE,AGE,SEX,CONDITION,%s\n' % (name_header_str,)
        + combined_str
    )
    return appf.send_csv_file(combined_str, filename)
@register_route('/csv/princeton/special/monica-laurel-max/', methods=['GET'])
def get_annotation_special_monica_laurel_max(desired_species=None, **kwargs):
ibs = current_app.ibs
filename = | |
empty initial angles for the circuit. modify later.
self.initial_packed_amps = []
if (strategy == 'UCCSD') and (method != 'linalg'):
# UCCSD circuit strategy preparations
self.ref_state = ref_state_preparation_circuit(
molecule,
ref_type='HF',
cq=self.custom_qubits)
if self.parametric_way:
# in the parametric_way,
# the circuit is built with free parameters
self.ansatz = uccsd_ansatz_circuit_parametric(
self.molecule.n_orbitals,
self.molecule.n_electrons,
cq=self.custom_qubits)
else:
# in the non-parametric_way,
# the circuit has hard-coded angles for the gates.
self.ansatz = uccsd_ansatz_circuit(
self.initial_packed_amps,
self.molecule.n_orbitals,
self.molecule.n_electrons,
cq=self.custom_qubits)
elif strategy == 'HF':
self.ref_state = ref_state_preparation_circuit(
self.molecule,
ref_type='HF',
cq=self.custom_qubits)
self.ansatz = Program()
elif strategy == 'custom_program':
self.parametric_way = True
self.ref_state = Program()
self.ansatz = Program()
# prepare tomography experiment if necessary
if self.tomography:
if self.method == 'linalg':
raise NotImplementedError('Tomography is not'
' yet implemented for the linalg method.')
self.compile_tomo_expts()
else:
# avoid having to re-calculate the PauliSum object each time,
# store it.
self.pauli_sum = PauliSum(self.pauli_list)
# perform miscellaneous method-specific preparations
if self.method == 'QC':
if qc is None:
raise ValueError('Method is QC, please supply a valid '
'QuantumComputer() object to the qc variable.')
elif self.method == 'WFS':
if (self.qc is not None) or (self.custom_qubits is not None):
raise ValueError('The WFS method is not intended to be used'
' with a custom qubit lattice'
' or QuantumComputer object.')
elif self.method == 'Numpy':
if self.parametric_way:
raise ValueError('NumpyWavefunctionSimulator() backend'
' does not yet support parametric programs.')
if (self.qc is not None) or (self.custom_qubits is not None):
raise ValueError('NumpyWavefunctionSimulator() backend is'
' not intended to be used with a '
'QuantumComputer() object or custom lattice. '
'Consider using PyQVM instead')
elif self.method == 'linalg':
if molecule is not None:
# sparse initial state vector from the MolecularData() object
self.initial_psi = jw_hartree_fock_state(
self.molecule.n_electrons,
2*self.molecule.n_orbitals)
# sparse operator from the MolecularData() object
self.hamiltonian_matrix = get_sparse_operator(
self.H,
n_qubits=self.n_qubits)
else:
self.hamiltonian_matrix = get_sparse_operator(
pyquilpauli_to_qubitop(PauliSum(self.pauli_list)))
self.initial_psi = None
print('Please supply VQE initial state with method'
' VQEexperiment().set_initial_state()')
else:
raise ValueError('unknown method: please choose from method ='
' {linalg, WFS, tomography} for direct linear '
'algebra, pyquil WavefunctionSimulator, '
'or doing Tomography, respectively')
def compile_tomo_expts(self, pauli_list=None):
"""
This method compiles the tomography experiment circuits
and prepares them for simulation.
Every time the circuits are adjusted,
re-compiling the tomography experiments
is required to affect the outcome.

:param list pauli_list: optional list of Pauli terms to compile;
defaults to this instance's stored self.pauli_list.
"""
self.offset = 0
# use Forest's sorting algo from the Tomography suite
# to group Pauli measurements together
experiments = []
if pauli_list is None:
pauli_list = self.pauli_list
for term in pauli_list:
# if the Pauli term is an identity operator,
# add the term's coefficient directly to the VQE class' offset
if len(term.operations_as_set()) == 0:
self.offset += term.coefficient.real
else:
# initial state and term pair.
experiments.append(ExperimentSetting(
TensorProductState(),
term))
suite = Experiment(experiments, program=Program())
# group_experiments merges settings that can share one measurement
gsuite = group_experiments(suite)
grouped_list = []
for setting in gsuite:
group = []
for term in setting:
group.append(term.out_operator)
grouped_list.append(group)
if self.verbose:
print('Number of tomography experiments: ', len(grouped_list))
# one GroupedPauliSetting per measurement group, built from the
# current reference-state and ansatz circuits
self.experiment_list = []
for group in grouped_list:
self.experiment_list.append(
GroupedPauliSetting(group,
qc=self.qc,
ref_state=self.ref_state,
ansatz=self.ansatz,
shotN=self.shotN,
parametric_way=self.parametric_way,
n_qubits=self.n_qubits,
method=self.method,
verbose=self.verbose,
cq=self.custom_qubits,
))
def objective_function(self, amps=None):
"""
This function returns the Hamiltonian expectation value
over the final circuit output state.
If argument packed_amps is given,
the circuit will run with those parameters.
Otherwise, the initial angles will be used.
:param [list(), numpy.ndarray] amps: list of circuit angles
to run the objective function over.
:return: energy estimate
:rtype: float
:raises TypeError: if amps is neither a list nor a numpy array.
:raises ValueError: if self.method is not one of the supported
backends.
"""
E = 0
t = time.time()
# copy the supplied angles so the caller's list is never mutated
if amps is None:
packed_amps = self.initial_packed_amps
elif isinstance(amps, np.ndarray):
packed_amps = amps.tolist()[:]
elif isinstance(amps, list):
packed_amps = amps[:]
else:
raise TypeError('Please supply the circuit parameters'
' as a list or np.ndarray')
# per-term expectation values collected during tomography runs
self.term_es = {}
if self.tomography:
if (not self.parametric_way) and (self.strategy == 'UCCSD'):
# modify hard-coded type ansatz circuit based
# on packed_amps angles
self.ansatz = uccsd_ansatz_circuit(
packed_amps,
self.molecule.n_orbitals,
self.molecule.n_electrons,
cq=self.custom_qubits)
# circuits changed, so the experiments must be recompiled
self.compile_tomo_expts()
for experiment in self.experiment_list:
E1, term_es = experiment.run_experiment(self.qc, packed_amps)
self.term_es.update(term_es)
E += E1
# Run tomography experiments
E += self.offset
# add the offset energy to avoid doing superfluous
# tomography over the identity operator.
elif self.method == 'WFS':
# In the direct WFS method without tomography,
# direct access to wavefunction is allowed and expectation
# value is exact each run.
if self.parametric_way:
E += WavefunctionSimulator().expectation(
self.ref_state+self.ansatz,
self.pauli_sum,
{'theta': packed_amps}).real
# attach parametric angles here
else:
if packed_amps is not None:
# modify hard-coded type ansatz circuit
# based on packed_amps angles
self.ansatz = uccsd_ansatz_circuit(
packed_amps,
self.molecule.n_orbitals,
self.molecule.n_electrons,
cq=self.custom_qubits)
E += WavefunctionSimulator().expectation(
self.ref_state+self.ansatz,
self.pauli_sum).real
elif self.method == 'Numpy':
# NumPy-backed simulator: exact expectation, but no support for
# parametric (memory-register) programs
if self.parametric_way:
raise ValueError('NumpyWavefunctionSimulator() backend'
' does not yet support parametric programs.')
else:
if packed_amps is not None:
self.ansatz = uccsd_ansatz_circuit(
packed_amps,
self.molecule.n_orbitals,
self.molecule.n_electrons,
cq=self.custom_qubits)
E += NumpyWavefunctionSimulator(n_qubits=self.n_qubits).\
do_program(self.ref_state+self.ansatz).\
expectation(self.pauli_sum).real
elif self.method == 'linalg':
# check if molecule has data sufficient to construct UCCSD ansatz
# and propagate starting from HF state
if self.molecule is not None:
propagator = normal_ordered(
uccsd_singlet_generator(
packed_amps,
2 * self.molecule.n_orbitals,
self.molecule.n_electrons,
anti_hermitian=True))
qubit_propagator_matrix = get_sparse_operator(
propagator,
n_qubits=self.n_qubits)
# apply exp(propagator) to the HF state without forming the
# dense matrix exponential
uccsd_state = expm_multiply(qubit_propagator_matrix,
self.initial_psi)
expected_uccsd_energy = expectation(
self.hamiltonian_matrix, uccsd_state).real
E += expected_uccsd_energy
else:
# apparently no molecule was supplied;
# attempt to just propagate the ansatz from user-specified
# initial state, using a circuit unitary
# if supplied by the user, otherwise the initial state itself,
# and then estimate over <H>
if self.initial_psi is None:
raise ValueError('Warning: no initial wavefunction set.'
' Please set using '
'VQEexperiment().set_initial_state()')
# attempt to propagate with a circuit unitary
if self.circuit_unitary is None:
psi = self.initial_psi
else:
psi = expm_multiply(self.circuit_unitary, self.initial_psi)
E += expectation(self.hamiltonian_matrix, psi).real
else:
raise ValueError('Impossible method: please choose from method'
' = {WFS, Numpy, linalg} if Tomography is set'
' to False, or choose from method = '
'{QC, WFS, Numpy, linalg} if tomography is set to True')
if self.verbose:
self.it_num += 1
print('black-box function call #' + str(self.it_num))
print('Energy estimate is now: ' + str(E))
print('at angles: ', packed_amps)
print('and this took ' + '{0:.3f}'.format(time.time()-t) + \
' seconds to evaluate')
# keep a running record of energies for convergence inspection
self.history.append(E)
return E
def start_vqe(self, theta=None, maxiter: int = 0, options: dict = None):
    """
    Start the VQE algorithm: run the classical optimizer with the
    quantum-circuit energy estimate as the objective function.

    The user can supply an initial circuit setting; otherwise the stored
    initial settings are used. maxiter refers to the scipy optimizer's
    number of iterations (which may well be much less than the number of
    objective-function calls).

    :param [list(),numpy.ndarray] theta: list of initial angles
        for the circuit to start the optimizer in.
    :param int maxiter: maximum number of iterations; 0 keeps the stored
        self.maxiter.
    :param Dict options: options for the scipy.minimize classical
        optimizer; defaults to None (no extra options).
    :return: scipy.optimize.minimize result object containing convergence
        details and final energies. See scipy docs
    :rtype: OptimizeResult
    :raises ValueError: for a static ('HF') circuit or negative maxiter.
    :raises TypeError: for theta or maxiter of an unsupported type.
    """
    # BUG FIX: `options: dict = {}` used a mutable default argument (a
    # Python anti-pattern); use None as the sentinel instead.
    if options is None:
        options = {}
    t0 = time.time()
    if self.strategy == 'HF':
        raise ValueError('Warning: vqe object set to a static circuit,'
                         ' no variational algorithm possible. '
                         'Consider running the method objective_function()'
                         ' instead.')
    # allows user to initialize the VQE with custom circuit angles.
    # len(angles) must be equal to the size of
    # memory register Theta in the parametric program
    if theta is None:
        if self.verbose:
            print('Setting starting circuit parameters to initial amps: ',
                  self.initial_packed_amps)
        starting_angles = np.array(self.initial_packed_amps)
    elif isinstance(theta, np.ndarray):
        starting_angles = theta
    elif isinstance(theta, list):
        starting_angles = np.array(theta)
    else:
        raise TypeError('Please supply the circuit parameters'
                        ' as a list or np.ndarray')
    if not isinstance(maxiter, int):
        raise TypeError('Max number of iterations, maxiter,'
                        ' should be a positive integer.')
    elif maxiter < 0:
        raise ValueError('Max number of iterations, maxiter,'
                         ' should be positive.')
    # store historical values of the optimizer
    self.history = []
    if maxiter > 0:
        max_iter = maxiter
        self.maxiter = maxiter
    else:
        max_iter = self.maxiter
    # base options, extendable by the options dict supplied by the caller
    base_options = {'disp': self.verbose, 'maxiter': max_iter}
    self.it_num = 0
    # run the classical optimizer with the quantum circuit evaluation
    # as an objective function.
    self.res = minimizer(self.objective_function,
                         starting_angles,
                         method=self.optimizer,
                         options={**base_options, **options})
    if self.verbose:
        print('VQE optimization took ' +
              '{0:.3f}'.format(time.time()-t0) + ' seconds to evaluate')
    return self.res
def get_exact_gs(self, hamiltonian=None):
| |
<gh_stars>10-100
"""
fuzzyvault.py
This contains my attempt at creating a Python version
of pseudocode for the key recovery project.
There are three functions of interest:
gen_params, gen_secret, gen_keys
"""
# pylint: disable=no-name-in-module, invalid-name
import json
import secrets
import binascii
from typing import List
import hashlib
import hmac
import scrypt # type: ignore
import sympy # type: ignore
from flint import nmod_poly, nmod_mat, nmod # type: ignore
import gauss
class FuzzyError(Exception):
    """
    The single exception type raised for every error in this module.

    Errors are never returned as values; callers of GenerateSecret and
    RecoverSecret must catch FuzzyError to detect failures. For example,
    when RecoverSecret finds that the recovery words are insufficient to
    recover the keys, a FuzzyError is raised.
    """
    def __init__(self, message: str):
        # Fix: pass the message to Exception so str(e) and tracebacks show
        # it (previously Exception.__init__ was called with no arguments,
        # leaving str(e) empty). The .message attribute is kept for
        # existing callers.
        Exception.__init__(self, message)
        self.message = message
    def __repr__(self) -> str:
        return self.message
class FuzzyInput:
"""
represents the input to gen_params
"""
def __init__(self, json_string: str):
"constructor from json string representation"
obj = json.loads(json_string)
self.setSize: int = obj["setSize"]
self.correctThreshold: int = obj["correctThreshold"]
self.corpusSize: int = obj["corpusSize"]
try:
self.randomBytes = hex_list_to_bytes(obj["randomBytes"])
except KeyError:
self.randomBytes = None
def get_object(self):
"return a dictionary representing the input"
ans = {
"setSize" : self.setSize,
"correctThreshold" : self.correctThreshold,
"corpusSize": self.corpusSize
}
if self.randomBytes:
ans["randomBytes"] = bytes_to_hex_list(self.randomBytes, 32)
return ans
def __repr__(self) -> str:
"for printing"
return json.dumps(self.get_object(), indent=2)
class Params:
    """Output of gen_params and input of gen_secret."""

    def __init__(self, input_json: str):
        """Parse a Params JSON string into typed attributes."""
        data = json.loads(input_json)
        self.setSize: int = data["setSize"]
        self.correctThreshold: int = data["correctThreshold"]
        self.corpusSize: int = data["corpusSize"]
        self.prime: int = data["prime"]
        self.extractor: List[int] = data["extractor"]
        # The salt travels through JSON as a hex string.
        self.salt: bytes = hex_to_bytes(data["salt"])

    def get_object(self):
        """Return a JSON-serializable dict view of the parameters."""
        return {
            "setSize": self.setSize,
            "correctThreshold": self.correctThreshold,
            "corpusSize": self.corpusSize,
            "prime": self.prime,
            "extractor": self.extractor,
            "salt": bytes_to_hex(self.salt),
        }

    def __repr__(self) -> str:
        """Pretty JSON text form."""
        return json.dumps(self.get_object(), indent=2)
# pylint: disable=too-many-instance-attributes
class Secret:
    """
    Represents the output of gen_secret and the input to gen_keys.

    Holds the public parameters (set size, threshold, corpus size, prime,
    extractor, salt) plus the secure sketch of the original words and a
    slow hash used to verify a successful recovery.
    """
    def __init__(self, json_string: str, words_json: str = None):
        """
        Constructor with two distinct modes:
        1. json_string is the JSON of a Params object and words_json is
           the JSON list of the original words: the sketch and hash are
           computed here.
        2. json_string is the JSON of a Secret object and words_json is
           None: the sketch and hash are copied straight from the JSON.
        """
        obj = json.loads(json_string)
        # Common public parameters (present in both Params and Secret JSON).
        self.setSize: int = obj["setSize"]
        self.correctThreshold: int = obj["correctThreshold"]
        self.corpusSize: int = obj["corpusSize"]
        self.prime: int = obj["prime"]
        self.extractor: List[int] = obj["extractor"]
        self.salt: bytes = hex_to_bytes(obj["salt"])
        if words_json:
            # Mode 1: json_string represents a Params object.
            # The sketch and hash must be calculated from the words.
            words = json.loads(words_json)
            check_words(words, self.setSize, self.corpusSize)
            # Sketch length: twice the number of tolerated wrong words.
            errorThreshold = 2 * (self.setSize - self.correctThreshold)
            self.sketch: List[int] = gen_sketch(words, self.prime, errorThreshold)
            # Hash the sorted words so the digest is order-independent.
            words.sort()
            self.hash: bytes = scrypt.hash("original_words:" + str(words), self.salt)
        else:
            # Mode 2: the input JSON already represents a Secret;
            # just copy the precomputed data.
            self.sketch: List[int] = obj["sketch"]
            self.hash: bytes = hex_to_bytes(obj["hash"])
    def get_object(self):
        "return a JSON-serializable dictionary representing this Secret"
        return {
            "setSize" : self.setSize,
            "correctThreshold" : self.correctThreshold,
            "corpusSize": self.corpusSize,
            "prime": self.prime,
            "extractor": self.extractor,
            "salt": bytes_to_hex(self.salt),
            "sketch": self.sketch,
            "hash": bytes_to_hex(self.hash)
        }
    def __repr__(self) -> str:
        "for printing"
        return json.dumps(self.get_object(), indent=2)
# pylint: enable=too-many-instance-attributes
class Rng:
    """Random source that replays a supplied byte buffer when given one.

    With a non-empty buffer the instance deterministically consumes it in
    order (useful for reproducible runs); with a falsy buffer it defers
    to random_bytes() for fresh randomness.
    """

    def __init__(self, randomBytes: bytes):
        """Remember the optional replay buffer and start the cursor at 0."""
        self.randomBytes = randomBytes
        self.index = 0

    def get_bytes(self, count: int) -> bytes:
        """Return `count` random bytes, consuming the buffer when present."""
        if not self.randomBytes:
            return random_bytes(count)
        start = self.index
        self.index = start + count
        return self.randomBytes[start:self.index]

    def get_int(self) -> int:
        """Return a random unsigned 32-bit integer (little-endian bytes)."""
        if not self.randomBytes:
            return Rng.create_int(random_bytes(4))
        chunk = self.randomBytes[self.index:self.index + 4]
        self.index += 4
        return Rng.create_int(chunk)

    def select(self, n, m) -> List[int]:
        """
        Randomly select m distinct integers from 0 ... n-1
        (a partial Fisher-Yates shuffle).
        """
        assert 0 < m <= n
        deck = list(range(n))
        for i in range(m):
            j = i + (self.get_int() % (n - i))
            deck[i], deck[j] = deck[j], deck[i]
        return deck[:m]

    @staticmethod
    def create_int(xs: bytes) -> int:
        """Build an unsigned 32-bit int from 4 bytes in little-endian order."""
        return xs[0] | (xs[1] << 8) | (xs[2] << 16) | (xs[3] << 24)
def gen_params(input_json: str) -> str:
    """
    input: JSON string representing an Input class
    output: JSON string representing a Params class

    Adds a prime just above the corpus size, a 32-byte salt and a random
    extractor vector to the fields already present in the input.
    """
    parsed = FuzzyInput(input_json)
    rng = Rng(parsed.randomBytes)
    out = json.loads(input_json)
    out["prime"] = first_prime_greater_than(parsed.corpusSize)
    # NOTE: the rng consumption order (salt first, then extractor) is part
    # of the reproducible-output contract when randomBytes is supplied.
    out["salt"] = bytes_to_hex(rng.get_bytes(32))
    out["extractor"] = rng.select(out["prime"], parsed.setSize)
    return json.dumps(out, indent=2)
def gen_secret(params_json: str, original_words_json: str) -> str:
    """
    input: output of gen_params, JSON string with a list of integers (words)
    output: JSON string representing a Secret class
    """
    return str(Secret(params_json, original_words_json))
def get_hash(words: List[int], salt: bytes) -> bytes:
    """
    Return the slow (scrypt) hash of a set of integers (words).

    A sorted copy is hashed so the result is order-independent and the
    caller's list is never mutated.
    """
    return scrypt.hash("original_words:" + str(sorted(words)), salt)
def get_ek(secret: Secret, words: List[int]) -> bytes:
    """
    Return the bytes constant used in the RecoverSecret loop; it is
    passed into key_derivation.

    Computes e = (s_1 * a_1) * (s_2 * a_2) * ... * (s_n * a_n) (mod p),
    where s_i are the extractor values and a_i the sorted words, then
    hashes it with scrypt under the secret's salt.

    Fix: previously aList aliased the caller's list and .sort() mutated
    it in place as a side effect; a sorted copy is used instead.
    """
    sList = secret.extractor
    aList = sorted(words)  # sorted copy: never mutate the caller's list
    e: nmod = nmod(1, secret.prime)
    for i in range(secret.setSize):
        e *= sList[i] * aList[i]
    return scrypt.hash("key:" + str(e), secret.salt)
def gen_keys(secret_json: str, recovery_words_json: str, key_count: int) -> str:
    """
    input: output of gen_secret, a JSON string representing a list of
        integers (words) that are a guess of the original words passed
        into gen_secret, and the number of keys wanted
    output: a JSON string representing a list of keys where each key is
        represented as a hex string
    raises: FuzzyError when the guess cannot be corrected back to the
        original word set.

    Fix: the else-branch previously computed ek from the (wrong) guess
    words and then unconditionally recomputed it from the recovered
    words -- a wasted, expensive scrypt evaluation with no observable
    effect. The dead call has been removed.
    """
    assert key_count >= 0
    if key_count == 0:
        return '[]'
    secret = Secret(secret_json)
    words = json.loads(recovery_words_json)
    check_words(words, secret.setSize, secret.corpusSize)
    words.sort()
    if secret.hash == get_hash(words, secret.salt):
        # The guess is already exactly the original word set.
        ek = get_ek(secret, words)
    else:
        # Try to correct the guess via the secure sketch, then verify.
        recouped_words = recoup_words(secret, words)
        if secret.hash != get_hash(recouped_words, secret.salt):
            raise FuzzyError("Hashes do not match")
        ek = get_ek(secret, recouped_words)
    keys = [bytes_to_hex(key_derivation(ek, k)) for k in range(key_count)]
    return json.dumps(keys, indent=2)
def recoup_words(secret: Secret, words: List[int]) -> List[int]:
    """
    Recover the original words from a guess via Berlekamp-Welch decoding.

    Evaluates the sketch polynomial at the guessed words and decodes the
    resulting (possibly corrupted) codeword; the roots of the difference
    polynomial are the original words.
    """
    if len(words) != secret.setSize:
        raise FuzzyError("length of words is not equal to setsize")
    # Number of sketch symbols, i.e. twice the tolerated error count.
    errorThreshold = 2 * (secret.setSize - secret.correctThreshold)
    p_high: nmod_poly = get_phigh(secret.sketch, secret.setSize, secret.prime)
    xs: List[int] = words
    ys: List[int] = [p_high(x) for x in xs]
    p_low: nmod_poly = Berlekamp_Welch(
        xs,
        ys,
        secret.setSize - errorThreshold,
        errorThreshold // 2,
        secret.prime,
    )
    p_diff: nmod_poly = p_high - p_low
    if has_repeated_roots(p_diff, secret.prime):
        raise FuzzyError("repeated roots have been detected")
    return find_roots(p_diff)
def isprime(candidate: int) -> bool:
    """
    Return True when candidate is prime.

    Thin wrapper around sympy so the rest of the module does not depend
    on sympy directly.
    """
    return sympy.isprime(candidate)
def create_poly(xs: List[int], p: int) -> nmod_poly:
    """
    Return the monic polynomial (z - xs[0]) ... (z - xs[N-1]) mod p,
    i.e. the polynomial whose roots are exactly the given values.
    """
    product: nmod_poly = nmod_poly([1], p)
    for root in xs:
        product = product * nmod_poly([-root, 1], p)
    return product
def gen_sketch(words: List[int], prime: int, thresh: int) -> List[int]:
    """
    Return the `thresh` coefficients sitting just below the (monic)
    leading term of prod (z - w) over Z/prime, as plain ints.
    """
    if thresh % 2 != 0:
        raise FuzzyError("bad error threshold")
    monic: nmod_poly = create_poly(words, prime)
    degree = len(words)
    coefficients = monic.coeffs()
    return [int(c) for c in coefficients[degree - thresh:degree]]
def get_phigh(tlist: List[int], s: int, p: int) -> nmod_poly:
    """
    Return the monic degree-s polynomial over Z/pZ whose top coefficients
    (just below the leading 1) are tlist, zero-padded below.
    """
    padding: List[int] = [0] * (s - len(tlist))
    return nmod_poly(padding + list(tlist) + [1], p)
def has_repeated_roots(poly: nmod_poly, prime: int) -> bool:
    """
    returns True if the polynomial has repeated roots
    """
    # Validate the modulus before building the field polynomial below.
    if prime < 2 or not isprime(prime):
        raise FuzzyError("prime is not prime")
    # Build z^prime - z, whose roots are exactly every element of
    # GF(prime), each with multiplicity one.
    temp: List[int] = [0] * (prime + 1)
    temp[-1] = 1
    temp[1] = -1
    zpoly: nmod_poly = nmod_poly(temp, prime)
    # poly divides z^prime - z iff poly is a product of distinct linear
    # factors; a non-zero remainder therefore signals a repeated root.
    # NOTE(review): a non-zero remainder also occurs when poly has an
    # irreducible factor with no root in GF(prime) -- callers appear to
    # treat both cases as failure; confirm that is intended.
    result: nmod_poly = zpoly % poly
    return result.coeffs() != []
def mod_get_powers(a: int, n: int, p: int) -> List[nmod]:
    """
    Return [1, a, a^2, ..., a^(n-1)] as nmod values modulo p.

    example:
        mod_get_powers(3, 4, 7) = [1, 3, 9, 27] (mod 7) = [1, 3, 2, 6]

    raises FuzzyError when n < 1 or p is not prime.

    Fix: the original implementation only handled a == 0 and fell off
    the end (implicitly returning None) for every other base. The loop
    below covers all bases, including a == 0 (0^0 is taken as 1, as
    before).
    """
    if n < 1:
        raise FuzzyError("upper power n is not positive")
    if p < 2 or not isprime(p):
        raise FuzzyError("prime is not prime")
    powers: List[nmod] = [nmod(1, p)]
    current = nmod(1, p)
    for _ in range(1, n):
        current = current * a
        powers.append(current)
    return powers
def my_gen():
"""
This is | |
print "Possibly,all the result are summarised as follow:"
print len(result)
for item in report:
show(item)
print
print
else:
print len(result)
for item in report:
show(item)
print
print
return;
#########################end show_status###################################
def combine (set,l,pack):
    '''
    output set [[]]
    input: l,pack
    Append one flattened group to set: a copy of l followed by every
    sub-list of pack, e.g.
    [3,3,3,4,4,4] + [[7,7],[8,8]] appends [3,3,3,4,4,4,7,7,8,8] to set.
    (Note: "set" here is just the historical parameter name, not the
    builtin type; all call sites pass it positionally.)
    '''
    merged = l[:]  # copy, so the caller's l is never mutated
    for group in pack:
        merged = merged + group
    set.append(merged)
    return
def is_pai(l):
    '''
    Return True if l is a pair (exactly two identical tiles),
    False otherwise.

    Fix: the original fell off the end and returned None (rather than
    False) for any non-empty, non-pair input. Callers only use the
    result in boolean context, so always returning a bool is a
    compatible, clearer contract.
    '''
    #Bug17 2,W
    return len(l) == 2 and l[0] == l[1]
def is_sin(l):
    '''
    Return True if l is a single tile (length exactly 1), False otherwise.

    Fix: the original fell off the end and returned None (rather than
    False) for inputs longer than one; callers only test truthiness, so
    returning a real bool is compatible.
    NOTE(review): the original docstring said "a sin except 2 or joker"
    but the code never checked for 2s or jokers; behavior is preserved
    as written -- confirm the intent with the author.
    '''
    #Bug17 2,W
    return len(l) == 1
def step4_big_seq_discard(t_seq,pool,flag):
'''
This special function is to let a card
discarded from a big seq to pool
the choice to discard which side card
is according to the flag
Thus, in the find_all_plays this flag
should be different from the previous
'''
if len(pool) == 0:
if debug <= 6:
print "Error in step4_big_seq_discard"
return False
len_pool = sum(map(len,pool))
len_seq = sum(map(len,seq))
if is_sin(pool[0]):
for i in range(len(t_seq)):
if len(t_seq[i]) > 5: #find a 5+ seq
#BUG 15
if pool[0][0]!=t_seq[i][0] and pool[0][0]!=t_seq[i][-1]:
continue #find the next one
if flag: #become a pari 5 5678910-> 55 678910
pool[0].append(pool[0][0]) #[[5]] -> [[5,5]]
t_seq[i].remove(pool[0][0]) #5 removed from t_seq[i]
break #only once
else:
if t_seq[i][0] != pool[0][0]:
t_seq[i].remove(t_seq[i][0])
pool[0].append(t_seq[i][0]) #[[5]] -> [[10,5]]
else:
pool.append([t_seq[i][-1]])#pool append another sin Bug 24
t_seq[i].remove(t_seq[i][-1])
break #only one card case
else:
if debug <= 6:
print "Error in step4_big_seq_discard"
return False
new_len_pool = sum(map(len,pool))
new_len_seq = sum(map(len,t_seq))
if new_len_pool != 1: #There is a change
if debug <= 6:
if len_pool + len_seq != new_len_pool + new_len_seq:
print "Step 4.5 seq give back one failed"
return False
if debug <= 6:
print "Step 4.5 special case success"
print "seq",t_seq
print "pool",pool
return True
################end step4_big_seq_discard#################################
def step6_tri_sin_pai(tri_one,t_tri,pool):
'''
output:tri_one,t_tri,pool
from pool,
triplets get one single or pair,randomly
'''
if debug <= 6:
print "******step 6 ****** small tri get ONE begin"
print "tri_one:",tri_one
print "t_tri:",tri
print "pool:",pool
if len(t_tri) == 0 or len(pool) == 0:
if debug <= 6:
print "no tri or sin or pai"
print "tri_one:",tri_one
return True
d_tri = [] #to be deleted from tri which find a sin or a pair
d_ite = [] #item may be a sin or pair
r_stri = t_tri[:] #no necessary for a reversed order by length of t_tri
#r_stri = sorted(stri, key=lambda s:len(s), reverse=1)
if debug <= 6:
print "r_stri:",r_stri
len_tri = sum(map(len,t_tri))
len_pool = sum(map(len,pool))
len_tri_one = sum(map(len,tri_one))
for i in range(len(r_stri)):
if len(pool) < 1:
if debug <= 6:
print "no items in pool:",pool
break
for k in range(len(pool)):
d_ite.append(pool[k])
if debug <= 6:
print "find a d_ite",d_ite
combine(tri_one,r_stri[i],d_ite)
remove_pack(pool,d_ite)
d_ite = []
d_tri.append(r_stri[i])
break
new_len_pool = sum(map(len,pool))
new_len_tri_one = sum(map(len,tri_one))
#some tri has got a ONE
if new_len_pool != len_pool:
remove_pack(t_tri,d_tri)
new_len_tri = sum(map(len,t_tri))
if debug <= 6:
if len_tri + len_pool + len_tri_one != new_len_tri + new_len_pool\
+ new_len_tri_one:
print "numbers inconsistence"
print "step 6 failed combine tri_one",tri_one
return False
if debug <= 6:
print "step6 finished, tri_one <%d>:"%(sum(map(len,tri_one))),tri_one
print "t_tri:",t_tri
print "pool:",pool
return True
#############end step6_tri_sin_pai######################################
def step5_small_plane_pair(plane,stri,pool):
    '''
    output:plane,stri,pool
    from pool,
    small planes get one pair
    notice this step only consider small planes
    (a "small plane" is a 6-card sequence-of-triplets; it is combined
    with exactly one pair taken from the pool)
    '''
    if debug <= 6:
        print "******step 5 ****** small plane plus one pair begin"
        print "plane:",plane
        print "stri:",stri
        print "pool:",pool
    #nothing to do when either side is empty; this is still a success
    if len(stri) == 0 or len(pool) == 0:
        if debug <= 6:
            print "no seq_tri or pai"
            print "plane:",plane
        return True
    d_stri = [] #to be deleted from stri which find a pair
    d_pai = [] #pairs to be deleted from pool
    r_stri = stri[:] #no necessary for a reversed order by length of seq_tri
    #r_stri = sorted(stri, key=lambda s:len(s), reverse=1)
    if debug <= 6:
        print "r_stri:",r_stri
    #snapshot the card counts for the conservation check at the end
    len_stri = sum(map(len,stri))
    len_pool = sum(map(len,pool))
    len_plane = sum(map(len,plane))
    #First let's count the number of pair in pool:
    pai_num = 0;
    for item in pool:
        if is_pai(item):
            pai_num = pai_num + 1
    #no pair available at all: nothing to combine, still a success
    if pai_num < 1 :
        if debug <= 6:
            print "no pai in pool",pool
            print "plane:",plane
        return True
    need_num = 0
    pai_num = 0
    if debug <= 6:
        print "r_stri",r_stri
        print "pool:",pool
        print "start search"
    for i in range(len(r_stri)):
        #re-count the pairs each round, since pairs are consumed below
        pai_num = 0
        for item in pool:
            if is_pai(item):
                pai_num = pai_num + 1
        if pai_num < 1: # this step only take care small planes, so at least 1 pair
            if debug <= 6:
                print "no pai in pool:",pool
            break
        if len(r_stri[i]) == 6: #only consider small plane
            need_num = 1 #only need one pai
            if need_num <= pai_num:
                #scan the pool for the first available pair
                for k in range(len(pool)):
                    if is_pai(pool[k]):
                        d_pai.append(pool[k])
                        need_num = need_num - 1
                        if need_num == 0: #enough sins find
                            if debug <= 6:
                                print "find enough a pai for plane,d_pai",d_pai
                            combine(plane,r_stri[i],d_pai)
                            #del pai from pool
                            remove_pack(pool,d_pai)
                            d_pai = []
                            #record this seq_str ,to delete outside of the circle
                            d_stri.append(r_stri[i])
                            pai_num = 0 #restart count the total pai number
                            break
        else:
            continue #loop the next less stri
    new_len_pool = sum(map(len,pool))
    new_len_plane = sum(map(len,plane))
    if new_len_pool != len_pool:
        #some seqtri has got a pair
        remove_pack(stri,d_stri)
    new_len_stri = sum(map(len,stri))
    if debug <= 6:
        #conservation check: no cards may appear or vanish overall
        if len_stri + len_pool + len_plane != new_len_stri + new_len_pool\
            + new_len_plane:
            print "numbers inconsistence"
            print "step 5 failed combine plane",plane
            return False
    if debug <= 6:
        print "step5 finished, plane <%d>:"%(sum(map(len,plane))),plane
        print "stri:",stri
        print "pool:",pool
    return True
################end step5_small_plane_pair################################
# rule updated qua only get 2 hands cards
# like 33334455 or 3333810, two different sins or two different pairs
def step3_qua_two(qua_two,t_qua,pool):
'''
output:qua_two,t_qua,pool
from pool,
qua get two hands cards
'''
if debug <= 6:
print "\n"
print "******step 3.****** qua get TWO begin"
print "t_qua:",t_qua
print "pool:",pool
if len(t_qua)==0 or len(pool)<2:
if debug <= 6:
print "no qua or 2 more hands item in pool"
return True
d_qua = [] #to be deleted from t_qua which find two sins
d_ite = [] #to be deleted from pool
r_qua = t_qua[:] #for notcie this is a temp of t_qua
if debug <= 6:
print "r_qua:",r_qua
len_qua = sum(map(len,t_qua))
len_pool = sum(map(len,pool))
len_qua_two = sum(map(len,qua_two))
for i in range(len(r_qua)):
need_num = 2 #qua need 2 hands item
pai_num = 0
sin_num = 0
for item in pool:
if is_pai(item):
pai_num = pai_num + 1
else:
sin_num = sin_num + 1
f_find_pai = False
f_two_pai_same = False
need_num = 2
#Bug22 continue to find the same type as the first if enough
#according to number and first intem to decide grab two what
temp = []
if sin_num == 2:
for j in range(len(pool)):
if is_sin(pool[j]):
temp.append(pool[j])
if len(temp) == 2 and temp[0] == temp[1]:
f_find_pai = True
if pai_num == 2:
temp = []
for j in range(len(pool)):
if is_pai(pool[j]):
temp.append(pool[j])
if len(temp)==2 and temp[0] == temp[1]:
f_two_pai_same = True
if f_find_pai:
break
if pai_num < 2 and sin_num < 2:
break
elif pai_num >= 2 and sin_num >= 2:
if is_pai(pool[0]) and not f_two_pai_same:
f_find_pai = True
elif is_pai(pool[0]) and f_two_pai_same:
if f_find_pai:
break
else:
f_find_pai == False
elif sin_num >= 2 and pai_num < 2:
if f_find_pai:
break
f_find_pai = False
elif sin_num < 2 and pai_num >=2:
if f_two_pai_same:
break
f_find_pai = True
else:
f_find_pai = True
#Bug22 continue to find the same type as the first if enough
for j in range(len(pool)):
if f_find_pai:
if is_sin(pool[j]):
continue
else:
#bug28 same item in quaTwo
if len(d_ite) == 1 and need_num == 1:
if d_ite[0] == pool[j]:
if pai_num - 2 <= 0: #no others
d_ite = []
| |
KMSEncrypted: bool = None, KMSKey: str = None, Tags: List = None) -> Dict:
"""
Creates a cached volume on a specified cached volume gateway. This operation is only supported in the cached volume gateway type.
.. note::
Cache storage must be allocated to the gateway before you can create a cached volume. Use the AddCache operation to add cache storage to a gateway.
In the request, you must specify the gateway, size of the volume in bytes, the iSCSI target name, an IP address on which to expose the target, and a unique client token. In response, the gateway creates the volume and returns information about it. This information includes the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.
Optionally, you can provide the ARN for an existing volume as the ``SourceVolumeARN`` for this cached volume, which creates an exact copy of the existing volume’s latest recovery point. The ``VolumeSizeInBytes`` value must be equal to or larger than the size of the copied volume, in bytes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateCachediSCSIVolume>`_
**Request Syntax**
::
response = client.create_cached_iscsi_volume(
GatewayARN='string',
VolumeSizeInBytes=123,
SnapshotId='string',
TargetName='string',
SourceVolumeARN='string',
NetworkInterfaceId='string',
ClientToken='string',
KMSEncrypted=True|False,
KMSKey='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'VolumeARN': 'string',
'TargetARN': 'string'
}
**Response Structure**
- *(dict) --*
- **VolumeARN** *(string) --*
The Amazon Resource Name (ARN) of the configured volume.
- **TargetARN** *(string) --*
The Amazon Resource Name (ARN) of the volume target, which includes the iSCSI name that initiators can use to connect to the target.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:type VolumeSizeInBytes: integer
:param VolumeSizeInBytes: **[REQUIRED]**
The size of the volume in bytes.
:type SnapshotId: string
:param SnapshotId:
The snapshot ID (e.g. \"snap-1122aabb\") of the snapshot to restore as the new cached volume. Specify this field if you want to create the iSCSI storage volume from a snapshot otherwise do not include this field. To list snapshots for your account use `DescribeSnapshots <https://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html>`__ in the *Amazon Elastic Compute Cloud API Reference* .
:type TargetName: string
:param TargetName: **[REQUIRED]**
The name of the iSCSI target used by an initiator to connect to a volume and used as a suffix for the target ARN. For example, specifying ``TargetName`` as *myvolume* results in the target ARN of ``arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume`` . The target name must be unique across all volumes on a gateway.
If you don\'t specify a value, Storage Gateway uses the value that was previously used for this volume as the new target name.
:type SourceVolumeARN: string
:param SourceVolumeARN:
The ARN for an existing volume. Specifying this ARN makes the new volume into an exact copy of the specified existing volume\'s latest recovery point. The ``VolumeSizeInBytes`` value for this new volume must be equal to or larger than the size of the existing volume, in bytes.
:type NetworkInterfaceId: string
:param NetworkInterfaceId: **[REQUIRED]**
The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted. Use DescribeGatewayInformation to get a list of the network interfaces available on a gateway.
Valid Values: A valid IP address.
:type ClientToken: string
:param ClientToken: **[REQUIRED]**
A unique identifier that you use to retry a request. If you retry a request, use the same ``ClientToken`` you specified in the initial request.
:type KMSEncrypted: boolean
:param KMSEncrypted:
True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.
:type KMSKey: string
:param KMSKey:
The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when KMSEncrypted is true. Optional.
:type Tags: list
:param Tags:
A list of up to 50 tags that can be assigned to a cached volume. Each tag is a key-value pair.
.. note::
Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @. The maximum length of a tag\'s key is 128 characters, and the maximum length for a tag\'s value is 256.
- *(dict) --*
A key-value pair that helps you manage, filter, and search for your resource. Allowed characters: letters, white space, and numbers, representable in UTF-8, and the following characters: + - = . _ : /
- **Key** *(string) --* **[REQUIRED]**
Tag key (String). The key can\'t start with aws:.
- **Value** *(string) --* **[REQUIRED]**
Value of the tag key.
:rtype: dict
:returns:
"""
pass
def create_nfs_file_share(self, ClientToken: str, GatewayARN: str, Role: str, LocationARN: str, NFSFileShareDefaults: Dict = None, KMSEncrypted: bool = None, KMSKey: str = None, DefaultStorageClass: str = None, ObjectACL: str = None, ClientList: List = None, Squash: str = None, ReadOnly: bool = None, GuessMIMETypeEnabled: bool = None, RequesterPays: bool = None, Tags: List = None) -> Dict:
"""
Creates a Network File System (NFS) file share on an existing file gateway. In Storage Gateway, a file share is a file system mount point backed by Amazon S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. This operation is only supported for file gateways.
.. warning::
File gateway requires AWS Security Token Service (AWS STS) to be activated to enable you create a file share. Make sure AWS STS is activated in the region you are creating your file gateway in. If AWS STS is not activated in the region, activate it. For information about how to activate AWS STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.
File gateway does not support creating hard or symbolic links on a file share.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateNFSFileShare>`_
**Request Syntax**
::
response = client.create_nfs_file_share(
ClientToken='string',
NFSFileShareDefaults={
'FileMode': 'string',
'DirectoryMode': 'string',
'GroupId': 123,
'OwnerId': 123
},
GatewayARN='string',
KMSEncrypted=True|False,
KMSKey='string',
Role='string',
LocationARN='string',
DefaultStorageClass='string',
ObjectACL='private'|'public-read'|'public-read-write'|'authenticated-read'|'bucket-owner-read'|'bucket-owner-full-control'|'aws-exec-read',
ClientList=[
'string',
],
Squash='string',
ReadOnly=True|False,
GuessMIMETypeEnabled=True|False,
RequesterPays=True|False,
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'FileShareARN': 'string'
}
**Response Structure**
- *(dict) --*
CreateNFSFileShareOutput
- **FileShareARN** *(string) --*
The Amazon Resource Name (ARN) of the newly created file share.
:type ClientToken: string
:param ClientToken: **[REQUIRED]**
A unique string value that you supply that is used by file gateway to ensure idempotent file share creation.
:type NFSFileShareDefaults: dict
:param NFSFileShareDefaults:
File share default values. Optional.
- **FileMode** *(string) --*
The Unix file mode in the form \"nnnn\". For example, \"0666\" represents the default file mode inside the file share. The default value is 0666.
- **DirectoryMode** *(string) --*
The Unix directory mode in the form \"nnnn\". For example, \"0666\" represents the default access mode for all directories inside the file share. The default value is 0777.
- **GroupId** *(integer) --*
The default group ID for the file share (unless the files have another group ID specified). The default value is nfsnobody.
- **OwnerId** *(integer) --*
The default owner ID for files in the file share (unless the files have another owner ID specified). The default value is nfsnobody.
:type GatewayARN: string
:param GatewayARN: **[REQUIRED]**
The Amazon Resource Name (ARN) of the file gateway on which you want to create a file share.
:type KMSEncrypted: boolean
:param KMSEncrypted:
True to use Amazon S3 server side encryption with your own AWS KMS key, or false to use a key managed by Amazon S3. Optional.
:type KMSKey: string
:param KMSKey:
The Amazon Resource Name (ARN) AWS KMS key used for Amazon S3 server side | |
# =============================
# > Command implementations
# ============================
import atexit
import dataclasses
import json
import os
import shutil
import subprocess
import sys
from contextlib import contextmanager
from datetime import datetime, timezone
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Iterator, Optional, Sequence, Set, Tuple
if sys.version_info >= (3, 8):
from typing import Final
else:
from typing_extensions import Final
import click
import rich
import rich.traceback
from rich.markup import escape
from rich.padding import Padding
from rich.theme import Theme
import diff_shades
import diff_shades.results
from diff_shades.analysis import (
GIT_BIN,
RESULT_COLORS,
analyze_projects,
run_cmd,
setup_projects,
)
from diff_shades.config import PROJECTS, Project
from diff_shades.output import (
color_diff,
make_analysis_summary,
make_comparison_summary,
make_project_details_table,
make_rich_progress,
)
from diff_shades.results import Analysis, ProjectResults, diff_two_results, filter_results
# Shared console used for all output in this module.
console: Final = rich.get_console()


def normalize_input(ctx, param, v):
    """Click option callback: casefold a value for case-insensitive
    matching; None (option absent) passes through unchanged.

    (Was a lambda assigned to a name -- PEP 8 E731.)
    """
    return v.casefold() if v is not None else None


# File argument that must already exist and be readable.
READABLE_FILE: Final = click.Path(
    resolve_path=True, exists=True, dir_okay=False, readable=True, path_type=Path
)
# File argument we only need to write (it may not exist yet).
WRITABLE_FILE: Final = click.Path(
    resolve_path=True, dir_okay=False, readable=False, writable=True, path_type=Path
)
def load_analysis(path: Path, msg: str = "analysis", quiet: bool = False) -> Analysis:
    """Read an analysis file from disk, logging where it came from
    (and whether the cache was hit) unless quiet is set."""
    analysis, cached = diff_shades.results.load_analysis(path)
    if quiet:
        return analysis
    console.log(f"Loaded {msg}: {path}{' (cached)' if cached else ''}")
    return analysis
@contextmanager
def get_work_dir(*, use: Optional[Path] = None) -> Iterator[Path]:
    """Yield a working directory: `use` (created if needed, left in
    place afterwards) or a self-cleaning temporary directory."""
    if use is None:
        with TemporaryDirectory(prefix="diff-shades-") as tmp:
            yield Path(tmp)
    else:
        use.mkdir(parents=True, exist_ok=True)
        yield use
def compare_project_pair(
    project: Project, results: ProjectResults, results2: ProjectResults
) -> bool:
    """Print a colored diff for every file whose two results differ,
    preceded (once) by a project header; return whether any differed."""
    header = f"\[{project.name} - {project.url}]"
    if "github" in project.url:
        rev_link = project.url[:-4] + f"/tree/{project.commit}"
        revision = f"╰─> [link={rev_link}]revision {project.commit}[/link]"
    else:
        revision = f"╰─> revision {project.commit}"
    printed_header = False
    for file, first in results.items():
        second = results2[file]
        if first == second:
            continue
        if not printed_header:
            # Emit the header lazily so identical projects stay silent.
            console.print(f"[bold][reformatted]{header}[/][/]")
            console.print(f"[reformatted]{revision}")
            printed_header = True
        diff = diff_two_results(first, second, file=f"{project.name}:{file}", diff_failure=True)
        console.print(color_diff(diff), highlight=False)
    return printed_header
def check_black_args(args: Sequence[str]) -> None:
    """Validate pass-through Black arguments by doing a tiny trial run;
    exits the process with Black's error output if they are rejected."""
    if "--fast" in args or "--safe" in args:
        console.log("[warning]--fast/--safe is ignored, Black is always ran in safe mode.")
    trial_cmd = [sys.executable, "-m", "black", "-", *args]
    try:
        run_cmd(trial_cmd, input="daylily")
    except subprocess.CalledProcessError as failure:
        console.print(f"[error]Invalid black arguments: {' '.join(args)}\n")
        console.print(failure.stdout.strip(), style="italic")
        sys.exit(1)
@click.group()
@click.option(
    "--no-color/--force-color", default=None, help="Force disable/enable colored output."
)
@click.option("--show-locals", is_flag=True, help="Show locals for unhandled exceptions.")
@click.option(
    "--dump-html", type=WRITABLE_FILE, help="Save a HTML copy of the emitted output."
)
@click.option("--clear-cache", is_flag=True, help="Drop all cached analyses.")
@click.version_option(version=diff_shades.__version__, prog_name="diff-shades")
def main(
    no_color: Optional[bool], show_locals: bool, dump_html: Optional[Path], clear_cache: bool
) -> None:
    """
    The Black shade analyser and comparison tool.
    AKA Richard's personal take at a better black-primer (by stealing
    ideas from mypy-primer) :p
    Basically runs Black over millions of lines of code from various
    open source projects. Why? So any changes to Black can be gauged
    on their relative impact.
    \b
    Features include:
    - Simple but readable diffing capabilities
    - Repeatable analyses via --repeat-projects-from
    - Structured JSON output
    - Per-project python_requires support
    - Custom per-analysis formatting configuration
    - Oh and of course, pretty output!
    \b
    Potential tasks / additionals:
    - jupyter notebook support
    - even more helpful output
    - better UX (particularly when things go wrong)
    - code cleanup as my code is messy as usual :p
    """
    # Pretty tracebacks for the whole CLI; click frames are noise, hide them.
    rich.traceback.install(suppress=[click], show_locals=show_locals)
    # Map the tri-state --no-color/--force-color flag onto rich's
    # color_system argument: True (--no-color) -> None disables color
    # entirely, None (flag absent) -> "auto" detection, and False
    # (--force-color) -> "truecolor" forces full color output.
    color_mode_key = {True: None, None: "auto", False: "truecolor"}
    color_mode = color_mode_key[no_color]
    width: Optional[int] = None
    if os.getenv("GITHUB_ACTIONS") == "true":
        # Force colors when running on GitHub Actions (unless --no-color is passed).
        if no_color is not True:
            color_mode = "truecolor"
        # Annoyingly enough rich autodetects the width to be far too small on GHA.
        width = 115
    # fmt: off
    theme = Theme({
        "error": "bold red",
        "warning": "bold yellow",
        "info": "bold",
        **RESULT_COLORS
    })
    # fmt: on
    rich.reconfigure(
        log_path=False, record=dump_html, color_system=color_mode, theme=theme, width=width
    )
    if clear_cache:
        shutil.rmtree(diff_shades.results.CACHE_DIR)
    # The cache directory must exist for later analysis loads/saves.
    diff_shades.results.CACHE_DIR.mkdir(parents=True, exist_ok=True)
    if dump_html:
        # Everything printed is recorded; save it as HTML at process exit.
        atexit.register(console.save_html, path=dump_html)
# fmt: off
@main.command()
@click.argument("results-path", metavar="results-filepath", type=WRITABLE_FILE)
@click.argument("black-args", metavar="[-- black-args]", nargs=-1, type=click.UNPROCESSED)
@click.option(
    "-s", "--select",
    multiple=True,
    callback=lambda ctx, param, values: {p.strip().casefold() for p in values},
    help="Select projects from the main list."
)
@click.option(
    "-e", "--exclude",
    multiple=True,
    callback=lambda ctx, param, values: {p.strip().casefold() for p in values},
    help="Exclude projects from running."
)
@click.option(
    "-w", "--work-dir", "cli_work_dir",
    type=click.Path(dir_okay=True, file_okay=False, resolve_path=True, path_type=Path),
    help=(
        "Directory where project clones are used / stored. By default a"
        " temporary directory is used which will be cleaned up at exit."
        " Use this option to reuse or cache projects."
    )
)
@click.option(
    "--repeat-projects-from",
    type=READABLE_FILE,
    help=(
        "Use the same projects (and commits!) used during another analysis."
        " This is similar to --work-dir but for when you don't have the"
        " checkouts available."
    )
)
@click.option(
    "-v", "--verbose",
    is_flag=True,
    help="Be more verbose."
)
# fmt: on
def analyze(
    results_path: Path,
    black_args: Tuple[str, ...],
    select: Set[str],
    exclude: Set[str],
    cli_work_dir: Optional[Path],
    repeat_projects_from: Optional[Path],
    verbose: bool,
) -> None:
    """Run Black against 'millions' of LOC and save the results."""
    # Preflight: both Black (as a library) and a Git binary are required.
    try:
        import black
    except ImportError as err:
        console.print(f"[error]Couldn't import black: {err}")
        console.print("[info]╰─> This command requires an installation of Black.")
        sys.exit(1)
    if GIT_BIN is None:
        console.print("[error]Couldn't find a Git executable.")
        console.print("[info]╰─> This command requires git sadly enough.")
        sys.exit(1)

    # Refuse to clobber a directory; warn (but proceed) on an existing file.
    if results_path.exists() and results_path.is_file():
        console.log(f"[warning]Overwriting {results_path} as it already exists!")
    elif results_path.exists() and results_path.is_dir():
        console.print(f"[error]{results_path} is a pre-existing directory.")
        console.print("[info]╰─> Can't continue as I won't overwrite a directory.")
        sys.exit(1)
    if black_args:
        check_black_args(black_args)

    # Project list: either replay a previous analysis' projects (and commits)
    # or start from the built-in list, then apply --exclude / --select.
    if repeat_projects_from:
        analysis = load_analysis(repeat_projects_from, msg="blueprint analysis")
        projects = list(analysis.projects.values())
    else:
        projects = PROJECTS
    projects = [p for p in projects if p.name not in exclude]
    if select:
        projects = [p for p in projects if p.name in select]
    # BUG FIX: the original called projects.remove(proj) while iterating
    # `projects`, which skips the element following each removal. Build a
    # new filtered list instead of mutating the one being iterated.
    runnable = []
    for proj in projects:
        if proj.supported_by_runtime:
            runnable.append(proj)
        else:
            msg = f"[warning]Skipping {proj.name} as it requires python{proj.python_requires}"
            console.log(msg)
    projects = runnable

    with get_work_dir(use=cli_work_dir) as work_dir:
        # Phase 1: clone / check out each project.
        with make_rich_progress() as progress:
            title = "[bold cyan]Setting up projects"
            task1 = progress.add_task(title, total=len(projects))
            prepared = setup_projects(projects, work_dir, black_args, progress, task1, verbose)

        # Phase 2: run Black over every file of every prepared project.
        with make_rich_progress() as progress:
            task2 = progress.add_task("[bold magenta]Running black")
            results = analyze_projects(prepared, work_dir, progress, task2, verbose)

        metadata = {
            "black-version": black.__version__,
            "black-extra-args": black_args,
            "created-at": datetime.now(timezone.utc).isoformat(),
            "data-format": 1,
        }
        analysis = Analysis(
            projects={p.name: p for p, _, _ in prepared}, results=results, metadata=metadata
        )

    with open(results_path, "w", encoding="utf-8") as f:
        raw = dataclasses.asdict(analysis)
        # Escaping non-ASCII characters in the JSON blob is very important to keep
        # memory usage and load times manageable. CPython (not sure about other
        # implementations) guarantees that string index operations will be roughly
        # constant time which flies right in the face of the efficient UTF-8 format.
        # Hence why str instances transparently switch between Latin-1 and other
        # constant-size formats. In the worst case a UCS-4 is used exploding
        # memory usage (and load times as memory is not infinitely fast). I've seen
        # peaks of 1GB max RSS with 100MB analyses which is just not OK.
        # See also: https://stackoverflow.com/a/58080893
        json.dump(raw, f, indent=2, ensure_ascii=True)
        f.write("\n")

    console.line()
    panel = make_analysis_summary(analysis)
    console.print(panel)
@main.command()
@click.argument("analysis-path", metavar="analysis", type=READABLE_FILE)
@click.argument("project_key", metavar="[project]", callback=normalize_input, required=False)
@click.argument("file_key", metavar="[file]", required=False)
@click.argument("field_key", metavar="[field]", callback=normalize_input, required=False)
@click.option("-q", "--quiet", is_flag=True, help="Suppress log messages.")
def show(
    analysis_path: Path,
    project_key: Optional[str],
    file_key: Optional[str],
    field_key: Optional[str],
    quiet: bool,
) -> None:
    """
    Show results or metadata from an analysis.
    """
    analysis = load_analysis(analysis_path, quiet=quiet)
    if not quiet:
        console.line()

    # No project given: print the whole-analysis summary and details table.
    if not project_key:
        console.print(make_analysis_summary(analysis))
        console.line()
        console.print(make_project_details_table(analysis))
        return

    # A project but no file: per-project views aren't implemented yet.
    if not file_key:
        # TODO: implement a list view
        # TODO: implement a diff + failures view
        console.print("[error]show-ing a project is not implemented, sorry!")
        sys.exit(26)

    # Project + file: look up the single file result.
    try:
        result = analysis.results[project_key][file_key]
    except KeyError:
        console.print(f"[error]'{file_key}' couldn't be found under {project_key}.")
        sys.exit(1)

    if field_key:
        # A specific field was requested; print just that attribute.
        if not hasattr(result, field_key):
            console.print(f"[error]{file_key} has no '{field_key}' field.")
            console.print(f"[bold]-> FYI the file's status is {result.type}")
            sys.exit(1)
        console.print(getattr(result, field_key), highlight=False, soft_wrap=True)
    elif result.type == "nothing-changed":
        console.print("[bold][nothing-changed]Nothing-changed.")
    elif result.type == "failed":
        console.print(f"[error]{escape(result.error)}")
        console.print(f"[info]-> {escape(result.message)}")
    elif result.type == "reformatted":
        console.print(color_diff(result.diff(file_key)), highlight=False)
@main.command()
@click.argument("analysis-path1", metavar="analysis-one", type=READABLE_FILE)
@click.argument("analysis-path2", metavar="analysis-two", type=READABLE_FILE)
@click.argument("project_key", metavar="[project]", callback=normalize_input, required=False)
@click.option("--check", is_flag=True, help="Return 1 if differences were found.")
@click.option("--diff", "diff_mode", is_flag=True, help="Show a diff of the differences.")
@click.option("--list", "list_mode", is_flag=True, help="List the differing files.")
@click.option("-q", "--quiet", is_flag=True, help="Suppress log messages.")
def compare(
analysis_path1: Path,
analysis_path2: Path,
check: bool,
project_key: Optional[str],
diff_mode: bool,
list_mode: bool,
quiet: bool,
) -> None:
"""Compare two analyses for differences in the results."""
if diff_mode and list_mode:
console.print("[error]--diff and --list can't be used at the same time.")
sys.exit(1)
analysis_one = load_analysis(analysis_path1, msg="first analysis", quiet=quiet)
analysis_two = load_analysis(analysis_path2, msg="second analysis", quiet=quiet)
if project_key is None:
names = {*analysis_one.projects, *analysis_two.projects}
else:
names = {project_key}
shared_projects = []
for n in sorted(names):
if n not in analysis_one.projects or n not in analysis_two.projects:
console.log(f"[warning]Skipping {n} as it's not present in both.")
elif analysis_one.projects[n] != analysis_two.projects[n]:
console.log(f"[warning]Skipping {n} as it was configured differently.")
else:
proj = analysis_one.projects[n]
shared_projects.append((proj, analysis_one.results[n], analysis_two.results[n]))
console.line()
panel = make_comparison_summary([(p1, p2) for _, p1, | |
= 'stcTextWidget'
self.widget = w = stc.StyledTextCtrl(parent,*args,**keys)
# Inject the leo_wrapper_class ivar.
self.widget.leo_wrapper_object = self
w.CmdKeyClearAll() # Essential so backspace is handled properly.
# w.Bind(wx.EVT_KEY_DOWN, self.onChar)
wx.EVT_KEY_DOWN(w,self.onChar)
w.Bind(stc.EVT_STC_MARGINCLICK, self.onMarginClick)
if 0: # Disable undo so the widget doesn't gobble undo.
w.SetUndoCollection(False)
w.EmptyUndoBuffer()
# Init the base class.
name = keys.get('name') or '<unknown stcWidget>'
baseTextWidget.__init__(self,c,baseClassName='stcWidget',name=name,widget=w)
self.initStc()
#@+node:ekr.20090126093408.95: *5* initStc
# Code copied from wxPython demo.
def initStc (self):
    '''One-time configuration of the wrapped StyledTextCtrl: Python lexer,
    margins, fold markers, indentation policy, EOL handling and colour
    styles.  Values are copied from the wxPython demo; called from the
    ctor only.'''
    import keyword
    w = self.widget
    use_fold = True

    # Lexer: Python, seeding the keyword list from the interpreter's own
    # keyword module so it always matches the running Python version.
    w.SetLexer(stc.STC_LEX_PYTHON)
    w.SetKeyWords(0, " ".join(keyword.kwlist))

    # Enable folding
    if use_fold: w.SetProperty("fold", "1" )

    # Highlight tab/space mixing (shouldn't be any)
    w.SetProperty("tab.timmy.whinge.level", "1")

    # Set left and right margins
    w.SetMargins(2,2)

    # Set up the numbers in the margin for margin #1
    w.SetMarginType(1, stc.STC_MARGIN_NUMBER)
    # Reasonable value for, say, 4-5 digits using a mono font (40 pix)
    w.SetMarginWidth(1, 40)

    # Indentation and tab stuff
    w.SetIndent(4)               # Proscribed indent size for wx
    w.SetIndentationGuides(True) # Show indent guides
    w.SetBackSpaceUnIndents(True)# Backspace unindents rather than delete 1 space
    w.SetTabIndents(True)        # Tab key indents
    w.SetTabWidth(4)             # Proscribed tab size for wx
    w.SetUseTabs(False)          # Use spaces rather than tabs, or TabTimmy will complain!

    # White space
    w.SetViewWhiteSpace(False)   # Don't view white space

    # EOL: Since we are loading/saving ourselves, and the
    # strings will always have \n's in them, set the STC to
    # edit them that way.
    w.SetEOLMode(stc.STC_EOL_LF)
    w.SetViewEOL(False)

    # No right-edge mode indicator
    w.SetEdgeMode(wx.stc.STC_EDGE_NONE)

    # Setup a margin to hold fold markers
    if use_fold:
        w.SetMarginType(2, stc.STC_MARGIN_SYMBOL)
        w.SetMarginMask(2, stc.STC_MASK_FOLDERS)
        w.SetMarginSensitive(2, True)
        w.SetMarginWidth(2, 12)

        # and now set up the fold markers
        w.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_BOXPLUSCONNECTED, "white", "black")
        w.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_BOXMINUSCONNECTED, "white", "black")
        w.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_TCORNER, "white", "black")
        w.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_LCORNER, "white", "black")
        w.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_VLINE, "white", "black")
        w.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_BOXPLUS, "white", "black")
        w.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_BOXMINUS, "white", "black")

    # Global default style: a monospace face, chosen per platform.
    if wx.Platform == '__WXMSW__':
        w.StyleSetSpec(stc.STC_STYLE_DEFAULT,
            'fore:#000000,back:#FFFFFF,face:Courier New,size:9')
    elif wx.Platform == '__WXMAC__':
        # TODO: if this looks fine on Linux too, remove the Mac-specific case
        # and use this whenever OS != MSW.
        w.StyleSetSpec(stc.STC_STYLE_DEFAULT,
            'fore:#000000,back:#FFFFFF,face:Courier')
    else:
        w.StyleSetSpec(stc.STC_STYLE_DEFAULT,
            'fore:#000000,back:#FFFFFF,face:Courier,size:9')

    # Clear styles and revert to default.
    w.StyleClearAll()

    # Following style specs only indicate differences from default.
    # The rest remains unchanged.

    # Line numbers in margin
    w.StyleSetSpec(stc.STC_STYLE_LINENUMBER,'fore:#000000,back:#99A9C2')
    # Highlighted brace
    w.StyleSetSpec(stc.STC_STYLE_BRACELIGHT,'fore:#00009D,back:#FFFF00')
    # Unmatched brace
    w.StyleSetSpec(stc.STC_STYLE_BRACEBAD,'fore:#00009D,back:#FF0000')
    # Indentation guide
    w.StyleSetSpec(stc.STC_STYLE_INDENTGUIDE, "fore:#CDCDCD")

    # Python styles
    w.StyleSetSpec(stc.STC_P_DEFAULT, 'fore:#000000')
    # Comments
    w.StyleSetSpec(stc.STC_P_COMMENTLINE, 'fore:#008000,back:#F0FFF0')
    w.StyleSetSpec(stc.STC_P_COMMENTBLOCK, 'fore:#008000,back:#F0FFF0')
    # Numbers
    w.StyleSetSpec(stc.STC_P_NUMBER, 'fore:#008080')
    # Strings and characters
    w.StyleSetSpec(stc.STC_P_STRING, 'fore:#800080')
    w.StyleSetSpec(stc.STC_P_CHARACTER, 'fore:#800080')
    # Keywords
    w.StyleSetSpec(stc.STC_P_WORD, 'fore:#000080,bold')
    # Triple quotes
    w.StyleSetSpec(stc.STC_P_TRIPLE, 'fore:#800080,back:#FFFFEA')
    w.StyleSetSpec(stc.STC_P_TRIPLEDOUBLE, 'fore:#800080,back:#FFFFEA')
    # Class names
    w.StyleSetSpec(stc.STC_P_CLASSNAME, 'fore:#0000FF,bold')
    # Function names
    w.StyleSetSpec(stc.STC_P_DEFNAME, 'fore:#008080,bold')
    # Operators
    w.StyleSetSpec(stc.STC_P_OPERATOR, 'fore:#800000,bold')
    # Identifiers. I leave this as not bold because everything seems
    # to be an identifier if it doesn't match the above criterae
    w.StyleSetSpec(stc.STC_P_IDENTIFIER, 'fore:#000000')

    # Caret color
    w.SetCaretForeground("BLUE")
    # Selection background.
    # NOTE(review): the hard-coded colour on the next line is immediately
    # overridden by the system-highlight calls that follow (1 == True for
    # the first argument) -- confirm whether the '#66CCFF' line is wanted.
    w.SetSelBackground(1, '#66CCFF')
    w.SetSelBackground(True, wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHT))
    w.SetSelForeground(True, wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT))
#@+node:ekr.20090126093408.96: *5* onMarginClick & helpers
def onMarginClick(self, evt):
    '''Handle clicks in the fold margin (margin #2): toggle, force-open or
    force-close folds depending on the Shift/Control modifiers.'''
    if g.app.killed or self.c.frame.killed: return
    # NOTE(review): rebinding ``self`` to the underlying widget is highly
    # unusual; every attribute below (FoldAll, LineFromPosition, ...) is
    # then resolved on the StyledTextCtrl rather than on this wrapper.
    # Verify that FoldAll/Expand actually exist on the widget object.
    self = w = self.widget
    # fold and unfold as needed
    if evt.GetMargin() == 2:
        if evt.GetShift() and evt.GetControl():
            # Shift+Ctrl-click anywhere in the margin: fold/unfold everything.
            self.FoldAll()
        else:
            lineClicked = self.LineFromPosition(evt.GetPosition())
            if self.GetFoldLevel(lineClicked) & stc.STC_FOLDLEVELHEADERFLAG:
                if evt.GetShift():
                    # Shift-click: force the clicked fold open one level.
                    self.SetFoldExpanded(lineClicked, True)
                    self.Expand(lineClicked, True, True, 1)
                elif evt.GetControl():
                    # Ctrl-click: force the fold fully closed or fully open.
                    if self.GetFoldExpanded(lineClicked):
                        self.SetFoldExpanded(lineClicked, False)
                        self.Expand(lineClicked, False, True, 0)
                    else:
                        self.SetFoldExpanded(lineClicked, True)
                        self.Expand(lineClicked, True, True, 100)
                else:
                    # Plain click: simple toggle of the clicked fold.
                    self.ToggleFold(lineClicked)
#@+node:ekr.20090126093408.97: *6* FoldAll
def FoldAll(self):
    '''Fold or unfold every top-level fold point in the buffer.

    The direction is taken from the first fold header found: if it is
    currently expanded, everything is collapsed, and vice versa.
    NOTE(review): written (like the wxPython demo it came from) to run
    with ``self`` bound to the StyledTextCtrl -- see onMarginClick.'''
    lineCount = self.GetLineCount()
    expanding = True
    # find out if we are folding or unfolding
    for lineNum in range(lineCount):
        if self.GetFoldLevel(lineNum) & stc.STC_FOLDLEVELHEADERFLAG:
            expanding = not self.GetFoldExpanded(lineNum)
            break
    lineNum = 0
    while lineNum < lineCount:
        level = self.GetFoldLevel(lineNum)
        # Only act on fold headers at the base (top) fold level.
        if (
            level & stc.STC_FOLDLEVELHEADERFLAG and
            (level & stc.STC_FOLDLEVELNUMBERMASK) == stc.STC_FOLDLEVELBASE
        ):
            if expanding:
                self.SetFoldExpanded(lineNum, True)
                # Expand returns the next line to consider; the -1 here
                # compensates for the += 1 at the bottom of the loop.
                lineNum = self.Expand(lineNum, True)
                lineNum = lineNum - 1
            else:
                lastChild = self.GetLastChild(lineNum, -1)
                self.SetFoldExpanded(lineNum, False)
                if lastChild > lineNum:
                    self.HideLines(lineNum+1, lastChild)
        lineNum += 1
#@+node:ekr.20090126093408.98: *6* Expand
def Expand (self,line,doExpand,force=False,visLevels=0,level=-1):
    '''Recursively show or hide the lines beneath fold header *line*.

    Returns the first line number after the processed range.  When
    *force* is true, visibility is dictated by *visLevels* (how many
    nesting levels to keep visible); otherwise each nested header's own
    expanded state is honoured.
    NOTE(review): expects ``self`` to be the StyledTextCtrl (demo code).'''
    lastChild = self.GetLastChild(line,level)
    line = line + 1
    while line <= lastChild:
        if force:
            # Forced mode: show while we still have visible levels left.
            if visLevels > 0:
                self.ShowLines(line,line)
            else:
                self.HideLines(line,line)
        else:
            if doExpand:
                self.ShowLines(line,line)
        if level == -1:
            level = self.GetFoldLevel(line)
        if level & stc.STC_FOLDLEVELHEADERFLAG:
            if force:
                # Mark nested headers expanded only if they stay visible.
                if visLevels > 1:
                    self.SetFoldExpanded(line,True)
                else:
                    self.SetFoldExpanded(line,False)
                line = self.Expand(line,doExpand,force,visLevels-1)
            else:
                # Unforced: recurse, expanding only already-expanded folds.
                if doExpand and self.GetFoldExpanded(line):
                    line = self.Expand(line,True,force,visLevels-1)
                else:
                    line = self.Expand(line,False,force,visLevels-1)
        else:
            line += 1
    return line
#@+node:ekr.20090126093408.99: *5* Wrapper methods
#@+node:ekr.20090126093408.100: *6* bindings (stc)
# Specify the names of widget-specific methods.
# These particular names are the names of wx.TextCtrl methods.
# Each one-liner forwards a generic baseTextWidget operation to the
# corresponding StyledTextCtrl call on self.widget.
def _appendText(self,s): return self.widget.AppendText(s)
def _get(self,i,j): return self.widget.GetTextRange(i,j)
def _getAllText(self): return self.widget.GetText()
def _getFocus(self): return self.widget.FindFocus()
def _getInsertPoint(self): return self.widget.GetCurrentPos()
def _getLastPosition(self): return self.widget.GetLength()
def _getSelectedText(self): return self.widget.GetSelectedText()
def _getYScrollPosition(self): return 0,0 # Could also return None.
def _getSelectionRange(self): return self.widget.GetSelection()
def _hitTest(self,pos): return self.widget.HitTest(pos)
#def _insertText(self,i,s): return self.widget.InsertText(i,s)
def _scrollLines(self,n): return self.widget.ScrollToLine(n)
def _see(self,i): g.trace('oops',i) # Should not be called.
def _setAllText(self,s): return self.widget.SetText(s)
def _setBackgroundColor(self,color): return self.widget.SetBackgroundColour(color)
def _setFocus(self): return self.widget.SetFocus()
def _setInsertPoint(self,i): g.trace('oops',i) # Should not be called.
def _setSelectionRange(self,i,j): g.trace('oops',i,j) # Should not be called.
def _setYScrollPosition(self,i): pass
#@+node:ekr.20090126093408.101: *6* Overrides of baseTextWidget methods
#@+node:ekr.20090126093408.102: *7* see & seeInsertPoint
def see(self,index):
    '''Scroll the widget so the line containing *index* is visible.'''
    text = self.getAllText()
    row, _col = g.convertPythonIndexToRowCol(text, index)
    self.widget.ScrollToLine(row)
def seeInsertPoint(self):
    '''Scroll the widget so the line containing the insert point is visible.'''
    text = self.getAllText()
    insert = self.getInsertPoint()
    row, _col = g.convertPythonIndexToRowCol(text, insert)
    self.widget.ScrollToLine(row)
#@+node:ekr.20090126093408.103: *7* insert
def insert(self,i,s):
    '''Override the baseTextWidget insert method.

    Rebuilds the whole buffer around the insertion point, which works
    around an apparent stc problem with direct insertion.'''
    pos = self.toPythonIndex(i)
    text = self.getAllText()
    self.setAllText(text[:pos] + s + text[pos:])
#@+node:ekr.20090126093408.104: *7* stc.setInsertPoint
def setInsertPoint (self,i):
    '''Place the caret at position *i* with an empty selection.'''
    pos = self.toGuiIndex(i)
    # A zero-length selection positions the caret; SetCurrentPos keeps
    # the STC's notion of the current position in sync with it.
    self.widget.SetSelection(pos, pos)
    self.widget.SetCurrentPos(pos)
#@+node:ekr.20090126093408.105: *7* stc.setSelectionRange
def setSelectionRange (self,i,j,insert=None):
    '''Select the range [i, j) and optionally record/place the caret.

    The caret ends up at j when *insert* is None or equal to j;
    otherwise the selection is made "backwards" so the caret lands at i.'''
    start = self.toGuiIndex(i)
    end = self.toGuiIndex(j)
    if insert is None:
        self.virtualInsertPoint = None
    else:
        self.virtualInsertPoint = self.toGuiIndex(insert)
    # Apparently, both parts of the selection must be set at once.
    if insert in (None, j):
        self.widget.SetSelection(start, end)
        self.widget.SetCurrentPos(end)
    else:
        self.widget.SetSelection(end, start)
        self.widget.SetCurrentPos(start)
#@+node:ekr.20090126093408.106: *7* yview (to do)
def yview (self,*args):
    '''w.yview('moveto',y) or w.yview()'''
    # Vertical-scroll state is not tracked for stc; always report the top.
    return 0, 0
#@+node:ekr.20090126093408.107: *7* xyToGui/PythonIndex (to do)
def xyToPythonIndex (self,x,y):
    '''Map widget coordinates (x, y) to a Python index.

    Stub: the hit-test is performed but its result is deliberately
    discarded and 0 is returned.'''
    point = wx.Point(x,y)
    stc.StyledTextCtrl.HitTest(self.widget, point)
    return 0 ### Non-zero value may loop.
#@-others
#@-others
#@+node:ekr.20090126093408.108: *3* wxComparePanel class (not ready yet)
"""Leo's base compare class."""
#@@language python
#@@tabwidth -4
#@@pagewidth 80
import leo.core.leoGlobals as g
import leo.core.leoCompare as leoCompare
class wxComparePanel (leoCompare.leoCompare): #,leoWxDialog):
"""A class that creates Leo's compare panel."""
#@+others
#@+node:ekr.20090126093408.109: *4* Birth...
#@+node:ekr.20090126093408.110: *5* wxComparePanel.__init__
def __init__ (self,c):
    '''Ctor for the wxComparePanel class.

    Initializes the leoCompare base class and a few default option
    ivars; all Tk-era widget setup below is disabled (``if 0:``).'''
    # Init the base class.
    leoCompare.leoCompare.__init__ (self,c)
    ###leoTkinterDialog.leoTkinterDialog.__init__(self,c,"Compare files and directories",resizeable=False)
    if g.app.unitTesting: return
    self.c = c
    # NOTE(review): dead code -- and it references Tk, which is not
    # imported in this module, so enabling it would raise NameError.
    if 0:
        #@+<< init tkinter compare ivars >>
        #@+node:ekr.20090126093408.111: *6* << init tkinter compare ivars >>
        # Ivars pointing to Tk elements.
        self.browseEntries = []
        self.extensionEntry = None
        self.countEntry = None
        self.printButtons = []
        # No corresponding ivar in the leoCompare class.
        self.useOutputFileVar = Tk.IntVar()
        # These all correspond to ivars in leoCompare
        self.appendOutputVar = Tk.IntVar()
        self.ignoreBlankLinesVar = Tk.IntVar()
        self.ignoreFirstLine1Var = Tk.IntVar()
        self.ignoreFirstLine2Var = Tk.IntVar()
        self.ignoreInteriorWhitespaceVar = Tk.IntVar()
        self.ignoreLeadingWhitespaceVar = Tk.IntVar()
        self.ignoreSentinelLinesVar = Tk.IntVar()
        self.limitToExtensionVar = Tk.IntVar()
        self.makeWhitespaceVisibleVar = Tk.IntVar()
        self.printBothMatchesVar = Tk.IntVar()
        self.printMatchesVar = Tk.IntVar()
        self.printMismatchesVar = Tk.IntVar()
        self.printTrailingMismatchesVar = Tk.IntVar()
        self.stopAfterMismatchVar = Tk.IntVar()
        #@-<< init tkinter compare ivars >>
    # These ivars are set from Entry widgets.
    self.limitCount = 0
    self.limitToExtension = None
    # The default file name in the "output file name" browsers.
    self.defaultOutputFileName = "CompareResults.txt"
    # NOTE(review): also dead code -- frame creation was Tk-specific.
    if 0:
        self.createTopFrame()
        self.createFrame()
#@+node:ekr.20090126093408.112: *5* finishCreate (tkComparePanel)
# Initialize ivars from config parameters.
def finishCreate (self):
c = self.c
# File names.
for i,option in (
(0,"compare_file_1"),
(1,"compare_file_2"),
(2,"output_file") ):
name = c.config.getString(option)
if name and len(name) > 0:
e = self.browseEntries[i]
e.delete(0,"end")
e.insert(0,name)
name = c.config.getString("output_file")
b = g.choose(name and len(name) > 0,1,0)
self.useOutputFileVar.set(b)
# File options.
b = c.config.getBool("ignore_first_line_of_file_1")
if b == None: b = 0
self.ignoreFirstLine1Var.set(b)
b = c.config.getBool("ignore_first_line_of_file_2")
if b == None: b = 0
self.ignoreFirstLine2Var.set(b)
b = c.config.getBool("append_output_to_output_file")
if b == None: b = 0
self.appendOutputVar.set(b)
ext = c.config.getString("limit_directory_search_extension")
b = ext and len(ext) > 0
b = g.choose(b and b != 0,1,0)
self.limitToExtensionVar.set(b)
if | |
# -*- coding: utf-8 -*-
"""Copy of t5-trivia
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1DDn-D-0e57hlfCQIzIT9QVGN9K_5yFqk
<a href="https://colab.research.google.com/github/google-research/text-to-text-transfer-transformer/blob/master/notebooks/t5-trivia.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
##### Copyright 2019 The T5 Authors
Licensed under the Apache License, Version 2.0 (the "License");
"""
# Copyright 2019 The T5 Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""# Fine-Tuning the Text-To-Text Transfer Transformer (T5) for Closed-Book Question Answering
## _Or: What does T5 know?_
*The following tutorial guides you through the process of fine-tuning a pre-trained T5 model, evaluating its accuracy, and using it for prediction,
all on a free Google Cloud TPU <a href="https://colab.research.google.com/github/google-research/text-to-text-transfer-transformer/blob/master/notebooks/t5-trivia.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>.*
### Background
T5 was introduced in the paper [_Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer_](https://arxiv.org/abs/1910.10683). In that paper, we provided a comprehensive picture of how we pre-trained a standard text-to-text Transformer model on a large text corpus, achieving state-of-the-art results on many NLP tasks after fine-tuning.
We pre-trained T5 on a mixture of supervised and unsupervised tasks with the majority of data coming from an unlabeled dataset we developed called [C4](https://www.tensorflow.org/datasets/catalog/c4). C4 is based on a massive scrape of the web produced by [Common Crawl](https://commoncrawl.org). Loosely speaking, pre-training on C4 ideally gives T5 an understanding of natural language in addition to general world knowledge.
### How can we assess what T5 knows?
As the name implies, T5 is a text-to-text model, which enables us to train it on arbitrary tasks involving a textual input and output. As we showed in our paper, a huge variety of NLP tasks can be cast in this format, including translation, summarization, and even classification and regression tasks.
One way to use this text-to-text framework is on reading comprehension problems, where the model is fed some context along with a question and is trained to predict the question's answer. For example, we might feed the model the text from the Wikipedia article about [<NAME>](https://en.wikipedia.org/wiki/Hurricane_Connie) along with the question "On what date did <NAME> occur?" and train the model to predict the answer "August 3rd, 1955".
A related task is open-domain question answering (QA) where the model is not provided with this oracle context. Typically, open-domain QA systems include a mechanism to look up information in an external knowledge source. This setting is similar to an "open-book" exam.
In this notebook, we'll be training T5 on a variant of this task which we call **closed-book question answering**. In closed-book QA, we feed the model a question *without any context or access to external knowledge* and train it to predict the answer. Since the model doesn't receive any context, the primary way it can learn to answer these questions is based on the "knowledge" it obtained during pre-training. We don't expect T5 to contain super specific information, so we will be focusing on two question-answering datasets which largely include trivia questions (i.e. facts about well-known subjects). [Similar](https://arxiv.org/abs/1909.01066) [investigations](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) have recently been done to test the knowledge stored by BERT and GPT-2.
T5 was not pre-trained on closed-book QA, so in this notebook we'll first create two new tasks and then use the [`t5`](https://github.com/google-research/text-to-text-transfer-transformer) library to fine-tune, evaluate, and obtain predictions from T5. In the end, T5's performance on closed-book QA can give us a sense of what kind (and how much) information T5 managed to learn during pre-training.
We published a [more in-depth investigation](https://tiny.cc/t5-qa) of closed-book QA with T5 where we achieved surprisingly strong performance on open-domain variants of Natural Questions, WebQuestions, and TriviaQA. The code in this notebook is a simplified version of those experiments but still produces good results.
### Caveats
* While we provide instructions for running on a [Cloud TPU](https://cloud.google.com/tpu/) via Colab for free, a [Google Cloud Storage (GCS)](http://console.cloud.google.com/storage) bucket is required for storing model parameters and data. The [GCS free tier](https://cloud.google.com/free/) provides 5 GB of storage, which should be enough to train the `large` model and smaller but not the `3B` or `11B` parameter models. You can use part of your initial $300 credit to get more space.
* The Cloud TPU provided by Colab (a `v2-8`) does not have enough memory to fine-tune the `11B` parameter model. For this model, you will need to fine-tune inside of a GCP instance (see [README](https://github.com/google-research/text-to-text-transfer-transformer/)).
# Set Up
<h3><a href="https://cloud.google.com/tpu/"><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/tpu-hexagon.png" width="50"></a> Train on TPU</h3>
1. Create a Cloud Storage bucket for your data and model checkpoints at http://console.cloud.google.com/storage, and fill in the `BASE_DIR` parameter in the following form. There is a [free tier](https://cloud.google.com/free/) if you do not yet have an account.
1. On the main menu, click Runtime and select **Change runtime type**. Set "TPU" as the hardware accelerator.
1. Run the following cell and follow instructions to:
* Set up a Colab TPU running environment
* Verify that you are connected to a TPU device
* Upload your credentials to TPU to access your GCS bucket
"""
# Commented out IPython magic to ensure Python compatibility.
# TODO(adarob): Add support for 2.x.
# %tensorflow_version 1.x
import datetime
import functools
import json
import os
import pprint
import random
import string
import sys
import tensorflow as tf
# GCS bucket that will hold the converted data and model checkpoints.
BASE_DIR = "gs://base" # @param { type: "string" }
if not BASE_DIR or BASE_DIR == "gs://":
    raise ValueError("You must enter a BASE_DIR.")
DATA_DIR = os.path.join(BASE_DIR, "data")
MODELS_DIR = os.path.join(BASE_DIR, "models")
# Flag distinguishing the Colab/TPU environment from a local run.
ON_CLOUD = True

if ON_CLOUD:
    # Fail fast with a readable message when not on a TPU runtime.
    assert "COLAB_TPU_ADDR" in os.environ, "ERROR: Not connected to a TPU runtime; please see the first cell in this notebook for instructions!"
    TPU_ADDRESS = "grpc://" + os.environ["COLAB_TPU_ADDR"]
    TPU_TOPOLOGY = "2x2"
    print("TPU address is", TPU_ADDRESS)

    from google.colab import auth
    auth.authenticate_user()
    with tf.Session(TPU_ADDRESS) as session:
        print('TPU devices:')
        pprint.pprint(session.list_devices())

        # Upload credentials to TPU.
        with open('/content/adc.json', 'r') as f:
            auth_info = json.load(f)
        tf.contrib.cloud.configure_gcs(session, credentials=auth_info)
        # Now credentials are set for all future sessions on this TPU.
# @title Install and import required packages
# NOTE(review): the next three physical lines are a garbled notebook export
# of the Colab cell magic "!pip install -qU t5"; they are not valid Python
# and only make sense inside an IPython/Colab kernel.
if ON_CLOUD:
    !pip
    install - qU
    t5

import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)

import t5
import tensorflow as tf
import tensorflow_datasets as tfds
import time

# Improve logging.
from contextlib import contextmanager
import logging as py_logging

if ON_CLOUD:
    # Avoid duplicate log lines and make INFO-level progress visible.
    tf.get_logger().propagate = False
    py_logging.root.setLevel('INFO')
@contextmanager
def tf_verbosity_level(level):
    """Context manager: temporarily set TF logging verbosity to *level*.

    The previous verbosity is restored on exit.  BUG FIX: the original
    skipped restoration when the body raised; the try/finally guarantees
    it even on error.
    """
    og_level = tf.logging.get_verbosity()
    tf.logging.set_verbosity(level)
    try:
        yield
    finally:
        tf.logging.set_verbosity(og_level)
"""# Creating new Tasks and Mixture
Two core components of the T5 library are `Task` and `Mixture` objects.
A `Task` is a dataset along with preprocessing functions and evaluation metrics. A `Mixture` is a collection of `Task` objects along with a mixing rate or a function defining how to compute a mixing rate based on the properties of the constituent `Tasks`.
For this example, we will fine-tune the model to do closed-book question answering.
### Natural Questions
[Natural Questions (NQ)](https://ai.google.com/research/NaturalQuestions) is a challenging corpus for open-domain QA. Each example includes a question along with an entire Wikipedia article that may or may not contain its answer. The goal is to produce the correct answer given this context. In our case, we will be ignoring the provided context in hopes that the model will learn to find the answers from the world knowledge it has acquired during pre-training.
Since the raw data splits are stored as JSONL files, we will first need to convert them to TSV format to make them parseable in TensorFlow. We will also take the opportunity to drop information we will not be using, remove questions with multiple answers, and to do a bit of cleaning of the text.
"""
import gzip
import json
import os
# Public directory of Natural Questions data on GCS.
NQ_JSONL_DIR = "gs://natural_questions/v1.0-simplified/"
# Source filenames (gzipped JSONL) for each split in the public bucket.
NQ_SPLIT_FNAMES = {
    "train": "simplified-nq-train.jsonl.gz",
    "validation": "nq-dev-all.jsonl.gz"
}
# Destination paths in our own bucket: cached example counts and the
# converted TSV file for each split.
nq_counts_path = os.path.join(DATA_DIR, "nq-counts.json")
nq_tsv_path = {
    "train": os.path.join(DATA_DIR, "nq-train.tsv"),
    "validation": os.path.join(DATA_DIR, "nq-validation.tsv")
}
def nq_jsonl_to_tsv(in_fname, out_fname):
def extract_answer(tokens, span):
"""Reconstruct answer from token span and remove extra spaces."""
start, end = span["start_token"], span["end_token"]
ans = " ".join(tokens[start:end])
# Remove incorrect spacing around punctuation.
ans = ans.replace(" | |
yaml.safe_load(
subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE
).communicate()[0])
if 'environment' in ndata:
env = ndata['environment']
else:
env = 'base'
if 'classes' in ndata:
if isinstance(ndata['classes'], dict):
ret[env] = list(ndata['classes'])
elif isinstance(ndata['classes'], list):
ret[env] = ndata['classes']
else:
return ret
# Evaluate all configured master_tops interfaces
opts = {}
grains = {}
if 'opts' in load:
opts = load['opts']
if 'grains' in load['opts']:
grains = load['opts']['grains']
for fun in self.tops:
if fun not in self.opts.get('master_tops', {}):
continue
try:
ret.update(self.tops[fun](opts=opts, grains=grains))
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
'Top function {0} failed with error {1} for minion '
'{2}'.format(
fun, exc, load['id']
)
)
return ret
def _master_opts(self, load):
    '''
    Return the subset of master options that a minion needs.

    Always includes a ``file_roots`` mapping with one (empty) entry per
    known file-server environment.  When the request carries
    ``env_only`` the reply stops there; otherwise a handful of
    state-rendering options are copied from the master config as well.

    :param load: request payload; only ``env_only`` is inspected.
    :return: dict of master options for the minion.
    '''
    # One empty file_roots entry per file-server environment.
    mopts = {'file_roots': dict((name, []) for name in self._file_envs())}
    if load.get('env_only'):
        return mopts
    # Mirror the state-related master options the minion relies on.
    for key in ('renderer', 'failhard', 'state_top', 'nodegroups',
                'state_auto_order'):
        mopts[key] = self.opts[key]
    return mopts
def _mine_get(self, load):
    '''
    Gathers the data from the specified minions' mine

    Expands the target expression into concrete minion ids, then reads
    each minion's cached ``mine.p`` file and collects the entry stored
    under the requested mine function.

    :param load: request payload; must contain ``id``, ``tgt`` and
        ``fun``, plus a ``tok`` authentication token.
    :return: dict of ``{minion_id: mine_data}`` (possibly empty), ``{}``
        when validation/auth fails, or ``False`` when ``tok`` is missing.
    '''
    # Structurally incomplete request -- nothing to do.
    if any(key not in load for key in ('id', 'tgt', 'fun')):
        return {}
    if 'tok' not in load:
        log.error(
            'Received incomplete call from {0} for {1!r}, missing {2!r}'
            .format(
                load['id'],
                inspect_stack()['co_name'],
                'tok'
            ))
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warn(
            'Minion id {0} is not who it says it is!'.format(
                load['id']
            )
        )
        return {}
    # Token verified -- strip it so it is not propagated further.
    load.pop('tok')
    ret = {}
    if not bonneville.utils.verify.valid_id(self.opts, load['id']):
        return ret
    # Expand the target expression (glob by default) to minion ids.
    checker = bonneville.utils.minions.CkMinions(self.opts)
    minions = checker.check_minions(
        load['tgt'],
        load.get('expr_form', 'glob')
    )
    for minion in minions:
        mine = os.path.join(
            self.opts['cachedir'],
            'minions',
            minion,
            'mine.p')
        try:
            with bonneville.utils.fopen(mine, 'rb') as fp_:
                fdata = self.serial.load(fp_).get(load['fun'])
                if fdata:
                    ret[minion] = fdata
        except Exception:
            # Missing or unreadable mine cache for this minion -- skip.
            continue
    return ret
def _mine(self, load):
    '''
    Return the mine data

    Persists (or merges into) the calling minion's mine cache at
    ``<cachedir>/minions/<id>/mine.p`` when mine caching is enabled.

    :param load: payload with ``id`` and ``data`` plus a ``tok`` auth
        token; optional ``clear`` replaces the cache instead of merging.
    :return: ``True`` on success, ``False`` for malformed requests,
        ``{}`` when token verification fails.
    '''
    if 'id' not in load or 'data' not in load:
        return False
    if not bonneville.utils.verify.valid_id(self.opts, load['id']):
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from {0} for {1!r}, missing {2!r}'
            .format(
                load['id'],
                inspect_stack()['co_name'],
                'tok'
            ))
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warn(
            'Minion id {0} is not who it says it is!'.format(
                load['id']
            )
        )
        return {}
    load.pop('tok')
    if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
        cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
        if not os.path.isdir(cdir):
            os.makedirs(cdir)
        datap = os.path.join(cdir, 'mine.p')
        if not load.get('clear', False):
            # Merge the new entries over whatever is already cached.
            if os.path.isfile(datap):
                with bonneville.utils.fopen(datap, 'rb') as fp_:
                    new = self.serial.load(fp_)
                    if isinstance(new, dict):
                        new.update(load['data'])
                        load['data'] = new
        with bonneville.utils.fopen(datap, 'w+b') as fp_:
            fp_.write(self.serial.dumps(load['data']))
    return True
def _mine_delete(self, load):
    '''
    Allow the minion to delete a specific function from its own mine

    Removes the ``load['fun']`` entry from the minion's cached
    ``mine.p`` (when mine caching is enabled) and rewrites the file.

    :param load: payload with ``id`` and ``fun`` plus a ``tok`` token.
    :return: ``True`` on success or no-op, ``False`` for malformed
        requests or cache I/O errors, ``{}`` when auth fails.
    '''
    if 'id' not in load or 'fun' not in load:
        return False
    if not bonneville.utils.verify.valid_id(self.opts, load['id']):
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from {0} for {1!r}, missing {2!r}'
            .format(
                load['id'],
                inspect_stack()['co_name'],
                'tok'
            ))
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warn(
            'Minion id {0} is not who it says it is!'.format(
                load['id']
            )
        )
        return {}
    load.pop('tok')
    if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
        cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
        if not os.path.isdir(cdir):
            # Nothing cached for this minion -- treat as success.
            return True
        datap = os.path.join(cdir, 'mine.p')
        if os.path.isfile(datap):
            try:
                with bonneville.utils.fopen(datap, 'rb') as fp_:
                    mine_data = self.serial.load(fp_)
                if isinstance(mine_data, dict):
                    # Only rewrite the file if the entry actually existed.
                    if mine_data.pop(load['fun'], False):
                        with bonneville.utils.fopen(datap, 'w+b') as fp_:
                            fp_.write(self.serial.dumps(mine_data))
            except OSError:
                return False
    return True
def _mine_flush(self, load):
    '''
    Allow the minion to delete all of its own mine contents

    After validating the caller (well-formed id plus auth token) this
    removes the minion's cached ``mine.p`` file, if mine caching is
    active and such a file exists.

    :param load: payload carrying ``id`` and a ``tok`` auth token.
    :return: ``True`` on success or no-op, ``False`` for malformed
        requests or removal errors, ``{}`` when auth fails.
    '''
    if 'id' not in load:
        return False
    if not bonneville.utils.verify.valid_id(self.opts, load['id']):
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from {0} for {1!r}, missing {2!r}'
            .format(load['id'], inspect_stack()['co_name'], 'tok'))
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # Token verification failed -- ignore the request entirely.
        log.warn(
            'Minion id {0} is not who it says it is!'.format(load['id']))
        return {}
    load.pop('tok')
    caching = (self.opts.get('minion_data_cache', False)
               or self.opts.get('enforce_mine_cache', False))
    if caching:
        datap = os.path.join(
            self.opts['cachedir'], 'minions', load['id'], 'mine.p')
        if not os.path.isdir(os.path.dirname(datap)):
            # No cache directory means nothing to flush.
            return True
        if os.path.isfile(datap):
            try:
                os.remove(datap)
            except OSError:
                return False
    return True
def _file_recv(self, load):
    '''
    Allows minions to send files to the master, files are sent to the
    master file cache

    The file lands under ``<cachedir>/minions/<id>/files/<path>``.
    ``loc`` is the byte offset at which ``data`` is written (0 means
    truncate/replace; a non-zero offset appends at that position).

    :param load: payload with ``id``, ``path`` (relative destination),
        ``loc`` and ``data``, plus a ``tok`` auth token.
    :return: ``True`` on success, ``False`` for malformed or forbidden
        requests, ``{}`` when token verification fails.
    '''
    if any(key not in load for key in ('id', 'path', 'loc')):
        return False
    # file_recv must be explicitly enabled and the path must be relative.
    if not self.opts['file_recv'] or os.path.isabs(load['path']):
        return False
    if not bonneville.utils.verify.valid_id(self.opts, load['id']):
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from {0} for {1!r}, missing {2!r}'
            .format(
                load['id'],
                inspect_stack()['co_name'],
                'tok'
            ))
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warn(
            'Minion id {0} is not who it says it is!'.format(
                load['id']
            )
        )
        return {}
    load.pop('tok')
    # Resolve the destination and verify it cannot escape the
    # per-minion files subtree.  The previous "'../' in path" substring
    # test missed traversals such as 'a/..', which could overwrite
    # master files -- normalizing the joined path closes that hole.
    fdir = os.path.normpath(os.path.join(
        self.opts['cachedir'],
        'minions',
        load['id'],
        'files'))
    cpath = os.path.normpath(os.path.join(fdir, load['path']))
    if not cpath.startswith(fdir + os.sep):
        # Path traversal attempt -- can overwrite master files!!
        return False
    cdir = os.path.dirname(cpath)
    if not os.path.isdir(cdir):
        try:
            os.makedirs(cdir)
        except os.error:
            # Raced with another writer creating the directory.
            pass
    # Append when writing at a non-zero offset into an existing file,
    # otherwise start the file fresh.
    if os.path.isfile(cpath) and load['loc'] != 0:
        mode = 'ab'
    else:
        mode = 'wb'
    with bonneville.utils.fopen(cpath, mode) as fp_:
        if load['loc']:
            fp_.seek(load['loc'])
        fp_.write(load['data'])
    return True
def _pillar(self, load):
    '''
    Return the pillar data for the minion

    Compiles pillar for the calling minion and, when the minion data
    cache is enabled, persists the grains plus compiled pillar to
    ``<cachedir>/minions/<id>/data.p``.

    NOTE(review): unlike the mine/file handlers, this path performs no
    ``tok`` verification -- confirm upstream transport auth covers it.

    :param load: payload with ``id``, ``grains`` and ``env``; optional
        ``ext`` for ext_pillar configuration.
    :return: the compiled pillar dict, or ``False`` for bad requests.
    '''
    if any(key not in load for key in ('id', 'grains', 'env')):
        return False
    if not bonneville.utils.verify.valid_id(self.opts, load['id']):
        return False
    pillar = bonneville.pillar.Pillar(
        self.opts,
        load['grains'],
        load['id'],
        load['env'],
        load.get('ext'))
    data = pillar.compile_pillar()
    if self.opts.get('minion_data_cache', False):
        cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
        if not os.path.isdir(cdir):
            os.makedirs(cdir)
        datap = os.path.join(cdir, 'data.p')
        # Cache both the reported grains and the compiled pillar.
        with bonneville.utils.fopen(datap, 'w+b') as fp_:
            fp_.write(
                self.serial.dumps(
                    {'grains': load['grains'],
                     'pillar': data})
            )
    return data
def _minion_event(self, load):
    '''
    Receive an event from the minion and fire it on the master event
    interface

    Accepts either a batch of events under ``events`` or a single
    ``tag``/``data`` pair, and re-fires them on the master event bus.

    :param load: payload with ``id`` and ``tok``, plus either ``events``
        or both ``tag`` and ``data``; optional ``pretag`` prefixes the
        tags of batched events.
    :return: ``True`` when fired, ``False`` for malformed requests,
        ``{}`` when token verification fails.
    '''
    if 'id' not in load:
        return False
    if not bonneville.utils.verify.valid_id(self.opts, load['id']):
        return False
    if 'tok' not in load:
        log.error(
            'Received incomplete call from {0} for {1!r}, missing {2!r}'
            .format(
                load['id'],
                inspect_stack()['co_name'],
                'tok'
            ))
        return False
    if not self.__verify_minion(load['id'], load['tok']):
        # The minion is not who it says it is!
        # We don't want to listen to it!
        log.warn(
            'Minion id {0} is not who it says it is!'.format(
                load['id']
            )
        )
        return {}
    load.pop('tok')
    # Need either a batch of events or a single tag/data pair.
    if 'events' not in load and ('tag' not in load or 'data' not in load):
        return False
    if 'events' in load:
        for event in load['events']:
            # assumes each batched event is a dict carrying its own
            # 'tag' key -- TODO confirm; a bare string here would raise.
            self.event.fire_event(event, event['tag'])  # old dup event
            if load.get('pretag') is not None:
                self.event.fire_event(event, tagify(event['tag'], base=load['pretag']))
    else:
        tag = load['tag']
        self.event.fire_event(load, tag)
    return True
def _return(self, load):
'''
Handle the return data sent from the minions
'''
# If the return data is invalid, just ignore it
if any(key not in load for key in ('return', 'jid', 'id')):
return False
if not bonneville.utils.verify.valid_id(self.opts, load['id']):
return False
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
load['jid'] = bonneville.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type'],
load.get('nocache', False))
log.info('Got return from {id} for job {jid}'.format(**load))
self.event.fire_event(load, load['jid']) # old dup event
self.event.fire_event(load, tagify([load['jid'], 'ret', load['id']], 'job'))
self.event.fire_ret_load(load)
if self.opts['master_ext_job_cache']:
fstr = '{0}.returner'.format(self.opts['master_ext_job_cache'])
self.mminion.returners[fstr](load)
return
if not self.opts['job_cache'] or self.opts.get('ext_job_cache'):
return
jid_dir = bonneville.utils.jid_dir(
load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
if not os.path.isdir(jid_dir):
log.error(
'An inconsistency occurred, a job was received with a job id '
'that is not present on the master: {jid}'.format(**load)
)
return False
if os.path.exists(os.path.join(jid_dir, 'nocache')):
return
hn_dir = os.path.join(jid_dir, load['id'])
if not os.path.isdir(hn_dir):
os.makedirs(hn_dir)
# Otherwise the minion has already returned this jid and it should
# be dropped
else:
log.error(
'An extra return was detected from minion {0}, please verify '
'the minion, this could be a replay attack'.format(
load['id']
)
)
return False
self.serial.dump(
load['return'],
# Use atomic open here to avoid the file being read before it's
| |
<filename>tools/contrail/create_subcluster_environment.py<gh_stars>0
#!/usr/bin/python
import yaml, sys, getopt, json, os
from subprocess import check_output
from string import Template
# Hostname prefixes used to pin baremetal nodes to predictable names.
CONTROL_ONLY_HOSTNAME_PREFIX='ctrlonly'
COMPUTE_HOSTNAME_PREFIX='compute'
DPDK_HOSTNAME_PREFIX='computedpdk'
SRIOV_HOSTNAME_PREFIX='computesriov'
# Prefixes for the generated TripleO role names.
CONTROL_ONLY_ROLE_PREFIX='ContrailControlOnly'
COMPUTE_ROLE_PREFIX='ContrailCompute'
# Contrail service templates, one per node type.
CONTROL_ONLY_SERVICE='../../docker/services/contrail/contrail-control-only.yaml'
COMPUTE_SERVICE='../../docker/services/contrail/contrail-vrouter.yaml'
DPDK_SERVICE='../../docker/services/contrail/contrail-vrouter-dpdk.yaml'
SRIOV_SERVICE='../../docker/services/contrail/contrail-vrouter-sriov.yaml'
# NIC-config templates, one per node type.
CONTROL_ONLY_NIC_CONFIG='../../network/config/contrail/contrail-controller-nic-config.yaml'
COMPUTE_NIC_CONFIG='../../network/config/contrail/compute-nic-config.yaml'
DPDK_NIC_CONFIG='../../network/config/contrail/contrail-dpdk-nic-config.yaml'
SRIOV_NIC_CONFIG='../../network/config/contrail/contrail-sriov-nic-config.yaml'
# Pre-network extraconfig templates for the compute variants.
COMPUTE_PRE_NETWORK='../../extraconfig/pre_network/contrail/compute_pre_network.yaml'
DPDK_PRE_NETWORK='../../extraconfig/pre_network/contrail/contrail_dpdk_pre_network.yaml'
SRIOV_PRE_NETWORK='../../extraconfig/pre_network/contrail/contrail_sriov_pre_network.yaml'
DPDK_ROLE_PREFIX='ContrailDpdk'
SRIOV_ROLE_PREFIX='ContrailSriov'
# Input roles file and generated environment files.
ROLES_FILE='../../roles_data_contrail_aio.yaml'
CONTRAIL_SERVICES='../../environments/contrail/contrail-services.yaml'
CONTRAIL_NET='../../environments/contrail/contrail-net.yaml'
CONTRAIL_PLUGINS='../../environments/contrail/contrail-plugins.yaml'
CONTRAIL_SUBCLUSTER='../../environments/contrail/contrail-subcluster.yaml'
CONTRAIL_STATIC_IP='../../environments/contrail/contrail-ips-from-pool-all.yaml'
CONTRAIL_SCHEDULER_HINTS='../../environments/contrail/contrail-scheduler-hints.yaml'
CONTROL_ONLY_ROLE='''###############################################################################
# Role: $ROLE_NAME #
###############################################################################
- name: $ROLE_NAME
description: |
ContrailController role that has all the Contrail controler services loaded
and handles config, control and webui functions
CountDefault: 0
tags:
- primary
- contrailcontroller
networks:
- InternalApi
- Tenant
HostnameFormatDefault: '%stackname%-$HOSTNAME-%index%'
ServicesDefault:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::Ipsec
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::LoginDefs
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::ContainersLogrotateCrond
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::$ROLE_NAME'''
COMPUTE_ROLE='''###############################################################################
# Role: $ROLE_NAME #
###############################################################################
- name: $ROLE_NAME
description: |
Basic Compute Node role
CountDefault: 0
networks:
- InternalApi
- Tenant
- Storage
HostnameFormatDefault: '%stackname%-$HOSTNAME-%index%'
# Deprecated & backward-compatible values (FIXME: Make parameters consistent)
# Set uses_deprecated_params to True if any deprecated params are used.
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::Aide
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::ComputeCeilometerAgent
- OS::TripleO::Services::ComputeNeutronCorePlugin
- OS::TripleO::Services::ComputeNeutronL3Agent
- OS::TripleO::Services::ComputeNeutronMetadataAgent
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Fluentd
- OS::TripleO::Services::Ipsec
- OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::LoginDefs
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::NovaMigrationTarget
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::ContainersLogrotateCrond
- OS::TripleO::Services::Rhsm
- OS::TripleO::Services::RsyslogSidecar
- OS::TripleO::Services::Securetty
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::SkydiveAgent
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::Tuned
- OS::TripleO::Services::Ptp'''
class ContrailStaticIp(object):
    """Builds the contrail-ips-from-pool-all.yaml environment.

    Per subcluster role, registers a ``<network>_from_pool.yaml`` port
    resource and records the static IPs requested for its nodes.  As a
    side effect, every ironic node is tagged (via the openstack CLI)
    with a ``node:<hostname>`` capability so scheduler hints can pin it
    to a predictable hostname.
    """
    def __init__(self, subcluster_yaml, roleTypeList):
        # Parsed subcluster definition: a list of subcluster dicts.
        self.subcluster_yaml = subcluster_yaml
        # Create the output file on first run so it can be opened below.
        if not os.path.exists(CONTRAIL_STATIC_IP):
            fh = open(CONTRAIL_STATIC_IP, "w")
            fh.close()
        # NOTE(review): plain yaml.load without a Loader is deprecated
        # and unsafe, and the handle is never closed -- consider
        # yaml.safe_load with a with-statement.
        contrailStaticIpFile = yaml.load(open(CONTRAIL_STATIC_IP)) or {}
        if not 'resource_registry' in contrailStaticIpFile:
            contrailStaticIpFile['resource_registry'] = {}
        if not 'parameter_defaults' in contrailStaticIpFile:
            contrailStaticIpFile['parameter_defaults'] = {}
        self.resourceRegistry = contrailStaticIpFile['resource_registry']
        self.parameterDefaults = contrailStaticIpFile['parameter_defaults']
        if 'controlOnly' in roleTypeList:
            self.createContrailStaticIp(CONTROL_ONLY_ROLE_PREFIX, CONTROL_ONLY_HOSTNAME_PREFIX, 'control_nodes')
        if 'vrouter' in roleTypeList:
            self.createContrailStaticIp(COMPUTE_ROLE_PREFIX, COMPUTE_HOSTNAME_PREFIX, 'compute_nodes')
        if 'dpdk' in roleTypeList:
            self.createContrailStaticIp(DPDK_ROLE_PREFIX, DPDK_HOSTNAME_PREFIX, 'dpdk_nodes')
        if 'sriov' in roleTypeList:
            self.createContrailStaticIp(SRIOV_ROLE_PREFIX, SRIOV_HOSTNAME_PREFIX, 'sriov_nodes')
        contrailStaticIpFile['resource_registry'] = self.resourceRegistry
        contrailStaticIpFile['parameter_defaults'] = self.parameterDefaults
        # Final dict, dumped later via writeYaml().
        self.contrailStaticIp = contrailStaticIpFile

    def setProperties(self, uuid, nodeName):
        """Replace (or add) the ``node:`` capability on an ironic node.

        Reads the node's ``capabilities`` property, rewrites the
        ``node:`` entry to *nodeName* (appending one if absent) and
        writes the property back through the openstack CLI.
        """
        properties = check_output(["openstack", "baremetal", "node", "show", uuid, '-c', 'properties', '-f', 'json'])
        properties_json = json.loads(properties)
        capabilitiesString = properties_json['properties']['capabilities']
        capabilitiesList = capabilitiesString.split(',')
        newCapabilitiesList = []
        nodeExists = False
        for capability in capabilitiesList:
            capabilityItem = capability.split(':')
            if capabilityItem[0] == 'node':
                # Overwrite the existing pin with the new hostname.
                capabilityItem[1] = nodeName
                capabilityItemString = ':'.join(capabilityItem)
                newCapabilitiesList.append(capabilityItemString)
                nodeExists = True
            else:
                capabilityItemString = ':'.join(capabilityItem)
                newCapabilitiesList.append(capabilityItemString)
        if not nodeExists:
            newCapabilitiesList.append('node:' + nodeName)
        newCapabilitiesListString = ','.join(newCapabilitiesList)
        newCapabilitiesListString = "capabilities=" + newCapabilitiesListString
        check_output(["openstack", "baremetal", "node", "set", uuid, '--property', newCapabilitiesListString])

    def createContrailStaticIp(self, ROLE_PREFIX, HOSTNAME_PREFIX, nodeType):
        """Record static IPs and the port resource for each subcluster
        that contains nodes of *nodeType*."""
        subcluster_dict = self.subcluster_yaml
        for subcluster in subcluster_dict:
            if nodeType not in subcluster:
                continue
            subclusterRoleName = subcluster['subcluster']
            subclusterRoleName = subclusterRoleName.capitalize()
            subclusterRoleName = ROLE_PREFIX + subclusterRoleName
            subclusterNetworkName = subcluster['network']
            subclusterNetworkDict = {subclusterNetworkName: []}
            subclusterPortNetworkName = subclusterNetworkName.capitalize() + "Port"
            subclusterPortName = 'OS::TripleO::' + subclusterRoleName + '::Ports::' + subclusterPortNetworkName
            subclusterIpsName = subclusterRoleName + 'IPs'
            subclusterHostname = subcluster['subcluster']
            subclusterHostname = subclusterHostname[0].lower() + subclusterHostname[1:]
            subclusterHostname = HOSTNAME_PREFIX + subclusterHostname
            subclusterIpList = []
            count = 0
            for node in subcluster[nodeType]:
                # Pin each node to hostname <prefix><subcluster>-<n>.
                nodeName = subclusterHostname + '-' + str(count)
                self.setProperties(node['uuid'], nodeName)
                count = count + 1
                if 'ipaddress' in node:
                    subclusterIpList.append(node['ipaddress'])
            if len(subclusterIpList) > 0:
                self.parameterDefaults[subclusterIpsName] = {}
                self.resourceRegistry[subclusterPortName] = '../../network/ports/' + subclusterNetworkName + '_from_pool.yaml'
                subclusterIpsDict = {subclusterIpsName: {}}
                subclusterNetworkDict = {subclusterNetworkName: []}
                subclusterNetworkDict[subclusterNetworkName] = subclusterIpList
                self.parameterDefaults[subclusterIpsName] = subclusterNetworkDict
class ContrailSchedulerHints(object):
    """Builds the contrail-scheduler-hints.yaml environment.

    For every subcluster role, adds a ``<Role>SchedulerHints`` parameter
    mapping the ``capabilities:node`` hint to the hostname pattern
    ``<prefix><subcluster>-%index%``, matching the ``node:`` capability
    that ContrailStaticIp tags onto each baremetal node.
    """

    def __init__(self, subcluster_yaml, roleTypeList):
        self.subcluster_yaml = subcluster_yaml
        # Create the output file on first run so it can be opened below.
        if not os.path.exists(CONTRAIL_SCHEDULER_HINTS):
            open(CONTRAIL_SCHEDULER_HINTS, "w").close()
        # safe_load (plain yaml.load without a Loader is deprecated and
        # unsafe) and a with-statement so the handle is always closed.
        with open(CONTRAIL_SCHEDULER_HINTS) as hints_file:
            contrailSchedulerHintsFile = yaml.safe_load(hints_file) or {}
        contrailSchedulerHintsFile.setdefault('parameter_defaults', {})
        self.parameterDefaults = contrailSchedulerHintsFile['parameter_defaults']
        # (flag in roleTypeList, role prefix, hostname prefix) per type.
        role_map = [
            ('controlOnly', CONTROL_ONLY_ROLE_PREFIX, CONTROL_ONLY_HOSTNAME_PREFIX),
            ('vrouter', COMPUTE_ROLE_PREFIX, COMPUTE_HOSTNAME_PREFIX),
            ('dpdk', DPDK_ROLE_PREFIX, DPDK_HOSTNAME_PREFIX),
            ('sriov', SRIOV_ROLE_PREFIX, SRIOV_HOSTNAME_PREFIX),
        ]
        for role_type, role_prefix, hostname_prefix in role_map:
            if role_type in roleTypeList:
                self.createContrailSchedulerHint(role_prefix, hostname_prefix)
        contrailSchedulerHintsFile['parameter_defaults'] = self.parameterDefaults
        # Final dict, dumped later via writeYaml().
        self.contrailSchedulerHints = contrailSchedulerHintsFile

    def createContrailSchedulerHint(self, ROLE_PREFIX, HOSTNAME_PREFIX):
        """Add one ``<Role>SchedulerHints`` parameter per subcluster."""
        for subcluster in self.subcluster_yaml:
            name = subcluster['subcluster']
            role_name = ROLE_PREFIX + name.capitalize()
            # Lower-case the first letter only, matching the hostname
            # convention used by ContrailStaticIp.setProperties.
            hostname = HOSTNAME_PREFIX + name[0].lower() + name[1:] + '-%index%'
            self.parameterDefaults[role_name + 'SchedulerHints'] = {
                'capabilities:node': hostname}
class ContrailNet(object):
    """Builds the contrail-net.yaml environment.

    Maps each generated subcluster role's ``Net::SoftwareConfig``
    resource to the NIC-config template for its node type.
    """

    def __init__(self, subcluster_yaml, roleTypeList):
        self.subcluster_yaml = subcluster_yaml
        # safe_load + with-statement: the original yaml.load(open(...))
        # form is deprecated/unsafe and leaked the file handle.
        with open(CONTRAIL_NET) as net_file:
            contrailNetFile = yaml.safe_load(net_file)
        self.resourceRegistry = contrailNetFile['resource_registry']
        # (flag in roleTypeList, role prefix, NIC-config path) per type.
        role_map = [
            ('controlOnly', CONTROL_ONLY_ROLE_PREFIX, CONTROL_ONLY_NIC_CONFIG),
            ('vrouter', COMPUTE_ROLE_PREFIX, COMPUTE_NIC_CONFIG),
            ('dpdk', DPDK_ROLE_PREFIX, DPDK_NIC_CONFIG),
            ('sriov', SRIOV_ROLE_PREFIX, SRIOV_NIC_CONFIG),
        ]
        for role_type, role_prefix, nic_config in role_map:
            if role_type in roleTypeList:
                self.createContrailNet(role_prefix, nic_config)
        contrailNetFile['resource_registry'] = self.resourceRegistry
        # Final dict, dumped later via writeYaml().
        self.contrailNet = contrailNetFile

    def createContrailNet(self, ROLE_PREFIX, NIC_CONFIG):
        """Register the NIC-config template for every subcluster role."""
        for subcluster in self.subcluster_yaml:
            role_name = ROLE_PREFIX + subcluster['subcluster'].capitalize()
            key = 'OS::TripleO::' + role_name + '::Net::SoftwareConfig'
            self.resourceRegistry[key] = NIC_CONFIG
class ContrailServices(object):
    """Builds the contrail-services.yaml environment.

    Per subcluster role, records the role's network in ServiceNetMap and
    sets the role's node-count and flavor parameters.
    """

    def __init__(self, subcluster_yaml, roleTypeList):
        self.subcluster_yaml = subcluster_yaml
        # safe_load + with-statement: the original yaml.load(open(...))
        # form is deprecated/unsafe and leaked the file handle.
        with open(CONTRAIL_SERVICES) as services_file:
            contrailServicesFile = yaml.safe_load(services_file)
        self.parameterDefaults = contrailServicesFile['parameter_defaults']
        self.contrailServiceNetMap = self.parameterDefaults['ServiceNetMap']
        # (flag in roleTypeList, role prefix, node-list key) per type.
        role_map = [
            ('controlOnly', CONTROL_ONLY_ROLE_PREFIX, 'control_nodes'),
            ('vrouter', COMPUTE_ROLE_PREFIX, 'compute_nodes'),
            ('dpdk', DPDK_ROLE_PREFIX, 'dpdk_nodes'),
            ('sriov', SRIOV_ROLE_PREFIX, 'sriov_nodes'),
        ]
        for role_type, role_prefix, node_key in role_map:
            if role_type in roleTypeList:
                self.createContrailServices(role_prefix, node_key)
        self.parameterDefaults['ServiceNetMap'] = self.contrailServiceNetMap
        contrailServicesFile['parameter_defaults'] = self.parameterDefaults
        # Final dict, dumped later via writeYaml().
        self.contrailServices = contrailServicesFile

    def createContrailServices(self, ROLE_PREFIX, roleType):
        """Record network/count/flavor parameters for each subcluster
        that contains nodes of *roleType*."""
        for subcluster in self.subcluster_yaml:
            if roleType not in subcluster:
                continue
            role_name = ROLE_PREFIX + subcluster['subcluster'].capitalize()
            network_key = role_name + 'Network'
            # Do not clobber an existing ServiceNetMap entry.
            if network_key not in self.contrailServiceNetMap:
                self.contrailServiceNetMap[network_key] = subcluster['network']
            # len() replaces the original count-by-iteration loop.
            self.parameterDefaults[role_name + 'Count'] = len(subcluster[roleType])
            self.parameterDefaults['Overcloud' + role_name + 'Flavor'] = 'baremetal'
class ContrailPlugin(object):
    """Builds the contrail-plugins.yaml environment.

    Wires each generated role's Contrail service template, pre-deploy
    extra config and (for compute variants) pre-network config into the
    resource registry.
    """

    def __init__(self, subcluster_yaml, roleTypeList):
        self.subcluster_yaml = subcluster_yaml
        # safe_load + with-statement: the original yaml.load(open(...))
        # form is deprecated/unsafe and leaked the file handle.
        with open(CONTRAIL_PLUGINS) as plugin_file:
            pluginFile = yaml.safe_load(plugin_file)
        self.resourceRegistry = pluginFile['resource_registry']
        if 'controlOnly' in roleTypeList:
            self.createContrailPlugin(CONTROL_ONLY_ROLE_PREFIX, CONTROL_ONLY_SERVICE)
        if 'vrouter' in roleTypeList:
            self.createContrailPlugin(COMPUTE_ROLE_PREFIX, COMPUTE_SERVICE, COMPUTE_PRE_NETWORK)
        if 'dpdk' in roleTypeList:
            self.createContrailPlugin(DPDK_ROLE_PREFIX, DPDK_SERVICE, DPDK_PRE_NETWORK)
        if 'sriov' in roleTypeList:
            self.createContrailPlugin(SRIOV_ROLE_PREFIX, SRIOV_SERVICE, SRIOV_PRE_NETWORK)
        pluginFile['resource_registry'] = self.resourceRegistry
        # Final dict, dumped later via writeYaml().
        self.contrailPlugin = pluginFile

    def createContrailPlugin(self, ROLE_PREFIX, servicePath, preNetwork=False):
        """Register the service/extra-config (and optional pre-network)
        templates for every subcluster's role.

        :param preNetwork: path to a PreNetworkConfig template, or
            ``False`` when the role type has none (control-only roles).
        """
        # (The original kept an unused 'subclusterPluginExists' flag;
        # it has been removed.)
        for subcluster in self.subcluster_yaml:
            role_name = ROLE_PREFIX + subcluster['subcluster'].capitalize()
            self.resourceRegistry['OS::TripleO::Services::' + role_name] = servicePath
            self.resourceRegistry['OS::TripleO::' + role_name + 'ExtraConfigPre'] = (
                '../../extraconfig/pre_deploy/contrail/contrail-init.yaml')
            if preNetwork:
                self.resourceRegistry['OS::TripleO::' + role_name + '::PreNetworkConfig'] = preNetwork
class ContrailRole(object):
    """Renders roles_data snippets for the generated subcluster roles.

    Fills the CONTROL_ONLY_ROLE / COMPUTE_ROLE templates once per
    subcluster and concatenates the results into ``subclusterString``,
    skipping any role already present in ROLES_FILE.
    """

    def __init__(self, subcluster_yaml, roleTypeList):
        self.subcluster_yaml = subcluster_yaml
        control_tpl = Template(CONTROL_ONLY_ROLE)
        compute_tpl = Template(COMPUTE_ROLE)
        # (The original also kept unused 'subcluster_dict' and
        # 'subclusterRoleList' locals here; they have been removed.)
        pieces = []
        if 'controlOnly' in roleTypeList:
            pieces.append(self.createRole(control_tpl, CONTROL_ONLY_HOSTNAME_PREFIX, CONTROL_ONLY_ROLE_PREFIX))
        if 'vrouter' in roleTypeList:
            pieces.append(self.createRole(compute_tpl, COMPUTE_HOSTNAME_PREFIX, COMPUTE_ROLE_PREFIX))
        if 'dpdk' in roleTypeList:
            pieces.append(self.createRole(compute_tpl, DPDK_HOSTNAME_PREFIX, DPDK_ROLE_PREFIX))
        if 'sriov' in roleTypeList:
            pieces.append(self.createRole(compute_tpl, SRIOV_HOSTNAME_PREFIX, SRIOV_ROLE_PREFIX))
        # Appended later to the roles file via writeFile().
        self.subclusterString = ''.join(pieces)

    def createRole(self, roleTemplate, hostnamePrefix, rolePrefix):
        """Render *roleTemplate* for every subcluster whose role is not
        already defined in ROLES_FILE; return the concatenated text
        (each snippet followed by a newline)."""
        # safe_load + with-statement: the original yaml.load(open(...))
        # form is deprecated/unsafe and leaked a handle per call.
        with open(ROLES_FILE) as roles_file:
            existing_roles = yaml.safe_load(roles_file)
        rendered = []
        for subcluster in self.subcluster_yaml:
            name = subcluster['subcluster']
            role_name = rolePrefix + name.capitalize()
            if any(role['name'] == role_name for role in existing_roles):
                # Role already present in the roles file -- skip it.
                continue
            # Lower-case the first letter only (hostname convention).
            hostname = hostnamePrefix + name[0].lower() + name[1:]
            rendered.append(roleTemplate.substitute(
                ROLE_NAME=role_name, HOSTNAME=hostname) + '\n')
        return ''.join(rendered)
class ContrailSubcluster(object):
    """Builds the NodeDataLookup payload for contrail-subcluster.yaml.

    For each subcluster, gathers the control-node IPs and emits
    per-node ``contrail_settings`` (SUBCLUSTER, BGP_ASN or
    VROUTER_GATEWAY, CONTROL_NODES) keyed by each node's hardware
    system UUID from ironic introspection.
    """
    def __init__(self, subcluster_yaml):
        self.subcluster_yaml = subcluster_yaml
        self.subcluster = self.createSubcluster()

    def getSystemUUID(self, uuid):
        """Return the hardware system UUID recorded by introspection."""
        introspection_data = check_output(["openstack", "baremetal", "introspection", "data", "save", uuid])
        introspection_data_json = json.loads(introspection_data)
        system_uuid_string = json.dumps(introspection_data_json['extra']['system']['product']['uuid'])
        # json.dumps wraps the value in quotes; strip them back off.
        system_uuid = system_uuid_string.replace('"', '')
        return system_uuid

    def createSubcluster(self):
        """Assemble the parameter_defaults/NodeDataLookup dict."""
        subcluster_dict = self.subcluster_yaml
        # NOTE(review): output_list is built but never returned or used.
        output_list = []
        node_dict = {"parameter_defaults": {"NodeDataLookup": {}}}
        for subcluster in subcluster_dict:
            subcluster_name = subcluster['subcluster']
            # assumes every subcluster defines both control_nodes and
            # compute_nodes -- TODO confirm (KeyError otherwise).
            subcluster_control_nodes = subcluster['control_nodes']
            subcluster_compute_nodes = subcluster['compute_nodes']
            control_node_ip_dict = {subcluster_name: []}
            control_node_ips = []
            for control_node in subcluster_control_nodes:
                control_node_ips.append(control_node['ipaddress'])
            control_node_ip_dict[subcluster_name] = control_node_ips
            # Comma-separated list shared by all nodes in the subcluster.
            control_node_ip_string = ','.join(control_node_ip_dict[subcluster_name])
            for control_node in subcluster_control_nodes:
                control_node_uuid = control_node['uuid']
                control_node_system_uuid = self.getSystemUUID(control_node_uuid)
                control_node_dict = {control_node_system_uuid: {}}
                contrail_settings = {'contrail_settings': {}}
                contrail_settings['contrail_settings']['SUBCLUSTER'] = subcluster_name
                contrail_settings['contrail_settings']['BGP_ASN'] = subcluster['asn']
                contrail_settings['contrail_settings']['CONTROL_NODES'] = control_node_ip_string
                control_node_dict[control_node_system_uuid] = contrail_settings
                node_dict['parameter_defaults']['NodeDataLookup'][control_node_system_uuid] = contrail_settings
                output_list.append(control_node_dict)
            for compute_node in subcluster_compute_nodes:
                compute_node_uuid = compute_node['uuid']
                compute_node_system_uuid = self.getSystemUUID(compute_node_uuid)
                compute_node_dict = {compute_node_system_uuid: {}}
                contrail_settings = {'contrail_settings': {}}
                contrail_settings['contrail_settings']['SUBCLUSTER'] = subcluster_name
                contrail_settings['contrail_settings']['VROUTER_GATEWAY'] = compute_node['vrouter_gateway']
                contrail_settings['contrail_settings']['CONTROL_NODES'] = control_node_ip_string
                compute_node_dict[compute_node_system_uuid] = contrail_settings
                node_dict['parameter_defaults']['NodeDataLookup'][compute_node_system_uuid] = contrail_settings
                output_list.append(compute_node_dict)
        return node_dict
def writeYaml(outputfile, inputYaml):
    """Serialize *inputYaml* to *outputfile* as block-style YAML,
    overwriting any existing content."""
    with open(outputfile, 'w') as out:
        yaml.dump(inputYaml, out, default_flow_style=False)
def writeFile(outputfile, inputString):
    """Append *inputString* to *outputfile*, creating it if needed."""
    with open(outputfile, 'a') as out:
        out.write(inputString)
def main(argv):
    """Entry point: parse -i/-o options, read the subcluster definition
    YAML and generate the contrail-subcluster environment file.

    NOTE(review): this is Python 2 code (print statements).  Also,
    roleTypeList is computed here but only the ContrailSubcluster
    output is written in this part of the script -- confirm the
    remaining environment writers are invoked later in the file.
    """
    inputfile = ''
    outputfile = ''
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
    except getopt.GetoptError:
        print 'subcluster.py -i <inputfile> -o <outputfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'subcluster.py -i <inputfile> -o <outputfile>'
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg
    if not inputfile:
        print "-i is missing"
        print 'subcluster.py -i <inputfile>'
        sys.exit()
    # NOTE(review): yaml.load without a Loader is deprecated/unsafe.
    subcluster_yaml = yaml.load(open(inputfile))
    # Collect which node types appear anywhere in the definition.
    roleTypeList = []
    createControlOnlyRole = False
    createComputeRole = False
    createComputeDpdkRole = False
    createComputeSriovRole = False
    for subcluster in subcluster_yaml:
        if 'control_nodes' in subcluster:
            roleTypeList.append('controlOnly')
        if 'compute_nodes' in subcluster:
            roleTypeList.append('vrouter')
        if 'dpdk_nodes' in subcluster:
            roleTypeList.append('dpdk')
        if 'sriov_nodes' in subcluster:
            roleTypeList.append('sriov')
    contrailSubcluster = ContrailSubcluster(subcluster_yaml)
    writeYaml(CONTRAIL_SUBCLUSTER, contrailSubcluster.subcluster)
| |
"""
Implementation of the algorithm that gradually discovers the Pareto front.
"""
import os
import numpy as np
import GPy
from scipy.optimize import minimize
from pyDOE import *
import tqdm
from . import get_idx_of_observed_pareto_front
from . import ehvi_2d_func
from . import DistributedObject
from . import ParallelizedGPRegression
from . import distributed_xrange
from . import reduce_max
from . import parallel_eval
from . import get_yref
from . import get_empirical_attainment_function
from . import get_Vorobev_expectation
from . import get_symmetric_deviation_function
#from mpi4py import MPI as mpi
import copy
import pickle
__all__ = ['ParetoFront']
class ParetoFront(DistributedObject):
"""
Initialize the class.
:param X: Input points - num_points x num_dim
:param Y: Objectives - num_points x num_obj
:param obj_funcs: The objective functions to evaluate.
:param obj_funcs_true: The true objective functions for the noisy
problem.
:param Y_true: Finitely large number of observations for
an approximate pareto front.
:param X_design: A set of design points over which we look for
the maximum EHVI. Alternatively, we use them as
starting points for L-BFGS. If X_design is
an integer, then we draw such points randomly
from a hypercube.
:param y_ref: The reference point in the objective space used
for computing the hypervolume.
:param how: Do you want to maximize or minimize the
objectives?
:param ehvi_opt_method: The method used for the optimization of the
EHVI. Any of the methods available to
`scipy.optimize.minimize_`. If you provide
any bounds or constraints you must make sure
:param ehvi_opt_bounds: The bounds of the EHVI optimization. Just like
the bounds in `scipy.optimize.minimize`_.
:param ehvi_opt_constraints:The constraints of the EHVI optimization.
:param ehvi_opt_options: A dictionary of solver options, just like in
`scipy.optimize.minimize`_.
:param max_it: Maximum number of iterations of the algorithm.
:param rtol: Relative tolerance for terminating the
algorithm.
:param add_at_least: The algorithm will not terminate until at least
this many simulations have been performed.
:param add_in_parallel: How many points to add in parallel.
:param verbosity: The greater this integer is, the more
information we print for the progress of the
algorithm.
:param kernel_type: The kernel used in GP regression.
:param gp_regression_type: The GP regression model.
:param gp_opt_num_restarts: Number of restarts for the GP optimization.
:param gp_opt_verbosity >= 1: Should we print information about the GP
optimization.
:param gp_opt_verbosity: If the user wants to print the value of the log
likelihood of the GP during each stage of the
maximization of the likelihood.
:param gp_fixed_noise: What level of noise should we assume for the GP?
Select ``None`` if you want the GP regression
to estimate the noise level. Select, a small
value if your objectives are deterministic.
:param do_posterior_samples:Do posterior samples or not.
:param figname: Name of the figure, may also include full name
of the
path to the directory where the figures have to be
saved.
:param get_fig: A figure object that can be used to plot the
status of the optimization at each stage.
:param lim: Limits specified as tuples for the two
axis of the 'plot status' figures.
:param make_plot_status: Do you want the plot_status figures to be made.
:param comm: The MPI communicator.
:param trans_function: A function to transform the scaled measurements for
obtaining the desired plots.
:param pareto_how: The mode ```min``` or ```max``` to plot the pareto frontier.
.. _scipy.optimize.minimize: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize
"""
# Observed objectives (num_points x num_obj); set in __init__.
Y = None
# Input points corresponding to the observed objectives
# (num_points x num_dim); set in __init__.
X = None
# The indexes of the points that correspond to the Pareto front;
# recomputed by train_surrogates().
idx = None
# The reference point used for the computation of the expected improvement
# (hypervolume); maintained by _update_yref().
y_ref = None
@property
def Y_pareto(self):
    """
    :getter: The (projected) objective values of the non-dominated points.
    """
    return self.Y_projected[self.idx]
@property
def X_pareto(self):
    """
    :getter: The input locations of the non-dominated (Pareto) points.
    """
    return self.X[self.idx]
@property
def num_obj(self):
    """
    :getter: How many objectives are being observed.
    """
    _, n_obj = self.Y.shape
    return n_obj
@property
def num_dim(self):
    """
    :getter: The dimensionality of the input (design) space.
    """
    _, n_dim = self.X.shape
    return n_dim
@property
def num_pareto(self):
    """
    :getter: How many points currently lie on the Pareto front.
    """
    return len(self.idx)
def _update_yref(self, value=None):
    """
    Refresh the reference point used for the hypervolume computation.

    If ``value`` is not given, it is derived from the extent of the
    projected objectives (10% beyond the worst observed values, on the
    side determined by ``self.how``). Once set, subsequent calls can
    only move the reference point outwards (elementwise max for
    minimization, min for maximization).
    """
    if value is None:
        hi = np.max(self.Y_projected, axis=0)
        lo = np.min(self.Y_projected, axis=0)
        margin = 0.1 * (hi - lo)
        value = hi + margin if self.how == 'min' else lo - margin
    if self.y_ref is None:
        self.y_ref = value
        return
    pick = max if self.how == 'min' else min
    for k in xrange(self.num_obj):
        self.y_ref[k] = pick(self.y_ref[k], value[k])
def __init__(self, X, Y, obj_funcs,
             obj_funcs_true=None,
             Y_true=None,
             X_design=None,
             y_ref=None,
             how='max',
             ehvi_opt_method='TNC',
             ehvi_opt_bounds=None,
             ehvi_opt_constraints=None,
             ehvi_opt_options=None,
             max_it=50,
             rtol=1e-3,
             add_at_least=1,
             add_in_parallel=1,
             verbosity=0,
             kernel_type=GPy.kern.Matern32,
             gp_regression_type=ParallelizedGPRegression,
             gp_opt_num_restarts=10,
             gp_opt_verbosity=False,
             gp_fixed_noise=None,
             do_posterior_samples=False,
             figname='ex1/moo',
             get_fig=None,
             lim=None,
             make_plot_status=True,
             comm=None,
             trans_function=None,
             label=('Objective 1', 'Objective 2'),
             pareto_how='max',
             save_surrogates=False):
    """
    Validate the arguments, store the configuration, train the initial
    surrogates, and initialize the reference point.

    See the class docstring for the meaning of each parameter.
    """
    super(ParetoFront, self).__init__(comm=comm, verbosity=verbosity)
    # Observed inputs/objectives must be 2D with matching row counts.
    assert X.ndim == 2
    self.X = X
    assert Y.ndim == 2
    assert Y.shape[0] == X.shape[0]
    self.Y = Y
    # The EHVI computation (ehvi_2d_func) is specific to two objectives.
    assert self.num_obj == 2, 'Currently working only for two objectives'
    self.X_design = X_design
    self.obj_funcs = obj_funcs
    assert how == 'max' or how == 'min'
    self.how = how
    # These four are forwarded verbatim to scipy.optimize.minimize.
    self.ehvi_opt_method = ehvi_opt_method
    self.ehvi_opt_bounds = ehvi_opt_bounds
    self.ehvi_opt_constraints = ehvi_opt_constraints
    self.ehvi_opt_options = ehvi_opt_options
    assert isinstance(max_it, int) and max_it >= 1
    assert isinstance(add_at_least, int) and add_at_least >= 1 \
        and add_at_least <= max_it
    assert isinstance(rtol, float) and rtol >= 0.
    self.max_it = max_it
    self.rtol = rtol
    self.add_at_least = add_at_least
    self.add_in_parallel = add_in_parallel
    assert isinstance(verbosity, int) and verbosity >= 0
    self.kernel_type = kernel_type
    self.gp_regression_type = gp_regression_type
    self.gp_opt_num_restarts = gp_opt_num_restarts
    self.gp_opt_verbosity = gp_opt_verbosity
    # Fixed noise is interpreted as a fraction of the data std (see
    # train_surrogates); None lets the GP estimate the noise level.
    assert gp_fixed_noise is None or \
        isinstance(gp_fixed_noise, float) and gp_fixed_noise >= 0.
    self.gp_fixed_noise = gp_fixed_noise
    self.do_posterior_samples = do_posterior_samples
    self._surrogates = None
    self.save_surrogates = save_surrogates
    if self.save_surrogates:
        self._surrogates_log = []
    # Order matters: _update_yref() reads self.Y_projected, which is
    # created by train_surrogates().
    self.train_surrogates()
    self._update_yref(y_ref)
    self.figname = figname
    self.get_fig = get_fig
    self.Y_true = Y_true
    self.lim = lim
    self.obj_funcs_true = obj_funcs_true
    self.make_plot_status = make_plot_status
    self.trans_function = trans_function
    self.label = label
    assert pareto_how == 'max' or pareto_how == 'min'
    self.pareto_how = pareto_how
@property
def surrogates(self):
    """
    :getter: The list of GP surrogates, one per objective. Trained
             lazily on first access.
    """
    models = self._surrogates
    if models is None:
        self.train_surrogates()
        models = self._surrogates
    return models
def train_surrogates(self):
    """
    Train one GP surrogate per objective on the current data.

    Side effects:
    - ``self._surrogates``: list of fitted GP models, one per objective.
    - ``self.Y_projected``: copy of ``self.Y`` with each column replaced
      by the corresponding GP posterior mean at ``self.X`` (denoised
      objectives).
    - ``self.idx``: Pareto-front indexes recomputed on the projected
      objectives.
    - If ``self.save_surrogates``: the surrogate list is appended to
      ``self._surrogates_log``.
    """
    self._surrogates = []
    self.Y_projected = self.Y.copy()
    for i in xrange(self.num_obj):
        # Fresh ARD kernel for each objective.
        k = self.kernel_type(self.num_dim, ARD=True)
        gp = self.gp_regression_type(self.X, self.Y[:, i][:, None], k,
                                     comm=self.comm,
                                     verbosity=self.gp_opt_verbosity)
        if self.gp_fixed_noise is not None:
            # Noise level is specified relative to the std of the data.
            fixed_noise = self.gp_fixed_noise * np.std(self.Y[:, i])
            gp.Gaussian_noise.variance.unconstrain()
            gp.Gaussian_noise.variance.fix(fixed_noise ** 2)
        # The following can be parallelized
        gp.optimize_restarts(self.gp_opt_num_restarts)
        # Replace the noisy observations with the GP posterior mean.
        self.Y_projected[:, i] = gp.predict(self.X)[0][:, 0]
        self._surrogates.append(gp)
    if self.save_surrogates:
        self._surrogates_log.append(self._surrogates)
    self.idx = get_idx_of_observed_pareto_front(self.Y_projected, how=self.how)
def _optimize_ehvi(self, x0):
    """
    Run one local optimization of the EHVI starting from ``x0``.

    The EHVI is maximized by minimizing its negative with
    ``scipy.optimize.minimize`` (gradient supplied via ``jac=True``).

    :returns: The pair (optimal point, EHVI value at that point).
    """
    ehvi_args = (self.surrogates, self.Y_pareto, self.y_ref, self.how)

    def neg_ehvi(x, *ehvi_args):
        val, grad = ehvi_2d_func(x, *ehvi_args)
        return -val, -grad

    res = minimize(neg_ehvi, x0,
                   jac=True,
                   args=ehvi_args,
                   method=self.ehvi_opt_method,
                   bounds=self.ehvi_opt_bounds,
                   constraints=self.ehvi_opt_constraints,
                   options=self.ehvi_opt_options)
    return res.x, -res.fun
def optimize_ehvi(self, X_design):
    """
    Maximize the expected hypervolume improvement (EHVI).

    Runs a local optimization (see ``_optimize_ehvi``) from every point
    in ``X_design`` and keeps the best result. Starting points are
    split across MPI ranks; the final answer is reduced over all ranks.

    :param X_design: Either a num_points x num_dim array of starting
                     points, or an integer, in which case that many
                     points are drawn by latin hypercube sampling on
                     rank 0 and broadcast to the other ranks.
    :returns: The reduced pair (best design point, best EHVI value).
    """
    if isinstance(X_design, int):
        num_design = X_design
        if self.rank == 0:
            X_design = lhs(self.num_dim, num_design)
        else:
            X_design = None
        if self.use_mpi:
            X_design = self.comm.bcast(X_design)
        if self.ehvi_opt_bounds is not None:
            # Rescale the unit-hypercube LHS sample to the opt. bounds.
            b = np.array(self.ehvi_opt_bounds)
            X_design = b[:, 0] + (b[:, 1] - b[:, 0]) * X_design
    x_best = None
    ei_max = 0.
    if self.verbosity >= 1:
        pbar = tqdm.tqdm(total=X_design.shape[0])
    # Each rank visits only its own share of the starting points.
    for i in distributed_xrange(X_design.shape[0], comm=self.comm):
        x0 = X_design[i, :]
        if self.verbosity >= 1:
            # Each tick stands for one point on each of the `size` ranks.
            pbar.update(self.size)
        if self.verbosity >= 2:
            print '\t\t> computing EHVI at design point:\n', x0
        x, ei = self._optimize_ehvi(x0)
        if self.verbosity >= 2:
            print '\t\t> final design point:\n', x
            print '\t\t> found: ', ei
        if ei > ei_max:
            ei_max = ei
            x_best = x
    if self.verbosity >= 1:
        pbar.close()
    # Pick the overall winner across MPI ranks.
    return reduce_max(x_best, ei_max, comm=self.comm)
def suggest(self, k, X_design=None):
"""
Suggest k points to be simulated.
"""
if X_design is None:
X_design = self.X_design
X0 = self.X.copy()
Y0 = self.Y.copy()
x_best = []
eis = []
for j in xrange(k):
for i in xrange(self.num_obj):
gp = self.surrogates[i]
gp.set_XY(self.X, self.Y[:, i][:, None])
self.Y_projected = self.Y.copy()
self.Y_projected[:, i] = gp.predict(self.X)[0][:, 0]
self.idx = get_idx_of_observed_pareto_front(self.Y_projected,
how=self.how)
self._update_yref()
x, ei = self.optimize_ehvi(X_design)
eis.append(ei)
y = [gp.posterior_samples(x[None, :], 1)[0, 0]
for gp in self.surrogates]
x_best.append(x)
if self.verbosity >=1:
print '\t\t> add (EI=%f):' % ei
print x
self.X = np.vstack([self.X, x])
self.Y = np.vstack([self.Y,[y]])
x_best = np.array(x_best)
self.X = X0
self.Y | |
apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog),
[
"transpose",
"transpose",
"relu",
"relu",
"concat",
"transpose",
"transpose",
],
)
self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "concat"])
assert_model_is_valid(
prog,
{"x": (1, 2, 5, 5)},
expected_output_shapes={
block.outputs[0].name: (1, 4, 5, 5),
block.outputs[1].name: (1, 2, 5, 5),
},
)
"""
Input graph:
constant(shape=[30,10,5])
|
V
input(shape=10,20,30)--->transpose(axis=[2,0,1])--->concat(axis=2)----->transpose(axis=[1,2,0])----->out1(shape=10,25,30)
Output graph:
constant(shape=[10,5,30])
|
V
input(shape=10,20,30)--->concat(axis=1)----->out1(shape=10,25,30)
"""
def test_concat_pattern_5(self):
    """A transpose-concat(const)-transpose chain collapses to one concat."""
    const_val = np.random.rand(30, 10, 5)

    @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20, 30))])
    def prog(x):
        t = mb.transpose(x=x, perm=[2, 0, 1])
        joined = mb.concat(values=[t, mb.const(val=const_val)], axis=2)
        return mb.transpose(x=joined, perm=[1, 2, 0])

    prev_prog, _, block = apply_pass_and_basic_check(
        prog, "common::reduce_transposes"
    )
    self.assertEqual(
        get_op_types_in_program(prev_prog), ["transpose", "concat", "transpose"]
    )
    self.assertEqual(get_op_types_in_program(prog), ["concat"])
    assert_model_is_valid(
        prog,
        {"x": (10, 20, 30)},
        expected_output_shapes={block.outputs[0].name: (10, 25, 30)},
    )
"""
Input graph:
input2(shape=30,10,20)-----|
|
input(shape=10,20,30)--->transpose(axis=[2,0,1])----->relu-----|----->concat(axis=2)------>out1(shape=90,10,20)
| |
|-->relu-----|
|
|-->relu---->transpose(axis=[1,2,0])---->out2(shape=10,20,30)
|
|-->relu---->transpose(axis=[1,2,0])---->out3(shape=10,20,30)
|
|-->relu---->transpose(axis=[1,2,0])---->out4(shape=10,20,30)
Output graph:
input2(shape=30,10,20)-----|
|
input(shape=10,20,30)----->relu--->transpose(axis=[2,0,1])-----|----->concat(axis=2)------>out1(shape=90,10,20)
| |
|-->relu--->transpose(axis=[2,0,1])-----|
|
|-->relu---->out2(shape=10,20,30)
|
|-->relu---->out3(shape=10,20,30)
|
|-->relu---->out4(shape=10,20,30)
Output graph:
"""
def test_concat_pattern_6(self):
    """Five relus off one transpose: the concat branch keeps transposes, the rest cancel."""
    @mb.program(
        input_specs=[
            mb.TensorSpec(shape=(10, 20, 30)),
            mb.TensorSpec(shape=(30, 10, 20)),
        ]
    )
    def prog(x, y):
        t = mb.transpose(x=x, perm=[2, 0, 1])
        relus = [mb.relu(x=t) for _ in range(5)]
        cat_out = mb.concat(values=[relus[0], relus[1], y], axis=0)
        restored = [mb.transpose(x=r, perm=[1, 2, 0]) for r in relus[2:]]
        return cat_out, restored[0], restored[1], restored[2]

    prev_prog, _, block = apply_pass_and_basic_check(
        prog, "common::reduce_transposes"
    )
    expected_before = ["transpose"] + ["relu"] * 5 + ["concat"] + ["transpose"] * 3
    self.assertEqual(get_op_types_in_program(prev_prog), expected_before)
    expected_after = ["relu"] * 5 + ["transpose", "transpose", "concat"]
    self.assertEqual(get_op_types_in_program(prog), expected_after)
    assert_model_is_valid(
        prog,
        {"x": (10, 20, 30), "y": (30, 10, 20)},
        expected_output_shapes={
            block.outputs[0].name: (90, 10, 20),
            block.outputs[1].name: (10, 20, 30),
            block.outputs[2].name: (10, 20, 30),
            block.outputs[3].name: (10, 20, 30),
        },
    )
"""
Input graph:
input(shape=1,4,5,6)--->transpose(axis=[0,3,2,1])--->relu---->split(axis=1, num_splits=2)----->transpose(axis=[0,3,2,1])----->out1(shape=1,4,5,3)
|
v
transpose(axis[0,3,2,1])-------------------------->out2(shape=1,4,5,3)
Output graph:
input(shape=1,4,5,6)------> relu ---->split(axis=3)--->out1(shape=1,4,5,3)
|
v
out2(shape=1,4,5,3)
"""
def test_split_nd_pattern_0(self):
    """Transposes around a 2-way split cancel; the split axis moves 1 -> 3."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 5, 6))])
    def prog(x):
        t = mb.transpose(x=x, perm=[0, 3, 2, 1])
        t = mb.relu(x=t)
        first, second = mb.split(x=t, axis=1, num_splits=2)
        return (
            mb.transpose(x=first, perm=[0, 3, 2, 1]),
            mb.transpose(x=second, perm=[0, 3, 2, 1]),
        )

    prev_prog, _, block = apply_pass_and_basic_check(
        prog, "common::reduce_transposes"
    )
    self.assertEqual(
        get_op_types_in_program(prev_prog),
        ["transpose", "relu", "split", "transpose", "transpose"],
    )
    self.assertEqual(get_op_types_in_program(prog), ["relu", "split"])
    assert_model_is_valid(
        prog,
        {"x": (1, 4, 5, 6)},
        expected_output_shapes={
            block.outputs[0].name: (1, 4, 5, 3),
            block.outputs[1].name: (1, 4, 5, 3),
        },
    )
    self.assertEqual(block.find_ops(op_type="split")[0].axis.val, 3)
"""
Input graph:
input(shape=1,4,5,6)--->transpose(axis=[0,3,2,1])--->relu---->split(axis=1, num_splits=6)----->transpose(axis=[0,3,2,1])----->out1(shape=1,4,5,1)
|
v
transpose(axis[0,3,2,1])-------------------------------------->out2(shape=1,4,5,1)
Output graph:
input(shape=1,4,5,6)------>relu---->split(axis=3)--->out1(shape=1,4,5,1)
|
v
out2(shape=1,4,5,1)
"""
def test_split_nd_pattern_1(self):
    """Six-way split: all surrounding transposes cancel; split axis becomes 3."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 5, 6))])
    def prog(x):
        t = mb.transpose(x=x, perm=[0, 3, 2, 1])
        t = mb.relu(x=t)
        pieces = mb.split(x=t, axis=1, num_splits=6)
        return tuple(mb.transpose(x=p, perm=[0, 3, 2, 1]) for p in pieces)

    prev_prog, _, block = apply_pass_and_basic_check(
        prog, "common::reduce_transposes"
    )
    self.assertEqual(
        get_op_types_in_program(prev_prog),
        ["transpose", "relu", "split"] + ["transpose"] * 6,
    )
    self.assertEqual(get_op_types_in_program(prog), ["relu", "split"])
    assert_model_is_valid(
        prog,
        {"x": (1, 4, 5, 6)},
        expected_output_shapes={
            block.outputs[i].name: (1, 4, 5, 1) for i in range(6)
        },
    )
    self.assertEqual(block.find_ops(op_type="split")[0].axis.val, 3)
"""
Input graph:
input(shape=1,4,5,6)--->transpose(axis=[0,3,2,1])---> split(axis=1, num_splits=2) ----> concat(axis=1) ----->transpose(axis=[0,3,2,1]) ----->out1(shape=1,4,5,6)
| ^
v |
relu() ----------------------
Output graph:
input(shape=1,4,5,6)------>split(axis=3)--->concat(axis=3) -------> out1(shape=1,4,5,6)
| ^
v |
relu() --------------
"""
def test_split_nd_pattern_2(self):
    """Split feeding relu and concat: the outer transposes cancel, axes move to 3."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 4, 5, 6))])
    def prog(x):
        t = mb.transpose(x=x, perm=[0, 3, 2, 1])
        left, right = mb.split(x=t, axis=1, num_splits=2)
        activated = mb.relu(x=left)
        merged = mb.concat(values=[activated, right], axis=1)
        return mb.transpose(x=merged, perm=[0, 3, 2, 1])

    prev_prog, _, block = apply_pass_and_basic_check(
        prog, "common::reduce_transposes"
    )
    self.assertEqual(
        get_op_types_in_program(prev_prog),
        ["transpose", "split", "relu", "concat", "transpose"],
    )
    self.assertEqual(get_op_types_in_program(prog), ["split", "relu", "concat"])
    assert_model_is_valid(
        prog,
        {"x": (1, 4, 5, 6)},
        expected_output_shapes={block.outputs[0].name: (1, 4, 5, 6)},
    )
    self.assertEqual(block.find_ops(op_type="split")[0].axis.val, 3)
"""
Input graph:
input(shape=1,5,5,3)----->transpose(axis=[0,3,1,2])
|
---->relu-------------->transpose(axis=[0,2,3,1])
| |
| V
| relu
| |
| V
| transpose(axis=[0,3,1,2])
| |
| V
----------------> add --------> relu---->pool---->out(shape=1,3,5,5)
Output graph:
input(shape=1,5,5,3)---->relu------------------------> relu
| |
| V
----------------> add
|
V
relu
|
V
transpose(axis=[0,3,1,2])-->pool---->out(shape=1,3,5,5)
"""
def test_skip_connection_pattern_0(self):
    """Residual add across transposed branches; one transpose survives before the pool."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 5, 5, 3))])
    def prog(x):
        trunk = mb.transpose(x=x, perm=[0, 3, 1, 2])
        trunk = mb.relu(x=trunk)
        branch = mb.transpose(x=trunk, perm=[0, 2, 3, 1])
        branch = mb.relu(x=branch)
        branch = mb.transpose(x=branch, perm=[0, 3, 1, 2])
        merged = mb.add(x=trunk, y=branch)
        merged = mb.relu(x=merged)
        return mb.avg_pool(
            x=merged, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid"
        )

    prev_prog, _, block = apply_pass_and_basic_check(
        prog, "common::reduce_transposes"
    )
    self.assertEqual(
        get_op_types_in_program(prev_prog),
        [
            "transpose",
            "relu",
            "transpose",
            "relu",
            "transpose",
            "add",
            "relu",
            "avg_pool",
        ],
    )
    self.assertEqual(
        get_op_types_in_program(prog),
        ["relu", "relu", "add", "relu", "transpose", "avg_pool"],
    )
    assert_model_is_valid(
        prog,
        {"x": (1, 5, 5, 3)},
        expected_output_shapes={block.outputs[0].name: (1, 3, 5, 5)},
    )
"""
Input graph:
input(shape=1,5,5,3)----->transpose(axis=[0,3,1,2])
|
---->relu-------------->transpose(axis=[0,2,3,1])
| |
| V
| relu
| |
| V
| transpose(axis=[0,3,1,2])
| |
| V
----------------> add -->transpose(axis=[0,2,3,1])
|
V
relu---->pool---->out(shape=1,5,5,3)
Output graph:
input(shape=1,5,5,3)---->relu------------------------> relu
| |
| V
----------------> add
|
V
relu
|
V
pool---->out(shape=1,5,5,3)
"""
def test_skip_connection_pattern_1(self):
    """Like pattern 0, but the add is transposed back, so no transpose survives."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 5, 5, 3))])
    def prog(x):
        trunk = mb.transpose(x=x, perm=[0, 3, 1, 2])
        trunk = mb.relu(x=trunk)
        branch = mb.transpose(x=trunk, perm=[0, 2, 3, 1])
        branch = mb.relu(x=branch)
        branch = mb.transpose(x=branch, perm=[0, 3, 1, 2])
        merged = mb.add(x=trunk, y=branch)
        merged = mb.transpose(x=merged, perm=[0, 2, 3, 1])
        merged = mb.relu(x=merged)
        return mb.avg_pool(
            x=merged, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid"
        )

    prev_prog, _, block = apply_pass_and_basic_check(
        prog, "common::reduce_transposes"
    )
    self.assertEqual(
        get_op_types_in_program(prev_prog),
        [
            "transpose",
            "relu",
            "transpose",
            "relu",
            "transpose",
            "add",
            "transpose",
            "relu",
            "avg_pool",
        ],
    )
    self.assertEqual(
        get_op_types_in_program(prog), ["relu", "relu", "add", "relu", "avg_pool"]
    )
    assert_model_is_valid(
        prog,
        {"x": (1, 5, 5, 3)},
        expected_output_shapes={block.outputs[0].name: (1, 5, 5, 3)},
    )
"""
Input graph:
input(shape=2,5)--->transpose(axis=[1,0])--->transpose(axis=[1,0])-->reduce(axis=1)
| |
| V
| transpose(axis=[1,0])
| |
| V
-------------------------------------------->add------->out(shape=5,2)
Output graph:
input(shape=2,5)--->reduce(axis=1)---->add---->transpose(axis=[1,0])--->out(shape=5,2)
| ^
| |
------------------------
"""
def test_residual_with_unmaterialized_output(self):
    """Residual whose only output is unmaterialized: a trailing transpose is inserted."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 5))])
    def prog(x):
        flipped = mb.transpose(x=x, perm=[1, 0])
        unflipped = mb.transpose(x=flipped, perm=[1, 0])
        reduced = mb.reduce_mean(x=unflipped, axes=[1], keep_dims=True)
        reduced_t = mb.transpose(x=reduced, perm=[1, 0])
        return mb.add(x=flipped, y=reduced_t)

    prev_prog, _, block = apply_pass_and_basic_check(
        prog, "common::reduce_transposes"
    )
    self.assertEqual(
        get_op_types_in_program(prev_prog),
        ["transpose", "transpose", "reduce_mean", "transpose", "add"],
    )
    self.assertEqual(
        get_op_types_in_program(prog), ["reduce_mean", "add", "transpose"]
    )
    assert_model_is_valid(
        prog,
        {"x": (2, 5)},
        expected_output_shapes={block.outputs[0].name: (5, 2)},
    )
"""
Input graph:
input(shape=2,5)--->transpose(axis=[1,0])--->transpose(axis=[1,0])-->reduce(axis=1)
| |
| V
| transpose(axis=[1,0])
| |
| V
-------------------------------------------->add------->out1(shape=5,2)
|
V
relu------->out2(shape=5,2)
Output graph:
input(shape=2,5)--->reduce(axis=1)----> add ----->transpose(axis=[1,0])----->out1(shape=5,2)
| |
| V
---------------------> relu----->transpose(axis=[1,0])----->out2(shape=5,2)
"""
def test_residual_with_unmaterialized_multiple_output(self):
    """Residual with two unmaterialized outputs: each one gets its own transpose."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 5))])
    def prog(x):
        flipped = mb.transpose(x=x, perm=[1, 0])
        unflipped = mb.transpose(x=flipped, perm=[1, 0])
        reduced = mb.reduce_mean(x=unflipped, axes=[1], keep_dims=True)
        reduced_t = mb.transpose(x=reduced, perm=[1, 0])
        total = mb.add(x=flipped, y=reduced_t)
        return total, mb.relu(x=total)

    prev_prog, _, block = apply_pass_and_basic_check(
        prog, "common::reduce_transposes"
    )
    self.assertEqual(
        get_op_types_in_program(prev_prog),
        ["transpose", "transpose", "reduce_mean", "transpose", "add", "relu"],
    )
    self.assertEqual(
        get_op_types_in_program(prog),
        ["reduce_mean", "add", "relu", "transpose", "transpose"],
    )
    assert_model_is_valid(
        prog,
        {"x": (2, 5)},
        expected_output_shapes={
            block.outputs[0].name: (5, 2),
            block.outputs[1].name: (5, 2),
        },
    )
"""
Input graph:
input(shape=2,5)---->transpose(axis=[1,0])------>relu----->transpose(axis=[1,0])------>out2(shape=2,5)
|
------->out1(shape=5,2)
Output graph:
input(shape=2,5)---->relu-----> out2(shape=2,5)
|
V
transpose(axis=[1,0]) -----> out1(shape=5,2)
"""
def test_materialized_output_reuse(self):
    """One value is reused both directly as an output and through a transpose."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 5))])
    def prog(x):
        flipped = mb.transpose(x=x, perm=[1, 0])
        activated = mb.relu(x=flipped)
        return activated, mb.transpose(x=activated, perm=[1, 0])

    prev_prog, _, block = apply_pass_and_basic_check(
        prog, "common::reduce_transposes"
    )
    self.assertEqual(
        get_op_types_in_program(prev_prog), ["transpose", "relu", "transpose"]
    )
    self.assertEqual(get_op_types_in_program(prog), ["relu", "transpose"])
    assert_model_is_valid(
        prog,
        {"x": (2, 5)},
        expected_output_shapes={
            block.outputs[0].name: (5, 2),
            block.outputs[1].name: (2, 5),
        },
    )
"""
Input graph:
input(shape=1,2,5,5)----->transpose(axis=[0,2,3,1])------->add------------>transpose(axis=[0,3,1,2])--->out1(shape=1,2,5,5)
| ^ |
| | |
---->relu ----->transpose(axis=[0,3,1,2])--->out2(shape=1,2,5,5)
Output graph:
input(shape=1,2,5,5)----->add------->out1(shape=1,2,5,5)
| ^ |
| | |
|------>relu ------identity(renaming)---->out2(shape=1,2,5,5)
"""
def test_fusion_with_double_outputs(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))])
def prog(x):
x1 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x2 = mb.relu(x=x1)
x3 = mb.add(x=x1, y=x2)
y1 = mb.transpose(x=x3, perm=[0, 3, 1, 2])
y2 = mb.transpose(x=x3, perm=[0, 3, 1, 2])
return y1, y2
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog),
["transpose", "relu", "add", "transpose", "transpose"],
)
self.assertEqual(get_op_types_in_program(prog), ["relu", "add", "identity"])
assert block.find_ops(op_type="relu")[0].inputs["x"] == block.inputs["x"]
assert block.find_ops(op_type="add")[0].inputs["x"] == block.inputs["x"]
assert (
block.find_ops(op_type="add")[0].inputs["y"]
== block.find_ops(op_type="relu")[0].outputs[0]
)
assert_model_is_valid(
prog,
{"x": (1, 2, 5, 5)},
expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)},
| |
<reponame>ma-compbio/MATCHA
from pybloom_live import BloomFilter
import multiprocessing
from torch.nn.utils.rnn import pad_sequence
import time
from tqdm import tqdm, trange
import argparse
import warnings
import random
from Modules import *
from utils import *
import datetime
cpu_num = multiprocessing.cpu_count()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
warnings.filterwarnings("ignore")
def get_free_gpu():
    """Select the CUDA device with the most free memory and make it current.

    Queries ``nvidia-smi`` for per-GPU free memory and calls
    ``torch.cuda.set_device`` on the GPU with the maximum. Best-effort:
    if the query fails or its output cannot be parsed, the current
    device is left unchanged.
    """
    import subprocess
    try:
        # Capture the command output directly instead of round-tripping
        # through a './tmp' file (the old approach leaked the file, left
        # the handle open, and crashed when nvidia-smi was missing).
        smi = subprocess.run(
            'nvidia-smi -q -d Memory |grep -A4 GPU|grep Free',
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
            check=True,
        )
        memory_available = [
            int(line.split()[2]) for line in smi.stdout.splitlines() if line.strip()
        ]
        if not memory_available:
            return
        best_gpu = int(np.argmax(memory_available))
        print("setting to gpu:%d" % best_gpu)
        torch.cuda.set_device(best_gpu)
    except (OSError, subprocess.CalledProcessError, ValueError, IndexError):
        # No nvidia-smi or unexpected output: keep the default device.
        return
if torch.cuda.is_available():
get_free_gpu()
def forward_op_batch(
        model,
        loss_func,
        batch_data,
        batch_weight,
        y=""):
    """Run one classification forward pass on a batch.

    When no labels are supplied (``y`` is empty), negative samples are
    generated on the fly via ``generate_negative`` and the batch is
    shuffled; otherwise every sample gets a size weight of 1.

    :returns: (sigmoid(pred), labels, loss, recon_loss, weights, sizes)
    """
    inputs, weights = batch_data, batch_weight
    if len(y) > 0:
        sizes = torch.ones((len(y), 1))
    else:
        inputs, y, weights, sizes = generate_negative(
            inputs, "train_dict", weights, neg_num=neg_num)
        inputs, y, weights, sizes = sync_shuffle([inputs, y, weights, sizes])
    # Forward pass; the model also returns its reconstruction loss.
    pred, recon_loss = model(inputs, return_recon=True)
    loss = loss_func(pred, y, weight=weights)
    return F.sigmoid(pred), y, loss, recon_loss, weights, sizes
def forward_op_batch_regress(
        model,
        loss_func,
        batch_data,
        batch_weight,
        y=""):
    """Forward pass for the regression/ranking variant of the task.

    Samples are processed in consecutive pairs: the batch is truncated
    to an even length, per-sample predictions are made, and pairs with
    distinct targets are turned into a pairwise comparison (difference
    of the two softplus outputs vs. which target is smaller).

    NOTE(review): the returned ``loss`` is the plain MSE over all
    pre-pairing predictions; ``loss_func`` and the margin-ranking
    alternatives are commented out below -- confirm this is intended.

    :returns: (sigmoid(pred_diff), pair labels, loss, recon_loss,
              pair weights, pair sizes)
    """
    x = batch_data
    w = batch_weight
    # When label is not generated, prepare the data
    if len(y) == 0:
        x, y, w, s = generate_negative(x, "train_dict", w,neg_num=1)
        x, y, w, s = sync_shuffle([x, y, w, s])
    else:
        s = torch.ones((len(y), 1))
    # Drop the last sample when the batch is odd so it splits into pairs.
    if len(x) % 2 == 1:
        batch_length = (len(x) - 1)
        x = x[:batch_length]
        w = w[:batch_length]
        y = y[:batch_length]
        s = s[:batch_length]
        batch_length = int(batch_length / 2)
    else:
        batch_length = int(len(x) / 2)
    # forward
    pred, recon_loss = model(x, return_recon=True)
    pred = F.softplus(pred)
    loss = F.mse_loss(pred, y)
    # View every two consecutive samples as one comparison pair.
    pred = pred.view(batch_length,2)
    y = y.view(batch_length,2)
    w = w.view(batch_length,2)
    s = s.view(batch_length,2)
    # Index (0 or 1) of the smaller target within each pair...
    l_back = torch.argmin(y,dim=-1,keepdim=False)
    # ...and the same information as a +/-1 ranking label.
    l = l_back.clone()
    l[l == 0] = -1
    # Keep only pairs whose targets actually differ.
    mask = y[:,0]!=y[:,1]
    l = l[mask].float()
    l_back = l_back[mask].float()
    pred = pred[mask]
    w = w[mask,0]
    s = s[mask,0]
    # print ("l,pred",l, pred)
    # , weight=s.float().view(-1, 1).to(device)
    # loss = loss_func(pred[:,0], pred[:,1], l, margin=0.1)
    y = l_back
    # Pair score: positive when the first element is predicted larger.
    pred = pred[:,0] - pred[:,1]
    # pred = F.sigmoid(pred)
    # loss = loss_func(pred, y)
    # print (y)
    return F.sigmoid(pred), y, loss, recon_loss, w, s
def train_epoch(
        model,
        loss_func,
        training_data,
        optimizer,
        batch_size):
    """Run one training epoch over ``training_data``.

    ``training_data`` is a pair (edges, edge_weight); labels are
    generated inside the forward function unless a non-empty ``y`` is
    prepared below. ``optimizer`` is a list of optimizers, all stepped
    per batch. Relies on module-level globals: ``task_mode``,
    ``alpha``, ``beta``, ``max_size``, ``sync_shuffle``,
    ``roc_auc_cuda`` and ``accuracy``.

    :returns: (mean BCE loss, mean recon loss, accuracy, auc, aupr)
    """
    # Epoch operation in training phase
    # print (len(train_dict[min_size]), train_dict[min_size].capacity, len(test_dict[min_size]))
    edges, edge_weight = training_data
    y = torch.tensor([])
    # y = training_y
    # Permutate all the data
    if len(y) > 0:
        print("existing y")
        edges, edge_weight, y = sync_shuffle([edges, edge_weight, y])
    else:
        edges, edge_weight = sync_shuffle([edges, edge_weight])
    model.train()
    # Classification and regression use different forward passes.
    if task_mode == 'class':
        forward_func = forward_op_batch
    elif task_mode == 'regress':
        forward_func = forward_op_batch_regress
    bce_total_loss = 0
    recon_total_loss = 0
    acc_list, y_list, pred_list, weight_list, size_list = [], [], [], [], []
    batch_num = int(math.floor(len(edges) / batch_size))
    bar = trange(
        batch_num,
        mininterval=0.1,
        desc=' - (Training) ',
        leave=False,
    )
    for i in bar:
        batch_edge = edges[i * batch_size:(i + 1) * batch_size]
        batch_edge_weight = edge_weight[i * batch_size:(i + 1) * batch_size]
        batch_y = ""
        if len(y) > 0:
            batch_y = y[i * batch_size:(i + 1) * batch_size]
            if len(batch_y) == 0:
                continue
        pred, batch_y, loss_bce, loss_recon, batch_w, batch_s = forward_func(
            model, loss_func, batch_edge, batch_edge_weight, y=batch_y)
        # Total loss mixes the prediction and reconstruction terms.
        loss = loss_bce * alpha + loss_recon * beta
        # loss = loss_bce + loss_recon
        # acc_list.append(accuracy(pred, batch_y))
        y_list.append(batch_y)
        pred_list.append(pred)
        weight_list.append(batch_w)
        size_list.append(batch_s)
        for opt in optimizer:
            opt.zero_grad()
        # backward
        loss.backward()
        # update parameters
        for opt in optimizer:
            opt.step()
        # NOTE(review): the running means shown here lag by one batch,
        # since the totals are only updated after set_description.
        bar.set_description(" - (Training) BCE:  %.4f  recon: %.4f" %
                            (bce_total_loss / (i + 1), recon_total_loss / (i + 1)))
        bce_total_loss += loss_bce.item()
        recon_total_loss += loss_recon.item()
    y = torch.cat(y_list)
    pred = torch.cat(pred_list)
    size_list = torch.cat(size_list)
    weight_list = torch.cat(weight_list)
    auc1, auc2 = roc_auc_cuda(y, pred, size_list, max_size)
    acc = accuracy(pred, y, size_list, max_size)
    return bce_total_loss / batch_num, recon_total_loss / batch_num, acc, auc1, auc2
def eval_epoch(model, loss_func, validation_data, batch_size):
    ''' Epoch operation in evaluation phase.

    ``validation_data`` is a pair (edges, edge_weight). Evaluates on a
    shuffled subsample (up to 10000 examples) with gradients disabled.
    Relies on module-level globals: ``task_mode``, ``max_size``,
    ``sync_shuffle``, ``accuracy`` and ``roc_auc_cuda``.

    :returns: (mean BCE loss, mean recon loss, accuracy, auc, aupr)
    '''
    bce_total_loss = 0
    recon_total_loss = 0
    model.eval()
    # Same task-dependent forward function as in training.
    if task_mode == 'class':
        forward_func = forward_op_batch
    elif task_mode == 'regress':
        forward_func = forward_op_batch_regress
    with torch.no_grad():
        validation_data, validation_weight = validation_data
        y = ""
        # Shuffle and cap the evaluation set at 10000 samples.
        validation_data, validation_weight = sync_shuffle(
            [validation_data, validation_weight], 10000)
        pred, label, size_list, weight_list = [], [], [], []
        for i in tqdm(range(int(math.floor(len(validation_data) / batch_size))),
                      mininterval=0.1, desc=' - (Validation) ', leave=False):
            # prepare data
            batch_edge = validation_data[i * batch_size:(i + 1) * batch_size]
            batch_edge_weight = validation_weight[i * batch_size:(i + 1) * batch_size]
            # if len(y) == 0:
            #     batch_x, batch_y, batch_w, batch_s = generate_negative(
            #         batch_x, "test_dict", weight=batch_w, neg_num=neg_num)
            # else:
            #     batch_y = y[i * batch_size:(i + 1) * batch_size]
            #
            # batch_x, batch_y, batch_w, batch_s = sync_shuffle(
            #     [batch_x, batch_y, batch_w, batch_s])
            # pred_batch, recon_loss = model(batch_x, return_recon=True)
            # loss = loss_func(pred_batch, batch_y)
            pred_batch, batch_y, loss, recon_loss, batch_w, batch_s = forward_func(
                model, loss_func, batch_edge, batch_edge_weight)
            size_list.append(batch_s)
            pred.append(pred_batch)
            label.append(batch_y)
            weight_list.append(batch_edge_weight)
            recon_total_loss += recon_loss.item()
            bce_total_loss += loss.item()
        pred = torch.cat(pred, dim=0)
        label = torch.cat(label, dim=0)
        size_list = torch.cat(size_list, dim=0)
        # weight_list = torch.cat(weight_list, dim=0)
        acc = accuracy(pred, label, size_list, max_size)
        auc1, auc2 = roc_auc_cuda(label, pred, size_list, max_size)
        # NOTE(review): 'i' is the last loop index; this raises NameError
        # when there are zero batches (len(validation_data) < batch_size).
        return bce_total_loss / (i + 1), recon_total_loss / \
            (i + 1), acc, auc1, auc2
def train(model,
          loss,
          training_data,
          validation_data,
          optimizer,
          epochs,
          batch_size):
    """Full training loop with per-epoch validation and checkpointing.

    Each epoch draws a fresh sample of edges from a ``DataGenerator``,
    trains, evaluates, and saves a checkpoint whenever the validation
    AUPR ties or beats the best seen so far. After the loop, the best
    checkpoint is reloaded and evaluated once more. Relies on
    module-level globals: ``min_size``, ``max_size``, ``temp_dir``,
    ``model_name`` and ``save_embeddings``.
    """
    # Best-so-far tracking; seeded with 0 so the first epoch can win.
    valid_accus = [0]
    edges, edge_weight = training_data
    training_data_generator = DataGenerator(
        edges, edge_weight, int(batch_size),1000, min_size=min_size, max_size=max_size)
    start = time.time()
    for epoch_i in range(epochs):
        # Dump the current embeddings before every epoch.
        save_embeddings(model, True)
        print('[ Epoch', epoch_i, 'of', epochs, ']')
        start = time.time()
        # Each epoch trains on a freshly sampled slice of the data.
        edges_part, edge_weight_part = training_data_generator.next_iter()
        training_data_new = edges_part, edge_weight_part
        bce_loss, recon_loss, train_accu, auc1, auc2 = train_epoch(model, loss, training_data_new, optimizer,
                                                                   batch_size)
        print(
            ' - (Training) bce: {bce_loss: 7.4f},'
            'recon: {recon_loss: 7.4f}'
            ' acc: {accu}, auc: {auc1}, aupr: {auc2}, '
            'elapse: {elapse:3.3f} s'.format(
                bce_loss=bce_loss,
                recon_loss=recon_loss,
                accu=train_accu,
                auc1=auc1,
                auc2=auc2,
                elapse=(
                    time.time() - start)))
        start = time.time()
        valid_bce_loss, recon_loss, valid_accu, valid_auc1, valid_auc2 = eval_epoch(model, loss, validation_data,
                                                                                    batch_size)
        print(
            ' - (Validation-hyper) bce: {bce_loss: 7.4f}, recon: {recon_loss: 7.4f},'
            ' acc: {accu},'
            ' auc: {auc1}, aupr: {auc2},'
            'elapse: {elapse:3.3f} s'.format(
                bce_loss=valid_bce_loss,
                recon_loss=recon_loss,
                accu=valid_accu,
                auc1=valid_auc1,
                auc2=valid_auc2,
                elapse=(
                    time.time() - start)))
        # NOTE(review): the AUPR appears to be parsed out of a formatted
        # string returned by roc_auc_cuda -- confirm the expected format.
        valid_aupr_final = float(valid_auc2.split(" ")[-2])
        # The current value is appended before the comparison, so the
        # checkpoint is saved whenever this epoch ties or beats the best.
        valid_accus += [valid_aupr_final]
        checkpoint = {
            'model_link': model.state_dict(),
            'epoch': epoch_i}
        if valid_aupr_final >= max(valid_accus):
            torch.save(checkpoint, os.path.join(temp_dir, model_name))
            torch.save(model, os.path.join(temp_dir, "model2load"))
        torch.cuda.empty_cache()
    # Reload the best checkpoint and report its validation metrics.
    checkpoint = torch.load(os.path.join(temp_dir, model_name))
    model.load_state_dict(checkpoint['model_link'])
    valid_bce_loss, recon_loss, valid_accu, valid_auc1, valid_auc2 = eval_epoch(model, loss, validation_data,
                                                                                batch_size)
    print(
        ' - (Validation-hyper) bce: {bce_loss: 7.4f}, recon: {recon_loss: 7.4f},'
        ' acc: {accu},'
        ' auc: {auc1}, aupr: {auc2},'
        'elapse: {elapse:3.3f} s'.format(
            bce_loss=valid_bce_loss,
            recon_loss=recon_loss,
            accu=valid_accu,
            auc1=valid_auc1,
            auc2=valid_auc2,
            elapse=(
                time.time() - start)))
def neighbor_check(temp, dict):
    """Return True if the hyperedge `temp` is already a known edge in `dict`.

    `temp` is an iterable of node ids; membership is tested on its tuple
    form, so `dict` is expected to be keyed by tuples of node ids.

    NOTE: the parameter name shadows the builtin `dict`; it is kept
    unchanged for compatibility with existing callers. The disabled
    "fuzzy" +/-1 neighbourhood variant that used to live here as
    commented-out code has been removed.
    """
    return tuple(temp) in dict
def generate_negative(x, dict1, weight="", neg_num=1):
    """Generate negative hyperedges by corrupting the positives in `x`.

    For each positive hyperedge roughly ceil(neg_num) corrupted copies are
    produced: a binomially-sized subset of its nodes is re-drawn uniformly
    within each node's own chromosome range. Candidates are rejected (and
    re-drawn) while they are still known positives, collapse to fewer nodes
    after de-duplication, or contain two nodes closer than `min_dis`.

    Relies on module globals: train_dict/test_dict, min_size/max_size,
    node2chrom, chrom_range, min_dis, device, task_mode, and the helpers
    neighbor_check/np2tensor_hyper.

    Args:
        x: iterable of positive hyperedges (arrays of node ids).
        dict1: 'train_dict' / 'test_dict' (resolved to the module-level
            mapping) or a mapping keyed by hyperedge size.
        weight: per-positive weights; the empty-string default is used as a
            sentinel for "all ones" (len("") == 0).
        neg_num: negatives generated per positive (ceil'd to an int).

    Returns:
        (x, y, w, size_list): padded node-id tensor of positives followed by
        negatives, labels, per-row weights, and per-row hyperedge sizes.
    """
    if len(weight) == 0:
        weight = torch.ones(len(x), dtype=torch.float)
    if dict1 == 'train_dict':
        dict1 = train_dict
    elif dict1 == 'test_dict':
        dict1 = test_dict
    # Pre-sample, per hyperedge size s, a pool of "how many nodes to change"
    # counts (Binomial(s, 0.5), zeros discarded) to pop from later.
    change_num_list = [[] for i in range(max_size + 1)]
    for s in range(min_size, max_size + 1):
        change_num = np.random.binomial(s, 0.5, int(len(x) * (math.ceil(neg_num) * 2)))
        change_num = change_num[change_num != 0]
        change_num_list[s] = list(change_num)
    neg_list = []
    new_x = []
    new_index = []
    neg_weight = []
    size_list = []
    size_neg_list = []
    for j, sample in enumerate(x):
        for i in range(int(math.ceil(neg_num))):
            decompose_sample = np.copy(sample)
            list1 = change_num_list[decompose_sample.shape[-1]]
            change_num = list1.pop()
            changes = np.random.choice(np.arange(decompose_sample.shape[-1]), change_num, replace=False)
            temp = np.copy(decompose_sample)
            trial = 0
            # `temp` starts as the positive itself, which is in dict1, so the
            # loop always runs at least once; it exits only when a corrupted
            # candidate is no longer a known positive.
            while neighbor_check(temp, dict1[(len(temp))]):
                temp = np.copy(decompose_sample)
                for change in changes:
                    if temp[change] not in node2chrom:
                        print(temp, decompose_sample)
                    # Redraw the node uniformly inside its chromosome's
                    # [start, end) node-id range.
                    chrom = node2chrom[temp[change]]
                    start, end = chrom_range[chrom]
                    temp[change] = int(
                        math.floor(
                            (end - start) * random.random())) + start
                temp = list(set(temp))
                if len(temp) < len(decompose_sample):
                    # Duplicate nodes after redraw: reset and retry.
                    temp = np.copy(decompose_sample)
                    continue
                temp.sort()
                # Reject candidates with two nodes within min_dis of each other.
                dis_list = []
                for k in range(len(temp) - 1):
                    dis_list.append(temp[k + 1] - temp[k])
                if np.min(dis_list) <= min_dis:
                    temp = np.copy(decompose_sample)
            if i == 0:
                size_list.append(len(decompose_sample))
            if len(temp) > 0:
                neg_list.append(temp)
                size_neg_list.append(len(temp))
                neg_weight.append(weight[j])
    pos_weight = weight
    pos_weight = torch.tensor(pos_weight).to(device)
    size_list = torch.tensor(size_list + size_neg_list)
    pos_part = np2tensor_hyper(list(x), dtype=torch.long)
    neg = np2tensor_hyper(neg_list, dtype=torch.long)
    # Variable-size hyperedges come back as a list; pad with id 0.
    if type(pos_part) == list:
        pos_part = pad_sequence(pos_part, batch_first=True, padding_value=0)
        neg = pad_sequence(neg, batch_first=True, padding_value=0)
    if len(neg) == 0:
        # Keep downstream torch.cat shapes valid with a single dummy negative.
        neg = torch.zeros((1, pos_part.shape[-1]),dtype=torch.long, device=device)
    pos_part = pos_part.to(device)
    neg = neg.to(device)
    if task_mode == 'class':
        # Classification: labels 1/0, positive rows weighted by pos_weight.
        y = torch.cat([torch.ones((len(pos_part), 1), device=device),
                       torch.zeros((len(neg), 1), device=device)], dim=0)
        w = torch.cat([torch.ones((len(pos_part), 1), device=device) * pos_weight.view(-1, 1),
                       torch.ones((len(neg), 1), device=device)])
        x = torch.cat([pos_part, neg])
    elif task_mode == 'regress':
        # Regression: targets are the weights themselves, uniform row weights.
        w = torch.cat([torch.ones((len(pos_part), 1), device=device),
                       torch.ones((len(neg), 1), device=device)], dim=0)
        y = torch.cat([torch.ones((len(pos_part), 1), device=device) * pos_weight.view(-1, 1),
                       torch.zeros((len(neg), 1), device=device)])
        x = torch.cat([pos_part, neg])
    else:
        print ("Wrong task mode")
        raise EOFError
    return x, y, w, size_list
def save_embeddings(model, origin=False):
    """Compute embeddings for all nodes, save them to ../embeddings.npy,
    and return them as a (num_nodes, dim) numpy array.

    Node ids are 1-based (0 is the padding id), hence the +1 shift below.

    NOTE(review): `embed` is only assigned when origin=True; calling with
    origin=False raises UnboundLocalError on the first batch. The only
    visible caller passes origin=True — confirm the intended non-origin path.
    """
    model.eval()
    with torch.no_grad():
        ids = np.arange(num_list[-1]) + 1
        ids = torch.Tensor(ids).long().to(device).view(-1, 1)
        embeddings = []
        # Process ids in batch_size chunks to bound GPU memory.
        for j in range(math.ceil(len(ids) / batch_size)):
            x = ids[j * batch_size:min((j + 1) * batch_size, len(ids))]
            if origin:
                embed = model.get_node_embeddings(x)
            embed = embed.detach().cpu().numpy()
            embeddings.append(embed)
        # Each batch is rank-3 (B, 1, D); squeeze the singleton middle axis.
        embeddings = np.concatenate(embeddings, axis=0)[:, 0, :]
        np.save("../embeddings.npy" , embeddings)
    torch.cuda.empty_cache()
    return embeddings
def predict(model, input):
    """Run `model` over `input` in large chunks and return stacked numpy scores."""
    model.eval()
    chunk = int(1e5)
    pieces = []
    with torch.no_grad():
        for step in trange(math.ceil(len(input) / chunk)):
            lo = step * chunk
            hi = min(lo + chunk, len(input))
            batch = np2tensor_hyper(input[lo:hi], dtype=torch.long)
            batch = pad_sequence(batch, batch_first=True, padding_value=0).to(device)
            pieces.append(model(batch).detach().cpu().numpy())
        stacked = np.concatenate(pieces, axis=0)
    torch.cuda.empty_cache()
    return stacked
def get_attributes():
    """Build the per-node attribute table: one-hot chromosome id plus a
    coordinate normalized by the first chromosome's node count.

    Row 0 is an all-zero padding entry so 1-based node ids index directly.
    """
    per_chrom = []
    for idx in range(len(num)):
        onehot = np.zeros((num[idx], len(chrom_list)))
        onehot[:, idx] = 1
        position = np.arange(num[idx]).reshape((-1, 1)).astype('float32')
        position /= num[0]
        per_chrom.append(np.concatenate([onehot, position], axis=-1))
    stacked = np.concatenate(per_chrom, axis=0)
    attribute_dict = np.concatenate(
        [np.zeros((1, stacked.shape[-1])), stacked], axis=0).astype('float32')
    print("attribute_dict", attribute_dict.shape)
    return attribute_dict
# --------------------------------------------------------------------------
# Script-level setup: read config, load per-chromosome node ranges, build the
# positive hyperedge set (quantile-filtered), and the initial embeddings.
# --------------------------------------------------------------------------
config = get_config()
bottle_neck = config['embed_dim']
size_list = config['k-mer_size']
min_size, max_size = int(np.min(size_list)), int(np.max(size_list))
temp_dir = config['temp_dir']
quantile_cutoff_for_positive = config['quantile_cutoff_for_positive']
quantile_cutoff_for_unlabel = config['quantile_cutoff_for_unlabel']
min_dis = config['min_distance']
chrom_list = config['chrom_list']
neg_num = 3
batch_size = 96
loss = F.binary_cross_entropy_with_logits
model_name = 'model.chkpt'
current_time = datetime.datetime.now()
task_mode = 'class'  # 'class' or 'regress'; see generate_negative
neighbor_mask = []
# chrom_range[c] = [start_node_id, end_node_id) for chromosome c;
# node2chrom maps a node id back to its chromosome index.
chrom_range = np.load(os.path.join(temp_dir,"chrom_range.npy"))
node2chrom = np.load(os.path.join(temp_dir,"node2chrom.npy"), allow_pickle=True).item()
num = []
for v in chrom_range:
    num.append(v[1] - v[0])
num_list = np.cumsum(num)
zero_num_list = np.array([0] + list(num_list))
print("Node type num", num)
data_list = []
weight_list = []
from sklearn.preprocessing import QuantileTransformer
# For each k-mer size, keep only hyperedges whose quantile-transformed
# frequency exceeds the positive cutoff.
for size in size_list:
    data = np.load(os.path.join(temp_dir,"all_%d_counter.npy" % size)).astype('int')
    weight = np.load(os.path.join(temp_dir,"all_%d_freq_counter.npy" % size)).astype('float32')
    print("before filter", "size", size, "length", len(data))
    weight = QuantileTransformer(n_quantiles=1000, output_distribution='uniform').fit_transform(weight.reshape((-1,1))).reshape((-1))
    mask = weight > quantile_cutoff_for_positive
    # mask = weight >= cutoff
    data = data[mask]
    weight = weight[mask]
    print("after filter", "size", size, "length", len(data))
    for datum in data:
        data_list.append(datum)
    weight_list.append(weight)
data = np.array(data_list)
weight = np.concatenate(weight_list,axis = 0)
# Initial per-chromosome embeddings: correlation of the intra-chromosome
# adjacency sub-matrix (NaNs from zero-variance rows set to 0).
embeddings_initial = []
inter_initial = np.load(os.path.join(temp_dir, "inter_adj.npy")).astype('float32')
adj = np.load(os.path.join(temp_dir, "intra_adj.npy")).astype('float32')
for v in chrom_range:
    temp = adj[v[0] - 1:v[1] - 1, v[0] - 1:v[1] - 1]
    temp = np.corrcoef(temp).astype('float32')
    temp[np.isnan(temp)] = 0.0
    print (temp.shape)
    embeddings_initial.append(temp)
attribute_dict = get_attributes()
num = torch.as_tensor(num)
num_list = torch.as_tensor(num_list)
print(num, num_list)
compress = True
# Note that, no matter how many node types are here, make sure the
# hyperedge (N1,N2,N3,...) has id, N1 < N2 < N3...
train_dict = test_dict
# Source repository: tomy-0000/pytorch-ssd
import argparse
import os
import logging
import sys
import itertools
import cv2
import datetime
import torch
from torch.utils.data import DataLoader, ConcatDataset
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR
from torch.utils.tensorboard import SummaryWriter
from vision.utils.misc import str2bool, Timer, freeze_net_layers, store_labels
from vision.ssd.ssd import MatchPrior
from vision.ssd.vgg_ssd import create_vgg_ssd
from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd
from vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite
from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite
from vision.ssd.mobilenetv3_ssd_lite import create_mobilenetv3_large_ssd_lite, create_mobilenetv3_small_ssd_lite
from vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite
from vision.datasets.voc_dataset import VOCDataset
from vision.datasets.open_images import OpenImagesDataset
from vision.datasets.coco_dataset import COCODataset
from vision.datasets.custom_dataset import CustomDataset
from vision.nn.multibox_loss import MultiboxLoss
from vision.ssd.config import vgg_ssd_config
from vision.ssd.config import mobilenetv1_ssd_config
from vision.ssd.config import squeezenet_ssd_config
from vision.ssd.data_preprocessing import TrainAugmentation, TestTransform
from vision.ssd.vgg_ssd import create_vgg_ssd, create_vgg_ssd_predictor
from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor
from vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite, create_mobilenetv1_ssd_lite_predictor
from vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite, create_squeezenet_ssd_lite_predictor
from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite, create_mobilenetv2_ssd_lite_predictor
from vision.ssd.mobilenetv3_ssd_lite import create_mobilenetv3_large_ssd_lite, create_mobilenetv3_small_ssd_lite
# --------------------------------------------------------------------------
# Command-line interface: dataset selection, network architecture, optimizer
# hyper-parameters, checkpoint loading, LR scheduling and runtime options.
# Arguments are parsed at import time (args is used by train()/test() below).
# --------------------------------------------------------------------------
parser = argparse.ArgumentParser(
    description='Single Shot MultiBox Detector Training With Pytorch')
parser.add_argument("--dataset_type", default="voc", type=str,
                    help='Specify dataset type. Currently support voc and open_images.')
parser.add_argument('--datasets', nargs='+', help='Dataset directory path')
parser.add_argument('--validation_dataset', help='Dataset directory path')
parser.add_argument('--balance_data', action='store_true',
                    help="Balance training data by down-sampling more frequent labels.")
parser.add_argument('--net', default="vgg16-ssd",
                    help="The network architecture, it can be mb1-ssd, mb1-lite-ssd, mb2-ssd-lite, mb3-large-ssd-lite, mb3-small-ssd-lite or vgg16-ssd.")
parser.add_argument('--freeze_base_net', action='store_true',
                    help="Freeze base net layers.")
parser.add_argument('--freeze_net', action='store_true',
                    help="Freeze all the layers except the prediction head.")
parser.add_argument('--mb2_width_mult', default=1.0, type=float,
                    help='Width Multiplifier for MobilenetV2')
# Params for SGD
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,
                    help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float,
                    help='Momentum value for optim')
parser.add_argument('--weight_decay', default=5e-4, type=float,
                    help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float,
                    help='Gamma update for SGD')
parser.add_argument('--base_net_lr', default=None, type=float,
                    help='initial learning rate for base net.')
parser.add_argument('--extra_layers_lr', default=None, type=float,
                    help='initial learning rate for the layers not in base net and prediction heads.')
# Params for loading pretrained basenet or checkpoints.
parser.add_argument('--base_net',
                    help='Pretrained base model')
parser.add_argument('--pretrained_ssd', help='Pre-trained base model')
parser.add_argument('--resume', default=None, type=str,
                    help='Checkpoint state_dict file to resume training from')
# Scheduler
parser.add_argument('--scheduler', default="multi-step", type=str,
                    help="Scheduler for SGD. It can one of multi-step and cosine")
# Params for Multi-step Scheduler
parser.add_argument('--milestones', default="80,100", type=str,
                    help="milestones for MultiStepLR")
# Params for Cosine Annealing
parser.add_argument('--t_max', default=120, type=float,
                    help='T_max value for Cosine Annealing Scheduler.')
# Train params
parser.add_argument('--batch_size', default=32, type=int,
                    help='Batch size for training')
parser.add_argument('--num_epochs', default=120, type=int,
                    help='the number epochs')
parser.add_argument('--num_workers', default=0, type=int,
                    help='Number of workers used in dataloading')
parser.add_argument('--validation_epochs', default=5, type=int,
                    help='the number epochs')
parser.add_argument('--debug_steps', default=100, type=int,
                    help='Set the debug log output frequency.')
parser.add_argument('--use_cuda', default=True, type=str2bool,
                    help='Use CUDA to train model')
parser.add_argument('--checkpoint_folder', default='models/',
                    help='Directory for saving checkpoint models')
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
args = parser.parse_args()
# Fall back to CPU when CUDA is unavailable or explicitly disabled.
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() and args.use_cuda else "cpu")
if args.use_cuda and torch.cuda.is_available():
    # Let cuDNN pick the fastest kernels for the (fixed) input sizes.
    torch.backends.cudnn.benchmark = True
    logging.info("Use Cuda.")
def train(loader, net, criterion, optimizer, device, debug_steps=100, epoch=-1):
    """Run one training epoch: forward pass, multibox loss, backward step,
    and periodic logging of rolling averages (also sent to TensorBoard)."""
    net.train(True)
    loss_sum = 0.0
    reg_sum = 0.0
    clf_sum = 0.0
    for step, (images, boxes, labels) in enumerate(loader):
        images = images.to(device)
        boxes = boxes.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        confidence, locations = net(images)
        regression_loss, classification_loss = criterion(confidence, locations, labels, boxes)  # TODO CHANGE BOXES
        total = regression_loss + classification_loss
        total.backward()
        optimizer.step()
        loss_sum += total.item()
        reg_sum += regression_loss.item()
        clf_sum += classification_loss.item()
        # Log rolling averages every debug_steps batches (skip step 0).
        if step and step % debug_steps == 0:
            avg_loss = loss_sum / debug_steps
            avg_reg_loss = reg_sum / debug_steps
            avg_clf_loss = clf_sum / debug_steps
            logging.info(
                f"Epoch: {epoch}, Step: {step}, " +
                f"Average Loss: {avg_loss:.4f}, " +
                f"Average Regression Loss {avg_reg_loss:.4f}, " +
                f"Average Classification Loss: {avg_clf_loss:.4f}"
            )
            writer.add_scalar("training loss", avg_loss, epoch*len(loader) + step)
            loss_sum = 0.0
            reg_sum = 0.0
            clf_sum = 0.0
def test(loader, net, criterion, device, epoch):
    """Evaluate on `loader`; logs the average loss to TensorBoard and returns
    (avg total loss, avg regression loss, avg classification loss)."""
    net.eval()
    total_sum = 0.0
    reg_sum = 0.0
    clf_sum = 0.0
    batches = 0
    for images, boxes, labels in loader:
        images = images.to(device)
        boxes = boxes.to(device)
        labels = labels.to(device)
        batches += 1
        with torch.no_grad():
            confidence, locations = net(images)
            regression_loss, classification_loss = criterion(confidence, locations, labels, boxes)
            loss = regression_loss + classification_loss
            total_sum += loss.item()
            reg_sum += regression_loss.item()
            clf_sum += classification_loss.item()
    writer.add_scalar("test loss", total_sum/batches, epoch)
    return total_sum / batches, reg_sum / batches, clf_sum / batches
def imwrite(dataset, net_type, epoch, model_path):
    """Visual spot-check of a checkpoint: run inference on the first 10
    samples of `dataset` and write annotated JPEGs to out/.

    Predicted boxes (top 10, prob >= 0.4) are drawn in BGR (255, 255, 0)
    with their confidence; ground-truth boxes in BGR (255, 0, 255).
    Exits the process on an unknown `net_type`.
    """
    num_classes = len(dataset.class_names)
    # Build an inference-mode (is_test=True) network for the architecture.
    if net_type == 'vgg16-ssd':
        net = create_vgg_ssd(num_classes, is_test=True)
    elif net_type == 'mb1-ssd':
        net = create_mobilenetv1_ssd(num_classes, is_test=True)
    elif net_type == 'mb1-ssd-lite':
        net = create_mobilenetv1_ssd_lite(num_classes, is_test=True)
    elif net_type == 'mb2-ssd-lite':
        net = create_mobilenetv2_ssd_lite(num_classes, is_test=True)
    elif net_type == 'mb3-large-ssd-lite':
        net = create_mobilenetv3_large_ssd_lite(num_classes, is_test=True)
    elif net_type == 'mb3-small-ssd-lite':
        net = create_mobilenetv3_small_ssd_lite(num_classes, is_test=True)
    elif net_type == 'sq-ssd-lite':
        net = create_squeezenet_ssd_lite(num_classes, is_test=True)
    else:
        print("The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.")
        sys.exit(1)
    net.load(model_path)
    # Wrap the network in the matching predictor (pre/post-processing).
    if net_type == 'vgg16-ssd':
        predictor = create_vgg_ssd_predictor(net, candidate_size=200)
    elif net_type == 'mb1-ssd':
        predictor = create_mobilenetv1_ssd_predictor(net, candidate_size=200)
    elif net_type == 'mb1-ssd-lite':
        predictor = create_mobilenetv1_ssd_lite_predictor(net, candidate_size=200)
    elif net_type == 'mb2-ssd-lite' or net_type == "mb3-large-ssd-lite" or net_type == "mb3-small-ssd-lite":
        predictor = create_mobilenetv2_ssd_lite_predictor(net, candidate_size=200)
    elif net_type == 'sq-ssd-lite':
        predictor = create_squeezenet_ssd_lite_predictor(net, candidate_size=200)
    else:
        # Defensive fallback; unreachable in practice since invalid net_type
        # already exited above.
        predictor = create_vgg_ssd_predictor(net, candidate_size=200)
    for i in range(10):
        image, orig_boxes, labels = dataset[i]
        # assumes the dataset yields RGB images — cv2 drawing/writing is BGR.
        orig_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        boxes, labels, probs = predictor.predict(image, 10, 0.4)
        for j in range(boxes.size(0)):  # predict
            box = boxes[j, :]
            box = [int(i) for i in box]
            cv2.rectangle(orig_image, (box[0], box[1]), (box[2], box[3]), (255, 255, 0), 1)
            label = f"{probs[j]:.2f}"
            cv2.putText(orig_image, label,
                        (box[0] + 3, box[1] + 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1,  # font scale
                        (255, 255, 0),
                        1)  # line type
        for j in range(orig_boxes.shape[0]):  # ground truth
            box = orig_boxes[j, :]
            cv2.rectangle(orig_image, (box[0], box[1]), (box[2], box[3]), (255, 0, 255), 1)
        path = f"out/{i:02}_{epoch:04}.jpg"
        cv2.imwrite(path, orig_image)
if __name__ == '__main__':
timer = Timer()
writer = SummaryWriter(f"runs/mb2-ssd-lite-{datetime.datetime.now().strftime('%m-%d-%H-%M-%S')}")
logging.info(args)
if args.net == 'vgg16-ssd':
create_net = create_vgg_ssd
config = vgg_ssd_config
elif args.net == 'mb1-ssd':
create_net = create_mobilenetv1_ssd
config = mobilenetv1_ssd_config
elif args.net == 'mb1-ssd-lite':
create_net = create_mobilenetv1_ssd_lite
config = mobilenetv1_ssd_config
elif args.net == 'sq-ssd-lite':
create_net = create_squeezenet_ssd_lite
config = squeezenet_ssd_config
elif args.net == 'mb2-ssd-lite':
create_net = lambda num: create_mobilenetv2_ssd_lite(num, width_mult=args.mb2_width_mult)
config = mobilenetv1_ssd_config
elif args.net == 'mb3-large-ssd-lite':
create_net = lambda num: create_mobilenetv3_large_ssd_lite(num)
config = mobilenetv1_ssd_config
elif args.net == 'mb3-small-ssd-lite':
create_net = lambda num: create_mobilenetv3_small_ssd_lite(num)
config = mobilenetv1_ssd_config
else:
logging.fatal("The net type is wrong.")
parser.print_help(sys.stderr)
sys.exit(1)
train_transform = TrainAugmentation(config.image_size, config.image_mean, config.image_std)
target_transform = MatchPrior(config.priors, config.center_variance,
config.size_variance, 0.5)
test_transform = TestTransform(config.image_size, config.image_mean, config.image_std)
logging.info("Prepare training datasets.")
datasets = []
for dataset_path in args.datasets:
if args.dataset_type == 'voc':
dataset = VOCDataset(dataset_path, transform=train_transform,
target_transform=target_transform)
label_file = os.path.join(args.checkpoint_folder, "voc-model-labels.txt")
store_labels(label_file, dataset.class_names)
num_classes = len(dataset.class_names)
elif args.dataset_type == 'open_images':
dataset = OpenImagesDataset(dataset_path,
transform=train_transform, target_transform=target_transform,
dataset_type="train", balance_data=args.balance_data)
label_file = os.path.join(args.checkpoint_folder, "open-images-model-labels.txt")
store_labels(label_file, dataset.class_names)
logging.info(dataset)
num_classes = len(dataset.class_names)
elif args.dataset_type == "coco":
dataset = COCODataset(dataset_path, transform=train_transform,
target_transform=target_transform)
num_classes = len(dataset.class_names)
elif args.dataset_type == "custom":
dataset = CustomDataset(dataset_path, args.batch_size*1000, transform=train_transform,
target_transform=target_transform)
num_classes = len(dataset.class_names)
else:
raise ValueError(f"Dataset type {args.dataset_type} is not supported.")
datasets.append(dataset)
# logging.info(f"Stored labels into file {label_file}.")
train_dataset = ConcatDataset(datasets)
logging.info("Train dataset size: {}".format(len(train_dataset)))
train_loader = DataLoader(train_dataset, args.batch_size,
num_workers=args.num_workers,
shuffle=True)
logging.info("Prepare Validation datasets.")
if args.dataset_type == "voc":
val_dataset = VOCDataset(args.validation_dataset, transform=test_transform,
target_transform=target_transform, is_test=True)
elif args.dataset_type == 'open_images':
val_dataset = OpenImagesDataset(dataset_path,
transform=test_transform, target_transform=target_transform,
dataset_type="test")
logging.info(val_dataset)
elif args.dataset_type == "coco":
val_dataset = COCODataset(dataset_path, transform=test_transform,
target_transform=target_transform, is_test=True)
elif args.dataset_type == "custom":
val_dataset = CustomDataset(dataset_path, args.batch_size*1000, transform=test_transform,
target_transform=target_transform, is_test=True)
logging.info("validation dataset size: {}".format(len(val_dataset)))
val_loader = DataLoader(val_dataset, args.batch_size,
num_workers=args.num_workers,
shuffle=False)
logging.info("Prepare Imwrite datasets.")
if args.dataset_type == "voc":
imwrite_dataset = VOCDataset(args.validation_dataset, is_test=True)
elif args.dataset_type == 'open_images':
imwrite_dataset = OpenImagesDataset(dataset_path, dataset_type="test")
logging.info(val_dataset)
elif args.dataset_type == "coco":
imwrite_dataset = COCODataset(dataset_path, is_test=True)
elif args.dataset_type == "custom":
imwrite_dataset = CustomDataset(dataset_path, args.batch_size, is_test=True)
logging.info("imwrite dataset size: {}".format(len(imwrite_dataset)))
logging.info("Build network.")
net = create_net(num_classes)
min_loss = -10000.0
last_epoch = -1
base_net_lr = args.base_net_lr if args.base_net_lr is not None else args.lr
extra_layers_lr = args.extra_layers_lr if args.extra_layers_lr is not None else args.lr
if args.freeze_base_net:
logging.info("Freeze base net.")
freeze_net_layers(net.base_net)
params = itertools.chain(net.source_layer_add_ons.parameters(), net.extras.parameters(),
net.regression_headers.parameters(), net.classification_headers.parameters())
params = [
{'params': itertools.chain(
net.source_layer_add_ons.parameters(),
net.extras.parameters()
), 'lr': extra_layers_lr},
{'params': itertools.chain(
net.regression_headers.parameters(),
net.classification_headers.parameters()
)}
]
elif args.freeze_net:
freeze_net_layers(net.base_net)
freeze_net_layers(net.source_layer_add_ons)
freeze_net_layers(net.extras)
params = itertools.chain(net.regression_headers.parameters(), net.classification_headers.parameters())
logging.info("Freeze all the layers except prediction heads.")
else:
params = [
{'params': net.base_net.parameters(), 'lr': base_net_lr},
{'params': itertools.chain(
net.source_layer_add_ons.parameters(),
net.extras.parameters()
), 'lr': extra_layers_lr},
{'params': itertools.chain(
net.regression_headers.parameters(),
net.classification_headers.parameters()
)}
]
timer.start("Load Model")
if args.resume:
logging.info(f"Resume from the model {args.resume}")
net.load(args.resume)
elif args.base_net:
logging.info(f"Init from base net {args.base_net}")
net.init_from_base_net(args.base_net)
elif args.pretrained_ssd:
logging.info(f"Init from pretrained ssd {args.pretrained_ssd}")
net.init_from_pretrained_ssd(args.pretrained_ssd)
logging.info(f'Took {timer.end("Load Model"):.2f} seconds to load the model.')
net.to(DEVICE)
criterion = MultiboxLoss(config.priors, iou_threshold=0.5, neg_pos_ratio=3,
center_variance=0.1, size_variance=0.2, device=DEVICE)
optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
logging.info(f"Learning rate: {args.lr}, Base net learning rate: {base_net_lr}, "
+ f"Extra Layers learning rate: {extra_layers_lr}.")
if args.scheduler == 'multi-step':
logging.info("Uses MultiStepLR scheduler.")
milestones = [int(v.strip()) for v in args.milestones.split(",")]
scheduler = MultiStepLR(optimizer, milestones=milestones,
gamma=0.1, last_epoch=last_epoch)
elif args.scheduler == 'cosine':
logging.info("Uses CosineAnnealingLR | |
from datetime import date, datetime, timedelta
import asyncio
import cmd
import getpass
import mmlsattendance
import os
import re
# When True, printer() also prints each class's attendance-list URL.
PRINT_ATTENDANCE_LIST = False
def print_subjects(SubjectDB_obj):
    """Pretty-print all subjects in `SubjectDB_obj` with their classes.

    Each class row shows a selection checkbox ([X]/[ ]), the class code and
    the class id; subjects are separated by a blank line.
    """
    total = len(SubjectDB_obj.subjects)
    width = len(str(total))
    indent = ' ' * (width + 2)
    header_indent = indent + ' ' * 3
    for position, subject in enumerate(SubjectDB_obj.subjects, 1):
        label = str(position).rjust(width)
        print(f"{label}. {subject.code} - {subject.name}")
        print(f"{indent}> Subject ID: {subject.id}")
        print(f"{indent}> Coordinator ID: {subject.coordinator_id}")
        print(f"{header_indent}Sel Class Class ID")
        for letter_code, kelas in enumerate(subject.classes, ord('a')):
            mark = 'X' if kelas.selected else ' '
            print(f"{indent}{chr(letter_code)}. [{mark}]{kelas.code:>6}{kelas.id:>9}")
        if position != total:
            print()
async def printer(queue):
    """Consume scraped class entries from `queue` and pretty-print each one.

    Runs forever; callers cancel the task once the queue is drained (see the
    scrape_and_print helpers in do_search). Each entry `f` carries the class
    schedule fields and the attendance URL(s).
    """
    while True:
        f = await queue.get()
        print()
        print(f"[{f.class_date} {f.start_time}-{f.end_time}] {f.subject_code} - {f.subject_name} ({f.class_code})")
        print(f.attendance_url)
        if PRINT_ATTENDANCE_LIST:
            if f.attendance_list_url:
                print(f.attendance_list_url)
        queue.task_done()
def change_selection(args, op):
    """Parses selection command arguments, creates a dict of sets -- where
    key is subject index and value is a set of class index -- and iterates
    through each item and its set elements which through it does select,
    deselect or toggle operation to classes at their index.

    op: True selects, False deselects, None toggles.
    Returns False when no parsable subject index was found; True otherwise.
    Operates on the module-level `subject_db`.
    """
    args_list = args.lower().split()  # E.g. ['1ab', '2ac', '3', '4a']
    if args_list and args_list[0] == 'all':
        # 'all' applies the operation to every class of every subject.
        for kelas in subject_db.classes:
            kelas.selected = op if op is not None else not kelas.selected
    else:
        op_dict = {}  # op_dict = {subject_index: {class_index, ...}, ...}
        for arg in args_list:
            # Leading digit(s) are the 1-based subject number on the printed list.
            re_obj = re.match('[0-9]+', arg[0])
            if re_obj is None:
                continue
            sub_idx = int(re_obj[0])-1
            if sub_idx < 0:
                continue
            # Trailing letters pick classes ('a' = first); no letters = all classes.
            re_obj = re.search('[a-z]+', arg)
            letters = re_obj[0] if re_obj is not None else ''
            class_choices = [ord(char)-ord('a') for char in letters]
            op_dict[sub_idx] = set(class_choices)
        if not op_dict:
            return False
        subjects = subject_db.subjects
        for sub_idx, class_set in op_dict.items():
            if not class_set:
                # No class letters given: apply op to the whole subject.
                try:
                    for kelas in subjects[sub_idx].classes:
                        kelas.selected = op if op is not None else not kelas.selected
                except IndexError:
                    # Out-of-range subject numbers are silently ignored.
                    pass
            else:
                for cls_idx in class_set:
                    try:
                        subjects[sub_idx].classes[cls_idx].selected = op if op is not None else not subjects[sub_idx].classes[cls_idx].selected
                    except IndexError:
                        pass
    return True
class Prompt(cmd.Cmd):
    # Template cmd.Cmd prints when `help <topic>` has no entry for <topic>.
    nohelp = "No help on '%s'.\n"
    # Student ID of the logged-in user; None until do_login succeeds.
    user_id = None
def do_login(self, args):
("\n"
"Log in to MMLS and MMU Mobile and load subjects and classes.\n"
"————————————————————————————————————————————————————————————\n"
"Syntax: login [student_id] \n")
user_id = args.split()[0] if args else input('Student ID: ')
password = <PASSWORD>()
if asyncio.run(mmlsattendance.load_online(subject_db, user_id, password)):
print('Success.\n')
self.user_id = user_id
else:
print('Wrong student ID or password.\n')
    def do_print(self, args):
        ("\nDisplay stored subjects, classes and selection.\n")
        # Delegates to the module-level print_subjects on the global subject_db.
        print_subjects(subject_db)
        print()
    def do_autoselect(self, args):
        ("\nAuto-select classes that the student has registered for.\n")
        # Requires a prior successful `login` (self.user_id set).
        if self.user_id is not None:
            asyncio.run(mmlsattendance.autoselect_classes(subject_db, self.user_id))
            print_subjects(subject_db)
            print()
        else:
            print('Please log in to use this command.\n')
def do_select(self, args):
("\n"
"Add selection to classes. \n"
"———————————————————————————————\n"
"Examples: select 1a 2c 3 4abc 5\n"
" select all \n")
if not change_selection(args, True):
print("Invalid command. Enter 'help search' for command help.\n")
return
print_subjects(subject_db)
print()
def do_deselect(self, args):
("\n"
"Remove selection in classes. \n"
"—————————————————————————————————\n"
"Examples: deselect 1a 2c 3 4abc 5\n"
" deselect all \n")
if not change_selection(args, False):
print("Invalid command. Enter 'help search' for command help.\n")
return
print_subjects(subject_db)
print()
def do_toggle(self, args):
("\n"
"Toggle selection of classes. \n"
"———————————————————————————————\n"
"Examples: toggle 1a 2c 3 4abc 5\n"
" toggle all \n")
if not change_selection(args, None):
print("Invalid command. Enter 'help search' for command help.\n")
return
print_subjects(subject_db)
print()
def do_search(self, args):
("\n"
"Search for attendance links in a specified range.\n"
"—————————————————————————————————————————————————\n"
"Syntax: search date <start_date> <end_date> \n"
" search date <date> \n"
" ...if date is empty, uses current date.\n"
" search timetable <start_id> <end_id> \n\n"
"Examples: search date 2020-04-20 2020-08-31 \n"
" search date 2020-07-04 \n"
" search timetable 66666 69420 \n")
cmd = ''.join(args.split()[:1])
args = ' '.join(args.split()[1:])
if not subject_db.selected_classes:
print('No classes selected for searching.\n')
return
elif not cmd or not (cmd == 'date' or cmd == 'timetable'):
print("Invalid command. Enter 'help search' for command help.\n")
return
# =============== search date <start_date> <end_date> ===============
elif cmd == 'date':
args_list = args.split()
try:
if len(args_list) > 2:
print("Too many arguments. Enter 'help search' for command help.\n")
return
elif len(args_list) == 1:
start_date = end_date = date.fromisoformat(args_list[0])
elif len(args_list) == 0:
start_date = end_date = (datetime.utcnow()+timedelta(hours=8)).date()
else:
start_date = date.fromisoformat(args_list[0])
end_date = date.fromisoformat(args_list[1])
except ValueError as err:
print(f"{err}. Use format YYYY-MM-DD.\n")
return
if len(args_list) == 2:
print(f"Searching classes from {start_date.isoformat()} to {end_date.isoformat()}.")
else:
print(f"Searching classes in {start_date.isoformat()}.")
async def scrape_and_print(start_date, end_date, subject_db):
queue = asyncio.Queue()
printer_task = asyncio.create_task(printer(queue))
await mmlsattendance.scrape_date(subject_db, start_date, start_date, queue = queue)
await queue.join()
printer_task.cancel()
await asyncio.wait([printer_task])
asyncio.run(scrape_and_print(start_date, end_date, subject_db))
# =============== search timetable <start_id> <end_id> ===============
elif cmd == 'timetable':
args_list = args.split()
if not len(args_list) == 2:
print("Expected two arguments. Enter 'help search' for command help.\n")
return
try:
start_timetable_id = int(args_list[0])
end_timetable_id = int(args_list[1])
except ValueError as err:
print(f"Value error. Enter 'help search' for command help.\n")
return
print(f"Searching classes from {start_timetable_id} to {end_timetable_id}.")
async def scrape_and_print(start_timetable_id, end_timetable_id, subject_db):
queue = asyncio.Queue()
printer_task = asyncio.create_task(printer(queue))
await mmlsattendance.scrape(subject_db, start_timetable_id, end_timetable_id, queue = queue)
await queue.join()
printer_task.cancel()
await asyncio.wait([printer_task])
asyncio.run(scrape_and_print(start_timetable_id, end_timetable_id, subject_db))
print()
    def do_exit(self, args):
        ("\nTerminate this script.\n")
        print('Exiting.')
        # exit() raises SystemExit, which unwinds out of cmdloop().
        exit()
    def do_guided(self, args):
        ("\nStart a guided setup for typical attendance scraping.\n")
        def ask_yes_no(question):
            # Keep asking until the user answers 'y' or 'n'.
            while True:
                decision = input(f"{question} (y/n): ")
                if (decision.lower() == 'y'): return True
                if (decision.lower() == 'n'): return False
                print("Invalid input.")
        print("How do you want to scrape attendance links?: \n"
        "1. Retrieve classes via MMLS login and search by date.* \n"
        "2. Retrieve classes via MMLS login and search by range of timetable_id.\n\n"
        "*/ Unreliable in the first three trimester days and in some cases. \n"
        " / If no links were caught use the second option instead. ")
        # Choice must be 1 or 2; re-prompt on anything else.
        while True:
            try:
                what_to_do = int(input('\nChoice: '))
                if not 0 < what_to_do < 3:
                    raise ValueError
                break
            except ValueError:
                print('Invalid input.')
        # Ensure we are logged in; bail out if login fails.
        if self.user_id is None:
            self.do_login('')
            if self.user_id is None:
                return
        self.do_print('')
        if ask_yes_no('Auto-select your registered classes?'):
            print()
            self.do_autoselect('')
        # Optional manual editing loop: toggle classes by subject number
        # and class letters until the user is done.
        if ask_yes_no('Edit class selection?'):
            self.do_print('')
            while True:
                try:
                    sub_idx = int(input('Select which subject: '))-1
                    class_choices = input("Toggle which classes?: ").replace(',', '').split(' ')
                    class_indexes = [ord(char)-ord('a') for char in class_choices]
                    for index in class_indexes:
                        subject_db.subjects[sub_idx].classes[index].selected = not subject_db.subjects[sub_idx].classes[index].selected
                    self.do_print('')
                except (ValueError, TypeError):
                    print('Invalid input.')
                if not ask_yes_no('Continue editing?'):
                    break
        # Hand off to do_search with the chosen sub-command.
        if what_to_do == 1:
            start_date = input("Search from what date? YYYY-MM-DD: ")
            end_date = input("Until what date? YYYY-MM-DD: ")
            self.do_search(f'date {start_date} {end_date}')
        elif what_to_do == 2:
            start_timetable_id = input('Define beginning of timetable_id range: ')
            end_timetable_id = input('Define end of timetable_id range: ')
            self.do_search(f'timetable {start_timetable_id} {end_timetable_id}')
def default(self, line):
print(f"Command not found: '{line}'. Enter 'help' to list commands.\n")
def emptyline(self):
print('Hello?\n')
def help_help(self):
print("\n"
"List available commands or provide command help.\n"
"————————————————————————————————————————————————\n"
"Syntax: help [command] \n")
def help_manual(self):
print(
"""Preface: The 'guided' command is a wrapper for commands described under this
entry. As such, it may be simpler to use that command instead.
There are three main steps to start scraping for attendance links: Setting
up required information, selecting what to search, and lastly, starting the
attendance search itself.
First step: Setting up required information.
————————————————————————————————————————————————————————————————————————————————
Commands: login
Syntax: login [student_id]
The script requires certain information before it could start. For instance,
as typical sought-after attendance links are for registered subjects, the script
is able to programmatically obtain required information by simply logging in to
MMLS.
Second step: Displaying and selecting what to search.
————————————————————————————————————————————————————————————————————————————————
Commands: print, select, deselect, toggle, and autoselect.
Syntax: print
select|deselect|toggle <i.e. '1a 2abc 3' and 'all'>
autoselect
After the script has logged in and parsed the required information, a list
of subjects and its classes could be displayed by using the 'print' command.
There will be selection boxes accompanying the class entries which signifies
whether the class will be searched for its attendance links. Altering class
selections can be done using the 'select', 'deselect', and 'toggle' command. On
the other hand, the 'autoselect' command is available which can automatically
select registered classes.
Third step: Starting attendance link scraping.
————————————————————————————————————————————————————————————————————————————————
Commands: search date and search timetable.
Syntax: search date <start_date> <end_date>
search date <date>
search date ...leave empty to search for today.
search timetable <start_timetable_id> <end_timetable_id>
The way this | |
block_id):
locations = self.list_locations()
block = self.connection.request_with_orgId_api_2(
'network/publicIpBlock/%s' % block_id).object
return self._to_ip_block(block, locations)
def ex_delete_public_ip_block(self, block):
delete_node = ET.Element('removePublicIpBlock', {'xmlns': TYPES_URN})
delete_node.set('id', block.id)
result = self.connection.request_with_orgId_api_2(
'network/removePublicIpBlock',
method='POST',
data=ET.tostring(delete_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_get_node_by_id(self, id):
node = self.connection.request_with_orgId_api_2(
'server/server/%s' % id).object
return self._to_node(node)
def ex_list_firewall_rules(self, network_domain):
params = {}
params['networkDomainId'] = network_domain.id
response = self.connection \
.request_with_orgId_api_2('network/firewallRule',
params=params).object
return self._to_firewall_rules(response, network_domain)
    def ex_create_firewall_rule(self, network_domain, rule, position):
        """
        Create a firewall rule on a network domain.

        :param network_domain: The network domain the rule belongs to
        :type network_domain: :class:`DimensionDataNetworkDomain`

        :param rule: The rule to create; its ``id`` attribute is filled in
            from the API response before it is returned
        :type rule: :class:`DimensionDataFirewallRule`

        :param position: Placement of the rule in the rule list
        :type position: ``str``

        :rtype: :class:`DimensionDataFirewallRule`
        """
        create_node = ET.Element('createFirewallRule', {'xmlns': TYPES_URN})
        ET.SubElement(create_node, "networkDomainId").text = network_domain.id
        ET.SubElement(create_node, "name").text = rule.name
        ET.SubElement(create_node, "action").text = rule.action
        ET.SubElement(create_node, "ipVersion").text = rule.ip_version
        ET.SubElement(create_node, "protocol").text = rule.protocol
        # Setup source port rule
        source = ET.SubElement(create_node, "source")
        source_ip = ET.SubElement(source, 'ip')
        if rule.source.any_ip:
            source_ip.set('address', 'ANY')
        else:
            source_ip.set('address', rule.source.ip_address)
            source_ip.set('prefixSize', rule.source.ip_prefix_size)
        # Port element is only emitted when a begin port is given.
        if rule.source.port_begin is not None:
            source_port = ET.SubElement(source, 'port')
            source_port.set('begin', rule.source.port_begin)
        if rule.source.port_end is not None:
            source_port.set('end', rule.source.port_end)
        # Setup destination port rule
        dest = ET.SubElement(create_node, "destination")
        dest_ip = ET.SubElement(dest, 'ip')
        if rule.destination.any_ip:
            dest_ip.set('address', 'ANY')
        else:
            dest_ip.set('address', rule.destination.ip_address)
            dest_ip.set('prefixSize', rule.destination.ip_prefix_size)
        if rule.destination.port_begin is not None:
            dest_port = ET.SubElement(dest, 'port')
            dest_port.set('begin', rule.destination.port_begin)
        if rule.destination.port_end is not None:
            dest_port.set('end', rule.destination.port_end)
        ET.SubElement(create_node, "enabled").text = 'true'
        placement = ET.SubElement(create_node, "placement")
        placement.set('position', position)
        response = self.connection.request_with_orgId_api_2(
            'network/createFirewallRule',
            method='POST',
            data=ET.tostring(create_node)).object
        # The new rule's ID comes back as an <info name="firewallRuleId"> entry.
        # NOTE(review): rule.id stays None if the response lacks that entry.
        rule_id = None
        for info in findall(response, 'info', TYPES_URN):
            if info.get('name') == 'firewallRuleId':
                rule_id = info.get('value')
        rule.id = rule_id
        return rule
def ex_get_firewall_rule(self, network_domain, rule_id):
locations = self.list_locations()
rule = self.connection.request_with_orgId_api_2(
'network/firewallRule/%s' % rule_id).object
return self._to_firewall_rule(rule, locations, network_domain)
def ex_set_firewall_rule_state(self, rule, state):
"""
Change the state (enabled or disabled) of a rule
:param rule: The rule to delete
:type rule: :class:`DimensionDataFirewallRule`
:param state: The desired state enabled (True) or disabled (False)
:type state: ``bool``
:rtype: ``bool``
"""
update_node = ET.Element('editFirewallRule', {'xmlns': TYPES_URN})
update_node.set('id', rule.id)
ET.SubElement(update_node, 'enabled').text = str(state).lower()
result = self.connection.request_with_orgId_api_2(
'network/editFirewallRule',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_delete_firewall_rule(self, rule):
"""
Delete a firewall rule
:param rule: The rule to delete
:type rule: :class:`DimensionDataFirewallRule`
:rtype: ``bool``
"""
update_node = ET.Element('deleteFirewallRule', {'xmlns': TYPES_URN})
update_node.set('id', rule.id)
result = self.connection.request_with_orgId_api_2(
'network/deleteFirewallRule',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_create_nat_rule(self, network_domain, internal_ip, external_ip):
"""
Create a NAT rule
:param network_domain: The network domain the rule belongs to
:type network_domain: :class:`DimensionDataNetworkDomain`
:param internal_ip: The IPv4 address internally
:type internal_ip: ``str``
:param external_ip: The IPv4 address externally
:type external_ip: ``str``
:rtype: :class:`DimensionDataNatRule`
"""
create_node = ET.Element('createNatRule', {'xmlns': TYPES_URN})
ET.SubElement(create_node, 'networkDomainId').text = network_domain.id
ET.SubElement(create_node, 'internalIp').text = internal_ip
ET.SubElement(create_node, 'externalIp').text = external_ip
result = self.connection.request_with_orgId_api_2(
'network/createNatRule',
method='POST',
data=ET.tostring(create_node)).object
rule_id = None
for info in findall(result, 'info', TYPES_URN):
if info.get('name') == 'natRuleId':
rule_id = info.get('value')
return DimensionDataNatRule(
id=rule_id,
network_domain=network_domain,
internal_ip=internal_ip,
external_ip=external_ip,
status=NodeState.RUNNING
)
def ex_list_nat_rules(self, network_domain):
"""
Get NAT rules for the network domain
:param network_domain: The network domain the rules belongs to
:type network_domain: :class:`DimensionDataNetworkDomain`
:rtype: ``list`` of :class:`DimensionDataNatRule`
"""
params = {}
params['networkDomainId'] = network_domain.id
response = self.connection \
.request_with_orgId_api_2('network/natRule',
params=params).object
return self._to_nat_rules(response, network_domain)
def ex_get_nat_rule(self, network_domain, rule_id):
"""
Get a NAT rule by ID
:param network_domain: The network domain the rule belongs to
:type network_domain: :class:`DimensionDataNetworkDomain`
:param rule_id: The ID of the NAT rule to fetch
:type rule_id: ``str``
:rtype: :class:`DimensionDataNatRule`
"""
rule = self.connection.request_with_orgId_api_2(
'network/natRule/%s' % rule_id).object
return self._to_nat_rule(rule, network_domain)
def ex_delete_nat_rule(self, rule):
"""
Delete an existing NAT rule
:param rule: The rule to delete
:type rule: :class:`DimensionDataNatRule`
:rtype: ``bool``
"""
update_node = ET.Element('deleteNatRule', {'xmlns': TYPES_URN})
update_node.set('id', rule.id)
result = self.connection.request_with_orgId_api_2(
'network/deleteNatRule',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_get_location_by_id(self, id):
"""
Get location by ID.
:param id: ID of the node location which should be used
:type id: ``str``
:rtype: :class:`NodeLocation`
"""
location = None
if id is not None:
location = list(
filter(lambda x: x.id == id, self.list_locations()))[0]
return location
    def ex_wait_for_state(self, state, func, poll_interval=2,
                          timeout=60, *args, **kwargs):
        """
        Wait for the function which returns a instance
        with field status to match.

        Keep polling func until one of the desired states is matched.
        Delegates entirely to the connection's ``wait_for_state``.

        :param state: Either the desired state (`str`) or a `list` of states
        :type state: ``str`` or ``list``

        :param func: The function to call, e.g. ex_get_vlan
        :type func: ``function``

        :param poll_interval: The number of seconds to wait between checks
        :type poll_interval: `int`

        :param timeout: The total number of seconds to wait to reach a state
        :type timeout: `int`

        :param args: The arguments for func
        :type args: Positional arguments

        :param kwargs: The arguments for func
        :type kwargs: Keyword arguments

        :return: Whatever ``func`` returned once its status matched
        """
        return self.connection.wait_for_state(state, func, poll_interval,
                                              timeout, *args, **kwargs)
def ex_enable_monitoring(self, node, service_plan="ESSENTIALS"):
"""
Enables cloud monitoring on a node
:param node: The node to monitor
:type node: :class:`Node`
:param service_plan: The service plan, one of ESSENTIALS or
ADVANCED
:type service_plan: ``str``
:rtype: ``bool``
"""
update_node = ET.Element('enableServerMonitoring',
{'xmlns': TYPES_URN})
update_node.set('id', node.id)
ET.SubElement(update_node, 'servicePlan').text = service_plan
result = self.connection.request_with_orgId_api_2(
'server/enableServerMonitoring',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_update_monitoring_plan(self, node, service_plan="ESSENTIALS"):
"""
Updates the service plan on a node with monitoring
:param node: The node to monitor
:type node: :class:`Node`
:param service_plan: The service plan, one of ESSENTIALS or
ADVANCED
:type service_plan: ``str``
:rtype: ``bool``
"""
update_node = ET.Element('changeServerMonitoringPlan',
{'xmlns': TYPES_URN})
update_node.set('id', node.id)
ET.SubElement(update_node, 'servicePlan').text = service_plan
result = self.connection.request_with_orgId_api_2(
'server/changeServerMonitoringPlan',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_disable_monitoring(self, node):
"""
Disables cloud monitoring for a node
:param node: The node to stop monitoring
:type node: :class:`Node`
:rtype: ``bool``
"""
update_node = ET.Element('disableServerMonitoring',
{'xmlns': TYPES_URN})
update_node.set('id', node.id)
result = self.connection.request_with_orgId_api_2(
'server/disableServerMonitoring',
method='POST',
data=ET.tostring(update_node)).object
response_code = findtext(result, 'responseCode', TYPES_URN)
return response_code in ['IN_PROGRESS', 'OK']
def ex_add_storage_to_node(self, node, amount, speed='STANDARD'):
"""
Add storage to the node
:param node: The server to add storage to
:type node: :class:`Node`
:param amount: The amount of storage to add, in GB
:type amount: ``int``
:param speed: The disk speed type
:type speed: ``str``
:rtype: ``bool``
"""
result = self.connection.request_with_orgId_api_1(
'server/%s?addLocalStorage&amount=%s&speed=%s' %
(node.id, amount, speed)).object
response_code = findtext(result, 'result', GENERAL_NS)
return response_code in ['IN_PROGRESS', 'SUCCESS']
def ex_remove_storage_from_node(self, node, disk_id):
"""
Remove storage from a node
:param node: The server to add storage to
:type node: :class:`Node`
:param disk_id: The ID of the disk to remove
:type disk_id: ``str``
:rtype: ``bool``
"""
result = self.connection.request_with_orgId_api_1(
'server/%s/disk/%s?delete' %
(node.id, disk_id)).object
response_code = findtext(result, 'result', GENERAL_NS)
return response_code in ['IN_PROGRESS', 'SUCCESS']
def ex_change_storage_speed(self, node, disk_id, speed):
"""
Change the speed (disk tier) of a disk
:param node: The server to change the disk speed of
:type node: :class:`Node`
:param disk_id: The ID of the disk to change
:type disk_id: ``str``
:param speed: The disk speed type e.g. STANDARD
:type speed: ``str``
:rtype: ``bool``
"""
create_node = ET.Element('ChangeDiskSpeed', {'xmlns': SERVER_NS})
ET.SubElement(create_node, 'speed').text = speed
result = self.connection.request_with_orgId_api_1(
'server/%s/disk/%s/changeSpeed' %
(node.id, disk_id),
method='POST',
data=ET.tostring(create_node)).object
response_code = findtext(result, 'result', GENERAL_NS)
return response_code in ['IN_PROGRESS', 'SUCCESS']
def ex_change_storage_size(self, node, disk_id, size):
"""
Change the size of a disk
:param node: The server to change the disk of
:type node: :class:`Node`
:param disk_id: The ID of the disk to resize
:type disk_id: ``str``
:param size: The disk size in GB
:type size: ``int``
:rtype: ``bool``
"""
create_node = ET.Element('ChangeDiskSize', {'xmlns': SERVER_NS})
ET.SubElement(create_node, 'newSizeGb').text = str(size)
result = self.connection.request_with_orgId_api_1(
'server/%s/disk/%s/changeSize' %
(node.id, disk_id),
method='POST',
data=ET.tostring(create_node)).object
response_code = findtext(result, 'result', GENERAL_NS)
return response_code in ['IN_PROGRESS', 'SUCCESS']
def ex_reconfigure_node(self, node, memory_gb, cpu_count, cores_per_socket,
cpu_performance):
"""
Reconfigure the virtual hardware specification of a node
:param node: The server to change
:type node: :class:`Node`
:param memory_gb: The amount of memory in GB (optional)
:type memory_gb: ``int``
:param cpu_count: The number of CPU (optional)
:type cpu_count: ``int``
:param cores_per_socket: Number of CPU cores per socket (optional)
:type cores_per_socket: ``int``
:param cpu_performance: CPU Performance type (optional)
:type cpu_performance: ``str``
:rtype: ``bool``
"""
update = ET.Element('reconfigureServer', {'xmlns': TYPES_URN})
update.set('id', node.id)
if memory_gb is not None:
ET.SubElement(update, 'memoryGb').text = str(memory_gb)
if cpu_count is not None:
ET.SubElement(update, 'cpuCount').text = str(cpu_count)
if cpu_performance is not None:
| |
not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('terminal_identity') is not None:
self.terminal_identity = m.get('terminal_identity')
if m.get('biz_id') is not None:
self.biz_id = m.get('biz_id')
if m.get('data_user_identity') is not None:
self.data_user_identity = m.get('data_user_identity')
if m.get('data_user_name') is not None:
self.data_user_name = m.get('data_user_name')
if m.get('data_owner_identity_type') is not None:
self.data_owner_identity_type = m.get('data_owner_identity_type')
if m.get('data_owner_identity') is not None:
self.data_owner_identity = m.get('data_owner_identity')
if m.get('data_owner_name') is not None:
self.data_owner_name = m.get('data_owner_name')
if m.get('provider_id') is not None:
self.provider_id = m.get('provider_id')
if m.get('data_code') is not None:
self.data_code = m.get('data_code')
if m.get('target_code') is not None:
self.target_code = m.get('target_code')
if m.get('data_hash') is not None:
self.data_hash = m.get('data_hash')
if m.get('extend_params') is not None:
self.extend_params = m.get('extend_params')
return self
class SaveCpfDatauseResponse(TeaModel):
    """Response model for saving a CPF data-use record on chain."""

    # Scalar attributes copied verbatim between the object and its map form.
    _PLAIN_FIELDS = ('req_msg_id', 'result_code', 'result_msg')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        chain_info: ChainInfo = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Blockchain chain information.
        self.chain_info = chain_info

    def validate(self):
        if self.chain_info:
            self.chain_info.validate()

    def to_map(self):
        mapping = dict()
        for key in self._PLAIN_FIELDS:
            value = getattr(self, key)
            if value is not None:
                mapping[key] = value
        if self.chain_info is not None:
            mapping['chain_info'] = self.chain_info.to_map()
        return mapping

    def from_map(self, m: dict = None):
        source = m or dict()
        for key in self._PLAIN_FIELDS:
            if source.get(key) is not None:
                setattr(self, key, source.get(key))
        if source.get('chain_info') is not None:
            self.chain_info = ChainInfo().from_map(source['chain_info'])
        return self
class ConfirmCpfDatauseRequest(TeaModel):
    """Request model for confirming a CPF data-use record by transaction hash."""

    _FIELDS = ('auth_token', 'product_instance_id', 'terminal_identity',
               'tx_hash')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        terminal_identity: str = None,
        tx_hash: str = None,
    ):
        # Authorization token for OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Verifier terminal ID.
        self.terminal_identity = terminal_identity
        # Blockchain transaction hash.
        self.tx_hash = tx_hash

    def validate(self):
        self.validate_required(self.terminal_identity, 'terminal_identity')
        self.validate_required(self.tx_hash, 'tx_hash')

    def to_map(self):
        # Dict comprehension keeps the declared field order.
        return {key: getattr(self, key)
                for key in self._FIELDS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        source = m or dict()
        for key in self._FIELDS:
            if source.get(key) is not None:
                setattr(self, key, source.get(key))
        return self
class ConfirmCpfDatauseResponse(TeaModel):
    """Response model describing a confirmed CPF data-use record."""

    # Scalar attributes, in the order they appear in the map form.
    _PLAIN_FIELDS = ('req_msg_id', 'result_code', 'result_msg',
                     'data_user_identity', 'data_user_name',
                     'data_owner_identity', 'data_owner_name',
                     'provider_id', 'provider_name', 'terminal_identity',
                     'data_desc', 'data_hash')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        data_user_identity: str = None,
        data_user_name: str = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        provider_id: str = None,
        provider_name: str = None,
        terminal_identity: str = None,
        data_desc: str = None,
        data_hash: str = None,
        chain_info: ChainInfo = None,
        extend_params: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Data user ID.
        self.data_user_identity = data_user_identity
        # Data user name.
        self.data_user_name = data_user_name
        # Data owner ID (the user's national ID).
        self.data_owner_identity = data_owner_identity
        # Data owner name (the user's name).
        self.data_owner_name = data_owner_name
        # Data source ID.
        self.provider_id = provider_id
        # Data source name.
        self.provider_name = provider_name
        # Attestation terminal ID.
        self.terminal_identity = terminal_identity
        # Business description.
        self.data_desc = data_desc
        # Hash of the attested data.
        self.data_hash = data_hash
        # Chain information.
        self.chain_info = chain_info
        # Extension field.
        self.extend_params = extend_params

    def validate(self):
        if self.chain_info:
            self.chain_info.validate()

    def to_map(self):
        mapping = dict()
        for key in self._PLAIN_FIELDS:
            value = getattr(self, key)
            if value is not None:
                mapping[key] = value
        if self.chain_info is not None:
            mapping['chain_info'] = self.chain_info.to_map()
        if self.extend_params is not None:
            mapping['extend_params'] = self.extend_params
        return mapping

    def from_map(self, m: dict = None):
        source = m or dict()
        for key in self._PLAIN_FIELDS:
            if source.get(key) is not None:
                setattr(self, key, source.get(key))
        if source.get('chain_info') is not None:
            self.chain_info = ChainInfo().from_map(source['chain_info'])
        if source.get('extend_params') is not None:
            self.extend_params = source.get('extend_params')
        return self
class CheckCpfAuthRequest(TeaModel):
    """Request model for checking whether a CPF authorization exists."""

    _PLAIN_FIELDS = ('auth_token', 'product_instance_id', 'request_id',
                     'data_owner_identity', 'authorized_identity',
                     'authorized_platform_identity', 'target_code')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        request_id: str = None,
        data_owner_identity: str = None,
        authorized_identity: str = None,
        authorized_platform_identity: str = None,
        target_code: str = None,
        auth_agreement: AuthAgreement = None,
        content: AuthProperty = None,
    ):
        # Authorization token for OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Serial (flow) ID.
        self.request_id = request_id
        # User ID.
        self.data_owner_identity = data_owner_identity
        # Authorized institution ID.
        self.authorized_identity = authorized_identity
        # Authorization terminal ID.
        self.authorized_platform_identity = authorized_platform_identity
        # Authorization business code.
        self.target_code = target_code
        # Authorization agreement.
        self.auth_agreement = auth_agreement
        # Extension content.
        self.content = content

    def validate(self):
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.data_owner_identity, 'data_owner_identity')
        self.validate_required(self.authorized_identity, 'authorized_identity')
        self.validate_required(self.target_code, 'target_code')
        if self.auth_agreement:
            self.auth_agreement.validate()
        self.validate_required(self.content, 'content')
        if self.content:
            self.content.validate()

    def to_map(self):
        mapping = dict()
        for key in self._PLAIN_FIELDS:
            value = getattr(self, key)
            if value is not None:
                mapping[key] = value
        if self.auth_agreement is not None:
            mapping['auth_agreement'] = self.auth_agreement.to_map()
        if self.content is not None:
            mapping['content'] = self.content.to_map()
        return mapping

    def from_map(self, m: dict = None):
        source = m or dict()
        for key in self._PLAIN_FIELDS:
            if source.get(key) is not None:
                setattr(self, key, source.get(key))
        if source.get('auth_agreement') is not None:
            self.auth_agreement = AuthAgreement().from_map(
                source['auth_agreement'])
        if source.get('content') is not None:
            self.content = AuthProperty().from_map(source['content'])
        return self
class CheckCpfAuthResponse(TeaModel):
    """Response model carrying the result of a CPF authorization check."""

    _FIELDS = ('req_msg_id', 'result_code', 'result_msg', 'if_auth')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        if_auth: bool = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Textual description of any error.
        self.result_msg = result_msg
        # Whether the authorization exists.
        self.if_auth = if_auth

    def validate(self):
        pass

    def to_map(self):
        return {key: getattr(self, key)
                for key in self._FIELDS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        source = m or dict()
        for key in self._FIELDS:
            if source.get(key) is not None:
                setattr(self, key, source.get(key))
        return self
class ListCpfSourceRequest(TeaModel):
    """Request model for listing available CPF data sources."""

    _FIELDS = ('auth_token', 'product_instance_id')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
    ):
        # Authorization token for OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id

    def validate(self):
        pass

    def to_map(self):
        return {key: getattr(self, key)
                for key in self._FIELDS
                if getattr(self, key) is not None}

    def from_map(self, m: dict = None):
        source = m or dict()
        for key in self._FIELDS:
            if source.get(key) is not None:
                setattr(self, key, source.get(key))
        return self
class ListCpfSourceResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
cpf_list: List[TdmCpfProvinceVO] = None,
):
# 请求唯一ID,用于链路跟踪和问题排查
self.req_msg_id = req_msg_id
# 结果码,一般OK表示调用成功
self.result_code = result_code
# 异常信息的文本描述
self.result_msg = result_msg
# 商业机构公积金中心列表查询结果
self.cpf_list = cpf_list
def validate(self):
if self.cpf_list:
for k in self.cpf_list:
if k:
k.validate()
def to_map(self):
result = | |
= []
if lump is None:
return []
pNameLen = int.from_bytes(lump.read(4), "little", signed=True)
for i in range(pNameLen):
patchesNames.append(trailingZeros(
lump.read(8).decode("ISO-8859-1").upper()))
return patchesNames
def getPicture(lump, pallete):
    ''' Given lump name of a picture, get that picture, stored in Doom picture format
    Used for patches, sprites, title screens etc (but not flats)
    Picture returned as a PIL.Image object

    Returns None for a missing lump or for implausibly large dimensions.
    '''
    if lump is None:
        return None
    # Some modern WADs embed PNGs instead of Doom-format pictures;
    # bytes 1-3 of a PNG file are the literal b'PNG'.
    if lump.data[1:4] == b'PNG':
        pic = png2pic(lump.data, pallete)
        return pic
    # Size of the final picture (header: width, height as 16-bit LE ints)
    width = int.from_bytes(lump.read(2), "little", signed=False)
    height = int.from_bytes(lump.read(2), "little", signed=False)
    # Protection against some weird humongous things
    # Although textures with 1024 width is a thing
    if width > 2000 or height > 2000:
        return None
    # Skip the two 16-bit picture offsets; they are not used here.
    lump.read(4)
    # This is a list of Posts (columns) that comprize an image:
    # one 32-bit file offset per column.
    postOffsets = []
    for w in range(width):
        postOffsets.append(int.from_bytes(
            lump.read(4), "little", signed=False))
    # this is the image we will build from posts (columns);
    # fully transparent until pixels are painted in.
    im = Image.new("RGBA", (width, height), (0, 0, 0, 0))
    px = im.load()
    # Here we go go through all Posts
    for i in range(im.size[0]):
        lump.seek(postOffsets[i])
        # There is no fixed length of a post,
        # post ends with the last byte=255
        while True:
            # if the first byte is not 255 - it is an offset
            # (vertical start row of this span within the column)
            topdelta = int.from_bytes(
                lump.read(1), "little", signed=False)
            # if it is 255 - end this post (column)
            if topdelta == 255:
                break
            # Next byte is the length of data to read
            length = int.from_bytes(
                lump.read(1), "little", signed=False)
            # Protection in case something goes wrong
            # and we are at the EOF
            # (removed cause it breaks otehr files)
            # if length == 0:
            #     return im
            # First and last bytes are not used
            lump.read(1)
            # FInally, reading some pixel data: each byte is a palette index.
            for j in range(length):
                pixel = int.from_bytes(
                    lump.read(1), "little", signed=False)
                color = pallete[pixel]
                px[i, topdelta + j] = color
            # Trailing unused padding byte of the span.
            lump.read(1)
    return im
def getPictures(wad, pictureNames, pallete):
    ''' Get all the pictures in a list
    Returns a dictionary, where key is the picture's name and value is PIL.Image
    Names whose lumps are missing or unparsable are silently skipped.
    '''
    pictures = {}
    for name in pictureNames:
        image = getPicture(wad.getLump(name), pallete)
        if image is not None:
            pictures[name] = image
    return pictures
# Functions that deal with textures
###################################
def getTextureInfo(lump):
    ''' Get info about all the textures in a lump (TEXTURE1 / TEXTURE2)
    Which is texture data (name, width, height, patches)
    where "patches" is a list of patches and offsets:
    [(offsetX, offsetY, patchN),..]
    They will be put together into a texture in a different function
    '''
    if lump is None:
        return []
    texturesInfo = []
    # Lump layout: texture count, then one 32-bit offset per texture record.
    nTextures = int.from_bytes(lump.read(4), "little", signed=False)
    offsets = []
    for i in range(nTextures):
        offsets.append(int.from_bytes(lump.read(4), "little", signed=False))
    for offset in offsets:
        lump.seek(offset)
        # 8-byte zero-padded texture name.
        textureName = trailingZeros(lump.read(8).decode("ISO-8859-1"))
        # Skip 4 unused bytes before the dimensions.
        lump.read(4)
        width = int.from_bytes(lump.read(2), "little", signed=False)
        height = int.from_bytes(lump.read(2), "little", signed=False)
        # Skip 4 more unused bytes before the patch count.
        lump.read(4)
        patchCount = int.from_bytes(lump.read(2), "little", signed=False)
        patches = []
        for i in range(patchCount):
            # Signed offsets: patches may hang off the texture's edges.
            offsetX = int.from_bytes(lump.read(2), "little", signed=True)
            offsetY = int.from_bytes(lump.read(2), "little", signed=True)
            patchN = int.from_bytes(lump.read(2), "little", signed=False)
            # Skip 4 unused bytes at the end of each patch record.
            lump.read(4)
            patches.append((offsetX, offsetY, patchN))
        texturesInfo.append((textureName, width, height, patches))
    return texturesInfo
def getTextures(textureInfo, patches, patchesNames):
    ''' Given a list of texture information (see previous function)
    Create all textures (by constructing them from patches)
    Return dictionary of {textureName:PIL.Image}
    '''
    textures = {}
    for name, width, height, patchList in textureInfo:
        canvas = Image.new("RGBA", (width, height), color=(0, 0, 0, 0))
        # Stamp every referenced patch onto the canvas in order.
        for offx, offy, patchID in patchList:
            # Ignore patch indexes beyond the PNAMES table.
            if patchID >= len(patchesNames):
                continue
            patch = patches.get(patchesNames[patchID])
            if patch is not None:
                # The patch doubles as its own mask because
                # many patches use transparency.
                canvas.paste(patch, (offx, offy), patch)
        textures[name] = canvas
    return textures
def getListOfTextures(walls):
    ''' given all Wall objects, return names of all textures used in them '''
    # a set comprehension de-duplicates across every wall group
    return list({w.texture for group in walls.values() for w in group})
# Function that deal with flats (textures for floors and ceilings)
# We dont use ceilings in this program though
###################################################################
def createFlat(rawFlat, pallete):
    ''' Convert raw flat data into a 64x64 list of (R,G,B)
    This is not a PIL picture, just nested lists of RGB tuples:
    [[(R,G,B), (R,G,B), (R,G,B), ...], [], [], ...]
    Each byte of rawFlat is a palette index.
    '''
    width = 64
    height = len(rawFlat) // width
    # bytes are consumed sequentially: row r covers indices
    # [r*height, (r+1)*height)
    return [
        [pallete[rawFlat[row * height + col]] for col in range(height)]
        for row in range(width)
    ]
def getListOfFlats(sectors):
    ''' Given list of sectors, get list of all flats (only floors), used in them
    '''
    # a set already de-duplicates, so the old explicit
    # "if ... not in listOfFlats" membership test was redundant
    return list({sector.floorTexture for sector in sectors})
def getFlats(wad, listOfFlats, pallete):
    ''' Given list of flats, return dictionary of flats data (R,G,B) list
    {flatName: [[(R,G,B), (R,G,B), ...], [],[], ...]}
    Handles three on-disk formats: embedded PNG, raw 64x64 DOOM flat,
    and DOOM picture format.
    '''
    flats = {}
    for flatName in listOfFlats:
        rawFlat = wad.getLump(flatName)
        if rawFlat is None:
            continue
        data = rawFlat.data
        if data[1:4] == b'PNG':
            # case 1: PNG file; decode, then reshape into flat format
            flats[flatName] = pic2flat(png2pic(data, pallete))
        elif len(data) == 4096:
            # case 2: regular 64x64 DOOM flat (one palette byte/pixel)
            flats[flatName] = createFlat(data, pallete)
        elif len(data) != 0:
            # case 3: DOOM picture format flat
            pic = getPicture(rawFlat, pallete)
            if pic is not None:
                flats[flatName] = pic2flat(pic)
    return flats
# couple of helper functions to transform flats (arrays of tuples)
# into PIL's images (pics) and back
def flat2pic(flat):
    ''' Transform flat (nested lists of RGB tuples) into a PIL image '''
    width, height = len(flat), len(flat[0])
    im = Image.new("RGB", (width, height), color=(0, 0, 0))
    px = im.load()
    # flat is column-major: flat[x][y] maps straight onto px[x, y]
    for x, column in enumerate(flat):
        for y, rgb in enumerate(column):
            px[x, y] = rgb
    return im
def pic2flat(pic):
    ''' Transform PIL image into flat (nested lists of RGB tuples) '''
    width, height = pic.size
    px = pic.load()
    # slice to three channels, dropping any alpha the pixel may carry
    return [
        [px[x, y][:3] for y in range(height)]
        for x in range(width)
    ]
# Functions to parse the map data, preparing for the drawing
############################################################
def checkHOM(vertexes, linedefs, sidedefs, sectors):
    ''' Check if the sectors are valid, marking bad ones HOMpassed = False.
    (HOM stands for Hall Of Mirrors - an effect you see in classic Doom
    when a sector error is present)
    '''
    # Populate each sector's listOfVerteces with every vertex of every
    # linedef whose sidedef belongs to that sector
    for linedef in linedefs:
        for side in (linedef.front, linedef.back):
            if side == 65535 or side >= len(sidedefs):
                continue  # 65535 means "no sidedef on this side"
            target = sidedefs[side].sector
            sectors[target].listOfVerteces.append(linedef.beg)
            sectors[target].listOfVerteces.append(linedef.end)
    # Disqualify sectors failing either sanity check
    for sector in sectors:
        # Check #1: a valid sector has at least 3 sides (2 vertexes each).
        # Fixes dangling forgotten sidedefs (as in DOOM2-MAP30)
        if len(sector.listOfVerteces) < 6:
            sector.HOMpassed = False
            continue
        # Check #2: narrow strips under 2 pixels wide are disqualified
        xs = [vertexes[v].x for v in sector.listOfVerteces if v < len(vertexes)]
        ys = [vertexes[v].y for v in sector.listOfVerteces if v < len(vertexes)]
        if not xs or max(xs) - min(xs) < 2 or max(ys) - min(ys) < 2:
            sector.HOMpassed = False
def genWalls(vertexes, linedefs, sidedefs, sectors, options):
''' Given level's info, generate list of Walls objects
Wall object combines all info needed to draw a wall:
things like position, texture, type, lighting etc.
Returned as a dictionary, where key is the distance from the corner
To draw from fartherst to closest, to make semi-transparent back-walls work
'''
hCoefX, hCoefY = options["coefX"], options["coefY"]
walls = {}
# All walls are based on Linedefs
for linedef in linedefs:
# Get linedef's basic info
frontSideDef = linedef.front
backSideDef = linedef.back
if linedef.beg >= len(vertexes) or linedef.end >= len(vertexes):
continue
start = vertexes[linedef.beg]
end = vertexes[linedef.end]
# distance from the top left corner of teh image
# used later in the order of drawing
distance = (start.x + end.x)/2 * hCoefX + (start.y + end.y)/2 | |
from pylab import *
from matplotlib.collections import LineCollection
from matplotlib.patches import Rectangle
from numpy import nan
import math, time
import numpy as nx
import numpy as np
import flydra_core.reconstruct as reconstruct
import cgtypes # cgkit 1.x
import tables # pytables
import scipy.signal
import scipy.io
from PQmath import *
import sets
### restore builtin functions which may have been overridden
##min = __builtins__.min
##max = __builtins__.max
##sum = __builtins__.sum
##round = __builtins__.round
##abs = __builtins__.abs
def getnan(x):
    """Return the indices of NaN entries in *x* (tuple of index arrays).

    Fixed to use the module-level ``np`` alias: the bare name ``numpy``
    is never imported in this file (only ``nx`` and ``np`` aliases), so
    it only worked if ``from pylab import *`` happened to re-export it.
    """
    return np.nonzero(np.isnan(x))
def my_interp( A, B, frac ):
    """Linear interpolation: A when frac == 0, B when frac == 1."""
    return A + frac * (B - A)
def interpolate_P( results, start_frame, stop_frame, typ='best' ):
if typ == 'fast':
data3d = results.root.data3d_fast
elif typ == 'best':
data3d = results.root.data3d_best
fXl = [(row['frame'],
row['x'],row['y'],row['z'],
row['p0'],row['p1'],row['p2'],row['p3'],row['p4'],row['p5']) for row in
data3d if start_frame <= row['frame'] <= stop_frame ] # XXX
# data3d.where( start_frame <= data3d.cols.frame <= stop_frame )]
assert len(fXl) == 2
assert stop_frame > start_frame
assert (stop_frame - start_frame) > 1
fXl = nx.array(fXl)
frame = fXl[:,0].astype(nx.int64)
P = fXl[:,1:4]
print ' ',start_frame, P[0,:]
dPdt = (P[1,:]-P[0,:])/float(frame[1]-frame[0])
for frame_no in range(start_frame+1, stop_frame):
frac = float(frame_no-start_frame)/float(stop_frame-start_frame)
newP = P[0,:]+dPdt*frac
print ' ',frame_no,newP,'<- new value'
# now save to disk
old_nrow = None
# for row in data3d.where( data3d.cols.frame == frame_no ):
for row in data3d:
if row['frame'] != frame_no: # XXX
continue
if old_nrow is not None:
raise RuntimeError('more than row with frame number %d in data3d'%frame_no)
old_nrow = row.nrow()
# delete old row
if old_nrow is not None:
data3d.remove_rows(start=old_nrow,stop=None)
X = newP
line3d = [nan]*6 # fill with nans
cam_nos_used_str = ''
new_row = data3d.row
new_row['frame'] = frame_no
new_row['x'] = X[0]
new_row['y'] = X[1]
new_row['z'] = X[2]
new_row['p0'] = line3d[0]
new_row['p1'] = line3d[1]
new_row['p2'] = line3d[2]
new_row['p3'] = line3d[3]
new_row['p4'] = line3d[4]
new_row['p5'] = line3d[5]
new_row['timestamp']=0.0
new_row['camns_used']=cam_nos_used_str
new_row['mean_dist']=0.0
new_row.append()
data3d.flush()
print ' ',stop_frame, P[1,:]
def sort_on_col0( a, b ):
    """cmp-style comparator ordering sequences by their first element."""
    if a[0] == b[0]:
        return 0
    return -1 if a[0] < b[0] else 1
def slerp_quats( Q, bad_idxs, allow_roll = True ):
    """replace quats in sequence with interpolated version

    For each index in bad_idxs, scan backwards for the nearest earlier
    non-NaN quaternion and forwards for the nearest later one, then
    SLERP between them and write the result back into Q in place.
    Raises IndexError if no earlier valid value exists; RuntimeError if
    no later one exists.  When allow_roll is False the interpolated
    quat is round-tripped through an orientation vector to strip roll.
    """
    for cur_idx in bad_idxs:
        # scan backwards for the nearest valid (non-NaN) quaternion
        pre_idx = cur_idx-1
        preQ = None
        while preQ is None:
            if pre_idx < 0:
                raise IndexError
            preQ = Q[pre_idx]
            # reject candidates containing any NaN component
            if len(getnan(nx.array((preQ.w,preQ.x,preQ.y,preQ.z)))[0]):
                preQ = None
                pre_idx -= 1
        # scan forwards for the nearest valid quaternion
        post_idx = cur_idx+1
        postQ = None
        while postQ is None:
            try:
                postQ = Q[post_idx]
            except IndexError:
                raise RuntimeError('attempted to interpolate orientation with no final orientation value (reduce stop frame)')
            if len(getnan(nx.array((postQ.w,postQ.x,postQ.y,postQ.z)))[0]):
                postQ = None
                post_idx += 1
        # fractional position of cur_idx between the two valid samples
        frac = float(cur_idx-pre_idx)/float(post_idx-pre_idx)
        #print '   ',frac, cur_idx, pre_idx, post_idx
        new_quat = cgtypes.slerp(frac, preQ, postQ)
        if allow_roll:
            Q[cur_idx] = new_quat
        else:
            # convert back and forth from orientation to eliminate roll
            ori = quat_to_orient(new_quat)
            no_roll_quat = orientation_to_quat(ori)
            Q[cur_idx] = no_roll_quat
def do_it(results,
start_frame = None,
stop_frame = None,
Psmooth=None,Qsmooth=None,
alpha=0.2, beta=20.0, lambda1=2e-9, lambda2 = 1e-11,
gamma=0.0,
percent_error_eps_quats = 9,
interp_OK=False,
return_err_tol=False,
force_err_tol=None,
return_frame_numbers=False,
return_resultant_forces=False,
return_roll_qsmooth=False,
return_coronal_dir=False,
do_smooth_position = False,
return_smooth_position = False,
do_smooth_quats = False,
return_smooth_quats = False,
plot_pos_and_vel = False,
plot_ffts = False,
plot_pos_err_histogram = False,
plot_vel_vs_accel = False,
return_vel_vs_pitch_info = False,
plot_xy = False, plot_xy_Qsmooth = False, plot_xy_Qraw = True, plot_xy_Psmooth = False,
plot_xz = False,
plot_xy_air = False,
plot_force_angle_info=False,
plot_hists = False,
plot_hist_horiz_vel = False,
plot_hist_vert_vel=False,
plot_forward_vel_vs_pitch_angle = False,
plot_accel = False,
plot_smooth_pos_and_vel = False,
plot_Q = False,
plot_body_angular_vel = False,
plot_body_angular_vel2 = False,
plot_error_angles = False,
plot_body_ground_V = False,
plot_body_air_V = False,
plot_forces = False,
plot_srini_landing_fig=False,
had_post = True,
show_grid = False,
xtitle='time',
force_scaling = 1e7,
drag_model_for_roll = 'linear',
return_drag_force=False,
return_thrust_force=False,
fps=100.0,
):
rad2deg = 180/math.pi
deg2rad = 1/rad2deg
fps = float(fps)
#############################################################
# get position data, make sure there are no holes
# get data from file
if isinstance(results,tables.File):
data3d = results.root.data3d_best
fXl = [(row['frame'],
row['x'],row['y'],row['z'],
row['p0'],row['p1'],row['p2'],row['p3'],row['p4'],row['p5']) for row in
data3d if start_frame <= row['frame'] <= stop_frame ] # XXX
#data3d.where( start_frame <= data3d.cols.frame <= stop_frame )]
fXl.sort( sort_on_col0 )
else:
print 'assuming results are numeric'
fXl = results
fXl = nx.asarray(fXl)
frame = fXl[:,0].astype(nx.int64)
if start_frame is None:
start_frame = frame.min()
else:
valid_cond = frame >= start_frame
fXl = fXl[valid_cond]
frame = fXl[:,0].astype(nx.int64)
if stop_frame is None:
stop_frame = frame.max()
else:
valid_cond = frame <= stop_frame
fXl = fXl[valid_cond]
frame = fXl[:,0].astype(nx.int64)
print 'frame[:5]',frame[:5]
P = fXl[:,1:4]
line3d = fXl[:,4:]
print 'P[:5]',P[:5]
print 'line3d[:5]',line3d[:5]
# reality check on data to ensure no big jumps -- drops frames
framediff = frame[1:]-frame[:-1]
Pdiff = P[1:,:]-P[:-1,:]
Pdiff_dist = nx.sqrt(nx.sum(Pdiff**2,axis=1))
mean_Pdiff_dist = np.mean(Pdiff_dist)
std_Pdiff_dist = np.std(Pdiff_dist)
newframe = [ frame[0] ]
newP = [ P[0,:] ]
newline3d = [ line3d[0,:] ]
cur_ptr = 0
n_sigma = 5
if force_err_tol is None:
err_tol = n_sigma*std_Pdiff_dist
if err_tol < 30:
err_tol = 30
print 'at lower limit',# 30 mm/IFI = 3 meters/sec
else:
print 'calculated',
else:
err_tol = force_err_tol
print 'given',
print 'err_tol',err_tol
outputs = []
if return_err_tol:
outputs.append( err_tol )
while (cur_ptr+1) < frame.shape[0]:
cur_ptr += 1
tmpP1 = newP[-1]
tmpP2 = P[cur_ptr]
#Pdiff_dist = math.sqrt(nx.sum((newP[-1] - P[cur_ptr])**2))
Pdiff_dist = math.sqrt(nx.sum((tmpP2-tmpP1)**2))
if abs(Pdiff_dist-mean_Pdiff_dist) > err_tol:
print 'WARNING: frame %d position difference exceeded %d sigma, ignoring data'%(frame[cur_ptr],n_sigma)
continue
newframe.append( frame[cur_ptr] )
newP.append( P[cur_ptr] )
newline3d.append( line3d[cur_ptr] )
frame = nx.array( newframe )
P = nx.array( newP )
line3d = nx.array( newline3d )
fXl = nx.concatenate( (frame[:,nx.newaxis], P, line3d), axis=1 )
IFI = 1.0/fps
t_P = (frame-frame[0])*IFI # put in seconds
to_meters = 1e-3 # put in meters (from mm)
P = nx.array(P)*to_meters
line3d = nx.array(line3d)
# check timestamps
delta_ts = t_P[1:]-t_P[:-1]
frames_missing = False
# interpolate to get fake data where missing
interpolated_xyz_frames = []
for i,delta_t in enumerate(delta_ts):
if not (0.009 < delta_t < 0.011):
if interp_OK:
fXl = list(fXl)
first = frame[i]
last = frame[i+1]
N = last-first
for ii,fno in enumerate(range(first,last)):
if ii == 0:
continue
frac = ii/float(N)
# do interpolation
new_x = my_interp( fXl[i][1], fXl[i+1][1], frac )
new_y = my_interp( fXl[i][2], fXl[i+1][2], frac )
new_z = my_interp( fXl[i][3], fXl[i+1][3], frac )
new_row = nx.array( [fno, new_x, new_y, new_z, nan, nan, nan, nan, nan, nan],
dtype=fXl[0].dtype )
fXl.append( new_row )
print ' linear interpolation at time %0.2f (frame %d)'%((fno-start_frame)*0.01,fno,)
interpolated_xyz_frames.append( fno )
else:
frames_missing = True
print 'are you missing frames between %d and %d?'%(frame[i], frame[i+1])
if frames_missing:
raise ValueError("results have missing frames (hint: interp_OK=True)")
if len(interpolated_xyz_frames):
# re-sort and partition results
fXl.sort( sort_on_col0 )
fXl = nx.array(fXl)
frame = fXl[:,0]
P = fXl[:,1:4]
line3d = fXl[:,4:]
t_P = (frame-frame[0])*IFI # put in seconds
to_meters = 1e-3 # put in meters (from mm)
P = nx.array(P)*to_meters
line3d = nx.array(line3d)
frame_list = list(frame)
interped_p_idxs = [ frame_list.index( fno ) for fno in interpolated_xyz_frames ]
else:
interped_p_idxs = []
delta_t = delta_ts[0]
################################################################
if return_frame_numbers:
outputs.append( frame )
# get angular position phi
phi_with_nans = reconstruct.line_direction(line3d) # unit vector
slerped_q_idxs = getnan(phi_with_nans[:,0])[0]
if len(slerped_q_idxs) and slerped_q_idxs[0] == 0:
raise ValueError('no orientation for first point')
Q = QuatSeq([ orientation_to_quat(U) for U in phi_with_nans ])
slerp_quats( Q, slerped_q_idxs, allow_roll=False )
for cur_idx in slerped_q_idxs:
print ' SLERPed missing quat at time %.2f (frame %d)'%(cur_idx*IFI, frame[cur_idx])
t_bad = nx.take(t_P,slerped_q_idxs)
#frame_bad = frame[slerped_q_idxs]
frame_bad = nx.take(frame,slerped_q_idxs)
#############################################################
# first position derivative (velocity)
dPdt = (P[2:]-P[:-2]) / (2*delta_t)
t_dPdt = t_P[1:-1]
# second position derivative (acceleration)
d2Pdt2 = (P[2:] - 2*P[1:-1] + P[:-2]) / (delta_t**2)
t_d2Pdt2 = t_P[1:-1]
# first orientation derivative (angular velocity)
omega = (Q[:-1].inverse()*Q[1:]).log()/delta_t
t_omega = t_P[:-1]
# second orientation derivative (angular acceleration)
omega_dot = ((Q[1:-1].inverse()*Q[2:]).log() -
(Q[:-2].inverse()*Q[1:-1]).log()) / (delta_t**2)
t_omega_dot = t_P[1:-1]
if had_post:
post_top_center=array([ 130.85457512, 169.45421191, 50.53490689])
post_radius=5 # mm
post_height=10 # mm
################# Get smooth Position #############################
if Psmooth is not None:
Psmooth = nx.array(Psmooth)*to_meters
elif not do_smooth_position:
if 1:
#Psmooth is None and we don't desire recomputation
# see if we can load cached Psmooth from pytables file
try:
smooth_data = results.root.smooth_data
fPQ = [(row['frame'],
row['x'],row['y'],row['z'],
row['qw'],row['qx'],row['qy'],row['qz']) for row in
smooth_data if start_frame <= row['frame'] <= stop_frame ] # XXX
## data3d.where( start_frame <= data3d.cols.frame <= stop_frame )]
fPQ.sort( sort_on_col0 )
if 0 < len(fPQ) < (stop_frame-start_frame+1):
raise ValueError('pytables file had some but not all data cached for file %s %d-%d'%(results.filename,start_frame,stop_frame))
fPQ = | |
or val.lower().endswith('.asd')):
newval=""
#if no search then grab them all else only grab the matches
#seriously speeds things up if skip non-matches
if not session['searchval'] or (session['searchval'] and session['searchval'].lower()
in val.lower()):
newval=os.path.join(root,val)
#blanco match
elif session['searchval'] and session['searchval']==session['blanco']:
for ix in range(len(blancopackage)):
if blancopackage[ix] in val:
newval=os.path.join(root,val)
break
#if no search or matches on search
if newval:
rated_sink = Sink.query.filter_by(location=newval[13:]).first()
ratings = Rating.query.filter_by(sink_id=rated_sink.id).all()
ival=0
for blah in range(len(ratings)):
ival+=ratings[blah].stars
if len(ratings) == 0:
ival=-1
else:
ival=round(float(ival)/float(len(ratings)),1)
if(folders):
newval=newval[14+len(os.path.join(session['donor'],session['current_folder'])):]
else:
newval=newval[14+len(session['donor']):]
initial_files.append((newval,ival,rated_sink.id))
#i only half understand this
initial_files = sorted(initial_files, key=lambda tup: tup[0].lower())
username=""
if 'username' in session:
username=session['username']
return render_template('silicate.html',title="silicatewastes", guys=guys,
cloneguys=cloneguys, folders=folders,
cads=initial_files, donor=session['donor'],
current_folder=session['current_folder'], username=username,
staticsearchval=session['searchval'], blanco=session['blanco'])
######################/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink###############
######################/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink###############
######################/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink###############
######################/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink###############
######################/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink###############
######################/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink###############
######################/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink###############
######################/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink/sink###############
@app.route('/sink', methods=['GET','POST'])
def sink():
    """Sink (CAD file) detail page.

    GET with ?cad=<id> shows the sink and its ratings.  POST handles
    three actions: rating the previously-downloaded sink
    ('rate_previous' / 'forgot'), adding or overwriting a rating on
    this sink ('add'), and deleting the user's rating ('change').
    """
    #if url makes sense, load the sink row grab the truncated location
    if request.method == 'GET' and request.args.get('cad') != None:
        #this int cast will crash it if some knucklhead messes with the url.
        #i WANT it to crash in this situation
        cad = int(request.args.get('cad'))
        #not gonna check if they entered too large of an int. just let the query fail.
    #if post request same thing
    elif request.method == 'POST' and 'cad' in request.form:
        cad = int(request.form['cad'])
    #else he's messing with the URL or something's messed up.
    else:
        return redirect('/')
    #regardless of request type, set sink to truncated location
    #sinkentry to sink row
    (sink,sinkentry)=sinksplit(cad)
    new_rating=""
    previous_sink=""
    username=""
    previous_rating=""
    # NOTE(review): username is only populated on POST, so a GET renders
    # as logged-out even with an active session -- confirm intended
    if request.method=='POST' and 'username' in session:
        username=session['username']
        user = User.query.filter_by(username=username).first()
        #clear out lastsink, stop bothering user about it
        if 'forgot' in request.form:
            user.lastsink=0
            db.session.commit()
        if user.lastsink!=0:
            previous_sink = Sink.query.filter_by(id=user.lastsink).first()
            #see if he rated the last sink he downloaded
            previous_rating = Rating.query.filter_by(user_id=user.id, sink_id=previous_sink.id).first()
            #if it's already rated then don't bother him
            if previous_rating!=None:
                previous_sink=""
            #he clicked add rating on previous sink
            elif 'rate_previous' in request.form:
                if 'starsB' not in request.form:
                    flash("pick a rating")
                else:
                    stars = request.form['starsB']
                    comment = request.form['commentB']
                    if len(comment)>60:
                        flash("comment must be less than 60 chars")
                    #stick in the new rating, stop bothering him about it
                    else:
                        #previous_rating points to an actual row that you want to change
                        #thats why you don't want a new variable called new_rating here
                        previous_rating = Rating(stars, comment, user, previous_sink)
                        db.session.add(previous_rating)
                        db.session.commit()
                        previous_rating=""
                        previous_sink=""
                        flash("THANKS!")
    #add rating to current sink
    #(this corresponds to the sink in the url, not a sink pulled from the user's last
    #download variable, like above)
    if request.method=='POST' and 'add' in request.form:
        # BUG FIX: request.form is a mapping, not a callable --
        # request.form('comment') raised TypeError on every rating add
        comment = request.form['comment']
        if len(comment)>60:
            flash("comment must be less than 60 chars")
        else:
            #redundant? maybe safeguards against hackers.
            if username:
                #load the rating we're changing (if it exists)
                new_rating = Rating.query.filter_by(user_id=user.id, sink_id=sinkentry.id).first()
                if 'stars' in request.form:
                    stars = request.form['stars']
                    #overwrite pre-existing rating
                    if new_rating:
                        new_rating.stars=stars
                        new_rating.comment=comment
                        new_rating.user=user
                        new_rating.sink=sinkentry
                        db.session.commit()
                    #put in brand new rating
                    else:
                        new_rating=Rating(stars, comment, user, sinkentry)
                        db.session.add(new_rating)
                        db.session.commit()
                else:
                    flash("You didn't pick a rating.")
    elif request.method=='POST' and 'change' in request.form:
        # BUG FIX: guard on login state -- 'user' is only bound when
        # logged in; a logged-out POST here previously raised NameError
        if username:
            #just delete it
            user_rating = Rating.query.filter_by(user_id=user.id, sink_id=sinkentry.id).first()
            db.session.delete(user_rating)
            db.session.commit()
    user_rating=""
    if username:
        #now load up their rating
        user_rating = Rating.query.filter_by(user_id=user.id, sink_id=sinkentry.id).first()
    #load up all ratings, just filter out user rating in jinja. probably smarter to do
    #it here or in the query directly above but i'll figure it out some other day
    ratings = Rating.query.filter_by(sink_id=sinkentry.id).all()
    return render_template("sink.html", folder=session['current_folder'], donor=session['donor'],
            sink=sink, ratings=ratings, username=username, user_rating=user_rating,
            previous_sink=previous_sink)
#######################/downloadfile/downloadfile/downloadfile###############################
#######################/downloadfile/downloadfile/downloadfile###############################
#######################/downloadfile/downloadfile/downloadfile###############################
#######################/downloadfile/downloadfile/downloadfile###############################
#######################/downloadfile/downloadfile/downloadfile###############################
#######################/downloadfile/downloadfile/downloadfile###############################
#######################/downloadfile/downloadfile/downloadfile###############################
#######################/downloadfile/downloadfile/downloadfile###############################
#apparently i shouldn't be doing this, supposed to use a proper web server
#instead of the flask server. don't care for now plus maybe i don't get
#enough traffic for it to matter
@app.route("/downloadfile", methods=['POST'])
def downloadfile():
    """Serve a CAD file download, bump its counters, and remember it as
    the user's last download (so they can later be nagged to rate it).
    Logged-out users get flashed at and re-shown the sink page."""
    session['current_folder'] = request.form['current_folder']
    sink = request.form['sink']
    if 'cancel' in request.form:
        return redirect("/")
    # donors may dump all their cads straight in their root directory,
    # in which case current_folder is empty
    if session['current_folder']:
        location = os.path.join(session['donor'], session['current_folder'], sink)
    else:
        location = os.path.join(session['donor'], sink)
    sinkentry = Sink.query.filter_by(location=location).first()
    if 'username' not in session:
        # guard clause: no download without logging in
        flash("log in, jackass")
        ratings = Rating.query.filter_by(sink_id=sinkentry.id).all()
        # previous_sink isn't needed for this render
        return render_template("sink.html", folder=session['current_folder'], donor=session['donor'],
                sink=sink, ratings=ratings, username="", user_rating="")
    username = session['username']
    user = User.query.filter_by(username=username).first()
    try:
        sinkentry.downloads += 1
        user.lastsink = sinkentry.id
        #user downloads. made that variabe for something else. whatever.
        user.benefactor += 1
        db.session.commit()
        return send_from_directory(os.path.join("static","sinks",session['donor'],
                session['current_folder']), sink, as_attachment=True)
    except Exception as e:
        return ("<h1>Something went terribly wrong. Email me all this if you want:<br/>"+
                os.path.join("static","sinks",session['donor'],session['current_folder'])+
                "   "+sink+"<br/><br/>"+str(e)+"<h1>")
##########################/chat##################################################################
##########################/chat##################################################################
##########################/chat##################################################################
##########################/chat##################################################################
##########################/chat##################################################################
##########################/chat##################################################################
##########################/chat##################################################################
##########################/chat##################################################################
@app.route('/chat', methods=['GET','POST'])
def chat():
    """Chat page: post a new 'babbling' (logged-in users only) and list
    all babblings newest-first, with a 0/1 parity flag per row for
    alternating styling in the template."""
    username = ""
    if 'username' in session:
        username = session['username']
    if request.method == 'POST':
        if 'babble' in request.form:
            if username:
                babble = request.form['babble']
                if len(babble) > 60 or len(babble) < 2:
                    flash("babblings must be 2-60 chars")
                else:
                    # was "ref_user = user = ..."; the duplicate binding
                    # was never used
                    ref_user = User.query.filter_by(username=username).first()
                    new_babble = Babblings(babble, ref_user)
                    db.session.add(new_babble)
                    db.session.commit()
            else:
                flash("log in, jackass")
    prebabblings = Babblings.query.order_by(Babblings.id.desc()).all()
    # enumerate instead of range(len(...)): same (parity, row) pairs
    babblings = [(ix % 2, entry) for ix, entry in enumerate(prebabblings)]
    babble = "annoying"
    return render_template("chat.html", babblings=babblings, username=username, babble=babble)
##########################stalk##################################################################
##########################stalk##################################################################
##########################stalk##################################################################
##########################stalk##################################################################
##########################stalk##################################################################
##########################stalk##################################################################
##########################stalk##################################################################
##########################stalk##################################################################
@app.route('/stalk', methods=['GET','POST'])
def stalk():
    """Member directory.

    GET with ?member=<id> shows one member's profile and their ratings;
    POST relays a message ('harassment') from the logged-in user to
    another member by email; otherwise renders the member list.
    """
    username = ""
    if 'username' in session:
        username = session['username']
    members = User.query.with_entities(User.id, User.username, User.catchphrase, User.memberlevel,
            User.benefactor, User.state, User.company).all()
    if request.method == 'POST':
        if 'username' not in session:
            flash("log in, jackass")
        elif 'victim' in request.form:
            #if it's not an int, let it crash
            victim = int(request.form['victim'])
            victimemail = User.query.filter_by(id=victim).first()
            email = User.query.with_entities(User.email).filter_by(username=session['username']).first()
            # build the relayed message, revealing the sender's address
            harassment = session['username']
            harassment += "'s message: "
            if 'harassment' in request.form:
                harassment += request.form['harassment']
            harassment += "\n\n"+session['username']+"'s email address: "+email.email
            harassment += ("\n\n Reply to "+session['username']+" at the email address above to establish "
                "contact. If you don't want "+session['username']+" to know your email address then just "
                "delete this message. If they're harassing you then email me at <EMAIL> "
                "and I'll silence them forever.")
            try:
                yag = yagmail.SMTP(os.getenv("EMAIL_USER"), os.getenv("EMAIL_PASS"))
                yag.send(victimemail.email, "message from "+session['username'], harassment)
                flash("Message sent to "+victimemail.username)
                return redirect('/stalk?member='+str(victimemail.id))
            except Exception as e:
                flash("Something went wrong. Email me if you want: <EMAIL>")
                return redirect('/')
    elif request.method == 'GET':
        if request.args.get('member') != None:
            #if it's not an int, let it crash
            member = int(request.args.get('member'))
            if 'username' not in session:
                flash("log in, jackass")
            else:
                memberdata = User.query.filter_by(id=member).first()
                memberratings = (Sink.query.with_entities(Sink.location).join
                        (Rating, Rating.sink_id==Sink.id).filter_by
                        (user_id=memberdata.id).add_columns
                        (Rating.sink_id, Rating.stars, Rating.comment))
                # FIX: detect an empty result with Query.first() instead
                # of the old try/except around an indexed debug print
                if memberratings.first() is None:
                    memberratings = None
                return render_template("stalkmore.html", stalk="stalk", memberdata=memberdata,
                        memberratings=memberratings, username=username)
    return render_template("stalk.html", stalk="stalk", members=members, username=username)
#@app.route('/stalkmore', methods=['GET'])
#def stalkmore():
#members=User.query.with_entities(User.id, User.username, User.catchphrase, User.memberlevel,
# User.benefactor, User.state, User.company).all()
#loadguys=Donator.query.all()
# return render_template("stalkmore.html", stalk="stalk")
################################login###########################################################
################################login###########################################################
################################login###########################################################
################################login###########################################################
################################login###########################################################
################################login###########################################################
################################login###########################################################
################################login###########################################################
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Log a user in: verify the password hash and stash the username
    in the session on success; flash and re-render the form on failure."""
    if request.method == 'POST':
        submitted_name = request.form['username']
        submitted_pass = request.form['password']
        candidate = User.query.filter_by(username=submitted_name).first()
        if candidate and check_pw_hash(submitted_pass, candidate.hashpass):
            session['username'] = submitted_name
            return redirect('/')
        flash("login failed!")
    return render_template("login.html")
@app.route('/register', methods=['POST', 'GET'])
def register():
    """Account registration form.

    On POST, validates every field (collecting all failures as flash
    messages), then emails a verification code and shows the verify
    page; the password is held in the session until verification.  On
    GET or failed validation, re-renders the registration form.
    """
    allset=True   # stays True only if every validation check passes
    username=""
    password=""
    verify=""
    email=""
    company=""
    catchphrase=""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        verify = request.form['verify']
        email = request.form['email']
        company = request.form['companyname']
        catchphrase = request.form['catchphrase']
        usstate = request.form['state']
        # 'dust' field encoding: "True" -> 1, "False" -> 0, absent -> 3
        if 'dust' not in request.form:
            crybaby=3
        else:
            crybaby = request.form['dust']
            if crybaby=="True":
                crybaby=1
            elif crybaby=="False":
                crybaby=0
        #validate (every check runs so the user sees all problems at once)
        if len(username)<1 or len(username)>20:
            flash("username must be 1-20 characters")
            allset=False
        user_exists = User.query.filter_by(username=username).first()
        if user_exists:
            flash("username already taken!")
            allset=False
        if len(password)<1 or len(password)>30:
            flash("password must be 1-30 characters")
            allset=False
        if password!=verify:
            flash("passwords don't match!")
            allset=False
        user_exists = User.query.filter_by(email=email).first()
        if user_exists:
            flash("email already taken!")
            allset=False
        if len(email)>60:
            flash("no emails longer than 60 characters")
            allset=False
        elif len(email)<1:
            flash("enter an email address, jackass")
            allset=False
        if len(company)>60:
            flash("no company names longer than 60 characters")
            allset=False
        if len(catchphrase)>60:
            flash("no catchphrases longer than 60 characters")
            allset=False
        if len(usstate)>2:
            flash("no states longer than 2 characters. not sure how you pulled that off.")
            allset=False
        if allset:
            # user object is built but not committed yet; commit happens
            # in /verify after the emailed code is confirmed
            new_user = User(username,"",email,company,catchphrase,usstate,crybaby)
            session['secret_pass']=id_generator()
            try:
                yag = yagmail.SMTP(os.getenv("EMAIL_USER"),os.getenv("EMAIL_PASS"))
                yag.send(email, "verify thyself", session['secret_pass'])
            except Exception as e:
                #why won't this work here?
                #flash(e)
                flash("Maybe something's wrong with the email address you entered? Start over.")
                flash("You entered: "+email)
                session['secret_pass']=""
                return redirect('/')
            #if form data is good
            #session['tempuser']=new_user
            session['password']=password
            flash("YOU ENTERED: "+email)
            return render_template("verify.html", user=new_user)
        else:
            username=""
    return render_template("register.html", username=username, email=email, company=company,
            catchphrase=catchphrase)
@app.route('/verify', methods=['POST'])
def verify():
username = request.form['username']
#password = request.form['password']
email = request.form['email']
company = request.form['company']
catchphrase = request.form['catchphrase']
usstate = request.form['state']
secretpass = request.form['secretpass']
if 'dust' not in request.form:
dust=3
else:
dust = request.form['dust']
new_user = User(username, session['password'], email, company, catchphrase, usstate, dust)
del session['password']
if secretpass == session['secret_pass']:
flash("Welcome to the |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.