input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>lib/kb_JobStats/core/UJS_CAT_NJS_DataUtils.py
import time
import datetime
import dateutil.parser
import pytz
import json
import os
import re
import copy
import uuid
import shutil
import sys
from pprint import pprint, pformat
from urllib2 import Request, urlopen
from urllib2 import URLError, HTTPError
import urllib
import errno
from Bio import Entrez, SeqIO
from numpy import median, mean, max
from Workspace.WorkspaceClient import Workspace as Workspace
from Catalog.CatalogClient import Catalog
from NarrativeJobService.NarrativeJobServiceClient import NarrativeJobService
from UserAndJobState.UserAndJobStateClient import UserAndJobState
from UserProfile.UserProfileClient import UserProfile
def log(message, prefix_newline=False):
    """Logging function, provides a hook to suppress or redirect log messages."""
    lead = '\n' if prefix_newline else ''
    stamp = '{0:.2f}'.format(time.time())
    print(lead + stamp + ': ' + str(message))
def _mkdir_p(path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def ceildiv(a, b):
    """
    ceildiv: get the ceiling division of two integers.

    Equivalent to -(-a // b) for every sign combination: the quotient is
    bumped by one whenever the division leaves a remainder.
    """
    quotient, remainder = divmod(a, b)
    return quotient + 1 if remainder else quotient
def _datetime_from_utc(date_utc_str):
try:#for u'2017-08-27T17:29:37+0000'
dt = datetime.datetime.strptime(date_utc_str,'%Y-%m-%dT%H:%M:%S+0000')
except ValueError as v_er:#for ISO-formatted date & time, e.g., u'2015-02-15T22:31:47.763Z'
dt = datetime.datetime.strptime(date_utc_str,'%Y-%m-%dT%H:%M:%S.%fZ')
return dt
def _timestamp_from_utc(date_utc_str):
    """Convert a UTC time string to epoch milliseconds.

    Bug fix: the original used time.mktime(), which interprets the time
    tuple as *local* time, so the result was skewed by the server's UTC
    offset; calendar.timegm() is the correct UTC inverse. The old comment
    also said "microseconds" -- the *1000 factor yields milliseconds,
    matching the epoch-ms times reported by NJS.
    """
    import calendar  # stdlib; local import keeps this fix self-contained
    dt = _datetime_from_utc(date_utc_str)
    return int(calendar.timegm(dt.timetuple()) * 1000)  # in milliseconds
def _convert_to_datetime(dt):
new_dt = dt
if (not isinstance(dt, datetime.date) and not isinstance(dt, datetime.datetime)):
if isinstance(dt, int):
new_dt = datetime.datetime.utcfromtimestamp(dt / 1000)
else:
new_dt = _datetime_from_utc(dt)
return new_dt
def _unix_time_millis(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
return int((dt - epoch).total_seconds()*1000)
class UJS_CAT_NJS_DataUtils:
    def __init__(self, config):
        """Initialize the data-utils object from a service config dict.

        Config keys read here: 'workspace-url' (Workspace service endpoint)
        and 'scratch' (base directory for temporary files).
        """
        self.workspace_url = config['workspace-url']
        # uuid-named subdirectories keep this instance's files isolated
        # from other runs sharing the same scratch volume
        self.scratch = os.path.join(config['scratch'], str(uuid.uuid4()))
        _mkdir_p(self.scratch)
        self.metrics_dir = os.path.join(self.scratch, str(uuid.uuid4()))
        _mkdir_p(self.metrics_dir)
    def generate_app_metrics(self, input_params, token):
        """
        generate_app_metrics: get app job state data with structure as the following example:
        [
         {'app_id': u'kb_Metrics/refseq_genome_counts',
          'canceled': 0,
          'creation_time': 1510159439977,
          'error': 0,
          'exec_start_time': 1510159441720,
          'finish_time': 1510159449612,
          'finished': 1,
          'job_desc': u'Execution engine job for kb_Metrics.refseq_genome_counts',
          'job_id': u'5a03344fe4b088e4b0e0e370',
          'job_state': u'completed',
          'method': u'refseq_genome_counts',
          'module': u'kb_Metrics',
          'result': [{u'report_name': u'kb_Metrics_report_f97f0567-fee5-48ea-8fc5-1f5e361ee2bd',
                      u'report_ref': u'25735/121/1'}],
          'run_time': '0:00:08',
          'stage': u'complete',
          'status': u'done',
          'time_info': [u'2017-11-08T16:44:01+0000',
                        u'2017-11-08T16:44:09+0000',
                        None],
          'user_id': u'qzhang',
          'wsid': 25735},
         {'app_id': u'RAST_SDK/annotate_contigset',
          'canceled': 0,
          'creation_time': 1485974151389,
          'error': 0,
          'exec_start_time': 1485974156377,
          'finish_time': 1485974703341,
          'finished': 1,
          'job_desc': u'Execution engine job for RAST_SDK.annotate_genome',
          'job_id': u'58922a87e4b0c1af1bf0981b',
          'job_state': u'completed',
          'method': u'annotate_genome',
          'module': u'RAST_SDK',
          'result': [{u'id': u'Pantoea.ananatis_contigs_beta_out',
                      u'report_name': u'Pantoea.ananatis_contigs_beta_out.report',
                      u'report_ref': u'19268/62/1',
                      u'workspace': u'qzhang:narrative_1485914570215',
                      u'ws_report_id': u'Pantoea.ananatis_contigs_beta_out.report'}],
          'run_time': '0:09:07',
          'stage': u'complete',
          'status': u'done',
          'time_info': [u'2017-02-01T18:35:56+0000',
                        u'2017-02-01T18:45:03+0000',
                        None],
          'user_id': u'qzhang',
          'wsid': 19268},
          ......
        ]
        """
        # authenticate/construct the service clients before any calls are made
        # NOTE(review): init_clients/process_app_parameters/filterUJS_by_time_stage/
        # convert_time_info are defined elsewhere in this class -- not visible here
        self.init_clients(token)
        # normalize/validate the raw input into the fields used below
        params = self.process_app_parameters(input_params)
        user_ids = params['user_ids']
        time_start = params['time_start']
        time_end = params['time_end']
        job_stage = params['job_stage']
        # map users -> workspaces, then workspaces -> per-job state dicts
        ws_owner, ws_ids = self.get_user_workspaces(user_ids, time_start, time_end, 0, 0)
        ujs_ret = self.get_user_and_job_states(ws_ids)
        total_ujs_count = len(ujs_ret)
        #log("Before time_stage filter:{}".format(total_ujs_count))
        # keep only jobs matching the requested stage within [time_start, time_end]
        jt_filtered_ujs = self.filterUJS_by_time_stage(ujs_ret, job_stage, time_start, time_end)
        period_ujs_count = len(jt_filtered_ujs)
        jt_filtered_ujs = self.convert_time_info(jt_filtered_ujs)
        #log("After time_stage filter:{}".format(period_ujs_count))
        #user_grouped_ujs = self.group_by_user(jt_filtered_ujs, user_ids)
        #return {'job_states': ujs_ret}
        return {'job_states':jt_filtered_ujs}
def get_user_workspaces(self, user_ids, st_time, ed_time, showDeleted=0, showOnlyDeleted=0):
"""
get_user_workspaces: given the user ids, get a list of data structure as the example below:
typedef tuple<ws_id id,
ws_name workspace,
username owner,
timestamp moddate,
int max_objid,
permission user_permission,
permission globalread,
lock_status lockstat,
usermeta metadata> workspace_info;
ws_info = self.ws_client.list_workspace_info({'owners':user_ids,
'showDeleted': showDeleted,
'showOnlyDeleted': showOnlyDeleted,
'perm':'r',
'excludeGlobal': 1,
'after': '2017-04-03T08:56:32Z',
'before': '2017-11-03T08:56:32Z'
})
return a list of ws_owners and ws_ids
"""
#log("Fetching workspace ids for {} users:\n{}".format('the' if user_ids else 'all', user_ids if user_ids else ''))
ws_info = self.ws_client.list_workspace_info({'owners':user_ids,
'showDeleted': showDeleted,
'showOnlyDeleted': showOnlyDeleted,
'perm':'r',
'after': st_time.strftime("%Y-%m-%dT%H:%M:%SZ"),
'before': ed_time.strftime("%Y-%m-%dT%H:%M:%SZ")
})
#log(pformat(ws_info))
ws_ids = [ws[0] for ws in ws_info]
ws_owners = [ws[2] for ws in ws_info]
return (ws_owners, ws_ids)
def get_user_and_job_states(self, ws_ids):
"""
get_user_and_job_states: Get the user and job info for the given workspaces
"""
#log("Fetching the job data...for these workspaces:\n{}".format(pformat(ws_ids)))
wsj_states = []
clnt_groups = self.get_client_groups_from_cat()
counter = 0
while counter < len(ws_ids) // 10:
j_states = []
wid_slice = ws_ids[counter * 10 : (counter + 1) * 10]
wsj_states += self.retrieve_user_job_states(wid_slice, clnt_groups)
counter += 1
wsj_states += self.retrieve_user_job_states(ws_ids[counter * 10: ], clnt_groups)
#log(pformat(wsj_states[0]))
return wsj_states
def retrieve_user_job_states(self, wid_p, c_groups):
"""
call ujs_client.list_jobs2() that returns an array of job_info2:
typedef tuple<job_id job, user_info users, service_name service,
job_stage stage, job_status status, time_info times,
progress_info progress, boolean complete, boolean error,
auth_info auth, usermeta meta, job_description desc, Results res>
job_info2;
retrieve_user_job_states: returns an array of required data items about user_and_job states
"""
#log("Fetching the ujs data for workspace(s) {}...".format(pformat(wid_p)))
ret_ujs = []
try:
nar_jobs = self.ujs_client.list_jobs2({
'filter': 'RCNES',#all jobs are returned
'authstrat': 'kbaseworkspace',
'authparams': wid_p
})
except Exception as e_ujs: #RuntimeError as e_ujs:
log('UJS list_jobs2 raised error:\n')
log(pformat(e_ujs))
return []
else:#no exception raised
if (nar_jobs and len(nar_jobs) > 0):
#******The ujs_client.list_jobs2({...}) returns a 13 member tuple:*****#
job_ids = [j[0] for j in nar_jobs]#[u'59f36d00e4b0fb0c767100cc',...]
job_user_info = [j[1] for j in nar_jobs]#[[u'qzhang', None],[u'qzhang', u'qzhang'],...]
job_owners = [j[2] for j in nar_jobs]#[u'qzhang',u'qzhang',...]
job_stages = [j[3] for j in nar_jobs]#One of 'created', 'started', 'complete', 'canceled' or 'error'
job_status = [j[4] for j in nar_jobs]##[u'done','running','canceled by user','......',...]
job_time_info = [j[5] for j in nar_jobs]#tuple<timestamp started, timestamp last_update,timestamp est_complete>[[u'2017-10-27T17:29:37+0000', u'2017-10-27T17:29:42+0000', None],...]
job_progress_info = [j[6] for j in nar_jobs]#tuple<total_progress prog, max_progress max, progress_type ptype>
job_complete = [j[7] for j in nar_jobs]#[1,1,...,0,..]
job_error = [j[8] for j in nar_jobs]#[1,0,...,0,..]
job_auth_info = [j[9] for j in nar_jobs]#[[u'kbaseworkspace', u'25735'],...]
job_meta = [j[10] for j in nar_jobs]#[{u'cell_id': u'828d2e3c-5c5d-4c4c-9de8-4aacb875c074',u'run_id': u'a05df5b3-2d3e-4e4a-9a32-173acaa9bd0c',u'tag': u'beta',u'token_id': u'<PASSWORD>'},...]
job_desc = [j[11] for j in nar_jobs]#[u'Execution engine job for kb_Metrics.count_ncbi_genome_features',...]
job_res = [j[12] for j in nar_jobs]#[{},None,...]
ret_ujs = self.retrieve_ujs_via_njs(c_groups, job_ids, job_owners,
job_stages, job_status, job_time_info, job_error, job_desc)
return ret_ujs
def retrieve_ujs_via_njs(self, c_groups, job_ids, job_owners, job_stages,
job_status, job_time_info, job_error, job_desc):
ujs_ret = []
try:
#log("Calling njs.check_jobs for {} jobs".format(len(job_ids)))
job_info = self.njs_client.check_jobs({
'job_ids': job_ids, 'with_job_params': 1
})
except Exception as e_njs: #RuntimeError as e_njs:
log('NJS check_jobs raised error:\n')
log(pformat(e_njs))
return []
else:#no exception raised
job_states = job_info.get('job_states', {})
job_params = job_info.get('job_params', {})
job_errors = job_info.get('check_error', {})
# Retrieve the interested data from job_states to assemble an array of job states
#for j_id, j_owner in zip(job_ids, job_owners):
for j_idx, jb_id in enumerate(job_ids):
jbs = job_states.get(job_ids[j_idx], {})
jbp = job_params.get(job_ids[j_idx], {})
u_j_s = {}
u_j_s['job_id'] = job_ids[j_idx]
u_j_s['user_id'] = job_owners[j_idx]
u_j_s['status'] = job_status[j_idx]
u_j_s['stage'] = job_stages[j_idx]
u_j_s['time_info'] = job_time_info[j_idx]
u_j_s['error'] = job_error[j_idx]
u_j_s['job_desc'] = job_desc[j_idx]
if jbs:
try:
u_j_s['app_id'] = jbp['app_id']
for clnt in c_groups:
if u_j_s['app_id'] == clnt['app_id']:
u_j_s['client_groups'] = clnt['client_groups']
break
u_j_s['wsid'] = jbp['wsid']
u_j_s['module'], u_j_s['method'] = jbp['method'].split('.')
u_j_s['job_state'] = jbs['job_state']
if jbs['job_state'] == 'suspend':
u_j_s['error'] = jbs['error']
elif (jbs['job_state'] == 'completed' and 'result' in u_j_s):
u_j_s['result'] = jbs['result']
u_j_s['finished'] = jbs['finished']
u_j_s['canceled'] = jbs['canceled']
u_j_s['creation_time'] = jbs['creation_time']
if 'exec_start_time' in jbs:
u_j_s['exec_start_time'] = jbs['exec_start_time']
elif u_j_s['stage'] == 'started':
u_j_s['exec_start_time'] = _timestamp_from_utc(u_j_s['time_info'][1])
if 'finish_time' in jbs:
u_j_s['finish_time'] = jbs['finish_time']
elif (u_j_s['stage'] == 'completed' or u_j_s['stage'] == 'complete'):
u_j_s['finish_time'] = _timestamp_from_utc(u_j_s['time_info'][1])
except KeyError as e_key:
log("KeyError for " + pformat(e_key))
else:
pass
else:
#log("No job state info is returned by njs for job with id {}".format(job_ids[j_idx]))
#log("\nBut maybe ujs has returned something for job with id {}".format(job_ids[j_idx]))
#log(pformat(job_stages[j_idx]))
u_j_s['creation_time'] = _timestamp_from_utc(u_j_s['time_info'][0])
if (u_j_s['stage'] == 'started' and u_j_s['status'] == 'running'):
u_j_s['exec_start_time'] = _timestamp_from_utc(u_j_s['time_info'][1])
elif (u_j_s['stage'] == 'completed' or u_j_s['stage'] == 'complete'
or u_j_s['job_state'] == 'completed' or u_j_s['status'] == 'done'):
u_j_s['finish_time'] = _timestamp_from_utc(u_j_s['time_info'][1])
#get some info from the client groups
for clnt in c_groups:
if clnt['function_name'] in u_j_s['job_desc']:
u_j_s['app_id'] = clnt['app_id']
u_j_s['client_groups'] = clnt['client_groups']
u_j_s['module'] = clnt['module_name']
u_j_s['method'] = clnt['function_name']
break
#log("*******From ujs result directly*******:\n")
#log(pformat(u_j_s))
if ('exec_start_time' in u_j_s and u_j_s['stage'] == 'started'
and u_j_s['status'] == 'running'):
delta = (datetime.datetime.utcnow() -
datetime.datetime.fromtimestamp(u_j_s['exec_start_time']/1000))
delta = delta - datetime.timedelta(microseconds=delta.microseconds)
u_j_s['running_time'] = delta.total_seconds() #str(delta)
elif ('finish_time' in u_j_s and 'exec_start_time' in u_j_s
and u_j_s['status'] == 'done'):
delta = (datetime.datetime.fromtimestamp(u_j_s['finish_time']/1000) -
datetime.datetime.fromtimestamp(u_j_s['exec_start_time']/1000))
delta = delta - datetime.timedelta(microseconds=delta.microseconds)
u_j_s['run_time'] = delta.total_seconds() #str(delta)
elif (u_j_s['stage'] == 'created' and 'creation_time' in u_j_s
and u_j_s['status'] not in ['done','running','canceled by user','error']
and job_error[j_idx] == {}):
delta = (datetime.datetime.utcnow() - datetime.datetime.fromtimestamp(
u_j_s['creation_time']/1000))
delta = delta - datetime.timedelta(microseconds=delta.microseconds)
u_j_s['queued_time'] = delta.total_seconds() #str(delta)
u_j_s['status'] = 'queued'
else:
u_j_s['status'] = 'not created'
ujs_ret.append(u_j_s)
return ujs_ret
def get_exec_stats_from_cat(self):
"""
get_exec_stats_from_cat: Get stats on completed jobs
return an array of the following structure (example with data):
{
u'app_id': u'describe_rnaseq_experiment',
u'app_module_name': u'KBaseRNASeq',
u'creation_time': 1456863947.568,
u'exec_start_time': 1456863953.739,
u'finish_time': 1456863955.138,
u'func_module_name': u'KBaseRNASeq',
u'func_name': u'SetupRNASeqAnalysis',
u'git_commit_hash': u'5de844e7303a8a30a94d4ca40f2b341439b8bb3c',
u'is_error': True,
u'user_id': u'srividya22'
}
"""
# Pull the data
#log("Fetching the exec stats data from Catalog API...")
raw_stats = self.cat_client.get_exec_raw_stats({})
# Calculate queued_time and run_time (in seconds)
for elem in raw_stats:
tc = elem['creation_time']
ts = elem['exec_start_time']
| |
m.x474 - m.x478 - m.x482 - m.x486 == 0)
# NOTE(review): auto-generated Pyomo model fragment (part of a larger file);
# variable/constraint semantics are not visible here -- comments below only
# describe the visible algebraic structure.

# e831-e840: balance constraints -- each aggregate x equals the sum of four
# disaggregated x variables.
m.e831 = Constraint(expr= m.x95 - m.x475 - m.x479 - m.x483 - m.x487 == 0)
m.e832 = Constraint(expr= m.x96 - m.x476 - m.x480 - m.x484 - m.x488 == 0)
m.e833 = Constraint(expr= m.x121 - m.x505 - m.x509 - m.x513 - m.x517 == 0)
m.e834 = Constraint(expr= m.x122 - m.x506 - m.x510 - m.x514 - m.x518 == 0)
m.e835 = Constraint(expr= m.x123 - m.x507 - m.x511 - m.x515 - m.x519 == 0)
m.e836 = Constraint(expr= m.x124 - m.x508 - m.x512 - m.x516 - m.x520 == 0)
m.e837 = Constraint(expr= m.x117 - m.x489 - m.x493 - m.x497 - m.x501 == 0)
m.e838 = Constraint(expr= m.x118 - m.x490 - m.x494 - m.x498 - m.x502 == 0)
m.e839 = Constraint(expr= m.x119 - m.x491 - m.x495 - m.x499 - m.x503 == 0)
m.e840 = Constraint(expr= m.x120 - m.x492 - m.x496 - m.x500 - m.x504 == 0)
# e841-e966: big-M style bounds x <= M*b linking continuous variables to
# binary indicators (x forced to 0 when its binary b is 0).
# e841-e872: x345..x376 vs b957..b988, bound pattern 35/30/30/30.
m.e841 = Constraint(expr= m.x345 - 35 * m.b957 <= 0)
m.e842 = Constraint(expr= m.x346 - 30 * m.b958 <= 0)
m.e843 = Constraint(expr= m.x347 - 30 * m.b959 <= 0)
m.e844 = Constraint(expr= m.x348 - 30 * m.b960 <= 0)
m.e845 = Constraint(expr= m.x349 - 35 * m.b961 <= 0)
m.e846 = Constraint(expr= m.x350 - 30 * m.b962 <= 0)
m.e847 = Constraint(expr= m.x351 - 30 * m.b963 <= 0)
m.e848 = Constraint(expr= m.x352 - 30 * m.b964 <= 0)
m.e849 = Constraint(expr= m.x353 - 35 * m.b965 <= 0)
m.e850 = Constraint(expr= m.x354 - 30 * m.b966 <= 0)
m.e851 = Constraint(expr= m.x355 - 30 * m.b967 <= 0)
m.e852 = Constraint(expr= m.x356 - 30 * m.b968 <= 0)
m.e853 = Constraint(expr= m.x357 - 35 * m.b969 <= 0)
m.e854 = Constraint(expr= m.x358 - 30 * m.b970 <= 0)
m.e855 = Constraint(expr= m.x359 - 30 * m.b971 <= 0)
m.e856 = Constraint(expr= m.x360 - 30 * m.b972 <= 0)
m.e857 = Constraint(expr= m.x361 - 35 * m.b973 <= 0)
m.e858 = Constraint(expr= m.x362 - 30 * m.b974 <= 0)
m.e859 = Constraint(expr= m.x363 - 30 * m.b975 <= 0)
m.e860 = Constraint(expr= m.x364 - 30 * m.b976 <= 0)
m.e861 = Constraint(expr= m.x365 - 35 * m.b977 <= 0)
m.e862 = Constraint(expr= m.x366 - 30 * m.b978 <= 0)
m.e863 = Constraint(expr= m.x367 - 30 * m.b979 <= 0)
m.e864 = Constraint(expr= m.x368 - 30 * m.b980 <= 0)
m.e865 = Constraint(expr= m.x369 - 35 * m.b981 <= 0)
m.e866 = Constraint(expr= m.x370 - 30 * m.b982 <= 0)
m.e867 = Constraint(expr= m.x371 - 30 * m.b983 <= 0)
m.e868 = Constraint(expr= m.x372 - 30 * m.b984 <= 0)
m.e869 = Constraint(expr= m.x373 - 35 * m.b985 <= 0)
m.e870 = Constraint(expr= m.x374 - 30 * m.b986 <= 0)
m.e871 = Constraint(expr= m.x375 - 30 * m.b987 <= 0)
m.e872 = Constraint(expr= m.x376 - 30 * m.b988 <= 0)
# e873-e888: x377..x392 vs b973..b988, bound pattern 61/53/52/48.
m.e873 = Constraint(expr= m.x377 - 61 * m.b973 <= 0)
m.e874 = Constraint(expr= m.x378 - 53 * m.b974 <= 0)
m.e875 = Constraint(expr= m.x379 - 52 * m.b975 <= 0)
m.e876 = Constraint(expr= m.x380 - 48 * m.b976 <= 0)
m.e877 = Constraint(expr= m.x381 - 61 * m.b977 <= 0)
m.e878 = Constraint(expr= m.x382 - 53 * m.b978 <= 0)
m.e879 = Constraint(expr= m.x383 - 52 * m.b979 <= 0)
m.e880 = Constraint(expr= m.x384 - 48 * m.b980 <= 0)
m.e881 = Constraint(expr= m.x385 - 61 * m.b981 <= 0)
m.e882 = Constraint(expr= m.x386 - 53 * m.b982 <= 0)
m.e883 = Constraint(expr= m.x387 - 52 * m.b983 <= 0)
m.e884 = Constraint(expr= m.x388 - 48 * m.b984 <= 0)
m.e885 = Constraint(expr= m.x389 - 61 * m.b985 <= 0)
m.e886 = Constraint(expr= m.x390 - 53 * m.b986 <= 0)
m.e887 = Constraint(expr= m.x391 - 52 * m.b987 <= 0)
m.e888 = Constraint(expr= m.x392 - 48 * m.b988 <= 0)
# e889-e920: x393..x424 vs b989..b1020, bound pattern 25/22/22/20.
m.e889 = Constraint(expr= m.x393 - 25 * m.b989 <= 0)
m.e890 = Constraint(expr= m.x394 - 22 * m.b990 <= 0)
m.e891 = Constraint(expr= m.x395 - 22 * m.b991 <= 0)
m.e892 = Constraint(expr= m.x396 - 20 * m.b992 <= 0)
m.e893 = Constraint(expr= m.x397 - 25 * m.b993 <= 0)
m.e894 = Constraint(expr= m.x398 - 22 * m.b994 <= 0)
m.e895 = Constraint(expr= m.x399 - 22 * m.b995 <= 0)
m.e896 = Constraint(expr= m.x400 - 20 * m.b996 <= 0)
m.e897 = Constraint(expr= m.x401 - 25 * m.b997 <= 0)
m.e898 = Constraint(expr= m.x402 - 22 * m.b998 <= 0)
m.e899 = Constraint(expr= m.x403 - 22 * m.b999 <= 0)
m.e900 = Constraint(expr= m.x404 - 20 * m.b1000 <= 0)
m.e901 = Constraint(expr= m.x405 - 25 * m.b1001 <= 0)
m.e902 = Constraint(expr= m.x406 - 22 * m.b1002 <= 0)
m.e903 = Constraint(expr= m.x407 - 22 * m.b1003 <= 0)
m.e904 = Constraint(expr= m.x408 - 20 * m.b1004 <= 0)
m.e905 = Constraint(expr= m.x409 - 25 * m.b1005 <= 0)
m.e906 = Constraint(expr= m.x410 - 22 * m.b1006 <= 0)
m.e907 = Constraint(expr= m.x411 - 22 * m.b1007 <= 0)
m.e908 = Constraint(expr= m.x412 - 20 * m.b1008 <= 0)
m.e909 = Constraint(expr= m.x413 - 25 * m.b1009 <= 0)
m.e910 = Constraint(expr= m.x414 - 22 * m.b1010 <= 0)
m.e911 = Constraint(expr= m.x415 - 22 * m.b1011 <= 0)
m.e912 = Constraint(expr= m.x416 - 20 * m.b1012 <= 0)
m.e913 = Constraint(expr= m.x417 - 25 * m.b1013 <= 0)
m.e914 = Constraint(expr= m.x418 - 22 * m.b1014 <= 0)
m.e915 = Constraint(expr= m.x419 - 22 * m.b1015 <= 0)
m.e916 = Constraint(expr= m.x420 - 20 * m.b1016 <= 0)
m.e917 = Constraint(expr= m.x421 - 25 * m.b1017 <= 0)
m.e918 = Constraint(expr= m.x422 - 22 * m.b1018 <= 0)
m.e919 = Constraint(expr= m.x423 - 22 * m.b1019 <= 0)
m.e920 = Constraint(expr= m.x424 - 20 * m.b1020 <= 0)
# e921-e936: x441..x456 vs b1021..b1036, bound pattern 25/22/22/20.
m.e921 = Constraint(expr= m.x441 - 25 * m.b1021 <= 0)
m.e922 = Constraint(expr= m.x442 - 22 * m.b1022 <= 0)
m.e923 = Constraint(expr= m.x443 - 22 * m.b1023 <= 0)
m.e924 = Constraint(expr= m.x444 - 20 * m.b1024 <= 0)
m.e925 = Constraint(expr= m.x445 - 25 * m.b1025 <= 0)
m.e926 = Constraint(expr= m.x446 - 22 * m.b1026 <= 0)
m.e927 = Constraint(expr= m.x447 - 22 * m.b1027 <= 0)
m.e928 = Constraint(expr= m.x448 - 20 * m.b1028 <= 0)
m.e929 = Constraint(expr= m.x449 - 25 * m.b1029 <= 0)
m.e930 = Constraint(expr= m.x450 - 22 * m.b1030 <= 0)
m.e931 = Constraint(expr= m.x451 - 22 * m.b1031 <= 0)
m.e932 = Constraint(expr= m.x452 - 20 * m.b1032 <= 0)
m.e933 = Constraint(expr= m.x453 - 25 * m.b1033 <= 0)
m.e934 = Constraint(expr= m.x454 - 22 * m.b1034 <= 0)
m.e935 = Constraint(expr= m.x455 - 22 * m.b1035 <= 0)
m.e936 = Constraint(expr= m.x456 - 20 * m.b1036 <= 0)
# e937-e952: x425..x440 vs b1037..b1052, bound pattern 25/22/22/20.
m.e937 = Constraint(expr= m.x425 - 25 * m.b1037 <= 0)
m.e938 = Constraint(expr= m.x426 - 22 * m.b1038 <= 0)
m.e939 = Constraint(expr= m.x427 - 22 * m.b1039 <= 0)
m.e940 = Constraint(expr= m.x428 - 20 * m.b1040 <= 0)
m.e941 = Constraint(expr= m.x429 - 25 * m.b1041 <= 0)
m.e942 = Constraint(expr= m.x430 - 22 * m.b1042 <= 0)
m.e943 = Constraint(expr= m.x431 - 22 * m.b1043 <= 0)
m.e944 = Constraint(expr= m.x432 - 20 * m.b1044 <= 0)
m.e945 = Constraint(expr= m.x433 - 25 * m.b1045 <= 0)
m.e946 = Constraint(expr= m.x434 - 22 * m.b1046 <= 0)
m.e947 = Constraint(expr= m.x435 - 22 * m.b1047 <= 0)
m.e948 = Constraint(expr= m.x436 - 20 * m.b1048 <= 0)
m.e949 = Constraint(expr= m.x437 - 25 * m.b1049 <= 0)
m.e950 = Constraint(expr= m.x438 - 22 * m.b1050 <= 0)
m.e951 = Constraint(expr= m.x439 - 22 * m.b1051 <= 0)
m.e952 = Constraint(expr= m.x440 - 20 * m.b1052 <= 0)
# e953-e966: x457..x470 vs b1037..b1050 (binaries shared with e937-e950),
# bound pattern 24/21/20/20.
m.e953 = Constraint(expr= m.x457 - 24 * m.b1037 <= 0)
m.e954 = Constraint(expr= m.x458 - 21 * m.b1038 <= 0)
m.e955 = Constraint(expr= m.x459 - 20 * m.b1039 <= 0)
m.e956 = Constraint(expr= m.x460 - 20 * m.b1040 <= 0)
m.e957 = Constraint(expr= m.x461 - 24 * m.b1041 <= 0)
m.e958 = Constraint(expr= m.x462 - 21 * m.b1042 <= 0)
m.e959 = Constraint(expr= m.x463 - 20 * m.b1043 <= 0)
m.e960 = Constraint(expr= m.x464 - 20 * m.b1044 <= 0)
m.e961 = Constraint(expr= m.x465 - 24 * m.b1045 <= 0)
m.e962 = Constraint(expr= m.x466 - 21 * m.b1046 <= 0)
m.e963 = Constraint(expr= m.x467 - 20 * m.b1047 <= 0)
m.e964 = Constraint(expr= m.x468 - 20 * m.b1048 <= 0)
m.e965 = Constraint(expr= m.x469 - 24 * m.b1049 <= 0)
m.e966 = Constraint(expr= m.x470 - 21 * m.b1050 <= 0)
m.e967 = Constraint(expr= m.x471 - 20 * m.b1051 | |
<reponame>Brym-Gyimah/mmdetection
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet.core import BitmapMasks, PolygonMasks
def dummy_raw_bitmap_masks(size):
    """Create a random binary mask array.

    Args:
        size (tuple): expected shape of dummy masks, (H, W) or (N, H, W)

    Returns:
        ndarray: uint8 array of 0/1 values with shape ``size``
    """
    return np.random.randint(low=0, high=2, size=size, dtype=np.uint8)
def dummy_raw_polygon_masks(size):
    """Create random polygon masks.

    Args:
        size (tuple): (N, H, W) -- number of objects and image extent

    Returns:
        list[list[ndarray]]: one single-polygon list per object; each polygon
        is a flat float array with an even number (>= 6) of coordinates in
        [0, min(H, W)]
    """
    num_obj, height, width = size
    coord_bound = min(height, width)
    masks = []
    for _ in range(num_obj):
        n_coords = 2 * np.random.randint(5) + 6
        masks.append([np.random.uniform(0, coord_bound, n_coords)])
    return masks
def dummy_bboxes(num, max_height, max_width):
    """Create ``num`` random float32 boxes in xyxy form inside the image.

    Corners are drawn so that x1 <= x2 and y1 <= y2 always hold.
    Note: because of ``squeeze()``, num == 1 yields shape (4,) rather than
    (1, 4) -- presumably intentional for single-box callers.
    """
    half_extent = min(max_height // 2, max_width // 2)
    x1y1 = np.random.randint(0, half_extent, (num, 2))
    wh = np.random.randint(0, half_extent, (num, 2))
    boxes = np.concatenate([x1y1, x1y1 + wh], axis=1)
    return boxes.squeeze().astype(np.float32)
def test_bitmap_mask_init():
    """BitmapMasks accepts empty/ndarray/list masks and rejects bad types."""

    def check(masks, expected_len):
        assert len(masks) == expected_len
        assert masks.height == 28
        assert masks.width == 28

    # empty ndarray input
    check(BitmapMasks(np.empty((0, 28, 28), dtype=np.uint8), 28, 28), 0)
    # empty list input
    check(BitmapMasks([], 28, 28), 0)
    # ndarray holding 3 instances
    check(BitmapMasks(dummy_raw_bitmap_masks((3, 28, 28)), 28, 28), 3)
    # list holding 3 instances
    raw = [dummy_raw_bitmap_masks((28, 28)) for _ in range(3)]
    check(BitmapMasks(raw, 28, 28), 3)
    # nested-list input is an unsupported type
    with pytest.raises(AssertionError):
        BitmapMasks([[dummy_raw_bitmap_masks((28, 28))]], 28, 28)
def test_bitmap_mask_rescale():
    """rescale() keeps aspect ratio and rescales the stored bitmaps."""
    # empty mask set: only the recorded geometry changes
    empty = BitmapMasks(dummy_raw_bitmap_masks((0, 28, 28)), 28, 28)
    rescaled = empty.rescale((56, 72))
    assert len(rescaled) == 0
    assert (rescaled.height, rescaled.width) == (56, 56)

    # one 2x4 instance, scaled 2x in both directions
    raw = np.array([[[1, 0, 0, 0], [0, 1, 0, 1]]])
    rescaled = BitmapMasks(raw, 2, 4).rescale((8, 8))
    assert len(rescaled) == 1
    assert (rescaled.height, rescaled.width) == (4, 8)
    expected = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
                          [0, 0, 1, 1, 0, 0, 1, 1], [0, 0, 1, 1, 0, 0, 1, 1]]])
    assert (rescaled.masks == expected).all()
def test_bitmap_mask_resize():
    """resize() changes masks to an exact target size."""
    # empty mask set
    resized = BitmapMasks(dummy_raw_bitmap_masks((0, 28, 28)), 28, 28).resize((56, 72))
    assert len(resized) == 0
    assert (resized.height, resized.width) == (56, 72)

    # one diagonal instance, upscaled 4 -> 8 in both dims
    diag = np.diag(np.ones(4, dtype=np.uint8))[np.newaxis, ...]
    resized = BitmapMasks(diag, 4, 4).resize((8, 8))
    assert len(resized) == 1
    assert (resized.height, resized.width) == (8, 8)
    expected = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
                          [0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0],
                          [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0],
                          [0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1]]])
    assert (resized.masks == expected).all()

    # non-square target
    resized = BitmapMasks(diag, 4, 4).resize((4, 8))
    assert len(resized) == 1
    assert (resized.height, resized.width) == (4, 8)
    expected = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0],
                          [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1]]])
    assert (resized.masks == expected).all()
def test_bitmap_mask_flip():
    """flip() mirrors the masks and is its own inverse for each direction."""
    # flipping an empty mask set keeps the declared geometry
    empty = BitmapMasks(dummy_raw_bitmap_masks((0, 28, 28)), 28, 28)
    flipped = empty.flip(flip_direction='horizontal')
    assert len(flipped) == 0
    assert (flipped.height, flipped.width) == (28, 28)

    # each direction mirrors along the expected axes and round-trips
    direction_slices = {
        'horizontal': np.s_[:, :, ::-1],
        'vertical': np.s_[:, ::-1, :],
        'diagonal': np.s_[:, ::-1, ::-1],
    }
    for direction, sl in direction_slices.items():
        raw = dummy_raw_bitmap_masks((3, 28, 28))
        masks = BitmapMasks(raw, 28, 28)
        flipped = masks.flip(flip_direction=direction)
        restored = flipped.flip(flip_direction=direction)
        assert len(flipped) == 3
        assert (flipped.height, flipped.width) == (28, 28)
        assert flipped.masks.shape == (3, 28, 28)
        assert (masks.masks == restored.masks).all()
        assert (flipped.masks == raw[sl]).all()
def test_bitmap_mask_pad():
    """pad() grows masks to the target size with zeros bottom/right."""
    padded = BitmapMasks(dummy_raw_bitmap_masks((0, 28, 28)), 28, 28).pad((56, 56))
    assert len(padded) == 0
    assert (padded.height, padded.width) == (56, 56)

    padded = BitmapMasks(dummy_raw_bitmap_masks((3, 28, 28)), 28, 28).pad((56, 56))
    assert len(padded) == 3
    assert (padded.height, padded.width) == (56, 56)
    # the padded region must stay empty
    assert not padded.masks[:, 28:, 28:].any()
def test_bitmap_mask_crop():
    """crop() clips masks to a single integer xyxy box and rejects batches.

    Bug fix: ``np.int`` was removed in NumPy 1.24, so the original test
    crashed at collection on modern NumPy; the builtin ``int`` is the
    documented replacement and is what the deprecated alias pointed to.
    """
    dummy_bbox = np.array([0, 10, 10, 27], dtype=int)
    # crop with empty bitmap masks
    raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    cropped_masks = bitmap_masks.crop(dummy_bbox)
    assert len(cropped_masks) == 0
    assert cropped_masks.height == 17
    assert cropped_masks.width == 10
    # crop with bitmap masks contain 3 instances
    raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    cropped_masks = bitmap_masks.crop(dummy_bbox)
    assert len(cropped_masks) == 3
    assert cropped_masks.height == 17
    assert cropped_masks.width == 10
    x1, y1, x2, y2 = dummy_bbox
    assert (cropped_masks.masks == raw_masks[:, y1:y2, x1:x2]).all()
    # crop with invalid bbox (a (2, 4) batch is not a single box)
    with pytest.raises(AssertionError):
        dummy_bbox = dummy_bboxes(2, 28, 28)
        bitmap_masks.crop(dummy_bbox)
def test_bitmap_mask_crop_and_resize():
    """crop_and_resize() yields one resized crop per given box."""
    boxes = dummy_bboxes(5, 28, 28)
    inds = np.random.randint(0, 3, (5, ))

    # empty source masks
    out = BitmapMasks(dummy_raw_bitmap_masks((0, 28, 28)), 28, 28) \
        .crop_and_resize(boxes, (56, 56), inds)
    assert len(out) == 0
    assert (out.height, out.width) == (56, 56)

    # 3 source masks, 5 boxes -> 5 outputs (one per box, not per mask)
    out = BitmapMasks(dummy_raw_bitmap_masks((3, 28, 28)), 28, 28) \
        .crop_and_resize(boxes, (56, 56), inds)
    assert len(out) == 5
    assert (out.height, out.width) == (56, 56)
def test_bitmap_mask_expand():
    """expand() embeds masks at (top, left) inside a larger zero canvas."""
    expanded = BitmapMasks(dummy_raw_bitmap_masks((0, 28, 28)), 28, 28) \
        .expand(56, 56, 12, 14)
    assert len(expanded) == 0
    assert (expanded.height, expanded.width) == (56, 56)

    expanded = BitmapMasks(dummy_raw_bitmap_masks((3, 28, 28)), 28, 28) \
        .expand(56, 56, 12, 14)
    assert len(expanded) == 3
    assert (expanded.height, expanded.width) == (56, 56)
    # everything outside the pasted 28x28 window stays zero
    assert not expanded.masks[:, :12, :14].any()
    assert not expanded.masks[:, 12 + 28:, 14 + 28:].any()
def test_bitmap_mask_area():
    """areas matches the per-instance foreground pixel counts."""
    empty = BitmapMasks(dummy_raw_bitmap_masks((0, 28, 28)), 28, 28)
    assert empty.areas.sum() == 0

    raw = dummy_raw_bitmap_masks((3, 28, 28))
    areas = BitmapMasks(raw, 28, 28).areas
    assert len(areas) == 3
    assert (areas == raw.sum(axis=(1, 2))).all()
def test_bitmap_mask_to_ndarray():
    """to_ndarray() round-trips the raw mask array."""
    converted = BitmapMasks(dummy_raw_bitmap_masks((0, 28, 28)), 28, 28).to_ndarray()
    assert isinstance(converted, np.ndarray)
    assert converted.shape == (0, 28, 28)

    raw = dummy_raw_bitmap_masks((3, 28, 28))
    converted = BitmapMasks(raw, 28, 28).to_ndarray()
    assert isinstance(converted, np.ndarray)
    assert converted.shape == (3, 28, 28)
    assert (converted == raw).all()
def test_bitmap_mask_to_tensor():
    """to_tensor() preserves shape, dtype and values."""
    tensor = BitmapMasks(dummy_raw_bitmap_masks((0, 28, 28)), 28, 28) \
        .to_tensor(dtype=torch.uint8, device='cpu')
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (0, 28, 28)

    raw = dummy_raw_bitmap_masks((3, 28, 28))
    tensor = BitmapMasks(raw, 28, 28).to_tensor(dtype=torch.uint8, device='cpu')
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (3, 28, 28)
    assert (tensor.numpy() == raw).all()
def test_bitmap_mask_index():
    """Integer and sequence indexing of BitmapMasks mirror raw-array indexing."""
    raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    # Single-integer index selects one mask.
    assert (bitmap_masks[0].masks == raw_masks[0]).all()
    # Sequence index selects a sub-collection.
    assert (bitmap_masks[range(2)].masks == raw_masks[range(2)]).all()
def | |
m, max(n - m, 0), -1):
fact_mpn_div_fact_mmn *= i
return 2.0/(2*n + 1)*fact_mpn_div_fact_mmn
def Get_Int_Pm2muPmnu(m, mu, nu):
    """Closed-form integral of P^{m+2}_mu(x) * P^m_nu(x) over [-1, 1].

    Zero unless both associated Legendre functions exist (order <= degree)
    and mu >= nu with mu + nu even.
    """
    # Either factor vanishes identically when its order exceeds its degree.
    if m + 2 > mu or m > nu:
        return 0
    if mu < nu:
        return 0.0

    def _falling(lo):
        # Product nu+m, nu+m-1, ..., lo+1 (factorial ratio).
        acc = 1.0
        k = nu + m
        while k > lo:
            acc *= k
            k -= 1
        return acc

    if mu == nu:
        return -2.0/(2.0*nu + 1)*_falling(max(nu - m - 2, 0))
    if (mu + nu) % 2 == 0:
        return 4*(m + 1)*_falling(max(nu - m, 0))
    return 0.0
def Get_Int_PmnPmn_div_1mx2(m, n):
    """Integral of [P^m_n(x)]^2 / (1 - x^2) over [-1, 1].

    Equals (n+m)! / ((n-m)! * m) for 0 < m <= n; diverges for m == 0.
    """
    if m > n:
        return 0
    if m == 0:
        # Integrand behaves like 1/(1 - x^2) at the endpoints: divergent.
        return np.inf
    ratio = 1.0
    for k in range(n + m, max(n - m, 0), -1):
        ratio *= k
    return ratio / m
def Expand_P_minusm_n(m, n):
    """Express P^{-m}_n as coeff * P^m_n; returns {(m, n): coeff}.

    coeff = (-1)^m (n-m)!/(n+m)!; zero when |m| > n.
    """
    if abs(m) > n:
        return {(m, n): 0.0}
    ratio = 1.0
    k = n + m
    while k > max(n - m, 0):
        ratio *= k
        k -= 1
    sign = -1.0 if m % 2 == 1 else 1.0
    return {(m, n): sign / ratio}
def Expand_xPmn(m, n):
    """x * P^m_n as a combination of P^m_{n-1} and P^m_{n+1} (recurrence)."""
    lower = (n + m)/(2*n + 1)
    upper = (n - m + 1)/(2*n + 1)
    return {(m, n - 1): lower, (m, n + 1): upper}
def Expand_x_div_sqrt_1mx2_Pmn(m, n):
    """x / sqrt(1 - x^2) * P^m_n in terms of P^{m-1}_n and P^{m+1}_n."""
    denom = 2*m
    down = -(n + m)*(n - m + 1)/denom
    up = -1/denom
    return {(m - 1, n): down, (m + 1, n): up}
def Expand_1_div_sqrt_1mx2_Pmn__nm1(m, n):
    """1 / sqrt(1 - x^2) * P^m_n in terms of P^{m-1}_{n-1} and P^{m+1}_{n-1}."""
    denom = 2*m
    down = -(n + m)*(n + m - 1)/denom
    up = -1/denom
    return {(m - 1, n - 1): down, (m + 1, n - 1): up}
def Expand_1_div_sqrt_1mx2_Pmn__np1(m, n):
    """1 / sqrt(1 - x^2) * P^m_n in terms of P^{m-1}_{n+1} and P^{m+1}_{n+1}."""
    denom = 2*m
    down = -(n - m + 1)*(n - m + 2)/denom
    up = -1/denom
    return {(m - 1, n + 1): down, (m + 1, n + 1): up}
def Expand_1mx2_dxPmn(m, n):
    """(1 - x^2) * d/dx P^m_n as a combination of P^m_{n-1} and P^m_{n+1}."""
    denom = 2*n + 1
    lower = (n + 1)*(n + m)/denom
    upper = -n*(n - m + 1)/denom
    return {(m, n - 1): lower, (m, n + 1): upper}
def Expand_sqrt_1mx2_Pmn__mm1(m, n):
    """sqrt(1 - x^2) * P^m_n in terms of P^{m-1}_{n-1} and P^{m-1}_{n+1}."""
    denom = 2*n + 1
    lower = -(n + m)*(n + m - 1)/denom
    upper = (n - m + 1)*(n - m + 2)/denom
    return {(m - 1, n - 1): lower, (m - 1, n + 1): upper}
def Expand_sqrt_1mx2_Pmn__mp1(m, n):
    """sqrt(1 - x^2) * P^m_n in terms of P^{m+1}_{n-1} and P^{m+1}_{n+1}."""
    denom = 2*n + 1
    return {(m + 1, n - 1): 1/denom, (m + 1, n + 1): -1/denom}
def Expand_sqrt_1mx2_dxPmn(m, n):
    """sqrt(1 - x^2) * d/dx P^m_n in terms of P^{m-1}_n and P^{m+1}_n."""
    down = (n + m)*(n - m + 1)/2
    return {(m - 1, n): down, (m + 1, n): -1.0/2}
import copy
def Get_Expansion_Coeff_d_mn(m, n, c, thresh_rel=1.0e-14):
    """Legendre expansion coefficients of the prolate angular function S_mn(c, x).

    Returns (d_mn, d_inds): d_inds are the degree offsets k such that the
    expansion runs over P^m_{m+k} (k even or odd depending on the parity of
    n - m), and d_mn are the matching coefficients.  Coefficients smaller
    than thresh_rel * max|d| are dropped; with thresh_rel == 0.0 the arrays
    are instead truncated at the first exact zero coefficient.
    """
    # Characteristic value, then the raw specfun coefficient array.
    cv = specfun.segv(m, n, c, 1)[0]
    d_mn = specfun.sdmn(m, n, c, cv, 1).copy()
    if m % 2 == 1:
        # Sign-convention correction for odd m — TODO confirm against specfun docs.
        d_mn *= -1.0
    parity = 0 if (n - m) % 2 == 0 else 1
    d_inds = 2*np.arange(len(d_mn)) + parity
    if thresh_rel != 0.0:
        keep = np.abs(d_mn) > np.max(np.abs(d_mn))*thresh_rel
        return d_mn[keep], d_inds[keep]
    # argmax returns 0 when no exact zero exists: keep everything then.
    first_zero = np.argmax(d_mn == 0.0)
    if first_zero > 0:
        return d_mn[:first_zero], d_inds[:first_zero]
    return d_mn, d_inds
## I_1mNn
def GetInt_Sm_mpn_Sm_mpN(c, m, n, N):
    """Integral I_1: S_{m,m+n} * S_{m,m+N} over [-1, 1]; zero unless n == N."""
    if n != N:
        # Angular functions of different degree are orthogonal.
        return 0.0
    d_mn, d_inds = Get_Expansion_Coeff_d_mn(m, m + n, c)
    # Cross terms vanish by Legendre orthogonality; only squares remain.
    return sum(d*d*Get_Int_PmnPmn(m, m + k) for d, k in zip(d_mn, d_inds))
def GetInt_Sm_mpn_Sm_mpN_quad1(c, m, n, N):
    """Direct quadrature cross-check of I_1 via scipy.special.pro_ang1."""
    def integrand(x):
        return pro_ang1(m, m + n, c, x)[0] * pro_ang1(m, m + N, c, x)[0]
    return quad(integrand, -1, 1)
def GetInt_Sm_mpn_Sm_mpN_quad2(c, m, n, N):
    """Quadrature cross-check of I_1 using the low-level specfun routines."""
    def integrand(x):
        # aswfa returns (S, S'); [0] selects the function value.
        cv_a = specfun.segv(m, m + n, c, 1)[0]
        cv_b = specfun.segv(m, m + N, c, 1)[0]
        return (specfun.aswfa(m, m + n, c, x, 1, cv_a)[0]
                * specfun.aswfa(m, m + N, c, x, 1, cv_b)[0])
    return quad(integrand, -1, 1)
## I_2mNn
def GetInt_Smp1_mpnp1_Sm_mpN_x_div_sqrt_1mx2(c, m, n, N):
    """Integral I_2: S_{m+1,m+n+1} * S_{m,m+N} * x / sqrt(1 - x^2) over [-1, 1].

    Both angular functions are expanded in associated Legendre functions;
    x / sqrt(1 - x^2) * P^{m+1}_k is rewritten with Expand_x_div_sqrt_1mx2_Pmn
    (orders m and m+2), and the products are integrated term by term.
    """
    d_q, d_q_inds = Get_Expansion_Coeff_d_mn(m+1, m+n+1, c)
    m_q = m + 1
    d_r, d_r_inds = Get_Expansion_Coeff_d_mn(m, m+N, c)
    m_r = m
    len_q = len(d_q_inds)  # NOTE(review): unused
    len_r = len(d_r_inds)  # NOTE(review): unused
    I = 0.0
    for q in range(len(d_q)):
        # Expansion shifts the order by -1 (-> m_r) or +1 (-> m_r + 2).
        q_expansion = Expand_x_div_sqrt_1mx2_Pmn(m_q, m_q + d_q_inds[q])
        for mn_pair in q_expansion:
            m_q_expan, n_q_expan = mn_pair
            coeff = q_expansion[mn_pair]
            if m_q_expan == m_r:
                # Same order: orthogonality, only matching degrees survive.
                for r in range(len(d_r)):
                    if n_q_expan == m_r + d_r_inds[r]:
                        I += coeff*d_q[q]*d_r[r]*Get_Int_PmnPmn(m_q_expan, n_q_expan)
            elif m_q_expan == m_r + 2:
                # Orders differ by two: use the mixed-order Legendre integral.
                for r in range(len(d_r)):
                    I += coeff*d_q[q]*d_r[r]*Get_Int_Pm2muPmnu(m_r, n_q_expan, m_r + d_r_inds[r])
            else:
                assert False  # the expansion can only shift the order by +/-1
    return I
def GetInt_Smp1_mpnp1_Sm_mpN_x_div_sqrt_1mx2_quad1(c, m, n, N):
    """Direct quadrature cross-check of I_2 via scipy.special.pro_ang1."""
    def integrand(x):
        s_a = pro_ang1(m + 1, m + n + 1, c, x)[0]
        s_b = pro_ang1(m, m + N, c, x)[0]
        return s_a * s_b * x / np.sqrt(1 - x**2)
    return quad(integrand, -1, 1)
def GetInt_Smp1_mpnp1_Sm_mpN_x_div_sqrt_1mx2_quad2(c, m, n, N):
    """Quadrature cross-check of I_2 using the low-level specfun routines."""
    def integrand(x):
        cv_a = specfun.segv(m + 1, m + n + 1, c, 1)[0]
        cv_b = specfun.segv(m, m + N, c, 1)[0]
        return (specfun.aswfa(m + 1, m + n + 1, c, x, 1, cv_a)[0]
                * specfun.aswfa(m, m + N, c, x, 1, cv_b)[0]
                * x / np.sqrt(1 - x**2))
    return quad(integrand, -1, 1)
## I_3mNn
def GetInt_Sm_mpn_Sm_mpN_x(c, m, n, N):
    """Integral I_3: S_{m,m+n}(c, x) * S_{m,m+N}(c, x) * x over [-1, 1].

    Expands both prolate angular functions in associated Legendre functions
    P^m_k, rewrites x * P^m_k via the three-term recurrence (Expand_xPmn),
    and integrates term by term with the orthogonality integral
    Get_Int_PmnPmn.

    Parameters
    ----------
    c : float
        Prolate spheroidal parameter.
    m, n, N : int
        The two angular functions are S_{m, m+n} and S_{m, m+N}.

    Returns
    -------
    float
        Value of the integral.
    """
    d_q, d_q_inds = Get_Expansion_Coeff_d_mn(m, m + n, c)
    m_q = m
    d_r, d_r_inds = Get_Expansion_Coeff_d_mn(m, m + N, c)
    m_r = m
    I = 0.0
    for q in range(len(d_q)):
        # x * P^m_k is a combination of P^m_{k-1} and P^m_{k+1}.
        q_expansion = Expand_xPmn(m_q, m_q + d_q_inds[q])
        for (m_q_expan, n_q_expan), coeff in q_expansion.items():
            # Multiplying by x never changes the order.
            assert m_q_expan == m_r
            # Orthogonality: only matching degrees contribute.
            for r in range(len(d_r)):
                if n_q_expan == m_r + d_r_inds[r]:
                    I += coeff*d_q[q]*d_r[r]*Get_Int_PmnPmn(m_q_expan, n_q_expan)
    return I
def GetInt_Sm_mpn_Sm_mpN_x_quad1(c, m, n, N):
    """Direct quadrature cross-check of I_3 via scipy.special.pro_ang1."""
    def integrand(x):
        return pro_ang1(m, m + n, c, x)[0] * pro_ang1(m, m + N, c, x)[0] * x
    return quad(integrand, -1, 1)
def GetInt_Sm_mpn_Sm_mpN_x_quad2(c, m, n, N):
    """Quadrature cross-check of I_3 using the low-level specfun routines."""
    def integrand(x):
        cv_a = specfun.segv(m, m + n, c, 1)[0]
        cv_b = specfun.segv(m, m + N, c, 1)[0]
        return (specfun.aswfa(m, m + n, c, x, 1, cv_a)[0]
                * specfun.aswfa(m, m + N, c, x, 1, cv_b)[0] * x)
    return quad(integrand, -1, 1)
## I_4mNn
def GetInt_dxSm_mpn_Sm_mpN_1mx2(c, m, n, N):
    """Integral I_4: S'_{m,m+n} * S_{m,m+N} * (1 - x^2) over [-1, 1].

    (1 - x^2) d/dx P^m_k is rewritten with Expand_1mx2_dxPmn (same order,
    degrees k-1 and k+1), then integrated term by term via orthogonality.
    """
    d_q, d_q_inds = Get_Expansion_Coeff_d_mn(m, m+n, c)
    m_q = m
    d_r, d_r_inds = Get_Expansion_Coeff_d_mn(m, m+N, c)
    m_r = m
    len_q = len(d_q_inds)  # NOTE(review): unused
    len_r = len(d_r_inds)  # NOTE(review): unused
    I = 0.0
    for q in range(len(d_q)):
        q_expansion = Expand_1mx2_dxPmn(m_q, m_q + d_q_inds[q])
        for mn_pair in q_expansion:
            m_q_expan, n_q_expan = mn_pair
            coeff = q_expansion[mn_pair]
            if m_q_expan == m_r:
                # Orthogonality: only matching degrees contribute.
                for r in range(len(d_r)):
                    if n_q_expan == m_r + d_r_inds[r]:
                        I += coeff*d_q[q]*d_r[r]*Get_Int_PmnPmn(m_q_expan, n_q_expan)
            else:
                assert False  # this expansion never changes the order m
    return I
def GetInt_dxSm_mpn_Sm_mpN_1mx2_quad1(c, m, n, N):
    """Direct quadrature cross-check of I_4 (pro_ang1[1] is dS/dx)."""
    def integrand(x):
        return pro_ang1(m, m + n, c, x)[1] * pro_ang1(m, m + N, c, x)[0] * (1 - x**2)
    return quad(integrand, -1, 1)
def GetInt_dxSm_mpn_Sm_mpN_1mx2_quad2(c, m, n, N):
    """Quadrature cross-check of I_4 using the low-level specfun routines."""
    def integrand(x):
        cv_a = specfun.segv(m, m + n, c, 1)[0]
        cv_b = specfun.segv(m, m + N, c, 1)[0]
        return (specfun.aswfa(m, m + n, c, x, 1, cv_a)[1]
                * specfun.aswfa(m, m + N, c, x, 1, cv_b)[0] * (1 - x**2))
    return quad(integrand, -1, 1)
## I_5mNn
def GetInt_Smp1_mpnp1_Sm_mpN_sqrt_1mx2(c, m, n, N):
    """Integral I_5: S_{m+1,m+n+1} * S_{m,m+N} * sqrt(1 - x^2) over [-1, 1].

    sqrt(1 - x^2) * P^{m+1}_k is rewritten with Expand_sqrt_1mx2_Pmn__mm1,
    which lowers the order to m, so only the orthogonality integral appears.
    """
    d_q, d_q_inds = Get_Expansion_Coeff_d_mn(m+1, m+n+1, c)
    m_q = m + 1
    d_r, d_r_inds = Get_Expansion_Coeff_d_mn(m, m+N, c)
    m_r = m
    len_q = len(d_q_inds)  # NOTE(review): unused
    len_r = len(d_r_inds)  # NOTE(review): unused
    I = 0.0
    for q in range(len(d_q)):
        # Expansion lowers the order m_q by one, i.e. down to m_r.
        q_expansion = Expand_sqrt_1mx2_Pmn__mm1(m_q, m_q + d_q_inds[q])
        for mn_pair in q_expansion:
            m_q_expan, n_q_expan = mn_pair
            coeff = q_expansion[mn_pair]
            if m_q_expan == m_r:
                # Orthogonality: only matching degrees contribute.
                for r in range(len(d_r)):
                    if n_q_expan == m_r + d_r_inds[r]:
                        I += coeff*d_q[q]*d_r[r]*Get_Int_PmnPmn(m_q_expan, n_q_expan)
            else:
                assert False  # expansion always yields order m_q - 1 == m_r
    return I
def GetInt_Smp1_mpnp1_Sm_mpN_sqrt_1mx2_quad1(c, m, n, N):
    """Direct quadrature cross-check of I_5 via scipy.special.pro_ang1."""
    def integrand(x):
        s_a = pro_ang1(m + 1, m + n + 1, c, x)[0]
        s_b = pro_ang1(m, m + N, c, x)[0]
        return s_a * s_b * np.sqrt(1 - x**2)
    return quad(integrand, -1, 1)
def GetInt_Smp1_mpnp1_Sm_mpN_sqrt_1mx2_quad2(c, m, n, N):
    """Quadrature cross-check of I_5 using the low-level specfun routines."""
    def integrand(x):
        cv_a = specfun.segv(m + 1, m + n + 1, c, 1)[0]
        cv_b = specfun.segv(m, m + N, c, 1)[0]
        return (specfun.aswfa(m + 1, m + n + 1, c, x, 1, cv_a)[0]
                * specfun.aswfa(m, m + N, c, x, 1, cv_b)[0]
                * np.sqrt(1 - x**2))
    return quad(integrand, -1, 1)
## I_6mNn
def GetInt_dxSmp1_mpnp1_Sm_mpN_x_sqrt_1mx2(c, m, n, N):
    """Integral I_6: S'_{m+1,m+n+1} * S_{m,m+N} * x * sqrt(1 - x^2) over [-1, 1].

    Both factors are expanded: sqrt(1 - x^2) * d/dx P^{m+1}_k on the q side
    (orders m and m+2) and x * P^m_k on the r side (same order, degrees
    k-1/k+1), then each product pair is integrated in closed form.
    """
    d_q, d_q_inds = Get_Expansion_Coeff_d_mn(m+1, m+n+1, c)
    m_q = m + 1
    d_r, d_r_inds = Get_Expansion_Coeff_d_mn(m, m+N, c)
    m_r = m
    len_q = len(d_q_inds)  # NOTE(review): unused
    len_r = len(d_r_inds)  # NOTE(review): unused
    I = 0.0
    for q in range(len(d_q)):
        # Shifts the order by -1 (-> m_r) or +1 (-> m_r + 2).
        q_expansion = Expand_sqrt_1mx2_dxPmn(m_q, m_q + d_q_inds[q])
        for mn_q_pair in q_expansion:
            m_q_expan, n_q_expan = mn_q_pair
            coeff_q = q_expansion[mn_q_pair]
            if m_q_expan == m_r:
                for r in range(len(d_r)):
                    # x * P^m keeps the order; only matching degrees survive.
                    r_expansion = Expand_xPmn(m_r, m_r + d_r_inds[r])
                    for mn_r_pair in r_expansion:
                        m_r_expan, n_r_expan = mn_r_pair
                        coeff_r = r_expansion[mn_r_pair]
                        assert m_r_expan == m_q_expan
                        if n_q_expan == n_r_expan:
                            I += coeff_q*coeff_r*d_q[q]*d_r[r]*Get_Int_PmnPmn(m_q_expan, n_q_expan)
            elif m_q_expan == m_r + 2:
                for r in range(len(d_r)):
                    # Orders differ by two: use the mixed-order integral.
                    r_expansion = Expand_xPmn(m_r, m_r + d_r_inds[r])
                    for mn_r_pair in r_expansion:
                        m_r_expan, n_r_expan = mn_r_pair
                        coeff_r = r_expansion[mn_r_pair]
                        assert m_r_expan + 2 == m_q_expan
                        I += coeff_q*coeff_r*d_q[q]*d_r[r]*Get_Int_Pm2muPmnu(m_r_expan, n_q_expan, n_r_expan)
            else:
                assert False  # q expansion can only shift the order by +/-1
    return I
def GetInt_dxSmp1_mpnp1_Sm_mpN_x_sqrt_1mx2_quad1(c, m, n, N):
    """Direct quadrature cross-check of I_6 (pro_ang1[1] is dS/dx)."""
    def integrand(x):
        ds_a = pro_ang1(m + 1, m + n + 1, c, x)[1]
        s_b = pro_ang1(m, m + N, c, x)[0]
        return ds_a * s_b * x * np.sqrt(1 - x**2)
    return quad(integrand, -1, 1)
def GetInt_dxSmp1_mpnp1_Sm_mpN_x_sqrt_1mx2_quad2(c, m, n, N):
    """Quadrature cross-check of I_6 using the low-level specfun routines."""
    def integrand(x):
        cv_a = specfun.segv(m + 1, m + n + 1, c, 1)[0]
        cv_b = specfun.segv(m, m + N, c, 1)[0]
        return (specfun.aswfa(m + 1, m + n + 1, c, x, 1, cv_a)[1]
                * specfun.aswfa(m, m + N, c, x, 1, cv_b)[0]
                * x * np.sqrt(1 - x**2))
    return quad(integrand, -1, 1)
## I_7mNn
def GetInt_Smp2_mpnp2_Sm_mpN(c, m, n, N):
d_q, d_q_inds = Get_Expansion_Coeff_d_mn(m+2, m+n+2, c)
m_q = m + 2
d_r, d_r_inds = Get_Expansion_Coeff_d_mn(m, m+N, c)
m_r = m
len_q = len(d_q_inds)
len_r = len(d_r_inds)
I = 0.0
for q in range(len(d_q)):
for r in range(len(d_r)):
| |
9.22],
'Split 1_Peptide 2': [9.3, 5.03, 0.379, 0.517],
'Split 1_Peptide 1': [9.81, 4.37, 1.0, 3.4],
'Split 1_Peptide 3': [5.42, 9.75, 9.19, 8.33]}, dtype=object),
'Analyte X': pd.DataFrame({'Plate': ['tests/Test_plates/Test_plate_16.xlsx'],
'Analyte': ['Analyte X'],
'Split 1_Peptide 4': [4.32],
'Split 1_Peptide 2': [8.28],
'Split 1_Peptide 1': [8.1],
'Split 1_Peptide 3': [2.75]}, dtype=object),
'Analyte 2': pd.DataFrame({'Plate': ['tests/Test_plates/Test_plate_16.xlsx' for n in range(2)],
'Analyte': ['Analyte 2' for n in range(2)],
'Split 1_Peptide 4': [9.81, 0.448],
'Split 1_Peptide 2': [8.21, 1.04],
'Split 1_Peptide 1': [1.12, 3.42],
'Split 1_Peptide 3': [5.68, 3.44]}, dtype=object)}),
['Split 1_Peptide 4', 'Split 1_Peptide 2', 'Split 1_Peptide 1',
'Split 1_Peptide 3']],
17: [np.nan, np.nan, np.nan]
}
for num in exp_input_dict.keys():
plate_path = exp_input_dict[num][0]
split_name = exp_input_dict[num][1]
peptide_dict = exp_input_dict[num][2]
gain = exp_input_dict[num][3]
exp_raw_data = exp_results_dict[num][0]
exp_grouped_data = exp_results_dict[num][1]
exp_peptide_list = exp_results_dict[num][2]
if num in [2, 3]:
with self.assertRaises(TypeError) as message: parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain
)
self.assertEqual(
str(message.exception),
'Gain value not recognised. Please specify a positive integer'
)
elif num == 4:
with self.assertRaises(ValueError) as message: parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain
)
self.assertEqual(
str(message.exception),
'Gain value not recognised. Please specify a positive integer'
)
elif num == 5:
with self.assertRaises(FileNotFoundError) as message: parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain
)
self.assertEqual(
str(message.exception),
'File tests/Test_plates/Test_plate_5.xlsx doesn\'t exist'
)
elif num == 6:
with self.assertRaises(ValueError) as message: parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain
)
self.assertEqual(
str(message.exception),
'"Protocol Information" sheet not found in tests/Test_plates/Test_plate_6.xlsx'
)
elif num == 7:
with self.assertRaises(ValueError) as message: parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain
)
self.assertEqual(
str(message.exception),
'"Plate Layout" information not found on Protocol Information sheet'
)
elif num == 8:
with self.assertRaises(ValueError) as message: parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain
)
self.assertEqual(
str(message.exception),
'"Peptide Layout" information not found on Protocol Information sheet'
)
elif num == 9:
with self.assertRaises(ValueError) as message: parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain
)
self.assertEqual(
str(message.exception),
'"End point" sheet not found in tests/Test_plates/Test_plate_9.xlsx'
)
elif num == 10:
with self.assertRaises(ValueError) as message: parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain
)
self.assertEqual(
str(message.exception),
'Failed to locate plate in tests/Test_plates/Test_plate_10.xlsx\n'
'- expect to be marked as e.g. "1.rawdata"'
)
elif num in [11, 12, 14]:
with self.assertRaises(PlateLayoutError) as message: parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain=1
)
self.assertEqual(
str(message.exception),
'Peptides tested in tests/Test_plates/Test_plate_{}.xlsx '
'don\'t match peptides specified by user'.format(num)
)
elif num == 13:
with self.assertRaises(PlateLayoutError) as message: parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain=1
)
self.assertEqual(
str(message.exception),
'Peptide(s)\n[\'Peptide 3\']\nlisted more than once in '
'plate layout from tests/Test_plates/Test_plate_13.xlsx'
)
elif num == 17:
with self.assertRaises(ValueError) as message: parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain
)
self.assertEqual(
str(message.exception),
'NaN value detected in tests/Test_plates/Test_plate_17.xlsx'
)
else:
(
act_raw_data, act_grouped_data, act_peptide_list
) = parse_xlsx_to_dataframe(
plate_path, split_name, peptide_dict, gain
)
pd.testing.assert_frame_equal(exp_raw_data, act_raw_data)
self.assertEqual(list(exp_grouped_data.keys()), list(act_grouped_data.keys()))
for key in exp_grouped_data.keys():
exp_df = exp_grouped_data[key]
act_df = act_grouped_data[key]
pd.testing.assert_frame_equal(exp_df, act_df)
self.assertEqual(exp_peptide_list, act_peptide_list)
    def test_draw_scatter_plot(self):
        """
        Tests draw_scatter_plot in parse_array_data.py
        """
        print('Testing draw_scatter_plot')
        # Inputs per case: [grouped fluorescence data (analyte -> DataFrame),
        # features (peptides) to plot, analytes to plot].  Cases 3-7 are
        # invalid and must raise; cases 1, 2 and 8 must succeed.
        exp_input_dict = {
            1: [{'Analyte 1': pd.DataFrame({'Peptide 1': [1.0, 2.0],
                                            'Peptide 2': [2.0, np.nan],
                                            'Peptide 3': [np.nan, np.nan]}),
                 'Analyte 2': pd.DataFrame({'Peptide 1': [1.5, 2.5],
                                            'Peptide 2': [3.0, np.nan],
                                            'Peptide 3': [1.0, 3.0]})},
                ['Peptide 1', 'Peptide 2', 'Peptide 3'],
                ['Analyte 1', 'Analyte 2']],
            2: [{'Analyte 1': pd.DataFrame({'Peptide 1': [1.0, 2.0],
                                            'Peptide 2': [2.0, np.nan],
                                            'Peptide 3': [np.nan, np.nan]}),
                 'Analyte 2': pd.DataFrame({'Peptide 1': [1.5, 2.5],
                                            'Peptide 2': [3.0, np.nan],
                                            'Peptide 3': [1.0, 3.0]})},
                ['Peptide 2'],
                ['Analyte 1']],
            3: [{'Analyte 1': pd.DataFrame({'Peptide 1': [1.0, 2.0],
                                            'Peptide 2': [2.0, np.nan],
                                            'Peptide 3': [np.nan, np.nan]}),
                 'Analyte 2': pd.DataFrame({'Peptide 1': [1.5, 2.5],
                                            'Peptide 2': [3.0, np.nan],
                                            'Peptide 3': [1.0, 3.0]})},
                [],
                ['Analyte 1']],
            4: [{'Analyte 1': pd.DataFrame({'Peptide 1': [1.0, 2.0],
                                            'Peptide 2': [2.0, np.nan],
                                            'Peptide 3': [np.nan, np.nan]}),
                 'Analyte 2': pd.DataFrame({'Peptide 1': [1.5, 2.5],
                                            'Peptide 2': [3.0, np.nan],
                                            'Peptide 3': [1.0, 3.0]})},
                ['Peptide 1'],
                []],
            5: [{'Analyte 1': pd.DataFrame({'Peptide 1': [1.0, 2.0],
                                            'Peptide 2': [2.0, np.nan],
                                            'Peptide 3': [np.nan, np.nan]}),
                 'Analyte 2': pd.DataFrame({'Peptide 1': [1.5, 2.5],
                                            'Peptide 2': [3.0, np.nan],
                                            'Peptide 3': [1.0, 3.0]})},
                ['Peptide 1', 'Peptide 4'],
                ['Analyte 1']],
            6: [{'Analyte 1': pd.DataFrame({'Peptide 1': [1.0, 2.0],
                                            'Peptide 2': [2.0, np.nan],
                                            'Peptide 3': [np.nan, np.nan]}),
                 'Analyte 2': pd.DataFrame({'Peptide 1': [1.5, 2.5],
                                            'Peptide 2': [3.0, np.nan],
                                            'Peptide 3': [1.0, 3.0]})},
                ['Peptide 1'],
                ['Analyte 1', 'Analyte 4']],
            7: [{'Analyte 1': pd.DataFrame({'Peptide 1': [1.0, 2.0],
                                            'Peptide 2': ['X', np.nan],
                                            'Peptide 3': [np.nan, np.nan]}),
                 'Analyte 2': pd.DataFrame({'Peptide 1': [1.5, 2.5],
                                            'Peptide 2': [3.0, np.nan],
                                            'Peptide 3': [1.0, 3.0]})},
                ['Peptide 1', 'Peptide 2', 'Peptide 3'],
                ['Analyte 1', 'Analyte 2']],
            8: [{'Analyte 2_10': pd.DataFrame({'Peptide 1': [1.0, 2.0],
                                               'Peptide 2': [2.0, np.nan],
                                               'Peptide 3': [np.nan, np.nan]}),
                 'Analyte 2_1': pd.DataFrame({'Peptide 1': [1.5, 2.5],
                                              'Peptide 2': [3.0, np.nan],
                                              'Peptide 3': [1.0, 3.0]}),
                 'Analyte 2_1.5': pd.DataFrame({'Peptide 1': [3.0, 2.5],
                                                'Peptide 2': [0.5, 1.5],
                                                'Peptide 3': [np.nan, 2.5]}),
                 'Analyte 2_Z': pd.DataFrame({'Peptide 1': [np.nan, np.nan],
                                              'Peptide 2': [0.5, 2.0],
                                              'Peptide 3': [1.5, 1.0]})},
                ['Peptide 1', 'Peptide 2', 'Peptide 3'],
                ['Analyte 2_10', 'Analyte 2_1', 'Analyte 2_1.5', 'Analyte 2_Z']],
        }
        # Expected outputs per case: [long-format plot dataframe, analyte
        # list in sorted plotting order].  Case 8 checks natural/numeric
        # sorting of analyte name suffixes.
        exp_results_dict = {
            1: [pd.DataFrame({'Analyte': ['Analyte 1', 'Analyte 1', 'Analyte 2', 'Analyte 2', 'Analyte 1', 'Analyte 1', 'Analyte 2', 'Analyte 2', 'Analyte 1', 'Analyte 1', 'Analyte 2', 'Analyte 2'],
                              'Peptide': ['Peptide 1', 'Peptide 1', 'Peptide 1', 'Peptide 1', 'Peptide 2', 'Peptide 2', 'Peptide 2', 'Peptide 2', 'Peptide 3', 'Peptide 3', 'Peptide 3', 'Peptide 3'],
                              'Reading': [1.0, 2.0, 1.5, 2.5, 2.0, np.nan, 3.0, np.nan, np.nan, np.nan, 1.0, 3.0]}),
                ['Analyte 1', 'Analyte 2']],
            2: [pd.DataFrame({'Analyte': ['Analyte 1', 'Analyte 1'],
                              'Peptide': ['Peptide 2', 'Peptide 2'],
                              'Reading': [2.0, np.nan]}),
                ['Analyte 1']],
            3: [pd.DataFrame({}), []],
            4: [pd.DataFrame({}), []],
            5: [pd.DataFrame({}), []],
            6: [pd.DataFrame({}), []],
            7: [pd.DataFrame({}), []],
            8: [pd.DataFrame({'Analyte': ['Analyte 2_10', 'Analyte 2_10', 'Analyte 2_1', 'Analyte 2_1', 'Analyte 2_1.5', 'Analyte 2_1.5', 'Analyte 2_Z', 'Analyte 2_Z', 'Analyte 2_10', 'Analyte 2_10', 'Analyte 2_1', 'Analyte 2_1', 'Analyte 2_1.5', 'Analyte 2_1.5', 'Analyte 2_Z', 'Analyte 2_Z', 'Analyte 2_10', 'Analyte 2_10', 'Analyte 2_1', 'Analyte 2_1', 'Analyte 2_1.5', 'Analyte 2_1.5', 'Analyte 2_Z', 'Analyte 2_Z'],
                              'Peptide': ['Peptide 1', 'Peptide 1', 'Peptide 1', 'Peptide 1', 'Peptide 1', 'Peptide 1', 'Peptide 1', 'Peptide 1', 'Peptide 2', 'Peptide 2', 'Peptide 2', 'Peptide 2', 'Peptide 2', 'Peptide 2', 'Peptide 2', 'Peptide 2', 'Peptide 3', 'Peptide 3', 'Peptide 3', 'Peptide 3', 'Peptide 3', 'Peptide 3', 'Peptide 3', 'Peptide 3'],
                              'Reading': [1.0, 2.0, 1.5, 2.5, 3.0, 2.5, np.nan, np.nan, 2.0, np.nan, 3.0, np.nan, 0.5, 1.5, 0.5, 2.0, np.nan, np.nan, 1.0, 3.0, np.nan, 2.5, 1.5, 1.0]}),
                ['Analyte 2_Z', 'Analyte 2_1', 'Analyte 2_1.5', 'Analyte 2_10']],
        }
        for num in exp_input_dict.keys():
            test_data = exp_input_dict[num][0]
            features = exp_input_dict[num][1]
            analytes = exp_input_dict[num][2]
            exp_df = exp_results_dict[num][0]
            exp_analytes_list = exp_results_dict[num][1]
            # Fresh output directory per case; removed again at the end of
            # each iteration so the next mkdir doesn't fail.
            os.mkdir('tests/Temp_output')
            if num == 3:
                # Empty feature list.
                with self.assertRaises(ValueError) as message: draw_scatter_plot(
                    test_data, 'tests/Temp_output', features, analytes, str(num), True
                )
                self.assertEqual(
                    str(message.exception),
                    'List of peptides to include in the plot is empty'
                )
            elif num == 4:
                # Empty analyte list.
                with self.assertRaises(ValueError) as message: draw_scatter_plot(
                    test_data, 'tests/Temp_output', features, analytes, str(num), True
                )
                self.assertEqual(
                    str(message.exception),
                    'List of analytes to include in the plot is empty'
                )
            elif num == 5:
                # Feature not present in the input dataframes.
                with self.assertRaises(ValueError) as message: draw_scatter_plot(
                    test_data, 'tests/Temp_output', features, analytes, str(num), True
                )
                self.assertEqual(
                    str(message.exception),
                    'Failed to parse the input dataframes - check that '
                    '"features" and "analytes" lists match the features and '
                    'analytes analysed in one or more of the dataframes in '
                    '"grouped_fluor_data"'
                )
            elif num == 6:
                # Analyte not present in the input data.
                with self.assertRaises(ValueError) as message: draw_scatter_plot(
                    test_data, 'tests/Temp_output', features, analytes, str(num), True
                )
                self.assertEqual(
                    str(message.exception),
                    'Analyte Analyte 4 not present in the input data for 6'
                )
            elif num == 7:
                # Non-numeric reading ('X') in the input.
                with self.assertRaises(ValueError) as message: draw_scatter_plot(
                    test_data, 'tests/Temp_output', features, analytes, str(num), True
                )
                self.assertEqual(
                    str(message.exception),
                    'Non-numeric value found in input dataframe for 7'
                )
            else:
                # Valid input: compare pickled plot data, analyte ordering,
                # and check the SVG plot file was written.
                draw_scatter_plot(
                    test_data, 'tests/Temp_output', features, analytes, str(num), True
                )
                act_df = pd.read_pickle('tests/Temp_output/Plot_data.pkl')
                with open('tests/Temp_output/Sorted_analytes.pkl', 'rb') as f:
                    act_analytes_list = pickle.load(f)
                if not os.path.isfile('tests/Temp_output/{}_data_spread.svg'.format(num)):
                    raise FileNotFoundError(
                        'draw_scatter_plot has failed to create the expected plots '
                        '- expect file tests/Temp_output/{}_data_spread.svg to '
                        'exist'.format(num)
                    )
                pd.testing.assert_frame_equal(exp_df, act_df)
                self.assertEqual(exp_analytes_list, act_analytes_list)
            shutil.rmtree('tests/Temp_output')
def test_check_for_saturation(self):
"""
Tests check_for_saturation in parse_array_data.py
"""
print('Testing check_for_saturation')
plate = {'Analyte': pd.DataFrame({'Peptide_1': [1.0, 2.0, 3.0],
'Peptide_2': [4.0, np.inf, np.nan],
'Peptide_3': [1.0, 4.0, 5.0]})}
exp_readings = [['', 'Analyte', 'Peptide_1', 1.0],
['', 'Analyte', 'Peptide_2', 4.0],
['', 'Analyte', 'Peptide_2', np.inf]]
obs_readings = check_for_saturation(
| |
self.hostname)
else:
output = self.send_command_timing(
"copy sftp startup-config " + sftp_server_ip + " " + filename
)
output += self.send_command_timing("y")
output += self.send_command_timing("\n")
print("Rebooting ", self.hostname)
def sftp_load_firmware(
self,
sftp_server_ip,
filename,
boot_image,
username=None,
password=<PASSWORD>,
reboot=False,
):
"""Loads firmware from an sftp server.
Parameters
----------
sftp_server_ip : str
Specify the sftp server IP address
filename : str
Specify the filename of the configuration to be loaded
boot_image : str
Specify the boot image. Can be primary or secondary boot image.
username : str
Specify the username of the sftp server
password : str
Specify the password of the <PASSWORD>
reboot : bool
Set to True if switch is to be rebooted after firmware has been loaded
"""
if username and password:
output = self.send_command_timing(
"copy sftp flash "
+ username
+ "@"
+ sftp_server_ip
+ " "
+ filename
+ " "
+ boot_image
)
output += self.send_command_timing("y")
output += self.send_command_timing(password, delay_factor=10)
print("Firmware loaded for ", self.hostname)
if reboot:
self.send_command_timing("boot system flash " + boot_image)
self.send_command_timing("y")
print("Rebooting ", self.hostname)
self.disconnect()
else:
output = self.send_command_timing(
"copy sftp flash " + sftp_server_ip + " " + filename + " " + boot_image
)
output += self.send_command_timing("y")
output += self.send_command_timing("\n", delay_factor=10)
print("Firmware loaded for ", self.hostname)
if reboot:
self.send_command_timing("boot system flash " + boot_image)
self.send_command_timing("y")
print("Rebooting ", self.hostname)
self.disconnect()
    def tftp_backup_config(self, tftp_server_ip):
        """Backs up the startup configuration to a tftp server.

        The remote filename is "<hostname>_<YYYY-MM-DD>".

        Parameters
        ----------
        tftp_server_ip : str
            Specify the tftp server IP address
        """
        output = self.send_command_timing(
            "copy startup-config tftp "
            + tftp_server_ip
            + " "
            + self.hostname
            + "_"
            + str(date.today())
        )
        # The switch echoes "TFTP download" when the transfer is accepted.
        if "TFTP download" in output:
            print(f"Startup configuration successfully backed up for {self.hostname}")
    def tftp_load_config(self, tftp_server_ip, filename):
        """Loads a startup configuration from a tftp server.

        The switch reboots to apply the loaded configuration.

        Parameters
        ----------
        tftp_server_ip : str
            Specify the tftp server IP address
        filename : str
            Specify the filename of the configuration to be loaded
        """
        output = self.send_command_timing(
            "copy tftp startup-config " + tftp_server_ip + " " + filename
        )
        # Answer the confirmation prompts ("y", then Enter) that precede the reboot.
        output += self.send_command_timing("y")
        output += self.send_command_timing("\n")
        print("Rebooting ", self.hostname)
    def tftp_load_firmware(self, tftp_server_ip, filename, boot_image, reboot=False):
        """Loads the switch firmware from a tftp server.

        Parameters
        ----------
        tftp_server_ip : str
            Specify the tftp server IP address
        filename : str
            Specify the filename of the firmware image to be loaded
        boot_image : str
            Specify the boot image. Can be primary or secondary boot image
        reboot : bool
            Set to True if switch is to be rebooted after firmware has been loaded
        """
        output = self.send_command_timing(
            "copy tftp flash " + tftp_server_ip + " " + filename + " " + boot_image
        )
        output += self.send_command_timing("y")  # confirm the copy prompt
        # delay_factor=10: firmware transfer can take a long time.
        output += self.send_command_timing("\n", delay_factor=10)
        print("Firmware loaded for ", self.hostname)
        # Optionally boot from the new image and drop the SSH session.
        if reboot:
            self.send_command_timing("boot system flash " + boot_image)
            self.send_command_timing("y")
            print("Rebooting ", self.hostname)
            self.disconnect()
    def find_firmware_version(self):
        """Displays the version of firmware on the switch."""
        print("-" * 25)
        print(f"{'HOSTNAME':^10}{'VERSION':^10}")
        print("-" * 25)
        output = self.send_command("show version")
        # First indented dotted token (e.g. "  WC.16.10.0014") in the output.
        # NOTE(review): raises AttributeError if the pattern is absent — confirm
        # "show version" always contains such a line on the supported models.
        version = re.search(r"^(\s+\S+\.\S+\.\S+.*)", output, flags=re.M).group(1)
        print(f"{self.hostname:^10}{version.strip():^10}")
        print("-" * 25)
    def find_switch_mac_address(self, switch_mac_addr=None):
        """Finds and displays the switch with the specified mac address

        Parameters
        ----------
        switch_mac_addr : list
            Specify the switch mac address(es) to search for; when None the
            switch's own MAC address is displayed instead
        """
        print("-" * 25)
        print(f"{'HOSTNAME':^10}{'MAC_ADDRESS':^10}")
        print("-" * 25)
        if switch_mac_addr:
            # Scan every spanning-tree line for any of the requested MACs.
            output = self.send_command("show spanning-tree")
            output = output.strip().splitlines()
            for i in output:
                for switch in switch_mac_addr:
                    if "Switch MAC Address" in i and switch in i:
                        print("-" * 25)
                        print(f"{self.hostname:^10}{switch:^10}")
                        print("-" * 25)
        else:
            # No filter given: extract this switch's own MAC address.
            output = self.send_command("show spanning-tree")
            switch_mac = re.search(
                r"^\s+Switch MAC Address\s+:\s+(\S{6}-\S{6}.*)", output, flags=re.M
            ).group(1)
            print(f"{self.hostname:^10}{switch_mac.strip():^10}")
            print("-" * 25)
    def find_switch_serial_number(self):
        """Finds and displays the switch hostname and associated serial number"""
        print("-" * 25)
        print(f"{'HOSTNAME':^10}{'SERIAL_NUMBER':^10}")
        print("-" * 25)
        output = self.send_command("show system")
        # Serial number appears on the "ROM ... Serial Number : XXX" line.
        serial_number = re.search(
            r"^\s+ROM.*Serial Number\s+:\s+(\S+.*)", output, flags=re.M
        ).group(1)
        print(f"{self.hostname:^10}{serial_number.strip():^10}")
        print("-" * 25)
    def find_ports_down(self):
        """Finds and displays the interfaces in a 'DOWN' state."""
        print("-" * 12)
        print(f"{self.hostname:^10}")
        print("-" * 12)
        print(f"{'PORT':5}{'STATUS':5}")
        print("-" * 12)
        # use_textfsm parses "show int brief" into dicts with 'port'/'status' keys.
        output = self.send_command_timing("show int brief", use_textfsm=True)
        count = 0
        for ports in output:
            if ports["status"] == "Down":
                count += 1
                print(f"{ports['port']:^5}{ports['status']:^5}")
        print("-" * 12)
        print()
        print(f"number of ports down: {count}")
    def find_ports_up(self):
        """Finds and displays the interfaces in an 'UP' state."""
        print("-" * 12)
        print(f"{self.hostname :^10}")
        print("-" * 12)
        print(f"{'PORT':5}{'STATUS':5}")
        print("-" * 12)
        # use_textfsm parses "show int brief" into dicts with 'port'/'status' keys.
        output = self.send_command_timing("show int brief", use_textfsm=True)
        count = 0
        for ports in output:
            if ports["status"] == "Up":
                count += 1
                print(f"{ports['port']:^5}{ports['status']:^5}")
        print("-" * 12)
        print()
        print(f"number of ports up: {count}")
    def find_ip_from_mac_address(self, mac_address):
        """Finds and displays the IP address from the specified mac address

        Parameters
        ----------
        mac_address : list
            Specify the mac address(es) to look up in the ARP table
        """
        print("-" * 70)
        print(f"{'HOSTNAME':^20}{'MAC_ADDRESS':>20}{'IP_ADDRESS':>20}")
        print("-" * 70)
        # use_textfsm parses "show arp" into dicts with 'mac'/'ip' keys.
        output = self.send_command_timing("show arp", use_textfsm=True)
        for ip in output:
            for i in mac_address:
                if ip["mac"] == i:
                    print(f"{self.hostname :^20}{ip['mac']:^30}{ip['ip']:^10}")
        print("-" * 70)
    def find_mac_from_ip_address(self, ip_address):
        """Finds and displays the mac address from the specified IP address(s)

        Parameters
        ----------
        ip_address : list
            Specify the IP address(s)
        """
        print("-" * 70)
        print(f"{'HOSTNAME':^20}{'MAC_ADDRESS':>20}{'IP_ADDRESS':>20}")
        print("-" * 70)
        # use_textfsm parses "show arp" into dicts with 'mac'/'ip' keys.
        output = self.send_command_timing("show arp", use_textfsm=True)
        for ip_addr in output:
            for i in ip_address:
                if ip_addr["ip"] == i:
                    print(
                        f"{self.hostname :^20}{ip_addr['mac']:^30}{ip_addr['ip']:^10}"
                    )
        print("-" * 70)
    def find_port_security_enabled_ports(self):
        """Finds and displays the ports enabled for port security"""
        print("-" * 85)
        print(f"{self.hostname:^85}")
        print("-" * 85)
        print(
            f"{'PORT':^20}{'LEARN_MODE':^20}{'ACTION':^20}{'EAVESDROP_PREVENTION':^25}"
        )
        print("-" * 85)
        output = self.send_command_timing("show port-security", use_textfsm=True)
        for port in output:
            # "Continuous" is the default learn mode, i.e. no port security.
            if port["learn_mode"] != "Continuous":
                print(
                    f"{port['port'] :^20}{port['learn_mode'] :^20}{port['action'] :^20}"
                    f"{port['eavesdrop_prevention'] :^25}"
                )
        print("-" * 85)
    def find_port_security_disabled_ports(self):
        """Finds and displays the ports not enabled for port security"""
        print("-" * 85)
        print(f"{self.hostname:^85}")
        print("-" * 85)
        print(
            f"{'PORT':^20}{'LEARN_MODE':^20}{'ACTION':^20}{'EAVESDROP_PREVENTION':^25}"
        )
        print("-" * 85)
        output = self.send_command_timing("show port-security", use_textfsm=True)
        for port in output:
            # "Continuous" is the default learn mode, i.e. port security disabled.
            if port["learn_mode"] == "Continuous":
                print(
                    f"{port['port'] :^20}{port['learn_mode'] :^20}{port['action'] :^20}"
                    f"{port['eavesdrop_prevention'] :^25}"
                )
        print("-" * 85)
    def find_jumbo_vlan(self, jumbo_vlan):
        """Finds and displays the vlan with jumbo configuration

        Parameters
        ----------
        jumbo_vlan : list
            Specify the jumbo vlan id(s) to look for
        """
        print("-" * 20)
        print(f"{'HOSTNAME':^10}{'JUMBO_VLAN':^10}")
        print("-" * 20)
        # use_textfsm parses "show vlans" into dicts with 'jumbo'/'vlan_id' keys.
        output = self.send_command_timing("show vlans", use_textfsm=True)
        for j in output:
            for v in jumbo_vlan:
                if j["jumbo"] == "Yes" and j["vlan_id"] == str(v):
                    print(f"{self.hostname:^10}{j['vlan_id']:^10}")
        print("-" * 20)
    def find_voice_vlan(self, voice_vlan):
        """Finds and displays the vlan with voice configuration

        Parameters
        ----------
        voice_vlan : int
            Specify the voice vlan
        """
        print("-" * 20)
        print(f"{'HOSTNAME':^10}{'VOICE_VLAN':^10}")
        print("-" * 20)
        # use_textfsm parses "show vlans" into dicts with 'voice'/'vlan_id' keys.
        output = self.send_command_timing("show vlans", use_textfsm=True)
        for v in output:
            if v["voice"] == "Yes" and v["vlan_id"] == str(voice_vlan):
                print(f"{self.hostname:^10}{v['vlan_id']:^10}")
        print("-" * 20)
def find_poe_enabled_ports(self):
"""Finds and displays the POE+ enabled ports"""
print("-" * 25)
print(f"{self.hostname :^25}")
print("-" * 25)
print(f"{'PORT':^10}{'POE_ENABLED':^10}")
output = self.send_command("show power-over-ethernet brief")
output = output.strip().splitlines()
for line in output:
if re.search(r"^\s+\d+\s+Yes\s+", line, flags=re.M):
port, poe_status, *extra = line.split()
print(f"{port: ^10}{poe_status: ^10}")
print("-" * 25)
    def find_poe_disabled_ports(self):
        """Finds and displays the POE+ disabled ports"""
        print("-" * 25)
        print(f"{self.hostname :^25}")
        print("-" * 25)
        print(f"{'PORT':^10}{'POE_ENABLED':^10}")
        print("-" * 25)
        output = self.send_command("show power-over-ethernet brief")
        output = output.strip().splitlines()
        for line in output:
            # Port rows read "  <port>  No  ..." when POE is disabled.
            if re.search(r"^\s+\d+\s+No\s+", line, flags=re.M):
                port, poe_status, *extra = line.split()
                print(f"{port: ^10}{poe_status: ^10}")
        print("-" * 25)
    def find_poe_switch_status(self):
        """Finds and displays the switch with POE+ capability."""
        print("-" * 25)
        print(f"{'HOSTNAME':^10}{'POE_ENABLED':^10}")
        print("-" * 25)
        output = self.send_command("show power-over-ethernet")
        output = output.strip().splitlines()
        for line in output:
            # A "POE+ Connected" line indicates POE+ capable hardware.
            if "POE+ Connected" in line:
                print(f"{self.hostname:^10}{'YES':^10}")
        print("-" * 25)
    def list_vlans_on_trunk(self):
        """Lists the vlans on switch trunk ports.

        Trunk ports are identified via LLDP neighbours; for each one the
        VLANs carried on that port are printed.
        """
        print()
        print("-" * 25)
        print(f"{'HOSTNAME':^25}")
        print("-" * 25)
        print(f"{self.hostname:^25}")
        print("-" * 25)
        print(f"{'NEIGHBOR':^10}{'TRUNK PORT':^10}")
        print("-" * 25)
        output = self.send_command("show lldp info remote-device", use_textfsm=True)
        for i in output:
            # A named LLDP neighbour implies an uplink/trunk port.
            if i["neighbor_sysname"] is not None:
                neighbor_sysname = i["neighbor_sysname"]
                port_number = i["local_port"]
                print(f"{neighbor_sysname:^10}{port_number:^10}")
                vlan_output = self.send_command("show vlan ports " + port_number)
                vlan_output = vlan_output.strip()
                vlan_output = vlan_output.splitlines()
                print("-" * 25)
                print(f"{'VLAN':^10}{'VLAN_NAME':10}")
                print("-" * 25)
                # Skip the four header lines of "show vlan ports" output.
                for vlan in vlan_output[4:]:
                    vlan, vlan_name, *extra = vlan.split()
                    print(f"{vlan:^10}{vlan_name:^10}")
                print("-" * 25)
def find_vlans_on_trunk(self, trunk_vlan):
"""finds whether the specified vlan is on a trunk port
Parameters
----------
trunk_vlan : list
Specify the trunk vlan
"""
print()
print("-" * 25)
print(f"{'HOSTNAME':^25}")
print("-" * 25)
print(f"{self.hostname:^25}")
print("-" * 25)
print(f"{'NEIGHBOR':^10}{'TRUNK PORT':^10}")
print("-" * 25)
output = self.send_command("show lldp info remote-device", use_textfsm=True)
for i in output:
if i["neighbor_sysname"] is not None:
neighbor_sysname = i["neighbor_sysname"]
port_number = i["local_port"]
print(f"{neighbor_sysname:^10}{port_number:^10}")
| |
"""
Module that contains the ChunkGrid class
"""
import functools
import operator
from typing import (
Any,
Callable,
Generic,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
overload,
SupportsAbs,
)
import numpy as np
import numpy.typing as npt
from voxly.iterators import SliceOpt, VoxelGridIterator
from .faces import ChunkFace
from .index_dict import IndexDict
from .typing import Index3, LenType, Arr3i, Vec3i
from .chunk import Chunk, stack_chunks
ChunkIndex = Index3
_VT_co = TypeVar("_VT_co", covariant=True, bound=np.generic)
_OT_co = TypeVar("_OT_co", covariant=True, bound=np.generic)
class ChunkGrid(Generic[_VT_co]):
__slots__ = ("_chunk_size", "_dtype", "_fill", "chunks")
_chunk_size: int
_dtype: np.dtype[Any]
_fill: _VT_co
chunks: IndexDict[Chunk[_VT_co]]
def __init__(self, chunk_size: int, dtype: np.dtype[_VT_co] | Type[_VT_co], fill: Any = ...) -> None:
assert chunk_size > 0
self._chunk_size = chunk_size
self._dtype = np.dtype(dtype)
self.fill = self._dtype.base.type() if fill is ... else fill
self.chunks = IndexDict()
    @property
    def dtype(self) -> np.dtype[Any]:
        """The numpy dtype stored by every chunk of this grid."""
        return self._dtype
    @property
    def chunk_size(self) -> int:
        """Edge length (in voxels) of each cubic chunk."""
        return self._chunk_size
@property
def chunk_shape(self) -> Tuple[int, int, int]:
s = self._chunk_size
return s, s, s
    @property
    def fill(self) -> _VT_co:
        """Default value assumed for voxels whose chunk does not exist."""
        return self._fill
@fill.setter
def fill(self, value: Any) -> None:
_dtype = self._dtype
if _dtype.subdtype:
self._fill = np.array((value,), _dtype)[0] # Hacky way to ensure the data type matches
else:
self._fill = _dtype.base.type(value)
def size(self) -> Arr3i:
return self.chunks.size() * self._chunk_size
    def _new_chunk_factory(self, index: Index3) -> Chunk[_VT_co]:
        """Build an empty chunk at *index*; passed to ``IndexDict.create_if_absent``."""
        return Chunk(index, self._chunk_size, self._dtype, self._fill)
def chunk_index(self, pos: Vec3i) -> ChunkIndex:
res = np.asarray(pos, dtype=np.int_) // self._chunk_size
assert res.shape == (3,)
return tuple(res) # type: ignore
    def chunk_at_pos(self, pos: Arr3i) -> Chunk[_VT_co] | None:
        """Return the chunk containing voxel *pos*, or None if it does not exist."""
        return self.chunks.get(self.chunk_index(pos))
    def ensure_chunk_at_index(self, index: ChunkIndex, *, insert: bool = True) -> Chunk[_VT_co]:
        """Return the chunk at *index*, creating one via the chunk factory if absent.

        NOTE(review): with ``insert=False`` the new chunk is presumably returned
        without being stored in ``self.chunks`` — confirm in IndexDict.create_if_absent.
        """
        return self.chunks.create_if_absent(index, self._new_chunk_factory, insert=insert)
    def ensure_chunk_at_pos(self, pos: Vec3i, insert: bool = True) -> Chunk[_VT_co]:
        """Return the chunk containing voxel *pos*, creating it if absent."""
        return self.ensure_chunk_at_index(self.chunk_index(pos), insert=insert)
def new_empty_mask(self, default: bool = False) -> npt.NDArray[np.bool8]:
return np.full(self.chunk_shape, default, dtype=np.bool8)
@classmethod
def iter_neighbors_indices(cls, index: ChunkIndex) -> Iterator[Tuple[ChunkFace, Index3]]:
yield from ((f, tuple(np.add(index, f.direction()))) for f in ChunkFace) # type: ignore
def iter_neighbors(
self, index: ChunkIndex, flatten: bool = False
) -> Iterator[Tuple[ChunkFace, Optional[Chunk[_VT_co]]]]:
if flatten:
yield from ((f, c) for f, c in self.iter_neighbors(index, False) if c is not None)
else:
yield from ((f, self.chunks.get(i, None)) for f, i in self.iter_neighbors_indices(index))
def __bool__(self) -> bool:
raise ValueError(
f"The truth value of {self.__class__} is ambiguous. "
"Use a.any(), or a.all(), or wrap the comparison (0 < a) & (a < 0)"
)
def all(self) -> bool:
"""True if all chunks contain only True values"""
return all(c.all() for c in self.chunks.values())
def any(self) -> bool:
"""True if any chunk contains any True value"""
return any(c.any() for c in self.chunks.values())
    def to_dense(self, x: SliceOpt = None, y: SliceOpt = None, z: SliceOpt = None) -> npt.NDArray[_VT_co]:
        """Convert the grid (or an x/y/z slice of it) to a dense array, discarding the offset."""
        return self.to_dense_with_offset(x, y, z)[0]
    def to_dense_with_offset(
        self, x: SliceOpt = None, y: SliceOpt = None, z: SliceOpt = None
    ) -> Tuple[npt.NDArray[_VT_co], Arr3i]:
        """Convert the grid to a dense numpy array.

        Returns ``(array, offset)`` where *offset* is the voxel position of the
        array's origin (the minimum chunk corner).  An empty grid yields a
        ``(0, 0, 0)``-shaped array and a zero offset.
        """
        if len(self.chunks) == 0:
            return np.empty((0, 0, 0), dtype=self._dtype), np.zeros(3, dtype=np.int_)
        # Variable cache
        cs = self._chunk_size
        # Bounding box over all existing chunks, converted to voxel coordinates.
        index_min, index_max = self.chunks.box.minmax
        pos_min = np.multiply(index_min, cs)
        pos_max = np.multiply(index_max, cs) + cs
        voxel_it = VoxelGridIterator(pos_min, pos_max, x, y, z, clip=False)
        chunk_it = voxel_it // cs
        chunk_min: Arr3i = np.asarray(chunk_it.start)
        chunk_max: Arr3i = np.asarray(chunk_it.stop)
        chunk_len: Arr3i = chunk_max - chunk_min
        # Allocate the chunk-aligned region up front, pre-filled with the fill value.
        res: npt.NDArray[_VT_co] = np.full(tuple(chunk_len * cs), self.fill, dtype=self.dtype)
        # Method cache (prevent lookup in loop)
        __self_chunks_get = self.chunks.get
        __chunk_to_array = Chunk.to_array
        u: int
        v: int
        w: int
        # Copy each existing chunk into its slot of the dense array.
        for index in chunk_it:
            c = __self_chunks_get(index, None)
            if c is not None:
                u, v, w = np.subtract(index, chunk_min) * cs
                res[u : u + cs, v : v + cs, w : w + cs] = __chunk_to_array(c)
        # Crop the chunk-aligned array down to the requested x/y/z slice.
        start = voxel_it.start - chunk_min * cs
        stop = voxel_it.stop - chunk_min * cs
        step = voxel_it.step
        return (
            res[start[0] : stop[0] : step[0], start[1] : stop[1] : step[1], start[2] : stop[2] : step[2]],
            chunk_min * cs,
        )
def astype(self, dtype: Type[_OT_co] | np.dtype[_OT_co], copy: bool = False) -> "ChunkGrid[_OT_co]":
_dtype = np.dtype(dtype)
if not copy and self._dtype == _dtype:
return self # type: ignore
new_grid: ChunkGrid[_OT_co] = ChunkGrid(self._chunk_size, dtype, fill=self._fill)
__new_grid_chunks_insert = new_grid.chunks.insert # Cache method lookup
for src in self.chunks.values():
__new_grid_chunks_insert(src.index, src.astype(dtype))
return new_grid
def copy(self, empty: bool = False) -> "ChunkGrid[_VT_co]":
new_grid = ChunkGrid(self._chunk_size, self._dtype, self._fill)
if not empty:
__new_grid_chunks_insert = new_grid.chunks.insert # Cache method lookup
for src in self.chunks.values():
__new_grid_chunks_insert(src.index, src.copy())
return new_grid
def split(self, splits: int, chunk_size: int | None = None) -> "ChunkGrid[_VT_co]":
assert splits > 0 and self._chunk_size % splits == 0
chunk_size = chunk_size or self._chunk_size
new_grid: ChunkGrid[_VT_co] = ChunkGrid(chunk_size, self._dtype, self.fill)
# Method cache (prevent lookup in loop)
__new_grid_chunks_insert = new_grid.chunks.insert
for c in self.chunks.values():
for c_new in c.split(splits, chunk_size):
__new_grid_chunks_insert(c_new.index, c_new)
return new_grid
def items(self, mask: "ChunkGrid[np.bool8]" | None = None) -> Iterator[Tuple[Arr3i, _VT_co]]:
if mask is None:
for i, c in self.chunks.items():
yield from c.items()
else:
__mask_ensure_chunk_at_index = mask.ensure_chunk_at_index # Cache method
for i, c in self.chunks.items():
m = __mask_ensure_chunk_at_index(i, insert=False)
if m.any_if_filled():
yield from c.items(m)
def filter(self, other: "ChunkGrid[np.bool8]") -> "ChunkGrid[_VT_co]":
"""Apply a filter mask to this grid and return the masked values"""
result = self.copy(empty=True)
# Method cache (prevent lookup in loop)
__self_chunks_get = self.chunks.get
__chunk_any = Chunk.any
__result_chunks_insert = result.chunks.insert
for i, o in other.chunks.items():
c = __self_chunks_get(i, None)
if c is not None and __chunk_any(c):
__result_chunks_insert(i, c.filter(o))
return result
def _argwhere_iter_arrays(self, mask: Optional["ChunkGrid[np.bool8]"] = None) -> Iterator[npt.NDArray[np.int_]]:
# Method cache (prevent lookup in loop)
__chunk_where = Chunk.argwhere
if mask is None:
for i, c in self.chunks.items():
yield __chunk_where(c)
else:
# Method cache (prevent lookup in loop)
__chunk_any_fast = Chunk.any_if_filled
__mask_ensure_chunk_at_index = mask.ensure_chunk_at_index
for i, c in self.chunks.items():
m = __mask_ensure_chunk_at_index(i, insert=False)
if __chunk_any_fast(m):
yield __chunk_where(c, mask=m)
def argwhere(self, mask: Optional["ChunkGrid[np.bool8]"] = None) -> Iterator[Arr3i]:
# Method cache (prevent lookup in loop)
__chunk_where = Chunk.argwhere
if mask is None:
for i, c in self.chunks.items():
yield from __chunk_where(c)
else:
# Method cache (prevent lookup in loop)
__chunk_any_fast = Chunk.any_if_filled
__mask_ensure_chunk_at_index = mask.ensure_chunk_at_index
for i, c in self.chunks.items():
m = __mask_ensure_chunk_at_index(i, insert=False)
if __chunk_any_fast(m):
yield from __chunk_where(c, mask=m)
    def __getitem__(self, item: Any) -> Any:
        """Dispatch indexing by argument type.

        - slice, or tuple of up to three slices -> dense sub-array (``to_dense``)
        - ChunkGrid (boolean mask)              -> filtered grid (``filter``)
        - ndarray of positions                  -> values at those positions
        """
        if isinstance(item, slice):
            return self.to_dense(item)
        elif isinstance(item, tuple) and len(item) <= 3:
            return self.to_dense(*item)
        elif isinstance(item, ChunkGrid):
            return self.filter(item)
        elif isinstance(item, np.ndarray):
            return self.get_values(item)
        else:
            raise IndexError("Invalid get")
    def get_values(self, pos: Iterable[Arr3i] | npt.NDArray[np.int_]) -> npt.NDArray[_VT_co]:
        """Return the grid values at an (N, 3) array of voxel positions."""
        # Method cache (prevent lookup in loop)
        __np_argwhere = np.argwhere
        __self_ensure_chunk_at_index = self.ensure_chunk_at_index
        __chunk_to_array = Chunk.to_array
        pos = np.asarray(pos, dtype=int)
        assert pos.ndim == 2 and pos.shape[1] == 3
        csize: int = self._chunk_size
        cind: npt.NDArray[np.int_]
        cinv: npt.NDArray[np.int_]
        # Group query positions by chunk: cind holds the unique chunk indices,
        # cinv maps each position to its row in cind.
        cind, cinv = np.unique(pos // csize, axis=0, return_inverse=True)
        # len(cinv) == len(pos): one result slot per query position.
        result = np.zeros(len(cinv), dtype=self._dtype)
        for n, i in enumerate(cind):
            # Rows of `pos` that fall into chunk `i`.
            pind = __np_argwhere(cinv == n).flatten()
            cpos = pos[pind] % csize  # local coordinates within the chunk
            chunk = __self_ensure_chunk_at_index(i, insert=False)
            result[pind] = __chunk_to_array(chunk)[tuple(cpos.T)]
        return result
def get_value(self, pos: Vec3i) -> _VT_co:
idx = self.chunk_index(pos)
c: Chunk[_VT_co] | None = self.chunks.get(idx)
if c is None:
return self.fill
else:
return c.get(pos)
    def set_value(self, pos: Vec3i, value: Any) -> None:
        """Set the voxel at *pos* to *value*, creating its chunk if needed."""
        c = self.ensure_chunk_at_pos(pos)
        c.set(pos, value)
    def set_or_fill(self, pos: Vec3i, value: Any) -> None:
        """Delegate to ``Chunk.set_or_fill`` for the chunk at *pos* (created if needed)."""
        c = self.ensure_chunk_at_pos(pos)
        c.set_or_fill(pos, value)
def _set_slices(self, value: Any, x: SliceOpt = None, y: SliceOpt = None, z: SliceOpt = None) -> None:
it = VoxelGridIterator.require_bounded(x, y, z)
if isinstance(value, np.ndarray):
assert value.shape == it.shape
if self._dtype is not None:
value = value.astype(self._dtype)
for i, pos in it.iter_with_indices():
self.set_value(pos, value[i])
else:
for pos in it:
self.set_value(pos, value)
    def _set_positions(self, pos: npt.NDArray[np.int_] | Sequence[Arr3i], value: _VT_co | Sequence[_VT_co]) -> None:
        """Set *value* at many voxel positions.

        *pos* may be a single (3,) position or an (N, 3) array; *value* may be
        a scalar (applied at every position) or a sequence matching len(pos).
        """
        if isinstance(pos, LenType):
            # Cheap early-out for sized containers before the array conversion.
            if not len(pos):
                return  # No Op
        pos = np.asarray(pos, dtype=int)
        if len(pos) == 0:
            return  # No Op
        if pos.shape == (3,):
            # Single position.
            self.set_value(pos, value)
        else:
            assert pos.ndim == 2 and pos.shape[1] == 3, f"shape={pos.shape}"
            if isinstance(value, (list, tuple, np.ndarray)):
                # One value per position.
                assert len(pos) == len(value)
                for p, v in zip(pos, value):
                    self.set_value(p, v)
            else:
                # Same scalar everywhere; deduplicate positions first.
                upos: npt.NDArray[np.int_] = np.unique(pos, axis=0)
                for p in upos:
                    self.set_value(p, value)
def _set_chunks(
self, mask: "ChunkGrid[np.bool8]", value: _VT_co | npt.NDArray[_VT_co] | Chunk[_VT_co] | "ChunkGrid[_VT_co]"
) -> None:
assert self._chunk_size == mask._chunk_size
# Method cache (prevent | |
from numpy import array
def ldfesadictionary():
return ({1: array([[ 0.2097691065 , 0.2097691065 , 0.9549836878 ,
0.339836909477261],
[ 0.9549836878 , 0.2097691065 , 0.2097691065 ,
0.339836909477261],
[ 0.2097691065 , 0.9549836878 , 0.2097691065 ,
0.339836909477261],
[ 0.5773502692 , 0.5773502692 , 0.5773502692 ,
0.551285598363113],
[-0.2097691065 , 0.2097691065 , 0.9549836878 ,
0.339836909477261],
[-0.9549836878 , 0.2097691065 , 0.2097691065 ,
0.339836909477261],
[-0.2097691065 , 0.9549836878 , 0.2097691065 ,
0.339836909477261],
[-0.5773502692 , 0.5773502692 , 0.5773502692 ,
0.551285598363113],
[-0.2097691065 , -0.2097691065 , 0.9549836878 ,
0.339836909477261],
[-0.9549836878 , -0.2097691065 , 0.2097691065 ,
0.339836909477261],
[-0.2097691065 , -0.9549836878 , 0.2097691065 ,
0.339836909477261],
[-0.5773502692 , -0.5773502692 , 0.5773502692 ,
0.551285598363113],
[ 0.2097691065 , -0.2097691065 , 0.9549836878 ,
0.339836909477261],
[ 0.9549836878 , -0.2097691065 , 0.2097691065 ,
0.339836909477261],
[ 0.2097691065 , -0.9549836878 , 0.2097691065 ,
0.339836909477261],
[ 0.5773502692 , -0.5773502692 , 0.5773502692 ,
0.551285598363113],
[ 0.2097691065 , 0.2097691065 , -0.9549836878 ,
0.339836909477261],
[ 0.9549836878 , 0.2097691065 , -0.2097691065 ,
0.339836909477261],
[ 0.2097691065 , 0.9549836878 , -0.2097691065 ,
0.339836909477261],
[ 0.5773502692 , 0.5773502692 , -0.5773502692 ,
0.551285598363113],
[-0.2097691065 , 0.2097691065 , -0.9549836878 ,
0.339836909477261],
[-0.9549836878 , 0.2097691065 , -0.2097691065 ,
0.339836909477261],
[-0.2097691065 , 0.9549836878 , -0.2097691065 ,
0.339836909477261],
[-0.5773502692 , 0.5773502692 , -0.5773502692 ,
0.551285598363113],
[-0.2097691065 , -0.2097691065 , -0.9549836878 ,
0.339836909477261],
[-0.9549836878 , -0.2097691065 , -0.2097691065 ,
0.339836909477261],
[-0.2097691065 , -0.9549836878 , -0.2097691065 ,
0.339836909477261],
[-0.5773502692 , -0.5773502692 , -0.5773502692 ,
0.551285598363113],
[ 0.2097691065 , -0.2097691065 , -0.9549836878 ,
0.339836909477261],
[ 0.9549836878 , -0.2097691065 , -0.2097691065 ,
0.339836909477261],
[ 0.2097691065 , -0.9549836878 , -0.2097691065 ,
0.339836909477261],
[ 0.5773502692 , -0.5773502692 , -0.5773502692 ,
0.551285598363113]]), 2: array([[ 0.1227852912 , 0.1227852912 , 0.9848083796 ,
0.052655908296477],
[ 0.531419325 , 0.1031295016 , 0.8408078299 ,
0.099572004193338],
[ 0.1031295016 , 0.531419325 , 0.8408078299 ,
0.099572004193338],
[ 0.2357022604 , 0.2357022604 , 0.9428090416 ,
0.088036992794109],
[ 0.8408078299 , 0.1031295016 , 0.531419325 ,
0.099572004193338],
[ 0.9848083796 , 0.1227852912 , 0.1227852912 ,
0.052655908296477],
[ 0.8408078299 , 0.531419325 , 0.1031295016 ,
0.099572004193338],
[ 0.9428090416 , 0.2357022604 , 0.2357022604 ,
0.088036992794109],
[ 0.1031295016 , 0.8408078299 , 0.531419325 ,
0.099572004193338],
[ 0.531419325 , 0.8408078299 , 0.1031295016 ,
0.099572004193338],
[ 0.1227852912 , 0.9848083796 , 0.1227852912 ,
0.052655908296477],
[ 0.2357022604 , 0.9428090416 , 0.2357022604 ,
0.088036992794109],
[ 0.686947072 , 0.686947072 , 0.2370810843 ,
0.132024927791166],
[ 0.2370810843 , 0.686947072 , 0.686947072 ,
0.132024927791166],
[ 0.686947072 , 0.2370810843 , 0.686947072 ,
0.132024927791166],
[ 0.5773502692 , 0.5773502692 , 0.5773502692 ,
0.155210814989615],
[-0.1227852912 , 0.1227852912 , 0.9848083796 ,
0.052655908296477],
[-0.531419325 , 0.1031295016 , 0.8408078299 ,
0.099572004193338],
[-0.1031295016 , 0.531419325 , 0.8408078299 ,
0.099572004193338],
[-0.2357022604 , 0.2357022604 , 0.9428090416 ,
0.088036992794109],
[-0.8408078299 , 0.1031295016 , 0.531419325 ,
0.099572004193338],
[-0.9848083796 , 0.1227852912 , 0.1227852912 ,
0.052655908296477],
[-0.8408078299 , 0.531419325 , 0.1031295016 ,
0.099572004193338],
[-0.9428090416 , 0.2357022604 , 0.2357022604 ,
0.088036992794109],
[-0.1031295016 , 0.8408078299 , 0.531419325 ,
0.099572004193338],
[-0.531419325 , 0.8408078299 , 0.1031295016 ,
0.099572004193338],
[-0.1227852912 , 0.9848083796 , 0.1227852912 ,
0.052655908296477],
[-0.2357022604 , 0.9428090416 , 0.2357022604 ,
0.088036992794109],
[-0.686947072 , 0.686947072 , 0.2370810843 ,
0.132024927791166],
[-0.2370810843 , 0.686947072 , 0.686947072 ,
0.132024927791166],
[-0.686947072 , 0.2370810843 , 0.686947072 ,
0.132024927791166],
[-0.5773502692 , 0.5773502692 , 0.5773502692 ,
0.155210814989615],
[-0.1227852912 , -0.1227852912 , 0.9848083796 ,
0.052655908296477],
[-0.531419325 , -0.1031295016 , 0.8408078299 ,
0.099572004193338],
[-0.1031295016 , -0.531419325 , 0.8408078299 ,
0.099572004193338],
[-0.2357022604 , -0.2357022604 , 0.9428090416 ,
0.088036992794109],
[-0.8408078299 , -0.1031295016 , 0.531419325 ,
0.099572004193338],
[-0.9848083796 , -0.1227852912 , 0.1227852912 ,
0.052655908296477],
[-0.8408078299 , -0.531419325 , 0.1031295016 ,
0.099572004193338],
[-0.9428090416 , -0.2357022604 , 0.2357022604 ,
0.088036992794109],
[-0.1031295016 , -0.8408078299 , 0.531419325 ,
0.099572004193338],
[-0.531419325 , -0.8408078299 , 0.1031295016 ,
0.099572004193338],
[-0.1227852912 , -0.9848083796 , 0.1227852912 ,
0.052655908296477],
[-0.2357022604 , -0.9428090416 , 0.2357022604 ,
0.088036992794109],
[-0.686947072 , -0.686947072 , 0.2370810843 ,
0.132024927791166],
[-0.2370810843 , -0.686947072 , 0.686947072 ,
0.132024927791166],
[-0.686947072 , -0.2370810843 , 0.686947072 ,
0.132024927791166],
[-0.5773502692 , -0.5773502692 , 0.5773502692 ,
0.155210814989615],
[ 0.1227852912 , -0.1227852912 , 0.9848083796 ,
0.052655908296477],
[ 0.531419325 , -0.1031295016 , 0.8408078299 ,
0.099572004193338],
[ 0.1031295016 , -0.531419325 , 0.8408078299 ,
0.099572004193338],
[ 0.2357022604 , -0.2357022604 , 0.9428090416 ,
0.088036992794109],
[ 0.8408078299 , -0.1031295016 , 0.531419325 ,
0.099572004193338],
[ 0.9848083796 , -0.1227852912 , 0.1227852912 ,
0.052655908296477],
[ 0.8408078299 , -0.531419325 , 0.1031295016 ,
0.099572004193338],
[ 0.9428090416 , -0.2357022604 , 0.2357022604 ,
0.088036992794109],
[ 0.1031295016 , -0.8408078299 , 0.531419325 ,
0.099572004193338],
[ 0.531419325 , -0.8408078299 , 0.1031295016 ,
0.099572004193338],
[ 0.1227852912 , -0.9848083796 , 0.1227852912 ,
0.052655908296477],
[ 0.2357022604 , -0.9428090416 , 0.2357022604 ,
0.088036992794109],
[ 0.686947072 , -0.686947072 , 0.2370810843 ,
0.132024927791166],
[ 0.2370810843 , -0.686947072 , 0.686947072 ,
0.132024927791166],
[ 0.686947072 , -0.2370810843 , 0.686947072 ,
0.132024927791166],
[ 0.5773502692 , -0.5773502692 , 0.5773502692 ,
0.155210814989615],
[ 0.1227852912 , 0.1227852912 , -0.9848083796 ,
0.052655908296477],
[ 0.531419325 , 0.1031295016 , -0.8408078299 ,
0.099572004193338],
[ 0.1031295016 , 0.531419325 , -0.8408078299 ,
0.099572004193338],
[ 0.2357022604 , 0.2357022604 , -0.9428090416 ,
0.088036992794109],
[ 0.8408078299 , 0.1031295016 , -0.531419325 ,
0.099572004193338],
[ 0.9848083796 , 0.1227852912 , -0.1227852912 ,
0.052655908296477],
[ 0.8408078299 , 0.531419325 , -0.1031295016 ,
0.099572004193338],
[ 0.9428090416 , 0.2357022604 , -0.2357022604 ,
0.088036992794109],
[ 0.1031295016 , 0.8408078299 , -0.531419325 ,
0.099572004193338],
[ 0.531419325 , 0.8408078299 , -0.1031295016 ,
0.099572004193338],
[ 0.1227852912 , 0.9848083796 , -0.1227852912 ,
0.052655908296477],
[ 0.2357022604 , 0.9428090416 , -0.2357022604 ,
0.088036992794109],
[ 0.686947072 , 0.686947072 , -0.2370810843 ,
0.132024927791166],
[ 0.2370810843 , 0.686947072 , -0.686947072 ,
0.132024927791166],
[ 0.686947072 , 0.2370810843 , -0.686947072 ,
0.132024927791166],
[ 0.5773502692 , 0.5773502692 , -0.5773502692 ,
0.155210814989615],
[-0.1227852912 , 0.1227852912 , -0.9848083796 ,
0.052655908296477],
[-0.531419325 , 0.1031295016 , -0.8408078299 ,
0.099572004193338],
[-0.1031295016 , 0.531419325 , -0.8408078299 ,
0.099572004193338],
[-0.2357022604 , 0.2357022604 , -0.9428090416 ,
0.088036992794109],
[-0.8408078299 , 0.1031295016 , -0.531419325 ,
0.099572004193338],
[-0.9848083796 , 0.1227852912 , -0.1227852912 ,
0.052655908296477],
[-0.8408078299 , 0.531419325 , -0.1031295016 ,
0.099572004193338],
[-0.9428090416 , 0.2357022604 , -0.2357022604 ,
0.088036992794109],
[-0.1031295016 , 0.8408078299 , -0.531419325 ,
0.099572004193338],
[-0.531419325 , 0.8408078299 , -0.1031295016 ,
0.099572004193338],
[-0.1227852912 , 0.9848083796 , -0.1227852912 ,
0.052655908296477],
[-0.2357022604 , 0.9428090416 , -0.2357022604 ,
0.088036992794109],
[-0.686947072 , 0.686947072 , -0.2370810843 ,
0.132024927791166],
[-0.2370810843 , 0.686947072 , -0.686947072 ,
0.132024927791166],
[-0.686947072 , 0.2370810843 , -0.686947072 ,
0.132024927791166],
[-0.5773502692 , 0.5773502692 , -0.5773502692 ,
0.155210814989615],
[-0.1227852912 , -0.1227852912 , -0.9848083796 ,
0.052655908296477],
[-0.531419325 , -0.1031295016 , -0.8408078299 ,
0.099572004193338],
[-0.1031295016 , -0.531419325 , -0.8408078299 ,
0.099572004193338],
[-0.2357022604 , -0.2357022604 , -0.9428090416 ,
0.088036992794109],
[-0.8408078299 , -0.1031295016 , -0.531419325 ,
0.099572004193338],
[-0.9848083796 , -0.1227852912 , -0.1227852912 ,
0.052655908296477],
[-0.8408078299 , -0.531419325 , -0.1031295016 ,
0.099572004193338],
[-0.9428090416 , -0.2357022604 , -0.2357022604 ,
0.088036992794109],
[-0.1031295016 , -0.8408078299 , -0.531419325 ,
0.099572004193338],
[-0.531419325 , -0.8408078299 , -0.1031295016 ,
0.099572004193338],
[-0.1227852912 , -0.9848083796 , -0.1227852912 ,
0.052655908296477],
[-0.2357022604 , -0.9428090416 , -0.2357022604 ,
0.088036992794109],
[-0.686947072 , -0.686947072 , -0.2370810843 ,
0.132024927791166],
[-0.2370810843 , -0.686947072 , -0.686947072 ,
0.132024927791166],
[-0.686947072 , -0.2370810843 , -0.686947072 ,
0.132024927791166],
[-0.5773502692 , -0.5773502692 , -0.5773502692 ,
0.155210814989615],
[ 0.1227852912 , -0.1227852912 , -0.9848083796 ,
0.052655908296477],
[ 0.531419325 , -0.1031295016 , -0.8408078299 ,
0.099572004193338],
[ 0.1031295016 , -0.531419325 , -0.8408078299 ,
0.099572004193338],
[ 0.2357022604 , -0.2357022604 , -0.9428090416 ,
0.088036992794109],
[ 0.8408078299 , -0.1031295016 , -0.531419325 ,
0.099572004193338],
[ 0.9848083796 , -0.1227852912 , -0.1227852912 ,
0.052655908296477],
[ 0.8408078299 , -0.531419325 , -0.1031295016 ,
0.099572004193338],
[ 0.9428090416 , -0.2357022604 , -0.2357022604 ,
0.088036992794109],
[ 0.1031295016 , -0.8408078299 , -0.531419325 ,
0.099572004193338],
[ 0.531419325 , -0.8408078299 , -0.1031295016 ,
0.099572004193338],
[ 0.1227852912 , -0.9848083796 , -0.1227852912 ,
0.052655908296477],
[ 0.2357022604 , -0.9428090416 , -0.2357022604 ,
0.088036992794109],
[ 0.686947072 , -0.686947072 , -0.2370810843 ,
0.132024927791166],
[ 0.2370810843 , -0.686947072 , -0.686947072 ,
0.132024927791166],
[ 0.686947072 , -0.2370810843 , -0.686947072 ,
0.132024927791166],
[ 0.5773502692 , -0.5773502692 , -0.5773502692 ,
0.155210814989615]]), 3: array([[ 0.0438752388 , 0.0438752388 , 0.998073107 ,
0.01010118190061 ],
[ 0.2231940545 , 0.0434603636 , 0.9738047088 ,
0.014711409100889],
[ 0.0434603636 , 0.2231940545 , 0.9738047088 ,
0.014711409100889],
[ 0.0990147543 , 0.0990147543 , 0.990147543 ,
0.013131908200793],
[ 0.3917547244 , 0.0513254349 , 0.9186369988 ,
0.020163559601218],
[ 0.6256627238 , 0.050922383 , 0.7784298728 ,
0.024290017701467],
[ 0.4500607531 , 0.286782825 , 0.8456955302 ,
0.030009672601813],
[ 0.4923659639 , 0.123091491 , 0.8616404369 ,
0.025108754201517],
[ 0.0513254349 , 0.3917547244 , 0.9186369988 ,
| |
<gh_stars>0
def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Education, obj[4]: Occupation, obj[5]: Bar, obj[6]: Restaurant20to50, obj[7]: Direction_same, obj[8]: Distance
# {"feature": "Coupon", "instances": 8147, "metric_value": 0.9848, "depth": 1}
if obj[2]>1:
# {"feature": "Distance", "instances": 5889, "metric_value": 0.9535, "depth": 2}
if obj[8]<=2:
# {"feature": "Passanger", "instances": 5308, "metric_value": 0.9386, "depth": 3}
if obj[0]<=2:
# {"feature": "Education", "instances": 3482, "metric_value": 0.9606, "depth": 4}
if obj[3]>1:
# {"feature": "Time", "instances": 1973, "metric_value": 0.9718, "depth": 5}
if obj[1]<=2:
# {"feature": "Direction_same", "instances": 1359, "metric_value": 0.9624, "depth": 6}
if obj[7]<=0:
# {"feature": "Restaurant20to50", "instances": 771, "metric_value": 0.975, "depth": 7}
if obj[6]<=3.0:
# {"feature": "Occupation", "instances": 749, "metric_value": 0.9778, "depth": 8}
if obj[4]<=13.370987400688758:
# {"feature": "Bar", "instances": 651, "metric_value": 0.9833, "depth": 9}
if obj[5]>-1.0:
return 'True'
elif obj[5]<=-1.0:
return 'True'
else: return 'True'
elif obj[4]>13.370987400688758:
# {"feature": "Bar", "instances": 98, "metric_value": 0.9217, "depth": 9}
if obj[5]<=1.0:
return 'True'
elif obj[5]>1.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>3.0:
# {"feature": "Occupation", "instances": 22, "metric_value": 0.7732, "depth": 8}
if obj[4]<=12:
# {"feature": "Bar", "instances": 19, "metric_value": 0.8315, "depth": 9}
if obj[5]>0.0:
return 'True'
elif obj[5]<=0.0:
return 'True'
else: return 'True'
elif obj[4]>12:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Occupation", "instances": 588, "metric_value": 0.9417, "depth": 7}
if obj[4]<=19.294968306511763:
# {"feature": "Bar", "instances": 541, "metric_value": 0.9335, "depth": 8}
if obj[5]>-1.0:
# {"feature": "Restaurant20to50", "instances": 537, "metric_value": 0.9308, "depth": 9}
if obj[6]<=1.0:
return 'True'
elif obj[6]>1.0:
return 'True'
else: return 'True'
elif obj[5]<=-1.0:
# {"feature": "Restaurant20to50", "instances": 4, "metric_value": 0.8113, "depth": 9}
if obj[6]<=2.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[4]>19.294968306511763:
# {"feature": "Bar", "instances": 47, "metric_value": 0.9971, "depth": 8}
if obj[5]<=2.0:
# {"feature": "Restaurant20to50", "instances": 38, "metric_value": 0.998, "depth": 9}
if obj[6]>0.0:
return 'True'
elif obj[6]<=0.0:
return 'False'
else: return 'False'
elif obj[5]>2.0:
# {"feature": "Restaurant20to50", "instances": 9, "metric_value": 0.7642, "depth": 9}
if obj[6]>1.0:
return 'True'
elif obj[6]<=1.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[1]>2:
# {"feature": "Direction_same", "instances": 614, "metric_value": 0.9877, "depth": 6}
if obj[7]<=0:
# {"feature": "Bar", "instances": 467, "metric_value": 0.9748, "depth": 7}
if obj[5]<=3.0:
# {"feature": "Occupation", "instances": 452, "metric_value": 0.9699, "depth": 8}
if obj[4]>0:
# {"feature": "Restaurant20to50", "instances": 450, "metric_value": 0.971, "depth": 9}
if obj[6]<=3.0:
return 'True'
elif obj[6]>3.0:
return 'True'
else: return 'True'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[5]>3.0:
# {"feature": "Occupation", "instances": 15, "metric_value": 0.9183, "depth": 8}
if obj[4]<=16:
# {"feature": "Restaurant20to50", "instances": 14, "metric_value": 0.8631, "depth": 9}
if obj[6]<=3.0:
return 'False'
elif obj[6]>3.0:
return 'True'
else: return 'True'
elif obj[4]>16:
return 'True'
else: return 'True'
else: return 'False'
elif obj[7]>0:
# {"feature": "Occupation", "instances": 147, "metric_value": 0.9984, "depth": 7}
if obj[4]<=8.013605442176871:
# {"feature": "Restaurant20to50", "instances": 93, "metric_value": 0.9992, "depth": 8}
if obj[6]>0.0:
# {"feature": "Bar", "instances": 72, "metric_value": 0.9978, "depth": 9}
if obj[5]<=3.0:
return 'False'
elif obj[5]>3.0:
return 'True'
else: return 'True'
elif obj[6]<=0.0:
# {"feature": "Bar", "instances": 21, "metric_value": 0.9183, "depth": 9}
if obj[5]<=3.0:
return 'True'
elif obj[5]>3.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]>8.013605442176871:
# {"feature": "Bar", "instances": 54, "metric_value": 0.9751, "depth": 8}
if obj[5]<=1.0:
# {"feature": "Restaurant20to50", "instances": 37, "metric_value": 0.909, "depth": 9}
if obj[6]<=2.0:
return 'False'
elif obj[6]>2.0:
return 'False'
else: return 'False'
elif obj[5]>1.0:
# {"feature": "Restaurant20to50", "instances": 17, "metric_value": 0.9774, "depth": 9}
if obj[6]>1.0:
return 'False'
elif obj[6]<=1.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[3]<=1:
# {"feature": "Restaurant20to50", "instances": 1509, "metric_value": 0.9431, "depth": 5}
if obj[6]<=2.0:
# {"feature": "Time", "instances": 1408, "metric_value": 0.9512, "depth": 6}
if obj[1]<=1:
# {"feature": "Direction_same", "instances": 847, "metric_value": 0.9751, "depth": 7}
if obj[7]<=0:
# {"feature": "Occupation", "instances": 432, "metric_value": 0.996, "depth": 8}
if obj[4]<=18.882886482140197:
# {"feature": "Bar", "instances": 400, "metric_value": 0.9974, "depth": 9}
if obj[5]<=3.0:
return 'True'
elif obj[5]>3.0:
return 'True'
else: return 'True'
elif obj[4]>18.882886482140197:
# {"feature": "Bar", "instances": 32, "metric_value": 0.9544, "depth": 9}
if obj[5]<=0.0:
return 'False'
elif obj[5]>0.0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Occupation", "instances": 415, "metric_value": 0.9335, "depth": 8}
if obj[4]<=13.389134189221542:
# {"feature": "Bar", "instances": 355, "metric_value": 0.9477, "depth": 9}
if obj[5]>0.0:
return 'True'
elif obj[5]<=0.0:
return 'True'
else: return 'True'
elif obj[4]>13.389134189221542:
# {"feature": "Bar", "instances": 60, "metric_value": 0.8113, "depth": 9}
if obj[5]<=2.0:
return 'True'
elif obj[5]>2.0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[1]>1:
# {"feature": "Direction_same", "instances": 561, "metric_value": 0.8994, "depth": 7}
if obj[7]<=0:
# {"feature": "Bar", "instances": 466, "metric_value": 0.8711, "depth": 8}
if obj[5]<=3.0:
# {"feature": "Occupation", "instances": 460, "metric_value": 0.8759, "depth": 9}
if obj[4]<=19.025502608326256:
return 'True'
elif obj[4]>19.025502608326256:
return 'True'
else: return 'True'
elif obj[5]>3.0:
return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Bar", "instances": 95, "metric_value": 0.9864, "depth": 8}
if obj[5]>0.0:
# {"feature": "Occupation", "instances": 53, "metric_value": 0.9977, "depth": 9}
if obj[4]<=21:
return 'False'
elif obj[4]>21:
return 'True'
else: return 'True'
elif obj[5]<=0.0:
# {"feature": "Occupation", "instances": 42, "metric_value": 0.8926, "depth": 9}
if obj[4]<=19:
return 'True'
elif obj[4]>19:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>2.0:
# {"feature": "Occupation", "instances": 101, "metric_value": 0.7562, "depth": 6}
if obj[4]<=18:
# {"feature": "Bar", "instances": 93, "metric_value": 0.7893, "depth": 7}
if obj[5]<=1.0:
# {"feature": "Time", "instances": 53, "metric_value": 0.6122, "depth": 8}
if obj[1]<=2:
# {"feature": "Direction_same", "instances": 36, "metric_value": 0.5033, "depth": 9}
if obj[7]>0:
return 'True'
elif obj[7]<=0:
return 'True'
else: return 'True'
elif obj[1]>2:
# {"feature": "Direction_same", "instances": 17, "metric_value": 0.7871, "depth": 9}
if obj[7]<=0:
return 'True'
elif obj[7]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[5]>1.0:
# {"feature": "Direction_same", "instances": 40, "metric_value": 0.9341, "depth": 8}
if obj[7]<=0:
# {"feature": "Time", "instances": 25, "metric_value": 0.9896, "depth": 9}
if obj[1]<=1:
return 'True'
elif obj[1]>1:
return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Time", "instances": 15, "metric_value": 0.7219, "depth": 9}
if obj[1]>0:
return 'True'
elif obj[1]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>18:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[0]>2:
# {"feature": "Time", "instances": 1826, "metric_value": 0.8821, "depth": 4}
if obj[1]>0:
# {"feature": "Occupation", "instances": 1422, "metric_value": 0.8981, "depth": 5}
if obj[4]<=13.119242508776624:
# {"feature": "Bar", "instances": 1227, "metric_value": 0.9082, "depth": 6}
if obj[5]<=2.0:
# {"feature": "Education", "instances": 1074, "metric_value": 0.9183, "depth": 7}
if obj[3]<=2:
# {"feature": "Restaurant20to50", "instances": 852, "metric_value": 0.9036, "depth": 8}
if obj[6]<=1.0:
# {"feature": "Direction_same", "instances": 600, "metric_value": 0.9216, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[6]>1.0:
# {"feature": "Direction_same", "instances": 252, "metric_value": 0.8524, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>2:
# {"feature": "Restaurant20to50", "instances": 222, "metric_value": 0.9631, "depth": 8}
if obj[6]<=2.0:
# {"feature": "Direction_same", "instances": 202, "metric_value": 0.977, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[6]>2.0:
# {"feature": "Direction_same", "instances": 20, "metric_value": 0.6098, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>2.0:
# {"feature": "Restaurant20to50", "instances": 153, "metric_value": 0.819, "depth": 7}
if obj[6]>0.0:
# {"feature": "Education", "instances": 138, "metric_value": 0.7936, "depth": 8}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 119, "metric_value": 0.7726, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]>3:
# {"feature": "Direction_same", "instances": 19, "metric_value": 0.8997, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=0.0:
# {"feature": "Education", "instances": 15, "metric_value": 0.971, "depth": 8}
if obj[3]<=2:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.9403, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]>2:
return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[4]>13.119242508776624:
# {"feature": "Bar", "instances": 195, "metric_value": 0.8213, "depth": 6}
if obj[5]<=3.0:
# {"feature": "Restaurant20to50", "instances": 179, "metric_value": 0.7764, "depth": 7}
if obj[6]<=2.0:
# {"feature": "Education", "instances": 169, "metric_value": 0.7993, "depth": 8}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 151, "metric_value": 0.8341, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]>3:
# {"feature": "Direction_same", "instances": 18, "metric_value": 0.3095, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>2.0:
return 'True'
else: return 'True'
elif obj[5]>3.0:
# {"feature": "Education", "instances": 16, "metric_value": 0.9887, "depth": 7}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 10, "metric_value": 0.8813, "depth": 8}
if obj[6]>1.0:
# {"feature": "Direction_same", "instances": 6, "metric_value": 1.0, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[6]<=1.0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Bar", "instances": 404, "metric_value": 0.8152, "depth": 5}
if obj[5]<=3.0:
# {"feature": "Restaurant20to50", "instances": 386, "metric_value": 0.7923, "depth": 6}
if obj[6]<=1.0:
# {"feature": "Occupation", "instances": 265, "metric_value": 0.8492, "depth": 7}
if obj[4]<=18.686908748292247:
# {"feature": "Education", "instances": 243, "metric_value": 0.8767, "depth": 8}
if obj[3]>1:
# {"feature": "Direction_same", "instances": 133, "metric_value": 0.8315, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]<=1:
# {"feature": "Direction_same", "instances": 110, "metric_value": 0.9213, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>18.686908748292247:
# {"feature": "Education", "instances": 22, "metric_value": 0.2668, "depth": 8}
if obj[3]<=2:
return 'True'
elif obj[3]>2:
# {"feature": "Direction_same", "instances": 8, "metric_value": 0.5436, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[6]>1.0:
# {"feature": "Occupation", "instances": 121, "metric_value": 0.6271, "depth": 7}
if obj[4]>3:
# {"feature": "Education", "instances": 98, "metric_value": 0.7095, "depth": 8}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 93, "metric_value": 0.7304, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]>3:
return 'True'
else: return 'True'
elif obj[4]<=3:
return 'True'
else: return 'True'
else: return 'True'
elif obj[5]>3.0:
# {"feature": "Occupation", "instances": 18, "metric_value": 0.9911, "depth": 6}
if obj[4]<=12:
# {"feature": "Restaurant20to50", "instances": 12, "metric_value": 0.9799, "depth": 7}
if obj[6]>1.0:
# {"feature": "Education", "instances": 8, "metric_value": 1.0, "depth": 8}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.9852, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[6]<=1.0:
# {"feature": "Education", "instances": 4, "metric_value": 0.8113, "depth": 8}
if obj[3]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 1.0, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[3]>0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>12:
# {"feature": "Education", "instances": 6, "metric_value": 0.65, "depth": 7}
if obj[3]<=0:
return 'False'
elif obj[3]>0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 1.0, "depth": 8}
if obj[6]<=2.0:
return 'True'
elif obj[6]>2.0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[8]>2:
# {"feature": "Passanger", "instances": 581, "metric_value": 0.9944, "depth": 3}
if obj[0]>0:
# {"feature": "Time", "instances": 561, "metric_value": 0.9903, "depth": 4}
if obj[1]>0:
# {"feature": "Education", "instances": 480, "metric_value": 0.999, "depth": 5}
if | |
<filename>wigpy/wig.py
import multiprocessing as mp
import os
import pdb
from collections import OrderedDict, defaultdict
from datetime import datetime
from functools import partial
from operator import itemgetter
import numpy as np
import pandas as pd
import spacy
import torch
from gensim.models import Word2Vec
# from scipy.stats import pearsonr, spearmanr
from sklearn import linear_model
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA, FastICA, TruncatedSVD
from sklearn.preprocessing import scale
from torch import Tensor, optim
from torch.nn.functional import softmax
from torch.utils.data import DataLoader
from wigpy.model import WasserIndexGen
from wigpy.utils import timer
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Ask spaCy to use the GPU when one is available (falls back to CPU otherwise).
spacy.prefer_gpu()
# from nltk.corpus import stopwords; stopwords.words('english');
# English stop-word list; per the commented line above it mirrors NLTK's
# stopwords.words('english') so NLTK (and its data download) is not a
# runtime dependency.
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves',
              'you', "you're", "you've", "you'll", "you'd", 'your', 'yours',
              'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
              "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself',
              'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which',
              'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am',
              'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has',
              'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the',
              'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
              'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into',
              'through', 'during', 'before', 'after', 'above', 'below', 'to',
              'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under',
              'again', 'further', 'then', 'once', 'here', 'there', 'when',
              'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few',
              'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not',
              'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't',
              'can', 'will', 'just', 'don', "don't", 'should', "should've",
              'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren',
              "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn',
              "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven',
              "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',
              "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn',
              "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won',
              "won't", 'wouldn', "wouldn't"]
class WIG():
    """Wasserstein Index Generation model driver.

    Wraps data preparation (spaCy tokenisation via ``idmap``), Word2Vec
    embedding, train/test splitting, and construction of the underlying
    ``WasserIndexGen`` model together with the topic basis/weight tensors.
    """

    def __init__(self,
                 dataset,
                 # NOTE(review): mutable default argument; it is only read
                 # (sum + unpack) below, so it is harmless, but a tuple
                 # would be safer.
                 train_test_ratio=[0.7, 0.3],
                 emsize=10,
                 batch_size=64,
                 num_topics=4,
                 reg=0.1,
                 epochs=5,
                 opt='adam',
                 lr=0.005,
                 wdecay=1.2e-6,
                 log_interval=50,
                 seed=0,
                 prune_topk=0,
                 l1_reg=0.01,
                 n_clusters=10,
                 ckpt_path='./ckpt',
                 numItermax=1000,
                 stopThr=1e-9,
                 dtype=torch.float32,
                 spacy_model='en_core_web_sm',
                 metric='sqeuclidean',
                 merge_entity=True,
                 process_fn=None,
                 remove_stop=False,
                 remove_punct=True,
                 device='cuda',
                 interval='M',
                 visualize_every=1,
                 loss_per_batch=False,
                 **kwargs):
        """
        Parameters:
        ======
        dataset : list, of (date, doc) pairs
        train_test_ratio : list, of floats sum to 1, how to split dataset
        emsize : int, dim of embedding
        batch_size : int, size of a batch
        num_topics : int, K topics
        reg : float, entropic regularization term in Sinkhorn
        epochs : int, epochs to train
        lr : float, learning rate for optimizer
        wdecay : float, L-2 regularization term used by some optimizers
        log_interval : int, print log one per k steps
        seed : int, pseudo-random seed for pytorch
        prune_topk : int, max no of tokens to use for pruning vocabulary
        l1_reg : float, L1 penalty for pruning
        n_clusters : int, KMeans clusters
        opt : str, which optimizer to use, default to 'adam'
        ckpt_path : str, checkpoint when training model
        numItermax : int, max steps to run Sinkhorn, dafault 1000
        dtype : torch.dtype, default torch.float32
        spacy_model : str, spacy language model name
            Default: nlp = spacy.load(
                'en_core_web_sm', disable=["tagger"])
        metric : str, 'sqeuclidean' or 'euclidean'
        merge_entity : bool, merge entity detected by spacy model, default True
        remove_stop : bool, whether to remove stop words, default False
        remove_punct : bool, whether to remove punctuation, default True
        interval : 'M', 'Y', 'D'
        visualize_every : int,
        loss_per_batch : bool, if print loss per batch
        Also parameters from Word2Vec
        """
        # `dt` and `dev` are module-level globals consumed by other methods
        # (train/evaluate reference `dev` directly).
        global dt
        if dtype:
            dt = dtype
        global dev
        if device == 'cuda':
            dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            dev = torch.device('cpu')
        # hyperparameters
        self.batch_size = batch_size
        self.num_topics = num_topics
        self.reg = reg
        self.epochs = epochs
        self.lr = lr
        self.emsize = emsize
        # opt
        self.opt = opt
        # NOTE(review): duplicate assignment -- self.lr was already set above.
        self.lr = lr
        self.wdecay = wdecay
        self.interval = interval
        self.visualize_every = visualize_every
        self.loss_per_batch = loss_per_batch
        self.log_interval = log_interval
        # ckpt: file name encodes the hyperparameters of this run.
        self.ckpt = os.path.join(ckpt_path,
                                 f'WIG_Bsz_{batch_size}_K_{num_topics}_LR_{lr}_EMsize_{emsize}_Reg_{reg}_Opt_{opt}_CompressTopk_{prune_topk}')
        self.ckpt_path = ckpt_path
        if not os.path.exists(ckpt_path):
            os.makedirs(ckpt_path)
        # set random seed
        self.seed = seed
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(seed)
        # Tokenise + build the id <-> date/doc lookup tables.
        # NOTE(review): `idmap` is not visible in this chunk -- presumably
        # defined elsewhere on this class; verify.
        sentences, self.id2date, self.id2doc, self.senid2docid, self.date2idlist = \
            self.idmap(dataset, spacy_model, merge_entity, process_fn,
                       remove_stop, remove_punct)
        # pdb.set_trace()
        # prepare train, eval, and test data
        self.d_l = d_l = len(self.senid2docid.keys())
        # NOTE(review): exact float equality on a sum of floats is fragile
        # (e.g. [0.7, 0.3] happens to pass, other splits may not), and the
        # message says "Three shares" although only two are unpacked below.
        assert sum(train_test_ratio) == 1., \
            'Three shares do not sum to one.'
        train_r, test_r = train_test_ratio
        print(f'Splitting data by ratio: {train_r} {test_r}')
        tr_l = round(d_l * train_r)
        ts_l = d_l - tr_l
        # shuffle ids
        data_ids = torch.randperm(d_l)  # ids of splitted sentences
        # Word2Vec model use more workers and less min_count than default
        # NOTE(review): these try/except blocks probe for module-level
        # globals named `workers`/`min_count`; unless such globals exist,
        # the defaults 20 and 1 always apply.
        try:
            workers
        except NameError:
            workers = 20
        try:
            min_count
        except NameError:
            min_count = 1
        # add to kwargs
        # NOTE(review): `size` and `wv.vocab` below are gensim<4 APIs
        # (gensim 4 renamed them to `vector_size` / `key_to_index`) --
        # confirm the pinned gensim version.
        kwargs['size'] = emsize
        kwargs['seed'] = seed
        kwargs['workers'] = workers
        kwargs['min_count'] = min_count
        print('run Word2Vec model for word embeddings...')
        word2vec = Word2Vec(sentences=[sentences[i] for i in data_ids],
                            **kwargs)
        wv = word2vec.wv
        self.vocab = wv.vocab
        print(f'Vocab length is {len(wv.vocab)}')
        # choose with algorithm to use, if compressed then shrink vocab dim
        # NOTE(review): `mjit`, `dist`, `compress_dictionary`,
        # `getfreq_single_original` and `getfreq_single_compress` are not
        # imported in this chunk -- presumably from wigpy.utils; verify.
        if prune_topk == 0:
            print('using original algorithm')
            X = torch.tensor(wv[wv.vocab], dtype=dtype, device=device)
            self.M = mjit(dist(X, metric=metric))
            mycollate = partial(getfreq_single_original, wv)
        elif prune_topk > 0:
            print('using compressed dictionary algorithm')
            print('compressed dictionary length is {}'.format(prune_topk))
            self.M, id2comdict = compress_dictionary(wv, topk=prune_topk,
                                                     n_clusters=n_clusters,
                                                     l1_reg=l1_reg,
                                                     metric='sqeuclidean')
            mycollate = partial(getfreq_single_compress, wv, id2comdict)
        else:
            # TODO: geodesic regression with l1?
            raise ValueError("use 'original' or 'compress' algotithm")
        tr_ids, ts_ids = data_ids.split([tr_l, ts_l])
        self.tr_ids, self.ts_ids = tr_ids, ts_ids
        # pdb.set_trace()
        self.tr_dl = DataLoader([sentences[i] for i in tr_ids],
                                batch_size=batch_size, collate_fn=mycollate)
        self.ts_dl = DataLoader([sentences[i] for i in ts_ids],
                                batch_size=batch_size, collate_fn=mycollate)
        self.model = WasserIndexGen(batch_size=batch_size,
                                    num_topics=num_topics,
                                    reg=reg,
                                    numItermax=numItermax,
                                    stopThr=stopThr,
                                    dtype=dt,
                                    device=dev)
        print(f'WIG model {self.model}')
        # init: random topic basis R (|vocab| x K) and weights A (K x docs)
        self.R = torch.randn((self.M.shape[0], self.num_topics),
                             device=dev)
        self.A = torch.randn((self.num_topics, self.d_l),
                             device=dev)
        # softmax over columns
        self.basis = softmax(self.R, dim=0)
        self.lbd = softmax(self.A, dim=0)
    @timer
    def train(self, loss_per_batch=False):
        """Optimise the topic basis/weights, checkpoint the best epoch, and
        return the final evaluation loss (a float).

        loss_per_batch : bool -- print a running average loss per batch.
        NOTE(review): this parameter shadows the `loss_per_batch` value
        stored by __init__ (self.loss_per_batch is never read here).
        """
        # set optimizer
        if self.opt == 'adam':
            optimizer = optim.Adam([self.R, self.A], lr=self.lr,
                                   weight_decay=self.wdecay)
        elif self.opt == 'adagrad':
            optimizer = optim.Adagrad([self.R, self.A], lr=self.lr,
                                      weight_decay=self.wdecay)
        elif self.opt == 'adadelta':
            optimizer = optim.Adadelta([self.R, self.A], lr=self.lr,
                                       weight_decay=self.wdecay)
        elif self.opt == 'rmsprop':
            optimizer = optim.RMSprop([self.R, self.A], lr=self.lr,
                                      weight_decay=self.wdecay)
        elif self.opt == 'asgd':
            optimizer = optim.ASGD([self.R, self.A], lr=self.lr,
                                   weight_decay=self.wdecay, t0=0, lambd=0.)
        else:
            print('Optimizer not supported . Defaulting to vanilla SGD...')
            optimizer = optim.SGD([self.R, self.A], lr=self.lr)
        # cnt = 0
        best_loss = 1e9
        for epoch in range(self.epochs):
            total_loss = 0.
            tr_id = 0
            for b_id, batch in enumerate(self.tr_dl):
                # One optimiser step per sentence in the batch.
                for each in batch:
                    self.R.requires_grad_()
                    self.A.requires_grad_()
                    self.basis.requires_grad_()
                    self.lbd.requires_grad_()
                    optimizer.zero_grad()
                    csa = each.view(-1, 1).to(dev)
                    clbd = self.lbd[:,
                                    self.tr_ids[tr_id]].view(-1, 1).to(dev)
                    reg = torch.tensor([self.reg]).to(dev)
                    loss = self.model(csa, self.M, self.basis, clbd, reg)
                    loss.backward()
                    optimizer.step()
                    # Detach in place so the next iteration builds a fresh graph.
                    self.R.detach_()
                    self.A.detach_()
                    self.basis.detach_()
                    self.lbd.detach_()
                    tr_id += 1
                    # NaN losses are skipped so they do not poison the average.
                    if not torch.isnan(loss).item():
                        # pdb.set_trace()
                        total_loss += loss.item()
                if loss_per_batch:
                    print('Average Loss: {0:.4f}'.format(total_loss / tr_id))
            # softmax over columns
            self.basis = softmax(self.R, dim=0)
            self.lbd = softmax(self.A, dim=0)
            # if not self.infer:
            # evaluate after training
            eval_loss = self.evaluate(self.model, self.ts_dl, self.ts_ids,
                                      self.basis, self.lbd)
            if eval_loss < best_loss:  # save best model among all epochs
                with open(self.ckpt, 'wb') as f:
                    torch.save((self.model, self.basis, self.lbd), f)
                best_loss = eval_loss
            print('*' * 50)
            print('Epoch: {}, LR: {}, Train Loss: {:.2f}, Eval Loss: {:.2f}'.format(
                epoch, self.lr, total_loss / tr_id, eval_loss))
            print('*' * 50)
            if epoch % self.visualize_every == 0:
                # TODO: implement visualize function per log_interval
                # visualize(self.model, self.vocab)
                pass
        # Reload the best checkpoint for the final evaluation.
        with open(self.ckpt, 'rb') as f:
            m, basis, lbd = torch.load(f)
        # NOTE(review): this passes self.basis/self.lbd (the *last* epoch's
        # values) rather than the `basis`/`lbd` just loaded from the best
        # checkpoint -- looks unintended; confirm.
        eval_loss = self.evaluate(m, self.ts_dl, self.ts_ids,
                                  self.basis, self.lbd)
        print(f'Evaluation Loss: {eval_loss.item()}')
        return eval_loss.item()
def evaluate(self, model, data_loader, data_ids, basis, lbd):
"""
data_loader: either eval_loader or test_loader
data_ids: self.eval_ids or self.test_ids
"""
model.eval()
with torch.no_grad():
total_loss = 0.
reg = torch.tensor([self.reg]).to(dev)
ts_id = 0
for b_id, batch in enumerate(data_loader):
for each in batch:
sa = each.view(-1, 1).to(dev)
clbd = lbd[:, data_ids[ts_id]].view(-1, 1)
loss = model(sa, self.M, basis, clbd, reg)
total_loss += loss.item()
ts_id += 1
return loss / ts_id
def generateindex(self, output_file='index.tsv', proj_algo='svd',
ifscale=False, compare=False):
"projection algorithm, default 'svd', or 'pca', 'ica'"
# TODO: generate time-series index from model
# raise NotImplementedError('generate index')
with open(self.ckpt, 'rb') as f:
m, basis, lbd = torch.load(f)
# load basis and \lambda to cpu
basis = basis.cpu()
lbd = lbd.cpu()
if proj_algo == 'svd':
proj = TruncatedSVD(n_components=1, random_state=self.seed)
elif proj_algo == 'ica':
proj = FastICA(n_components=1, random_state=self.seed)
raise NotImplementedError('ICA not available')
elif proj_algo == 'pca':
proj = PCA(n_components=1, random_state=self.seed)
raise NotImplementedError('PCA not available')
basis_proj = torch.tensor(proj.fit_transform(basis.T).T, device='cpu')
index_docs = basis_proj @ lbd
ordereddate = | |
<filename>coddiwomple/openmm/propagators.py
"""
OpenMM Propagator Adapter Module
"""
#####Imports#####
from coddiwomple.propagators import Propagator
from openmmtools.mcmc import BaseIntegratorMove
from simtk import unit
import simtk.openmm as openmm
import os
import numpy as np
import logging
from copy import deepcopy
#####Instantiate Logger#####
logging.basicConfig(level = logging.NOTSET)
_logger = logging.getLogger("openmm_propagators")
_logger.setLevel(logging.WARNING)
#Propagator Adapter
class OMMBIP(BaseIntegratorMove, Propagator):
    """
    Generalized OpenMM Base Integrator Propagator
    """
    def __init__(self,
                 openmm_pdf_state,
                 integrator,
                 context_cache=None,
                 reassign_velocities=False,
                 n_restart_attempts=0):
        """
        call the BaseIntegratorMove init method
        arguments
            openmm_pdf_state : coddiwomple.openmm_adapters.OpenMMPDFState
                the pdf state of the propagator
            integrator : openmm.Integrator
                integrator of dynamics
            context_cache : openmmtools.cache.ContextCache, optional
                The ContextCache to use for Context creation. If None, the global cache
                openmmtools.cache.global_context_cache is used (default is None).
            reassign_velocities : bool, optional
                If True, the velocities will be reassigned from the Maxwell-Boltzmann
                distribution at the beginning of the move (default is False).
            n_restart_attempts : int, optional
                When greater than 0, if after the integration there are NaNs in energies,
                the move will restart. When the integrator has a random component, this
                may help recovering. On the last attempt, the ``Context`` is
                re-initialized in a slower process, but better than the simulation
                crashing. An IntegratorMoveError is raised after the given number of
                attempts if there are still NaNs.
        attributes
            pdf_state : coddiwomple.openmm_adapters.OpenMMPDFState
            integrator : openmm.Integrator
            context_cache : openmmtools.cache.ContextCache
            reassign_velocities : bool
            n_restart_attempts : int or None
        """
        # n_steps is deferred to apply(); the base class only records the
        # remaining bookkeeping here.
        super().__init__(n_steps = None,
                         context_cache = context_cache,
                         reassign_velocities = reassign_velocities,
                         n_restart_attempts = n_restart_attempts)
        # NOTE(review): `BaseIntegratorMove.__class__.__name__` is the name of
        # the *metaclass*, not the class; `BaseIntegratorMove.__name__` was
        # probably intended in this log message.
        _logger.debug(f"successfully executed {BaseIntegratorMove.__class__.__name__} init.")
        import openmmtools.cache as cache
        from perses.dispersed.utils import check_platform, configure_platform
        from openmmtools.utils import get_fastest_platform
        # Best effort: pick the fastest platform, but never fail __init__ on it.
        try:
            cache.global_context_cache.platform = configure_platform(get_fastest_platform().getName())
        except Exception as e:
            _logger.warning(f"platform configuration error: {e}")
        self.pdf_state = openmm_pdf_state
        # Check if we have to use the global cache.
        if self.context_cache is None:
            self._context_cache = cache.global_context_cache
        else:
            self._context_cache = self.context_cache
        # Create context and reset integrator for good measure
        self.context, self.integrator = self._context_cache.get_context(self.pdf_state, integrator)
        self.integrator.reset()
        _logger.debug(f"successfully equipped integrator: {self.integrator.__class__.__name__}")
        _logger.debug(f"integrator printable: {self.integrator.pretty_print()}")

    def apply(self,
              particle_state,
              n_steps = 1,
              reset_integrator = False,
              apply_pdf_to_context = False,
              returnable_key = None,
              randomize_velocities = False,
              **kwargs):
        """
        Propagate the state through the integrator.
        This updates the particle_state after the integration.
        arguments
            particle_state : OpenMMParticleState
                The state to apply the move to. This is modified.
            n_steps : int, default 1
                number of steps to apply to the integrator
            reset_integrator : bool, default False
                whether to reset the integrator
            apply_pdf_to_context : bool, default False
                whether to self.pdf_state.apply_to_context
            returnable_key : str, default None
                which 'work' to return as the second returnable
                (when None, a proposal work of 0. is returned)
            randomize_velocities : bool, default False
                whether to randomize velocities on this particular application
        returns
            particle_state : OpenMMParticleState
                The state to apply the move to. This is modified.
            proposal_work : float
                proposal work to return
        see also
            openmmtools.utils.Timer
        """
        move_name = self.__class__.__name__  # shortcut (currently unused)
        # reset the integrator
        if reset_integrator:
            self.integrator.reset()
        if apply_pdf_to_context:
            self.pdf_state.apply_to_context(self.context)
        # Perform the integration.
        for attempt_counter in range(self.n_restart_attempts + 1):
            # If we reassign velocities, we can ignore the ones in particle_state.
            particle_state.apply_to_context(self.context, ignore_velocities=self.reassign_velocities)
            if self.reassign_velocities or randomize_velocities:
                self.context.setVelocitiesToTemperature(self.pdf_state.temperature)
            # Subclasses may implement _before_integration().
            self._before_integration(particle_state,
                                     n_steps,
                                     reset_integrator,
                                     apply_pdf_to_context,
                                     **kwargs)
            try:
                # Step one at a time so subclasses can observe each step via
                # _during_integration().
                for _ in range(n_steps):
                    self.integrator.step(1)
                    self._during_integration(particle_state,
                                             n_steps,
                                             reset_integrator,
                                             apply_pdf_to_context,
                                             **kwargs)
            except Exception as e:
                # Catches particle positions becoming nan during integration.
                _logger.warning(f"Exception raised: {e}")
                restart = True
            else:
                # We get also velocities here even if we don't need them because we
                # will recycle this State to update the sampler state object. This
                # way we won't need a second call to Context.getState().
                context_state = self.context.getState(getPositions=True, getVelocities=True, getEnergy=True,
                                                      enforcePeriodicBox=self.pdf_state.is_periodic)
                # Check for NaNs in energies.
                potential_energy = context_state.getPotentialEnergy()
                restart = np.isnan(potential_energy.value_in_unit(potential_energy.unit))
            # Restart the move if we found NaNs.
            if restart:
                err_msg = ('Potential energy is NaN after {} attempts of integration '
                           'with move {}'.format(attempt_counter, self.__class__.__name__))
                # If we are on our last chance before crash, try to re-initialize context
                if attempt_counter == self.n_restart_attempts - 1:
                    _logger.error(err_msg + ' Trying to reinitialize Context as a last-resort restart attempt...')
                    self.context.reinitialize()
                    self.integrator.reset()
                    particle_state.apply_to_context(self.context)
                    self.pdf_state.apply_to_context(self.context)
                # If we have hit the number of restart attempts, raise an exception.
                # NOTE(review): despite the docstring and this comment, no
                # IntegratorMoveError is actually raised here -- the failure is
                # only logged as a warning; confirm whether that is intended.
                elif attempt_counter == self.n_restart_attempts:
                    # Restore the context to the state right before the integration.
                    particle_state.apply_to_context(self.context)
                    _logger.warning(err_msg)
                else:
                    _logger.warning(err_msg + ' Attempting a restart...')
            else:
                break
        # Subclasses can read here info from the context to update internal statistics.
        self._after_integration(particle_state,
                                n_steps,
                                reset_integrator,
                                apply_pdf_to_context,
                                **kwargs)
        # Updated sampler state.
        # This is an optimization around the fact that Collective Variables are not a part of the State,
        # but are a part of the Context. We do this call twice to minimize duplicating information fetched from
        # the State.
        context_state = self.context.getState(getPositions=True, getVelocities=True, getEnergy=True,
                                              enforcePeriodicBox=self.pdf_state.is_periodic)
        # Update everything but the collective variables from the State object
        particle_state.update_from_context(context_state, ignore_collective_variables=True)
        # Update only the collective variables from the Context
        particle_state.update_from_context(self.context, ignore_positions=True, ignore_velocities=True,
                                           ignore_collective_variables=False)
        if returnable_key is not None:
            # e.g. returnable_key='proposal' calls integrator.get_proposal_work(...)
            try:
                proposal_work = getattr(self.integrator, f"get_{returnable_key}_work")(dimensionless=True)
            except Exception as e:
                raise Exception(f"{e}; the returnable key {returnable_key} is not a valid method in {self.integrator.__class__.__name__}")
        else:
            proposal_work = 0.
        return particle_state, proposal_work

    def _get_integrator(self):
        # Abstract hook inherited from BaseIntegratorMove; the integrator is
        # supplied to __init__ instead, so this is a no-op.
        pass

    def _get_global_integrator_variables(self):
        """
        return a dictionary of the self.integrator's global variables
        returns
            global_integrator_variables : dict
                {global variable name <str> : global variable value <float>}
        """
        num_global_vars = self.integrator.getNumGlobalVariables()
        global_integrator_variables = {self.integrator.getGlobalVariableName(idx): self.integrator.getGlobalVariable(idx) for idx in range(num_global_vars)}
        return global_integrator_variables

    def _get_context_parameters(self):
        """
        return a dictionary of the self.context's parameters
        returns
            context_parameters : dict
                {parameter name <str> : parameter value value <float>}
        """
        swig_parameters = self.context.getParameters()
        context_parameters = {q: swig_parameters[q] for q in swig_parameters}
        return context_parameters

    def _before_integration(self,
                            *args,
                            **kwargs):
        # Subclass hook: called once before the integration steps of apply().
        pass

    def _during_integration(self,
                            *args,
                            **kwargs):
        # Subclass hook: called after every single integrator step.
        pass

    def _after_integration(self,
                           *args,
                           **kwargs):
        # Subclass hook: called once after the integration loop of apply().
        pass
class OMMAISP(OMMBIP):
    """
    OpenMM Annealed Importance Sampling Propagator
    This propagator is equipped with a coddiwomple.openmm.integrators.OMMLIAIS integrator or a subclass thereof.
    The purpose is to allow for the management and validation of a singly-parameterized annealing protocol (i.e. 'fractional_iteration')
    Ensure that the self.apply() step will anneal the FULL protocol (i.e. fractional_iteration = 0., 1.) for all parameters
    """
    def __init__(self,
                 openmm_pdf_state,
                 integrator,
                 record_state_work_interval = None,
                 context_cache=None,
                 reassign_velocities=False,
                 n_restart_attempts=0):
        """
        see super
        arguments:
            record_state_work_interval : int, default None
                frequency with which to record the state work
                if None, the state work is never calculated during integration
        """
        super().__init__(openmm_pdf_state,
                         integrator,
                         context_cache=context_cache,
                         reassign_velocities=reassign_velocities,
                         n_restart_attempts=n_restart_attempts)
        #and there is one more validation that has to happen...
        # NOTE(review): `assert` is stripped under `python -O`; raising
        # ValueError would make this validation unconditional.
        pdf_state_parameters = list(self.pdf_state.get_parameters().keys())
        function_parameters = self.integrator._function_parameters
        assert set(pdf_state_parameters) == set(function_parameters), f"the pdf_state parameters ({pdf_state_parameters}) is not equal to the function parameters ({function_parameters})"
        self._record_state_work_interval = record_state_work_interval
        # _state_works: {application index: [state work trajectory]};
        # _state_works_counter increments once per apply().
        self._state_works = {}
        self._state_works_counter = 0

    def _before_integration(self, *args, **kwargs):
        """
        reset the per-application state-work record; when recording is
        enabled, seed it with 0.0 as the work before any annealing step
        """
        self._current_state_works = []
        if self._record_state_work_interval is not None:
            self._current_state_works.append(0.0)

    def _during_integration(self, *args, **kwargs):
        """
        update the state work
        """
        if self._record_state_work_interval is not None:
            integrator_variables = self._get_global_integrator_variables()
            iteration = integrator_variables['iteration']
            if iteration % self._record_state_work_interval == 0:
                self._current_state_works.append(self.integrator.get_state_work())

    def _after_integration(self, *args, **kwargs):
        """
        append the final state work (unless it was just recorded by the last
        `_during_integration` pass) and archive the record in self._state_works
        """
        integrator_variables = self._get_global_integrator_variables()
        iteration = integrator_variables['iteration']
        if self._record_state_work_interval is not None:
            if iteration % self._record_state_work_interval == 0:
                pass #do not record if the state work was recorded at the last `_during_integration` pass
            else:
                self._current_state_works.append(self.integrator.get_state_work())
        else:
            self._current_state_works.append(self.integrator.get_state_work())
        self._state_works[self._state_works_counter] = deepcopy(self._current_state_works)
        self._state_works_counter += 1

    @property
    def state_works(self):
        """
        return state works
        """
        return self._state_works
class OMMAISVP(OMMAISP):
    """
    OpenMMAIS Verbose Propagator

    A thin subclass of OMMAISP whose only addition is debug logging of the
    integrator's global variables (and, mid-integration, the context
    parameters) before, during, and after the integration steps.
    """
    def __init__(self,
                 openmm_pdf_state,
                 integrator,
                 record_state_work_interval = None,
                 context_cache=None,
                 reassign_velocities=False,
                 n_restart_attempts=0):
        super().__init__(openmm_pdf_state,
                         integrator,
                         record_state_work_interval = record_state_work_interval,
                         context_cache=context_cache,
                         reassign_velocities=reassign_velocities,
                         n_restart_attempts=n_restart_attempts)

    def _log_global_variables(self, header):
        """Emit *header*, then one debug line per integrator global variable."""
        _logger.debug(header)
        for var_name, var_value in self._get_global_integrator_variables().items():
            _logger.debug(f"\t\t{var_name}: {var_value}")

    def _before_integration(self, *args, **kwargs):
        super()._before_integration(*args, **kwargs)
        self._log_global_variables("\tintegrator_global_variables before integration:")

    def _during_integration(self, *args, **kwargs):
        super()._during_integration(*args, **kwargs)
        context_parameters = self._get_context_parameters()
        _logger.debug(f"\tcontext_parameters during integration: {context_parameters}")
        self._log_global_variables("\tintegrator_global_variables during integration: ")

    def _after_integration(self, *args, **kwargs):
        super()._after_integration(*args, **kwargs)
        self._log_global_variables("\tintegrator_global_variables after integration: ")
class OMMAISPR(OMMAISP):
"""
OpenMMAISP Reportable
OMMAISP is a simple subclass of OMMAISP that equips an OpenMMReporter object and writes a trajectory to disk at specified iterations
"""
def __init__(self,
openmm_pdf_state,
integrator,
record_state_work_interval = None,
reporter = None,
trajectory_write_interval = | |
convertors=convertors),
dumps(value, encoding, beautiful, ind, convertors=convertors)))
if i<len(a.items())-1:
if beautiful:
s.append(',\n')
# s.append(indent_char * indent)
else:
s.append(', ')
if beautiful:
s.append('\n')
s.append(indent_char*indent)
s.append('}')
elif isinstance(a, str):
t = a
for i in escapechars:
t = t.replace(i[0], i[1])
s.append("'%s'" % t)
elif isinstance(a, unicode):
t = a
for i in escapechars:
t = t.replace(i[0], i[1])
s.append("u'%s'" % t.encode(encoding))
else:
_type = type(a)
c_func = convertors.get(_type)
if c_func:
s.append(c_func(a))
else:
s.append(str(str_value(a, none='None', bool_int=bool_int)))
return ''.join(s)
def norm_path(path):
    """Return *path* as an absolute, normalized filesystem path."""
    absolute = os.path.abspath(path)
    return os.path.normpath(absolute)
r_expand_path = re.compile(r'\$\[(\w+)\]')

def expand_path(path):
    """
    Auto search some variables defined in path string, such as:
        $[PROJECT]/files
        $[app_name]/files
    for $[PROJECT] will be replaced with uliweb application apps_dir directory
    and others will be treated as a normal python package, so uliweb will
    use pkg_resources to get the path of the package
    update: 0.2.5 changed from ${} to $[]
    Also apply with os.path.expandvars(os.path.expanduser(path))
    """
    def replace(m):
        name = m.group(1)
        if name == 'PROJECT':
            # Imported lazily so paths without $[...] placeholders do not
            # require a running uliweb application.
            from uliweb import application
            return application.apps_dir
        # `pkg` is the pkg_resources alias imported at module level --
        # TODO confirm; only reached when a $[package] placeholder is present.
        return pkg.resource_filename(name, '')

    # BUG FIX: the substitution result was previously discarded -- the
    # function returned the expansion of the *original* `path`, so
    # $[...] placeholders were never replaced.
    expanded = r_expand_path.sub(replace, path)
    return os.path.expandvars(os.path.expanduser(expanded))
def date_in(d, dates):
    """
    Return True when *d* lies in the inclusive range (dates[0], dates[1]),
    i.e. dates[0] <= d <= dates[1]. A falsy *d* (e.g. None) yields False.
    """
    if not d:
        return False
    start, end = dates[0], dates[1]
    return start <= d <= end
class Serial(object):
    """
    Serialization helper supporting pickle (default) and json protocols.
    Note that json round-trips datetime values as plain strings rather than
    restoring datetime objects.
    """
    @classmethod
    def load(cls, s, protocal=None):
        import json
        if protocal == 'json':
            return json.loads(s)
        if not protocal:
            return pickle.loads(s)
        raise Exception("Can't support this protocal %s" % protocal)

    @classmethod
    def dump(cls, v, protocal=None):
        from uliweb import json_dumps
        if protocal == 'json':
            return json_dumps(v)
        if not protocal:
            return pickle.dumps(v, pickle.HIGHEST_PROTOCOL)
        raise Exception("Can't support this protocal %s" % protocal)
parse = import_('urllib', 'parse')
class QueryString(object):
    """Mutable wrapper around a URL's query string with dict-like access."""

    def __init__(self, url):
        self.url = safe_str(url)
        components = self.parse()
        self.scheme, self.netloc, self.script_root, raw_qs, self.anchor = components
        # keep_blank_values=True so 'a=' round-trips
        self.qs = parse.parse_qs(raw_qs, True)

    def parse(self):
        return parse.urlsplit(self.url)

    def __getitem__(self, name):
        return self.qs.get(name, [])

    def __setitem__(self, name, value):
        self.qs[name] = [value]

    def set(self, name, value, replace=False):
        """Append *value* for *name*, or replace all values when *replace*."""
        if replace:
            self.qs[name] = [value]
        else:
            self.qs.setdefault(name, []).append(value)
        return self

    def __str__(self):
        encoded = parse.urlencode(self.qs, True)
        return parse.urlunsplit((self.scheme, self.netloc, self.script_root,
                                 encoded, self.anchor))
def query_string(url, replace=True, **kwargs):
    """Return *url* with each keyword argument applied to its query string."""
    wrapper = QueryString(url)
    for key, val in kwargs.items():
        wrapper.set(key, val, replace)
    return str(wrapper)
def camel_to_(s):
    """Convert a CamelCase identifier to snake_case (camel_case)."""
    # First split runs like 'lCase' -> 'l_Case', then split any remaining
    # lower/digit-to-upper boundary, and lowercase the result.
    first_pass = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
    second_pass = re.sub('([a-z0-9])([A-Z])', r'\1_\2', first_pass)
    return second_pass.lower()
def application_path(path):
    """Return *path* joined under the running application's project_dir."""
    from uliweb import application
    base = application.project_dir
    return os.path.join(base, path)
def get_uuid(type=4):
    """Return the hex digest of a fresh uuid.

    *type* selects the factory by name: 4 -> uuid.uuid4 (random, default),
    1 -> uuid.uuid1 (host/time based).
    """
    import uuid
    factory = getattr(uuid, 'uuid%s' % type)
    return factory().hex
def pretty_dict(d, leading=' ', newline='\n', indent=0, tabstop=4, process=None):
    """Yield formatted lines rendering dict *d*, nested dicts one tabstop
    deeper, e.g.::

        a : 'b'
        c :
            d : 'e'

    *process*, when given, may rewrite each (key, value) pair before
    formatting.  Keys and values are rendered through simple_value().
    """
    pad = leading * (indent * tabstop)
    for key, val in d.items():
        if process:
            key, val = process(key, val)
        if isinstance(val, dict):
            yield '%s%s : %s' % (pad, simple_value(key), newline)
            # note: ``process`` is deliberately not propagated (matches the
            # original recursion)
            for line in pretty_dict(val, leading=leading, newline=newline,
                                    indent=indent + 1, tabstop=tabstop):
                yield line
        else:
            yield '%s%s : %s%s' % (pad, simple_value(key), simple_value(val), newline)
def request_url(req=None):
    """Return the path (plus '?query' if present) of *req*, defaulting to
    the current uliweb request.  Returns '' when no request is available.

    The query string is passed through the project's ``u()`` text helper.
    """
    from uliweb import request
    r = req or request
    # Bug fix: previously this tested the global ``request`` even when an
    # explicit ``req`` was supplied, returning '' despite having a usable
    # request object.  Test the request actually selected.
    if r:
        if r.query_string:
            return r.path + '?' + u(r.query_string)
        else:
            return r.path
    else:
        return ''
def quote_url(url):
    """Return *url* with its path component percent-quoted.

    The path is unquoted first, so already-quoted input is not
    double-quoted; scheme, netloc, query and fragment pass through as-is.
    """
    parse = import_('urllib', 'parse')
    scheme, netloc, path, qs, anchor = parse.urlsplit(safe_str(url))
    quoted_path = parse.quote(parse.unquote(path))
    return parse.urlunsplit((scheme, netloc, quoted_path, qs, anchor))
def unquote_url(url):
    """Return *url* with percent-escapes in its path component decoded;
    scheme, netloc, query and fragment pass through unchanged."""
    parse = import_('urllib', 'parse')
    scheme, netloc, path, qs, anchor = parse.urlsplit(safe_str(url))
    plain_path = parse.unquote(path)
    return parse.urlunsplit((scheme, netloc, plain_path, qs, anchor))
def flat_list(*alist):
    """Flatten the given arguments into a single list.

    Each argument may be a scalar, tuple or list; None values (either as
    arguments or inside sequences) are dropped.  Only one level of nesting
    is flattened.

    >>> flat_list(1, 2, 3)
    [1, 2, 3]
    >>> flat_list([1, 2, 3])
    [1, 2, 3]
    >>> flat_list([None])
    []
    """
    result = []
    for item in alist:
        if item is None:
            continue
        if isinstance(item, (tuple, list)):
            result.extend(v for v in item if v is not None)
        else:
            result.append(item)
    return result
def compare_dict(da, db):
    """Return (only_in_da, only_in_db): the (key, value) pairs of each dict
    not present with an identical value in the other.

    NOTE(review): values must be hashable, since items are compared as sets.
    """
    items_a = set(da.items())
    items_b = set(db.items())
    common = items_a & items_b
    return dict(items_a - common), dict(items_b - common)
def get_caller(skip=None):
    """
    Get the caller information, it'll return: (filename, line_no, funcname).

    *skip* is an optional glob pattern (or list/tuple of patterns) of
    filenames — extension stripped, '/' separators — to skip over while
    walking up the stack; the first non-matching frame is reported.
    """
    import inspect
    from fnmatch import fnmatch
    try:
        stack = inspect.stack()
    except Exception:
        # some exotic interpreters can't produce a full stack; fall back to
        # the current frame only
        stack = [None, inspect.currentframe()]
    if len(stack) > 1:
        stack.pop(0)
    # Bug fix: a list/tuple ``skip`` was previously discarded (replaced by
    # []) by the old if/else; only a scalar pattern ever worked.
    if not skip:
        skip = []
    elif not isinstance(skip, (list, tuple)):
        skip = [skip]
    ptn = [os.path.splitext(s.replace('\\', '/'))[0] for s in skip]
    for frame in stack:
        # see doc: inspect
        # the frame object, the filename, the line number of the current line,
        # the function name, a list of lines of context from the source code,
        # and the index of the current line within that list
        if isinstance(frame, tuple):
            filename, funcname, lineno = frame[1], frame[3], frame[2]
        else:
            filename, funcname, lineno = frame.f_code.co_filename, frame.f_code.co_name, frame.f_lineno
        del frame
        found = False
        for k in ptn:
            # normalized only when patterns exist (matches historical output:
            # with no ``skip`` the raw filename is returned)
            filename = os.path.splitext(filename.replace('\\', '/'))[0]
            if fnmatch(filename, k):
                found = True
                break
        if not found:
            return filename, lineno, funcname
class classonlymethod(classmethod):
    """
    A classmethod variant that may only be reached through the class
    itself; attribute access via an instance raises AttributeError.

    >>> class A(object):
    ...     @classonlymethod
    ...     def p(cls):
    ...         print('call p()')
    >>> A.p()
    call p()
    >>> try:
    ...     A().p()
    ... except AttributeError as e:
    ...     print(e)
    This method can only be called with class object.
    """
    def __get__(self, instance, owner):
        if instance is None:
            return super(classonlymethod, self).__get__(instance, owner)
        raise AttributeError("This method can only be called with class object.")
def trim_path(path, length=30):
    """Shorten *path* to roughly *length* characters, keeping the trailing
    components and prefixing '.../' to mark the cut.

    >>> trim_path('/project/apps/default/settings.ini')
    '.../apps/default/settings.ini'

    The kept tail is at most length-4 characters ('.../' uses the rest).
    """
    parts = path.replace('\\', '/').split('/')
    budget = length - 4
    total = -1
    idx = 0
    for idx in range(len(parts) - 1, -1, -1):
        total += len(parts[idx]) + 1
        if total > budget:
            break
    return '.../' + '/'.join(parts[idx + 1:])
class cached_property(object):
    """
    cached function return value
    """
    # NOTE(review): the computed value is stored on the descriptor itself,
    # so it is shared by every instance of the owning class — this is a
    # class-level cache, not per-instance; confirm that is intended.
    def __init__(self, func):
        # _Default is a module-level sentinel meaning "not yet computed".
        self.value = _Default
        self.func = func
    def __get__(self, obj, type=None):
        value = self.value
        if self.value is _Default:
            # NOTE(review): the wrapped function is called with the owner
            # *class* (``type``), not the instance ``obj`` — unlike the
            # conventional cached_property pattern; verify callers expect
            # the class to be passed here.
            value = self.func(type)
            self.value = value
        return value
def get_temppath(prefix, suffix='', dir=''):
    """Create a fresh temporary *directory* and return its path
    (tempfile.mkdtemp; the caller is responsible for removal)."""
    import tempfile
    return tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=dir)
def get_tempfilename2(prefix, suffix='', dir=''):
    """Create a temporary file and return (os_level_fd, path)
    (tempfile.mkstemp; the caller must close the fd and remove the file)."""
    import tempfile
    return tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)
def get_tempfilename(prefix, suffix='', dir=''):
    """Create a temporary file and return only its path.

    Bug fix: mkstemp returns an *open* OS-level descriptor; the old code
    discarded it without closing, leaking one fd per call.  The caller is
    still responsible for removing the file.
    """
    import os
    fd, path = get_tempfilename2(prefix, suffix, dir)
    os.close(fd)
    return path
def get_configrable_object(key, section, cls=None):
    """Resolve *key* to a class.

    A class object (checked as a subclass of *cls* when given) is returned
    as-is; a string key is looked up in settings[section] and the resulting
    dotted path imported via import_attr.  Anything else is an error.
    """
    from uliweb import UliwebError, settings
    import inspect
    if inspect.isclass(key) and cls and issubclass(key, cls):
        return key
    if isinstance(key, string_types):
        path = settings[section].get(key)
        if not path:
            raise UliwebError("Can't find section name %s in settings" % section)
        return import_attr(path)
    raise UliwebError("Key %r should be subclass of %r object or string path format!" % (key, cls))
def format_size(size):
    """
    Render a byte count as a short human-readable string (B/KB/MB/GB).

    Values are truncated to whole units; anything of a terabyte or more is
    still reported in (possibly >1023) GB.
    :param size: length value
    :return: string value with size unit
    """
    remaining = size
    previous = size
    for unit in ('B', 'KB', 'MB', 'GB'):
        quotient, remainder = divmod(remaining, 1024)
        if quotient == 0:
            return str(remainder) + unit
        previous = remaining
        remaining = quotient
    # ran past the largest unit: report the GB count computed above
    return str(previous) + 'GB'
def convert_bytes(n):
    """
    Render *n* (a byte count) with a binary-prefix symbol, e.g.
    2048 -> '2.0K'.  Values below 1024 come back as '<n>B'.
    """
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # K = 2**10, M = 2**20, ... built once up front
    thresholds = {s: 1 << ((i + 1) * 10) for i, s in enumerate(symbols)}
    for sym in reversed(symbols):
        limit = thresholds[sym]
        if n >= limit:
            return '%.1f%s' % (float(n) / limit, sym)
    return "%sB" % n
def read_syntax_line(f):
import token
import tokenize
g = tokenize.generate_tokens(f.readline)
buf = []
for v in g:
tokentype, t, start, end, line = v
if tokentype == 54:
continue
if tokentype | |
v._pos._x, v._pos._y
if ( Pos ( x, y ) ):
return self._mat [ x ][ y ]
else:
return None
def __setitem__ ( self, v, val ):
if ( isinstance ( v, Pos ) ):
x, y = v._x, v._y
if ( isinstance ( v, tuple ) ):
x, y = v
if ( isinstance ( v, Piece ) ):
x, y = v._pos._x, v._pos._y
if ( Pos ( x, y ) ):
self._mat [ x ][ y ] = val
return self._mat [ x ][ y ]
else:
return None
  def __iter__ ( self ):
    # Iteration is delegated to the nested GameBoard_iterator; other
    # methods (e.g. _is_check) rely on it yielding every board square.
    return GameBoard.GameBoard_iterator ( self )
@property
def translated ( self ):
return list ( reversed ( [ list ( map ( lambda x : x.parsed () , p ) ) for p in self._mat ] ) )
def _is_check ( self, side, p = None ):
if ( p is None ):
for piece in self:
if ( isinstance ( piece, King ) and piece._side == side ):
p = piece._pos
for piece in self:
if ( piece and piece._side * side == -1 ):
if ( p in piece () ):
return True
return False
def _is_castle ( self, p1, p2, side ):
if ( side == 1 ):
if ( not self._wc ):
return False
else:
if ( not self._bc ):
return False
if ( p1 and self [ p1 ]._side * side == 1 and p2 and self [ p2 ]._side * side == 1 ):
if ( isinstance ( self [ p2 ], King ) ):
p1, p2 = p2, p1
if ( isinstance ( self [ p1 ], King ) and isinstance ( self [ p2 ], Rook ) ):
for y in range ( min ( p1._y, p2._y) + 1, max ( p1._y, p2._y ) ):
if ( self [ ( p1._x, y ) ] ):
return False
if ( p2._y == 0 ):
k = -1
else:
k = 1
if ( self._is_check ( p1 + (0, k) ) or self._is_check ( side, p1 + (0, 2*k) ) ):
return False
else:
return False
else:
return False
return True
def _is_enpass ( self, p1, p2, side ):
if ( 0 <= self._enpass < 64 ):
pass
else:
return False
x, y = self._enpass // 8, self._enpass % 8
if ( isinstance ( self [ p1 ], Soldier ) and self [ p1 ]._side * side == 1 ):
if ( p1._x == x and abs ( p1.y - y ) == 1 ):
return True
return False
  def _move ( self, p1, p2 ):
    # Perform a plain move p1 -> p2, recording enough state to undo it.
    x1, y1 = p1._x, p1._y
    x2, y2 = p2._x, p2._y
    # Snapshot: captured piece (if any), fresh copies of both squares,
    # castle rights and the en-passant square; _undo_move restores these.
    self._pre.append ( (self [ p2 ], Pos ( x1, y1 ), Pos ( x2, y2 ), self._wc, self._bc, self._enpass) )
    self._undo.append ( self._undo_move )
    self [ p2 ] = self [ p1 ]
    # both squares alias the same piece object right now, so updating its
    # position via p1 also updates the entry just stored at p2
    self [ p1 ]._pos = Pos ( x2, y2 )
    self [ p1 ] = EmptyField ()
  def _undo_move ( self ):
    # Revert the most recent plain move using the snapshot left by _move.
    # (The caller — GameBoard.undo — pops the stacks afterwards.)
    piece, p1, p2, wc, bc, enpass = self._pre [ -1 ]
    self._wc, self._bc, self._enpass = wc, bc, enpass
    # (x1..y2 are unpacked but unused)
    x1, y1 = p1._x, p1._y
    x2, y2 = p2._x, p2._y
    self [ p1 ] = self [ p2 ]
    # p1 + (0, 0) builds a fresh Pos copy for the restored piece
    self [ p1 ]._pos = p1 + (0, 0)
    self [ p2 ] = piece
  def _move_castle ( self, p1, p2 ):
    # Execute castling between the squares p1/p2 (either order); normalize
    # so p1 is the king and p2 the rook.
    if ( isinstance ( self [ p2 ], King ) ):
      p1, p2 = p2, p1
    # k: direction the king travels, towards the rook's file
    if ( p2._y == 0 ):
      k = -1
    else:
      k = 1
    # king jumps two files towards the rook...
    self [ p1 ]._pos += (0, 2 * k)
    self [ p1 ], self [ p1 + (0, 2 * k) ] = self [ p1 + (0, 2 * k) ], self [ p1 ]
    # ...and the rook lands on the square the king crossed
    self [ p2 ]._pos = p1 + (0, k)
    self [ p2 ], self [ p1 + (0, k) ] = self [ p1 + (0, k) ], self [ p2 ]
    # rank 0 is white's back rank here, so a nonzero x means black castled
    if ( p1._x ):
      self._bc = False
    else:
      self._wc = False
    # snapshot the (king, rook) origin squares for _undo_castle
    self._pre.append ( (p1 + (0, 0), p2 + (0, 0)) )
    self._undo.append ( self._undo_castle )
  def _undo_castle ( self ):
    # Revert the most recent castling using the (king, rook) origin squares
    # snapshotted by _move_castle; mirrors its swaps in reverse.
    p1, p2 = self._pre [ -1 ]
    if ( p2._y == 0 ):
      k = -1
    else:
      k = 1
    self [ p1 ], self [ p1 + (0, 2 * k) ] = self [ p1 + (0, 2 * k) ], self [ p1 ]
    self [ p1 ]._pos = p1 + (0, 0)
    self [ p2 ], self [ p1 + (0, k) ] = self [ p1 + (0, k) ], self [ p2 ]
    self [ p2 ]._pos = p2 + (0, 0)
    # restore the castling right that _move_castle revoked
    if ( p1._x ):
      self._bc = True
    else:
      self._wc = True
  def _move_enpass ( self, p1, p2, side ):
    # En-passant capture p1 -> p2; the captured pawn sits one rank behind
    # the destination, at p2 + (-side, 0).
    self._undo.append ( self._undo_enpass )
    # Snapshot: captured pawn, source copy, destination copy, previous
    # en-passant square and the moving side (consumed on undo).
    self._pre.append ( (self [ p2 + (-side, 0) ], p1 + (0, 0), p2 + (0, 0), self._enpass, side ) )
    self [ p1 ]._pos = p2 + (0, 0)
    self [ p1 ], self [ p2 ] = self [ p2 ], self [ p1 ]
    self [ p2 + (-side, 0) ] = EmptyField ()
    # the opportunity is consumed; 64 means "no en-passant square"
    self._enpass = 64
def _undo_enpass ( self ):
piece, p1, p2, enpass, side = self._undo [ -1 ]
self [ p1 ], self [ p2 ] = self [ p2 ], self [ p1 ]
self [ p1 ]._pos = p2 + (0, 0)
self [ p2 + (-side, 0) ] = piece
self._enpass = enpass
  def undo ( self ):
    # Run the most recent undo routine first — it reads its snapshot from
    # self._pre[-1] — and only then drop both stack entries.
    self._undo [ -1 ]()
    self._undo.pop ()
    self._pre.pop ()
  def check_move ( self, p1, p2, side ):
    # Trial a move: legal iff the piece at p1 belongs to *side*, p2 is
    # among its reachable squares (self[p1]()), and making the move does
    # not leave *side* in check.  The board is always restored.
    res = False
    if ( self [ p1 ] and self [ p1 ]._side * side == 1 and (p2 in self [ p1 ] ()) ):
      self._move ( p1, p2 )
      if ( not self._is_check ( side ) ):
        res = True
      self.undo ()
    return res
def move ( self, p1, p2, side ):
if ( p1 and p2 ):
pass
else:
raise InvalidMoveError ( "Move is not valid." )
if ( self [ p1 ] and self [ p1 ]._side == side ):
pass
else:
raise InvalidMoveError ( "Move is not valid." )
if ( self._is_castle ( p1, p2, side ) ):
self._move_castle ( p1, p2 )
return None
if ( self._is_enpass ( p1, p2, side ) ):
self._move_enpass ( p1, p2 )
if ( self._is_check ( side ) ):
self.undo ()
raise InvalidMoveError ( "Move is not valid." )
else:
return None
if ( self.check_move ( p1, p2, side ) ):
self._move ( p1, p2 )
self._enpas = 64
if ( isinstance ( self [ p1 ], Soldier ) and p1._x == (7 + side) % 7 and abs(p2._x - p1._x) == 2 ):
self._enpas = 8 * p2._x + p2._y
if ( p1 == Pos ( (8 + side) % 9, 0 ) or p1 == Pos ( (8 + side) % 9, 7 ) or p1 == Pos ( (8 + side) % 9, 4 ) ):
if ( side == 1 ):
self._wc = False
else:
self._bc = False
else:
raise InvalidMoveError ( "Move is not valid." )
def promote ( self, p, side, obj ):
self._pre.append ( (p + (0, 0), side) )
self._undo.append ( self._undo_promote )
self [ p ] = obj ( p._x, p._y, side, self )
def _undo_promote ( self ):
p, side = self._pre [ -1 ]
self [ p ] = Soldier ( p._x, p._y, side, self )
  def __call__ ( self, side ):
    # Generator over *side*'s legal moves: each candidate is actually
    # played on the board, 0 is yielded (the consumer inspects the board
    # state, e.g. is_mate calling _is_check), and the move is undone.
    for piece in self:
      if ( piece and piece._side == side ):
        # the two diagonal pawn-capture squares are handled separately
        # below (they are legal only via capture / en passant)
        lst = [ piece._pos + (side, 1), piece._pos + (side, -1) ]
        for dest in piece ():
          if ( dest in lst ):
            lst.remove ( dest )
          try:
            self.move ( piece._pos, dest, side )
            # pawn reaching the last rank (dest + (side, 0) falls off the
            # board): try every promotion piece
            if ( isinstance ( piece, Soldier ) and not (dest + (side, 0)) ):
              for clas in [ Rook, Bishop, Knight, Queen ]:
                self.promote ( dest, side, clas )
                yield 0
                # NOTE(review): _undo_promote is called directly, which
                # reverts the board but does not pop the _pre/_undo
                # entries promote() pushed — confirm the stacks stay
                # balanced on this path.
                self._undo_promote ()
            else:
              yield 0
            self.undo ()
          except InvalidMoveError:
            pass
        # remaining diagonal squares: only valid as (en-passant) captures,
        # so let move() decide
        for dest in lst:
          try:
            self.move ( piece._pos, dest, side )
            yield 0
            self.undo ()
          except InvalidMoveError:
            pass
    # finally the four rook/king pairings, i.e. castling candidates
    lst = [ (Pos (0, 0), Pos ( 0, 4 )), (Pos (0, 7), Pos ( 0, 4 )), (Pos (7, 0), Pos ( 7, 4 )), (Pos (7, 7), Pos ( 7, 4 )) ]
    for ( p1, p2 ) in lst:
      if ( self [ p1 ] and self [ p2 ] and self [ p1 ]._side == side == self [ p2 ]._side ):
        try:
          self.move ( p1, p2, side )
          yield 0
          self.undo ()
        except InvalidMoveError:
          pass
  def is_mate ( self, side ):
    # Checkmate test: *side* is in check and no generated move escapes it.
    # Iterating self(side) plays each candidate move before yielding and
    # undoes it afterwards, so _is_check here sees the post-move position.
    ret = True
    if ( self._is_check ( side ) ):
      for i in self ( side ):
        if ( not self._is_check ( side ) ):
          ret = False
    else:
      ret = False
    return ret
  def is_pot ( self, side ):
    # Stalemate ("pat") test: *side* is NOT in check, yet every generated
    # move would leave it in check — i.e. no legal move exists.  As in
    # is_mate, self(side) plays each move before yielding and undoes it.
    ret = True
    if ( not self._is_check ( side ) ):
      for x in self ( side ):
        if ( not self._is_check ( side ) ):
          ret = False
    else:
      ret = False
    return ret
# configuring first | |
# Source: roadnarrows-robotics/rnr-sdk — Fusion/modules/Fusion/Gui/GuiXYGraph.py
################################################################################
#
# GuiXYGraph.py
#
""" Graphical User Interface X-Y Graph Module.
Graphical User Interface (GUI) Tkinter simple x-y graphing module.
Author: <NAME>
Email: <EMAIL>
URL: http://www.roadnarrowsrobotics.com
Date: 2006.01.25
Copyright (C) 2006. RoadNarrows LLC.
"""
#
# All Rights Reserved
#
# Permission is hereby granted, without written agreement and without
# license or royalty fees, to use, copy, modify, and distribute this
# software and its documentation for any purpose, provided that
# (1) The above copyright notice and the following two paragraphs
# appear in all copies of the source code and (2) redistributions
# including binaries reproduces these notices in the supporting
# documentation. Substantial modifications to this software may be
# copyrighted by their authors and need not follow the licensing terms
# described here, provided that the new terms are clearly indicated in
# all files where they apply.
#
# IN NO EVENT SHALL THE AUTHOR, ROADNARROWS LLC, OR ANY MEMBERS/EMPLOYEES
# OF ROADNARROW LLC OR DISTRIBUTORS OF THIS SOFTWARE BE LIABLE TO ANY
# PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL
# DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
# EVEN IF THE AUTHORS OR ANY OF THE ABOVE PARTIES HAVE BEEN ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHOR AND ROADNARROWS LLC SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN
# "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
################################################################################
import math
import tkinter as tk
import Fusion.Utils.Pt as pt
import Fusion.Gui.GuiTypes as gt
import Fusion.Gui.GuiUtils as gut
#-------------------------------------------------------------------------------
# Global Data
#-------------------------------------------------------------------------------
# flag to force real text width and height base calculations (checked in
# GuiXYGraph.__init__ before _onetimes() runs)
_DidOneTimes = False
# character characteristics
_CharW = 9 # max text character width (pixels)
_CharH = 15 # max text character height (pixels)
# canvas grid margins, expressed in character cells
_EdgeLeft = _CharW * 8 # left edge margin
_EdgeTop = _CharH * 2 # top edge margin
_EdgeBottom = _CharH * 3 # bottom edge margin
_EdgeRight = _CharW * 2 # right edge margin
#-------------------------------------------------------------------------------
# CLASS: GuiXYGraph
#-------------------------------------------------------------------------------
class GuiXYGraph:
""" GUI X-Y Graphing Class """
#--
  def __init__(self, canvas, **kwopts):
    """ Initialize x-y graph instance.
        Parameters:
          canvas  - the graphing canvas (tk.Canvas() instance)
          **kwopts:
            xdata       - list of x data. Default: []
            ydata       - list of y data corresponding 1-1 with the x data.
                          Default: []
            fofx        - y=f(x) data generator. If specified, this function
                          takes precedence over the ydata list.
                          Default: None
            title       - title of this graph. Default: 'y = f(x)'
            xlabel      - x-axis label. Default: 'x'
            ylabel      - y-axis label. Default: 'y'
            xstep       - x-axis grid step size. Default: 1
            ystep       - y-axis grid step size. Default: 1
            linewidth   - graph line width. Default: 1
            linecolor   - graph line color. Default: ColorBlue1
            showpoints: - do [not] show graph (x,y) points. Default: True
            domain:     - do [not] auto limit x domain. if 'auto', then xmin
                          and xmax are determined by the xdata.
                          Otherwise specify (xmin, xmax). Default: 'auto'
            range:      - do [not] auto limit y range. if 'auto', then ymin
                          and ymax are determined by the ydata/fofx.
                          Otherwise specify (ymin, ymax). Default: 'auto'
    """
    # the graphing canvas
    self.mCanvas = canvas
    # force idletask to determine size, etc.
    self.mCanvas.update_idletasks()
    # canvas geometry
    geo = gut.geometry(self.mCanvas)
    # canvas width and height
    self.mCanvasWidth = geo[gut.W]
    self.mCanvasHeight = geo[gut.H]
    # canvas graphic ids
    self.mGidsXGrid = []
    self.mGidsYGrid = []
    self.mGidsData = []
    self.mGidsLabels = []
    # determine real text dimensions used by this canvas
    # (NOTE(review): _onetimes presumably flips the module-level
    # _DidOneTimes flag — confirm, it is only read here)
    if not _DidOneTimes:
      self._onetimes()
    # initialize grid dimension data
    self._griddim(geo[gut.W], geo[gut.H])
    # initialize graph data
    self.mXMin = 0.0
    self.mXMax = 0.0
    self.mYMin = 0.0
    self.mYMax = 0.0
    self.mDomSpan = 0.0
    self.mRanSpan = 0.0
    self.mData = []
    # graph the data
    self.graph(**kwopts)
#--
  def configure(self, width, height):
    """ (Re)configure the graph canvas to the new width and height,
        then redraw the whole graph at the new size.
        Parameters:
          width   - new graph canvas width in pixels
          height  - new graph canvas height in pixels
        Return Value:
          None
    """
    self.erase()
    self.mCanvas.configure(width=width, height=height)
    # recompute grid geometry for the new canvas size
    self._griddim(width, height)
    # race condition can leave droppings, erase again (kludge)
    self.erase()
    self._graph()
#--
  def redraw(self):
    """ Redraw complete x-y graph, including title, labels, etc.
        Return Value:
          None
    """
    self.erase()
    self._graph()
#--
def erase(self):
""" Erase the x-y graph from canvas. """
self._erasexgrid()
self._eraseygrid()
self._erasedata()
self._eraselabels()
#--
  def graph(self, **kwopts):
    """ Graph the x-y data. Any old graph will be automatically erased.
        Parameters:
          See __init__().
        Return Value:
          None
        Raises:
          KeyError for any unknown graphing option name.
    """
    # graphing options (defaults; overridden by kwopts below)
    self.mOpts = {
      'xdata': [],
      'ydata': [],
      'fofx': None,
      'title':'y = f(x)',
      'xlabel': 'x',
      'ylabel': 'y',
      'xstep': 1,
      'ystep': 1,
      'linewidth': 1,
      'linecolor': gt.ColorBlue1,
      'showpoints': True,
      'domain': 'auto',
      'range': 'auto',
    }
    for key in kwopts:
      if key in self.mOpts:
        self.mOpts[key] = kwopts[key]
      else:
        raise KeyError('unknown x-y graph option: %s' % repr(key))
    # erase any old graph and grid
    self.erase()
    # initialize new graph data
    self.mXMin = 0.0
    self.mXMax = 0.0
    self.mYMin = 0.0
    self.mYMax = 0.0
    self.mDomSpan = 0.0
    self.mRanSpan = 0.0
    self.mData = []
    # new graph data
    self.mData = self._gendata(self.mOpts['xdata'], self.mOpts['ydata'])
    # find domain and range of data
    self._domain()  # x domain of data
    self._range()   # y range of data
    # now really graph the data and grid
    self._graph()
#--
def newdata(self, xdata=[], ydata=[]):
""" Graph new x-y data with current graphing options including
current axes and title labels.
Parmeters:
xdata - list of x data
ydata - list of y data cooresponding 1-1 with the x data.
(any preexisting fofx() overrides this list).
"""
# new raw data
self.mOpts['xdata'] = xdata
self.mOpts['ydata'] = ydata
# new graphing data
self.mData = self._gendata(self.mOpts['xdata'], self.mOpts['ydata'])
# find scopes of new data
hasNewDomain = self._domain() # x domain of data (sets XMin, XMax)
hasNewRange = self._range() # y range of dat (sets YMin, YMax)
self._erasedata() # erase old x-y graph data
# new domain and/or range, so re-grid
if hasNewDomain or hasNewRange:
self._erasexgrid() # erase x grid
self._eraseygrid() # erase y grid
self._xgrid() # draw the x-grid (and x-axis if in domain)
self._ygrid() # draw the y-grid (and y-axis if in range)
# draw the new x-y graph data
self._graphdata()
#--
def _erasexgrid(self):
""" Erase x grid and ticks. """
for id in self.mGidsXGrid:
self.mCanvas.delete(id)
self.mGidsXGrid = []
#--
def _eraseygrid(self):
""" Erase y grid and ticks. """
for id in self.mGidsYGrid:
self.mCanvas.delete(id)
self.mGidsYGrid = []
#--
def _erasedata(self):
""" Erase x-y data. """
for id in self.mGidsData:
self.mCanvas.delete(id)
self.mGidsData = []
#--
def _eraselabels(self):
""" Erase graph labels. """
for id in self.mGidsLabels:
self.mCanvas.delete(id)
self.mGidsLabels = []
#--
  def _graph(self):
    """ The graphing grunt: draw grids, labels, title, then the data. """
    self._xgrid()      # draw the x-grid (and x-axis if in domain)
    self._ygrid()      # draw the y-grid (and y-axis if in range)
    self._xaxislabel() # draw the x-axis label
    self._yaxislabel() # draw the y-axis label
    self._title()      # draw the graph title
    self._graphdata()  # draw the graph data
#--
def _gendata(self, xdata, ydata):
""" Generate the x-y data points. """
data = []
if self.mOpts['fofx']:
for x in xdata:
data.append(pt.pt(x, self.mOpts['fofx'](x)))
else:
m = len(xdata)
n = len(ydata)
if m > n:
m = n
i = 0
while i < m:
data.append(pt.pt(xdata[i], ydata[i]))
i += 1
return data
#--
def _xgrid(self):
""" Draw x grid lines and x grid labels. """
# nothing to grid
if self.mXMin == self.mXMax:
return
# current x grid label end pixel position
labelend = 0
# x grid line y start and end positions
ytick0 = self.mGridMinY-2
ytick1 = self.mGridMaxY+2
# x grid starting world value
x = math.ceil(self.mXMin/self.mOpts['xstep']) * self.mOpts['xstep']
# make the grid lines
while x <= self.mXMax:
# map to grid coordinates (don't care about y)
p = self._world2grid(pt.pt(x, 0.0))
# color the x origin grid line differently
if x == 0.0:
color = gt.ColorPink1
else:
color = gt.ColorBlack
# grid line
id = self.mCanvas.create_line((p.x, ytick0, p.x, ytick1), fill=color)
self.mGidsXGrid += [id]
# label the grid line
text = self._gridlabelfmt(x) % x
id = self.mCanvas.create_text((p.x, ytick1+1), fill=color,
text=text, anchor=tk.N)
rect = self.mCanvas.bbox(id)
# | |
que un\n" \
"valor muy alto (autolimitado) tomaría a\n" \
"todo el bache (entreno sin mini-baches)."
elif id == 40: # entreno - SGD - alfa
res = "Digite valor de parámetro encargado de la\n" \
"modificación de los pesos sinápticos en\n" \
"cada paso (alfa), obedece a la ecuación\n" \
"para cada peso:\n\n" \
"u = u . beta + de/dw\n" \
"w = w - alfa . u"
elif id == 41: # entreno - SGD - beta
res = "Digite valor de parámetro encargado de la\n" \
"fricción (beta) de la inercia involucrada\n" \
"en el cambio de cada peso sináptico, obedece\n" \
"a la ecuación para cada peso:\n\n" \
"u = u . beta + de/dw\n" \
"w = w - alfa . u\n\n" \
"Autolimitada a 1 como máximo.\n" \
"= 0 sin fricción."
elif id == 42: # entreno - DE - poblacion
res = "Digite cuántos individuos quiere que\n" \
"conformen la población que se reproducirá\n" \
"para buscar la optimización."
elif id == 43: # entreno - DE - h
res = "Digite valor de parámetro encargado de la\n" \
"fuerza de mutación, es deir, que tanto\n" \
"cambiará un gen (peso sináptico) al crear\n" \
"un hijo, la ecuación para cada peso es:\n\n" \
"v = x0 + h . (x1 - x2)\n" \
"u = v si rand < c sino n"
elif id == 44: # entreno - DE - c
res = "Digite valor de parámetro encargado de el\n" \
"porcentaje de recombinación, un valor de\n" \
"10% indica que el hijo tendrá ese porcentaje\n" \
"de pesos diferentes a los del padre (n), la\n" \
"ecuación para cada peso es:\n\n" \
"v = x0 + h . (x1 - x2)\n" \
"u = v si rand < c sino n\n\n" \
"= 0% será 1 / pesos sinápticos ó mínimo 0.01%"
elif id == 45: # entreno - PSO - particulas
res = "Digite cuántas partículas estarán\n" \
"interactuando para buscar la optimización."
elif id == 46: # entreno - PSO - c1
res = "Digite parámetro que representa la influencia\n" \
"de la mejor solución (b) hallada por la partícula\n" \
"(exploración), la ecuación para un peso es:\n\n" \
"v = v . c3 + rand . c1 . (b - p) + rand . c2 . (g - p)\n" \
"p = p + v"
elif id == 47: # entreno - PSO - c2
res = "Digite parámetro que representa la influencia\n" \
"de la solución global (g) hallada por las partículas\n" \
"(explotación), la ecuación para un peso es:\n\n" \
"v = v . c3 + rand . c1 . (b - p) + rand . c2 . (g - p)\n" \
"p = p + v"
elif id == 48: # entreno - PSO - c3
res = "Digite parámetro que representa la disminución\n" \
"de la velocidad de la partícula, se recomiendan\n" \
"valores altos, cercanos a 1 para evitar\n" \
"convergencia prematura, la ecuación es:\n\n" \
"v = v . c3 + rand . c1 . (b - p) + rand . c2 . (g - p)\n" \
"p = p + v"
elif id == 49: # all - tolerancia ECM
res = "Digite valor de tolerancia, refiere al ECM máximo\n" \
"permitido por el algoritmo de optimización de\n" \
"dendritas (disminución); es decir que se\n" \
"intentarán quitar hipercajas siempre y cuando\n" \
"ello No eleve el ECM más allá de la tolerancia."
elif id == 50: # inicializacion - check unir
res = "Unir efectuará un segundo ciclo en el que\n" \
"une a las hipercajas que puedan hacerlo, de\n" \
"este modo evita crear mas de las necesarias\n" \
"(ralentiza)."
elif id == 51: # postentreno - ejecutar red
res = "Ingresa a la red neuronal las entradas digitadas\n" \
"en la line de abajo, lo que arrojará como\n" \
"resultado una clase ganadora, la cual se\n" \
"mostrará abajo junto a su color asociado."
elif id == 52: # postentreno - unir dendritas
res = "Recorrerá a todas las dendritas (hipercajas)\n" \
"verificando si pueden unirse a otra de su\n" \
"misma clase, lo que reducirá el número de\n" \
"estas, siempre que el error No sobrepase\n" \
"la tolerancia."
elif id == 53: # postentreno - quitar dendritas
res = "Recorrerá a todas las dendritas (hipercajas)\n" \
"verificando si puede quitarlas (inhibirlas)\n" \
"sin que el error sobrepase la tolerancia."
elif id == 54: # postentreno - deshinibir dendritas
res = "Convierte a las dendritas quitadas (inhibidas)\n" \
"en activas, esto puede servir para re-entrenar\n" \
"teniendo más dendritas que mover, o como\n" \
"revertimiento del algoritmo de Quitar."
elif id == 55: # postentreno - eliminar inhibidas
res = "Para hacer a la red más ligera en memoria,\n" \
"entreno y archivo final, elimina\n" \
"permanentemente a las dendritas\n" \
"quitadas (inhibidas)."
elif id == 56: # postentreno - escr entradas
res = "Digite valores de entrada separados por comas,\n" \
"tantos como entradas tenga la red, estos\n" \
"pueden ser enteros o flotantes, con o sin\n" \
"espacios, ej: 34, 6, 12.8, 0"
elif id == 57: # postentreno - check normalizar
res = "Normalizar aplicará a las entradas ingresadas la\n" \
"misma transformación hecha al normalizar el set:\n" \
"- Min-Max.\n" \
"- Z-Score.\n\n" \
"Si No los normalizó, No habrá diferencia."
elif id == 58: # all - ERL
res = "Error de regresión logistica, las salidas\n" \
"Softmax (Y) van de 0 a 1, si la deseada es\n" \
"(d), la ecuación seria:\n\n" \
"e = -log10(max(Yd, 1.E-6)) / 6"
elif id == 59: # analisis - exportar matrices
res = "Guarda las matrices de confusión\n" \
"en formato (.xml), las 5:\n" \
"entreno, validación, testeo,\n" \
" No-entreno, general.\n\n" \
"No incluye análisis estadísticos\n" \
"ni errores ECM o ERL; incluye el\n" \
"número de dendritas."
elif id == 60: # analisis - txt precision
res = "Porcetaje de datos correctamente clasificados."
elif id == 61: # analisis - txt exactitud
res = "Probabilidad de que la predicción entregada por el\n" \
"detector realmente pertenezca a dicha clase."
elif id == 62: # analisis - txt sensibilidad
res = "Porcentaje de los datos de dicha clase\n" \
"que fueron correctamente clasificados."
elif id == 63: # analisis - txt especificidad
res = "Porcentaje de los datos No perteneciente\n" \
"a dicha clase que fueron correctamente\n" \
"clasificados como No pertenecientes."
elif id == 64: # analisis - txt valorF
res = "Media armónica entre la Exactitud y la\n" \
"Sensibilidad, su mejor valor es 100%, en\n" \
"otras palabras, esta puntuación relacióna\n" \
"a los dos valores mencionados."
elif id == 65: # analisis - txt kappa
res = "Grado de acuerdo entre dos mediciónes\n" \
"(salidas deseadas vs resultados obtenidos),\n" \
"toma en consideración al azar; un valor\n" \
"mayor a 0.6 es buena concordancia y mayor\n" \
"a 0.8 muy buena."
elif id == 66: # analisis - archivo resultados
res = "Guarda en formato (.txt) o (.xml) la lista de\n" \
"valores deseados y obtenidos para todos los\n" \
"patrones, esto es análogo a la función de\n" \
"ejecución del GUI post-entreno, solo que\n" \
"para muchos datos; puede ingresar como\n" \
"patrones una lista y luego usar esto."
elif id == 67: # problema - check zscore minmax
res = "Seleccióne uno de los dos algoritmos:\n\n" \
"- Min-Max:\n" \
"v = ((v - min) / (max - min)) * 2 - 1\n\n" \
"- Z-Score:\n" \
"v = (v - prom) / desv_std"
return res
# a continuacion las funciones de graficacion
def GEntrenamiento(id, cambioDendras):
| |
# Source: Algorithmic-Alignment-Lab/openTAMP (GitHub stars: 1-10)
from opentamp.core.util_classes.common_predicates import ExprPredicate
from opentamp.core.util_classes.openrave_body import OpenRAVEBody
import opentamp.core.util_classes.transform_utils as T
import opentamp.core.util_classes.common_constants as const
from opentamp.errors_exceptions import PredicateException
from opentamp.core.util_classes import robot_sampling
from sco_py.expr import Expr, AffExpr, EqExpr, LEqExpr
import itertools
from collections import OrderedDict
import numpy as np
import pybullet as p
import sys
import traceback
import time
# default numeric tolerance for predicate satisfaction checks (presumably
# meters/radians — confirm against the predicate implementations)
DEFAULT_TOL = 1e-3
# looser tolerance used by "near" position predicates
NEAR_TOL = 1e-1 # 2e-2
# rotational counterpart of NEAR_TOL for "near" orientation checks
NEAR_ROT_TOL = 0.2
### HELPER FUNCTIONS
def init_robot_pred(pred, robot, params=[], robot_poses=[], attrs=None):
    """
    Initializes pred.attr_inds and pred.attr_dim from the robot's geometry.

    pred        - predicate object to configure (attributes are set on it)
    robot       - parameter whose .geom describes arms, joints, end effectors
    params      - extra parameters; their const.ATTRMAP entries are appended
                  (read-only here, so the [] default is safe)
    robot_poses - pose parameters that mirror the robot's indices
    attrs       - optional {param: [attr, ...]} restriction map

    Returns (pred.attr_inds, pred.attr_dim).
    """
    # Bug fix: ``attrs`` previously defaulted to a shared mutable {} that is
    # mutated below (attrs[robot] = ...), leaking state across calls.
    if attrs is None:
        attrs = {}
    r_geom = robot.geom
    if robot not in attrs:
        attrs[robot] = ['pose', 'rotation'] + r_geom.arms \
                       + [r_geom.ee_link_names[arm] for arm in r_geom.arms] \
                       + r_geom.ee_attrs
    # dimension bookkeeping: base pose/rotation, joints per arm, gripper
    # dofs, and 3 per end-effector attribute — honoring the attrs filter
    base_dim = 3 if not len(attrs[robot]) or 'pose' in attrs[robot] else 0
    base_dim += 3 if not len(attrs[robot]) or 'rotation' in attrs[robot] else 0
    arm_dims = sum([len(r_geom.jnt_names[arm]) for arm in r_geom.arms
                    if not len(attrs[robot]) or arm in attrs[robot]])
    gripper_dims = sum([r_geom.gripper_dim(arm) for arm in r_geom.arms
                        if not len(attrs[robot]) or r_geom.ee_link_names[arm] in attrs[robot]])
    ee_dims = sum([3 for attr in r_geom.ee_attrs
                   if not len(attrs[robot]) or attr in attrs[robot]])
    cur_attr_dim = base_dim + arm_dims + gripper_dims + ee_dims
    # each mirrored pose parameter repeats the robot's indices
    cur_attr_dim *= 1 + len(robot_poses)
    attr_inds = OrderedDict()
    robot_inds = []  # (attr, index-array) pairs for the robot itself
    pose_inds = []   # matching pairs for each mirrored pose parameter
    attr_inds[robot] = robot_inds
    # NOTE: np.int was removed in NumPy 1.24; plain int is the equivalent.
    for attr in attrs[robot]:
        if attr in r_geom.jnt_names:
            njnts = len(r_geom.jnt_names[attr])
            robot_inds.append((attr, np.array(range(njnts), dtype=int)))
            if len(robot_poses): pose_inds.append((attr, np.array(range(njnts), dtype=int)))
        elif attr.find('ee_pos') >= 0:
            robot_inds.append((attr, np.array(range(3), dtype=int)))
            if len(robot_poses): pose_inds.append((attr, np.array(range(3), dtype=int)))
        elif attr.find('ee_rot') >= 0:
            robot_inds.append((attr, np.array(range(3), dtype=int)))
            if len(robot_poses): pose_inds.append((attr, np.array(range(3), dtype=int)))
        elif attr == 'pose':
            robot_inds.append((attr, np.array(range(3), dtype=int)))
            # pose parameters store their position under 'value'
            if len(robot_poses): pose_inds.append(('value', np.array(range(3), dtype=int)))
        elif attr == 'rotation':
            robot_inds.append((attr, np.array(range(3), dtype=int)))
            if len(robot_poses): pose_inds.append(('rotation', np.array(range(3), dtype=int)))
    if len(robot_poses):
        for pose in robot_poses:
            attr_inds[pose] = pose_inds
    for p in params:
        attr_inds[p] = [(attr, inds) for (attr, inds) in const.ATTRMAP[p._type]
                        if p not in attrs or attr in attrs[p]]
        for (attr, inds) in attr_inds[p]:
            cur_attr_dim += len(inds)
    pred.attr_inds = attr_inds
    pred.attr_dim = cur_attr_dim
    return pred.attr_inds, pred.attr_dim
def parse_collision(c, obj_body, obstr_body, held_links=(), obs_links=()):
    """Parse a pybullet contact-point tuple between an object and an obstruction.

    Args:
        c: pybullet contact point (indexable): c[1]/c[2] are body unique ids,
            c[3]/c[4] link indices, c[5]/c[6] contact positions on A/B,
            c[7] the contact normal on B (pointing towards A), c[8] the
            contact distance.
        obj_body: body wrapper with a ``body_id`` attribute for the object.
        obstr_body: body wrapper with a ``body_id`` attribute for the obstruction.
        held_links: optional whitelist of object link indices; when non-empty,
            contacts on other links are ignored.
        obs_links: optional whitelist of obstruction link indices; when
            non-empty, contacts on other links are ignored.

    Returns:
        (distance, normal, linkObj, linkObstr, ptObj, ptObstr) with numpy
        arrays for distance/normal/points, or None when the contact does not
        involve the two given bodies or is filtered out by the link lists.
    """
    # NOTE: defaults changed from mutable [] to () — behavior-identical (only
    # len() / `in` are used) and avoids the shared-mutable-default pitfall.
    linkA, linkB = c[3], c[4]
    linkAParent, linkBParent = c[1], c[2]
    if linkAParent == obj_body.body_id and linkBParent == obstr_body.body_id:
        ptObj, ptObstr = c[5], c[6]
        linkObj, linkObstr = linkA, linkB
    elif linkBParent == obj_body.body_id and linkAParent == obstr_body.body_id:
        ptObj, ptObstr = c[6], c[5]
        linkObj, linkObstr = linkB, linkA
    else:
        # Contact does not involve the two bodies of interest.
        return None
    if (len(held_links) and linkObj not in held_links) or (len(obs_links) and linkObstr not in obs_links):
        return None
    # Obtain distance between two collision points, and their normal collision vector
    distance = np.array(c[8])  # c.contactDistance
    normal = np.array(c[7])  # c.contactNormalOnB # Pointing towards A
    ptObj = np.array(ptObj)
    ptObstr = np.array(ptObstr)
    return distance, normal, linkObj, linkObstr, ptObj, ptObstr
def parse_robot_collision(c, robot, robot_body, obj_body, col_links=(), obj_links=()):
    """Parse a pybullet contact-point tuple between the robot and an object.

    In addition to the geometric contact information, computes the robot's
    positional and angular Jacobians at the contact point on the robot.

    Args:
        c: pybullet contact point (indexable), same layout as in
            ``parse_collision``.
        robot: robot parameter (unused here; kept for interface compatibility).
        robot_body: body wrapper with ``body_id`` and ``_geom`` attributes.
        obj_body: body wrapper with a ``body_id`` attribute.
        col_links: optional whitelist of robot link indices.
        obj_links: optional whitelist of object link indices.

    Returns:
        (distance, normal, linkRobot, linkObj, ptRobot, ptObj, robot_jac,
        robot_ang_jac), or None when the contact does not involve the two
        bodies or is filtered out by the link lists.
    """
    # NOTE: defaults changed from mutable [] to () — behavior-identical (only
    # len() / `in` are used) and avoids the shared-mutable-default pitfall.
    linkA, linkB = c[3], c[4]  # c.linkIndexA, c.linkIndexB
    linkAParent, linkBParent = c[1], c[2]  # c.bodyUniqueIdA, c.bodyUniqueIdB
    if linkAParent == robot_body.body_id and linkBParent == obj_body.body_id:
        ptRobot, ptObj = c[5], c[6]  # c.positionOnA, c.positionOnB
        linkRobot, linkObj = linkA, linkB
    elif linkBParent == robot_body.body_id and linkAParent == obj_body.body_id:
        ptRobot, ptObj = c[6], c[5]  # c.positionOnB, c.positionOnA
        linkRobot, linkObj = linkB, linkA
    else:
        return None
    if (len(col_links) and linkRobot not in col_links) or (len(obj_links) and linkObj not in obj_links):
        return None
    distance = c[8]  # c.contactDistance
    normal = c[7]  # c.contactNormalOnB # Pointing towards A
    jnts = robot_body._geom.get_free_jnts()
    n_jnts = len(jnts)
    # Jacobian of the contact point w.r.t. the robot's free joints; zero
    # velocities/accelerations give the purely kinematic Jacobian.
    robot_jac, robot_ang_jac = p.calculateJacobian(robot_body.body_id,
                                                   linkRobot,
                                                   ptRobot,
                                                   objPositions=jnts,
                                                   objVelocities=np.zeros(n_jnts).tolist(),
                                                   objAccelerations=np.zeros(n_jnts).tolist())
    normal = np.array(normal)
    ptRobot = np.array(ptRobot)
    ptObj = np.array(ptObj)
    # Negated so the Jacobian points along the normal towards the robot.
    robot_jac = -np.array(robot_jac)
    robot_ang_jac = np.array(robot_ang_jac)
    # PyBullet adds the first 6 indices if the base is floating
    if robot_jac.shape[-1] != n_jnts:
        robot_jac = robot_jac[:, 6:]
        robot_ang_jac = robot_ang_jac[6:]
    return distance, normal, linkRobot, linkObj, ptRobot, ptObj, robot_jac, robot_ang_jac
def compute_arm_pos_jac(arm_joints, robot_body, pos):
    """Positional Jacobian of world point ``pos`` w.r.t. the given arm joints.

    For each (assumed revolute) joint, the column is axis x (pos - joint_pos)
    with the joint axis rotated into the world frame.

    Returns a (3, len(arm_joints)) numpy array.
    """
    def _column(jnt_id):
        # Joint axis expressed in the joint-local frame.
        local_axis = p.getJointInfo(robot_body.body_id, jnt_id)[13]
        link_state = p.getLinkState(robot_body.body_id, jnt_id)
        jnt_pos = np.array(link_state[0])
        # Rotate the axis into the world frame via the link orientation.
        world_axis = T.quat2mat(link_state[1]).dot(local_axis)
        return np.cross(world_axis, pos - jnt_pos)

    return np.array([_column(jnt_id) for jnt_id in arm_joints]).T
def compute_arm_rot_jac(arm_joints, robot_body, obj_dir, world_dir, sign=1.):
    """Rotational Jacobian terms obj_dir . (axis x sign*world_dir) per joint.

    Returns a (-1, len(arm_joints))-shaped numpy array.
    """
    columns = []
    for jnt_id in arm_joints:
        # World-frame joint axis: local axis rotated by the link orientation.
        local_axis = p.getJointInfo(robot_body.body_id, jnt_id)[13]
        quat = p.getLinkState(robot_body.body_id, jnt_id)[1]
        world_axis = T.quat2mat(quat).dot(local_axis)
        columns.append(np.dot(obj_dir, np.cross(world_axis, sign * world_dir)))
    return np.array(columns).reshape((-1, len(arm_joints)))
### BASE CLASSES
class RobotPredicate(ExprPredicate):
"""
Super-class for all robot predicates, defines several required functions
"""
def __init__(self, name, expr, attr_inds, params, expected_param_types, env=None, active_range=(0,0), tol=DEFAULT_TOL, priority=0):
if not hasattr(self, 'arm') and hasattr(params[0].geom, 'arm'): self.arm = params[0].geom.arms[0]
super(RobotPredicate, self).__init__(name, expr, attr_inds, params, expected_param_types, tol=tol, priority = priority, active_range=active_range)
self._init_include = False
def get_robot_info(self, robot_body, arm):
arm_inds = robot_body._geom.get_arm_inds(arm)
ee_link = robot_body._geom.get_ee_link(arm)
info = p.getLinkState(robot_body.body_id, ee_link)
pos, rot = info[0], info[1]
robot_trans = OpenRAVEBody.transform_from_obj_pose(pos, rot)
return robot_trans, arm_inds
    def set_robot_poses(self, x, robot_body):
        """Write the flattened state vector ``x`` into the simulated bodies.

        Sets the robot's joint DOFs plus base pose/rotation from ``x``, and
        also poses the obj / targ / obstacle parameters when this predicate
        holds references to them, so subsequent forward-kinematics queries
        reflect ``x``.
        """
        # Provide functionality of setting robot poses
        geom = robot_body._geom
        dof_value_map = {}
        if hasattr(self, 'attr_inds'):
            # Use the predicate's own attribute layout when available; skip
            # attributes that are not robot DOFs (e.g. pose/rotation).
            for attr, inds in self.attr_inds[self.robot]:
                if attr not in self.robot.geom.dof_map: continue
                dof_value_map[attr] = x[self.attr_map[self.robot, attr]]
        else:
            # Fall back to the geometry's default DOF index layout.
            for dof, dof_ind in geom.get_dof_inds():
                dof_value_map[dof] = x[dof_ind].flatten()
        robot_body.set_dof(dof_value_map)
        robot_body.set_pose(x[self.attr_map[self.robot, 'pose']].flatten(),\
                            x[self.attr_map[self.robot, 'rotation']].flatten(),)
        if hasattr(self, 'obj'):
            pos = x[self.attr_map[self.obj, 'pose']]
            rot = x[self.attr_map[self.obj, 'rotation']]
            self.obj.openrave_body.set_pose(pos, rot)
        elif hasattr(self, 'targ') and hasattr(self.targ, 'openrave_body'):
            if self.targ.openrave_body is not None:
                # Targets store their position under 'value' rather than 'pose'.
                pos = x[self.attr_map[self.targ, 'value']]
                rot = x[self.attr_map[self.targ, 'rotation']]
                self.targ.openrave_body.set_pose(pos, rot)
        if hasattr(self, 'obstacle'):
            pos = x[self.attr_map[self.obstacle, 'pose']]
            rot = x[self.attr_map[self.obstacle, 'rotation']]
            self.obstacle.openrave_body.set_pose(pos, rot)
    def robot_obj_kinematics(self, x):
        """Pose the bodies from ``x`` and return kinematic quantities.

        Returns:
            (obj_trans, robot_trans, axises, arm_inds) where obj_trans is the
            target/object transform built from the tail of ``x``, robot_trans
            the end-effector transform for self.arm, axises the rotated
            z/y/x axes of the target frame, and arm_inds the arm joint
            indices.
        """
        # Getting the variables
        robot_body = self.robot.openrave_body
        # Setting the poses for forward kinematics to work
        self.set_robot_poses(x, robot_body)
        robot_trans, arm_inds = self.get_robot_info(robot_body, self.arm)
        # Assume target information is in the tail
        if hasattr(self, 'obj'):
            pos_inds, rot_inds = self.attr_map[self.obj, 'pose'], self.attr_map[self.obj, 'rotation']
            ee_pos, ee_rot = x[pos_inds], x[rot_inds]
        elif hasattr(self, 'targ'):
            # Targets store position under 'value' rather than 'pose'.
            pos_inds, rot_inds = self.attr_map[self.targ, 'value'], self.attr_map[self.targ, 'rotation']
            ee_pos, ee_rot = x[pos_inds], x[rot_inds]
        elif hasattr(self, 'ee_ref') and self.ee_ref:
            # Use the robot's own per-arm end-effector reference attributes.
            pos_inds, rot_inds = self.attr_map[self.robot, '{}_ee_pos'.format(self.arm)], self.attr_map[self.robot, '{}_ee_rot'.format(self.arm)]
            ee_pos, ee_rot = x[pos_inds], x[rot_inds]
        else:
            ee_pos, ee_rot = np.zeros((3,1)), np.zeros((3,1))
        ee_rot = ee_rot.flatten()
        # NOTE(review): the rotation components are passed reversed
        # ([2],[1],[0]); this appears to convert the stored order into the
        # helper's expected Euler order — confirm against OpenRAVEBody.
        obj_trans = OpenRAVEBody.transform_from_obj_pose(ee_pos, [ee_rot[2], ee_rot[1], ee_rot[0]])
        #obj_trans = OpenRAVEBody.transform_from_obj_pose(ee_pos, [ee_rot[0], ee_rot[1], ee_rot[2]])
        Rz, Ry, Rx = OpenRAVEBody._axis_rot_matrices(ee_pos, [ee_rot[2], ee_rot[1], ee_rot[0]])
        axises = [[0,0,1], np.dot(Rz, [0,1,0]), np.dot(Rz, np.dot(Ry, [1,0,0]))] # axises = [axis_z, axis_y, axis_x]
        # Obtain the pos and rot val and jac from 2 function calls
        return obj_trans, robot_trans, axises, arm_inds
def setup_mov_limit_check(self, delta=False, ee_only=False):
# Get upper joint limit and lower joint limit
robot_body = self._param_to_body[self.robot]
geom = robot_body._geom
dof_map = geom.dof_map
if ee_only:
dof_inds = np.concatenate([dof_map[arm] for arm in geom.arms])
lb = np.zeros(3)
ub = np.zeros(3)
else:
dof_inds = np.concatenate([dof_map[arm] for arm in geom.arms])
lb = np.zeros(6+sum([len(dof_map[arm]) for arm in geom.arms])+sum([geom.gripper_dim(arm) for arm in geom.arms]))
ub = np.zeros(6+sum([len(dof_map[arm]) for arm in geom.arms])+sum([geom.gripper_dim(arm) for arm in geom.arms]))
if delta:
base_move = geom.get_base_move_limit()
base_move = [-base_move, base_move]
else:
base_move = geom.get_base_limit()
cur_ind = 0
for attr, inds in self.attr_inds[self.robot]:
ninds = len(inds)
if attr == 'pose':
lb[cur_ind:cur_ind+ninds] = base_move[0]
ub[cur_ind:cur_ind+ninds] = base_move[1]
elif attr == 'rotation':
lb[cur_ind:cur_ind+ninds] = -4 * np.pi
ub[cur_ind:cur_ind+ninds] = 4 * np.pi
elif attr in geom.arms:
arm_lb, arm_ub = geom.get_joint_limits(attr)
lb[cur_ind:cur_ind+ninds] = arm_lb
ub[cur_ind:cur_ind+ninds] = arm_ub
elif attr in geom.ee_link_names.values():
if delta:
gripper_lb, gripper_ub = -10, 10
else:
gripper_lb = -1 # geom.get_gripper_closed_val()
gripper_ub = 1 # geom.get_gripper_open_val()
lb[cur_ind:cur_ind+ninds] = gripper_lb
ub[cur_ind:cur_ind+ninds] = gripper_ub
elif ee_only:
lb[cur_ind:cur_ind+ninds] = self.lb * geom.get_joint_move_factor()
ub[cur_ind:cur_ind+ninds] = self.ub * geom.get_joint_move_factor()
cur_ind += ninds
'''
inds = geom.dof_inds['pose']
lb[inds] = base_move[0]
ub[inds] = base_move[1]
for arm in geom.arms:
arm_lb, arm_ub = geom.get_joint_limits(arm)
inds = geom.dof_inds[arm]
lb[inds] = arm_lb
ub[inds] = arm_ub
gripper = geom.ee_link_names[arm]
gripper_lb = geom.get_gripper_closed_val()
gripper_ub = geom.get_gripper_open_val()
inds = geom.dof_inds[gripper]
lb[inds] = gripper_lb
ub[inds] = gripper_ub
'''
if delta:
joint_move = (ub-lb)/geom.get_joint_move_factor()
# Setup the Equation so that: Ax+b < val represents
# |base_pose_next - base_pose| <= const.BASE_MOVE
# |joint_next - joint| <= joint_movement_range/const.JOINT_MOVE_FACTOR
val = np.concatenate((joint_move, joint_move)).reshape((-1,1))
A = np.eye(len(val)) - np.eye(len(val), k=len(val)//2) - np.eye(len(val), k=-len(val)//2)
self.base_step = base_move
self.joint_step = joint_move
else:
val = np.concatenate((-lb, ub)).reshape((-1,1))
A_lb_limit = -np.eye(len(lb))
A_ub_limit = np.eye(len(ub))
A = np.vstack((A_lb_limit, A_ub_limit))
| |
'builtins.object':
return
# Plausibly the method could have too few arguments, which would result
# in an error elsewhere.
if len(typ.arg_types) <= 2:
# TODO check self argument kind
# Check for the issue described above.
arg_type = typ.arg_types[1]
other_method = nodes.normal_from_reverse_op[method]
if isinstance(arg_type, Instance):
if not arg_type.type.has_readable_member(other_method):
return
elif isinstance(arg_type, AnyType):
return
elif isinstance(arg_type, UnionType):
if not arg_type.has_readable_member(other_method):
return
else:
return
typ2 = self.expr_checker.analyze_external_member_access(
other_method, arg_type, defn)
self.check_overlapping_op_methods(
typ, method, defn.info,
typ2, other_method, cast(Instance, arg_type),
defn)
    def check_overlapping_op_methods(self,
                                     reverse_type: CallableType,
                                     reverse_name: str,
                                     reverse_class: TypeInfo,
                                     forward_type: Type,
                                     forward_name: str,
                                     forward_base: Instance,
                                     context: Context) -> None:
        """Check for overlapping method and reverse method signatures.

        Assume reverse method has valid argument count and kinds.
        Reports an error via self.msg when the two signatures overlap
        unsafely (or when the forward operator is not callable).
        """
        # Reverse operator method that overlaps unsafely with the
        # forward operator method can result in type unsafety. This is
        # similar to overlapping overload variants.
        #
        # This example illustrates the issue:
        #
        #   class X: pass
        #   class A:
        #       def __add__(self, x: X) -> int:
        #           if isinstance(x, X):
        #               return 1
        #           return NotImplemented
        #   class B:
        #       def __radd__(self, x: A) -> str: return 'x'
        #   class C(X, B): pass
        #   def f(b: B) -> None:
        #       A() + b # Result is 1, even though static type seems to be str!
        #   f(C())
        #
        # The reason for the problem is that B and X are overlapping
        # types, and the return types are different. Also, if the type
        # of x in __radd__ would not be A, the methods could be
        # non-overlapping.
        if isinstance(forward_type, CallableType):
            # TODO check argument kinds
            if len(forward_type.arg_types) < 1:
                # Not a valid operator method -- can't succeed anyway.
                return
            # Construct normalized function signatures corresponding to the
            # operator methods. The first argument is the left operand and the
            # second operand is the right argument -- we switch the order of
            # the arguments of the reverse method.
            forward_tweaked = CallableType(
                [forward_base, forward_type.arg_types[0]],
                [nodes.ARG_POS] * 2,
                [None] * 2,
                forward_type.ret_type,
                forward_type.fallback,
                name=forward_type.name)
            reverse_args = reverse_type.arg_types
            reverse_tweaked = CallableType(
                [reverse_args[1], reverse_args[0]],
                [nodes.ARG_POS] * 2,
                [None] * 2,
                reverse_type.ret_type,
                fallback=self.named_type('builtins.function'),
                name=reverse_type.name)
            # With both signatures normalized to (left, right), an unsafe
            # overlap is detected the same way as for overload variants.
            if is_unsafe_overlapping_signatures(forward_tweaked,
                                                reverse_tweaked):
                self.msg.operator_method_signatures_overlap(
                    reverse_class.name(), reverse_name,
                    forward_base.type.name(), forward_name, context)
        elif isinstance(forward_type, Overloaded):
            # Check every overload item of the forward method separately.
            for item in forward_type.items():
                self.check_overlapping_op_methods(
                    reverse_type, reverse_name, reverse_class,
                    item, forward_name, forward_base, context)
        elif not isinstance(forward_type, AnyType):
            # A non-callable (and non-Any) forward operator is itself an error.
            self.msg.forward_operator_not_callable(forward_name, context)
    def check_inplace_operator_method(self, defn: FuncBase) -> None:
        """Check an inplace operator method such as __iadd__.

        They cannot arbitrarily overlap with __add__.
        """
        method = defn.name()
        if method not in nodes.inplace_operator_methods:
            return
        typ = self.method_type(defn)
        cls = defn.info
        # Derive the non-inplace counterpart: '__iadd__' -> '__add__'.
        other_method = '__' + method[3:]
        if cls.has_readable_member(other_method):
            instance = self_type(cls)
            typ2 = self.expr_checker.analyze_external_member_access(
                other_method, instance, defn)
            fail = False
            if isinstance(typ2, FunctionLike):
                # The inplace variant must accept at least what the plain
                # operator accepts (argument-prefix generality check).
                if not is_more_general_arg_prefix(typ, typ2):
                    fail = True
            else:
                # TODO overloads
                fail = True
            if fail:
                self.msg.signatures_incompatible(method, other_method, defn)
def check_getattr_method(self, typ: CallableType, context: Context) -> None:
method_type = CallableType([AnyType(), self.named_type('builtins.str')],
[nodes.ARG_POS, nodes.ARG_POS],
[None],
AnyType(),
self.named_type('builtins.function'))
if not is_subtype(typ, method_type):
self.msg.invalid_signature(typ, context)
def expand_typevars(self, defn: FuncItem,
typ: CallableType) -> List[Tuple[FuncItem, CallableType]]:
# TODO use generator
subst = [] # type: List[List[Tuple[TypeVarId, Type]]]
tvars = typ.variables or []
tvars = tvars[:]
if defn.info:
# Class type variables
tvars += defn.info.defn.type_vars or []
for tvar in tvars:
if tvar.values:
subst.append([(tvar.id, value)
for value in tvar.values])
if subst:
result = [] # type: List[Tuple[FuncItem, CallableType]]
for substitutions in itertools.product(*subst):
mapping = dict(substitutions)
expanded = cast(CallableType, expand_type(typ, mapping))
result.append((expand_func(defn, mapping), expanded))
return result
else:
return [(defn, typ)]
def check_method_override(self, defn: FuncBase) -> None:
"""Check if function definition is compatible with base classes."""
# Check against definitions in base classes.
for base in defn.info.mro[1:]:
self.check_method_or_accessor_override_for_base(defn, base)
def check_method_or_accessor_override_for_base(self, defn: FuncBase,
base: TypeInfo) -> None:
"""Check if method definition is compatible with a base class."""
if base:
name = defn.name()
if name not in ('__init__', '__new__'):
# Check method override (__init__ and __new__ are special).
self.check_method_override_for_base_with_name(defn, name, base)
if name in nodes.inplace_operator_methods:
# Figure out the name of the corresponding operator method.
method = '__' + name[3:]
# An inplace operator method such as __iadd__ might not be
# always introduced safely if a base class defined __add__.
# TODO can't come up with an example where this is
# necessary; now it's "just in case"
self.check_method_override_for_base_with_name(defn, method,
base)
    def check_method_override_for_base_with_name(
            self, defn: FuncBase, name: str, base: TypeInfo) -> None:
        """Check ``defn`` against the member called ``name`` in ``base``.

        Reports an error when the overriding signature is incompatible with
        the (mapped) supertype signature, or when the base member is not
        function-like.
        """
        base_attr = base.names.get(name)
        if base_attr:
            # The name of the method is defined in the base class.
            # Construct the type of the overriding method.
            typ = self.method_type(defn)
            # Map the overridden method type to subtype context so that
            # it can be checked for compatibility.
            original_type = base_attr.type
            if original_type is None:
                # Recover the type from the node when it was not stored.
                if isinstance(base_attr.node, FuncDef):
                    original_type = self.function_type(base_attr.node)
                elif isinstance(base_attr.node, Decorator):
                    original_type = self.function_type(base_attr.node.func)
                else:
                    assert False, str(base_attr.node)
            if isinstance(original_type, FunctionLike):
                original = map_type_from_supertype(
                    method_type(original_type),
                    defn.info, base)
                # Check that the types are compatible.
                # TODO overloaded signatures
                self.check_override(typ,
                                    cast(FunctionLike, original),
                                    defn.name(),
                                    name,
                                    base.name(),
                                    defn)
            else:
                # Base member is not function-like: any override is flagged.
                self.msg.signature_incompatible_with_supertype(
                    defn.name(), name, base.name(), defn)
    def check_override(self, override: FunctionLike, original: FunctionLike,
                       name: str, name_in_super: str, supertype: str,
                       node: Context) -> None:
        """Check a method override with given signatures.

        Arguments:
          override: The signature of the overriding method.
          original: The signature of the original supertype method.
          name: The name of the overriding method (used for error messages).
          name_in_super: The name of the overridden method in the supertype.
          supertype: The name of the supertype.
          node: Context used for error reporting.
        """
        # Use boolean variable to clarify code.
        fail = False
        if not is_subtype(override, original):
            fail = True
        elif (not isinstance(original, Overloaded) and
              isinstance(override, Overloaded) and
              name in nodes.reverse_op_methods.keys()):
            # Operator method overrides cannot introduce overloading, as
            # this could be unsafe with reverse operator methods.
            fail = True
        if fail:
            emitted_msg = False
            if (isinstance(override, CallableType) and
                    isinstance(original, CallableType) and
                    len(override.arg_types) == len(original.arg_types) and
                    override.min_args == original.min_args):
                # Give more detailed messages for the common case of both
                # signatures having the same number of arguments and no
                # overloads.

                # override might have its own generic function type
                # variables. If an argument or return type of override
                # does not have the correct subtyping relationship
                # with the original type even after these variables
                # are erased, then it is definitely an incompatiblity.

                override_ids = override.type_var_ids()

                def erase_override(t: Type) -> Type:
                    return erase_typevars(t, ids_to_erase=override_ids)

                # Arguments are contravariant: each original argument type
                # must be a subtype of the (erased) override argument type.
                for i in range(len(override.arg_types)):
                    if not is_subtype(original.arg_types[i],
                                      erase_override(override.arg_types[i])):
                        self.msg.argument_incompatible_with_supertype(
                            i + 1, name, name_in_super, supertype, node)
                        emitted_msg = True

                # Return types are covariant: the (erased) override return
                # type must be a subtype of the original return type.
                if not is_subtype(erase_override(override.ret_type),
                                  original.ret_type):
                    self.msg.return_type_incompatible_with_supertype(
                        name, name_in_super, supertype, node)
                    emitted_msg = True

            if not emitted_msg:
                # Fall back to generic incompatibility message.
                self.msg.signature_incompatible_with_supertype(
                    name, name_in_super, supertype, node)
    def visit_class_def(self, defn: ClassDef) -> Type:
        """Type check a class definition."""
        typ = defn.info
        self.errors.push_type(defn.name)
        self.enter_partial_types()
        # Type check the class body with a fresh binder so conditional type
        # narrowing from the enclosing scope does not leak into the class.
        old_binder = self.binder
        self.binder = ConditionalTypeBinder()
        with self.binder.frame_context():
            self.accept(defn.defs)
        self.binder = old_binder
        if not defn.has_incompatible_baseclass:
            # Otherwise we've already found errors; more errors are not useful
            self.check_multiple_inheritance(typ)
        self.leave_partial_types()
        self.errors.pop_type()
    def check_multiple_inheritance(self, typ: TypeInfo) -> None:
        """Check for multiple inheritance related errors."""
        if len(typ.bases) <= 1:
            # No multiple inheritance.
            return
        # Verify that inherited attributes are compatible.
        mro = typ.mro[1:]
        for i, base in enumerate(mro):
            # Compare each attribute of each base against all later bases.
            for name in base.names:
                for base2 in mro[i + 1:]:
                    # We only need to check compatibility of attributes from classes not
                    # in a subclass relationship. For subclasses, normal (single inheritance)
                    # checks suffice (these are implemented elsewhere).
                    if name in base2.names and base2 not in base.mro:
                        self.check_compatibility(name, base, base2, typ)
        # Verify that base class layouts are compatible: any two builtin
        # ancestors must be related by subclassing.
        builtin_bases = [nearest_builtin_ancestor(base.type)
                         for base in typ.bases]
        for base1 in builtin_bases:
            for base2 in builtin_bases:
                if not (base1 in base2.mro or base2 in base1.mro):
                    self.fail(messages.INSTANCE_LAYOUT_CONFLICT, typ)
def check_compatibility(self, name: str, base1: TypeInfo,
base2: TypeInfo, ctx: Context) -> None:
"""Check if attribute name in base1 is compatible with base2 in multiple inheritance.
Assume base1 comes before base2 in the MRO, and that base1 and base2 don't have
a direct subclass relationship (i.e., the compatibility requirement only derives from
multiple inheritance).
"""
if name | |
'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
im = np.array(f.get_node('/' + image_id))
im = np.swapaxes(im, 0, 2)
im = np.swapaxes(im, 1, 2)
X_val.append(im)
X_val = np.array(X_val)
y_val = []
fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
with tb.open_file(fn_mask, 'r') as f:
for idx, image_id in enumerate(df_test.ImageId.tolist()):
mask = np.array(f.get_node('/' + image_id))
mask = (mask > 0.5).astype(np.uint8)
y_val.append(mask)
y_val = np.array(y_val)
y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
return X_val, y_val
def _get_valtrain_mul_data(area_id):
    """Load val-train multispectral images and binary masks for an area.

    Returns (X_val, y_val): channel-first image array and masks reshaped to
    (-1, 1, INPUT_SIZE, INPUT_SIZE).
    """
    prefix = area_id_to_prefix(area_id)
    image_ids = pd.read_csv(
        FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix)).ImageId.tolist()
    # Images: stored HWC, converted to CHW via two axis swaps.
    with tb.open_file(FMT_VALTRAIN_MUL_STORE.format(prefix), 'r') as f:
        X_val = np.array([
            np.swapaxes(np.swapaxes(np.array(f.get_node('/' + image_id)), 0, 2), 1, 2)
            for image_id in image_ids])
    # Masks: binarized at 0.5 and cast to uint8.
    with tb.open_file(FMT_VALTRAIN_MASK_STORE.format(prefix), 'r') as f:
        y_val = np.array([
            (np.array(f.get_node('/' + image_id)) > 0.5).astype(np.uint8)
            for image_id in image_ids])
    y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_val, y_val
def get_mul_mean_image(area_id):
    """Read the stored per-band mean multispectral image for an area."""
    fn = FMT_MULMEAN.format(area_id_to_prefix(area_id))
    with tb.open_file(fn, 'r') as f:
        return np.array(f.get_node('/mulmean'))
def preproc_stage3(area_id):
    """Stage-3 preprocessing: ensure mul-image stores exist and build the
    per-band mean image used for mean-subtraction at train/predict time.
    """
    prefix = area_id_to_prefix(area_id)
    # Build the val-split and full train/test multispectral stores if missing.
    if not Path(FMT_VALTEST_MUL_STORE.format(prefix)).exists():
        valtrain_test_mul_image_prep(area_id)
    if not Path(FMT_TEST_MUL_STORE.format(prefix)).exists():
        train_test_mul_image_prep(area_id)

    # mean image for subtract preprocessing
    X1, _ = _get_train_mul_data(area_id)
    X2 = _get_test_mul_data(area_id)
    X = np.vstack([X1, X2])
    print(X.shape)
    # Mean over all train+test images, per band and pixel.
    X_mean = X.mean(axis=0)

    fn = FMT_MULMEAN.format(prefix)
    logger.info("Prepare mean image: {}".format(fn))
    # Store compressed (blosc) in an HDF5 carray node '/mulmean'.
    with tb.open_file(fn, 'w') as f:
        atom = tb.Atom.from_dtype(X_mean.dtype)
        filters = tb.Filters(complib='blosc', complevel=9)
        ds = f.create_carray(f.root, 'mulmean', atom, X_mean.shape,
                             filters=filters)
        ds[:] = X_mean
def _internal_test_predict_best_param(area_id,
                                      save_pred=True):
    """Predict test-set masks with the best-epoch model for an area.

    Loads the epoch selected by the tuned model parameters, runs the U-Net
    over the mean-subtracted test images, optionally stores the raw
    predictions in FMT_TESTPRED_PATH (node '/pred'), and returns them.
    """
    prefix = area_id_to_prefix(area_id)
    param = _get_model_parameter(area_id)
    epoch = param['fn_epoch']
    # NOTE: removed unused local `min_th` (param['min_poly_area']); the
    # polygon-area threshold is applied by the postprocessing caller, not here.

    # Prediction phase
    logger.info("Prediction phase: {}".format(prefix))
    X_mean = get_mul_mean_image(area_id)

    # Load model weights
    # Predict and Save prediction result
    fn = FMT_TESTPRED_PATH.format(prefix)
    fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
    fn_model = fn_model.format(epoch=epoch)
    model = get_unet()
    model.load_weights(fn_model)

    fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    y_pred = model.predict_generator(
        generate_test_batch(
            area_id,
            batch_size=64,
            immean=X_mean,
            enable_tqdm=True,
        ),
        val_samples=len(df_test),
    )
    del model

    # Save prediction result
    if save_pred:
        with tb.open_file(fn, 'w') as f:
            atom = tb.Atom.from_dtype(y_pred.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
                                 filters=filters)
            ds[:] = y_pred
    return y_pred
def _internal_test(area_id, enable_tqdm=False):
    """Predict test-set masks and write the polygon submission CSV.

    Runs best-epoch prediction, converts each predicted mask to polygons
    (dropping those below the tuned min-area threshold), and writes one
    CSV row per polygon (or a POLYGON EMPTY row for imageless results)
    to FMT_TESTPOLY_PATH.
    """
    prefix = area_id_to_prefix(area_id)
    y_pred = _internal_test_predict_best_param(area_id, save_pred=False)

    param = _get_model_parameter(area_id)
    min_th = param['min_poly_area']

    # Postprocessing phase
    logger.info("Postprocessing phase")
    fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    fn_out = FMT_TESTPOLY_PATH.format(prefix)
    with open(fn_out, 'w') as f:
        f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
        test_image_list = df_test.index.tolist()
        for idx, image_id in tqdm.tqdm(enumerate(test_image_list),
                                       total=len(test_image_list)):
            df_poly = mask_to_poly(y_pred[idx][0], min_polygon_area_th=min_th)
            if len(df_poly) > 0:
                for i, row in df_poly.iterrows():
                    line = "{},{},\"{}\",{:.6f}\n".format(
                        image_id,
                        row.bid,
                        row.wkt,
                        row.area_ratio)
                    # Strip polygon interiors to match the submission format.
                    line = _remove_interiors(line)
                    f.write(line)
            else:
                # No building found: emit the mandatory placeholder row.
                f.write("{},{},{},0\n".format(
                    image_id,
                    -1,
                    "POLYGON EMPTY"))
def validate_score(area_id):
    """
    Calc competition score

    Produces (idempotently — each phase is skipped when its output file
    already exists):
      1. raw validation-set predictions (FMT_VALTESTPRED_PATH),
      2. predicted polygons CSV (FMT_VALTESTPOLY_PATH),
      3. ground-truth polygons CSV for the val split (FMT_VALTESTTRUTH_PATH).
    """
    prefix = area_id_to_prefix(area_id)

    # Prediction phase
    if not Path(FMT_VALTESTPRED_PATH.format(prefix)).exists():
        X_val, y_val = _get_valtest_mul_data(area_id)
        X_mean = get_mul_mean_image(area_id)

        # Load model weights
        # Predict and Save prediction result
        model = get_unet()
        model.load_weights(FMT_VALMODEL_PATH.format(prefix))
        y_pred = model.predict(X_val - X_mean, batch_size=8, verbose=1)
        del model

        # Save prediction result
        fn = FMT_VALTESTPRED_PATH.format(prefix)
        with tb.open_file(fn, 'w') as f:
            atom = tb.Atom.from_dtype(y_pred.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
                                 filters=filters)
            ds[:] = y_pred

    # Postprocessing phase
    if not Path(FMT_VALTESTPOLY_PATH.format(prefix)).exists():
        fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
        df_test = pd.read_csv(fn_test, index_col='ImageId')
        fn = FMT_VALTESTPRED_PATH.format(prefix)
        with tb.open_file(fn, 'r') as f:
            y_pred = np.array(f.get_node('/pred'))
            print(y_pred.shape)

        # Convert each predicted mask to polygon rows (or a placeholder row).
        fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
        with open(fn_out, 'w') as f:
            f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
            for idx, image_id in enumerate(df_test.index.tolist()):
                df_poly = mask_to_poly(y_pred[idx][0])
                if len(df_poly) > 0:
                    for i, row in df_poly.iterrows():
                        f.write("{},{},\"{}\",{:.6f}\n".format(
                            image_id,
                            row.bid,
                            row.wkt,
                            row.area_ratio))
                else:
                    f.write("{},{},{},0\n".format(
                        image_id,
                        -1,
                        "POLYGON EMPTY"))

        # update fn_out: rewrite in place with polygon interiors removed
        # (header kept as-is).
        with open(fn_out, 'r') as f:
            lines = f.readlines()
        with open(fn_out, 'w') as f:
            f.write(lines[0])
            for line in lines[1:]:
                line = _remove_interiors(line)
                f.write(line)

    # Validation solution file
    if not Path(FMT_VALTESTTRUTH_PATH.format(prefix)).exists():
        fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
        df_true = pd.read_csv(fn_true)

        # # Remove prefix "PAN_"
        # df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]

        fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
        df_test = pd.read_csv(fn_test)
        df_test_image_ids = df_test.ImageId.unique()

        fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
        with open(fn_out, 'w') as f:
            f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
            # Keep only the ground-truth rows for images in the val split.
            df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
            for idx, r in df_true.iterrows():
                f.write("{},{},\"{}\",{:.6f}\n".format(
                    r.ImageId,
                    r.BuildingId,
                    r.PolygonWKT_Pix,
                    1.0))
def _concat_area_csvs(fmt_path, fn_out):
    """Merge the per-area CSVs (areas 2-5) of fmt_path into fn_out,
    keeping a single header line. Asserts every per-area file exists.
    """
    header_line = ''
    lines = []
    for area_id in range(2, 6):
        prefix = area_id_to_prefix(area_id)
        fn = fmt_path.format(prefix)
        assert Path(fn).exists()
        with open(fn, 'r') as f:
            header_line = f.readline()
            lines += f.readlines()
    with open(fn_out, 'w') as f:
        f.write(header_line)
        f.writelines(lines)


def validate_all_score():
    """Aggregate per-area ground-truth and predicted polygon CSVs into the
    overall validation files used for scoring.
    """
    # Ground-truth polygons
    _concat_area_csvs(FMT_VALTESTTRUTH_PATH, FMT_VALTESTTRUTH_OVALL_PATH)
    # Predicted polygons
    _concat_area_csvs(FMT_VALTESTPOLY_PATH, FMT_VALTESTPOLY_OVALL_PATH)
def generate_valtest_batch(area_id,
                           batch_size=8,
                           immean=None,
                           enable_tqdm=False):
    """Infinite generator of (X, y) batches from the val-test HDF5 stores.

    Images are converted to channel-first order; masks are binarized (> 0)
    and reshaped to (-1, 1, INPUT_SIZE, INPUT_SIZE). If immean is given it
    is subtracted from each image batch. Intended for Keras
    predict_generator/fit_generator, which stop consuming after val_samples.
    """
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix))
    fn_im = FMT_VALTEST_MUL_STORE.format(prefix)
    fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)

    image_id_list = df_train.ImageId.tolist()

    if enable_tqdm:
        pbar = tqdm.tqdm(total=len(image_id_list))

    while 1:
        total_sz = len(image_id_list)
        # floor+1 can produce one empty tail batch; it is skipped below.
        n_batch = int(math.floor(total_sz / batch_size) + 1)
        with tb.open_file(fn_im, 'r') as f_im,\
                tb.open_file(fn_mask, 'r') as f_mask:
            for i_batch in range(n_batch):
                target_image_ids = image_id_list[
                    i_batch*batch_size:(i_batch+1)*batch_size
                ]
                if len(target_image_ids) == 0:
                    continue

                X_train = []
                y_train = []
                for image_id in target_image_ids:
                    im = np.array(f_im.get_node('/' + image_id))
                    # HWC -> CHW
                    im = np.swapaxes(im, 0, 2)
                    im = np.swapaxes(im, 1, 2)

                    X_train.append(im)
                    mask = np.array(f_mask.get_node('/' + image_id))
                    mask = (mask > 0).astype(np.uint8)
                    y_train.append(mask)

                X_train = np.array(X_train)
                y_train = np.array(y_train)
                y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))

                if immean is not None:
                    X_train = X_train - immean

                if enable_tqdm:
                    pbar.update(y_train.shape[0])

                yield (X_train, y_train)

    # NOTE(review): unreachable — the `while 1` loop above never exits, so
    # the progress bar is never closed here.
    if enable_tqdm:
        pbar.close()
def generate_valtrain_batch(area_id, batch_size=8, immean=None):
    """Infinite generator of (X, y) training batches from the val-train store.

    Image ids are shuffled once up front (not re-shuffled per epoch); images
    are converted to channel-first order and masks binarized (> 0). If
    immean is given it is subtracted from each image batch.
    """
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix))
    fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
    fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)

    image_id_list = df_train.ImageId.tolist()
    np.random.shuffle(image_id_list)

    while 1:
        total_sz = len(image_id_list)
        # floor+1 can produce one empty tail batch; it is skipped below.
        n_batch = int(math.floor(total_sz / batch_size) + 1)
        with tb.open_file(fn_im, 'r') as f_im,\
                tb.open_file(fn_mask, 'r') as f_mask:
            for i_batch in range(n_batch):
                target_image_ids = image_id_list[
                    i_batch*batch_size:(i_batch+1)*batch_size
                ]
                if len(target_image_ids) == 0:
                    continue

                X_train = []
                y_train = []
                for image_id in target_image_ids:
                    im = np.array(f_im.get_node('/' + image_id))
                    # HWC -> CHW
                    im = np.swapaxes(im, 0, 2)
                    im = np.swapaxes(im, 1, 2)

                    X_train.append(im)
                    mask = np.array(f_mask.get_node('/' + image_id))
                    mask = (mask > 0).astype(np.uint8)
                    y_train.append(mask)

                X_train = np.array(X_train)
                y_train = np.array(y_train)
                y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))

                if immean is not None:
                    X_train = X_train - immean

                yield (X_train, y_train)
def _get_test_data(area_id):
    """Load the test images for an area as a channel-first numpy array."""
    prefix = area_id_to_prefix(area_id)
    image_ids = pd.read_csv(
        FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)).ImageId.tolist()
    # Images are stored HWC; two axis swaps yield CHW order.
    with tb.open_file(FMT_TEST_IM_STORE.format(prefix), 'r') as f:
        images = [
            np.swapaxes(np.swapaxes(np.array(f.get_node('/' + image_id)), 0, 2), 1, 2)
            for image_id in image_ids]
    return np.array(images)
def _get_valtest_data(area_id):
    """Load val-test images and binary masks for an area.

    Returns (X_val, y_val): channel-first image array and masks reshaped to
    (-1, 1, INPUT_SIZE, INPUT_SIZE).
    """
    prefix = area_id_to_prefix(area_id)
    image_ids = pd.read_csv(
        FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)).ImageId.tolist()
    # Images: stored HWC, converted to CHW via two axis swaps.
    with tb.open_file(FMT_VALTEST_IM_STORE.format(prefix), 'r') as f:
        X_val = np.array([
            np.swapaxes(np.swapaxes(np.array(f.get_node('/' + image_id)), 0, 2), 1, 2)
            for image_id in image_ids])
    # Masks: binarized at 0.5 and cast to uint8.
    with tb.open_file(FMT_VALTEST_MASK_STORE.format(prefix), 'r') as f:
        y_val = np.array([
            (np.array(f.get_node('/' + image_id)) > 0.5).astype(np.uint8)
            for image_id in image_ids])
    y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_val, y_val
def _get_valtrain_data(area_id):
    """Load val-train images and binary masks for an area.

    Returns (X_val, y_val): channel-first image array and masks reshaped to
    (-1, 1, INPUT_SIZE, INPUT_SIZE).
    """
    prefix = area_id_to_prefix(area_id)
    image_ids = pd.read_csv(
        FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix)).ImageId.tolist()
    # Images: stored HWC, converted to CHW via two axis swaps.
    with tb.open_file(FMT_VALTRAIN_IM_STORE.format(prefix), 'r') as f:
        X_val = np.array([
            np.swapaxes(np.swapaxes(np.array(f.get_node('/' + image_id)), 0, 2), 1, 2)
            for image_id in image_ids])
    # Masks: binarized at 0.5 and cast to uint8.
    with tb.open_file(FMT_VALTRAIN_MASK_STORE.format(prefix), 'r') as f:
        y_val = np.array([
            (np.array(f.get_node('/' + image_id)) > 0.5).astype(np.uint8)
            for image_id in image_ids])
    y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_val, y_val
def predict(area_id):
    """Run the trained U-Net on an area's test images and persist predictions."""
    prefix = area_id_to_prefix(area_id)

    # Mean-center the multispectral test images before inference.
    X_test = _get_test_mul_data(area_id) - get_mul_mean_image(area_id)

    model = get_unet()
    model.load_weights(FMT_VALMODEL_PATH.format(prefix))
    y_pred = model.predict(X_test, batch_size=8, verbose=1)
    del model  # release the model before writing results

    # Save predictions as a blosc-compressed HDF5 carray.
    fn = FMT_TESTPRED_PATH.format(prefix)
    with tb.open_file(fn, 'w') as f:
        ds = f.create_carray(
            f.root, 'pred',
            tb.Atom.from_dtype(y_pred.dtype),
            y_pred.shape,
            filters=tb.Filters(complib='blosc', complevel=9))
        ds[:] = y_pred
def _internal_validate_predict_best_param(area_id, enable_tqdm=False):
    """Predict on the validation split using the best epoch recorded for the area."""
    best_epoch = _get_model_parameter(area_id)['fn_epoch']
    return _internal_validate_predict(area_id,
                                      epoch=best_epoch,
                                      save_pred=False,
                                      enable_tqdm=enable_tqdm)
def _internal_validate_predict(area_id,
epoch=3,
save_pred=True,
enable_tqdm=False):
prefix = area_id_to_prefix(area_id)
X_mean = get_mul_mean_image(area_id)
# Load model weights
# Predict and Save prediction result
fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
fn_model = fn_model.format(epoch=epoch)
model = get_unet()
model.load_weights(fn_model)
fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
df_test = pd.read_csv(fn_test, index_col='ImageId')
y_pred = model.predict_generator(
generate_valtest_batch(
area_id,
batch_size=64,
immean=X_mean,
enable_tqdm=enable_tqdm,
),
val_samples=len(df_test),
| |
'''
Base driver class
'''
import pandas as pd
import requests
import json
from copy import deepcopy
import pyperclip
import math
import re
import inspect
import yaml
import itertools
from datetime import datetime
import warnings
import functools
from textwrap import dedent
from datapungi_fed import generalSettings #NOTE: projectName
#import generalSettings #NOTE: projectName
from datapungi_fed import utils #NOTE: projectName
#import utils #NOTE: projectName
class driverCore():
    r'''
    Given a dbGroupName and its default db, starts a factory of query functions - ie, a function for
    each db in the group. If dbGroupName is empty, return the list of dbGroups, dbs in the group, and their parameters
    '''
    def __init__(self,dbGroupName='', baseRequest={},connectionParameters={},userSettings={}):
        '''
        Wires together the group's db parameters (from the YAML config), a
        generic query connector, and a per-db query-function factory, then
        writes a human-readable docstring for the instance.
        '''
        #TODO: place defaultQueryFactoryEntry in yaml
        self._dbParams, self.defaultQueryFactoryEntry = self._getDBParameters(dbGroupName)
        self._ETDB = extractTransformDB(baseRequest,connectionParameters,userSettings) #a generic query is started
        self._ETFactory = extractTransformFactory(dbGroupName,self._ETDB,self._dbParams,self.defaultQueryFactoryEntry)
        self._driverMeta = driverMetadata()(dbGroupName)
        self.__setdoc__(dbGroupName)
    def __getitem__(self,dbName):
        '''Return the query function for a specific database in the group.'''
        return(self._ETFactory.extractTransformFactory[dbName])
    def __call__(self,*args,**kwargs):
        '''Query the group's default database.'''
        out = self._ETFactory.extractTransformFactory[self.defaultQueryFactoryEntry](*args,**kwargs)
        return(out)
    def __setdoc__(self,dbGroupName):
        '''
        Build self.__doc__ from the group's databases and their parameters
        (collected into self.__docParams__ by _getDBParameters).
        '''
        if dbGroupName == '':
            self.__doc__ = 'Returns the metadata of the dataset groups and their databases. Do not need inputs.'
        else:
            self.__doc__ = 'Queries the databases of {} \n \n'.format(dbGroupName)
            # One bullet per database in the group.
            for entry in self.__docParams__:
                self.__doc__ += '- {short name}: {description} \n'.format(**entry)
                self.__doc__ += '    parameters: {}\n'.format(str(entry['parameters']))
                self.__doc__ += '    official database name: {}\n'.format(entry['database'])
            self.__doc__ += '\nDefault query database: {}\n'.format(self.defaultQueryFactoryEntry)
            self.__doc__ += "Sample functions: \n-data.{dbGroupName}() (default) \n-data.{dbGroupName}['{db}']() (query the {db} database)".format(**{'dbGroupName':dbGroupName.lower(),'db':self.defaultQueryFactoryEntry})
            self.__doc__ += "\n\nNOTE: don't need to pass most parameters. Eg, api_key and file_type (json)."
    def __str__(self):
        return(self.__doc__)
    def _getDBParameters(self,dbGroupName = ''):
        r'''
        The parameters of each database in the group (if empty returns all groups x databases)
        '''
        dataPath = utils.getResourcePath('/config/datasetlist.yaml')
        with open(dataPath, 'r') as yf:
            datasetlist = yaml.safe_load(yf)
        if dbGroupName == '':
            defaultDB = {}
            return((datasetlist,defaultDB))
        #get the entry of the group:
        selected = list(filter( lambda x: x['group'] == dbGroupName , datasetlist))[0]
        defaultDB = selected.get('default query','')
        datasets = selected.get('datasets',{})
        # api_key and file_type are handled by the driver itself, so they are
        # not exposed as user-facing query parameters.
        removeCases = lambda array: list(filter( lambda x: x not in ['api_key','file_type'] , array ))
        dbParams = { entry['short name'] : { 'urlSuffix' : entry['database'] , 'json key': entry['json key'], 'params': removeCases(entry['parameters']) } for entry in datasets }
        self.__docParams__ = datasets #parameters used to write a doc string for the class instance.
        return((dbParams,defaultDB))
class extractTransformFactory():
    r'''
    Given a group of databases, builds one query function per database in the
    group, exposed through the ``extractTransformFactory`` dict attribute.
    '''
    def __init__(self, dbGroupName, ETDB, dbParams, defaultQueryFactoryEntry):
        if not dbGroupName:
            # No group selected: expose an empty function catalog.
            self.extractTransformFactory = {}
            return
        self.dbGroupName = dbGroupName
        self.dbParams = dbParams
        self.ETDB = ETDB
        # Point the shared connector at this group's databases.
        self.ETDB(self.dbGroupName, self.dbParams)
        self.extractTransformFactory = {
            name: self.selectDBQuery(self.query, name)
            for name in self.dbParams
        }
        # Database used when the factory instance is called directly.
        self.defaultQueryFactoryEntry = defaultQueryFactoryEntry

    def query(self, *args, **kwargs):
        '''Forward a query to the underlying extract/transform connector.'''
        return self.ETDB.query(*args, **kwargs)

    def selectDBQuery(self, queryFun, dbName):
        r'''
        Specialize the generic query function to ``dbName``: the returned
        callable maps its args/kwargs onto that database's query parameters.
        '''
        bound = functools.partial(queryFun, dbName)

        def dbQuery(*args, **kwargs):
            return bound(**self.getQueryArgs(dbName, *args, **kwargs))

        # quick user tip: the parameters this database accepts
        dbQuery.options = self.dbParams[dbName]['params']
        return dbQuery

    def getQueryArgs(self, dbName, *args, **kwargs):
        r'''
        Map positional and keyword arguments onto the database's declared
        parameters; anything else (eg, verbose) is passed through untouched.
        '''
        accepted = self.dbParams[dbName]['params']
        queryParams = dict(zip(accepted, args))
        queryParams.update({k: v for k, v in kwargs.items() if k in accepted})
        passThrough = {k: v for k, v in kwargs.items() if k not in accepted}
        return {'params': queryParams, **passThrough}
class extractTransformDB():
    r'''
    Functions to connect and query a db given its dbName and dbParams (see yaml in config for these).
    '''
    def __init__(self,baseRequest={},connectionParameters={},userSettings={}):
        '''
        Loads generic parameters (ie, api key, location of data) plus the
        helpers that clean responses and produce code snippets.
        '''
        self._connectionInfo = generalSettings.getGeneralSettings(connectionParameters = connectionParameters, userSettings = userSettings )
        self._baseRequest = self.getBaseRequest(baseRequest,connectionParameters,userSettings)
        self._lastLoad = {} #data stored here to assist functions such as clipcode
        self._transformData = transformExtractedData()
        self._getCode = transformIncludeCodeSnippet()
        self._cleanCode = "" #TODO: improvable - this is the code snippet producing a pandas df
    def __call__(self,dbGroup,dbParams):
        r'''
        A call to an instance of the class Loads specific parameters of the dbs of dbGroup
        '''
        self.dbGroup = dbGroup
        self.dbParams = dbParams
    def query(self,dbName,params={},file_type='json',verbose=False,warningsOn=True):
        r'''
        Query one database of the loaded group.

        Args:
            dbName - short name of the database to query
            params - dict of request parameters; overrides defaults
            file_type - response format requested from the API
            verbose - if True return dict(dataFrame, request, code), else just the dataFrame
            warningsOn - enable driver warnings (eg, download-limit notice)
        '''
        # get requests' query inputs
        warningsList = ['countPassLimit'] # warn on this events.
        prefixUrl = self.dbParams[dbName]['urlSuffix']
        output = self.queryApiCleanOutput(prefixUrl, dbName, params, warningsList, warningsOn, verbose)
        return(output)
    def queryApiCleanOutput(self,urlPrefix,dbName,params,warningsList,warningsOn,verbose):
        r'''
        Core steps of querying and cleaning data.  Notice, specific data cleaning should be
        implemented in the specific driver classes

        Args:
            self - should containg a base request (url)
            urlPrefix (str) - a string to be appended to request url (eg, https:// ...// -> https//...//urlPrefix?)
            params (dict) - usually empty, override any query params with the entries of this dictionary
            warningsList (list) - the list of events that can lead to warnings
            warningsOn (bool) - turn on/off driver warnings
            verbose (bool) - detailed output or short output
        '''
        #get data
        query = self.getBaseQuery(urlPrefix,params)
        # only 'params' and 'url' are valid requests.get kwargs; drop the rest
        retrivedData = requests.get(** { key:entry for key, entry in query.items() if key in ['params','url'] } )
        #clean data
        df_output,self._cleanCode = self.cleanOutput(dbName,query,retrivedData)
        #print warning if there is more data the limit to download
        for entry in warningsList:
            self._warnings(entry,retrivedData,warningsOn)
        #short or detailed output, update _lastLoad attribute:
        output = self.formatOutputupdateLoadedAttrib(query,df_output,retrivedData,verbose)
        return(output)
    def getBaseQuery(self,urlPrefix,params):
        r'''
        Return a dictionary of request arguments.

        Args:
            urlPrefix (str) - string appended to the end of the core url (eg, series -> http:...\series? )
            dbName (str) - the name of the db being queried
            params (dict) - a dictionary with request paramters used to override all other given parameters
        Returns:
            query (dict) - a dictionary with 'url' and 'params' (a string) to be passed to a request
        '''
        query = deepcopy(self._baseRequest)
        #update query url
        query['url'] = query['url']+urlPrefix
        query['params'].update(params)
        # keep the dict form around (used eg by the Series cleaner), then
        # flatten 'params' into a query string for requests
        query['params_dict'] = query['params']
        query['params'] = '&'.join([str(entry[0]) + "=" + str(entry[1]) for entry in query['params'].items()])
        return(query)
    def formatOutputupdateLoadedAttrib(self,query,df_output,retrivedData,verbose):
        '''Return the dataFrame (or a verbose dict) and cache it in _lastLoad.'''
        if verbose == False:
            self._lastLoad = df_output
            return(df_output)
        else:
            # verbose: also hand back the raw response and a code snippet
            # reproducing the query and cleaning steps
            code = self._getCode.transformIncludeCodeSnippet(query,self._baseRequest,self._connectionInfo.userSettings,self._cleanCode)
            output = dict(dataFrame = df_output, request = retrivedData, code = code)
            self._lastLoad = output
            return(output)
    def cleanOutput(self,dbName,query,retrivedData):
        r'''
        This is a placeholder - specific drivers should have their own cleaning method
        this generates self._cleanCode
        '''
        transformedOutput = self._transformData(self.dbGroup,dbName,self.dbParams,query,retrivedData)
        return(transformedOutput)
    def getBaseRequest(self,baseRequest={},connectionParameters={},userSettings={}):
        r'''
        Write a base request.  This is the information that gets used in most requests such as getting the userKey
        '''
        if baseRequest =={}:
            connectInfo = generalSettings.getGeneralSettings(connectionParameters = connectionParameters, userSettings = userSettings )
            return(connectInfo.baseRequest)
        else:
            return(baseRequest)
    def _warnings(self,warningName,inputs,warningsOn = True):
        '''Emit the named driver warning derived from a response, if enabled.'''
        if not warningsOn:
            return
        if warningName == 'countPassLimit':
            '''
            warns if number of lines in database exceeds the number that can be downloaded.
            inputs = a request result of a FED API
            '''
            _count = inputs.json().get('count',1)
            _limit = inputs.json().get('limit',1000)
            if _count > _limit:
                warningText = 'NOTICE: dataset exceeds download limit! Check - count ({}) and limit ({})'.format(_count,_limit)
                warnings.warn(warningText)
class transformExtractedData():
def __call__(self, dbGroup, dbName, dbParams, query, retrivedData):
    '''Dispatch response cleaning to the handler matching the database group.'''
    if dbGroup == 'Series':
        return self.cleanOutputSeries(dbName, dbParams, query, retrivedData)
    elif dbGroup == 'Geo':
        return self.cleanOutputGeo(dbName, dbParams, query, retrivedData)
    return self.cleanOutput(dbName, dbParams, query, retrivedData)
def cleanOutput(self, dbName, dbParams, query, retrivedData):  # categories, releases, sources, tags
    '''Generic cleaner: JSON payload -> (DataFrame, reproducing code snippet).'''
    dataKey = dbParams[dbName]['json key']
    cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
    payload = retrivedData.json()
    df_output = pd.DataFrame(payload[dataKey])  # TODO: deal with xml
    # Stash the non-data entries of the response as metadata on the frame;
    # pandas warns when setting non-column attributes, so mute it briefly.
    meta = {key: value for key, value in payload.items() if key != dataKey}
    warnings.filterwarnings("ignore", category=UserWarning)
    setattr(df_output, '_meta', meta)
    warnings.filterwarnings("always", category=UserWarning)
    return (df_output, cleanCode)
def cleanOutputSeries(self, dbName, dbParams,query, retrivedData): #series
    '''Series cleaner: JSON payload -> (DataFrame, reproducing code snippet).

    For the 'observations' db the frame is further shaped into a float-valued,
    date-indexed series named after the queried series_id.
    '''
    dataKey = dbParams[dbName]['json key']
    cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
    df_output = pd.DataFrame(retrivedData.json()[dataKey]) # TODO: deal with xml
    if dbName == 'observations':
        seriesID = query['params_dict']['series_id'] #{ x.split('=')[0] : x.split('=')[1] for x in query['params'].split("&") }['series_id']
        # Drop non-numeric observations (eg '.'), cast values to float,
        # index by date, and rename the value column to the series id.
        df_output = (df_output[['date','value']]
            .assign( dropRow = lambda df: pd.to_numeric(df['value'],errors='coerce') )
            .dropna()
            .drop('dropRow',axis=1)
            .assign(value=lambda df: df['value'].astype('float'), date=lambda df: pd.to_datetime(df['date'] ) )
            .set_index('date')
            .rename({'value':seriesID},axis='columns'))
        # Append the same reshaping steps to the reproducing code snippet.
        codeAddendum = f'''\n
        df_output = (df_output[['date','value']]
            .assign( dropRow = lambda df: pd.to_numeric(df['value'],errors='coerce') )
            .dropna()
            .drop('dropRow',axis=1)
            .assign(value=lambda df: df['value'].astype('float'), date=lambda df: pd.to_datetime(df['date'] ) )
            .set_index('date')
            .rename({{'value': '{seriesID}' }},axis='columns'))
        '''
        cleanCode += dedent(codeAddendum)
    #TODO: relabel value column with symbol
    # pandas warns when setting non-column attributes; mute it briefly.
    warnings.filterwarnings("ignore", category=UserWarning)
    setattr(df_output, '_meta', dict(filter(lambda entry: entry[0] != dataKey, retrivedData.json().items())))
    warnings.filterwarnings("always", category=UserWarning)
    return((df_output,cleanCode))
def cleanOutputGeo(self, dbName, dbParams,query, retrivedData): #categories, releases, sources, tags
if dbName == 'shapes':
dataKey | |
import math
import re
from collections import Counter, OrderedDict, defaultdict
from enum import IntEnum
from fractions import Fraction
from itertools import groupby, combinations, permutations
from numbers import Real
from typing import FrozenSet
from typing import Iterable, Tuple, List, Dict, TypeVar, Optional, Set, Mapping, DefaultDict
from physcalc.util import (MathParseError, MathEvalError, parse_power, generate_sup_power, iterlen, ensure_real,
ensure_int)
T = TypeVar("T")
class BaseUnit(IntEnum):
    """The seven SI base units; the integer values give them a stable sort order."""
    AMPERE = 1
    KILOGRAM = 2
    METER = 3
    SECOND = 4
    KELVIN = 5
    MOLE = 6
    CANDELA = 7
# Display symbol for each SI base unit.
BASE_UNIT_NAMES = {
    BaseUnit.METER: "m",
    BaseUnit.KILOGRAM: "kg",
    BaseUnit.SECOND: "s",
    BaseUnit.KELVIN: "K",
    BaseUnit.AMPERE: "A",
    BaseUnit.MOLE: "mol",
    BaseUnit.CANDELA: "cd",
}

# SI multiplier prefixes mapped to their exact scale as Fractions.
# Ordered: the empty prefix comes first, so un-prefixed names are matched
# before prefixed interpretations when parsing.
MUL_PREFIXES = OrderedDict((
    ("", Fraction(1, 1)),
    ("da", Fraction(10, 1)),
    ("h", Fraction(100, 1)),
    ("k", Fraction(1000, 1)),
    ("M", Fraction(1000 ** 2, 1)),
    ("G", Fraction(1000 ** 3, 1)),
    ("T", Fraction(1000 ** 4, 1)),
    ("P", Fraction(1000 ** 5, 1)),
    ("E", Fraction(1000 ** 6, 1)),
    ("Z", Fraction(1000 ** 7, 1)),
    ("Y", Fraction(1000 ** 8, 1)),
    ("d", Fraction(1, 10)),
    ("c", Fraction(1, 100)),
    ("m", Fraction(1, 1000)),
    ("μ", Fraction(1, 1000) ** 2),  # micro (Greek mu), 10^-6
    ("u", Fraction(1, 1000) ** 2),  # micro (ASCII fallback), 10^-6
    ("n", Fraction(1, 1000) ** 3),
    ("p", Fraction(1, 1000) ** 4),
    ("f", Fraction(1, 1000) ** 5),
    ("a", Fraction(1, 1000) ** 6),
    ("z", Fraction(1, 1000) ** 7),
    ("y", Fraction(1, 1000) ** 8),
))

# by default, non-multiplicative units won't be prefixed with the non-1000^n prefixes
DEFAULT_DISALLOWED_PREFIXES = frozenset(("c", "d", "da", "h"))
# by default, multiplicative and anonymous units won't be prefixed at all
ALL_DISALLOWED_PREFIXES = frozenset(MUL_PREFIXES.keys()) - frozenset(("",))
# Per-unit overrides of which prefixes may not be attached to it.
DISALLOWED_PREFIXES: "DefaultDict[Unit, FrozenSet[str]]" = defaultdict(lambda: ALL_DISALLOWED_PREFIXES)
# Per-unit power applied to a prefix (eg, area units square their prefix).
PREFIX_POWER: "DefaultDict[Unit, int]" = defaultdict(lambda: 1)

# Splitter for whitespace-separated unit tokens.
WHITESPACE_RE = re.compile(r"\s+")
def _parse_multiplied_unit(unit_name: str) -> "Unit":
    """Parses a unit name with optional multiplier.

    Tries each multiplier prefix in registration order (the empty prefix
    first) and looks the remaining text up in the unit name registry — an
    O(#prefixes) dict lookup instead of the previous O(#prefixes × #units)
    scan.  Match results are identical to the original nested loop, since
    for a fixed prefix the remainder determines the unit uniquely.

    Args:
        unit_name: a possibly-prefixed unit name, e.g. "km", "ms", "mol".
    Returns:
        The registered unit scaled by the prefix multiplier.
    Raises:
        MathParseError: if no prefix+name combination is registered.
    """
    for prefix, prefix_mul in MUL_PREFIXES.items():
        if not unit_name.startswith(prefix):
            continue
        unit = Unit.name_registry.get(unit_name[len(prefix):])
        if unit is not None:
            return unit * prefix_mul
    raise MathParseError(f"unknown unit {unit_name}")
def _parse_unit_half(text) -> "Tuple[Unit, Mapping[str, int]]":
    """Parses unit names separated by whitespace into a single unit and a name list."""
    combined = NO_UNIT
    name_tally = Counter()
    tokens = WHITESPACE_RE.split(text)
    # a bare "1" denotes the dimensionless unit
    if tokens == ["1"]:
        return combined, name_tally
    for token in tokens:
        if not token:
            continue
        unit_name, power = parse_power(token)
        unit = _parse_multiplied_unit(unit_name)
        try:
            power = int(power)
        except ValueError:
            raise MathParseError(f"invalid power {power}") from None
        # apply the power one step at a time so unit algebra stays exact
        if power >= 0:
            for _ in range(power):
                combined *= unit
                name_tally[unit_name] += 1
        else:
            for _ in range(-power):
                combined /= unit
                name_tally[unit_name] -= 1
    return combined, name_tally
def _cancel_unit_parts(num: "Iterable[T]", denom: "Iterable[T]") -> "Tuple[Tuple[T, ...], Tuple[T, ...]]":
    """Simplifies a numerator-denumerator pair by cancelling shared items."""
    tally = Counter(num)
    tally.subtract(denom)
    kept, flipped = [], []
    # positive counts stay in the numerator, negative ones go to the denominator
    for item, count in sorted(tally.items()):
        if count > 0:
            kept.extend([item] * count)
        elif count < 0:
            flipped.extend([item] * (-count))
    return tuple(kept), tuple(flipped)
def _generate_base_name_half(part: "Iterable[str]") -> "Tuple[str, int]":
    """Generates a unit name with powers from a sorted list of unit names.

    Returns the joined name and a weight of 2**(number of distinct groups).
    """
    if not part:
        return "", 1
    grouped = [(name, iterlen(reps)) for name, reps in groupby(part)]
    label = " ".join(name + generate_sup_power(count) for name, count in grouped)
    return label, 2 ** len(grouped)
def _multiply_list_by_frac(lst, frac):
"""Multiplies a list of base units by a fraction, ensuring that no fractional powers result."""
out = []
if len(lst) % frac.denominator:
raise ValueError(f"number of base units in unit not divisible by {frac.denominator}")
for i in range(0, len(lst), frac.denominator):
if any(lst[i] != lst[i + j] for j in range(frac.denominator)):
raise ValueError(f"number of {BASE_UNIT_NAMES[lst[i]]} in unit not divisible by {frac.denominator}")
for _ in range(frac.numerator):
out.append(lst[i])
return out
class Unit:
    """A physical unit: base-unit numerator/denominator tuples plus a scalar multiplier."""
    # All units ever built, keyed by (num, denom, multiplier).
    part_registry: "Dict[tuple, Unit]" = {}
    # Named units, keyed by their display name.
    name_registry: "Dict[str, Unit]" = {}
    # Units considered when generating names for anonymous units.
    output_units: "Set[Unit]" = set()
    # Explicitly named units, in registration order.
    named_units: "List[Unit]" = []
    # True when the unit was given an explicit name (vs lazily generated).
    specific_name: bool
    # Cached display name; generated on first access when None.
    _name: Optional[str]
    # Ranking weight for generated names (lower-weight candidates win).
    output_weight: int
    # Physical quantity this unit measures (eg "length"), if known.
    quantity_name: Optional[str]
    # Scale factor relative to the unprefixed combination of base units.
    multiplier: Real
def __init__(self, specific_name: Optional[str], num, denom, output_weight: int = 1000, quantity_name: Optional[str] = None, multiplier: Real = 1):
    """Create a unit from numerator/denominator base-unit tuples.

    Args:
        specific_name: explicit display name, or None for an anonymous unit
            (a name is then generated lazily on first access).
        num: base units in the numerator (typically a sorted tuple from
            _cancel_unit_parts).
        denom: base units in the denominator (same form as num).
        output_weight: ranking weight when this unit is used in generated
            names (lower is preferred).
        quantity_name: physical quantity this unit measures, if known.
        multiplier: scale factor relative to the unprefixed base combination.
    """
    self.specific_name = specific_name is not None
    self._name = specific_name
    self._num = num
    self._denom = denom
    self.output_weight = output_weight
    self.quantity_name = quantity_name
    self.multiplier = multiplier
def _register_key(self):
    """Register this unit in the parts registry, erroring on duplicates."""
    key = (self._num, self._denom, self.multiplier)
    if Unit.part_registry.get(key) is not None:
        raise ValueError("unit already registered")
    Unit.part_registry[key] = self
def _register_name(self, derivative):
    """Register this unit under its name; non-derivatives also become output units."""
    unit_name = self.name
    if unit_name in Unit.name_registry:
        raise ValueError(f"unit with name {unit_name} already registered")
    Unit.name_registry[unit_name] = self
    Unit.named_units.append(self)
    if not derivative:
        # derivatives never participate in generated-name candidates
        Unit.output_units.add(self)
def register_derivative(self, specific_name: str, multiplier: Real, disallowed_prefixes=None):
    """Creates a new derivative Unit of this Unit, scaled by `multiplier`."""
    derived = Unit(specific_name, self._num, self._denom, 1000,
                   self.quantity_name, self.multiplier * multiplier)
    derived._register_name(True)
    if disallowed_prefixes is not None:
        DISALLOWED_PREFIXES[derived] = disallowed_prefixes
    return derived
@staticmethod
def register(specific_name: Optional[str], output_weight: int, quantity_name: str, num, denom=(), disallowed_prefixes=None, prefix_power=None):
    """Creates, registers and returns a new Unit with the given properties."""
    num, denom = _cancel_unit_parts(num, denom)
    unit = Unit(specific_name, num, denom, output_weight, quantity_name)
    unit._register_key()
    named = specific_name is not None
    if named:
        unit._register_name(False)
    if named or disallowed_prefixes is not None:
        if disallowed_prefixes is None:
            # named units default to banning only the non-1000^n prefixes
            DISALLOWED_PREFIXES[unit] = DEFAULT_DISALLOWED_PREFIXES
        else:
            DISALLOWED_PREFIXES[unit] = frozenset(disallowed_prefixes)
    if prefix_power is not None:
        PREFIX_POWER[unit] = prefix_power
    return unit
@staticmethod
def from_parts(num, denom, multiplier):
    """Gets a Unit from the given numerator, denominator and multiplier.

    If the combination is already known, returns the previously created
    Unit.  Otherwise, an anonymous unit is created and registered for this
    combination.
    """
    num, denom = _cancel_unit_parts(num, denom)
    key = (num, denom, multiplier)
    existing = Unit.part_registry.get(key)
    if existing is not None:
        return existing
    unit = Unit(None, num, denom, multiplier=multiplier)
    unit._register_key()
    return unit
@staticmethod
def parse(name):
    """Parses a unit specification.

    The resulting unit will keep the given name and may be multiplied. To convert it to a non-multiplied unit,
    use to_si().
    """
    # Split into numerator/denominator halves on "/".
    # NOTE(review): a spec with more than one "/" raises ValueError here
    # (split yields 3+ parts) — presumably intentional input validation.
    num, denom = name.split("/") if "/" in name else (name, "")
    num, num_names = _parse_unit_half(num)
    denom, denom_names = _parse_unit_half(denom)
    # Cancel names shared by both halves so the display name is minimal.
    num_names, denom_names = _cancel_unit_parts(num_names, denom_names)
    name = _generate_base_name_half(num_names)[0]
    if denom_names:
        name += " / " + _generate_base_name_half(denom_names)[0]
    unit = num / denom
    return Unit(name, unit._num, unit._denom, 1000, unit.quantity_name, unit.multiplier)
@property
def name(self):
    """Display name of the unit; generated lazily and cached for anonymous units."""
    if self._name is None:
        self._name = self._generate_name()
    return self._name
def _generate_name(self):
    """Generates potential names for a unit and returns the best-ranked one.

    Candidates: the raw base-unit spelling, powers of known output units,
    and products/quotients of two known output units.  The candidate with
    the minimum weight wins.
    """
    # dimensionless unit has the empty name
    if not self._num and not self._denom:
        return ""
    # generate name derived from the base units that make up this unit
    basic_name, basic_weight = _generate_base_name_half(BASE_UNIT_NAMES[unit] for unit in self._num)
    if self._denom:
        basic_denom, denom_weight = _generate_base_name_half(BASE_UNIT_NAMES[unit] for unit in self._denom)
        basic_name += " / " + basic_denom
        basic_weight *= denom_weight
    results: List[Tuple[str, int]] = [(basic_name, basic_weight)]
    # generate all reasonable powers of known units
    # NOTE(review): max_power is min(len(num), len(denom)), so this path is
    # skipped whenever either side is empty — confirm that is intended.
    for test in Unit.output_units:
        max_power = min(len(self._num), len(self._denom))
        for power in range(-max_power, max_power + 1):
            # powers 0 and 1 are already covered by the unit itself
            if 0 <= power <= 1:
                continue
            if self.can_convert(test ** power):
                results.append((test.name + generate_sup_power(power), test.output_weight * 2))
    # generate all products of two known units
    for first, second in combinations(Unit.output_units, 2):
        if self.can_convert(first * second):
            results.append((f"{first.name} {second.name}", first.output_weight * second.output_weight))
    # generate all quotients of two known units
    for num, denom in permutations(Unit.output_units, 2):
        if self.can_convert(num / denom):
            results.append((f"{num.name} / {denom.name}", num.output_weight * denom.output_weight))
    # pick the name with the minimum weight
    return min(results, key=lambda pair: pair[1])[0]
def can_convert(self, other: "Unit"):
    """True when both units share the same base-unit composition (multiplier ignored)."""
    return (self._num, self._denom) == (other._num, other._denom)
def __mul__(self, other):
    """Multiply by a scalar (scales the multiplier) or by another Unit (merges parts)."""
    if isinstance(other, Real):
        return Unit.from_parts(self._num, self._denom, self.multiplier * other)
    if isinstance(other, Unit):
        return Unit.from_parts(self._num + other._num,
                               self._denom + other._denom,
                               self.multiplier * other.multiplier)
    return NotImplemented
def __truediv__(self, other):
    """Divide by another Unit: numerators and denominators cross-combine."""
    if isinstance(other, Unit):
        return Unit.from_parts(self._num + other._denom,
                               self._denom + other._num,
                               self.multiplier / other.multiplier)
    return NotImplemented
def __rtruediv__(self, other):
    """Support ``1 / unit`` as the reciprocal; other left operands are unsupported."""
    if other != 1:
        return NotImplemented
    return NO_UNIT / self
def __pow__(self, power):
    """Raise the unit to a real power.

    Integral powers repeat the base-unit tuples; Fraction powers require
    every base-unit count to divide evenly by the denominator.  Negative
    powers swap numerator and denominator.
    """
    # special case for 0
    if power == 0 or self is NO_UNIT:
        return NO_UNIT
    # only real powers allowed for non-empty units
    try:
        power = ensure_real(power)
    except ValueError:
        raise MathEvalError(f"cannot raise {self} to complex power {power}") from None
    if isinstance(power, Fraction):
        # fractional powers
        try:
            num = _multiply_list_by_frac(self._num, abs(power))
            denom = _multiply_list_by_frac(self._denom, abs(power))
        except ValueError:
            raise MathEvalError(f"cannot raise {self} to power {power}") from None
    else:
        # integral powers
        try:
            power = ensure_int(power)
        except ValueError:
            raise MathEvalError(f"cannot raise {self} to non-rational power {power}") from None
        num = self._num * abs(power)
        denom = self._denom * abs(power)
    # negative powers
    if power < 0:
        num, denom = denom, num
    return Unit.from_parts(num, denom, self.multiplier ** power)
def __eq__(self, other):
    """Units are equal when their names, parts and multipliers all match."""
    if self is other:
        return True
    if not isinstance(other, Unit):
        return False
    return (self.name, self._num, self._denom, self.multiplier) == \
           (other.name, other._num, other._denom, other.multiplier)
def __hash__(self):
    """Hash on parts and multiplier; only explicitly given names participate."""
    explicit_name = self._name if self.specific_name else None
    return hash((explicit_name, self._num, self._denom, self.multiplier))
def __str__(self):
    """Display name; the dimensionless unit prints as "1"."""
    label = self.name
    return label if label else "1"
def __repr__(self):
    """Debug form, e.g. ``<unit m / s>``."""
    return "<unit {}>".format(self.name or "1")
def to_si(self):
"""Converts this Unit to a multiplier | |
import math
import mxnet as mx
from mxnext.tvm.decode_bbox import decode_bbox
def _bbox_encode(F, ex_rois, gt_rois, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
    """ encode bbox regression targets

    Computes the (dx, dy, dw, dh) targets that map ex_rois onto gt_rois and
    normalizes them with the given means/stds.  (The previous docstring said
    "decode"; decoding lives in mxnext.tvm.decode_bbox.)  Defaults are tuples
    rather than lists to avoid shared mutable default arguments.

    inputs:
        F: symbol or ndarray
        ex_rois: F, (#img, ..., 4), corner format (x1, y1, x2, y2)
        gt_rois: F, (#img, ..., 4), corner format (x1, y1, x2, y2)
        means: sequence, (4,) - per-channel normalization means
        stds: sequence, (4,) - per-channel normalization stds
    outputs:
        targets: symbol or ndarray, (#img, ..., 4)
    """
    ex_x1, ex_y1, ex_x2, ex_y2 = F.split(ex_rois, num_outputs=4, axis=-1)
    gt_x1, gt_y1, gt_x2, gt_y2 = F.split(gt_rois, num_outputs=4, axis=-1)

    # widths/heights/centers use the +1 pixel convention
    ex_widths = ex_x2 - ex_x1 + 1.0
    ex_heights = ex_y2 - ex_y1 + 1.0
    ex_ctr_x = ex_x1 + 0.5 * (ex_widths - 1.0)
    ex_ctr_y = ex_y1 + 0.5 * (ex_heights - 1.0)

    gt_widths = gt_x2 - gt_x1 + 1.0
    gt_heights = gt_y2 - gt_y1 + 1.0
    gt_ctr_x = gt_x1 + 0.5 * (gt_widths - 1.0)
    gt_ctr_y = gt_y1 + 0.5 * (gt_heights - 1.0)

    # epsilon guards against division by zero for degenerate boxes
    targets_dx = (gt_ctr_x - ex_ctr_x) / (ex_widths + 1e-14)
    targets_dy = (gt_ctr_y - ex_ctr_y) / (ex_heights + 1e-14)
    targets_dw = F.log(gt_widths / ex_widths)
    targets_dh = F.log(gt_heights / ex_heights)

    # normalize each target channel
    targets_dx = (targets_dx - means[0]) / stds[0]
    targets_dy = (targets_dy - means[1]) / stds[1]
    targets_dw = (targets_dw - means[2]) / stds[2]
    targets_dh = (targets_dh - means[3]) / stds[3]

    return F.concat(targets_dx, targets_dy, targets_dw, targets_dh, dim=-1)
def _prepare_anchors(F, feat_list, anchor_list, num_image, num_anchor):
    """ crop pre-computed anchors into the shape of the features

    Each FPN level's anchor grid is cropped to that level's feature map,
    flattened, and all levels are concatenated and broadcast over the batch.

    inputs:
        F: symbol or ndarray
        feat_list: list of symbols or ndarrays, [(#img, #c, #h1, #w1), ...]
        anchor_list: list of symbols or ndarrays, [(1, 1, #h1', #w1', #anchor * 4), ...]
        num_image: int
        num_anchor: int
    outputs:
        anchors: symbol or ndarray, (#img, H * W * #anchor, 4)
    """
    lvl_anchors = []
    for features, anchors in zip(feat_list, anchor_list):
        # crop the (over-sized) anchor grid to this level's spatial extent
        anchors = F.slice_like(anchors, features, axes=(2, 3))  # (1, 1, h, w, #anchor * 4)
        anchors = F.reshape(anchors, shape=(1, -1, num_anchor * 4))  # (1, h * w, #anchor * 4)
        lvl_anchors.append(anchors)
    anchors = F.concat(*lvl_anchors, dim=1)  # (1, H * W, #anchor * 4)
    anchors = F.reshape(anchors, shape=(0, -1, 4))  # (1, H * W * #anchor, 4)
    # replicate across the batch dimension
    anchors = F.broadcast_axis(anchors, axis=0, size=num_image)  # (#img, H * W * #anchor, 4)
    return anchors
def _positive_loss(F, anchors, gt_bboxes, cls_prob, bbox_pred, num_image,
                   alpha, pre_anchor_top_n, target_means, target_stds):
    """
    Positive part of the loss: for every GT box, its top-n anchors by IoU
    form a "bag"; the bag's weighted detection probability is penalized with
    a log loss (FreeAnchor-style positive bag loss).

    inputs:
        F: symbol or ndarray
        anchors: F, (#img, h * w * #anchor, 4)
        gt_bboxes: F, (#img, #gt, 5) - last channel is the class label
        cls_prob: F, (#img, h * w * #anchor, #class)
        bbox_pred: F, (#img, h * w * #anchor, 4)
        alpha: float - positive/negative balance factor
        pre_anchor_top_n: int - bag size per GT box
        target_means: list, (4,)
        target_stds: list, (4,)
    outputs:
        positive_loss, F, (1,)
    """
    with mx.name.Prefix("free_anchor_pos_target: "):
        gt_labels = F.slice_axis(gt_bboxes, axis=-1, begin=4, end=5)  # (n, #gt, 1)
        gt_bboxes = F.slice_axis(gt_bboxes, axis=-1, begin=0, end=4)  # (n, #gt, 4)

        # assigning: per image, pick the top-n anchors by IoU for each GT box
        an_index_list = list()
        for i in range(num_image):
            anchors_this = F.slice_axis(anchors, axis=0, begin=i, end=i + 1).reshape([-1, 4])
            gt_bboxes_this = F.slice_axis(gt_bboxes, axis=0, begin=i, end=i + 1).reshape([-1, 4])
            iou_gt2a_this = F.contrib.box_iou(
                gt_bboxes_this, anchors_this, format="corner")  # (#gt, h * w * #anchor)
            iou_gt2a_topk_this = F.topk(iou_gt2a_this, axis=1, k=pre_anchor_top_n)  # (#gt, top_n)
            an_index_list.append(iou_gt2a_topk_this)
        anchor_index = F.stack(*an_index_list, axis=0)  # (n, #gt, top_n)
        batch_index = F.arange(0, num_image).reshape((num_image, 1, 1))  # (n, 1, 1)
        batch_index = F.broadcast_like(batch_index, anchor_index)  # (n, #gt, top_n)
        # class labels are 1-based; clamp so padded GTs index class 0
        gt_index = F.maximum(gt_labels - 1, 0).reshape((0, 0, 1))  # (n, #gt, 1)
        gt_index = F.broadcast_like(gt_index, anchor_index)  # (n, #gt, top_n)

        # matched cls
        cls_index = F.stack(*[batch_index, anchor_index, gt_index], axis=0)  # (3, n, #gt, top_n)
        matched_cls_prob = F.gather_nd(cls_prob, cls_index)  # (n, #gt, top_n)

        # matched bbox
        bbox_index = F.stack(*[batch_index, anchor_index], axis=0)  # (2, n, #gt, top_n)
        matched_bbox_pred = F.gather_nd(bbox_pred, bbox_index)  # (n, #gt, top_n, 4)
        matched_anchors = F.gather_nd(anchors, bbox_index)  # (n, #gt, top_n, 4)
        matched_gt_bboxes = F.reshape(gt_bboxes, (0, 0, -1, 4))  # (n, #gt, 1, 4)
        matched_gt_bboxes = F.broadcast_like(
            matched_gt_bboxes, matched_anchors)  # (n, #gt, top_n, 4)
        bbox_targets = _bbox_encode(
            F, matched_anchors, matched_gt_bboxes, target_means, target_stds)  # (n, #gt, top_n, 4)
        # localization quality converted to a probability via exp(-loss)
        scalar = 0.11
        bbox_loss_weight = 0.75
        bbox_loss = F.smooth_l1(
            matched_bbox_pred - bbox_targets, scalar=math.sqrt(1 / scalar))  # (n, #gt, top_n, 4)
        bbox_loss = bbox_loss * bbox_loss_weight  # (n, #gt, top_n, 4)
        matched_box_prob = F.exp(-F.sum(bbox_loss, axis=-1))  # (n, #gt, top_n)

        # positive part of the loss
        matched_prob = matched_cls_prob * matched_box_prob  # (n, #gt, top_n)
        # padded (label <= 0) GT rows contribute probability 1, ie zero loss
        valid = (gt_labels > 0).reshape((0, 0, 1))  # (n, #gt, 1)
        valid = F.broadcast_like(valid, matched_prob)  # (n, #gt, top_n)
        matched_prob = F.where(valid, matched_prob, F.ones_like(matched_prob))  # (n, #gt, top_n)
        # soft-max style weighting of the bag members (mean-max function)
        prob_weight = 1. / F.maximum(1. - matched_prob, 1e-12)  # (n, #gt, top_n)
        prob_weight = F.broadcast_div(prob_weight, F.sum(
            prob_weight, axis=-1, keepdims=True))  # (n, #gt, top_n)
        bag_prob = F.sum(prob_weight * matched_prob, axis=-1)  # (n, #gt)
        positive_loss = - alpha * F.log(F.clip(bag_prob, 1e-12, 1.))  # (n, #gt)
        # normalize by the number of real (non-padded) GT boxes
        positive_loss = F.broadcast_div(
            positive_loss, F.maximum(F.sum(gt_labels > 0), 1))  # (n, #gt)
        positive_loss = F.sum(positive_loss)
    return positive_loss
def _negative_loss(F, anchors, gt_bboxes, cls_prob, bbox_pred, im_infos, num_image,
                   num_class, alpha, gamma, pre_anchor_top_n, bbox_thr, target_means, target_stds):
    """
    Negative part of the loss: anchors are down-weighted as negatives in
    proportion to how well their decoded boxes cover some GT box, then a
    focal-style term penalizes the remaining background probability.

    inputs:
        F: symbol or ndarray
        anchors: F, (#img, H * W * #anchor, 4)
        gt_bboxes: F, (#img, #gt, 5) - last channel is the class label
        cls_prob: F, (#img, H * W * #anchor, #class)
        bbox_pred: F, (#img, H * W * #anchor, 4)
        im_infos: F, (#img, 3)
        num_image: int
        num_class: int - includes background; one-hot uses num_class - 1
        alpha: float - positive/negative balance factor
        gamma: float - focal loss exponent
        pre_anchor_top_n: int - bag size used for loss normalization
        bbox_thr: float - IoU threshold below which an anchor is pure background
        target_means: list, (4,)
        target_stds: list, (4,)
    outputs:
        negative_loss: F, (1,)
    """
    with mx.name.Prefix("free_anchor_neg_target: "):
        gt_labels = F.slice_axis(gt_bboxes, axis=-1, begin=4, end=5)  # (n, #gt, 1)
        gt_bboxes = F.slice_axis(gt_bboxes, axis=-1, begin=0, end=4)  # (n, #gt, 4)
        # decode predictions into image-space boxes before IoU scoring
        pred_bboxes = decode_bbox(F, anchors, bbox_pred, im_infos,
                                  target_means, target_stds, True)  # (n, h * w * anchor, 4)
        iou_gt2pred_list = list()
        for i in range(num_image):
            gt_bboxes_this = F.slice_axis(gt_bboxes, axis=0, begin=i, end=i + 1).reshape([-1, 4])
            pred_bboxes_this = F.slice_axis(
                pred_bboxes, axis=0, begin=i, end=i + 1).reshape([-1, 4])
            iou_gt2pred_this = F.contrib.box_iou(
                gt_bboxes_this, pred_bboxes_this, format="corner")  # (#gt, h * w * #anchor)
            iou_gt2pred_list.append(iou_gt2pred_this)
        iou_gt2pred = F.stack(*iou_gt2pred_list, axis=0)  # (n, #gt, h * w * #anchor)

        # select positive boxes after decoding: IoU is rescaled linearly
        # from [t1, per-GT max IoU] onto [0, 1]
        t1 = bbox_thr
        t2 = F.maximum(F.max(iou_gt2pred, axis=2, keepdims=True), t1 + 1e-12)  # (n, #gt, 1)
        gt_pred_prob = F.clip(F.broadcast_div(iou_gt2pred - t1, t2 - t1),
                              a_min=0., a_max=1.)  # (n, #gt, h * w * #anchor)

        # box prob: each anchor takes the class of its best-matching GT box
        gt_index = F.argmax(gt_pred_prob, axis=1)  # (n, h * w * #anchor)
        batch_index = F.arange(0, num_image).reshape((num_image, 1))  # (n, 1)
        batch_index = F.broadcast_like(batch_index, gt_index)  # (n, h * w * #anchor)
        gt_labels_index = F.stack(*[batch_index, gt_index])  # (2, n, h * w * #anchor)
        gt_labels_reshape = gt_labels.reshape((0, -1))  # (n, #gt)
        gt_labels_gather = F.gather_nd(gt_labels_reshape, gt_labels_index)  # (n, h * w * #anchor)
        cls_index = gt_labels_gather - 1  # (n, h * w * #anchor)
        one_hot = F.one_hot(cls_index, depth=num_class - 1)  # (n, h * w * #anchor, #class)
        gt_pred_prob_gather = F.max(gt_pred_prob, axis=1).reshape(
            (0, 0, 1))  # (n, h * w * #anchor, 1)
        box_prob = F.broadcast_mul(one_hot, gt_pred_prob_gather)  # (n, h * w * #anchor, #class)
        # box_prob is a target, not a learnable path: stop its gradient
        box_prob = F.BlockGrad(box_prob)

        # negative part of the loss
        prob = cls_prob * (1. - box_prob)  # (n, h * w * #anchor, #class)
        # anchors whose best GT is padded (label <= 0) contribute nothing
        valid = (gt_labels_gather > 0).reshape((0, 0, 1))  # (n, h * w * #anchor, 1)
        valid = F.broadcast_like(valid, prob)  # (n, h * w * #anchor, #class)
        prob = F.where(valid, prob, F.zeros_like(prob))  # (n, h * w * #anchor, #class)
        # focal-style penalty on residual foreground probability
        negative_loss = - prob ** gamma * \
            F.log(F.clip(1. - prob, 1e-12, 1.))  # (n, h * w * #anchor, #class)
        negative_loss = (1. - alpha) * negative_loss
        # normalize by (#real GT boxes) * (bag size)
        negative_loss = F.broadcast_div(negative_loss, (F.maximum(
            F.sum(gt_labels > 0) * pre_anchor_top_n, 1)))  # (n, h * w * #anchor, #class)
        negative_loss = F.sum(negative_loss)
    return negative_loss
def _proposal_retina(
F=mx.ndarray,
cls_prob=None,
bbox_pred=None,
anchors=None,
im_info=None,
batch_size=1,
rpn_pre_nms_top_n=None,
num_class=None,
anchor_mean=None,
anchor_std=None):
"""
inputs:
F: symbol or ndarray
cls_prob: F, (#img, h * w * #anchor, #class)
bbox_pred: F, (#img, h * w * #anchor, 4)
anchors: F, (#img, h * w * #anchor, 4)
im_info: F, (#img, 3)
batch_size: int
rpn_pre_nms_top_n: int
num_class: int
anchor_means: list, (4,)
anchor_stds: list, (4,)
outputs:
sort_bbox: F, (#img, #n, 4)
sort_cls_prob: F, (#img, n, #class)
"""
# slice anchor
anchors = F.slice_like(anchors, cls_prob, axes=(2, 3)) # (1, 1, h, w, #anchor * 4)
anchors = F.reshape(anchors, shape=(-3, | |
<gh_stars>1-10
#! /usr/bin/env python3
#
# Copyright 2020 California Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ISOFIT: Imaging Spectrometer Optimal FITting
# Author: <NAME>, <EMAIL>
#
from scipy.linalg import inv
from isofit.core.instrument import Instrument
from spectral.io import envi
from scipy.spatial import KDTree
import numpy as np
import logging
import time
import matplotlib
import pylab as plt
from isofit.configs import configs
import multiprocessing
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
from sklearn.decomposition import PCA
plt.switch_backend("Agg")
def _write_bil_chunk(dat: np.array, outfile: str, line: int, shape: tuple, dtype: str = 'float32') -> None:
"""
Write a chunk of data to a binary, BIL formatted data cube.
Args:
dat: data to write
outfile: output file to write to
line: line of the output file to write to
shape: shape of the output file
dtype: output data type
Returns:
None
"""
outfile = open(outfile, 'rb+')
outfile.seek(line * shape[1] * shape[2] * np.dtype(dtype).itemsize)
outfile.write(dat.astype(dtype).tobytes())
outfile.close()
def _run_chunk(start_line: int, stop_line: int, reference_radiance_file: str, reference_atm_file: str,
               reference_locations_file: str, input_radiance_file: str,
               input_locations_file: str, segmentation_file: str, isofit_config: dict, output_reflectance_file: str,
               output_uncertainty_file: str, radiance_factors: np.array, nneighbors: int,
               nodata_value: float) -> None:
    """
    Interpolate atmospheric coefficients for a block of image lines using
    per-segment Gaussian processes, and write the resulting reflectance
    (and, eventually, uncertainty) rows to the output files.

    Args:
        start_line: line to start empirical line run at
        stop_line: line to stop empirical line run at
        reference_radiance_file: source file for radiance (interpolation built from this)
        reference_atm_file: source file for atmosphere coefficients (interpolation built from this)
        reference_locations_file: source file for file locations (lon, lat, elev), (interpolation built from this)
        input_radiance_file: input radiance file (interpolate over this)
        input_locations_file: input location file (interpolate over this)
        segmentation_file: input file noting the per-pixel segmentation used (may be empty)
        isofit_config: dictionary-style isofit configuration
        output_reflectance_file: location to write output reflectance to
        output_uncertainty_file: location to write output uncertainty to
        radiance_factors: radiance adjustment factors
        nneighbors: number of neighbors to use for interpolation
        nodata_value: nodata value of input and output

    Returns:
        None
    """
    # Load reference images
    reference_radiance_img = envi.open(reference_radiance_file + '.hdr', reference_radiance_file)
    reference_atm_img = envi.open(reference_atm_file + '.hdr', reference_atm_file)
    reference_locations_img = envi.open(reference_locations_file + '.hdr', reference_locations_file)

    n_reference_lines, n_radiance_bands, n_reference_columns = [int(reference_radiance_img.metadata[n])
                                                                for n in ('lines', 'bands', 'samples')]

    # Load input images
    input_radiance_img = envi.open(input_radiance_file + '.hdr', input_radiance_file)
    n_input_lines, n_input_bands, n_input_samples = [int(input_radiance_img.metadata[n])
                                                     for n in ('lines', 'bands', 'samples')]
    wl = np.array([float(w) for w in input_radiance_img.metadata['wavelength']])

    input_locations_img = envi.open(input_locations_file + '.hdr', input_locations_file)
    n_location_bands = int(input_locations_img.metadata['bands'])

    # Load output images
    output_reflectance_img = envi.open(output_reflectance_file + '.hdr', output_reflectance_file)
    output_uncertainty_img = envi.open(output_uncertainty_file + '.hdr', output_uncertainty_file)
    n_output_reflectance_bands = int(output_reflectance_img.metadata['bands'])
    n_output_uncertainty_bands = int(output_uncertainty_img.metadata['bands'])

    # Load reference data
    reference_locations_mm = reference_locations_img.open_memmap(interleave='source', writable=False)
    reference_locations = np.array(reference_locations_mm[:, :, :]).reshape((n_reference_lines, n_location_bands))

    reference_radiance_mm = reference_radiance_img.open_memmap(interleave='source', writable=False)
    reference_radiance = np.array(reference_radiance_mm[:, :, :]).reshape((n_reference_lines, n_radiance_bands))

    # The atmosphere file packs 5 coefficient sets per line, each
    # n_radiance_bands wide: rhoatm, sphalb, transm, solirr, coszen.
    reference_atm_mm = reference_atm_img.open_memmap(interleave='source', writable=False)
    reference_atm = np.array(reference_atm_mm[:, :, :]).reshape((n_reference_lines, n_radiance_bands * 5))
    rhoatm = reference_atm[:, :n_radiance_bands]
    sphalb = reference_atm[:, n_radiance_bands:(n_radiance_bands * 2)]
    transm = reference_atm[:, (n_radiance_bands * 2):(n_radiance_bands * 3)]
    solirr = reference_atm[:, (n_radiance_bands * 3):(n_radiance_bands * 4)]
    coszen = reference_atm[:, (n_radiance_bands * 4):(n_radiance_bands * 5)]

    # Load segmentation data
    if segmentation_file:
        segmentation_img = envi.open(segmentation_file + '.hdr', segmentation_file)
        segmentation_img = segmentation_img.read_band(0)
    else:
        segmentation_img = None

    # Prepare instrument model, if available (used by the currently-disabled
    # uncertainty calculation below)
    if isofit_config is not None:
        config = configs.create_new_config(isofit_config)
        instrument = Instrument(config)
        logging.info('Loading instrument')
    else:
        instrument = None

    # Load radiance factors
    if radiance_factors is None:
        radiance_adjustment = np.ones(n_radiance_bands, )
    else:
        radiance_adjustment = np.loadtxt(radiance_factors)

    # PCA coefficients
    rdn_pca = PCA(n_components=2)
    reference_pca = rdn_pca.fit_transform(reference_radiance * radiance_adjustment)

    # Create the tree to find nearest neighbor segments.
    # Assume (heuristically) that, for distance purposes, 1 m vertically is
    # comparable to 10 m horizontally, and that there are 100 km per latitude
    # degree. This is all approximate of course. Elevation appears in the
    # third element, and the first two are latitude/longitude coordinates
    # The fourth and fifth elements are "spectral distance" determined by the
    # top principal component coefficients
    loc_scaling = np.array([1e5, 1e5, 10, 100, 100])
    scaled_ref_loc = np.concatenate((reference_locations, reference_pca), axis=1) * loc_scaling
    tree = KDTree(scaled_ref_loc)

    # Fit GP parameters on transmissivity of an H2O feature, in the
    # first 400 datapoints
    use = np.arange(min(len(rhoatm), 400))
    h2oband = np.argmin(abs(wl - 940))
    scale = (500, 500, 500, 500, 500)
    bounds = ((100, 2000), (100, 2000), (100, 2000), (100, 2000), (100, 2000))
    kernel = RBF(length_scale=scale, length_scale_bounds=bounds) +\
        WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 0.1))
    gp = GaussianProcessRegressor(kernel=kernel, alpha=0.0, normalize_y=True)
    gp = gp.fit(scaled_ref_loc[use, :], transm[use, h2oband])
    kernel = gp.kernel_

    # Iterate through image. Each segment has its own GP, stored in a
    # hash table indexed by location in the segmentation map
    hash_table = {}

    for row in np.arange(start_line, stop_line):

        # Load inline input data
        input_radiance_mm = input_radiance_img.open_memmap(
            interleave='source', writable=False)
        input_radiance = np.array(input_radiance_mm[row, :, :])
        if input_radiance_img.metadata['interleave'] == 'bil':
            input_radiance = input_radiance.transpose((1, 0))
        input_radiance = input_radiance * radiance_adjustment

        input_locations_mm = input_locations_img.open_memmap(
            interleave='source', writable=False)
        input_locations = np.array(input_locations_mm[row, :, :])
        if input_locations_img.metadata['interleave'] == 'bil':
            input_locations = input_locations.transpose((1, 0))

        output_reflectance_row = np.zeros(input_radiance.shape) + nodata_value
        output_uncertainty_row = np.zeros(input_radiance.shape) + nodata_value

        nspectra, start = 0, time.time()
        for col in np.arange(n_input_samples):

            # Get radiance for this datum; skip nodata pixels before doing
            # any further work (the original ran the PCA transform first).
            my_rdn = input_radiance[col, :]
            if np.all(np.isclose(my_rdn, nodata_value)):
                output_reflectance_row[col, :] = nodata_value
                output_uncertainty_row[col, :] = nodata_value
                continue

            # PCA coordinates and scaled physical location for this datum
            my_pca = rdn_pca.transform(my_rdn[np.newaxis, :])
            my_loc = np.r_[input_locations[col, :], my_pca[0, :]] * loc_scaling

            # Retrieve or build the GP
            gp_rhoatm, gp_sphalb, gp_transm, irr = None, None, None, None
            if segmentation_img is not None:
                hash_idx = segmentation_img[row, col]
            else:
                # BUGFIX: the original indexed segmentation_img unconditionally,
                # crashing whenever segmentation_file was empty.  Without a
                # segmentation map, treat each pixel as its own segment.
                hash_idx = (row, col)
            if hash_idx in hash_table:
                gp_rhoatm, gp_sphalb, gp_transm, irr = hash_table[hash_idx]
            else:
                # There is no GP for this segment, so we build one from
                # the atmospheric coefficients from closest neighbors
                dists, nn = tree.query(my_loc, nneighbors)
                neighbor_rhoatm = rhoatm[nn, :]
                neighbor_transm = transm[nn, :]
                neighbor_sphalb = sphalb[nn, :]
                neighbor_coszen = coszen[nn, :]
                neighbor_solirr = solirr[nn, :]
                neighbor_locs = scaled_ref_loc[nn, :]

                # Create a new GP using the optimized parameters as a fixed kernel
                gp_rhoatm = GaussianProcessRegressor(kernel=kernel, alpha=0.0,
                                                     normalize_y=True, optimizer=None)
                gp_rhoatm.fit(neighbor_locs, neighbor_rhoatm)
                gp_sphalb = GaussianProcessRegressor(kernel=kernel, alpha=0.0,
                                                     normalize_y=True, optimizer=None)
                gp_sphalb.fit(neighbor_locs, neighbor_sphalb)
                gp_transm = GaussianProcessRegressor(kernel=kernel, alpha=0.0,
                                                     normalize_y=True, optimizer=None)
                gp_transm.fit(neighbor_locs, neighbor_transm)

                # NOTE(review): irradiance uses reference row 1 rather than the
                # neighbor subset gathered above (neighbor_solirr/neighbor_coszen
                # are unused) — looks suspicious; confirm this is intended.
                irr = solirr[1, :] * coszen[1, :]
                irr[irr < 1e-8] = 1e-8

                hash_table[hash_idx] = (gp_rhoatm, gp_sphalb, gp_transm, irr)

            my_rhoatm = gp_rhoatm.predict(my_loc[np.newaxis, :])
            my_sphalb = gp_sphalb.predict(my_loc[np.newaxis, :])
            my_transm = gp_transm.predict(my_loc[np.newaxis, :])

            # Invert the simple atmospheric model to retrieve reflectance
            my_rho = (my_rdn * np.pi) / irr
            my_rfl = 1.0 / (my_transm / (my_rho - my_rhoatm) + my_sphalb)
            output_reflectance_row[col, :] = my_rfl

            # Calculate uncertainties. Sy approximation rather than Seps for
            # speed, for now... but we do take into account instrument
            # radiometric uncertainties
            #output_uncertainty_row[col, :] = np.zeros()
            #if instrument is None:
            #else:
            #    Sy = instrument.Sy(x, geom=None)
            #    calunc = instrument.bval[:instrument.n_chan]
            #    output_uncertainty_row[col, :] = np.sqrt(
            #        np.diag(Sy) + pow(calunc * x, 2)) * bhat[:, 1]
            # if loglevel == 'DEBUG':
            #    plot_example(xv, yv, bhat)

            nspectra = nspectra + 1

        # Guard against division by a zero elapsed time on very fast rows
        elapsed = max(float(time.time() - start), 1e-9)
        logging.info('row {}/{}, ({}/{} local), {} spectra per second'.format(row, n_input_lines, int(row - start_line),
                                                                              int(stop_line - start_line),
                                                                              round(float(nspectra) / elapsed, 2)))

        del input_locations_mm
        del input_radiance_mm

        # Write both rows back out in BIL order: (1, bands, samples)
        output_reflectance_row = output_reflectance_row.transpose((1, 0))
        output_uncertainty_row = output_uncertainty_row.transpose((1, 0))
        shp = output_reflectance_row.shape
        output_reflectance_row = output_reflectance_row.reshape((1, shp[0], shp[1]))
        shp = output_uncertainty_row.shape
        output_uncertainty_row = output_uncertainty_row.reshape((1, shp[0], shp[1]))

        _write_bil_chunk(output_reflectance_row, output_reflectance_file, row,
                         (n_input_lines, n_output_reflectance_bands, n_input_samples))
        _write_bil_chunk(output_uncertainty_row, output_uncertainty_file, row,
                         (n_input_lines, n_output_uncertainty_bands, n_input_samples))
def interpolate_atmosphere(reference_radiance_file: str, reference_atm_file: str,
reference_locations_file: str, segmentation_file: str, input_radiance_file: str,
input_locations_file: str, output_reflectance_file: str, output_uncertainty_file: str,
nneighbors: int = 15, nodata_value: float = -9999.0, level: str = 'INFO',
radiance_factors: np.array = None, isofit_config: dict = None, n_cores: int = -1) -> None:
"""
Perform a Gaussian process interpolation of atmospheric parameters. It relies on precalculated
atmospheric coefficients at a subset of spatial locations stored in a file. The file has
each coefficient defined for every radiance channel, appearing in the order: (1) atmospheric
path reflectance; (2) spherical sky albedo; (3) total diffuse and direct transmittance of the
two-part downwelling and upwelling path; (4) extraterrestrial solar irradiance; (5) cosine of solar
zenith angle.
Args:
reference_radiance_file: source file for radiance (interpolation built from this)
reference_atm_file: source file for atmospheric coefficients (interpolation from this)
reference_locations_file: source file for file locations (lon, lat, elev), (interpolation from this)
segmentation_file: input file noting the per-pixel segmentation | |
<reponame>CarycaKatarzyna/pytest<filename>src/_pytest/config/__init__.py
""" command line options, ini-file and conftest.py processing. """
import argparse
import copy
import enum
import inspect
import os
import shlex
import sys
import types
import warnings
from functools import lru_cache
from types import TracebackType
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Union
import attr
import py
from packaging.version import Version
from pluggy import HookimplMarker
from pluggy import HookspecMarker
from pluggy import PluginManager
import _pytest._code
import _pytest.deprecated
import _pytest.hookspec # the extension point definitions
from .exceptions import PrintHelp
from .exceptions import UsageError
from .findpaths import determine_setup
from .findpaths import exists
from _pytest._code import ExceptionInfo
from _pytest._code import filter_traceback
from _pytest._io import TerminalWriter
from _pytest.compat import importlib_metadata
from _pytest.compat import TYPE_CHECKING
from _pytest.outcomes import fail
from _pytest.outcomes import Skipped
from _pytest.pathlib import Path
from _pytest.store import Store
from _pytest.warning_types import PytestConfigWarning
if TYPE_CHECKING:
from typing import Type
from .argparsing import Argument
_PluggyPlugin = object
"""A type to represent plugin objects.
Plugins can be any namespace, so we can't narrow it down much, but we use an
alias to make the intent clear.
Ideally this type would be provided by pluggy itself."""
hookimpl = HookimplMarker("pytest")
hookspec = HookspecMarker("pytest")
class ExitCode(enum.IntEnum):
    """Encodes the valid exit codes by pytest.

    .. versionadded:: 5.0

    Currently users and plugins may supply other exit codes as well.
    """

    #: all collected tests passed
    OK = 0
    #: one or more tests failed
    TESTS_FAILED = 1
    #: the test run was interrupted
    INTERRUPTED = 2
    #: an internal error got in the way
    INTERNAL_ERROR = 3
    #: pytest was invoked incorrectly
    USAGE_ERROR = 4
    #: pytest could not find any tests to run
    NO_TESTS_COLLECTED = 5
class ConftestImportFailure(Exception):
    """Raised when importing a conftest.py fails; keeps the offending path and exc info."""

    def __init__(self, path, excinfo):
        super().__init__(path, excinfo)
        self.path = path
        self.excinfo = excinfo  # type: Tuple[Type[Exception], Exception, TracebackType]
def main(args=None, plugins=None) -> Union[int, ExitCode]:
    """Perform an in-process test run and return its exit code.

    :arg args: list of command line arguments.
    :arg plugins: list of plugin objects to be auto-registered during
        initialization.
    """
    try:
        try:
            config = _prepareconfig(args, plugins)
        except ConftestImportFailure as exc:
            # Report conftest import problems as a usage error.
            info = ExceptionInfo(exc.excinfo)
            writer = TerminalWriter(sys.stderr)
            writer.line(
                "ImportError while loading conftest '{e.path}'.".format(e=exc), red=True
            )
            info.traceback = info.traceback.filter(filter_traceback)
            if info.traceback:
                representation = info.getrepr(style="short", chain=False)
            else:
                representation = info.exconly()
            for tb_line in str(representation).splitlines():
                writer.line(tb_line.rstrip(), red=True)
            return ExitCode.USAGE_ERROR
        try:
            code = config.hook.pytest_cmdline_main(
                config=config
            )  # type: Union[ExitCode, int]
            # Normalize recognized values to ExitCode; pass others through.
            try:
                return ExitCode(code)
            except ValueError:
                return code
        finally:
            config._ensure_unconfigure()
    except UsageError as exc:
        writer = TerminalWriter(sys.stderr)
        for message in exc.args:
            writer.line("ERROR: {}\n".format(message), red=True)
        return ExitCode.USAGE_ERROR
def console_main() -> int:
    """pytest's CLI entry point.

    This function is not meant for programmable use; use `main()` instead.
    """
    # https://docs.python.org/3/library/signal.html#note-on-sigpipe
    try:
        exit_code = main()
        sys.stdout.flush()
    except BrokenPipeError:
        # Python flushes standard streams on exit; redirect remaining output
        # to devnull to avoid another BrokenPipeError at shutdown
        devnull_fd = os.open(os.devnull, os.O_WRONLY)
        os.dup2(devnull_fd, sys.stdout.fileno())
        return 1  # Python exits with error code 1 on EPIPE
    else:
        return exit_code
class cmdline:  # compatibility namespace
    """Compatibility namespace exposing :func:`main` as ``pytest.cmdline.main``."""

    main = staticmethod(main)
def filename_arg(path, optname):
    """Argparse type validator for filename arguments.

    :path: path of filename
    :optname: name of the option
    """
    if not os.path.isdir(path):
        return path
    raise UsageError("{} must be a filename, given: {}".format(optname, path))
def directory_arg(path, optname):
    """Argparse type validator for directory arguments.

    :path: path of directory
    :optname: name of the option
    """
    if os.path.isdir(path):
        return path
    raise UsageError("{} must be a directory, given: {}".format(optname, path))
# Plugins that cannot be disabled via "-p no:X" currently.
essential_plugins = (
    "mark",
    "main",
    "runner",
    "fixtures",
    "helpconfig",  # Provides -p.
)

# Plugins loaded on every run; a superset of the essential ones.
default_plugins = essential_plugins + (
    "python",
    "terminal",
    "debugging",
    "unittest",
    "capture",
    "skipping",
    "tmpdir",
    "monkeypatch",
    "recwarn",
    "pastebin",
    "nose",
    "assertion",
    "junitxml",
    "resultlog",
    "doctest",
    "cacheprovider",
    "freeze_support",
    "setuponly",
    "setupplan",
    "stepwise",
    "warnings",
    "logging",
    "reports",
    "faulthandler",
)

# All plugins shipped with pytest itself; "pytester" ships with pytest but is
# not loaded by default.
builtin_plugins = set(default_plugins)
builtin_plugins.add("pytester")
def get_config(args=None, plugins=None):
    """Build a fresh Config with the default plugins imported."""
    # subsequent calls to main will create a fresh instance
    manager = PytestPluginManager()
    invocation = Config.InvocationParams(
        args=args or (), plugins=plugins, dir=Path().resolve()
    )
    config = Config(manager, invocation_params=invocation)

    if args is not None:
        # Handle any "-p no:plugin" args.
        manager.consider_preparse(args, exclude_only=True)

    for spec in default_plugins:
        manager.import_plugin(spec)

    return config
def get_plugin_manager():
    """
    Obtain a new instance of the
    :py:class:`_pytest.config.PytestPluginManager`, with default plugins
    already loaded.

    This function can be used by integration with other tools, like hooking
    into pytest to run tests into an IDE.
    """
    config = get_config()
    return config.pluginmanager
def _prepareconfig(
    args: Optional[Union[py.path.local, List[str]]] = None, plugins=None
):
    """Normalize *args*, build a Config, register *plugins*, and run the
    ``pytest_cmdline_parse`` hook, unconfiguring on any failure."""
    if args is None:
        args = sys.argv[1:]
    elif isinstance(args, py.path.local):
        args = [str(args)]
    elif not isinstance(args, list):
        raise TypeError(
            "`args` parameter expected to be a list of strings, got: {!r} (type: {})".format(
                args, type(args)
            )
        )

    config = get_config(args, plugins)
    pluginmanager = config.pluginmanager
    try:
        if plugins:
            for plugin in plugins:
                if isinstance(plugin, str):
                    pluginmanager.consider_pluginarg(plugin)
                else:
                    pluginmanager.register(plugin)
        return pluginmanager.hook.pytest_cmdline_parse(
            pluginmanager=pluginmanager, args=args
        )
    except BaseException:
        config._ensure_unconfigure()
        raise
def _fail_on_non_top_pytest_plugins(conftestpath, confcutdir):
    """Abort collection: ``pytest_plugins`` was found in a non-top-level conftest."""
    template = (
        "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n"
        "It affects the entire test suite instead of just below the conftest as expected.\n"
        " {}\n"
        "Please move it to a top level conftest file at the rootdir:\n"
        " {}\n"
        "For more information, visit:\n"
        " https://docs.pytest.org/en/latest/deprecations.html#pytest-plugins-in-non-top-level-conftest-files"
    )
    fail(template.format(conftestpath, confcutdir), pytrace=False)
class PytestPluginManager(PluginManager):
"""
Overwrites :py:class:`pluggy.PluginManager <pluggy.PluginManager>` to add pytest-specific
functionality:
* loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and
``pytest_plugins`` global variables found in plugins being loaded;
* ``conftest.py`` loading during start-up;
"""
    def __init__(self):
        """Set up hookspecs, register this manager as its own plugin, and
        optionally enable hook-call tracing when PYTEST_DEBUG is set."""
        # Local import — presumably avoids a circular import; confirm.
        import _pytest.assertion
        super().__init__("pytest")
        # The objects are module objects, only used generically.
        self._conftest_plugins = set()  # type: Set[object]
        # state related to local conftest plugins
        # Maps a py.path.local to a list of module objects.
        self._dirpath2confmods = {}  # type: Dict[Any, List[object]]
        # Maps a py.path.local to a module object.
        self._conftestpath2mod = {}  # type: Dict[Any, object]
        self._confcutdir = None
        self._noconftest = False
        # Set of py.path.local's.
        self._duplicatepaths = set()  # type: Set[Any]
        self.add_hookspecs(_pytest.hookspec)
        # Registering self makes this manager's own pytest_* methods
        # (e.g. pytest_configure below) act as hook implementations.
        self.register(self)
        if os.environ.get("PYTEST_DEBUG"):
            err = sys.stderr
            encoding = getattr(err, "encoding", "utf8")
            try:
                # Duplicate stderr as a line-buffered stream for trace output;
                # fall back to the raw stderr if duplication fails.
                err = open(
                    os.dup(err.fileno()), mode=err.mode, buffering=1, encoding=encoding,
                )
            except Exception:
                pass
            self.trace.root.setwriter(err.write)
            self.enable_tracing()
        # Config._consider_importhook will set a real object if required.
        self.rewrite_hook = _pytest.assertion.DummyRewriteHook()
        # Used to know when we are importing conftests after the pytest_configure stage
        self._configured = False
def parse_hookimpl_opts(self, plugin, name):
# pytest hooks are always prefixed with pytest_
# so we avoid accessing possibly non-readable attributes
# (see issue #1073)
if not name.startswith("pytest_"):
return
# ignore names which can not be hooks
if name == "pytest_plugins":
return
method = getattr(plugin, name)
opts = super().parse_hookimpl_opts(plugin, name)
# consider only actual functions for hooks (#3775)
if not inspect.isroutine(method):
return
# collect unmarked hooks as long as they have the `pytest_' prefix
if opts is None and name.startswith("pytest_"):
opts = {}
if opts is not None:
# TODO: DeprecationWarning, people should use hookimpl
# https://github.com/pytest-dev/pytest/issues/4562
known_marks = {m.name for m in getattr(method, "pytestmark", [])}
for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
opts.setdefault(name, hasattr(method, name) or name in known_marks)
return opts
def parse_hookspec_opts(self, module_or_class, name):
opts = super().parse_hookspec_opts(module_or_class, name)
if opts is None:
method = getattr(module_or_class, name)
if name.startswith("pytest_"):
# todo: deprecate hookspec hacks
# https://github.com/pytest-dev/pytest/issues/4562
known_marks = {m.name for m in getattr(method, "pytestmark", [])}
opts = {
"firstresult": hasattr(method, "firstresult")
or "firstresult" in known_marks,
"historic": hasattr(method, "historic")
or "historic" in known_marks,
}
return opts
def register(self, plugin, name=None):
if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS:
warnings.warn(
PytestConfigWarning(
"{} plugin has been merged into the core, "
"please remove it from your requirements.".format(
name.replace("_", "-")
)
)
)
return
ret = super().register(plugin, name)
if ret:
self.hook.pytest_plugin_registered.call_historic(
kwargs=dict(plugin=plugin, manager=self)
)
if isinstance(plugin, types.ModuleType):
self.consider_module(plugin)
return ret
    def getplugin(self, name):
        """Deprecated alias for :meth:`get_plugin`."""
        # support deprecated naming because plugins (xdist e.g.) use it
        return self.get_plugin(name)
def hasplugin(self, name):
"""Return True if the plugin with the given name is registered."""
return bool(self.get_plugin(name))
def pytest_configure(self, config):
# XXX now that the pluginmanager exposes hookimpl(tryfirst...)
# we should remove tryfirst/trylast as markers
config.addinivalue_line(
"markers",
"tryfirst: mark a hook implementation function such that the "
"plugin machinery will try to call it first/as early as possible.",
)
config.addinivalue_line(
"markers",
"trylast: mark a hook implementation function such that the "
"plugin machinery will try to call it last/as late as possible.",
)
self._configured = True
#
# internal API for local conftest plugin handling
#
def _set_initial_conftests(self, namespace):
""" load initial conftest files given a preparsed "namespace".
As conftest files may add their own command | |
XML control characters,
# and semicolon for good measure (as the closer for escaped characters)
index += 1
else:
break
if index == 127:
print('PTX:WARNING: Could not find delimiter for verbatim expression')
return '!Could not find delimiter for verbatim expression.!'
else:
response_text += item.replace(original_delimiter, chr(index))
else:
# These three characters are escaped in both TeX and MathJax
text_content = verbatim_content.replace('$', '\\$')
text_content = text_content.replace('{', '\\{')
text_content = text_content.replace('}', '\\}')
response_text += '\\text{' + text_content + '}'
else:
response_text += item
# Check for errors with PG processing
# Get booleans signaling badness: file_empty, no_compile, bad_xml, no_statement
file_empty = 'ERROR: This problem file was empty!' in response_text
no_compile = 'ERROR caught by Translator while processing problem file:' in response_text
bad_xml = False
no_statement = False
try:
from xml.etree import ElementTree
except ImportError:
msg = 'PTX:ERROR: failed to import ElementTree from xml.etree'
raise ValueError(msg)
try:
problem_root = ElementTree.fromstring(response_text)
except:
bad_xml = True
if not bad_xml:
if problem_root.find('.//statement') is None:
no_statement = True
badness = file_empty or no_compile or bad_xml or no_statement
# Custom responses for each type of badness
# message for terminal log
# tip reminding about -a (abort) option
# value for @failure attribute in static element
# base64 for a shell PG problem that simply indicates there was an issue and says what the issue was
if file_empty:
badness_msg = "PTX:ERROR: WeBWorK problem {} was empty\n"
badness_tip = ''
badness_type = 'empty'
badness_base64 = '<KEY>'
elif no_compile:
badness_msg = "PTX:ERROR: WeBWorK problem {} with seed {} did not compile \n{}\n"
badness_tip = ' Use -a to halt with full PG and returned content' if (origin[problem] == 'ptx') else ' Use -a to halt with returned content'
badness_type = 'compile'
badness_base64 = '<KEY>FTkRET0NVTUVOVCgpOw%3D%3D'
elif bad_xml:
badness_msg = "PTX:ERROR: WeBWorK problem {} with seed {} does not return valid XML \n It may not be PTX compatible \n{}\n"
badness_tip = ' Use -a to halt with returned content'
badness_type = 'xml'
badness_base64 = '<KEY>'
elif no_statement:
badness_msg = "PTX:ERROR: WeBWorK problem {} with seed {} does not have a statement tag \n Maybe it uses something other than BEGIN_TEXT or BEGIN_PGML to print the statement in its PG code \n{}\n"
badness_tip = ' Use -a to halt with returned content'
badness_type = 'statement'
badness_base64 = '<KEY>ZXh0KCdOdW1lcmljJyk7CgpCRUdJTl9QR01MCldlQldvcksgUHJvYmxlbSBEaWQgTm90IEhhdmUgYSBbfHN0YXRlbWVudHxdKiBUYWcKCkVORF9QR01MCgpFTkRET0NVTUVOVCgpOw%3D%3D'
# If we are aborting upon recoverable errors...
if abort_early:
if badness:
debugging_help = response_text
if origin[problem] == 'ptx' and no_compile:
debugging_help += "\n" + pg[problem]
raise ValueError(badness_msg.format(problem_identifier, seed[problem], debugging_help))
# If there is "badness"...
# Build 'shell' problems to indicate failures
if badness:
print(badness_msg.format(problem_identifier, seed[problem], badness_tip))
static_skeleton = "<static failure='{}'>\n<statement>\n <p>\n {} </p>\n</statement>\n</static>\n"
static[problem] = static_skeleton.format(badness_type, badness_msg.format(problem_identifier, seed[problem], badness_tip))
else:
# add to dictionary
static[problem] = response_text
# strip out actual PTX code between markers
start = start_marker.split(static[problem], maxsplit=1)
static[problem] = start[1]
end = end_marker.split(static[problem], maxsplit=1)
static[problem] = end[0]
# change element from webwork to static and indent
static[problem] = static[problem].replace('<webwork>', '<static>')
static[problem] = static[problem].replace('</webwork>', '</static>')
# Convert answerhashes XML to a sequence of answer elements
# This is crude text operation on the XML
# If correct_ans_latex_string is nonempty, use it, encased in <p><m>
# Else if correct_ans is nonempty, use it, encased in just <p>
# Else we have no answer to print out
answerhashes = re.findall(r'<AnSwEr\d+ (.*?) />', static[problem], re.DOTALL)
if answerhashes:
answer = ''
for answerhash in answerhashes:
try:
correct_ans = re.search('correct_ans="(.*?)"', answerhash, re.DOTALL).group(1)
except:
correct_ans = ''
try:
correct_ans_latex_string = re.search('correct_ans_latex_string="(.*?)"', answerhash, re.DOTALL).group(1)
except:
correct_ans_latex_string = ''
if correct_ans_latex_string or correct_ans:
answer += "<answer>\n <p>"
if not correct_ans_latex_string:
answer += correct_ans
else:
answer += '<m>' + correct_ans_latex_string + '</m>'
answer += "</p>\n</answer>\n"
# Now we need to cut out the answerhashes that came from the server.
beforehashes = re.compile('<answerhashes>').split(static[problem])[0]
afterhashes = re.compile('</answerhashes>').split(static[problem])[1]
static[problem] = beforehashes + afterhashes
# We don't just replace it with the answer we just built. To be
# schema-compliant, the answer should come right after the latter of
# (last closing statement, last closing hint)
# By reversing the string, we can just target first match
reverse = static[problem][::-1]
parts = re.split(r"(\n>tnemetats/<|\n>tnih/<)",reverse, 1)
static[problem] = parts[2][::-1] + parts[1][::-1] + answer + parts[0][::-1]
# nice to know what seed was used
static[problem] = static[problem].replace('<static', '<static seed="' + seed[problem] + '"')
# nice to know sourceFilePath for server problems
if origin[problem] == 'server':
static[problem] = static[problem].replace('<static', '<static source="' + source[problem] + '"')
# adjust indentation
static[problem] = re.sub(re.compile('^(?=.)', re.MULTILINE),' ',static[problem]).replace(' <static','<static').replace(' </static','</static')
# remove excess blank lines that come at the end from the server
static[problem] = re.sub(re.compile('\n+( *</static>)', re.MULTILINE),r"\n\1",static[problem])
# need to loop through content looking for images with pattern:
#
# <image source="relative-path-to-temporary-image-on-server"
#
graphics_pattern = re.compile(r'<image.*?source="([^"]*)"')
# replace filenames, download images with new filenames
count = 0
# ww_image_url will be the URL to an image file used by the problem on the ww server
for match in re.finditer(graphics_pattern, static[problem]):
ww_image_url = match.group(1)
# strip away the scheme and location, if present (e.g 'https://webwork-ptx.aimath.org/')
ww_image_url_parsed = urllib.parse.urlparse(ww_image_url)
ww_image_scheme = ww_image_url_parsed.scheme
ww_image_full_path = ww_image_url_parsed.path
count += 1
# split the full path into (path, file). path could theoretically be empty.
ww_image_path, ww_image_filename = os.path.split(ww_image_full_path)
# split the filename into (name, extension). extension can be empty or like '.png'.
ww_image_name, image_extension = os.path.splitext(ww_image_filename)
# rename, eg, webwork-extraction/webwork-5-image-3.png
ptx_image_name = problem + '-image-' + str(count)
ptx_image_filename = ptx_image_name + image_extension
if ww_image_scheme:
image_url = ww_image_url
else:
image_url = server_url + ww_image_full_path
# modify PTX problem source to include local versions
static[problem] = static[problem].replace(ww_image_full_path, 'images/' + ptx_image_filename)
# download actual image files
# http://stackoverflow.com/questions/13137817/how-to-download-image-using-requests
try:
response = session.get(image_url)
except requests.exceptions.RequestException as e:
root_cause = str(e)
msg = "PTX:ERROR: there was a problem downloading an image file,\n URL: {}\n"
raise ValueError(msg.format(image_url) + root_cause)
# and save the image itself
try:
with open(os.path.join(dest_dir, ptx_image_filename), 'wb') as image_file:
image_file.write(response.content)
except Exception as e:
root_cause = str(e)
msg = "PTX:ERROR: there was a problem saving an image file,\n Filename: {}\n"
raise ValueError(os.path.join(dest_dir, ptx_filename) + root_cause)
# place static content
# we open the file in binary mode to preserve the \r characters that may be present
try:
with open(include_file_name, 'ab') as include_file:
include_file.write(bytes(static[problem] + '\n', encoding='utf-8'))
except Exception as e:
root_cause = str(e)
msg = "PTX:ERROR: there was a problem writing a problem to the file: {}\n"
raise ValueError(msg.format(include_file_name) + root_cause)
# Write urls for interactive version
for hint in ['yes','no']:
for solution in ['yes','no']:
hintsol = 'hint_' + hint + '_solution_' + solution
url_tag = ' <server-url hint="{}" solution="{}">{}?courseID={}&userID={}&password={}&course_password={}&answersSubmitted=0&displayMode=MathJax&outputformat=simple&problemSeed={}&{}</server-url>\n\n'
source_selector = 'problemSource=' if (badness or origin[problem] == 'ptx') else 'sourceFilePath='
if badness:
source_value = badness_base64
else:
if origin[problem] == 'server':
source_value = source[problem]
else:
source_value = urllib.parse.quote_plus(pgbase64[hintsol][problem])
source_query = source_selector + source_value
try:
with open(include_file_name, 'a') as include_file:
include_file.write(url_tag.format(hint,solution,wwurl,courseID,userID,password,course_password,seed[problem],source_query))
except Exception as e:
root_cause = str(e)
msg = "PTX:ERROR: there was a problem writing URLs for {} to the file: {}\n"
raise ValueError(msg.format(problem_identifier, include_file_name) + root_cause)
# Write PG. For server problems, just include source as attribute and close pg tag
if origin[problem] == 'ptx':
pg_tag = ' <pg>\n{}\n </pg>\n\n'
if badness:
pg_shell = "DOCUMENT();\nloadMacros('PGstandard.pl','PGML.pl','PGcourse.pl');\nTEXT(beginproblem());\nBEGIN_PGML\n{}END_PGML\nENDDOCUMENT();"
formatted_pg = pg_shell.format(badness_msg.format(problem_identifier, seed[problem], badness_tip))
else:
formatted_pg = pg[problem]
# opportunity to cut out extra blank lines
formatted_pg = re.sub(re.compile(r"(\n *\n)( *\n)*", re.MULTILINE),r"\n\n",formatted_pg)
try:
with open(include_file_name, 'a') as include_file:
include_file.write(pg_tag.format(formatted_pg))
except Exception as e:
root_cause = str(e)
msg = "PTX:ERROR: there was a problem writing the PG for {} to the file: {}\n"
raise ValueError(msg.format(problem_identifier, include_file_name) + root_cause)
elif origin[problem] == 'server':
try:
with open(include_file_name, 'a') as include_file:
pg_tag = ' <pg source="{}" />\n\n'
include_file.write(pg_tag.format(source[problem]))
except Exception as e:
root_cause = str(e)
msg = "PTX:ERROR: there was a problem writing the PG for {} to the file: {}\n"
raise ValueError(msg.format(problem_identifier, include_file_name) + root_cause)
# close webwork-reps tag
try:
with open(include_file_name, 'a') as include_file:
include_file.write(' </webwork-reps>\n\n')
except Exception as e:
root_cause = str(e)
msg = "PTX:ERROR: there | |
which means the docs have to be written that way.
# Maybe someday in the future it should be expanded to handle
# @see foo(), bar(), etc., but I don't have time right now to do it.
docstring = re.sub('(@see\s+)([\w:.]+)\(', r'\1#\2(', docstring)
# Remove the '*' character that Javadoc doesn't want to see in @see's.
# (This doesn't make a difference; javadoc still can't match up the refs.)
# p = re.compile('@see[\s\w.:,()#]+[*][\s\w.:,()*#]')
# docstring = p.sub(removeStar, docstring)
# The syntax for @link is vastly different.
p = re.compile('@link([\s/*]+[\w\s,.:#()*]+[\s/*]*[\w():#]+[\s/*]*)@endlink', re.DOTALL)
docstring = p.sub(r'{@link \1}', docstring)
# Outside of @see and other constructs, dot is used to reference members
# instead of C++'s double colon.
docstring = docstring.replace(r'::', '.')
# Need to escape quotation marks. The reason is that the
# %javamethodmodifiers directives created for use with SWIG will
# themselves be double-quoted strings, and leaving embedded quotes
# will completely screw that up.
docstring = docstring.replace('"', "'")
docstring = docstring.replace(r"'", r"\'")
return docstring
def rewriteDocstringForCSharp (docstring):
    """rewriteDocstringForCSharp (docstring) -> docstring

    Performs some minimal C#-specific sanitizations on the C++/Doxygen
    docstring and returns the result.
    """
    # Rewrite C++ type spellings into their C# equivalents.  Order matters:
    # longer spellings must be handled before their substrings.  (This
    # affects only the documentation comments inside classes & methods,
    # never the actual method signatures.)
    for cpp_text, cs_text in (
        ('const char *',        'string '),
        ('const char* ',        'string '),
        ('an unsigned int',     'a long integer'),
        ('unsigned int',        'long'),
        ('const std::string&',  'string'),
        ('const std::string &', 'string '),
        ('const std::string',   'string'),
        ('std::string',         'string'),
        ('const ',              ''),
        ('NULL',                'null'),
        ('boolean',             'bool'),
    ):
        docstring = docstring.replace(cpp_text, cs_text)
    # Use C# syntax instead of "const XMLNode*" and the like.
    const_ref = re.compile(r'const (%?)(' + r')( ?)(\*|&)', re.DOTALL)
    docstring = const_ref.sub(rewriteClassRefAddingSpace, docstring)
    plain_ref = re.compile(r'(%?)(' + r')( ?)(\*|&)', re.DOTALL)
    docstring = plain_ref.sub(rewriteClassRefAddingSpace, docstring)
    # <code> has its own special meaning in C#: our uses of <code> become
    # <c>, and verbatim blocks take over <code>.
    docstring = re.sub(r'<code>(.+?)</code>', r'<c>\1</c>', docstring, flags=re.DOTALL)
    docstring = re.sub(r'@verbatim(.+?)@endverbatim', r'<code>\1</code>', docstring, flags=re.DOTALL)
    # Redirect references to the constants module used in the documentation.
    docstring = re.sub(r'sbml2matlabConstants([@.])', r'sbml2matlabcs.sbml2matlab\1', docstring)
    # Fix @link for constants that were not conditionalized in the source.
    docstring = re.sub(r'@link +([A-Z_0-9]+?)@endlink', r'@link sbml2matlab.\1@endlink', docstring, flags=re.DOTALL)
    # Math symbols cannot be rendered; substitute ASCII equivalents.
    for formula, ascii_form in (
        (r'\\f\$\\geq\\f\$',   '>='),
        (r'\\f\$\\leq\\f\$',   '<='),
        (r'\\f\$\\times\\f\$', '*'),
    ):
        docstring = re.sub(formula, ascii_form, docstring)
    # Special-cased function references.
    docstring = docstring.replace('SBML_formulaToString()', 'sbml2matlabcs.sbml2matlab.formulaToString()')
    docstring = docstring.replace('SBML_parseFormula()', 'sbml2matlabcs.sbml2matlab.parseFormula()')
    # Escape quotation marks: the output lands inside double-quoted
    # %javamethodmodifiers directives, so embedded quotes would break them.
    docstring = docstring.replace('"', "'")
    docstring = docstring.replace("'", r"\'")
    return docstring
def indentVerbatimForPython (match):
    """Return the matched verbatim block with every non-empty line
    prefixed by a space, working around SWIG's truncation of leading
    characters in docstrings."""
    # '^(.)' under MULTILINE anchors at the first character of each
    # non-empty line; blank lines are left untouched.
    return re.sub('^(.)', r' \1', match.group(), flags=re.MULTILINE)
def rewriteDocstringForPython (docstring):
    """rewriteDocstringForPython (docstring) -> docstring

    Performs some minimal Python-specific sanitizations on the C++/Doxygen
    docstring and returns the result.

    Note: this is not the only processing performed for the Python
    documentation.  In docs/src, the doxygen-based code has an additional,
    more elaborate filter that processes the output of *this* filter.
    """
    # Remove the C++ comment delimiters and each line's leading '*'.
    docstring = docstring.replace('/**', '').replace('*/', '')
    docstring = re.sub(r'^(\s*)\*([ \t]*)', r'\2', docstring, flags=re.MULTILINE)
    # Rewrite C++ type spellings into Python-friendly ones.  Order matters:
    # longer spellings must be handled before their substrings.  (Only the
    # documentation comments are affected, not the method signatures.)
    for cpp_text, py_text in (
        ('const char *',       'string '),
        ('const char* ',       'string '),
        ('an unsigned int',    'a long integer'),
        ('unsigned int',       'long'),
        ('const std::string&', 'string'),
        ('const std::string',  'string'),
        ('std::string',        'string'),
        ('NULL',               'None'),
        ('@c true',            '@c True'),
        ('@c false',           '@c False'),
    ):
        docstring = docstring.replace(cpp_text, py_text)
    # Use Python syntax instead of "const XMLNode*" and the like.
    const_ref = re.compile(r'const (%?)(' + r') ?(\*|&)', re.DOTALL)
    docstring = const_ref.sub(rewriteClassRef, docstring)
    plain_ref = re.compile(r'(%?)(' + r') ?(\*|&)', re.DOTALL)
    docstring = plain_ref.sub(rewriteClassRef, docstring)
    # Escape quotation marks, since the output is embedded in quoted
    # directives later.
    docstring = docstring.replace('"', "'")
    docstring = docstring.replace("'", r"\'")
    # Doxygen only cross-links Python methods written without arguments,
    # so strip argument lists from method references.
    docstring = re.sub(r'(\s+)(\S+?)::(\w+\s*)(\([^)]*?\))', translatePythonCrossRef, docstring, flags=re.MULTILINE)
    docstring = re.sub(r'(@see\s+)(\w+\s*)(\([^)]*?\))', translatePythonSeeRef, docstring)
    # Doxygen escapes HTML character codes, so the hack used for the
    # Javadoc output does not work here; substitute ASCII directly.
    for formula, ascii_form in ((r'\\f\$\\geq\\f\$', '>='),
                                (r'\\f\$\\leq\\f\$', '<='),
                                (r'\\f\$\\times\\f\$', '*')):
        docstring = re.sub(formula, ascii_form, docstring)
    # SWIG truncates leading characters in a way that breaks how we have
    # to format verbatim blocks; re-indenting them works around it.
    docstring = re.sub('@verbatim.+?@endverbatim', indentVerbatimForPython, docstring, flags=re.DOTALL)
    return docstring
def rewriteDocstringForPerl (docstring):
    """rewriteDocstringForPerl (docstring) -> docstring

    Performs some minimal Perl-specific sanitizations on the C++/Doxygen
    docstring, converting Doxygen/HTML markup to POD, and returns the
    result.
    """
    # Drop the /** ... */ delimiters and decorative '*' characters.
    docstring = docstring.replace('/**', '').replace('*/', '').replace('*', ' ')
    # Strip the overall indentation.
    docstring = re.sub(r'^\s+(\S*\s*)', r'\1', docstring, flags=re.MULTILINE)
    # Strip paragraph indentation the previous pass missed.
    docstring = re.sub(r'^[ \t]+(\S)', r'\1', docstring, flags=re.MULTILINE)
    # Turn whitespace-only lines into truly blank lines.
    docstring = re.sub(r'^[ \t]+$', r'', docstring, flags=re.MULTILINE)
    # Drop the doxygen '%foo' anti-autolink quoting.
    docstring = re.sub(r'(\s)%(\w)', r'\1\2', docstring)
    # @c/@p -> C<...>, @em -> I<...>.  Each directive gets two patterns:
    # one for the inline case, and one for when the directive ends a line
    # and its operand starts on the next (after the leading comment '*').
    inline_markup = (
        (r'@c *([^ ,.:;()/*\n\t]+)',                     r'C<\1>'),
        (r'@c(\n[ \t]*\*[ \t]*)([^ ,.:;()/*\n\t]+)',     r'\1C<\2>'),
        (r'@p +([^ ,.:;()/*\n\t]+)',                     r'C<\1>'),
        (r'@p(\n[ \t]*\*[ \t]+)([^ ,.:;()/*\n\t]+)',     r'\1C<\2>'),
        (r'@em *([^ ,.:;()/*\n\t]+)',                    r'I<\1>'),
        (r'@em(\n[ \t]*\*[ \t]*)([^ ,.:;()/*\n\t]+)',    r'\1I<\2>'),
    )
    for pattern, replacement in inline_markup:
        docstring = re.sub(pattern, replacement, docstring)
    # HTML list markup and assorted literals -> POD equivalents; bare
    # angle brackets must be escaped because they are markup in POD.
    for html_text, pod_text in (
        ('<ul>',     '\n=over\n'),
        ('<li> ',    '\n=item\n\n'),
        ('</ul>',    '\n=back\n'),
        ('@return',  'Returns'),
        (' < ',      ' E<lt> '),
        (' > ',      ' E<gt> '),
    ):
        docstring = docstring.replace(html_text, pod_text)
    docstring = re.sub(r'<code>([^<]*)</code>', r'C<\1>', docstring)
    docstring = re.sub(r'<b>([^<]*)</b>', r'B<\1>', docstring)
    return docstring
def processClassMethods(ostream, cclass):
    """Write the documentation strings for the methods of *cclass* to *ostream*.

    In the Python docs, we have to combine the docstring for methods with
    different signatures and write out a single method docstring.  In the
    other languages, we write out separate docstrings for every method
    having a different signature.

    NOTE(review): only the Python branch is visible in this chunk; handling
    for other languages presumably follows -- confirm in the full file.
    """
    if language == 'python':
        # Maps name+args combinations already emitted, so each overload is
        # documented exactly once.
        written = {}
        for m in cclass.methods:
            if m.name + m.args in written:
                continue
            # Skip destructors (names starting with '~').
            if m.name.startswith('~'):
                continue
            if cclass.methodVariants[m.name].__len__() > 1:
                # This method has more than one variant. It's possible some or all
                # of them are marked @internal. Therefore, before we start writing
                # a statement that there are multiple variants, we must check that
                # we're left with more than one non-internal method to document.
                count = 0
                for argVariant in list(cclass.methodVariants[m.name].values()):
                    if re.search('@internal', argVariant.docstring) == None:
                        count += 1
                if count <= 1:
                    continue
                newdoc = ' This method has multiple variants that differ in the' + \
                         ' arguments\n they accept. Each is described separately' + \
                         ' below.\n'
                for argVariant in list(cclass.methodVariants[m.name].values()):
                    # Each entry in the methodVariants dictionary is itself a dictionary.
                    # The dictionary entries are keyed by method arguments (as strings).
                    # The dictionary values are the 'func' objects we use.
                    if re.search('@internal', argVariant.docstring) == None:
                        newdoc += "\n <hr>\n Method variant with the following"\
                            + " signature:\n <pre class='signature'>" \
                            + argVariant.name \
                            + rewriteDocstringForPython(argVariant.args) \
                            + "</pre>\n\n"
                        newdoc += rewriteDocstringForPython(argVariant.docstring)
                    # Mark every variant handled, internal or not.
                    written[argVariant.name + argVariant.args] = 1
            else:
                # Single-signature method: translate its docstring directly.
                newdoc = rewriteDocstringForPython(m.docstring)
            ostream.write(formatMethodDocString(m.name, cclass.name, newdoc, m.isInternal, m.args))
            written[m.name + m.args] = 1
| |
<gh_stars>1-10
import base64
import datetime
import logging
import json
import os
import time
import uuid
import typing
import semver
import aiohttp
from asyncio import shield
from marshmallow.exceptions import ValidationError
from aries_cloudagent.config.injection_context import InjectionContext
from aries_cloudagent.connections.models.connection_record import ConnectionRecord
from aries_cloudagent.connections.models.connection_target import ConnectionTarget
from aries_cloudagent.messaging.models.base import BaseModelError
from aries_cloudagent.messaging.responder import BaseResponder
from aries_cloudagent.messaging.credential_definitions.util import CRED_DEF_SENT_RECORD_TYPE
from aries_cloudagent.core.dispatcher import DispatcherResponder
from aries_cloudagent.transport.inbound.receipt import MessageReceipt
from aries_cloudagent.core.error import BaseError
from aries_cloudagent.storage.base import BaseStorage, StorageRecord
from aries_cloudagent.storage.indy import IndyStorage
from aries_cloudagent.storage.error import StorageNotFoundError, StorageSearchError, StorageDuplicateError, StorageError
from aries_cloudagent.wallet.indy import IndyWallet
from aries_cloudagent.wallet.base import BaseWallet, DIDInfo
from aries_cloudagent.protocols.connections.v1_0.manager import ConnectionManager
from aries_cloudagent.ledger.base import BaseLedger
from aries_cloudagent.ledger.error import LedgerError
from aries_cloudagent.issuer.base import BaseIssuer, IssuerError
from aries_cloudagent.messaging.decorators.default import DecoratorSet
from aries_cloudagent.transport.pack_format import PackWireFormat
from aries_cloudagent.transport.wire_format import BaseWireFormat
from aries_cloudagent.messaging.decorators.transport_decorator import TransportDecorator, TransportDecoratorSchema
from aries_cloudagent.protocols.connections.v1_0.manager import ConnectionManager, ConnectionManagerError
from aries_cloudagent.protocols.connections.v1_0.messages.connection_invitation import ConnectionInvitation
from aries_cloudagent.indy.util import generate_pr_nonce
from aries_cloudagent.messaging.decorators.attach_decorator import AttachDecorator
from aries_cloudagent.messaging.util import str_to_epoch
from .messages.create_did import CreateDIDMessage
from .messages.read_did import ReadDIDMessage, ReadDIDMessageBody
from .messages.read_did_response import ReadDIDResponseMessage, ReadDIDResponseMessageSchema
from .messages.delete_did import DeleteDIDMessage, DeleteDIDMessageBody
from .messages.delete_did_response import DeleteDIDResponseMessage, DeleteDIDResponseMessageBody
from .messages.create_did_response import CreateDIDResponseMessage
from .messages.problem_report import (
MyDataDIDProblemReportMessage,
MyDataDIDProblemReportMessageReason,
DataAgreementNegotiationProblemReport,
DataAgreementProblemReport,
DataAgreementProblemReportReason
)
from .messages.read_data_agreement import ReadDataAgreement
from .messages.read_data_agreement_response import ReadDataAgreementResponse
from .messages.data_agreement_offer import DataAgreementNegotiationOfferMessage, DataAgreementNegotiationOfferMessageSchema
from .messages.data_agreement_accept import DataAgreementNegotiationAcceptMessage, DataAgreementNegotiationAcceptMessageSchema
from .messages.data_agreement_reject import DataAgreementNegotiationRejectMessage, DataAgreementNegotiationRejectMessageSchema
from .messages.data_agreement_terminate import DataAgreementTerminationTerminateMessage, DataAgreementTerminationTerminateMessageSchema
from .messages.data_agreement_verify import DataAgreementVerify
from .messages.data_agreement_qr_code_initiate import DataAgreementQrCodeInitiateMessage
from .messages.data_agreement_qr_code_problem_report import DataAgreementQrCodeProblemReport, DataAgreementQrCodeProblemReportReason
from .messages.json_ld_processed import JSONLDProcessedMessage
from .messages.json_ld_processed_response import JSONLDProcessedResponseMessage
from .messages.json_ld_problem_report import JSONLDProblemReport, JSONLDProblemReportReason
from .messages.read_all_data_agreement_template import ReadAllDataAgreementTemplateMessage
from .messages.read_all_data_agreement_template_response import ReadAllDataAgreementTemplateResponseMessage
from .messages.data_controller_details import DataControllerDetailsMessage
from .messages.data_controller_details_response import DataControllerDetailsResponseMessage
from .messages.existing_connections import ExistingConnectionsMessage
from .models.data_agreement_model import DATA_AGREEMENT_V1_SCHEMA_CONTEXT, DataAgreementEventSchema, DataAgreementV1, DataAgreementPersonalData, DataAgreementV1Schema
from .models.read_data_agreement_model import ReadDataAgreementBody
from .models.diddoc_model import MyDataDIDBody, MyDataDIDResponseBody, MyDataDIDDoc, MyDataDIDDocService, MyDataDIDDocVerificationMethod, MyDataDIDDocAuthentication
from .models.read_data_agreement_response_model import ReadDataAgreementResponseBody
from .models.exchange_records.mydata_did_registry_didcomm_transaction_record import MyDataDIDRegistryDIDCommTransactionRecord
from .models.exchange_records.data_agreement_didcomm_transaction_record import DataAgreementCRUDDIDCommTransaction
from .models.exchange_records.data_agreement_record import DataAgreementV1Record
from .models.exchange_records.data_agreement_personal_data_record import DataAgreementPersonalDataRecord
from .models.data_agreement_negotiation_offer_model import DataAgreementNegotiationOfferBody, DataAgreementEvent, DataAgreementProof, DataAgreementProofSchema
from .models.data_agreement_instance_model import DataAgreementInstance, DataAgreementInstanceSchema
from .models.data_agreement_negotiation_accept_model import DataAgreementNegotiationAcceptBody, DataAgreementNegotiationAcceptBodySchema
from .models.data_agreement_negotiation_reject_model import DataAgreementNegotiationRejectBody, DataAgreementNegotiationRejectBodySchema
from .models.data_agreement_termination_terminate_model import DataAgreementTerminationTerminateBody, DataAgreementTerminationTerminateBodySchema
from .models.data_agreement_verify_model import DataAgreementVerifyBody, DataAgreementVerifyBodySchema
from .models.data_agreement_qr_code_initiate_model import DataAgreementQrCodeInitiateBody
from .models.json_ld_processed_response_model import JSONLDProcessedResponseBody
from .models.json_ld_processed_model import JSONLDProcessedBody
from .models.data_controller_model import DataController, DataControllerSchema
from .models.existing_connections_model import ExistingConnectionsBody
from .utils.diddoc import DIDDoc
from .utils.did.mydata_did import DIDMyData
from .utils.wallet.key_type import KeyType
from .utils.verification_method import PublicKeyType
from .utils.jsonld import ED25519_2018_CONTEXT_URL
from .utils.jsonld.data_agreement import sign_data_agreement
from .utils.util import current_datetime_in_iso8601, bool_to_str, int_to_semver_str
from .utils.jsonld.create_verify_data import create_verify_data
from .decorators.data_agreement_context_decorator import DataAgreementContextDecoratorSchema, DataAgreementContextDecorator
from .message_types import (
DATA_AGREEMENT_NEGOTIATION_OFFER,
DATA_AGREEMENT_NEGOTIATION_ACCEPT,
READ_DATA_AGREEMENT
)
from ..patched_protocols.issue_credential.v1_0.models.credential_exchange import (
V10CredentialExchange
)
from ..patched_protocols.present_proof.v1_0.models.presentation_exchange import (
V10PresentationExchange
)
from ..patched_protocols.present_proof.v1_0.messages.presentation_request import PresentationRequest
from ..patched_protocols.present_proof.v1_0.message_types import ATTACH_DECO_IDS, PRESENTATION_REQUEST
from ..patched_protocols.present_proof.v1_0.manager import PresentationManager
from mydata_did.v1_0.utils.jsonld import data_agreement
class ADAManagerError(BaseError):
    """Raised for failures in ADAManager protocol and storage operations."""
class ADAManager:
    """
    Manager for MyData DID and Data Agreement (ADA) protocol operations.

    The RECORD_TYPE_* constants below name the wallet storage record types
    this manager reads and writes; the '(client)' / '(MyData DID registry)'
    annotations indicate which agent role owns each record type.
    """
    # Record for indicating a connection is labelled as Auditor (client)
    RECORD_TYPE_AUDITOR_CONNECTION = "auditor_connection"
    # Record for indicating a connection is labelled as MyData DID registry (client)
    RECORD_TYPE_MYDATA_DID_REGISTRY_CONNECTION = "mydata_did_registry_connection"
    # Record for indicating a MyData DID is registered in the DID registry (client)
    RECORD_TYPE_MYDATA_DID_REMOTE = "mydata_did_remote"
    # Record for storing data agreement instance metadata (client)
    RECORD_TYPE_DATA_AGREEMENT_INSTANCE_METADATA = "data_agreement_instance_metadata"
    # Record for keeping track of DIDs that are registered in the DID registry (MyData DID registry)
    RECORD_TYPE_MYDATA_DID_REGISTRY_DID_INFO = "mydata_did_registry_did_info"
    # Record for keeping metadata about data agreement QR codes (client)
    RECORD_TYPE_DATA_AGREEMENT_QR_CODE_METADATA = "data_agreement_qr_code_metadata"
    # Temporary record for keeping personal data of unpublished (or draft) data agreements
    RECORD_TYPE_TEMPORARY_DATA_AGREEMENT_PERSONAL_DATA = "temporary_data_agreement_personal_data"
    # Record for data controller details
    RECORD_TYPE_DATA_CONTROLLER_DETAILS = "data_controller_details"
    # Record for existing connection details.
    RECORD_TYPE_EXISTING_CONNECTION = "existing_connection"
    # Record type for data agreement records themselves
    DATA_AGREEMENT_RECORD_TYPE = "dataagreement_record"
    def __init__(self, context: InjectionContext) -> None:
        """Initialize the manager with the injection context used to resolve services."""
        self._context = context
        # Module-scoped logger for diagnostics.
        self._logger = logging.getLogger(__name__)
    @property
    def context(self) -> InjectionContext:
        """Accessor for the injection context supplied at construction time."""
        return self._context
async def process_mydata_did_problem_report_message(self, mydata_did_problem_report: MyDataDIDProblemReportMessage, receipt: MessageReceipt):
"""
Process problem report DIDComm message for MyData DID protocol.
"""
# Thread identifier
thread_id = mydata_did_problem_report._thread_id
mydata_did_registry_didcomm_transaction_record = None
try:
# Fetch MyData DID registry didcomm transaction record
mydata_did_registry_didcomm_transaction_record: MyDataDIDRegistryDIDCommTransactionRecord = await MyDataDIDRegistryDIDCommTransactionRecord.retrieve_by_tag_filter(
context=self.context,
tag_filter={"thread_id": thread_id}
)
except (StorageNotFoundError, StorageDuplicateError) as e:
# No record found
self._logger.debug(
"Failed to process mydata-did/1.0/problem-report message; "
"No MyData DID registry didcomm transaction record found for thread_id: %s", thread_id
)
return
# Assert transaction record is not None
assert mydata_did_registry_didcomm_transaction_record is not None
mydata_did_registry_didcomm_transaction_record.messages_list.append(
mydata_did_problem_report.to_json()
)
# Update transaction record
await mydata_did_registry_didcomm_transaction_record.save(self.context)
async def process_create_did_response_message(self, create_did_response_message: CreateDIDResponseMessage, receipt: MessageReceipt):
"""
Process create-did-response DIDComm message
"""
# Storage instance
storage: IndyStorage = await self.context.inject(BaseStorage)
# Thread identifier
thread_id = create_did_response_message._thread_id
mydata_did_registry_didcomm_transaction_record = None
try:
# Fetch MyData DID registry didcomm transaction record
mydata_did_registry_didcomm_transaction_record: MyDataDIDRegistryDIDCommTransactionRecord = await MyDataDIDRegistryDIDCommTransactionRecord.retrieve_by_tag_filter(
context=self.context,
tag_filter={"thread_id": thread_id}
)
except (StorageNotFoundError, StorageDuplicateError) as e:
# No record found
self._logger.debug(
"Failed to process create-did-response message; "
"No MyData DID registry didcomm transaction record found for thread_id: %s", thread_id
)
return
# Assert transaction record is not None
assert mydata_did_registry_didcomm_transaction_record is not None
mydata_did_registry_didcomm_transaction_record.messages_list.append(
create_did_response_message.to_json()
)
# Update transaction record
await mydata_did_registry_didcomm_transaction_record.save(self.context)
# Mark MyData DID as remote i.e. registered in the DID registry
mydata_did_remote_record = StorageRecord(
type=self.RECORD_TYPE_MYDATA_DID_REMOTE,
value=create_did_response_message.body.did_doc.to_json(),
tags={
"did": create_did_response_message.body.did_doc.diddoc_id,
"sov_verkey": DIDMyData.from_did(create_did_response_message.body.did_doc.diddoc_id).public_key_b58,
"status": "active"
}
)
# Save record
await storage.add_record(mydata_did_remote_record)
    async def process_create_did_message(self, create_did_message: CreateDIDMessage, receipt: MessageReceipt):
        """
        Process a create-did DIDComm message (registry side).

        Registers the DID document carried in the message and replies with a
        create-did-response; replies with a problem-report when the DID is
        already registered or its body signature fails verification.
        """
        # Storage instance
        storage: IndyStorage = await self.context.inject(BaseStorage)
        # Wallet instance
        wallet: IndyWallet = await self.context.inject(BaseWallet)
        # Responder instance (may be absent; replies are skipped in that case)
        responder: DispatcherResponder = await self.context.inject(BaseResponder, required=False)
        # From and To DIDs of the received message, derived from the transport verkeys
        create_did_message_from_did: DIDMyData = DIDMyData.from_public_key_b58(
            receipt.sender_verkey, key_type=KeyType.ED25519)
        create_did_message_to_did: DIDMyData = DIDMyData.from_public_key_b58(
            receipt.recipient_verkey, key_type=KeyType.ED25519)
        # From and To DIDs for the response messages (directions swapped)
        response_message_from_did = create_did_message_to_did
        response_message_to_did = create_did_message_from_did
        mydata_did_registry_did_info_record = None
        try:
            # Check if DID is already registered
            mydata_did_registry_did_info_record = await storage.search_records(
                type_filter=ADAManager.RECORD_TYPE_MYDATA_DID_REGISTRY_DID_INFO,
                tag_query={"did": create_did_message.body.diddoc_id}
            ).fetch_single()
            # A unique existing record means the DID is taken: send problem-report.
            mydata_did_problem_report = MyDataDIDProblemReportMessage(
                problem_code=MyDataDIDProblemReportMessageReason.DID_EXISTS.value,
                explain="DID already registered in the DID registry",
                from_did=response_message_from_did.did,
                to_did=response_message_to_did.did,
                created_time=round(time.time() * 1000)
            )
            # Correlate the reply with the incoming message thread
            mydata_did_problem_report.assign_thread_id(
                thid=create_did_message._id)
            if responder:
                # NOTE(review): assumes self.context carries the active
                # connection_record during dispatch -- confirm against caller.
                await responder.send_reply(mydata_did_problem_report, connection_id=self.context.connection_record.connection_id)
            return
        except (StorageNotFoundError, StorageDuplicateError) as e:
            # Not registered yet (or ambiguous match): proceed with registration.
            pass
        try:
            # Validate the ownership of the did by verifying the signature
            await create_did_message.verify_signed_field(
                field_name="body",
                wallet=wallet,
                signer_verkey=DIDMyData.from_did(
                    create_did_message.body.diddoc_id).public_key_b58
            )
        except BaseModelError as e:
            self._logger.error(
                f"Create DID message signature validation failed: {e}")
            # Send problem-report message.
            mydata_did_problem_report = MyDataDIDProblemReportMessage(
                problem_code=MyDataDIDProblemReportMessageReason.MESSAGE_BODY_SIGNATURE_VERIFICATION_FAILED.value,
                explain="DID document signature verification failed",
                from_did=response_message_from_did.did,
                to_did=response_message_to_did.did,
                created_time=round(time.time() * 1000)
            )
            # Assign thread id
            mydata_did_problem_report.assign_thread_id(
                thid=create_did_message._id)
            if responder:
                await responder.send_reply(mydata_did_problem_report, connection_id=self.context.connection_record.connection_id)
            return
        # Create a record for the registered DID
        mydata_did_registry_did_info_record_tags = {
            "did": create_did_message.body.diddoc_id,
            "connection_id": self.context.connection_record.connection_id,
            "version": "1",
            "status": "active"
        }
        mydata_did_registry_did_info_record = StorageRecord(
            type=ADAManager.RECORD_TYPE_MYDATA_DID_REGISTRY_DID_INFO,
            value=create_did_message.body.to_json(),
            tags=mydata_did_registry_did_info_record_tags
        )
        await storage.add_record(mydata_did_registry_did_info_record)
        # Send create-did-response message echoing the registered DID document
        create_did_response_message = CreateDIDResponseMessage(
            from_did=response_message_from_did.did,
            to_did=response_message_to_did.did,
            created_time=round(time.time() * 1000),
            body=MyDataDIDResponseBody(
                did_doc=create_did_message.body,
                version=mydata_did_registry_did_info_record_tags.get(
                    "version"),
                status=mydata_did_registry_did_info_record_tags.get("status")
            )
        )
        # Assign thread id
        create_did_response_message.assign_thread_id(
            thid=create_did_message._id)
        # Create transaction record to keep track of didcomm messages
        transaction_record = MyDataDIDRegistryDIDCommTransactionRecord(
            thread_id=create_did_message._id,
            message_type=MyDataDIDRegistryDIDCommTransactionRecord.MESSAGE_TYPE_CREATE_DID,
            messages_list=[create_did_message.to_json(
            ), create_did_response_message.to_json()],
            connection_id=self.context.connection_record.connection_id,
        )
        # Save transaction record
        await transaction_record.save(self.context)
        if responder:
            await responder.send_reply(create_did_response_message, connection_id=self.context.connection_record.connection_id)
async def fetch_mydata_did_registry_connection_record(self) -> typing.Tuple[typing.Union[ConnectionRecord, None], typing.Union[None, Exception]]:
# Wallet instance from context
wallet: IndyWallet = await self.context.inject(BaseWallet)
# Storage instance from context
storage: BaseStorage = await self.context.inject(BaseStorage)
mydata_did_registry_connection_record = None
try:
# Search for existing connection_id marked as MyData DID registry
mydata_did_registry_connection_record: StorageRecord = await storage.search_records(
self.RECORD_TYPE_MYDATA_DID_REGISTRY_CONNECTION,
).fetch_single()
# MyData DID Registry connection identifier
mydata_did_registry_connection_id = mydata_did_registry_connection_record.value
# Fetch connection record from storage
connection_record: ConnectionRecord = await ConnectionRecord.retrieve_by_id(self.context, mydata_did_registry_connection_id)
return connection_record, None
except (StorageError, StorageNotFoundError, StorageDuplicateError) as e:
return None, e
async def fetch_auditor_connection_record(self) -> typing.Tuple[typing.Union[ConnectionRecord, None], typing.Union[None, Exception]]:
# Wallet instance from context
wallet: IndyWallet = await self.context.inject(BaseWallet)
# Storage instance from context
storage: BaseStorage = await self.context.inject(BaseStorage)
auditor_connection_record = None
try:
# Search for existing connection_id marked as Auditor
auditor_connection_record: StorageRecord = await storage.search_records(
self.RECORD_TYPE_AUDITOR_CONNECTION,
).fetch_single()
# Auditor connection identifier
auditor_connection_id = auditor_connection_record.value
# Fetch connection record from storage
connection_record: ConnectionRecord = await ConnectionRecord.retrieve_by_id(self.context, auditor_connection_id)
return connection_record, None
except (StorageError, StorageNotFoundError, StorageDuplicateError) as e:
return None, e
async def send_create_did_message(self, did: str) -> MyDataDIDRegistryDIDCommTransactionRecord:
"""
Send create-did didcomm message to MyData DID Registry.
Args:
did: The did to be created.
Returns:
The transaction record.
"""
# Wallet instance from context
wallet: IndyWallet = await self.context.inject(BaseWallet)
# Storage instance from context
storage: BaseStorage = await self.context.inject(BaseStorage)
connection_record, err = await self.fetch_mydata_did_registry_connection_record()
if err:
raise ADAManagerError(
f"Failed to send create-did message. "
f"Reason: {err}"
)
# from_did
pairwise_local_did_record = await wallet.get_local_did(connection_record.my_did)
from_did = DIDMyData.from_public_key_b58(
pairwise_local_did_record.verkey, key_type=KeyType.ED25519)
# to_did
to_did = DIDMyData.from_public_key_b58(
connection_record.their_did, key_type=KeyType.ED25519)
# to be created did
# Fetch local did record for verkey provided.
local_did_record: DIDInfo = await wallet.get_local_did(did)
mydata_did = DIDMyData.from_public_key_b58(
local_did_record.verkey, key_type=KeyType.ED25519)
# Create DIDDoc
did_doc = MyDataDIDDoc(
context=DIDDoc.CONTEXT,
diddoc_id=mydata_did.did,
verification_method=[
MyDataDIDDocVerificationMethod(
verification_method_id=f"{mydata_did.did}#1",
verification_method_type=PublicKeyType.ED25519_SIG_2018.ver_type,
controller=mydata_did.did,
public_key_base58=mydata_did.fingerprint
)
],
authentication=[
MyDataDIDDocAuthentication(
authentication_type=PublicKeyType.ED25519_SIG_2018.authn_type,
public_key=f"{mydata_did.did}#1"
)
],
service=[
MyDataDIDDocService(
service_id=f"{mydata_did.did};didcomm",
service_type="DIDComm",
service_priority=0,
recipient_keys=[
mydata_did.fingerprint
| |
:param Type: 触发器类型,目前支持 cos 、cmq、 timer、 ckafka、apigw类型
:type Type: str
:param TriggerDesc: 触发器对应的参数,可见具体[触发器描述说明](https://cloud.tencent.com/document/product/583/39901)
:type TriggerDesc: str
:param Namespace: 函数的命名空间
:type Namespace: str
:param Qualifier: 函数的版本
:type Qualifier: str
:param Enable: 触发器的初始是能状态 OPEN表示开启 CLOSE表示关闭
:type Enable: str
:param CustomArgument: 用户自定义参数,仅支持timer触发器
:type CustomArgument: str
"""
self.FunctionName = None
self.TriggerName = None
self.Type = None
self.TriggerDesc = None
self.Namespace = None
self.Qualifier = None
self.Enable = None
self.CustomArgument = None
def _deserialize(self, params):
self.FunctionName = params.get("FunctionName")
self.TriggerName = params.get("TriggerName")
self.Type = params.get("Type")
self.TriggerDesc = params.get("TriggerDesc")
self.Namespace = params.get("Namespace")
self.Qualifier = params.get("Qualifier")
self.Enable = params.get("Enable")
self.CustomArgument = params.get("CustomArgument")
class CreateTriggerResponse(AbstractModel):
    """Response body of the CreateTrigger API."""

    def __init__(self):
        """
        :param TriggerInfo: Information about the created trigger.
        :type TriggerInfo: :class:`tencentcloud.scf.v20180416.models.Trigger`
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues.
        :type RequestId: str
        """
        # Fields start as None and are populated by _deserialize().
        self.TriggerInfo = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_trigger = params.get("TriggerInfo")
        if raw_trigger is not None:
            # Nested structure: decode into a Trigger model object.
            self.TriggerInfo = Trigger()
            self.TriggerInfo._deserialize(raw_trigger)
        self.RequestId = params.get("RequestId")
class DeadLetterConfig(AbstractModel):
    """Dead letter queue parameters."""

    def __init__(self):
        """
        :param Type: Dead letter queue mode.
        :type Type: str
        :param Name: Dead letter queue name.
        :type Name: str
        :param FilterType: Tag form of the dead letter queue topic mode.
        :type FilterType: str
        """
        # Fields start as None and are populated by _deserialize().
        self.Type = None
        self.Name = None
        self.FilterType = None

    def _deserialize(self, params):
        # Copy every known field from the raw dict; missing keys become None.
        for field in ("Type", "Name", "FilterType"):
            setattr(self, field, params.get(field))
class DeleteAliasRequest(AbstractModel):
    """Request body of the DeleteAlias API."""

    def __init__(self):
        """
        :param FunctionName: Function name.
        :type FunctionName: str
        :param Name: Name of the alias to delete.
        :type Name: str
        :param Namespace: Namespace the function belongs to.
        :type Namespace: str
        """
        # Fields start as None and are populated by _deserialize().
        self.FunctionName = None
        self.Name = None
        self.Namespace = None

    def _deserialize(self, params):
        # Copy every known field from the raw dict; missing keys become None.
        for field in ("FunctionName", "Name", "Namespace"):
            setattr(self, field, params.get(field))
class DeleteAliasResponse(AbstractModel):
    """Response body of the DeleteAlias API."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues.
        :type RequestId: str
        """
        # Populated by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request identifier is returned for this call.
        self.RequestId = params.get("RequestId")
class DeleteFunctionRequest(AbstractModel):
    """Request body of the DeleteFunction API."""

    def __init__(self):
        """
        :param FunctionName: Name of the function to delete.
        :type FunctionName: str
        :param Namespace: Namespace the function belongs to.
        :type Namespace: str
        :param Qualifier: Function version.
        :type Qualifier: str
        """
        # Fields start as None and are populated by _deserialize().
        self.FunctionName = None
        self.Namespace = None
        self.Qualifier = None

    def _deserialize(self, params):
        # Copy every known field from the raw dict; missing keys become None.
        for field in ("FunctionName", "Namespace", "Qualifier"):
            setattr(self, field, params.get(field))
class DeleteFunctionResponse(AbstractModel):
    """Response body of the DeleteFunction API."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues.
        :type RequestId: str
        """
        # Populated by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request identifier is returned for this call.
        self.RequestId = params.get("RequestId")
class DeleteLayerVersionRequest(AbstractModel):
    """Request body of the DeleteLayerVersion API."""

    def __init__(self):
        """
        :param LayerName: Layer name.
        :type LayerName: str
        :param LayerVersion: Layer version number.
        :type LayerVersion: int
        """
        # Fields start as None and are populated by _deserialize().
        self.LayerName = None
        self.LayerVersion = None

    def _deserialize(self, params):
        # Copy every known field from the raw dict; missing keys become None.
        for field in ("LayerName", "LayerVersion"):
            setattr(self, field, params.get(field))
class DeleteLayerVersionResponse(AbstractModel):
    """Response body of the DeleteLayerVersion API."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues.
        :type RequestId: str
        """
        # Populated by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request identifier is returned for this call.
        self.RequestId = params.get("RequestId")
class DeleteNamespaceRequest(AbstractModel):
    """Request body of the DeleteNamespace API."""

    def __init__(self):
        """
        :param Namespace: Namespace name.
        :type Namespace: str
        """
        # Populated by _deserialize().
        self.Namespace = None

    def _deserialize(self, params):
        # Single-field request: just the namespace to delete.
        self.Namespace = params.get("Namespace")
class DeleteNamespaceResponse(AbstractModel):
    """Response body of the DeleteNamespace API."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues.
        :type RequestId: str
        """
        # Populated by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request identifier is returned for this call.
        self.RequestId = params.get("RequestId")
class DeleteProvisionedConcurrencyConfigRequest(AbstractModel):
    """Request body of the DeleteProvisionedConcurrencyConfig API."""

    def __init__(self):
        """
        :param FunctionName: Name of the function whose provisioned concurrency should be removed.
        :type FunctionName: str
        :param Qualifier: Function version number.
        :type Qualifier: str
        :param Namespace: Namespace the function belongs to; defaults to "default".
        :type Namespace: str
        """
        # Fields start as None and are populated by _deserialize().
        self.FunctionName = None
        self.Qualifier = None
        self.Namespace = None

    def _deserialize(self, params):
        # Copy every known field from the raw dict; missing keys become None.
        for field in ("FunctionName", "Qualifier", "Namespace"):
            setattr(self, field, params.get(field))
class DeleteProvisionedConcurrencyConfigResponse(AbstractModel):
    """Response body of the DeleteProvisionedConcurrencyConfig API."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues.
        :type RequestId: str
        """
        # Populated by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request identifier is returned for this call.
        self.RequestId = params.get("RequestId")
class DeleteReservedConcurrencyConfigRequest(AbstractModel):
    """Request body of the DeleteReservedConcurrencyConfig API."""

    def __init__(self):
        """
        :param FunctionName: Name of the function whose reserved concurrency should be removed.
        :type FunctionName: str
        :param Namespace: Namespace the function belongs to; defaults to "default".
        :type Namespace: str
        """
        # Fields start as None and are populated by _deserialize().
        self.FunctionName = None
        self.Namespace = None

    def _deserialize(self, params):
        # Copy every known field from the raw dict; missing keys become None.
        for field in ("FunctionName", "Namespace"):
            setattr(self, field, params.get(field))
class DeleteReservedConcurrencyConfigResponse(AbstractModel):
    """Response body of the DeleteReservedConcurrencyConfig API."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues.
        :type RequestId: str
        """
        # Populated by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request identifier is returned for this call.
        self.RequestId = params.get("RequestId")
class DeleteTriggerRequest(AbstractModel):
    """Request body of the DeleteTrigger API."""

    def __init__(self):
        """
        :param FunctionName: Function name.
        :type FunctionName: str
        :param TriggerName: Name of the trigger to delete.
        :type TriggerName: str
        :param Type: Type of the trigger to delete; currently cos, cmq, timer and ckafka are supported.
        :type Type: str
        :param Namespace: Namespace the function belongs to.
        :type Namespace: str
        :param TriggerDesc: Required when deleting a COS trigger: JSON data such as
            {"event":"cos:ObjectCreated:*"}, in the same format as the SetTrigger API.
            May be omitted for timer or CMQ triggers.
        :type TriggerDesc: str
        :param Qualifier: Function version information.
        :type Qualifier: str
        """
        # Fields start as None and are populated by _deserialize().
        self.FunctionName = None
        self.TriggerName = None
        self.Type = None
        self.Namespace = None
        self.TriggerDesc = None
        self.Qualifier = None

    def _deserialize(self, params):
        # Copy every known field from the raw dict; missing keys become None.
        for field in ("FunctionName", "TriggerName", "Type", "Namespace",
                      "TriggerDesc", "Qualifier"):
            setattr(self, field, params.get(field))
class DeleteTriggerResponse(AbstractModel):
    """Response body of the DeleteTrigger API."""

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting issues.
        :type RequestId: str
        """
        # Populated by _deserialize().
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request identifier is returned for this call.
        self.RequestId = params.get("RequestId")
class EipConfigIn(AbstractModel):
    """Fixed public IP configuration (input form)."""

    def __init__(self):
        """
        :param EipStatus: EIP switch state; one of ['ENABLE', 'DISABLE'].
        :type EipStatus: str
        """
        # Populated by _deserialize().
        self.EipStatus = None

    def _deserialize(self, params):
        # Single-field config: the EIP enable/disable flag.
        self.EipStatus = params.get("EipStatus")
class EipConfigOut(AbstractModel):
    """Fixed public IP configuration (output form)."""

    def __init__(self):
        """
        :param EipStatus: Whether a fixed IP is used; one of ["ENABLE", "DISABLE"].
        :type EipStatus: str
        :param EipAddress: List of IP addresses.
            Note: this field may return null, meaning no valid value was found.
        :type EipAddress: list of str
        """
        # Fields start as None and are populated by _deserialize().
        self.EipStatus = None
        self.EipAddress = None

    def _deserialize(self, params):
        # Copy every known field from the raw dict; missing keys become None.
        for field in ("EipStatus", "EipAddress"):
            setattr(self, field, params.get(field))
class EipOutConfig(AbstractModel):
    """EipOutConfig — legacy fixed public IP configuration."""

    def __init__(self):
        """
        :param EipFixed: Whether the IP is fixed; one of ["TRUE", "FALSE"].
        :type EipFixed: str
        :param Eips: List of IP addresses.
        :type Eips: list of str
        """
        # Fields start as None and are populated by _deserialize().
        self.EipFixed = None
        self.Eips = None

    def _deserialize(self, params):
        # Copy every known field from the raw dict; missing keys become None.
        for field in ("EipFixed", "Eips"):
            setattr(self, field, params.get(field))
class Environment(AbstractModel):
    """Environment variable parameters of a function."""

    def __init__(self):
        """
        :param Variables: Array of environment variables.
        :type Variables: list of Variable
        """
        # Populated by _deserialize().
        self.Variables = None

    def _deserialize(self, params):
        raw_variables = params.get("Variables")
        if raw_variables is not None:
            # Rebuild each raw entry as a Variable model object.
            self.Variables = []
            for entry in raw_variables:
                variable = Variable()
                variable._deserialize(entry)
                self.Variables.append(variable)
class Filter(AbstractModel):
    """Key-value filter used for conditional queries, e.g. by ID, name or status.

    When several Filters are given, they are combined with logical AND.
    When a single Filter carries several Values, the Values are combined
    with logical OR.
    """

    def __init__(self):
        """
        :param Name: Field to filter on.
        :type Name: str
        :param Values: Values to match for the field.
        :type Values: list of str
        """
        # Fields start as None and are populated by _deserialize().
        self.Name = None
        self.Values = None

    def _deserialize(self, params):
        # Copy every known field from the raw dict; missing keys become None.
        for field in ("Name", "Values"):
            setattr(self, field, params.get(field))
class Function(AbstractModel):
    """A function list entry."""

    def __init__(self):
        """
        :param ModTime: Modification time.
        :type ModTime: str
        :param AddTime: Creation time.
        :type AddTime: str
        :param Runtime: Runtime.
        :type Runtime: str
        :param FunctionName: Function name.
        :type FunctionName: str
        :param FunctionId: Function ID.
        :type FunctionId: str
        :param Namespace: Namespace.
        :type Namespace: str
        :param Status: Function status; see the product documentation for values and transitions.
        :type Status: str
        :param StatusDesc: Detailed description of the function status.
        :type StatusDesc: str
        :param Description: Function description.
        :type Description: str
        :param Tags: Function tags.
        :type Tags: list of Tag
        :param Type: Function type; either HTTP or Event.
        :type Type: str
        :param StatusReasons: Reasons for a failed function status.
        :type StatusReasons: list of StatusReason
        :param TotalProvisionedConcurrencyMem: Total provisioned-concurrency memory across all versions.
            Note: this field may return null, meaning no valid value was found.
        :type TotalProvisionedConcurrencyMem: int
        :param ReservedConcurrencyMem: Reserved concurrency memory of the function.
            Note: this field may return null, meaning no valid value was found.
        :type ReservedConcurrencyMem: int
        """
        # All fields start as None and are populated by _deserialize().
        self.ModTime = None
        self.AddTime = None
        self.Runtime = None
        self.FunctionName = None
        self.FunctionId = None
        self.Namespace = None
        self.Status = None
        self.StatusDesc = None
        self.Description = None
        self.Tags = None
        self.Type = None
        self.StatusReasons = None
        self.TotalProvisionedConcurrencyMem = None
        self.ReservedConcurrencyMem = None

    def _deserialize(self, params):
        # Scalar fields are copied verbatim from the raw dict.
        for field in ("ModTime", "AddTime", "Runtime", "FunctionName",
                      "FunctionId", "Namespace", "Status", "StatusDesc",
                      "Description"):
            setattr(self, field, params.get(field))
        raw_tags = params.get("Tags")
        if raw_tags is not None:
            # Rebuild each raw entry as a Tag model object.
            self.Tags = []
            for entry in raw_tags:
                tag = Tag()
                tag._deserialize(entry)
                self.Tags.append(tag)
        self.Type = params.get("Type")
        raw_reasons = params.get("StatusReasons")
        if raw_reasons is not None:
            # Rebuild each raw entry as a StatusReason model object.
            self.StatusReasons = []
            for entry in raw_reasons:
                reason = StatusReason()
                reason._deserialize(entry)
                self.StatusReasons.append(reason)
        self.TotalProvisionedConcurrencyMem = params.get("TotalProvisionedConcurrencyMem")
        self.ReservedConcurrencyMem = params.get("ReservedConcurrencyMem")
class FunctionLog(AbstractModel):
    """Log information of a single function invocation."""

    def __init__(self):
        """
        :param FunctionName: Function name.
        :type FunctionName: str
        :param RetMsg: Return value produced after the function finished.
        :type RetMsg: str
        :param RequestId: RequestId of this invocation.
        :type RequestId: str
        :param StartTime: Point in time at which the invocation started.
        :type StartTime: str
        :param RetCode: Execution result; 0 means success, any other value means failure.
        :type RetCode: int
        :param InvokeFinished: Whether the invocation finished; 1 means finished, any other value means the call was abnormal.
        :type InvokeFinished: int
        :param Duration: Execution time in milliseconds.
        :type Duration: float
        :param BillDuration: Billed time in milliseconds; the duration rounded up to the nearest 100 ms.
        :type BillDuration: int
        :param MemUsage: Actual memory consumed during execution, in bytes.
        :type MemUsage: int
        :param Log: Log output produced during execution.
        :type Log: str
        :param Level: Log level.
        :type Level: str
        :param Source: Log source.
        :type Source: str
        :param RetryNum: Number of retries.
        :type RetryNum: int
        """
        # All fields start as None and are populated by _deserialize().
        self.FunctionName = None
        self.RetMsg = None
        self.RequestId = None
        self.StartTime = None
        self.RetCode = None
        self.InvokeFinished = None
        self.Duration = None
        self.BillDuration = None
        self.MemUsage = None
        self.Log = None
        self.Level = None
        self.Source = None
        self.RetryNum = None

    def _deserialize(self, params):
        # Copy every known field from the raw dict; missing keys become None.
        for field in ("FunctionName", "RetMsg", "RequestId", "StartTime",
                      "RetCode", "InvokeFinished", "Duration", "BillDuration",
                      "MemUsage", "Log", "Level", "Source", "RetryNum"):
            setattr(self, field, params.get(field))
class FunctionVersion(AbstractModel):
"""函数版本信息
"""
def __init__(self):
"""
:param Version: 函数版本名称
:type Version: str
:param Description: 版本描述信息
注意:此字段可能返回 null,表示取不到有效值。
:type Description: str
:param AddTime: 创建时间
注意:此字段可能返回 null,表示取不到有效值。
:type AddTime: str
:param ModTime: 更新时间
注意:此字段可能返回 null,表示取不到有效值。
:type ModTime: str
"""
self.Version = None
self.Description = None
self.AddTime = None
self.ModTime = None
| |
"""Master which prepares work for all workers.
Evaluation of competition is split into work pieces. One work piece is a
either evaluation of an attack on a batch of images or evaluation of a
defense on a batch of adversarial images.
Work pieces are run by workers. Master prepares work pieces for workers and
writes them to the datastore.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
from collections import defaultdict
import csv
from io import BytesIO
import logging
import os
import pickle
import random
import time
from six import iteritems
from six import iterkeys
from six import itervalues
from six.moves import input as input_str
import eval_lib
# List of allowed sizes of adversarial perturbation
# (passed as allowed_epsilon when dataset batches are initialized in
# EvaluationMaster.prepare_attacks)
ALLOWED_EPS = [4, 8, 12, 16]

# Batch size
# Default number of images per work batch.
DEFAULT_BATCH_SIZE = 100
def print_header(text):
  """Prints *text* centered inside a frame of '#' characters."""
  border = '#' * (len(text) + 4)
  print()
  print(border)
  print('# ' + text + ' #')
  print(border)
  print()
def save_dict_to_file(filename, dictionary):
  """Writes *dictionary* to a CSV file, one "key,value" row per entry."""
  with open(filename, 'w') as out_file:
    csv_writer = csv.writer(out_file)
    for key, value in iteritems(dictionary):
      # Both key and value are stringified so arbitrary types can be saved.
      csv_writer.writerow([str(key), str(value)])
class EvaluationMaster(object):
"""Class which encapsulates logit of the master."""
def __init__(self, storage_client, datastore_client, round_name, dataset_name,
blacklisted_submissions='', results_dir='',
num_defense_shards=None, verbose=False,
batch_size=DEFAULT_BATCH_SIZE, max_dataset_num_images=None):
"""Initializes EvaluationMaster.
Args:
storage_client: instance of eval_lib.CompetitionStorageClient
datastore_client: instance of eval_lib.CompetitionDatastoreClient
round_name: name of the current round
dataset_name: name of the dataset, 'dev' or 'final'
blacklisted_submissions: optional list of blacklisted submissions which
should not be evaluated
results_dir: local directory where results and logs should be written
num_defense_shards: optional number of defense shards
verbose: whether output should be verbose on not. If True, then methods
of this class will print some additional information which is useful
for debugging.
batch_size: batch size to use
max_dataset_num_images: maximum number of images from the dataset to use
or None if entire dataset should be used.
"""
self.storage_client = storage_client
self.datastore_client = datastore_client
self.round_name = round_name
self.dataset_name = dataset_name
self.results_dir = results_dir
if num_defense_shards:
self.num_defense_shards = int(num_defense_shards)
else:
self.num_defense_shards = None
self.verbose = verbose
self.blacklisted_submissions = [s.strip()
for s in blacklisted_submissions.split(',')]
self.batch_size = batch_size
self.max_dataset_num_images = max_dataset_num_images
# init client classes
self.submissions = eval_lib.CompetitionSubmissions(
datastore_client=self.datastore_client,
storage_client=self.storage_client,
round_name=self.round_name)
self.dataset_batches = eval_lib.DatasetBatches(
datastore_client=self.datastore_client,
storage_client=self.storage_client,
dataset_name=self.dataset_name)
self.adv_batches = eval_lib.AversarialBatches(
datastore_client=self.datastore_client)
self.class_batches = eval_lib.ClassificationBatches(
datastore_client=self.datastore_client,
storage_client=self.storage_client,
round_name=self.round_name)
self.attack_work = eval_lib.AttackWorkPieces(
datastore_client=self.datastore_client)
self.defense_work = eval_lib.DefenseWorkPieces(
datastore_client=self.datastore_client)
def ask_when_work_is_populated(self, work):
"""When work is already populated asks whether we should continue.
This method prints warning message that work is populated and asks
whether user wants to continue or not.
Args:
work: instance of WorkPiecesBase
Returns:
True if we should continue and populate datastore, False if we should stop
"""
work.read_all_from_datastore()
if work.work:
print('Work is already written to datastore.\n'
'If you continue these data will be overwritten and '
'possible corrupted.')
inp = input_str('Do you want to continue? '
'(type "yes" without quotes to confirm): ')
return inp == 'yes'
else:
return True
  def prepare_attacks(self):
    """Prepares all data needed for evaluation of attacks.

    Populates the datastore with submissions, dataset batches, adversarial
    batches and attack work pieces, in that order, since each step consumes
    the output of the previous one.
    """
    print_header('PREPARING ATTACKS DATA')
    # verify that attacks data not written yet
    if not self.ask_when_work_is_populated(self.attack_work):
      return
    # The check above loaded existing work into self.attack_work; start from
    # a fresh empty collection so stale entries are not re-written.
    self.attack_work = eval_lib.AttackWorkPieces(
        datastore_client=self.datastore_client)
    # prepare submissions
    print_header('Initializing submissions')
    self.submissions.init_from_storage_write_to_datastore()
    if self.verbose:
      print(self.submissions)
    # prepare dataset batches
    print_header('Initializing dataset batches')
    self.dataset_batches.init_from_storage_write_to_datastore(
        batch_size=self.batch_size,
        allowed_epsilon=ALLOWED_EPS,
        skip_image_ids=[],
        max_num_images=self.max_dataset_num_images)
    if self.verbose:
      print(self.dataset_batches)
    # prepare adversarial batches: one per (attack, dataset batch) pair
    print_header('Initializing adversarial batches')
    self.adv_batches.init_from_dataset_and_submissions_write_to_datastore(
        dataset_batches=self.dataset_batches,
        attack_submission_ids=self.submissions.get_all_attack_ids())
    if self.verbose:
      print(self.adv_batches)
    # prepare work pieces
    print_header('Preparing attack work pieces')
    self.attack_work.init_from_adversarial_batches(self.adv_batches.data)
    self.attack_work.write_all_to_datastore()
    if self.verbose:
      print(self.attack_work)
  def prepare_defenses(self):
    """Prepares all data needed for evaluation of defenses.

    Requires that attacks were already evaluated: reads submissions, dataset
    batches, adversarial batches and attack work back from the datastore,
    then writes classification batches and defense work pieces.
    """
    print_header('PREPARING DEFENSE DATA')
    # verify that defense data not written yet
    if not self.ask_when_work_is_populated(self.defense_work):
      return
    # The check above loaded existing work into self.defense_work; start from
    # a fresh empty collection so stale entries are not re-written.
    self.defense_work = eval_lib.DefenseWorkPieces(
        datastore_client=self.datastore_client)
    # load results of attacks
    self.submissions.init_from_datastore()
    self.dataset_batches.init_from_datastore()
    self.adv_batches.init_from_datastore()
    self.attack_work.read_all_from_datastore()
    # populate classification results
    print_header('Initializing classification batches')
    self.class_batches.init_from_adversarial_batches_write_to_datastore(
        self.submissions, self.adv_batches)
    if self.verbose:
      print(self.class_batches)
    # populate work pieces
    print_header('Preparing defense work pieces')
    self.defense_work.init_from_class_batches(
        self.class_batches.data, num_shards=self.num_defense_shards)
    self.defense_work.write_all_to_datastore()
    if self.verbose:
      print(self.defense_work)
  def _save_work_results(self, run_stats, scores, num_processed_images,
                         filename):
    """Saves statistics about each submission.

    Saved statistics include score; number of completed and failed batches;
    min, max, average and median time needed to run one batch.

    Args:
      run_stats: dictionary with runtime statistics for submissions,
        can be generated by WorkPiecesBase.compute_work_statistics
      scores: dictionary mapping submission ids to scores
      num_processed_images: dictionary with number of successfully processed
        images by each submission, one of the outputs of
        ClassificationBatches.compute_classification_results
      filename: output filename
    """
    with open(filename, 'w') as f:
      writer = csv.writer(f)
      writer.writerow(
          ['SubmissionID', 'ExternalSubmissionId', 'Score',
           'CompletedBatches', 'BatchesWithError', 'ProcessedImages',
           'MinEvalTime', 'MaxEvalTime',
           'MedianEvalTime', 'MeanEvalTime',
           'ErrorMsg'])
      for submission_id in sorted(iterkeys(run_stats)):
        # Default of NaN for all stat fields if the entry is missing.
        stat = run_stats.get(
            submission_id,
            collections.defaultdict(lambda: float('NaN')))
        external_id = self.submissions.get_external_id(submission_id)
        error_msg = ''
        # Pop recorded error messages until a reportable one is found,
        # skipping the benign "Cant copy adversarial batch locally" ones.
        # NOTE: this consumes (mutates) stat['error_messages'].
        while not error_msg and stat['error_messages']:
          error_msg = stat['error_messages'].pop()
          if error_msg.startswith('Cant copy adversarial batch locally'):
            error_msg = ''
        writer.writerow([
            submission_id, external_id, scores.get(submission_id, None),
            stat['completed'], stat['num_errors'],
            num_processed_images.get(submission_id, None),
            stat['min_eval_time'], stat['max_eval_time'],
            stat['median_eval_time'], stat['mean_eval_time'],
            error_msg
        ])
def _save_sorted_results(self, run_stats, scores, image_count, filename):
"""Saves sorted (by score) results of the evaluation.
Args:
run_stats: dictionary with runtime statistics for submissions,
can be generated by WorkPiecesBase.compute_work_statistics
scores: dictionary mapping submission ids to scores
image_count: dictionary with number of images processed by submission
filename: output filename
"""
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(['SubmissionID', 'ExternalTeamId', 'Score',
'MedianTime', 'ImageCount'])
def get_second(x):
"""Returns second entry of a list/tuple"""
return x[1]
for s_id, score in sorted(iteritems(scores),
key=get_second, reverse=True):
external_id = self.submissions.get_external_id(s_id)
stat = run_stats.get(
s_id, collections.defaultdict(lambda: float('NaN')))
writer.writerow([s_id, external_id, score,
stat['median_eval_time'],
image_count[s_id]])
def _read_dataset_metadata(self):
"""Reads dataset metadata.
Returns:
instance of DatasetMetadata
"""
blob = self.storage_client.get_blob(
'dataset/' + self.dataset_name + '_dataset.csv')
buf = BytesIO()
blob.download_to_file(buf)
buf.seek(0)
return eval_lib.DatasetMetadata(buf)
def compute_results(self):
"""Computes results (scores, stats, etc...) of competition evaluation.
Results are saved into output directory (self.results_dir).
Also this method saves all intermediate data into output directory as well,
so it can resume computation if it was interrupted for some reason.
    This is useful because computation of results could take many minutes.
"""
# read all data
logging.info('Reading data from datastore')
dataset_meta = self._read_dataset_metadata()
self.submissions.init_from_datastore()
self.dataset_batches.init_from_datastore()
self.adv_batches.init_from_datastore()
self.attack_work.read_all_from_datastore()
if os.path.exists(os.path.join(self.results_dir, 'defense_work.dump')):
with open(os.path.join(self.results_dir, 'defense_work.dump')) as f:
self.defense_work.deserialize(f)
else:
self.defense_work.read_all_from_datastore()
with open(os.path.join(self.results_dir, 'defense_work.dump'), 'w') as f:
self.defense_work.serialize(f)
if os.path.exists(os.path.join(self.results_dir, 'class_batches.dump')):
with open(os.path.join(self.results_dir, 'class_batches.dump')) as f:
self.class_batches.deserialize(f)
else:
self.class_batches.init_from_datastore()
with open(os.path.join(self.results_dir, 'class_batches.dump'), 'w') as f:
self.class_batches.serialize(f)
# process data
logging.info('Processing classification results')
count_adv_images = self.adv_batches.count_generated_adv_examples()
intermediate_files = ['acc_matrix.dump', 'error_matrix.dump',
'hit_tc_matrix.dump', 'classified_images_count.dump']
if all([os.path.exists(os.path.join(self.results_dir, fname))
for fname in intermediate_files]):
with open(os.path.join(self.results_dir, 'acc_matrix.dump')) as f:
acc_matrix = pickle.load(f)
with open(os.path.join(self.results_dir, 'error_matrix.dump')) as f:
error_matrix = pickle.load(f)
with open(os.path.join(self.results_dir, 'hit_tc_matrix.dump')) as f:
hit_tc_matrix = pickle.load(f)
with open(os.path.join(self.results_dir,
'classified_images_count.dump')) as f:
classified_images_count = pickle.load(f)
else:
acc_matrix, error_matrix, hit_tc_matrix, classified_images_count = (
self.class_batches.compute_classification_results(
self.adv_batches,
self.dataset_batches,
dataset_meta,
self.defense_work))
with open(os.path.join(self.results_dir, 'acc_matrix.dump'), 'w') as f:
pickle.dump(acc_matrix, f)
with open(os.path.join(self.results_dir, 'error_matrix.dump'), 'w') as f:
pickle.dump(error_matrix, f)
with open(os.path.join(self.results_dir, 'hit_tc_matrix.dump'), 'w') as f:
pickle.dump(hit_tc_matrix, f)
with open(os.path.join(self.results_dir,
'classified_images_count.dump'), 'w') as f:
pickle.dump(classified_images_count, f)
# compute attacks and defenses which will be used for scoring
logging.info('Computing attacks and defenses which are used for scoring')
expected_num_adv_images = self.dataset_batches.count_num_images()
attacks_to_use = [k for k, v in iteritems(count_adv_images)
if ((v == expected_num_adv_images)
and (k not in self.blacklisted_submissions))]
total_num_adversarial = sum(itervalues(count_adv_images))
defenses_to_use = [k for k, v in iteritems(classified_images_count)
if ((v == total_num_adversarial)
and (k not in self.blacklisted_submissions))]
logging.info('Expected number of adversarial images: %d',
expected_num_adv_images)
logging.info('Number of attacks to use to score defenses: %d',
len(attacks_to_use))
logging.info('Expected number of classification predictions: %d',
total_num_adversarial)
logging.info('Number of defenses to use to score attacks: %d',
len(defenses_to_use))
save_dict_to_file(os.path.join(self.results_dir, 'count_adv_images.csv'),
count_adv_images)
save_dict_to_file(os.path.join(self.results_dir,
'classified_images_count.csv'),
classified_images_count)
# compute scores
logging.info('Computing scores')
attack_scores = defaultdict(lambda: 0)
targeted_attack_scores = defaultdict(lambda: 0)
defense_scores = defaultdict(lambda: 0)
for defense_id in acc_matrix.dim0:
for attack_id in acc_matrix.dim1:
if attack_id in attacks_to_use:
defense_scores[defense_id] += acc_matrix[defense_id, attack_id]
if defense_id in defenses_to_use:
if attack_id in self.submissions.targeted_attacks:
targeted_attack_scores[attack_id] += (
hit_tc_matrix[defense_id, attack_id])
else:
attack_scores[attack_id] += error_matrix[defense_id, attack_id]
# negate results of blacklisted submissions
for s_id in self.blacklisted_submissions:
if s_id in defense_scores:
defense_scores[s_id] = -defense_scores[s_id]
if s_id in attack_scores:
attack_scores[s_id] = -attack_scores[s_id]
if s_id in targeted_attack_scores:
targeted_attack_scores[s_id] = -targeted_attack_scores[s_id]
# save results
logging.info('Saving results')
all_attack_stats = self.attack_work.compute_work_statistics()
nontargeted_attack_stats = {k: v for k, v in iteritems(all_attack_stats)
if k in self.submissions.attacks}
targeted_attack_stats = {k: v for k, v in iteritems(all_attack_stats)
if k in self.submissions.targeted_attacks}
defense_stats = self.defense_work.compute_work_statistics()
self._save_work_results(
nontargeted_attack_stats, attack_scores, count_adv_images,
os.path.join(self.results_dir, 'attack_results.csv'))
self._save_work_results(
targeted_attack_stats, targeted_attack_scores, count_adv_images,
os.path.join(self.results_dir, 'targeted_attack_results.csv'))
self._save_work_results(
defense_stats, defense_scores,
| |
# Repository: macetheace96/metalearn
""" Contains unit tests for the Metafeatures class. """
import inspect
import json
import jsonschema
import math
import os
import random
import time
import unittest
import pandas as pd
import numpy as np
from metalearn import Metafeatures
from test.config import CORRECTNESS_SEED, METADATA_PATH
from test.data.dataset import read_dataset
from test.data.compute_dataset_metafeatures import get_dataset_metafeatures_path
FAIL_MESSAGE = "message"
FAIL_REPORT = "report"
TEST_NAME = "test_name"
class MetafeaturesWithDataTestCase(unittest.TestCase):
""" Contains tests for Metafeatures that require loading data first. """
def setUp(self):
self.datasets = {}
with open(METADATA_PATH, "r") as fh:
dataset_descriptions = json.load(fh)
for dataset_description in dataset_descriptions:
X, Y, column_types = read_dataset(dataset_description)
filename = dataset_description["filename"]
known_dataset_metafeatures_path = get_dataset_metafeatures_path(
filename
)
if os.path.exists(known_dataset_metafeatures_path):
with open(known_dataset_metafeatures_path) as fh:
metafeatures = json.load(fh)
self.datasets[filename] = {
"X": X, "Y": Y, "column_types": column_types,
"known_metafeatures": metafeatures,
"known_metafeatures_path": known_dataset_metafeatures_path
}
else:
raise FileNotFoundError(f"{known_dataset_metafeatures_path} does not exist")
    def tearDown(self):
        # Drop the loaded datasets so memory is reclaimed between tests.
        del self.datasets
def _report_test_failures(self, test_failures, test_name):
if test_failures != {}:
report_path = f"./failures_{test_name}.json"
with open(report_path, "w") as fh:
json.dump(test_failures, fh, indent=4)
message = next(iter(test_failures.values()))[FAIL_MESSAGE]
self.fail(
f"{message} Details have been written in {report_path}."
)
def _check_correctness(self, computed_mfs, known_mfs, filename):
"""
Tests whether computed_mfs are close to previously computed metafeature
values. This assumes that the previously computed values are correct
and allows testing for changes in metafeature computation. Only checks
the correctness of the metafeatures passed in--does not test that all
computable metafeatures were computed.
"""
test_failures = {}
fail_message = "Not all metafeatures matched previous results."
for mf_id, result in computed_mfs.items():
computed_value = result[Metafeatures.VALUE_KEY]
known_value = known_mfs[mf_id][Metafeatures.VALUE_KEY]
correct = True
if known_value is None:
correct = False
elif type(known_value) is str:
correct = known_value == computed_value
elif not np.isnan(known_value) and not np.isnan(computed_value):
correct = math.isclose(known_value, computed_value)
if not correct:
test_failures[mf_id] = {
"known_value": known_value,
"computed_value": computed_value
}
return self._format_check_report(
"correctness", fail_message, test_failures, filename
)
def _format_check_report(
self, test_name, fail_message, test_failures, filename
):
if test_failures == {}:
return test_failures
else:
return {
filename: {
TEST_NAME: test_name,
FAIL_MESSAGE: fail_message,
FAIL_REPORT: test_failures
}
}
def _check_compare_metafeature_lists(self, computed_mfs, known_mfs, filename):
"""
Tests whether computed_mfs matches the list of previously computed metafeature
names as well as the list of computable metafeatures in Metafeatures.list_metafeatures
"""
test_failures = {}
fail_message = "Metafeature lists do not match."
with open("./metalearn/metafeatures/metafeatures.json") as f:
master_mf_ids = json.load(f)["metafeatures"].keys()
master_mf_ids_set = set(master_mf_ids)
known_mf_ids_set = set({
x for x in known_mfs.keys() if "_Time" not in x
})
computed_mf_ids_set = set(computed_mfs.keys())
intersect_mf_ids_set = master_mf_ids_set.intersection(known_mf_ids_set
).intersection(computed_mf_ids_set)
master_diffs = master_mf_ids_set - intersect_mf_ids_set
if len(master_diffs) > 0:
test_failures["master_differences"] = list(master_names_unique)
known_diffs = known_mf_ids_set - intersect_mf_ids_set
if len(known_diffs) > 0:
test_failures["known_differences"] = list(known_names_unique)
computed_diffs = computed_mf_ids_set - intersect_mf_ids_set
if len(computed_diffs) > 0:
test_failures["computed_differences"] = list(computed_names_unique)
return self._format_check_report(
"metafeature_lists", fail_message, test_failures, filename
)
def _perform_checks(self, functions):
check = {}
for function, args in functions.items():
check = function(*args)
if check != {}:
break
return check
def test_run_without_exception(self):
try:
for dataset_filename, dataset in self.datasets.items():
Metafeatures().compute(
X=dataset["X"], Y=dataset["Y"],
column_types=dataset["column_types"]
)
except Exception as e:
exc_type = type(e).__name__
self.fail(f"computing metafeatures raised {exc_type} unexpectedly")
def test_correctness(self):
"""Tests that metafeatures are computed correctly, for known datasets.
"""
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
computed_mfs = Metafeatures().compute(
X=dataset["X"], Y=dataset["Y"], seed=CORRECTNESS_SEED,
column_types=dataset["column_types"]
)
known_mfs = dataset["known_metafeatures"]
required_checks = {
self._check_correctness: [
computed_mfs, known_mfs, dataset_filename
],
self._check_compare_metafeature_lists: [
computed_mfs, known_mfs, dataset_filename
]
}
test_failures.update(self._perform_checks(required_checks))
self._report_test_failures(test_failures, test_name)
def test_individual_metafeature_correctness(self):
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
known_mfs = dataset["known_metafeatures"]
for mf_id in Metafeatures.IDS:
computed_mfs = Metafeatures().compute(
X=dataset["X"], Y=dataset["Y"], seed=CORRECTNESS_SEED,
metafeature_ids=[mf_id],
column_types=dataset["column_types"]
)
required_checks = {
self._check_correctness: [
computed_mfs, known_mfs, dataset_filename
]
}
test_failures.update(self._perform_checks(required_checks))
self._report_test_failures(test_failures, test_name)
def test_no_targets(self):
""" Test Metafeatures().compute() without targets
"""
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
metafeatures = Metafeatures()
computed_mfs = metafeatures.compute(
X=dataset["X"], Y=None, seed=CORRECTNESS_SEED,
column_types=dataset["column_types"]
)
known_mfs = dataset["known_metafeatures"]
target_dependent_metafeatures = Metafeatures.list_metafeatures(
"target_dependent"
)
for mf_name in target_dependent_metafeatures:
known_mfs[mf_name] = {
Metafeatures.VALUE_KEY: Metafeatures.NO_TARGETS,
Metafeatures.COMPUTE_TIME_KEY: 0.
}
required_checks = {
self._check_correctness: [
computed_mfs, known_mfs, dataset_filename
],
self._check_compare_metafeature_lists: [
computed_mfs, known_mfs, dataset_filename
]
}
test_failures.update(self._perform_checks(required_checks))
self._report_test_failures(test_failures, test_name)
def test_request_metafeatures(self):
SUBSET_LENGTH = 20
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
metafeature_ids = random.sample(Metafeatures.IDS, SUBSET_LENGTH)
computed_mfs = Metafeatures().compute(
X=dataset["X"],Y=dataset["Y"], seed=CORRECTNESS_SEED,
metafeature_ids=metafeature_ids,
column_types=dataset["column_types"]
)
known_metafeatures = dataset["known_metafeatures"]
required_checks = {
self._check_correctness: [
computed_mfs, known_metafeatures, dataset_filename
]
}
test_failures.update(self._perform_checks(required_checks))
self.assertEqual(
metafeature_ids, list(computed_mfs.keys()),
"Compute did not return requested metafeatures"
)
self._report_test_failures(test_failures, test_name)
def test_compute_effects_on_dataset(self):
"""
Tests whether computing metafeatures has any side effects on the input
X or Y data. Fails if there are any side effects.
"""
for dataset in self.datasets.values():
X_copy, Y_copy = dataset["X"].copy(), dataset["Y"].copy()
Metafeatures().compute(
X=dataset["X"],Y=dataset["Y"],
column_types=dataset["column_types"]
)
if not (
X_copy.equals(dataset["X"]) and Y_copy.equals(dataset["Y"])
):
self.assertTrue(
False, "Input data has changed after Metafeatures.compute"
)
def test_compute_effects_on_compute(self):
"""
Tests whether computing metafeatures has any side effects on the
instance metafeatures object. Fails if there are any side effects.
"""
required_checks = {}
test_failures = {}
test_name = inspect.stack()[0][3]
for dataset_filename, dataset in self.datasets.items():
metafeatures_instance = Metafeatures()
# first run
metafeatures_instance.compute(
X=dataset["X"],Y=dataset["Y"],seed=CORRECTNESS_SEED,
column_types=dataset["column_types"]
)
# second run
computed_mfs = metafeatures_instance.compute(
X=dataset["X"],Y=dataset["Y"],seed=CORRECTNESS_SEED,
column_types=dataset["column_types"]
)
known_mfs = dataset["known_metafeatures"]
required_checks[self._check_correctness] = [
computed_mfs, known_mfs, dataset_filename
]
test_failures.update(self._perform_checks(required_checks))
self._report_test_failures(test_failures, test_name)
def test_output_format(self):
with open("./metalearn/metafeatures/metafeatures_schema.json") as f:
mf_schema = json.load(f)
for dataset_filename, dataset in self.datasets.items():
computed_mfs = Metafeatures().compute(
X=dataset["X"],Y=dataset["Y"],
column_types=dataset["column_types"]
)
try:
jsonschema.validate(computed_mfs, mf_schema)
except jsonschema.exceptions.ValidationError as e:
self.fail(
f"Metafeatures computed from {dataset_filename} do not "+
"conform to schema"
)
def test_output_json_compatibility(self):
with open("./metalearn/metafeatures/metafeatures_schema.json") as f:
mf_schema = json.load(f)
for dataset_filename, dataset in self.datasets.items():
computed_mfs = Metafeatures().compute(
X=dataset["X"],Y=dataset["Y"],
column_types=dataset["column_types"]
)
try:
json_computed_mfs = json.dumps(computed_mfs)
except Exception as e:
self.fail(
f"Failed to convert metafeature output to json: {str(e)}"
)
class MetafeaturesTestCase(unittest.TestCase):
""" Contains tests for Metafeatures that can be executed without loading data. """
    def setUp(self):
        """Create small synthetic inputs shared by the tests below."""
        # 50x50 uniform-random feature matrix and a binary target cast to str.
        self.dummy_features = pd.DataFrame(np.random.rand(50, 50))
        self.dummy_target = pd.Series(np.random.randint(2, size=50), name="target").astype("str")
        # Expected prefix of the error for unknown metafeature ids, plus the
        # assertion messages used when that error is malformed.
        self.invalid_metafeature_message_start = "One or more requested metafeatures are not valid:"
        self.invalid_metafeature_message_start_fail_message = "Error message indicating invalid metafeatures did not start with expected string."
        self.invalid_metafeature_message_contains_fail_message = "Error message indicating invalid metafeatures should include names of invalid features."
def test_dataframe_input_error(self):
""" Tests if `compute` gives a user-friendly error when a TypeError occurs. """
expected_error_message1 = "X must be of type pandas.DataFrame"
fail_message1 = "We expect a user friendly message when the features passed to compute is not a Pandas.DataFrame."
expected_error_message2 = "Y must be of type pandas.Series"
fail_message2 = "We expect a user friendly message when the target column passed to compute is not a Pandas.Series."
# We don't check for the Type of TypeError explicitly as any other error would fail the unit test.
with self.assertRaises(TypeError) as cm:
Metafeatures().compute(X=None, Y=self.dummy_target)
self.assertEqual(str(cm.exception), expected_error_message1, fail_message1)
with self.assertRaises(TypeError) as cm:
Metafeatures().compute(X=np.zeros((500, 50)), Y=pd.Series(np.zeros(500)))
self.assertEqual(str(cm.exception), expected_error_message1, fail_message1)
with self.assertRaises(TypeError) as cm:
Metafeatures().compute(X=pd.DataFrame(np.zeros((500, 50))), Y=np.zeros(500))
self.assertEqual(str(cm.exception), expected_error_message2, fail_message2)
def _check_invalid_metafeature_exception_string(self, exception_str, invalid_metafeatures):
""" Checks if the exception message starts with the right string, and contains all of the invalid metafeatures expected. """
self.assertTrue(
exception_str.startswith(self.invalid_metafeature_message_start),
self.invalid_metafeature_message_start_fail_message
)
for invalid_mf in invalid_metafeatures:
self.assertTrue(
invalid_mf in exception_str,
self.invalid_metafeature_message_contains_fail_message
)
    def test_metafeatures_input_all_invalid(self):
        """ Test case where all requested metafeatures are invalid. """
        invalid_metafeatures = ["ThisIsNotValid", "ThisIsAlsoNotValid"]

        # compute() must reject the request with a ValueError naming them all.
        with self.assertRaises(ValueError) as cm:
            Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target, metafeature_ids=invalid_metafeatures)

        self._check_invalid_metafeature_exception_string(str(cm.exception), invalid_metafeatures)
def test_metafeatures_input_partial_invalid(self):
""" Test case where only some requested metafeatures are invalid. """
invalid_metafeatures = ["ThisIsNotValid", "ThisIsAlsoNotValid"]
valid_metafeatures = ["NumberOfInstances", "NumberOfFeatures"]
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target,
metafeature_ids=invalid_metafeatures + valid_metafeatures)
self._check_invalid_metafeature_exception_string(str(cm.exception), invalid_metafeatures)
# Order should not matter
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(X=self.dummy_features, Y=self.dummy_target,
metafeature_ids=valid_metafeatures + invalid_metafeatures)
self._check_invalid_metafeature_exception_string(str(cm.exception), invalid_metafeatures)
def test_column_type_input(self):
column_types = {col: "NUMERIC" for col in self.dummy_features.columns}
column_types[self.dummy_features.columns[2]] = "CATEGORICAL"
column_types[self.dummy_target.name] = "CATEGORICAL"
# all valid
try:
Metafeatures().compute(
self.dummy_features, self.dummy_target, column_types
)
except Exception as e:
exc_type = type(e).__name__
self.fail(f"computing metafeatures raised {exc_type} unexpectedly")
# some valid
column_types[self.dummy_features.columns[0]] = "NUMBER"
column_types[self.dummy_features.columns[1]] = "CATEGORY"
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(
self.dummy_features, self.dummy_target, column_types
)
self.assertTrue(
str(cm.exception).startswith(
"Invalid column types:"
),
"Some invalid column types test failed"
)
# all invalid
column_types = {feature: "INVALID_TYPE" for feature in self.dummy_features.columns}
column_types[self.dummy_target.name] = "INVALID"
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(
self.dummy_features, self.dummy_target, column_types
)
self.assertTrue(
str(cm.exception).startswith(
"Invalid column types:"
),
"All invalid column types test failed"
)
# invalid number of column types
del column_types[self.dummy_features.columns[0]]
with self.assertRaises(ValueError) as cm:
Metafeatures().compute(
self.dummy_features, self.dummy_target, | |
body = self[BODY_INDEX]
if body is None:
return
target = body[BODY_TARGET_INDEX]
if target is None:
return
if isinstance(target, PostureSpecVariable):
return target.name
elif target.is_part:
return target.part_owner
return target
@property
def _body_target_with_part(self):
body = self[BODY_INDEX]
if body is None:
return
target = body[BODY_TARGET_INDEX]
if target is None:
return
elif isinstance(target, PostureSpecVariable):
return target.name
return target
@property
def _body_part(self):
body = self[BODY_INDEX]
if body is None:
return
target = body[BODY_TARGET_INDEX]
if target is None or isinstance(target, PostureSpecVariable):
return
elif target.is_part:
return target.part_group_index
@property
def _is_carrying(self):
carry = self[CARRY_INDEX]
if carry is not None and carry[CARRY_TARGET_INDEX] is not None:
return True
return False
@property
def _at_surface(self):
surface = self[SURFACE_INDEX]
if surface is not None and surface[SURFACE_SLOT_TYPE_INDEX] is not None:
return True
return False
@property
def _surface_target_type(self):
surface = self[SURFACE_INDEX]
if surface is None:
return
target = surface[SURFACE_TARGET_INDEX]
if target is None:
return
if isinstance(target, PostureSpecVariable):
return target.name
if target.is_part:
target = target.part_owner
return type(target).__name__
@property
def _surface_target(self):
surface = self[SURFACE_INDEX]
if surface is None:
return
target = surface[SURFACE_TARGET_INDEX]
if target is None:
return
if isinstance(target, PostureSpecVariable):
return target.name
elif target.is_part:
return target.part_owner
return target
@property
def _surface_target_with_part(self):
surface = self[SURFACE_INDEX]
if surface is None:
return
target = surface[SURFACE_TARGET_INDEX]
if target is None:
return
elif isinstance(target, PostureSpecVariable):
return target.name
return target
@property
def _surface_part(self):
surface = self[SURFACE_INDEX]
if surface is None:
return
target = surface[SURFACE_TARGET_INDEX]
if target is None or isinstance(target, PostureSpecVariable):
return
elif target.is_part:
return target.part_group_index
@property
def _slot_target(self):
surface = self[SURFACE_INDEX]
if surface[SURFACE_TARGET_INDEX] is not None:
if surface[SURFACE_SLOT_TYPE_INDEX] is not None:
slot_target = surface[SURFACE_SLOT_TARGET_INDEX]
if slot_target is not None:
if isinstance(slot_target, PostureSpecVariable):
return slot_target.name
else:
return 'TargetInSlot'
else:
return 'EmptySlot'
else:
return 'AtSurface'
    def __repr__(self):
        # Human-readable summary: "<posture>@<target>" plus carry/surface
        # annotations.  A missing CARRY/SURFACE entry renders as ":any",
        # while a present-but-empty carry is omitted entirely.
        result = '{}@{}'.format(self._body_posture_name, _simple_id_str(self._body_target_with_part))
        carry = self[CARRY_INDEX]
        if carry is None:
            result += ', carry:any'
        elif self[CARRY_INDEX][CARRY_TARGET_INDEX] is not None:
            result += ', carry'
        surface = self[SURFACE_INDEX]
        if surface is None:
            result += ', surface:any'
        elif surface[SURFACE_SLOT_TYPE_INDEX] is not None:
            # Slot type present: distinguish an occupied slot from an empty one.
            if surface[SURFACE_SLOT_TARGET_INDEX] is not None:
                result += ', surface:slot_target@{}'.format(_simple_id_str(self._surface_target_with_part))
            else:
                result += ', surface:empty_slot@{}'.format(_simple_id_str(self._surface_target_with_part))
        elif surface[SURFACE_TARGET_INDEX] is not None:
            result += ', surface:{}'.format(_simple_id_str(self._surface_target_with_part))
        return result
def get_core_objects(self):
body_target = self[BODY_INDEX][BODY_TARGET_INDEX]
surface_target = self[SURFACE_INDEX][SURFACE_TARGET_INDEX]
core_objects = set()
if body_target is not None:
core_objects.add(body_target)
body_target_parent = body_target.parent
if body_target_parent is not None:
core_objects.add(body_target_parent)
if surface_target is not None:
core_objects.add(surface_target)
return core_objects
    def get_relevant_objects(self):
        """Return the set of objects that matter when evaluating this spec.

        Mobile, untargeted postures (or the special carried posture) can be
        satisfied anywhere, so every valid object is relevant; otherwise the
        core objects are expanded with part neighbors and children.
        """
        body_posture = self[BODY_INDEX][BODY_POSTURE_TYPE_INDEX]
        body_target = self[BODY_INDEX][BODY_TARGET_INDEX]
        surface_target = self[SURFACE_INDEX][SURFACE_TARGET_INDEX]
        # NOTE: `and` binds tighter than `or`, so this reads as
        # (mobile and no body target and no surface target) or carried-posture.
        if body_posture.mobile and body_target is None and surface_target is None or body_posture is PostureTuning.SIM_CARRIED_POSTURE:
            return valid_objects()
        relevant_objects = self.get_core_objects()
        if body_target is not None:
            if body_target.is_part:
                # Adjacent parts of the same object may satisfy the same spec.
                relevant_objects.update(body_target.adjacent_parts_gen())
            relevant_objects.update(body_target.children)
        # NOTE(review): the second clause is logically redundant
        # (`not A or (A and B)` == `not A or B`) -- preserved as written.
        can_transition_to_carry = not body_posture.mobile or body_posture.mobile and body_target is None
        if can_transition_to_carry and body_posture.is_available_transition(PostureTuning.SIM_CARRIED_POSTURE):
            relevant_objects.update(instanced_sims())
        return relevant_objects
def same_spec_except_slot(self, target):
if self.body == target.body and self.carry == target.carry and self[SURFACE_INDEX][SURFACE_TARGET_INDEX] == target[SURFACE_INDEX][SURFACE_TARGET_INDEX]:
return True
return False
def same_spec_ignoring_surface_if_mobile(self, target):
if self.body_posture.mobile and self.body_posture == target.body_posture and self.carry == target.carry:
return True
return False
def is_on_vehicle(self):
target = self.body[BODY_TARGET_INDEX]
if target is not None:
return target.vehicle_component is not None
return False
    def validate_destination(self, destination_specs, var_map, interaction, sim):
        """Validate this spec as a destination for *interaction* run by *sim*.

        Runs the carry/subroot/surface/body checks first, then vets the body
        and surface targets against distribution validity, pending deletion,
        affordance suppression, and zone-director rules.
        """
        if not any(self._validate_carry(destination_spec) for destination_spec in destination_specs):
            return False
        if not self._validate_subroot(interaction, sim):
            return False
        if not self._validate_surface(var_map, affordance=interaction.affordance):
            return False
        if not self._validate_body(interaction, sim):
            return False
        posture_graph_service = services.current_zone().posture_graph_service
        zone_director = services.venue_service().get_zone_director()
        for obj in (self.body_target, self.surface_target):
            if not obj is None:
                # Spec variables are placeholders, not real objects -- skip them.
                if isinstance(obj, PostureSpecVariable):
                    continue
                if not obj.valid_for_distribution or posture_graph_service.is_object_pending_deletion(obj):
                    return False
                if obj.check_affordance_for_suppression(sim, interaction, user_directed=False):
                    return False
                if not zone_director.zone_director_specific_destination_tests(sim, obj):
                    return False
        return True
def _validate_body(self, interaction, sim):
body = self[BODY_INDEX]
if body is None:
return True
target = body[BODY_TARGET_INDEX]
if target is None:
return True
else:
affordance = interaction.affordance
if interaction.is_social:
if sim is not interaction.sim:
linked_interaction_type = interaction.linked_interaction_type
if linked_interaction_type is not None:
if linked_interaction_type is not affordance:
affordance = linked_interaction_type
if not target.supports_affordance(affordance):
return False
return True
def _validate_carry(self, destination_spec):
dest_carry = destination_spec[CARRY_INDEX]
if dest_carry is None or dest_carry[CARRY_TARGET_INDEX] is None:
if self[CARRY_INDEX][CARRY_TARGET_INDEX] is None:
return True
return False
elif dest_carry == self[CARRY_INDEX]:
return True
return False
    def _validate_surface(self, var_map, affordance=None):
        """Validate the surface/slot portion of this spec against *var_map*.

        Checks, in order: the surface supports *affordance*; the slot type
        resolves through var_map; and either an empty matching slot exists,
        the child is already in a matching slot, or some runtime slot can
        accept the child / slot-test definition.
        """
        surface_spec = self[SURFACE_INDEX]
        if surface_spec is None:
            return True
        surface = surface_spec[SURFACE_TARGET_INDEX]
        if surface is None:
            return True
        if affordance is not None and not surface.supports_affordance(affordance):
            return False
        slot_type = surface_spec[SURFACE_SLOT_TYPE_INDEX]
        if slot_type is None:
            return True
        slot_manifest_entry = var_map.get(slot_type)
        if slot_manifest_entry is None:
            # Slot type has no binding in the variable map: cannot satisfy.
            return False
        else:
            runtime_slots = set(surface.get_runtime_slots_gen(slot_types=slot_manifest_entry.slot_types))
            slot_target = surface_spec[SURFACE_SLOT_TARGET_INDEX]
            child = var_map.get(slot_target)
            if child is None:
                if PostureSpecVariable.SLOT_TEST_DEFINITION not in var_map:
                    # No specific child: any empty matching slot will do.
                    if any(runtime_slot.empty for runtime_slot in runtime_slots):
                        return True
                    return False
            else:
                # A child already sitting in a matching slot satisfies the spec.
                current_slot = child.parent_slot
                if current_slot is not None and slot_manifest_entry.actor is child and current_slot in runtime_slots:
                    return True
                return False
            if PostureSpecVariable.SLOT_TEST_DEFINITION in var_map:
                # Test placement of a hypothetical definition, not a real object.
                slot_test_object = DEFAULT
                slot_test_definition = var_map[PostureSpecVariable.SLOT_TEST_DEFINITION]
            else:
                slot_test_object = child
                slot_test_definition = DEFAULT
            # Ignore the carried object when testing slot occupancy.
            carry_target = self[CARRY_INDEX][CARRY_TARGET_INDEX]
            carry_target = var_map.get(carry_target)
            objects_to_ignore = (carry_target,) if carry_target is not None else DEFAULT
            for runtime_slot in runtime_slots:
                if runtime_slot.is_valid_for_placement(obj=slot_test_object, definition=slot_test_definition, objects_to_ignore=objects_to_ignore):
                    return True
            return False
    def _validate_subroot(self, interaction, sim):
        """Check required part-definition constraints for the actor/target sims.

        Some postures require the participating sim to be on a specific part
        definition; reject specs whose body target is not that part.
        """
        body_posture = self.body_posture
        if sim is interaction.sim and body_posture._actor_required_part_definition is not None:
            if not self.body_target.is_part or body_posture._actor_required_part_definition is not self.body_target.part_definition:
                return False
        elif sim is interaction.target and body_posture._actor_b_required_part_definition is not None and (not self.body_target.is_part or body_posture._actor_b_required_part_definition is not self.body_target.part_definition):
            return False
        return True
    @property
    def requires_carry_target_in_hand(self):
        """True when the spec demands the sim hold a specific carry target."""
        return self[CARRY_INDEX][CARRY_TARGET_INDEX] is not None
    @property
    def requires_carry_target_in_slot(self):
        """True when the spec demands a target placed in a surface slot."""
        return self[SURFACE_INDEX][SURFACE_SLOT_TARGET_INDEX] is not None
def get_carry_posture_aop(sim, carry_target):
    """Return the HoldObject AOP for *carry_target* for *sim*.

    Falls through (returning None) with an error log when no HoldObject
    super-interaction is available -- typically because the object lacks a
    Carryable component.
    """
    # Imported locally to avoid a circular import at module load time.
    from postures.posture_interactions import HoldObject
    context = sim.create_posture_interaction_context()
    for aop in carry_target.potential_interactions(context):
        if issubclass(aop.affordance, HoldObject):
            return aop
    # BUG FIX: message previously read "...type HoldObjectCheck that your
    # object..." -- a missing sentence break garbled the class name.
    logger.error('Sim {} The carry_target: ({}) has no SIs of type HoldObject. Check that your object has a Carryable Component.', sim, carry_target, owner='camilogarcia')
class PostureOperation:
DEFAULT_COST_KEY = 'default_cost'
COST_NOMINAL = Tunable(description='\n A nominal cost to simple operations just to prevent them from being\n free.\n ', tunable_type=float, default=0.1)
COST_STANDARD = Tunable(description='\n A cost for standard posture operations (such as changing postures or\n targets).\n ', tunable_type=float, default=1.0)
    class OperationBase:
        """Abstract base for posture-graph operations.

        Subclasses implement apply() to transform a posture spec and cost()
        to price the corresponding edge in the posture graph search.
        """
        __slots__ = ()

        def apply(self, node):
            # Produce the successor spec for *node*; must be overridden.
            raise NotImplementedError()

        def validate(self, sim, var_map, original_body_target=None):
            # Default: operations are always valid; subclasses may veto.
            return True

        def get_validator(self, next_node):
            # Validation callable the search uses for *next_node*.
            return self.validate

        def cost(self, node):
            # Nominal cost so that no operation is free.
            return {PostureOperation.DEFAULT_COST_KEY: PostureOperation.COST_NOMINAL}

        @property
        def debug_cost_str_list(self):
            # Optional human-readable cost breakdown; None by default.
            pass

        def associated_aop(self, sim, var_map):
            # AOP that executes this operation, when one exists.
            pass

        def is_equivalent_to(self, other):
            raise NotImplementedError

        def get_constraint(self, sim, node, var_map):
            # Spatial constraint imposed by this operation, when any.
            pass

        def set_target(self, target):
            # Hook for subclasses that carry a target.
            pass
class BodyTransition(OperationBase):
__slots__ = ('_posture_type', '_species_to_aops', '_disallowed_ages', 'target')
        def __init__(self, posture_type, species_to_aops, target=None, disallowed_ages=None):
            """Create a body transition to *posture_type*.

            species_to_aops maps each species to the AOP that performs the
            transition; disallowed ages are derived from those AOPs' affordance
            tests unless supplied explicitly.
            """
            self._posture_type = posture_type
            self._species_to_aops = species_to_aops
            if disallowed_ages is None:
                # Derive per-species disallowed ages from each AOP's affordance.
                disallowed_ages_from_aops = {}
                for (species, aop) in species_to_aops.items():
                    disallowed_ages_from_aops[species] = event_testing.test_utils.get_disallowed_ages(aop.affordance)
                self._disallowed_ages = enumdict(Species, disallowed_ages_from_aops)
            else:
                self._disallowed_ages = enumdict(Species, disallowed_ages)
            if target is None:
                # Default to the target of an arbitrary species' AOP.
                self.target = next(iter(self._species_to_aops.values())).target
            else:
                self.target = target
def is_equivalent_to(self, other):
return type(self) == type(other) and (self._species_to_aops[Species.HUMAN].is_equivalent_to(other._species_to_aops[Species.HUMAN]) and self._posture_type == other._posture_type)
        def __repr__(self):
            # e.g. "BodyTransition(PostureType_Sit)"
            return '{}({})'.format(type(self).__name__, _str_for_type(self._posture_type))
        @property
        def posture_type(self):
            """Destination posture type of this transition."""
            return self._posture_type
        def set_target(self, target):
            """Override the transition's destination target."""
            self.target = target
def all_associated_aops_gen(self):
for (species, aop) in self._species_to_aops.items():
yield (species, aop)
        def add_associated_aop(self, species, aop):
            """Register (or replace) the AOP used for *species*."""
            self._species_to_aops[species] = aop
        def associated_aop(self, sim, var_map):
            """Return the AOP for *sim*'s species, falling back to HUMAN with
            error logs when the species has no registered AOP."""
            if sim.species in self._species_to_aops:
                return self._species_to_aops[sim.species]
            logger.error("Trying to get aop for {} in BodyOperation: {} which doesn't exist, using human instead", sim.species, self)
            if Species.HUMAN not in self._species_to_aops:
                logger.error('Failed to get fallback aop for Human species for Sim {} in body operation {}', sim, self)
                return
            return self._species_to_aops[Species.HUMAN]
        def cost(self, node):
            """Price the transition from spec *node* into this posture.

            Starts from the tuned per-key transition costs and adds a base
            cost for entering/exiting objects, moving between non-mobile
            targets, and leaving multi-sim postures.
            """
            # Local aliases of the spec indices (hot path in the graph search).
            body_index = BODY_INDEX
            body_posture_type_index = BODY_POSTURE_TYPE_INDEX
            body_target_index = BODY_TARGET_INDEX
            curr_body = node[body_index]
            curr_body_target = curr_body[body_target_index]
            curr_posture_type = curr_body[body_posture_type_index]
            next_posture_type = self._posture_type
            current_mobile = curr_posture_type.mobile
            next_mobile = next_posture_type.mobile
            next_body_target = self.target
            base_cost = 0
            # Crossing the mobile/non-mobile boundary means entering or exiting an object.
            if current_mobile != next_mobile:
                base_cost += postures.posture_scoring.PostureScoring.ENTER_EXIT_OBJECT_COST
            if curr_body_target is not next_body_target:
                if not current_mobile:
                    if not next_mobile:
                        # Direct move between two non-mobile targets; cheaper when coincident.
                        if vector3_almost_equal(curr_body_target.position, next_body_target.position):
                            base_cost += postures.posture_scoring.PostureScoring.INNER_NON_MOBILE_TO_NON_MOBILE_COINCIDENT_COST
                        else:
                            base_cost += postures.posture_scoring.PostureScoring.INNER_NON_MOBILE_TO_NON_MOBILE_COST
            # Discourage churn through multi-sim postures.
            if curr_posture_type.multi_sim:
                base_cost += PostureOperation.COST_STANDARD
            # Fold the base cost into every tuned transition-cost entry.
            costs = dict(curr_posture_type.get_transition_costs(next_posture_type))
            for key in costs:
                costs[key] += base_cost
            return costs
        @property
        def debug_cost_str_list(self):
            """No per-transition debug cost strings; always an empty list."""
            return []
def apply(self, spec):
if spec[CARRY_INDEX][CARRY_TARGET_INDEX] is not None and not self.posture_type._supports_carry:
return
surface_target = spec[SURFACE_INDEX][SURFACE_TARGET_INDEX]
destination_target = self.target
if surface_target is not None and destination_target is None:
return
body = spec[BODY_INDEX]
source_target = body[BODY_TARGET_INDEX]
source_posture_type = body[BODY_POSTURE_TYPE_INDEX]
if not source_posture_type.unconstrained and (surface_target is not None and self.posture_type.unconstrained) and self.posture_type is not PostureTuning.SIM_CARRIED_POSTURE:
return
dest_target_is_not_none = destination_target is not None
dest_target_parent = | |
<gh_stars>1-10
from copy import deepcopy
from time import time, strftime, sleep
from threading import Thread
from math import floor
class Node(object):
    """Doubly linked list node holding *data* plus next/previous links."""

    def __init__(self, data, next=None, previous=None):
        self.data = data
        self.next = next
        self.previous = previous

    def set_next(self, next):
        """Point this node's forward link at *next*."""
        self.next = next

    def set_data(self, data):
        """Replace this node's payload."""
        self.data = data


class LinkedList(object):
    """Head-insertion doubly linked list with a forward cursor.

    Bug fixes relative to the original:
    * remove() of the head node previously did ``self.root = node`` (a no-op),
      so the head could never actually be removed; it now relinks to
      ``node.next``.
    * remove() now also repairs the ``previous`` back-link of the node that
      follows the removed one.
    """

    def __init__(self, root=None):
        self.root = root
        self.current_node = self.root  # cursor advanced by next()
        self.size = 0

    def __repr__(self):
        return str(self.current_node.data)

    def add(self, data):
        """Insert *data* at the head and reset the cursor to the new node."""
        node = Node(data)
        node.next = self.root
        if self.root is not None:
            self.root.previous = node
        self.root = node
        self.current_node = node
        self.size += 1

    def remove(self, data):
        """Remove the first node whose payload equals *data*.

        Returns True when a node was removed, False otherwise.
        """
        node = self.root
        prev = None
        while node is not None:
            if node.data == data:
                if prev:
                    prev.set_next(node.next)
                else:
                    # Removing the head: advance root past the removed node.
                    self.root = node.next
                if node.next is not None:
                    # Keep the backward link consistent.
                    node.next.previous = prev
                self.size -= 1
                return True
            prev = node
            node = node.next
        return False

    def find(self, data):
        """Return the first node holding *data*, or None."""
        node = self.root
        while node is not None:
            if node.data == data:
                return node
            node = node.next
        return None

    def print(self):
        """Print all payloads head-to-tail on one line (assumes numeric data)."""
        node = self.root
        while node is not None:
            print("%d" % node.data, end=" ")
            node = node.next
        print()

    def next(self):
        """Advance the cursor; return False when already at the tail."""
        if self.current_node.next is not None:
            self.current_node = self.current_node.next
            return True
        return False

    def hasNext(self, node):
        """Return True when *node* has a successor."""
        return node.next is not None
class Array:
    """Fixed-capacity array wrapper around a Python list.

    Bug fixes relative to the original:
    * set() used ``pos < self.length - 1`` and therefore could never write
      the last slot; the bound is now ``pos < self.length``.
    * reverse() assigned the ``reversed`` iterator object itself to
      self.data, leaving the array unusable; it now stores a real list.
    * remove(value=...) iterated search()'s result, which is a plain int
      (or None) when the value occurs at most once, raising TypeError;
      removal by value no longer depends on search().
    * ``length`` is now kept in sync when elements are added or removed.
    """

    def __init__(self, slots, initialize=None):
        self.length = slots
        self.data = [initialize] * slots

    def set(self, pos, val=None, function=None):
        """Set slot *pos* to *val*, or to function(current) when *function*
        (and not *val*) is given; out-of-range positions are ignored."""
        if 0 <= pos < self.length:
            if function is not None and val is None:
                self.data[pos] = function(self.data[pos])
            elif function is None and val is not None:
                self.data[pos] = val

    def get(self):
        """Return the backing list."""
        return self.data

    def getValue(self, index):
        """Return the element at *index*."""
        return self.data[index]

    def search(self, value):
        """Locate *value*: None when absent, an int index for a single match,
        or a list of indices for multiple matches.

        NOTE: the varying return type is preserved for backward compatibility.
        """
        indexes = [i for i, item in enumerate(self.data) if item == value]
        if not indexes:
            return None
        if len(indexes) == 1:
            return indexes[0]
        return indexes

    def remove(self, index, value=None):
        """Remove by position (in place) or by value (returns a new Array).

        With only *index*: pop that slot in place. With *value* given: return
        a copy with every occurrence of *value* removed (this Array is left
        untouched). Raises IndexError for an invalid position.
        """
        if value is not None:
            newArr = self.copy()
            while value in newArr.data:
                newArr.data.remove(value)
            newArr.length = len(newArr.data)
            return newArr
        if type(index) == int and 0 <= index < self.length:
            self.data.pop(index)
            self.length -= 1  # keep capacity bookkeeping consistent
        else:
            raise IndexError

    def copy(self):
        """Return a deep copy of this Array."""
        return deepcopy(self)

    @staticmethod
    def listToArray(a):
        """Build an Array from a plain list (returns None for other types)."""
        if type(a) is list:
            newArray = Array(len(a), 0)
            newArray.data = deepcopy(a)
            newArray.length = len(a)
            return newArray

    def swap(self, i, j):
        """Exchange the elements at positions *i* and *j*."""
        self.data[i], self.data[j] = self.data[j], self.data[i]

    def reverse(self):
        """Reverse the array in place."""
        self.data = self.data[::-1]  # was: stored a reversed() iterator

    def concat(self, data):
        """Append the elements of list *data* to this array."""
        self.data = self.data + data
        self.length = len(self.data)

    def splice(self, index):
        """Pop the element at *index* and return it wrapped in a list."""
        value = self.getValue(index)
        self.remove(index)
        return [value]

    def print(self):
        """Print the elements space-separated on one line."""
        for d in self.data:
            print(d, end=" ")
        print()
class Sorting:
    def __init__(self, arr):
        """Wrap *arr* so the sorting methods can return sorted copies."""
        self.array = arr
        # NOTE(review): aliases the same list object as self.array, so
        # in-place mutation affects both -- confirm intended.
        self.original = arr
        # Snapshots of the array after each move, for visualization/metrics.
        self.history = []
        self.length = len(self.array)
    def __repr__(self):
        # Render as the current contents of the wrapped list.
        return str(self.array)
    def get(self):
        """Return the wrapped list."""
        return self.array
def BubbleSort(self, reverse=False):
a = deepcopy(self)
start = time()
for i in range(a.length):
for j in range(a.length):
if not reverse:
if a.array[i] < a.array[j]:
aux = a.array[i]
a.array[i] = a.array[j]
a.array[j] = aux
a.history.append(deepcopy(a.array))
else:
if a.array[i] > a.array[j]:
aux = a.array[i]
a.array[i] = a.array[j]
a.array[j] = aux
a.history.append(deepcopy(a.array))
end = time()
print("Algorithm: {}".format("BubbleSort"), "|", "Time ellapsed: {}{}".format(
end-start, "s"), "|", "Moves: {}".format(len(a.history)), flush=True)
return a
def InsertionSort(self, save=False):
a = deepcopy(self)
start = time()
for i in range(1, a.length):
key = a.array[i]
j = i-1
while j >= 0 and key < a.array[j]:
a.array[j + 1] = a.array[j]
j -= 1
if save:
a.history.append(deepcopy(a.array))
a.array[j + 1] = key
if save:
a.history.append(deepcopy(a.array))
end = time()
print("Algorithm: {}".format("InsertionSort"), "|", "Time ellapsed: {}{}".format(
end-start, "s"), "|", "Moves: {}".format(len(a.history)), flush=True)
return a
    def QuickSort(self):
        """Return a sorted copy via recursive quicksort.

        NOTE(review): history is never appended to here or in __quicks, so
        "Moves" always prints 0 for this algorithm.
        """
        a = deepcopy(self)
        start = time()
        self.__quicks(a.array, 0, a.length-1)
        end = time()
        print("Algorithm: {}".format("QuickSort"), "|", "Time ellapsed: {}{}".format(
            end-start, "s"), "|", "Moves: {}".format(len(a.history)), flush=True)
        return a
def __countingSort(self, arr, exp1):
n = len(arr)
output = [0] * (n)
count = [0] * (10)
for i in range(0, n):
index = (arr[i]/exp1)
count[(index) % 10] += 1
for i in range(1, 10):
count[i] += count[i-1]
i = n-1
while i >= 0:
index = int(arr[i]/exp1)
output[count[(index) % 10] - 1] = arr[i]
count[(index) % 10] -= 1
i -= 1
i = 0
for i in range(0, len(arr)):
arr[i] = output[i]
def RadixSort(self):
    """
    Return a sorted deep copy of self using LSD radix sort over decimal
    digits (non-negative integers only).

    Fix: the loop previously tested ``max1 / exp > 0`` with true division;
    a positive float over a growing power of ten only reaches 0 via float
    underflow, so roughly 300 useless counting-sort passes were executed
    after the array was already sorted.  Integer floor division terminates
    as soon as every digit has been processed.
    """
    snap = deepcopy(self)
    t0 = time()
    largest = max(snap.array)
    exp = 1
    while largest // exp > 0:
        self.__countingSort(snap.array, exp)
        exp *= 10
    t1 = time()
    print("Algorithm: {}".format("RadixSort"), "|", "Time ellapsed: {}{}".format(
        t1 - t0, "s"), "|", "Moves: {}".format(len(snap.history)), flush=True)
    return snap
def __quicks(self, arr, low, high):
if low < high:
pi = self.__partition(arr, low, high)
self.__quicks(arr, low, pi-1)
self.__quicks(arr, pi+1, high)
def __partition(self, arr, low, high):
i = low - 1
pivot = arr[high]
for j in range(low, high):
if arr[j] <= pivot:
i = i+1
arr[i], arr[j] = arr[j], arr[i]
arr[i+1], arr[high] = arr[high], arr[i+1]
return (i + 1)
def ShellSort(self):
a = deepcopy(self)
gap = a.length//2
start = time()
while gap > 0:
for i in range(gap, a.length):
temp = a.array[i]
j = i
while j >= gap and a.array[j-gap] > temp:
a.array[j] = a.array[j-gap]
j -= gap
a.history.append(deepcopy(a.array))
a.array[j] = temp
a.history.append(deepcopy(a.array))
gap //= 2
end = time()
print("Algorithm: {}".format("ShellSort"), "|", "Time ellapsed: {}{}".format(
end-start, "s"), "|", "Moves: {}".format(len(a.history)), flush=True)
return a
def __insertionSort(self, b):
for i in range(1, len(b)):
up = b[i]
j = i - 1
while j >= 0 and b[j] > up:
b[j + 1] = b[j]
j -= 1
b[j + 1] = up
return b
def BucketSort(self):
a = deepcopy(self)
start = time()
arr = []
slot_num = 10
for i in range(slot_num):
arr.append([])
for j in a.array:
index_b = int(slot_num * j)
arr[index_b].append(j)
for i in range(slot_num):
arr[i] = self.__insertionSort(arr[i])
k = 0
for i in range(slot_num):
for j in range(len(arr[i])):
a.array[k] = arr[i][j]
k += 1
a.history.append(deepcopy(a.array))
end = time()
print("Algorithm: {}".format("BucketSort"), "|", "Time ellapsed: {}{}".format(
end-start, "s"), "|", "Moves: {}".format(len(a.history)), flush=True)
return a
def SelectionSort(self):
a = deepcopy(self)
start = time()
for i in range(a.length):
min_idx = i
for j in range(i+1, a.length):
if a.array[min_idx] > a.array[j]:
min_idx = j
a.array[i], a.array[min_idx] = a.array[min_idx], a.array[i]
a.history.append(deepcopy(a.array))
end = time()
print("Algorithm: {}".format("SelectionSort"), "|", "Time ellapsed: {}{}".format(
end-start, "s"), "|", "Moves: {}".format(len(a.history)), flush=True)
return a
def StoogeSort(self):
a = deepcopy(self)
start = time()
self.__stooge(a.array, 0, a.length-1)
end = time()
print("Algorithm: {}".format("StoogeSort"), "|", "Time ellapsed: {}{}".format(
end-start, "s"), "|", "Moves: {}".format(len(a.history)), flush=True)
return a
def __stooge(self, arr, l, h):
if l >= h:
return
if arr[l] > arr[h]:
t = arr[l]
arr[l] = arr[h]
arr[h] = t
if h-l + 1 > 2:
t = (int)((h-l + 1)/3)
self.__stooge(arr, l, (h-t))
self.__stooge(arr, l + t, (h))
self.__stooge(arr, l, (h-t))
def __getNextGap(self, gap):
gap = (gap * 10)/13
if gap < 1:
return 1
return int(gap)
def CombSort(self, save=False):
a = deepcopy(self)
start = time()
n = a.length
gap = n
swapped = True
while gap != 1 or swapped == 1:
gap = self.__getNextGap(gap)
swapped = False
for i in range(0, n-gap):
if a.array[i] > a.array[i + gap]:
a.array[i], a.array[i + gap] = a.array[i + gap], a.array[i]
swapped = True
if save:
a.history.append(deepcopy(a.array))
end = time()
print("Algorithm: {}".format("CombSort"), "|", "Time ellapsed: {}{}".format(
end-start, "s"), "|", "Moves: {}".format(len(a.history)), flush=True)
return a
def PigeonholeSort(self, save=False):
a = deepcopy(self)
start = time()
my_min = min(a.array)
my_max = max(a.array)
size = my_max - my_min + 1
holes = [0] * size
for x in a.array:
assert type(x) is int, "integers only please"
holes[x - my_min] += 1
i = 0
for count in range(size):
while holes[count] > 0:
holes[count] -= 1
a.array[i] = count + my_min
i += 1
if save:
a.history.append(deepcopy(a.array))
end = time()
print("Algorithm: {}".format("PigeonholeSort"), "|", "Time ellapsed: {}{}".format(
end-start, "s"), "|", "Moves: {}".format(len(a.history)), flush=True)
return a
def CocktailSort(self):
a = deepcopy(self)
starting = time()
swapped = True
start = 0
end = a.length-1
while (swapped == True):
swapped = False
for i in range(start, end):
if (a.array[i] > a.array[i + 1]):
a.array[i], a.array[i | |
#!/usr/bin/env python
# coding: utf-8
"""
This script has to be executed after hi_freq_data_to_csv.py and get_interval.py have successfully run.
This script should be called with 1 (or 2) arguments.
The 1st mandatory argument is the ABSOLUTE path of the top directory for the flight campaign.
/media/spectors/HDD320/lidar/20201218_fresh <<----- This is it!
----------------------------/20201218_fresh/p_00_joined_pcap_files
----------------------------/20201218_fresh/p_01_apx_csv_shapefile <<----- This must be present and will be used as input.
----------------------------/20201218_fresh/p_02_plt <<----- Not used. Just for reference.
----------------------------/20201218_fresh/p_03_pcap <<----- This must be present and will be used as input.
----------------------------/20201218_fresh/2_planned_mision
----------------------------/20201218_fresh/ .....
----------------------------/20201218_fresh/logging <<----- This is where the logs will be stored.
----------------------------/20201218_fresh/transl_table.txt <<----- This must be present and will be used as input.
The 2nd optional argument can be a boresight-calibration string.
It must contain the boresight angles and be of the following form:
# RabcdefghPijklmnopYqrstuvwx
# Where abcdefgh is milionths of degree to ROLL. a is sign (p/n)
# ..... ijklmnop is milionths of degree to PITCH. i is sign (p/n)
# ..... qrstuvwx is milionths of degree to YAW. q is sign (p/n)
# In this order! ROLL -> PITCH -> YAW !
# Theoretically can encode up to 9.9° around each axis
This script combines .csv files with each of the .pcap flight lines and writes point clouds in .txt files.
It then calls a few lastools commands to convert them to las, denoise and set the correct (georeference) metadata.
The script is run non-interactively.
The only exception is choosing the p_01_apx_csv_shapefile and p_03_pcap folders at the beginning if there are multiple of them.
TO DO: add support for different EPSG codes.
"""
import time
import os
import sys
import datetime
import platform
import logging
import shutil
import re
from collections import OrderedDict
from multiprocessing import Pool, cpu_count
from multiprocessing.managers import SharedMemoryManager
from multiprocessing.shared_memory import SharedMemory
from scipy.interpolate import interp1d
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scapy.all import rdpcap
#from vlp16_tables import *
import vlp16_tables
# --- Pipeline directory/file naming constants --------------------------------
# All directories are resolved relative to the flight-campaign top directory
# given as the first CLI argument (see module docstring).
log_dir = 'p_logging'  # where this script writes its logs
txt_dir_in = 'p_01_apx_csv_shapefile'  # input: APX csv/shapefile folder
txt_in_base_len = len(txt_dir_in)  # length of the base name, for prefix matching of sibling folders
pcap_dir_in = 'p_03_pcap'  # input: per-flight-line .pcap captures
pcap_in_base_len = len(pcap_dir_in)
out_dir_ascii = 'p_04_ascii'  # output: ascii point-cloud text files
out_ascii_base_len = len(out_dir_ascii)
out_dir_las = 'p_05_las'  # output: converted/denoised las files
out_las_base_len = len(out_dir_las)
transl_table_fn = 'p_transl_table.txt'  # input: translation table (see module docstring)
fn_keyword = 'hi_freq_apx'  # substring identifying the hi-frequency APX csv files
nl = '\n'  # newline shorthand used in log/string formatting
def shorten_string(text_string):
    """
    Return *text_string* with duplicate characters dropped, keeping the
    first occurrence of each character in its original order.
    """
    # dict preserves insertion order (Python 3.7+), so this matches the
    # OrderedDict-based original exactly.
    return "".join(dict.fromkeys(text_string))
def remove_min_sec(ts):
    """Truncate a (possibly fractional) UNIX timestamp down to the start of its hour."""
    whole_seconds = int(ts)
    return whole_seconds - whole_seconds % 3600
# ### Function to calculate the gaps between given azimuths. Needed to interpolate azimuths that are not given.
def get_azim_gap(azimuths, dual=True, preserve_shape=False):
    """
    Compute the gap between consecutive reported azimuths (hundredths of a
    degree, wrapped modulo 36000), used to interpolate per-pulse azimuths.

    Only dual-return mode is implemented.  In dual mode the azimuth columns
    repeat pairwise, so only columns 0::2 are distinct.

    preserve_shape -- if True, tile the gaps so the result has the same
    shape as ``azimuths``; if False, return one gap per unique azimuth.
    The final gap is unknown and is duplicated from the previous one.
    """
    if not dual:
        raise NotImplementedError
    unique_az = azimuths[:, 0::2]
    flat = unique_az.flatten()
    gaps_flat = np.zeros_like(flat)
    gaps_flat[:-1] = np.diff(flat) % 36000  # wrap-around safe difference
    gaps_flat[-1] = gaps_flat[-2]           # last gap: repeat the previous one
    gaps = gaps_flat.reshape(unique_az.shape)
    return np.tile(gaps, 2) if preserve_shape else gaps
def get_micros_pulses(micros, dual=True, preserve_shape=False):
    """
    Expand per-packet microsecond timestamps to per-pulse timestamps by
    adding the sensor's per-laser firing offsets (seconds -> microseconds).

    preserve_shape is relevant in dual-return mode, where firings repeat:
    if False, only the unique (every second) offset columns are used;
    if True, the full repeated offset table is applied.
    """
    # Select the relevant firing-offset table once, then apply it in a
    # single broadcasted addition.
    if dual:
        offsets = vlp16_tables.TIMING_OFFSETS_DUAL.T
        if not preserve_shape:
            offsets = offsets[0::2, :]
    else:
        offsets = vlp16_tables.TIMING_OFFSETS_SINGLE.T
    return np.expand_dims(micros, axis=1) + offsets.flatten() * 1e6
def get_precision_azimuth(az_simple, azimuths_gap, dual=True, minimal_shape=True):
    """
    Interpolate a per-laser azimuth for each firing from the per-block
    azimuths and the gaps between them (units appear to be hundredths of a
    degree, wrapped modulo 36000 — consistent with get_azim_gap).

    az_simple    -- per-block azimuths; assumed shape (packets, blocks) —
                    TODO confirm against caller.
    azimuths_gap -- matching gaps from get_azim_gap.
    minimal_shape -- if False, the result is tiled/reshaped to one column
                    per pulse (dual-return repeats); if True it keeps the
                    compact (packets, blocks, lasers) layout.

    Only dual-return mode is implemented.
    """
    if dual:
        # Firing-time offsets of each laser within one data block; in dual
        # mode all columns repeat, so column 0 suffices.
        timing_offsets_within_block = vlp16_tables.TIMING_OFFSETS_DUAL[:,0]
        # Replicate each block azimuth once per laser, then move the laser
        # axis last: shape (packets, blocks, lasers).
        az_pulses = np.tile(az_simple,(vlp16_tables.LASERS_PER_DATA_BLOCK)).reshape(\
            az_simple.shape[0], vlp16_tables.LASERS_PER_DATA_BLOCK, az_simple.shape[1])
        az_pulses = az_pulses.transpose((0,2,1))
        # Linear interpolation: fraction of the block's azimuth gap elapsed
        # at each laser's firing time (2 * T_CYCLE spans one dual block).
        precision_azimuth = az_pulses[:,:,:] +\
            timing_offsets_within_block / (2 * vlp16_tables.T_CYCLE) *\
            np.expand_dims(azimuths_gap, axis=2)
        precision_azimuth = precision_azimuth % 36000
        if not minimal_shape:
            # Duplicate for the repeated dual-return blocks and flatten the
            # (block, laser) axes into a single per-pulse axis.
            precision_azimuth = np.tile(\
                precision_azimuth.transpose((0,2,1)), (1,2,1)).transpose((0,2,1))
            precision_azimuth = precision_azimuth.reshape(\
                (precision_azimuth.shape[0], precision_azimuth.shape[1] * precision_azimuth.shape[2]))
        return precision_azimuth
    else:
        raise NotImplementedError
def process_file(pcap_file_in, pcap_dir_in,
out_dir_ascii, out_dir_las,
shm_name, shm_shp, shm_dtp,
b_roll, b_pitch, b_yaw,
concat_cmd, wine_cmd):
print(f"Processing {pcap_file_in}")
logging.info(f"Processing {pcap_file_in}")
loc_shm = SharedMemory(shm_name)
loc_apx_arr = np.recarray(shape=shm_shp, dtype=shm_dtp, buf=loc_shm.buf)
### Temporary plug-in here.
# This is not a proper solution, just a quick proof-of-concept
# Before hand must manually copy the file yaw_correction.csv into the appropriate folder
if 'yaw_correction.csv' in os.listdir(pcap_dir_in):
yaw_agisoft = pd.read_csv(os.path.join(pcap_dir_in, 'yaw_correction.csv'), index_col=0)
else:
# just have a dataframe that when interpolated will result in 0 everywhere
idx = pd.Index([0, 1, 2597835528, 2597835529], name='utc_time')
yaw_agisoft = pd.DataFrame(data = np.array([[0],[0],[0],[0]]),
columns = ['smooth_yaw_err'],
index = idx)
# ### Read entire file only once (takes most time)
start = time.time()
packets = rdpcap(os.path.join(pcap_dir_in, pcap_file_in))
packets_read = len(packets)
end = time.time()
print(F"{pcap_file_in}: Read {packets_read} packets in {end-start:.2f} seconds.")
logging.info(F"{pcap_file_in}: Read {packets_read} packets in {end-start:.2f} seconds.")
# ### Make sure all packets have length == 1206!
start = time.time()
wrong_lengths = 0
for p in packets:
if len(p.load) != vlp16_tables.DATA_PACKET_LENGTH:
wrong_lengths += 1
end = time.time()
logging.info(F"{pcap_file_in}: Checked {packets_read} packets in {end-start:.2f} seconds.")
logging.info('All have same length ('+str(vlp16_tables.DATA_PACKET_LENGTH)+').' \
if wrong_lengths==0 else str(wrong_lengths)+' packets have a different length.')
logging.info('This is GOOD!' if wrong_lengths==0 else 'This is BAD!')
# ### Read all packets into 1 numpy array
start = time.time()
raw_pack_data = np.zeros((packets_read, vlp16_tables.DATA_PACKET_LENGTH), dtype = np.uint8)
for i,p in enumerate(packets):
raw_pack_data[i,:] = np.frombuffer(p.load, dtype = np.uint8)
if i % 1e5 == 0:
print(f"{pcap_file_in}: Packet {i} out of {packets_read} in {time.time()-start:.2f} seconds.")
end = time.time()
logging.info(F"{pcap_file_in}: Copied data from {packets_read} packets into a numpy array of shape {raw_pack_data.shape} in {end-start:.2f} seconds.")
# ### Make sure all packets are captured in the same mode (last, strongest, dual)
mode_hypothesis = raw_pack_data[0, vlp16_tables.RETURN_MODE_OFFSET]
logging.info(f"First packet reports {vlp16_tables.RETURN_MODE_NAME[mode_hypothesis]} capture mode.")
diff_ret_mode = (raw_pack_data[:, vlp16_tables.RETURN_MODE_OFFSET] != mode_hypothesis).sum()
logging.info(f"{diff_ret_mode} packets disagree.")
logging.info(f"{'This is GOOD!' if diff_ret_mode == 0 else 'This is BAD!'}")
# ### Make sure all packets are captured with the same sensor (only VLP16 expected)
sensor_hypothesis = raw_pack_data[0, vlp16_tables.PRODUCT_MODEL_OFFSET]
logging.info(f"{pcap_file_in}: First packet reports {vlp16_tables.PRODUCT_MODEL_NAME[sensor_hypothesis]} sensor model.")
diff_sensor = (raw_pack_data[:, vlp16_tables.PRODUCT_MODEL_OFFSET] != sensor_hypothesis).sum()
logging.info(f"{pcap_file_in}: {diff_sensor} packets disagree.")
logging.info(f"{pcap_file_in}: {'This is GOOD!' if diff_sensor == 0 else 'This is BAD!'}")
# ### Get µs timestamp from packets and transform to UNIX timestamp
#
# I found that Ethernet timestamp agrees with GNSS timestamp very well.
#
# Can be problematic if very close ho full hour and I am not careful.
#
# Let's look at 1st Ethernet timestamp.
#
# * if it is far enough from a full hour (>=1 minute), then we continue
# * ~if it is too close (<1 minute), then we look at last one~ _not implemented_
# * ~if last one is also too close (recorded for 1 entire hour, not likely),
# we find an optimal one in the middle~ _not implemented_
ts_1st_pack = datetime.datetime.fromtimestamp(int(packets[0].time))
if ts_1st_pack.minute > 1 and ts_1st_pack.minute < 59:
logging.info(f"{pcap_file_in}: Far enough from full hour (~{ts_1st_pack.minute} minutes).")
logging.info("This is GOOD!\nContinue!")
else:
logging.info(f"{pcap_file_in}: Too close to full hour (~{ts_1st_pack.minute} minutes).")
logging.info("That is not great, but the code below should deal with it.")
# #### Take Ethernet timestamp of (1st) packet,
# discard sub-hour info and add replace it with that from GNSS µs timestamp
#
# What happens when the capture rolls over a full hour?
#
# **Need to deal with this when such data is captured!**
#
# # Solution below!
start = time.time()
micros = np.zeros((packets_read,), dtype = np.int64)
micro_bytes = micros.view(dtype = np.uint8)
micro_bytes[0::8] = raw_pack_data[:, vlp16_tables.DATA_PACK_TIMESTAMP_OFFSET + 0]
micro_bytes[1::8] = raw_pack_data[:, vlp16_tables.DATA_PACK_TIMESTAMP_OFFSET + 1]
micro_bytes[2::8] = raw_pack_data[:, vlp16_tables.DATA_PACK_TIMESTAMP_OFFSET + 2]
micro_bytes[3::8] = raw_pack_data[:, vlp16_tables.DATA_PACK_TIMESTAMP_OFFSET + 3]
plt.plot(micros)
end = time.time()
logging.info(f"{pcap_file_in}: Extracted time stamp from {packets_read} packets in {end-start:.2f} seconds.")
logging.info(f"{pcap_file_in}: If the line jumps, a full hour occurs. Need to deal with it!")
# #### Another problem could be that the UDP packets are not guaranteed to arrive in order.
#
# An assumption that is made for the following calculations is that this does not happen.
#
# **Need to deal with this when such data is captured!**
while (micros[1:] < micros[:-1]).sum() > 0:
jump_position = np.where((micros[1:] < micros[:-1]))[0][0] + 1
micros[jump_position:] += int(3.6e9)
logging.info(f"{pcap_file_in}: Added another hour to micros at position {jump_position}")
plt.plot(micros)
if (micros[1:] - micros[:-1]).min() > 0:#all chronological
logging.info(f"{pcap_file_in}: Packets seem to be in right order. Continue!")
else:
logging.info(f"{pcap_file_in}: Not all packets are in order. Handle somehow!")
print(f"{pcap_file_in}: Not all packets are in order. Handle somehow!")
sys.exit(0)
eth_ts_hour = | |
not None and len(user_ids) > 100:
raise ValueError('user_ids can only be 100 entries long')
param = {
'broadcaster_id': broadcaster_id,
'user_id': user_ids,
'after': after,
'first': first
}
url = build_url(TWITCH_API_BASE_URL + 'moderation/moderators/events', param, remove_none=True, split_lists=True)
result = self.__api_get_request(url, AuthType.USER, [AuthScope.MODERATION_READ])
data = result.json()
data = fields_to_enum(data, ['event_type'], ModerationEventType, ModerationEventType.UNKNOWN)
data = make_fields_datetime(data, ['event_timestamp'])
return data
def create_stream_marker(self,
                         user_id: str,
                         description: Optional[str] = None) -> dict:
    """Create a marker in the live stream of the broadcaster given by ``user_id``.\n\n
    Requires User authentication with scope :const:`twitchAPI.types.AuthScope.CHANNEL_MANAGE_BROADCAST`\n
    For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#create-stream-marker

    :param str user_id: ID of the broadcaster in whose live stream the marker is created.
    :param str description: Optional comment attached to the marker; at most 140 characters.
                    |default| :code:`None`
    :raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
    :raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
    :raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
    :raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
                    and a re authentication failed
    :raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
    :raises ValueError: if description has more than 140 characters
    :rtype: dict
    """
    if description is not None and len(description) > 140:
        raise ValueError('max length for description is 140')
    payload = {'user_id': user_id}
    if description is not None:
        payload['description'] = description
    url = build_url(TWITCH_API_BASE_URL + 'streams/markers', {})
    response = self.__api_post_request(url, AuthType.USER, [AuthScope.CHANNEL_MANAGE_BROADCAST], data=payload)
    return make_fields_datetime(response.json(), ['created_at'])
def get_streams(self,
                after: Optional[str] = None,
                before: Optional[str] = None,
                first: int = 20,
                game_id: Optional[List[str]] = None,
                language: Optional[List[str]] = None,
                user_id: Optional[List[str]] = None,
                user_login: Optional[List[str]] = None) -> dict:
    """Get information about active streams, sorted by current viewer count in
    descending order. Across pages, streams may be duplicated or missing as
    viewers join and leave.\n\n
    Requires App or User authentication.\n
    For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-streams

    :param str after: Cursor for forward pagination |default| :code:`None`
    :param str before: Cursor for backward pagination |default| :code:`None`
    :param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
    :param list[str] game_id: Filter by up to 100 game IDs. |default| :code:`None`
    :param list[str] language: Filter by up to 100 stream languages. |default| :code:`None`
    :param list[str] user_id: Filter by up to 100 broadcaster user IDs. |default| :code:`None`
    :param list[str] user_login: Filter by up to 100 broadcaster login names. |default| :code:`None`
    :raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
    :raises ~twitchAPI.types.UnauthorizedException: if app authentication is not set or invalid
    :raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
                    and a re authentication failed
    :raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
    :raises ValueError: if first is not in range 1 to 100 or one of the following fields have more than 100 entries:
                    `user_id, game_id, language, user_login`
    :rtype: dict
    """
    # All list filters share the same 100-entry cap; messages kept verbatim.
    length_limited = (
        (user_id, 'a maximum of 100 user_id entries are allowed'),
        (user_login, 'a maximum of 100 user_login entries are allowed'),
        (language, 'a maximum of 100 languages are allowed'),
        (game_id, 'a maximum of 100 game_id entries are allowed'),
    )
    for values, message in length_limited:
        if values is not None and len(values) > 100:
            raise ValueError(message)
    if not 1 <= first <= 100:
        raise ValueError('first must be between 1 and 100')
    query = {
        'after': after,
        'before': before,
        'first': first,
        'game_id': game_id,
        'language': language,
        'user_id': user_id,
        'user_login': user_login
    }
    url = build_url(TWITCH_API_BASE_URL + 'streams', query, remove_none=True, split_lists=True)
    response = self.__api_get_request(url, AuthType.EITHER, [])
    return make_fields_datetime(response.json(), ['started_at'])
def get_stream_markers(self,
                       user_id: str,
                       video_id: str,
                       after: Optional[str] = None,
                       before: Optional[str] = None,
                       first: int = 20) -> dict:
    """Get a list of markers from either a user's most recent stream or a
    specified VOD/video, ordered by recency.\n\n
    Requires User authentication with scope :const:`twitchAPI.types.AuthScope.USER_READ_BROADCAST`\n
    For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-stream-markers

    Only one of user_id and video_id must be specified.

    :param str user_id: ID of the broadcaster from whose stream markers are returned.
    :param str video_id: ID of the VOD/video whose stream markers are returned.
    :param str after: Cursor for forward pagination |default| :code:`None`
    :param str before: Cursor for backward pagination |default| :code:`None`
    :param int first: Number of values to be returned. Limit: 100. |default| :code:`20`
    :raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
    :raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
    :raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
    :raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
                    and a re authentication failed
    :raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
    :raises ValueError: if first is not in range 1 to 100 or neither user_id nor video_id is provided
    :rtype: dict
    """
    if not 1 <= first <= 100:
        raise ValueError('first must be between 1 and 100')
    if user_id is None and video_id is None:
        raise ValueError('you must specify either user_id and/or video_id')
    query = {
        'user_id': user_id,
        'video_id': video_id,
        'after': after,
        'before': before,
        'first': first
    }
    url = build_url(TWITCH_API_BASE_URL + 'streams/markers', query, remove_none=True)
    response = self.__api_get_request(url, AuthType.USER, [AuthScope.USER_READ_BROADCAST])
    return make_fields_datetime(response.json(), ['created_at'])
def get_broadcaster_subscriptions(self,
                                  broadcaster_id: str,
                                  user_ids: Optional[List[str]] = None,
                                  after: Optional[str] = None,
                                  first: Optional[int] = 20) -> dict:
    """Get all of a broadcaster's subscriptions.\n\n
    Requires User authentication with scope :const:`twitchAPI.types.AuthScope.CHANNEL_READ_SUBSCRIPTIONS`\n
    For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-broadcaster-subscriptions

    :param str broadcaster_id: User ID of the broadcaster. Must match the User ID in the Bearer token.
    :param list[str] user_ids: Accounts to get the subscription status of; maximum 100 entries.
                    |default| :code:`None`
    :param str after: Cursor for forward pagination. |default| :code:`None`
    :param int first: Maximum number of objects to return. Maximum: 100. |default| :code:`20`
    :raises ~twitchAPI.types.UnauthorizedException: if user authentication is not set or invalid
    :raises ~twitchAPI.types.MissingScopeException: if the user authentication is missing the required scope
    :raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
                    and a re authentication failed
    :raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
    :raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
    :raises ValueError: if user_ids has more than 100 entries
    :raises ValueError: if first is not in range 1 to 100
    :rtype: dict
    """
    if not 1 <= first <= 100:
        raise ValueError('first must be in range 1 to 100')
    if user_ids is not None and len(user_ids) > 100:
        raise ValueError('user_ids can have a maximum of 100 entries')
    query = {
        'broadcaster_id': broadcaster_id,
        'user_id': user_ids,
        'first': first,
        'after': after
    }
    url = build_url(TWITCH_API_BASE_URL + 'subscriptions', query, remove_none=True, split_lists=True)
    return self.__api_get_request(url, AuthType.USER, [AuthScope.CHANNEL_READ_SUBSCRIPTIONS]).json()
def check_user_subscription(self,
                            broadcaster_id: str,
                            user_id: str) -> dict:
    """Check whether the viewer given by ``user_id`` is subscribed to the
    channel given by ``broadcaster_id``.

    Requires User or App Authorization with scope :const:`twitchAPI.types.AuthScope.USER_READ_SUBSCRIPTIONS`
    For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#check-user-subscription

    :param str broadcaster_id: User ID of an Affiliate or Partner broadcaster.
    :param str user_id: User ID of a Twitch viewer.
    :rtype: dict
    :raises ~twitchAPI.types.UnauthorizedException: if app or user authentication is not set or invalid
    :raises ~twitchAPI.types.MissingScopeException: if the app or user authentication is missing the required scope
    :raises ~twitchAPI.types.TwitchAuthorizationException: if the used authentication token became invalid
                    and a re authentication failed
    :raises ~twitchAPI.types.TwitchAPIException: if the request was malformed
    :raises ~twitchAPI.types.TwitchBackendException: if the Twitch API itself runs into problems
    """
    query = {
        'broadcaster_id': broadcaster_id,
        'user_id': user_id
    }
    url = build_url(TWITCH_API_BASE_URL + 'subscriptions/user', query)
    response = self.__api_get_request(url, AuthType.EITHER, [AuthScope.USER_READ_SUBSCRIPTIONS])
    return response.json()
def get_all_stream_tags(self,
after: Optional[str] = None,
first: int = 20,
tag_ids: Optional[List[str]] = None) -> dict:
"""Gets the list of all stream tags defined by Twitch, optionally filtered by tag ID(s).\n\n
Requires App authentication\n
For detailed documentation, see here: https://dev.twitch.tv/docs/api/reference#get-all-stream-tags
:param str after: Cursor | |
to be priced.
expiries: A real `Tensor` of same dtype and compatible shape as
`volatilities`. The expiry of each option. The units should be such that
`expiry * volatility**2` is dimensionless.
spots: A real `Tensor` of any shape that broadcasts to the shape of the
`volatilities`. The current spot price of the underlying.
barriers: A real `Tensor` of same dtype as the `volatilities` and of the
shape that broadcasts with `volatilities`. The barriers of each option.
rebates: A real `Tensor` of same dtype as the `volatilities` and of the
shape that broadcasts with `volatilities`. A rebates contingent upon
reaching the barriers price.
discount_rates: A real `Tensor` of same dtype as the
`volatilities` and of the shape that broadcasts with `volatilities`.
Discount rates, or risk free rates.
Default value: `None`, equivalent to discount_rate = 0.
continuous_dividends: A real `Tensor` of same dtype as the
`volatilities` and of the shape that broadcasts with `volatilities`.
Either this or `cost_of_carries` can be given. If `None`,
`cost_of_carries` must be supplied.
Default value: `None`, calculated from `cost_of_carries`.
cost_of_carries: A optional real `Tensor` of same dtype as the
`volatilities` and of the shape that broadcasts with `volatilities`.
Cost of storing a physical commodity, the cost of interest paid when
long, or the opportunity cost, o the cost of paying dividends when short.
If not `None`, `continuous_dividends` is calculated as r - c,
where r are the `discount_rates` and c is `cost_of_carries`.
is_barrier_down: A real `Tensor` of `boolean` values and of the shape
that broadcasts with `volatilities`. True if barrier is below asset
price at expiration.
Default value: `True`.
is_knock_out: A real `Tensor` of `boolean` values and of the shape
that broadcasts with `volatilities`. True if option is knock out
else false.
Default value: `True`.
is_call_options: A real `Tensor` of `boolean` values and of the shape
that broadcasts with `volatilities`. True if option is call else
false.
Default value: `True`.
dtype: Optional `tf.DType`. If supplied, the dtype to be used for conversion
of any supplied non-`Tensor` arguments to `Tensor`.
Default value: `None` which maps to the default dtype inferred by
TensorFlow.
name: str. The name for the ops created by this function.
Default value: `None` which is mapped to the default name `barrier_price`.
Returns:
option_prices: A `Tensor` of same shape as `spots`. The approximate price of
the barriers option under black scholes.
"""
if (continuous_dividends is None) == (cost_of_carries is None):
raise ValueError('At most one of continuous_dividends and cost of carries '
'may be supplied')
with tf.name_scope(name or 'barrier_price'):
spots = tf.convert_to_tensor(spots, dtype=dtype, name='spots')
dtype = spots.dtype
strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')
volatilities = tf.convert_to_tensor(
volatilities, dtype=dtype, name='volatilities')
expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')
barriers = tf.convert_to_tensor(barriers, dtype=dtype, name='barriers')
rebates = tf.convert_to_tensor(rebates, dtype=dtype, name='rebates')
# Convert all to tensor and enforce float dtype where required
if discount_rates is not None:
discount_rates = tf.convert_to_tensor(
discount_rates, dtype=dtype, name='discount_rates')
else:
discount_rates = tf.convert_to_tensor(
1, dtype=dtype, name='discount_rates')
if continuous_dividends is not None:
continuous_dividends = tf.convert_to_tensor(
continuous_dividends, dtype=dtype, name='continuous_dividends')
if cost_of_carries is not None:
continuous_dividends = tf.convert_to_tensor(
discount_rates - cost_of_carries, dtype=dtype,
name='continuous_dividends')
if is_barrier_down is None:
is_barrier_down = tf.constant(1, name='is_barrier_down')
else:
is_barrier_down = tf.convert_to_tensor(is_barrier_down, dtype=tf.bool,
name='is_barrier_down')
is_barrier_down = tf.where(is_barrier_down, 1, 0)
if is_knock_out is None:
is_knock_out = tf.constant(1, name='is_knock_out')
else:
is_knock_out = tf.convert_to_tensor(is_knock_out, dtype=tf.bool,
name='is_knock_out')
is_knock_out = tf.where(is_knock_out, 1, 0)
if is_call_options is None:
is_call_options = tf.constant(1, name='is_call_options')
else:
is_call_options = tf.convert_to_tensor(is_call_options, dtype=tf.bool,
name='is_call_options')
is_call_options = tf.where(is_call_options, 1, 0)
# Indices which range from 0-7 are used to select the appropriate
# mask for each barrier
indices = tf.bitwise.left_shift(
is_barrier_down, 2) + tf.bitwise.left_shift(
is_knock_out, 1) + is_call_options
# Masks select the appropriate terms for integral approximations
# Integrals are seperated by algebraic terms and probability
# distribution terms. This give 12 different terms per matrix
# (6 integrals, 2 terms each)
# shape = [8, 12]
mask_matrix_greater_strike = tf.constant([
[1, 1, -1, -1, 0, 0, 1, 1, 1, 1, 0, 0], # up and in put
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0], # up and in call
[0, 0, 1, 1, 0, 0, -1, -1, 0, 0, 1, 1], # up and out put
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], # up and out call
[0, 0, 1, 1, -1, -1, 1, 1, 0, 0, 1, 1], # down and in put
[0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0], # down and in call
[1, 1, -1, -1, 1, 1, -1, -1, 0, 0, 1, 1], # down and out put
[1, 1, 0, 0, -1, -1, 0, 0, 0, 0, 1, 1]]) # down and out call
mask_matrix_lower_strike = tf.constant([
[0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0], # up and in put
[0, 0, 1, 1, -1, -1, 1, 1, 1, 1, 0, 0], # up and in call
[1, 1, 0, 0, -1, -1, 0, 0, 0, 0, 1, 1], # up and out put
[1, 1, -1, -1, 1, 1, -1, -1, 0, 0, 1, 1], # up and out call
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0], # down and in put
[1, 1, -1, -1, 0, 0, 1, 1, 1, 1, 0, 0], # down and in call
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], # down and out put
[0, 0, 1, 1, 0, 0, -1, -1, 0, 0, 1, 1]]) # down and out call
# Create masks
# Masks are shape [strikes.shape, 12]
masks_lower = tf.gather(mask_matrix_lower_strike, indices, axis=0)
masks_greater = tf.gather(mask_matrix_greater_strike, indices, axis=0)
strikes_greater = tf.expand_dims(strikes > barriers, axis=-1)
masks = tf.where(strikes_greater, masks_greater, masks_lower)
masks = tf.cast(masks, dtype=dtype)
one = tf.constant(1, dtype=dtype)
call_or_put = tf.cast(tf.where(tf.equal(is_call_options, 0), -one, one),
dtype=dtype)
below_or_above = tf.cast(tf.where(tf.equal(is_barrier_down, 0), -one, one),
dtype=dtype)
# Calculate params for integrals
sqrt_var = volatilities * tf.math.sqrt(expiries)
mu = (discount_rates - continuous_dividends) - ((volatilities**2) / 2)
lamda = 1 + (mu / (volatilities**2))
x = (tf.math.log(spots / strikes) / (sqrt_var)) + (lamda * sqrt_var)
x1 = (tf.math.log(spots / barriers) / (sqrt_var)) + (lamda * sqrt_var)
y = (tf.math.log((barriers**2) / (spots * strikes)) / (
sqrt_var)) + (lamda * sqrt_var)
y1 = (tf.math.log(barriers / spots) / (sqrt_var)) + (lamda * sqrt_var)
b = ((mu**2) + (2 * (volatilities**2) * discount_rates)) / (volatilities**2)
z = (tf.math.log(barriers / spots) / (sqrt_var)) + (b * sqrt_var)
a = mu / (volatilities**2)
# Other params used for integrals
discount_rates_exponent = tf.math.exp(-discount_rates * expiries,
name='discount_rates_exponent')
continuous_dividends_exponent = tf.math.exp(
-continuous_dividends * expiries,
name='continuous_dividends_exponent')
barriers_ratio = tf.math.divide(barriers, spots, name='barriers_ratio')
spots_term = call_or_put * spots * continuous_dividends_exponent
strikes_term = call_or_put * strikes * discount_rates_exponent
# rank is used to stack elements and reduce_sum
strike_rank = len(strikes.shape)
# Constructing Matrix with first and second algebraic terms for each
# integral [strike.shape, 12]
terms_mat = tf.stack(
(spots_term, -strikes_term,
spots_term, -strikes_term,
spots_term * (barriers_ratio**(2 * lamda)),
-strikes_term * (barriers_ratio**((2 * lamda) - 2)),
spots_term * (barriers_ratio**(2 * lamda)),
-strikes_term * (barriers_ratio**((2 * lamda) - 2)),
rebates * discount_rates_exponent,
-rebates * discount_rates_exponent * (
barriers_ratio**((2 * lamda) - 2)),
rebates * (barriers_ratio**(a + b)),
rebates * (barriers_ratio**(a - b))),
name='term_matrix', axis=strike_rank)
# Constructing Matrix with first and second norm for each integral
# [strikes.shape, 12]
cdf_mat = tf.stack(
(call_or_put * x,
call_or_put * (x - sqrt_var),
call_or_put * x1,
call_or_put * (x1 - sqrt_var),
below_or_above * y,
below_or_above * (y - sqrt_var),
below_or_above * y1,
below_or_above * (y1 - sqrt_var),
below_or_above * (x1 - sqrt_var),
below_or_above * (y1 - sqrt_var),
below_or_above * z,
below_or_above * (z - (2 * b * sqrt_var))),
name='cdf_matrix', axis=strike_rank)
cdf_mat = _ncdf(cdf_mat)
# Calculating and returning price for each option
return tf.reduce_sum(masks * terms_mat * cdf_mat, axis=strike_rank)
# TODO(b/154806390): Binary price signature should be the same as that of the
# vanilla price.
def binary_price(*,
volatilities,
| |
val in r]
for r in row_info])
return design
def _get_design(self, num_grobs=None):
    """
    Return the layout's design matrix, building a default grid when no
    explicit design was provided.

    Arguments
    ---------
    num_grobs : int, optional
        number of grobs to lay out; falls back to ``self.num_grobs``

    Returns
    -------
    np.array
        design matrix (float entries, ``np.nan`` marking empty cells)

    Raises
    ------
    ValueError
        if neither ``num_grobs`` nor ``self.num_grobs`` is available
    """
    # an explicitly provided design always wins
    if self.__design is not None:
        return self.__design

    if num_grobs is None:
        if self.num_grobs is None:
            raise ValueError("unclear number of grobs in layout...")
        num_grobs = self.num_grobs

    # row-major ("C") fill unless byrow was explicitly set falsy
    fill_order = "C" if (self.byrow is None or self.byrow) else "F"

    # derive the missing grid dimension from the one that is defined
    n_col = self.ncol
    n_row = self.nrow
    if n_col is None:
        n_col = int(np.ceil(num_grobs / n_row))
    if n_row is None:
        n_row = int(np.ceil(num_grobs / n_col))

    grid = np.arange(n_col * n_row,
                     dtype=float).reshape((n_row, n_col),
                                          order=fill_order)
    # cells beyond the grob count stay empty
    grid[grid >= num_grobs] = np.nan
    _ = self._assess_mat(grid)  # should pass since we just built it...
    return grid
# property
design = property(_get_design)
"""
defines underlying ``design`` attribute (potentially defined relative to a
``cow.patch`` object if certain structure are not extremely specific.
"""
def _assess_mat(self, design):
"""
Assesses if the design matrix includes at least 1 box for patches
indexed 0 to (number of patches - 1). This doesn't actually assume to know
the number of patches.
Arguments
---------
design : np.array (integer)
design in numpy array format
Returns
-------
int
number of patches expected in the overall matrix.
Raises
------
ValueError
if design matrix doesn't include at least at least 1 box for all
indices between 0 to (number of patches - 1)
"""
if design is None:
return None # to identify later that we don't have a design matrix
unique_vals = np.unique(design)
unique_vals = np.sort(
unique_vals[np.logical_not(np.isnan(unique_vals))])
num_unique = unique_vals.shape[0]
if not np.allclose(unique_vals, np.arange(num_unique)):
raise ValueError("design input requires values starting "+\
"with 0/A and through integer/alphabetical "+\
"value expected for the number of patches "+\
"provided")
return num_unique
def _rel_structure(self, num_grobs=None):
"""
provide rel_structure (rel_widths, rel_heights) if missing
Arguments
---------
num_grobs : int
if not None, then this value will be used to understand the number
of grobs to be laid out
Returns
-------
rel_widths : np.array vector
a vector of relative widths of the columns of the layout design
rel_heights : np.array vector
a vector of relative heights of the rows of the layout design
"""
if num_grobs is None:
if not (self.ncol is not None and \
self.nrow is not None) and \
not (self.rel_widths is not None and \
self.rel_heights is not None):
raise ValueError("unclear number of grobs in layout -> "+\
"unable to identify relative width and height")
rel_widths = self.rel_widths
rel_heights = self.rel_heights
ncol = self.ncol
nrow = self.nrow
if rel_widths is not None and ncol is None:
ncol = rel_widths.shape[0]
if rel_heights is not None and nrow is None:
nrow = rel_heights.shape[0]
if ncol is None:
ncol = int(np.ceil(num_grobs/nrow))
if rel_widths is None:
rel_widths = np.ones(ncol)
if nrow is None:
nrow = int(np.ceil(num_grobs/ncol))
if rel_heights is None:
rel_heights = np.ones(nrow)
return rel_widths, rel_heights
def _element_locations(self, width_pt, height_pt, num_grobs=None):
    """
    Create a list of ``area`` objects locating each of the layout's grobs
    within a given width/height in points.

    Arguments
    ---------
    width_pt : float
        global width (in points) of the full arrangement of patches
    height_pt : float
        global height (in points) of the full arrangement of patches
    num_grobs : integer, optional
        if not ``None``, this value is used to understand the number of
        grobs to be laid out (overridden, with a warning, when
        ``self.num_grobs`` is set and disagrees)

    Returns
    -------
    list
        ``area`` objects describing the location of each grob, ordered
        by the grob's index in ``self.design``
    """
    if self.num_grobs is None and num_grobs is None:
        raise ValueError("unclear number of grobs in layout...")
    if self.num_grobs is not None:
        if num_grobs is not None and num_grobs != self.num_grobs:
            warnings.warn("_element_locations overrides num_grobs "
                          "with self.num_grobs")
        num_grobs = self.num_grobs
    widths_rel, heights_rel = self._rel_structure(num_grobs=num_grobs)
    out = []
    for idx in np.arange(num_grobs):
        cell_mask = self._get_design(num_grobs=num_grobs) == idx
        # bounding box of the cells assigned to grob `idx`
        col_hits = np.argwhere(cell_mask.sum(axis=0) > 0)
        row_hits = np.argwhere(cell_mask.sum(axis=1) > 0)
        left = np.min(col_hits)
        right = np.max(col_hits)
        top = np.min(row_hits)
        bottom = np.max(row_hits)
        box = area(x_left=left,
                   y_top=top,
                   width=right - left + 1,
                   height=bottom - top + 1,
                   _type="design")
        # convert the design-grid box into point coordinates
        out.append(box.pt(rel_widths=widths_rel,
                          rel_heights=heights_rel,
                          width_pt=width_pt,
                          height_pt=height_pt))
    return out
def _yokogaki_ordering(self, num_grobs=None):
    """
    Calculate the yokogaki (left-to-right, top-to-bottom) ordering of
    the patches.

    Arguments
    ---------
    num_grobs : integer, optional
        if not ``None``, this value is used to understand the number of
        grobs to be laid out

    Returns
    -------
    numpy array (vector) of integer index of plots in yokogaki ordering

    Notes
    -----
    Yokogaki is a Japanese word that concisely describes the left to right,
    top to bottom writing format. We'd like to thank `stack overflow`_.
    for pointing this out.

    .. _stack overflow:
        https://english.stackexchange.com/questions/81520/is-there-a-word-for-left-to-right-and-top-to-bottom
    """
    if self.num_grobs is None and num_grobs is None:
        raise ValueError("unclear number of grobs in layout...")
    if self.num_grobs is not None:
        if num_grobs is not None and num_grobs != self.num_grobs:
            warnings.warn("_element_locations overrides num_grobs "
                          "with self.num_grobs")
        num_grobs = self.num_grobs
    # relative positions suffice here, so the pt width/height are dummies
    boxes = self._element_locations(1, 1)
    lefts = np.array([b.x_left for b in boxes])
    tops = np.array([b.y_top for b in boxes])
    indices = np.arange(num_grobs)
    ordering = []
    # remember: y_top values are measured from the top edge downward
    for row_top in np.sort(np.unique(tops)):
        in_row = tops == row_top
        row_members = indices[in_row]
        ordering += list(row_members[np.argsort(lefts[in_row])])
    return np.array(ordering)
def __hash__(self):
"""
Creates a 'unique' hash for the object to help with identification
Returns
-------
hash integer
"""
if self.num_grobs is None:
design_list = [None]
else:
design_list = list(self.design.ravel())
rw_list = [None]
if self.rel_widths is not None:
rw_list = list(self.rel_widths)
rh_list = [None]
if self.rel_heights is not None:
rh_list = list(self.rel_heights)
info_list = design_list + \
rw_list + rh_list +\
[self.ncol, self.nrow, self.num_grobs]
return abs(hash(tuple(info_list)))
def __str__(self):
    """Short string form: ``<layout (hash)>``."""
    return "<layout ({:d})>".format(self.__hash__())
def __repr__(self):
nrow_str = str(self.nrow)
if self.nrow is None:
nrow_str = "unk"
ncol_str = str(self.ncol)
if self.ncol is None:
ncol_str = "unk"
if self.num_grobs is None:
design_str = "*unk*"
else:
design_str = self.design.__str__()
rw_str = "unk"
if self.rel_widths is not None:
rw_str = self.rel_widths.__str__()
rh_str = "unk"
if self.rel_heights is not None:
rh_str = self.rel_heights.__str__()
out = "design (%s, %s):\n\n"% (nrow_str, ncol_str) +\
design_str +\
"\n\nwidths:\n" +\
rw_str +\
"\nheights:\n" +\
rh_str
return self.__str__() + "\n" + out
def __eq__(self, value):
    """
    Check if this layout is equal to another object (value).

    Two layouts are considered equal when their designs match (nan-aware),
    their ncol/nrow match, and their rel_widths / rel_heights are scalar
    multiples of each other (ratio has a single unique value).

    Arguments
    ---------
    value : object
        another object (that may or may not be of the layout class)

    Returns
    -------
    boolean
        if current object and other object (value) are equal

    Notes
    -----
    NOTE(review): the rel_heights/rel_widths ratio checks assume both
    sides have non-None, same-shaped vectors — comparing layouts whose
    rel vectors are None would raise a TypeError here; confirm whether
    callers ever compare such layouts.
    """
    # if value is not a layout...
    if not inherits(value, layout):
        return False
    # if __design hasn't been specified on 1 but is on another
    if (self.__design is None and value.__design is not None) or\
        (self.__design is not None and value.__design is None):
        return False
    # accounting for lack of __design specification
    design_logic = True
    if self.__design is not None:
        design_logic = np.allclose(self.design,value.design,equal_nan=True)
    # rel vectors are equal up to a common scale factor (single unique ratio)
    return design_logic and \
        self.ncol == value.ncol and \
        self.nrow == value.nrow and \
        np.unique(self.rel_heights/value.rel_heights).shape[0] == 1 and \
        np.unique(self.rel_widths/value.rel_widths).shape[0] == 1
class area:
def __init__(self,
x_left, y_top,
width, height,
_type):
"""
object that stores information about what area a ``patch`` will fill
Arguments
---------
x_left : float
scalar of where the left-most point of the patch is located (impacted
by the ``_type`` parameter)
y_top : float
scalar of where the top-most point of the patch is located (impacted
by the ``_type`` parameter)
width : float
scalar of the width of the patch (impacted by the ``_type``
parameter)
height : float
scalar of the height of the patch (impacted by the ``_type``
parameter)
_type : str {"design", "relative", "pt"}
describes how the parameters are stored. See Notes for more
information between the options.
Notes
-----
| |
"""
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
self.update_module()
self.assertNotIn("{}.test_alias".format(self.module_name), self.loader)
self.assertTrue(
inspect.isfunction(self.loader["{}.working_alias".format(self.module_name)])
)
@slowTest
def test_clear(self):
    """
    After clear() the loader must rescan its file mapping, so only the
    freshly written custom module ends up loaded.
    """
    self.assertTrue(inspect.isfunction(self.loader["test.ping"]))
    self.update_module()  # write out our custom module
    self.loader.clear()  # clear the loader dict
    # force a load of our module
    self.assertTrue(inspect.isfunction(self.loader[self.module_key]))
    # make sure we only loaded our custom module, which means the file
    # mapping was correctly refreshed.
    # Fix: iterate keys directly — the values were never used (PERF102).
    for k in self.loader._dict:
        self.assertTrue(k.startswith(self.module_name))
@slowTest
def test_load(self):
    """A module written after startup is loadable on demand."""
    # ensure it doesn't exist
    self.assertNotIn(self.module_key, self.loader)
    self.update_module()
    # first access triggers the lazy load of the new module
    self.assertTrue(inspect.isfunction(self.loader[self.module_key]))
@slowTest
def test__load__(self):
    """
    If a module specifies __load__ we should only load/expose those modules
    """
    self.update_module()
    # ensure it doesn't exist
    # (module_key + "2" is presumably excluded by the module's __load__
    # list — the module template is defined elsewhere; verify there)
    self.assertNotIn(self.module_key + "2", self.loader)
@slowTest
def test__load__and_depends(self):
    """
    If a module specifies __load__ we should only load/expose those
    modules, even for functions involved with depends (per the test
    name; the module template is defined elsewhere — verify there).
    """
    self.update_module()
    # ensure it doesn't exist
    self.assertNotIn(self.module_key + "3", self.loader)
    self.assertNotIn(self.module_key + "4", self.loader)
@slowTest
def test_reload(self):
    """Each rewrite + clear() exposes the module's latest contents."""
    # ensure it doesn't exist
    self.assertNotIn(self.module_key, self.loader)
    # make sure it updates correctly
    for x in range(1, 3):
        self.update_module()
        self.loader.clear()
        # the loaded function returns the current write count
        self.assertEqual(self.loader[self.module_key](), self.count)
    self.rm_module()
    # make sure that even if we remove the module, its still loaded until a clear
    self.assertEqual(self.loader[self.module_key](), self.count)
    self.loader.clear()
    self.assertNotIn(self.module_key, self.loader)
def test_wrong_bytecode(self):
    """
    Checks to make sure we don't even try to load .pyc files that are for a different Python
    This should pass (the load should fail) all the time because we don't run Salt on Py 3.4 anymore
    """
    test_module_name = "test_module.cpython-34"
    # save the real mapping so it can be restored afterwards
    filemap_save = copy.deepcopy(self.loader.file_mapping)
    # point the loader at a fake py3.4 bytecode entry (path content is
    # irrelevant; only the cpython-34 tag matters)
    self.loader.file_mapping = {
        test_module_name: (
            "/temp/path/does/not/matter/here/__pycache__/"
            + test_module_name
            + ".pyc",
            ".pyc",
            0,
        )
    }
    # the load must fail for foreign-interpreter bytecode
    self.assertFalse(self.loader._load_module(test_module_name))
    # restore the original mapping so other tests are unaffected
    self.loader.file_mapping = copy.deepcopy(filemap_save)
virtual_aliases = ("loadertest2", "loadertest3")
virtual_alias_module_template = """
__virtual_aliases__ = {}
def test():
return True
""".format(
virtual_aliases
)
class LazyLoaderVirtualAliasTest(TestCase):
    """
    Test the loader of salt with changing modules
    """

    module_name = "loadertest"  # base name of the on-disk test module

    @classmethod
    def setUpClass(cls):
        # minion opts (with grains) shared by every test in this class
        cls.opts = salt.config.minion_config(None)
        cls.opts["grains"] = salt.loader.grains(cls.opts)
        if not os.path.isdir(RUNTIME_VARS.TMP):
            os.makedirs(RUNTIME_VARS.TMP)

    def setUp(self):
        # fresh module dir per test, appended to the stock module dirs
        self.tmp_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
        opts = copy.deepcopy(self.opts)
        dirs = salt.loader._module_dirs(opts, "modules", "module")
        dirs.append(self.tmp_dir)
        self.utils = salt.loader.utils(opts)
        self.proxy = salt.loader.proxy(opts)
        self.minion_mods = salt.loader.minion_mods(opts)
        self.loader = salt.loader.LazyLoader(
            dirs,
            opts,
            tag="module",
            pack={
                "__utils__": self.utils,
                "__proxy__": self.proxy,
                "__salt__": self.minion_mods,
            },
        )

    def tearDown(self):
        del self.tmp_dir
        del self.utils
        del self.proxy
        del self.minion_mods
        del self.loader

    @classmethod
    def tearDownClass(cls):
        del cls.opts

    def update_module(self):
        # (re)write the module under test and drop any stale bytecode
        with salt.utils.files.fopen(self.module_path, "wb") as fh:
            fh.write(salt.utils.stringutils.to_bytes(virtual_alias_module_template))
            fh.flush()
            os.fsync(fh.fileno())  # flush to disk
        # pyc files don't like it when we change the original quickly
        # since the header bytes only contain the timestamp (granularity of seconds)
        # TODO: don't write them? Is *much* slower on re-load (~3x)
        # https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
        remove_bytecode(self.module_path)

    @property
    def module_path(self):
        # full path of the generated module file
        return os.path.join(self.tmp_dir, "{}.py".format(self.module_name))

    @slowTest
    def test_virtual_alias(self):
        """
        Test the __virtual_alias__ feature
        """
        self.update_module()
        mod_names = [self.module_name] + list(virtual_aliases)
        # the module must be reachable under its real name and every alias,
        # both via item access and via attribute access on the loader
        for mod_name in mod_names:
            func_name = ".".join((mod_name, "test"))
            log.debug("Running %s (dict attribute)", func_name)
            self.assertTrue(self.loader[func_name]())
            log.debug("Running %s (loader attribute)", func_name)
            self.assertTrue(getattr(self.loader, mod_name).test())
submodule_template = """
from __future__ import absolute_import
import {0}.lib
def test():
return ({count}, {0}.lib.test())
"""
submodule_lib_template = """
def test():
return {count}
"""
class LazyLoaderSubmodReloadingTest(TestCase):
    """
    Test the loader of salt with changing modules
    """

    module_name = "loadertestsubmod"
    module_key = "loadertestsubmod.test"

    @classmethod
    def setUpClass(cls):
        # minion opts (with grains) shared by every test in this class
        cls.opts = salt.config.minion_config(None)
        cls.opts["grains"] = salt.loader.grains(cls.opts)
        if not os.path.isdir(RUNTIME_VARS.TMP):
            os.makedirs(RUNTIME_VARS.TMP)

    def setUp(self):
        # fresh package dir per test, removed automatically on cleanup
        self.tmp_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
        self.addCleanup(shutil.rmtree, self.tmp_dir, ignore_errors=True)
        os.makedirs(self.module_dir)
        # write counters: bumped on each (re)write of module / lib
        self.count = 0
        self.lib_count = 0
        opts = copy.deepcopy(self.opts)
        dirs = salt.loader._module_dirs(opts, "modules", "module")
        dirs.append(self.tmp_dir)
        self.utils = salt.loader.utils(opts)
        self.proxy = salt.loader.proxy(opts)
        self.minion_mods = salt.loader.minion_mods(opts)
        self.loader = salt.loader.LazyLoader(
            dirs,
            opts,
            tag="module",
            pack={
                "__utils__": self.utils,
                "__proxy__": self.proxy,
                "__salt__": self.minion_mods,
            },
        )

    def tearDown(self):
        del self.tmp_dir
        del self.utils
        del self.proxy
        del self.minion_mods
        del self.loader

    @classmethod
    def tearDownClass(cls):
        del cls.opts

    def update_module(self):
        # (re)write the package __init__ with an incremented count
        self.count += 1
        with salt.utils.files.fopen(self.module_path, "wb") as fh:
            fh.write(
                salt.utils.stringutils.to_bytes(
                    submodule_template.format(self.module_name, count=self.count)
                )
            )
            fh.flush()
            os.fsync(fh.fileno())  # flush to disk
        # pyc files don't like it when we change the original quickly
        # since the header bytes only contain the timestamp (granularity of seconds)
        # TODO: don't write them? Is *much* slower on re-load (~3x)
        # https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
        remove_bytecode(self.module_path)

    def rm_module(self):
        os.unlink(self.module_path)
        remove_bytecode(self.module_path)

    def update_lib(self):
        # (re)write the lib submodule, purging cached imports first so
        # the package is fully re-imported on next load
        self.lib_count += 1
        for modname in list(sys.modules):
            if modname.startswith(self.module_name):
                del sys.modules[modname]
        with salt.utils.files.fopen(self.lib_path, "wb") as fh:
            fh.write(
                salt.utils.stringutils.to_bytes(
                    submodule_lib_template.format(count=self.lib_count)
                )
            )
            fh.flush()
            os.fsync(fh.fileno())  # flush to disk
        # pyc files don't like it when we change the original quickly
        # since the header bytes only contain the timestamp (granularity of seconds)
        # TODO: don't write them? Is *much* slower on re-load (~3x)
        # https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
        remove_bytecode(self.lib_path)

    def rm_lib(self):
        # remove the lib submodule and purge any cached imports
        for modname in list(sys.modules):
            if modname.startswith(self.module_name):
                del sys.modules[modname]
        os.unlink(self.lib_path)
        remove_bytecode(self.lib_path)

    @property
    def module_dir(self):
        return os.path.join(self.tmp_dir, self.module_name)

    @property
    def module_path(self):
        return os.path.join(self.module_dir, "__init__.py")

    @property
    def lib_path(self):
        return os.path.join(self.module_dir, "lib.py")

    @slowTest
    def test_basic(self):
        # ensure it doesn't exist
        self.assertNotIn(self.module_key, self.loader)
        self.update_module()
        self.update_lib()
        self.loader.clear()
        self.assertIn(self.module_key, self.loader)

    @slowTest
    def test_reload(self):
        # ensure it doesn't exist
        self.assertNotIn(self.module_key, self.loader)
        # update both the module and the lib
        for x in range(1, 3):
            self.update_lib()
            self.update_module()
            self.loader.clear()
            self.assertNotIn(self.module_key, self.loader._dict)
            self.assertIn(self.module_key, self.loader)
            # test() reports both write counters
            self.assertEqual(
                self.loader[self.module_key](), (self.count, self.lib_count)
            )
        # update just the module
        for x in range(1, 3):
            self.update_module()
            self.loader.clear()
            self.assertNotIn(self.module_key, self.loader._dict)
            self.assertIn(self.module_key, self.loader)
            self.assertEqual(
                self.loader[self.module_key](), (self.count, self.lib_count)
            )
        # update just the lib
        for x in range(1, 3):
            self.update_lib()
            self.loader.clear()
            self.assertNotIn(self.module_key, self.loader._dict)
            self.assertIn(self.module_key, self.loader)
            self.assertEqual(
                self.loader[self.module_key](), (self.count, self.lib_count)
            )
        self.rm_module()
        # make sure that even if we remove the module, its still loaded until a clear
        self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
        self.loader.clear()
        self.assertNotIn(self.module_key, self.loader)

    @slowTest
    def test_reload_missing_lib(self):
        # ensure it doesn't exist
        self.assertNotIn(self.module_key, self.loader)
        # update both the module and the lib
        self.update_module()
        self.update_lib()
        self.loader.clear()
        self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
        # remove the lib, this means we should fail to load the module next time
        self.rm_lib()
        self.loader.clear()
        self.assertNotIn(self.module_key, self.loader)
mod_template = """
def test():
return ({val})
"""
class LazyLoaderModulePackageTest(TestCase):
    """
    Test the loader of salt with changing modules
    """

    module_name = "loadertestmodpkg"
    module_key = "loadertestmodpkg.test"

    @classmethod
    def setUpClass(cls):
        # opts/utils/proxy/funcs shared by every test in this class
        cls.opts = salt.config.minion_config(None)
        cls.opts["grains"] = salt.loader.grains(cls.opts)
        if not os.path.isdir(RUNTIME_VARS.TMP):
            os.makedirs(RUNTIME_VARS.TMP)
        cls.utils = salt.loader.utils(copy.deepcopy(cls.opts))
        cls.proxy = salt.loader.proxy(cls.opts)
        cls.funcs = salt.loader.minion_mods(cls.opts, utils=cls.utils, proxy=cls.proxy)

    def setUp(self):
        # fresh module dir per test, removed automatically on cleanup
        self.tmp_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
        self.addCleanup(shutil.rmtree, self.tmp_dir, ignore_errors=True)
        dirs = salt.loader._module_dirs(copy.deepcopy(self.opts), "modules", "module")
        dirs.append(self.tmp_dir)
        self.loader = salt.loader.LazyLoader(
            dirs,
            copy.deepcopy(self.opts),
            pack={
                "__utils__": self.utils,
                "__salt__": self.funcs,
                "__proxy__": self.proxy,
            },
            tag="module",
        )

    def tearDown(self):
        del self.tmp_dir
        del self.loader

    @classmethod
    def tearDownClass(cls):
        del cls.opts
        del cls.funcs
        del cls.utils
        del cls.proxy

    def update_pyfile(self, pyfile, contents):
        # write `contents` to `pyfile`, creating parent dirs as needed
        dirname = os.path.dirname(pyfile)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        with salt.utils.files.fopen(pyfile, "wb") as fh:
            fh.write(salt.utils.stringutils.to_bytes(contents))
            fh.flush()
            os.fsync(fh.fileno())  # flush to disk
        # pyc files don't like it when we change the original quickly
        # since the header bytes only contain the timestamp (granularity of seconds)
        # TODO: don't write them? Is *much* slower on re-load (~3x)
        # https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
        remove_bytecode(pyfile)

    def rm_pyfile(self, pyfile):
        os.unlink(pyfile)
        remove_bytecode(pyfile)

    def update_module(self, relative_path, contents):
        # write a module at a path relative to the temp module dir
        self.update_pyfile(os.path.join(self.tmp_dir, relative_path), contents)

    def rm_module(self, relative_path):
        self.rm_pyfile(os.path.join(self.tmp_dir, relative_path))

    @slowTest
    def test_module(self):
        # ensure it doesn't exist
        self.assertNotIn("foo", self.loader)
        self.assertNotIn("foo.test", self.loader)
        self.update_module("foo.py", mod_template.format(val=1))
        self.loader.clear()
        self.assertIn("foo.test", self.loader)
        self.assertEqual(self.loader["foo.test"](), 1)

    @slowTest
    def test_package(self):
        # ensure it doesn't exist
        self.assertNotIn("foo", self.loader)
        self.assertNotIn("foo.test", self.loader)
        self.update_module("foo/__init__.py", mod_template.format(val=2))
        self.loader.clear()
        self.assertIn("foo.test", self.loader)
        self.assertEqual(self.loader["foo.test"](), 2)

    @slowTest
    def test_module_package_collision(self):
        # when both foo.py and foo/__init__.py exist, the package's
        # value (4) is the one that wins after a reload
        # ensure it doesn't exist
        self.assertNotIn("foo", self.loader)
        self.assertNotIn("foo.test", self.loader)
        self.update_module("foo.py", mod_template.format(val=3))
        self.loader.clear()
        self.assertIn("foo.test", self.loader)
        self.assertEqual(self.loader["foo.test"](), 3)
        self.update_module("foo/__init__.py", mod_template.format(val=4))
        self.loader.clear()
        self.assertIn("foo.test", self.loader)
        self.assertEqual(self.loader["foo.test"](), 4)
deep_init_base = """
from __future__ import absolute_import
import {0}.top_lib
import {0}.top_lib.mid_lib
import {0}.top_lib.mid_lib.bot_lib
def top():
return {0}.top_lib.test()
def mid():
return {0}.top_lib.mid_lib.test()
def bot():
return {0}.top_lib.mid_lib.bot_lib.test()
"""
class LazyLoaderDeepSubmodReloadingTest(TestCase):
module_name = "loadertestsubmoddeep"
libs = ("top_lib", "mid_lib", "bot_lib")
@classmethod
def setUpClass(cls):
    """Build shared minion opts (with grains) and ensure the tmp root exists."""
    cls.opts = salt.config.minion_config(None)
    cls.opts["grains"] = salt.loader.grains(cls.opts)
    if not os.path.isdir(RUNTIME_VARS.TMP):
        os.makedirs(RUNTIME_VARS.TMP)
def setUp(self):
    # fresh package dir per test, removed automatically on cleanup
    self.tmp_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
    self.addCleanup(shutil.rmtree, self.tmp_dir, ignore_errors=True)
    os.makedirs(self.module_dir)
    self.lib_count = collections.defaultdict(int)  # mapping of path -> count
    # bootstrap libs
    with salt.utils.files.fopen(
        os.path.join(self.module_dir, "__init__.py"), "w"
    ) as fh:
        # No .decode() needed here as deep_init_base is defined as str and
        # not bytes.
        fh.write(
            salt.utils.stringutils.to_str(deep_init_base.format(self.module_name))
        )
        fh.flush()
        os.fsync(fh.fileno())  # flush to disk
    # create the nested lib dirs (each one inside the previous) and
    # write an initial lib module into each
    self.lib_paths = {}
    dir_path = self.module_dir
    for lib_name in self.libs:
        dir_path = os.path.join(dir_path, lib_name)
        self.lib_paths[lib_name] = dir_path
        os.makedirs(dir_path)
        self.update_lib(lib_name)
    opts = copy.deepcopy(self.opts)
    dirs = salt.loader._module_dirs(opts, "modules", "module")
    dirs.append(self.tmp_dir)
    self.utils = salt.loader.utils(opts)
    self.proxy = salt.loader.proxy(opts)
    self.minion_mods = salt.loader.minion_mods(opts)
    self.loader = salt.loader.LazyLoader(
        dirs,
        copy.deepcopy(opts),
        tag="module",
        pack={
            "__utils__": self.utils,
            "__proxy__": self.proxy,
            "__salt__": self.minion_mods,
        },
    )
    # sanity check: the deep package's top-level accessor is resolvable
    self.assertIn("{}.top".format(self.module_name), self.loader)
def tearDown(self):
    # drop per-test state (tmp dir itself is removed by addCleanup)
    del self.tmp_dir
    del self.lib_paths
    del self.utils
    del self.proxy
    del self.minion_mods
    del self.loader
    del self.lib_count
@classmethod
def tearDownClass(cls):
    # release the class-level opts built in setUpClass
    del cls.opts
@property
def module_dir(self):
    # root directory of the generated deep package
    return os.path.join(self.tmp_dir, self.module_name)
def update_lib(self, lib_name):
for modname in list(sys.modules):
if modname.startswith(self.module_name):
del sys.modules[modname]
path | |
# <gh_stars>0 (scraper metadata marker, not Python code)
import pytest
import hls4ml
import tensorflow as tf
import numpy as np
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Input, Dense, Activation, Conv1D, Conv2D, \
Reshape, ELU, LeakyReLU, ThresholdedReLU, \
PReLU, BatchNormalization, Add, Subtract, \
Multiply, Average, Maximum, Minimum, Concatenate, \
MaxPooling1D, MaxPooling2D, AveragePooling1D, \
AveragePooling2D, Add, Subtract, Multiply, Average, Maximum, Minimum, Concatenate
import math
from tensorflow.keras import backend as K
from numpy.testing import assert_allclose
'''
Each layer type is exercised by up to four functions:
1. Making the Keras model
2. Converting the Keras model to an HLS model
3. Testing the conversion process
4. Predicting with both models and comparing the results
'''
# Dense Layer
def make_dense_model():
    """
    Build and compile a two-layer Sequential model (Dense followed by an
    ELU Activation) using the Adam optimizer and MSE loss.

    Returns
    -------
    tf.keras.Model
        the compiled Keras model
    """
    dense_layer = Dense(
        2,
        input_shape=(1,),
        name='Dense',
        use_bias=True,
        kernel_initializer=tf.keras.initializers.RandomUniform(minval=1, maxval=10),
        bias_initializer='zeros',
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
    )
    act_layer = Activation(activation='elu', name='Activation')
    model = tf.keras.models.Sequential()
    model.add(dense_layer)
    model.add(act_layer)
    model.compile(optimizer='adam', loss='mse')
    return model
def convert_dense_model():
    """
    Convert the Keras model produced by make_dense_model() into an HLS
    model, using a configuration derived from the model itself.

    Returns
    -------
    hls4ml model
        the converted HLS model
    """
    keras_model = make_dense_model()
    hls_config = hls4ml.utils.config_from_keras_model(keras_model)
    return hls4ml.converters.convert_from_keras_model(keras_model,
                                                      hls_config=hls_config)
def test_dense_conversion():
    """
    Compare layer-by-layer attributes of the Keras model and its HLS
    conversion (layer count, class names, shapes, activations).
    """
    model = make_dense_model()
    hls_model = convert_dense_model()
    hls_layers = list(hls_model.get_layers())
    # HLS adds an explicit InputLayer in front of the Keras layers
    assert len(model.layers) + 1 == len(hls_layers)
    assert hls_layers[0].attributes['class_name'] == "InputLayer"
    assert hls_layers[1].attributes["class_name"] == model.layers[0]._name
    assert hls_layers[2].attributes['class_name'] == model.layers[1]._name
    assert hls_layers[0].attributes['input_shape'] == list(model.layers[0].input_shape[1:])
    assert hls_layers[1].attributes['n_in'] == model.layers[0].input_shape[1:][0]
    assert hls_layers[1].attributes['n_out'] == model.layers[0].output_shape[1:][0]
    assert hls_layers[2].attributes['activation'] == str(model.layers[1].activation).split()[1]
    assert hls_layers[1].attributes['activation'] == str(model.layers[0].activation).split()[1]
def test_dense_prediction():
    """
    Feed the same random input through the Keras model and its HLS
    conversion and check that the predictions roughly agree.
    """
    model = make_dense_model()
    hls_model = convert_dense_model()
    sample = np.random.rand(1,)
    keras_out = model.predict(sample)
    hls_model.compile()
    hls_out = hls_model.predict(sample)
    # rounded mean difference of |keras| and |hls| outputs must be small
    assert round(np.average(np.subtract(np.abs(keras_out), np.abs(hls_out)))) < 3
# LeakyReLU and ELU Activation Layers
'''
The keras_activation_functions list holds two activation-layer classes, and
@pytest.mark.parametrize lets the tests build the Keras model with each of
them in turn.
'''
keras_activation_functions = [LeakyReLU, ELU]
def make_leakyrelu_elu(activation_functions):
    """
    Build and compile a Sequential model: a Dense layer followed by the
    given activation layer class (constructed with alpha=1.0), using the
    Adam optimizer and MSE loss.

    Fix: dropped the stray @pytest.mark.parametrize decorator — this is a
    helper, not a collected test, so the mark was a misleading no-op; the
    tests that call this helper carry the parametrize marker themselves.

    Parameters
    ----------
    activation_functions : class
        activation layer class (LeakyReLU or ELU)

    Returns
    -------
    tf.keras.Model
        the compiled Keras model
    """
    model = tf.keras.models.Sequential()
    model.add(Dense(64,
                    input_shape=(1,),
                    name='Dense',
                    kernel_initializer='lecun_uniform',
                    kernel_regularizer=None))
    model.add(activation_functions(alpha=1.0))
    model.compile(optimizer='adam', loss='mse')
    return model
def convert_activation_leakyrelu_elu(activation_functions):
    """
    Convert the Keras model produced by make_leakyrelu_elu() into an HLS
    model, using a configuration derived from the model itself.

    Returns
    -------
    hls4ml model
        the converted HLS model
    """
    keras_model = make_leakyrelu_elu(activation_functions)
    hls_config = hls4ml.utils.config_from_keras_model(keras_model)
    return hls4ml.converters.convert_from_keras_model(keras_model,
                                                      hls_config=hls_config)
@pytest.mark.parametrize("activation_functions", keras_activation_functions)
def test_activation_leakyrelu_elu_conversion(activation_functions):
    """
    Check the converted HLS model's layer count and that its activation
    layer carries the expected class name for each parametrized
    activation class.

    Bug fix: the original compared the activation *class object* against
    the strings 'ELU'/'LeakyReLU', which is never True, so both assert
    branches were dead code. Compare against the class __name__ instead.
    """
    model = make_leakyrelu_elu(activation_functions)
    hls_model = convert_activation_leakyrelu_elu(activation_functions)
    assert len(model.layers) + 1 == len(hls_model.get_layers())
    assert list(hls_model.get_layers())[2].attributes['class_name'] == \
        activation_functions.__name__
@pytest.mark.parametrize("activation_functions", keras_activation_functions)
def test_activation_leakyrelu_elu_conversion_prediction(activation_functions):
    """
    Feed the same random input through the Keras model and its HLS
    conversion (for each parametrized activation) and check that the
    predictions roughly agree.
    """
    model = make_leakyrelu_elu(activation_functions)
    hls_model = convert_activation_leakyrelu_elu(activation_functions)
    sample = np.random.rand(1)
    keras_out = model.predict(sample)
    hls_model.compile()
    hls_out = hls_model.predict(sample)
    # rounded mean difference of |keras| and |hls| outputs must be small
    assert round(np.average(np.subtract(np.abs(keras_out), np.abs(hls_out)))) < 3
# ThresholdedReLU Activation Layer
keras_activation_functions = [ThresholdedReLU]
def make_thresholdedrelu(activation_functions):
    """
    Build and compile a Sequential model: a Dense layer followed by the
    given activation layer class (constructed with theta=1.0), using the
    Adam optimizer and MSE loss.

    Fixes: dropped the stray @pytest.mark.parametrize decorator (this is
    a helper, not a collected test — the tests that call it carry the
    marker), and replaced the docstring, which previously described the
    keras_activation_functions list rather than this function.

    Parameters
    ----------
    activation_functions : class
        activation layer class (ThresholdedReLU)

    Returns
    -------
    tf.keras.Model
        the compiled Keras model
    """
    model = tf.keras.models.Sequential()
    model.add(Dense(64,
                    input_shape=(1,),
                    name='Dense',
                    kernel_initializer='lecun_uniform',
                    kernel_regularizer=None))
    model.add(activation_functions(theta=1.0))
    model.compile(optimizer='adam', loss='mse')
    return model
def convert_thresholdedrelu_model(activation_functions):
    """
    Convert the Keras model produced by make_thresholdedrelu() into an
    HLS model, using a configuration derived from the model itself.

    Returns
    -------
    hls4ml model
        the converted HLS model
    """
    keras_model = make_thresholdedrelu(activation_functions)
    hls_config = hls4ml.utils.config_from_keras_model(keras_model)
    return hls4ml.converters.convert_from_keras_model(keras_model,
                                                      hls_config=hls_config)
@pytest.mark.parametrize("activation_functions", keras_activation_functions)
def test_thresholdedrelu_conversion(activation_functions):
    """
    Check the converted HLS model's layer count and that its activation
    layer carries the expected class name.

    Bug fix: the original compared the activation *class object* against
    the string 'ThresholdedReLU', which is never True, so the assert was
    dead code. Compare against the class __name__ instead.
    """
    model = make_thresholdedrelu(activation_functions)
    hls_model = convert_thresholdedrelu_model(activation_functions)
    assert len(model.layers) + 1 == len(hls_model.get_layers())
    assert list(hls_model.get_layers())[2].attributes['class_name'] == \
        activation_functions.__name__
@pytest.mark.parametrize("activation_functions", keras_activation_functions)
def test_thresholdedrelu_prediction(activation_functions):
    '''
    The Keras and HLS model are gotten by make_thresholdedrelu() and
    convert_thresholdedrelu_model() functions. X_input is generated by Numpy's
    random generator and given to both models; the predictions are compared.

    Fix: this text used to sit *above* the function as a bare module-level
    string (a no-op expression); it belongs in the docstring.
    '''
    model = make_thresholdedrelu(activation_functions)
    hls_model = convert_thresholdedrelu_model(activation_functions)
    X_input = np.random.rand(1,)
    keras_prediction = model.predict(X_input)
    hls_model.compile()
    hls_prediction = hls_model.predict(X_input)
    assert round(np.average(np.subtract(np.abs(keras_prediction), np.abs(hls_prediction)))) < 3
# PReLU Activation Layer
keras_activation_functions = [PReLU]


def make_prelu_model(activation_functions):
    '''
    Same as the ThresholdedReLU helper above, except the model ends with a
    PReLU activation layer (alpha initialized to zeros).

    Fixes: the descriptive text used to be a stray no-op module-level string;
    the @pytest.mark.parametrize decorator was removed because parametrize
    has no effect on non-test helper functions.
    '''
    model = tf.keras.models.Sequential()
    model.add(Dense(64,
                    input_shape=(1,),
                    name='Dense',
                    kernel_initializer='lecun_uniform',
                    kernel_regularizer=None))
    model.add(activation_functions(alpha_initializer="zeros",))
    model.compile(optimizer='adam', loss='mse')
    return model
def convert_prelu_model(activation_functions):
    '''Build the PReLU Keras model and convert it to an hls4ml model.'''
    keras_model = make_prelu_model(activation_functions)
    hls_config = hls4ml.utils.config_from_keras_model(keras_model)
    return hls4ml.converters.convert_from_keras_model(keras_model, hls_config=hls_config)
@pytest.mark.parametrize("activation_functions", keras_activation_functions)
def test_prelu_conversion(activation_functions):
    '''
    Convert the PReLU model to hls4ml and verify layer count and class name.

    Bug fix: the original compared the layer *class* to the string 'PReLU'
    (always False), so the class-name assertion never ran.
    '''
    model = make_prelu_model(activation_functions)
    hls_model = convert_prelu_model(activation_functions)
    assert len(model.layers) + 1 == len(hls_model.get_layers())
    if activation_functions.__name__ == 'PReLU':
        assert list(hls_model.get_layers())[2].attributes['class_name'] == 'PReLU'
@pytest.mark.parametrize("activation_functions", keras_activation_functions)
def test_prelu_prediction(activation_functions):
    '''Compare Keras and hls4ml predictions of the PReLU model on random input.'''
    keras_model = make_prelu_model(activation_functions)
    hls_model = convert_prelu_model(activation_functions)
    sample = np.random.rand(1,)
    y_keras = keras_model.predict(sample)
    hls_model.compile()
    y_hls = hls_model.predict(sample)
    assert round(np.average(np.subtract(np.abs(y_keras), np.abs(y_hls)))) < 3
# Activation layer
keras_activation_functions = [Activation]


def make_activation_model(activation_functions):
    '''
    Same as the ThresholdedReLU helper above, except the model ends with a
    plain Activation('relu') layer.

    Fix: the @pytest.mark.parametrize decorator was removed -- it has no
    effect on non-test helper functions.
    '''
    model = tf.keras.models.Sequential()
    model.add(Dense(64,
                    input_shape=(1,),
                    name='Dense',
                    kernel_initializer='lecun_uniform',
                    kernel_regularizer=None))
    model.add(activation_functions(activation='relu', name='Activation'))
    model.compile(optimizer='adam', loss='mse')
    return model
def convert_activation_model(activation_functions):
    '''Build the Activation Keras model and convert it to an hls4ml model.'''
    keras_model = make_activation_model(activation_functions)
    hls_config = hls4ml.utils.config_from_keras_model(keras_model)
    return hls4ml.converters.convert_from_keras_model(keras_model, hls_config=hls_config)
@pytest.mark.parametrize("activation_functions", keras_activation_functions)
def test_activation_conversion(activation_functions):
    '''
    Convert the Activation model to hls4ml and verify layer count and the
    converted activation name.

    Bug fix: the original compared the layer *class* to the string
    'Activation' (always False), so the attribute assertion never ran.
    '''
    model = make_activation_model(activation_functions)
    hls_model = convert_activation_model(activation_functions)
    assert len(model.layers) + 1 == len(hls_model.get_layers())
    if activation_functions.__name__ == 'Activation':
        assert list(hls_model.get_layers())[2].attributes["activation"] == str(model.layers[1].activation).split()[1]
@pytest.mark.parametrize("activation_functions", keras_activation_functions)
def test_activation_prediction(activation_functions):
    '''Compare Keras and hls4ml predictions of the Activation model on random input.'''
    keras_model = make_activation_model(activation_functions)
    hls_model = convert_activation_model(activation_functions)
    sample = np.random.rand(1,)
    y_keras = keras_model.predict(sample)
    hls_model.compile()
    y_hls = hls_model.predict(sample)
    assert round(np.average(np.subtract(np.abs(y_keras), np.abs(y_hls)))) < 3
# Conv1D Layer
keras_conv1d = [Conv1D]
padds_options = ['same', 'valid']


def make_conv1d_model(conv1d, padds):
    '''
    Build a Sequential model with a Conv1D layer (32 filters, kernel 3,
    stride 2, the given padding, channels_last) followed by a ReLU
    Activation layer.

    Fix: dropped the two @pytest.mark.parametrize decorators -- parametrize
    only affects collected test functions, not plain helpers.
    '''
    model = tf.keras.models.Sequential()
    input_shape = (10, 128, 4)
    model.add(conv1d(filters=32,
                     kernel_size=3,
                     strides=2,
                     padding=padds,
                     activation='relu',
                     input_shape=input_shape[1:],
                     kernel_initializer='normal',
                     use_bias=False,
                     data_format='channels_last'))
    model.add(Activation(activation='relu'))
    model.compile(optimizer='adam', loss='mse')
    return model
def convert_conv1d_model(conv1d, padds):
    '''Build the Conv1D Keras model and convert it to an hls4ml model.'''
    keras_model = make_conv1d_model(conv1d, padds)
    hls_config = hls4ml.utils.config_from_keras_model(keras_model)
    return hls4ml.converters.convert_from_keras_model(keras_model, hls_config=hls_config)
@pytest.mark.parametrize("conv1d", keras_conv1d)
@pytest.mark.parametrize("padds", padds_options)
def test_conv1d_conversion(conv1d, padds):
    '''
    Check hls4ml layer count and, for Conv1D, every converted layer attribute.

    Bug fix: the original compared the layer *class* ``conv1d`` to the string
    'Conv1D' (always False), so none of the attribute assertions ever ran;
    compare the class __name__ instead. The repeated layer lookup is hoisted
    into a local for readability.
    '''
    model = make_conv1d_model(conv1d, padds)
    hls_model = convert_conv1d_model(conv1d, padds)
    assert len(model.layers) + 2 == len(hls_model.get_layers())
    if conv1d.__name__ == 'Conv1D':
        hls_attributes = list(hls_model.get_layers())[1].attributes
        assert hls_attributes['class_name'] == 'Conv1D'
        assert hls_attributes['activation'] == str(model.layers[0].activation).split()[1]
        assert hls_attributes["n_in"] == model.layers[0]._batch_input_shape[1]
        assert hls_attributes['filt_width'] == model.layers[0].kernel_size[0]
        assert hls_attributes['n_chan'] == model.layers[0].input_shape[2]
        assert hls_attributes['n_filt'] == model.layers[0].filters
        assert hls_attributes['stride'] == model.layers[0].strides[0]
        assert hls_attributes['padding'] == model.layers[0].padding
        assert hls_attributes['data_format'] == model.layers[0].data_format
        assert hls_attributes["n_out"] == list(model.layers[0].output_shape)[1]
@pytest.mark.parametrize("conv1d", keras_conv1d)
@pytest.mark.parametrize("padds", padds_options)
def test_conv1d_prediction(conv1d, padds):
    '''
    Compare Keras and hls4ml Conv1D predictions on random input.

    Bug fix: the original branched on ``padds_options == 'same'`` -- that
    compares the module-level *list* to a string, which is always False, so
    neither branch (and thus no assertion) ever executed. Branch on the
    ``padds`` parameter instead.
    '''
    model = make_conv1d_model(conv1d, padds)
    hls_model = convert_conv1d_model(conv1d, padds)
    X_input = np.random.rand(10, 128, 4)
    keras_prediction = model.predict(X_input)
    hls_model.compile()
    hls_prediction = hls_model.predict(X_input)
    if padds == 'same':
        assert round(np.average(np.subtract(np.abs(keras_prediction), np.abs(hls_prediction.reshape(10, 64, 32))))) < 3
    elif padds == 'valid':
        assert round(np.average(np.subtract(np.abs(keras_prediction), np.abs(hls_prediction.reshape(10, 63, 32))))) < 3
# MaxPooling1D, MaxPooling2D, AveragePooling1D, AveragePooling2D Layers
pooling_layers = [MaxPooling1D, MaxPooling2D, AveragePooling1D, AveragePooling2D]
padds_options = ['same', 'valid']
chans_options = ['channels_first', 'channels_last']
@pytest.mark.parametrize("poolings", pooling_layers)
@pytest.mark.parametrize("padds", padds_options)
@pytest.mark.parametrize("chans", chans_options)
def test_pooling(poolings, padds, chans):
model | |
for p,s in selected_prefix_var}]
selected_vars.value = new_selection
# print(f"{new_selection=}{current_suffix=}{g['old']=}")
else:
selected_vars.value = [selected_prefix_var[0][1]]
if len(prefix_dict):
selected_prefix = SelectMultiple(value=[select_prefix[0][1]], options=select_prefix,
layout=Layout(width='25%', height=select_height, font="monospace"),
description='')
selected_prefix.observe(get_prefix,names='value',type='change')
select = HBox([selected_vars,selected_prefix])
get_prefix({'new':select_prefix[0]})
else:
select = VBox([selected_vars])
options1 = HBox([diff]) if short >=2 else HBox([diff,legend])
options2 = HBox([scale, showtype])
if short:
vui = [select, options1, i_smpl]
else:
vui = [select, options1, options2, i_smpl]
vui = vui[:-1] if short >= 2 else vui
ui = VBox(vui)
show = interactive_output(explain, {'i_smpl': i_smpl, 'selected_vars': selected_vars, 'diff': diff, 'showtype': showtype,
'scale': scale, 'legend': legend})
# display(ui, show)
display(ui)
display(show)
return
    @staticmethod
    def display_toc(text='**Jupyter notebooks in this and all subfolders**',all=False):
        '''In a jupyter notebook this function displays a clickable table of content of all
        jupyter notebooks in this and sub folders.

        text: markdown heading shown above the listing.
        all:  if True, also list notebooks starting with 'test' or 'Overview'.
              (NOTE: parameter name shadows the builtin ``all``; kept because
              callers may pass it by keyword.)
        '''
        from IPython.display import display, Markdown, HTML
        from pathlib import Path
        display(Markdown(text))
        for dir in sorted(Path('.').glob('**')):
            # skip hidden folders such as .git or .ipynb_checkpoints
            if len(dir.parts) and str(dir.parts[-1]).startswith('.'):
                continue
            for i, notebook in enumerate(sorted(dir.glob('*.ipynb'))):
                # print(notebook)
                if (not all) and (notebook.name.startswith('test') or notebook.name.startswith('Overview')):
                    continue
                if i == 0:
                    # first notebook of the folder: emit the folder header line
                    # NOTE(review): if notebook 0 was filtered out by the
                    # continue above, no header is printed for this folder and
                    # ``blanks`` may carry over from the previous folder --
                    # looks unintended, confirm with the author.
                    blanks = ''.join(
                        [' ']*len(dir.parts))
                    if len(dir.parts):
                        display(HTML(f'{blanks}<b>{str(dir)}</b>'))
                    else:
                        # top-level folder: show the cwd name instead of '.'
                        display(
                            HTML(f'{blanks}<b>{str(Path.cwd().parts[-1])} (.)</b>'))
                name = notebook.name.split('.')[0]
                display(HTML(
                    f' {blanks} <a href="{notebook}" target="_blank">{name}</a>'))
@staticmethod
def display_toc_this(pat='*',text='**Jupyter notebooks**',path='.',ext='ipynb',showext=False):
'''In a jupyter notebook this function displays a clickable table of content in the folder pat with name in path'''
from IPython.display import display, Markdown, HTML
from pathlib import Path
display(Markdown(text))
dir = Path(path)
print(dir,':')
for fname in sorted(dir.glob(pat+'.'+ext)):
name = fname.name if showext else fname.name.split('.')[0]
display(HTML(
f' <a href="{fname}" target="_blank">{name}</a>'))
@staticmethod
def widescreen():
'''Makes a jupyter notebook use all the avaiable real estate
'''
from IPython.display import HTML, display
display(HTML(data="""
<style>
div#notebook-container { width: 95%; }
div#menubar-container { width: 65%; }
div#maintoolbar-container { width: 99%; }
</style>
"""))
@staticmethod
def scroll_off():
try:
from IPython.display import display, Javascript
Javascript("""IPython.OutputArea.prototype._should_scroll = function(lines){
return false;
}
""")
except:
print('No scroll off')
@staticmethod
def scroll_on():
try:
from IPython.display import display, Javascript
Javascript("""IPython.OutputArea.prototype._should_scroll = function(lines){
return true;
}
""")
except:
print('no scroll on ')
@staticmethod
def modelflow_auto(run=True):
'''In a jupyter notebook this function activate autorun of the notebook.
Also it makes Jupyter use a larger portion of the browser width
The function should be run before the notebook is saved, and the output should not be cleared
'''
if not run:
return
try:
from IPython.display import HTML, display
display(HTML(data="""
<style>
div#notebook-container { width: 95%; }
div#menubar-container { width: 65%; }
div#maintoolbar-container { width: 99%; }
</style>
"""))
display(HTML("""\
<script>
// AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
require(
['base/js/namespace', 'jquery'],
function(jupyter, $) {
$(jupyter.events).on('kernel_ready.Kernel', function () {
console.log('Auto-running all cells-below...');
jupyter.actions.call('jupyter-notebook:run-all-cells-below');
jupyter.actions.call('jupyter-notebook:save-notebook');
});
}
);
</script>"""))
except:
print('modelflow_auto not run')
class Json_Mixin():
    '''This mixin class can dump a model and solution
    as json serialization to a file.

    Allows the precooking of a model and solution, so
    a user can use a model without specifying it in
    a session.
    '''

    def modeldump(self, outfile='',keep=False):
        '''Dumps a model and its lastdf to a json file.

        If keep=True the model.keep_solutions will also be dumped.
        When outfile is empty, the JSON text is returned instead of written.'''
        dumpjson = {
            'version': '1.00',
            'frml': self.equations,
            'lastdf': self.lastdf.to_json(),
            'current_per': pd.Series(self.current_per).to_json(),
            'modelname': self.name,
            'oldkwargs': self.oldkwargs,
            'var_description': self.var_description,
            # optional attributes are serialized as '' / {} when absent
            'equations_latex': self.equations_latex if hasattr(self, 'equations_latex') else '',
            'keep_solutions': {k:v.to_json() for k,v in self.keep_solutions.items()} if keep else {},
            'wb_MFMSAOPTIONS': self.wb_MFMSAOPTIONS if hasattr(self, 'wb_MFMSAOPTIONS') else '',
        }
        if outfile != '':
            pathname = Path(outfile)
            # make sure the target folder exists before writing
            pathname.parent.mkdir(parents=True, exist_ok=True)
            with open(outfile, 'wt') as f:
                json.dump(dumpjson, f)
        else:
            return json.dumps(dumpjson)

    @classmethod
    def modelload(cls, infile, funks=[], run=False, keep_json=False, **kwargs):
        '''Loads a model and an solution.

        infile:    path of a file written by modeldump.
        funks:     user functions handed to the model constructor
                   (NOTE(review): mutable default; only passed through, never
                   mutated here, so it is harmless -- confirm before relying
                   on it elsewhere).
        run:       if True, solve the model over the loaded period.
        keep_json: if True, keep the raw json dict on model.json_keep.
        '''

        def make_current_from_quarters(base, json_current_per):
            ''' Handle json for quarterly data to recover the right date index
            '''
            import datetime
            start, end = json_current_per[[0, -1]]
            start_per = datetime.datetime(
                start['qyear'], start['month'], start['day'])
            end_per = datetime.datetime(
                end['qyear'], end['month'], end['day'])
            current_dates = pd.period_range(
                start_per, end_per, freq=start['freqstr'])
            base_dates = pd.period_range(
                base.index[0], base.index[-1], freq=start['freqstr'])
            base.index = base_dates
            return base, current_dates

        with open(infile, 'rt') as f:
            input = json.load(f)  # NOTE(review): shadows builtin input(); kept as-is
        version = input['version']
        frml = input['frml']
        lastdf = pd.read_json(input['lastdf'])
        current_per = pd.read_json(input['current_per'], typ='series').values
        modelname = input['modelname']
        mmodel = cls(frml, modelname=modelname, funks=funks)
        mmodel.oldkwargs = input['oldkwargs']
        mmodel.json_current_per = current_per
        mmodel.set_var_description(input.get('var_description', {}))
        mmodel.equations_latex = input.get('equations_latex', None)
        if input.get('wb_MFMSAOPTIONS', None) : mmodel.wb_MFMSAOPTIONS = input.get('wb_MFMSAOPTIONS', None)
        mmodel.keep_solutions = {k : pd.read_json(jdf) for k,jdf in input.get('keep_solutions',{}).items()}
        if keep_json:
            mmodel.json_keep = input
        try:
            # quarterly dumps need their PeriodIndex reconstructed
            lastdf, current_per = make_current_from_quarters(
                lastdf, current_per)
        except:
            # non-quarterly data: keep the index as read
            pass
        if run:
            res = mmodel(lastdf, current_per[0], current_per[-1], **kwargs)
            return mmodel, res
        else:
            return mmodel, lastdf
class Excel_Mixin():
    '''Mixin that dumps a model (and its data) to an Excel workbook via
    xlwings, and loads it back.'''

    def modeldump_excel(self, file, fromfile='control.xlsm', keep_open=False):
        '''
        Dump model and dataframe to excel workbook

        Parameters
        ----------
        file : TYPE
            filename.
        fromfile : TYPE, optional
            template workbook used when *file* has an .xlsm suffix.
        keep_open : TYPE, optional
            Keep the workbook open in excel after returning, The default is False.

        Returns
        -------
        wb : TYPE
            xlwings instance of workbook .
        '''
        thispath = Path(file)
        # breakpoint()
        if thispath.suffix.upper() == '.XLSM':
            # macro-enabled target: start from the template workbook
            wb = xw.Book(thispath.parent / Path(fromfile))
        else:
            wb = xw.Book()
            wb.sheets.add()
        wb.app.screen_updating = 1
        # one sheet per model artefact: formulas, descriptions, options, name
        me.obj_to_sheet('frml', {v: self.allvar[v]['frml']
                                 for v in sorted(self.allvar.keys()) if self.allvar[v]['endo']}, wb)
        me.obj_to_sheet('var_description', dict(self.var_description) if len(
            self.var_description) else {'empty': 'empty'}, wb)
        me.obj_to_sheet('oldkwargs', self.oldkwargs, wb)
        me.obj_to_sheet('modelname', {'name': self.name}, wb)
        if hasattr(self, 'current_per'):
            me.obj_to_sheet('current_per', me.indextrans(
                self.current_per), wb, after='frml')
        if hasattr(self, 'lastdf'):
            me.df_to_sheet('lastdf', self.lastdf.loc[:, sorted(
                self.allvar)], wb, after='frml')
        wb.app.screen_updating = 1
        try:
            wb.save(Path(file).absolute())
        except Exception as e:
            # saving can fail e.g. when the file is open in Excel; close and bail
            wb.close()
            print(f'{Path(file).absolute()} not saved\n', str(e))
            return
        if not keep_open:
            wb.close()
            return
        return wb

    @classmethod
    def modelload_excel(cls, infile='pak', funks=[], run=False, keep_open=False, **kwargs):
        '''Load a model (and optionally run it) from a workbook written by
        modeldump_excel. Returns (model, result, workbook-or-None).

        NOTE(review): ``funks=[]`` is a mutable default; it is only passed
        through, never mutated here.
        '''
        if isinstance(infile, xw.main.Book):
            # caller handed us an already-open workbook
            wb = infile
        else:
            wb = xw.Book(Path(infile).absolute())
        wb.app.screen_updating = 0
        frml = '\n'.join(f for f in me.sheet_to_dict(wb, 'frml').values())
        modelname = me.sheet_to_dict(wb, 'modelname')['name']
        var_description = me.sheet_to_dict(wb, 'var_description')
        mmodel = cls(frml, modelname=modelname, funks=funks,
                     var_description=var_description)
        mmodel.oldkwargs = me.sheet_to_dict(wb, 'oldkwargs')
        try:
            mmodel.current_per = me.sheet_to_df(wb, 'current_per').index
        except:
            # sheet may be absent when the dump had no current_per
            pass
        try:
            lastdf = me.sheet_to_df(wb, 'lastdf')
        except:
            lastdf = pd.DataFrame()
        if run:
            res = mmodel(lastdf, **kwargs)
        else:
            res = lastdf
        wb.app.screen_updating = 1
        if not keep_open:
            wb.close()
            return mmodel, res, None
        else:
            return mmodel, res, wb
class Zip_Mixin():
    '''Mixin that bundles a model dump together with its compiled jit
    artifacts into a single <name>_dump.zip archive, and restores it.'''

    def modeldump2(self, outfile=''):
        '''Dump the model to <name>.mf and zip it with jit/cache artifacts.'''
        outname = self.name if outfile == '' else outfile
        self.modeldump(f'{outname}.mf')
        with zipfile.ZipFile(f'{outname}_dump.zip', 'w', zipfile.ZIP_DEFLATED) as archive:
            archive.write(f'{outname}.mf')
            for artifact in Path().glob(f'{self.name}_*_jitsolver.*'):
                archive.write(artifact)
            for artifact in Path('__pycache__').glob(f'{self.name}_*.*'):
                archive.write(artifact)

    @classmethod
    def modelload2(cls, name):
        '''Extract <name>_dump.zip into the cwd and load the model from <name>.mf.'''
        with zipfile.ZipFile(f'{name}_dump.zip', 'r') as archive:
            archive.extractall()
        mmodel, df = cls.modelload(f'{name}.mf')
        return mmodel, df
class Solver_Mixin():
DEFAULT_relconv = 0.0000001
    def __call__(self, *args, **kwargs):
        ''' Runs a model.

        Default a straight model is calculated by *xgenr* a simultaneous model is solved by *sim*

        :sim: If False forces a model to be calculated (not solved) if True force simulation
        :setbase: If True, place the result in model.basedf
        :setlast: if False don't place the results in model.lastdf

        if the modelproperty previousbase is true, the previous run is used as basedf.
        '''
        if kwargs.get('antal', False):
            # 'antal' is an obsolete option name; fail loudly so callers migrate
            assert 1 == 2, 'Antal is not a valid simulation option, Use max_iterations'
        self.dumpdf = None
        if kwargs.get('reset_options', False):
            self.oldkwargs = {}
        # merge options remembered from the previous run with this call's options
        # (this call's kwargs win), then remember the merged set for next time
        if hasattr(self, 'oldkwargs'):
            newkwargs = {**self.oldkwargs, **kwargs}
        else:
            newkwargs = kwargs
        self.oldkwargs = newkwargs.copy()
        self.save = newkwargs.get('save', self.save)
        if self.save:
            if self.previousbase and hasattr(self, 'lastdf'):
                # use the previous run as the base scenario
                self.basedf = self.lastdf.copy(deep=True)
        # choose a default solver from the model structure:
        # leaded models need a stacked Newton, normalized models without
        # simultaneity can be straight-calculated by xgenr
        if self.maxlead >= 1:
            if self.normalized:
                solverguess = 'newtonstack'
            else:
                solverguess = 'newtonstack_un_normalized'
        else:
            if self.normalized:
                if self.istopo:
                    solverguess = 'xgenr'
                else:
                    solverguess = 'sim'
            else:
                solverguess = 'newton_un_normalized'
        solver = newkwargs.get('solver', solverguess)
        self.model_solver = getattr(self, solver)
        # print(f'solver:{solver},solverkwargs:{newkwargs}')
        # breakpoint()
        outdf = self.model_solver(*args, **newkwargs)
        if newkwargs.get('cache_clear', True):
            # attribution results are stale after a new solution
            self.dekomp.cache_clear()
        # optionally archive this solution under a name in keep_solutions
        if newkwargs.get('keep', '') and self.save:
            if newkwargs.get('keep_variables', ''):
                keepvar = self.vlist(newkwargs.get('keep_variables', ''))
                self.keep_solutions[newkwargs.get(
                    'keep', '')] = outdf.loc[:, keepvar].copy()
            else:
                self.keep_solutions[newkwargs.get('keep', '')] = outdf.copy()
        if self.save:
            if (not hasattr(self, 'basedf')) or newkwargs.get('setbase', False):
                self.basedf = outdf.copy(deep=True)
            if newkwargs.get('setlast', True):
                self.lastdf = outdf.copy(deep=True)
        return outdf
@property
def showstartnr(self):
self.findpos()
variabler = [x for x in sorted(self.allvar.keys())]
return {v: self.allvar[v]['startnr'] for v in variabler}
def makelos(self, databank, ljit=0, stringjit=True,
solvename='sim', chunk=30, transpile_reset=False, newdata=False,
silent=True, **kwargs):
jitname = f'{self.name}_{solvename}_jit'
nojitname = f'{self.name}_{solvename}_nojit'
if solvename == 'sim':
solveout = partial(self.outsolve2dcunk, databank,
chunk=chunk, ljit=ljit, debug=kwargs.get('debug', 1))
elif solvename == 'sim1d':
solveout = partial(self.outsolve1dcunk, chunk=chunk, ljit=ljit, | |
<filename>aioheos/aioheoscontroller.py
#!/usr/bin/env python3
"""Heos python lib."""
import asyncio
import json
import logging
from concurrent.futures import CancelledError
from . import aioheosgroup
from . import aioheosplayer
from . import aioheosupnp
_LOGGER = logging.getLogger(__name__)
HEOS_PORT = 1255  # TCP port of the HEOS CLI on the device

# --- player commands ---
GET_PLAYERS = 'player/get_players'
GET_PLAYER_INFO = 'player/get_player_info'
GET_PLAY_STATE = 'player/get_play_state'
SET_PLAY_STATE = 'player/set_play_state'
GET_MUTE_STATE = 'player/get_mute'
SET_MUTE_STATE = 'player/set_mute'
GET_VOLUME = 'player/get_volume'
SET_VOLUME = 'player/set_volume'
GET_NOW_PLAYING_MEDIA = 'player/get_now_playing_media'
GET_QUEUE = 'player/get_queue'
CLEAR_QUEUE = 'player/clear_queue'
PLAY_NEXT = 'player/play_next'
PLAY_PREVIOUS = 'player/play_previous'
PLAY_QUEUE = 'player/play_queue'
TOGGLE_MUTE = 'player/toggle_mute'

# --- group commands ---
GET_GROUPS = 'group/get_groups'
SET_GROUP = 'group/set_group'
BROWSE = 'browse/browse'

# --- unsolicited change events sent by the device ---
EVENT_PLAYER_VOLUME_CHANGED = 'event/player_volume_changed'
EVENT_PLAYER_STATE_CHANGED = 'event/player_state_changed'
EVENT_PLAYERS_CHANGED = 'event/players_changed'
EVENT_PLAYER_NOW_PLAYING_CHANGED = 'event/player_now_playing_changed'
EVENT_PLAYER_NOW_PLAYING_PROGRESS = 'event/player_now_playing_progress'
EVENT_PLAYER_QUEUE_CHANGED = 'event/player_queue_changed'
EVENT_USER_CHANGED = 'event/user_changed'
EVENT_SOURCES_CHANGED = 'event/sources_changed'
EVENT_GROUPS_CHANGED = 'event/groups_changed'
EVENT_GROUP_VOLUME_CHANGED = 'event/group_volume_changed'
EVENT_REPEAT_MODE_CHANGED = "event/repeat_mode_changed"
# NOTE(review): constant name says SHUTTLE but the event is 'shuffle';
# renaming would touch all references, so the typo is only flagged here.
EVENT_SHUTTLE_MODE_CHANGED = "event/shuffle_mode_changed"

# --- system commands ---
SYSTEM_PRETTIFY = 'system/prettify_json_response'
SYSTEM_REGISTER_FOR_EVENTS = 'system/register_for_change_events'
SYSTEM_SIGNIN = 'system/sign_in'
SYSTEM_SIGNOUT = 'system/sign_out'

# --- browse commands ---
BROWSE_MUSIC_SOURCES = 'browse/get_music_sources'
BROWSE_SEARCH = 'browse/search'
BROWSE_BROWSE = 'browse/browse'
BROWSE_SEARCH_CRITERIA = 'browse/get_search_criteria'
BROWSE_PLAY_STREAM = 'browse/play_stream'

# HEOS source id -> human-readable service name
SOURCE_LIST = {
    1: 'Pandora',
    2: 'Rhapsody',
    3: 'TuneIn',
    4: 'Spotify',
    5: 'Deezer',
    6: 'Napster',
    7: 'iHeartRadio',
    8: 'Sirius XM',
    9: 'Soundcloud',
    10: 'Tidal',
    11: 'Future service',
    13: 'Amazon Music',
    14: 'Future service',
    15: 'Moodmix',
    17: 'Future service',
    18: 'QQMusic'
}
class AioHeosException(Exception):
    """Exception raised for errors reported by, or while talking to, HEOS.

    Fix: forward *message* to Exception.__init__ so that str(exc) and
    exc.args carry the message -- the original skipped the super call and
    left them empty, which made logged exceptions unreadable.
    """

    def __init__(self, message):
        super().__init__(message)
        # kept for backward compatibility with callers reading .message
        self.message = message
class AioHeosController:
"""Asynchronous Heos class."""
# ddpylint: disable=too-many-public-methods,too-many-instance-attributes
    def __init__(self,
                 loop,
                 host=None,
                 username=None,
                 password=None,
                 new_device_callback=None,
                 port=HEOS_PORT):
        """Set up controller state; no I/O happens until connect() is called.

        loop: asyncio event loop used for the connection and the event task.
        host: device address; if None, connect() discovers one via UPnP.
        username/password: optional HEOS account credentials.
        new_device_callback: called with each newly discovered player/group.
        port: HEOS CLI port (default 1255).
        """
        self._host = host
        self._port = port
        self._loop = loop
        self._username = username
        self._password = password
        # only attempt sign-in when a username was supplied
        self._need_login = bool(self._username)
        self._new_device_callback = new_device_callback
        self._players = None     # list of AioHeosPlayer once discovered
        self._groups = None      # list of AioHeosGroup once discovered
        self._upnp = None        # lazy AioHeosUpnp used for discovery
        self._reader = None      # asyncio StreamReader of the CLI socket
        self._writer = None      # asyncio StreamWriter of the CLI socket
        self._subscribtion_task = None   # background event-reading task
        self._close_requested = False
        self._favourites = []
        self._favourites_sid = None
        self._music_sources = {}
    async def ensure_player(self):
        """Request the player list and wait until it arrives.

        Polls every 0.5 s; gives up silently after 20 tries (~10 s)."""
        # timeout after 10 sec
        self.request_players()
        for _ in range(0, 20):
            if self._players:
                return
            await asyncio.sleep(0.5)
    async def ensure_group(self):
        """Request the group list and wait until it arrives (may be empty).

        Polls every 0.5 s; gives up silently after 20 tries (~10 s)."""
        # timeout after 10 sec
        self.request_groups()
        for _ in range(0, 20):
            # an empty list is a valid answer, so test against None
            if self._groups is not None:
                return
            await asyncio.sleep(0.5)
    async def ensure_login(self):
        """Send sign-in and wait until it is acknowledged.

        Polls every 0.5 s; gives up silently after 20 tries (~10 s)."""
        # timeout after 10 sec (20 x 0.5 s) -- the original comment said 20 sec
        self.login()
        for _ in range(0, 20):
            if not self._need_login:
                return
            await asyncio.sleep(0.5)
    async def ensure_favourites_loaded(self):
        """Wait until the favourites list has been populated elsewhere.

        Polls every 0.5 s; gives up silently after 20 tries (~10 s)."""
        # timeout after 10 sec (20 x 0.5 s) -- the original comment said 20 sec
        for _ in range(0, 20):
            if self._favourites:
                return
            await asyncio.sleep(0.5)
@staticmethod
def _url_to_addr(url):
import re
addr = re.search('https?://([^:/]+)[:/].*$', url)
if addr:
return addr.group(1)
return None
    async def connect(self, callback=None):
        """Discover (if needed) and connect to the device, then bootstrap state.

        callback: optional coroutine scheduled after every handled event.
        """
        if not self._host:
            # discover
            if not self._upnp:
                self._upnp = aioheosupnp.AioHeosUpnp(loop=self._loop)
            url = await self._upnp.discover()
            self._host = self._url_to_addr(url)
        # connect
        _LOGGER.debug('[I] Connecting to %s:%s', self._host, self._port)
        await self._connect()
        # please, do not prettify json
        self.register_pretty_json(False)
        # and get events
        self.register_for_change_events()
        # setup subscription loop
        if not self._subscribtion_task:
            self._subscribtion_task = self._loop.create_task(
                self._async_subscribe(callback))
        # request for players
        await self.ensure_player()
        await self.ensure_group()
        if self._need_login:
            await self.ensure_login()
        self.request_music_sources()
async def _connect(self):
"""Connect."""
while not self._close_requested:
wait = 5
try:
# pylint: disable=line-too-long
self._reader, self._writer = await asyncio.open_connection(
self._host, self._port, loop=self._loop)
return
except TimeoutError:
_LOGGER.warning('[W] Connection timed out'
', will try %s:%s again in %d seconds ...',
self._host, self._port, wait)
except ConnectionRefusedError:
wait = 30
_LOGGER.warning('[W] Connection refused'
', will try %s:%s again in %d seconds ...',
self._host, self._port, wait)
except Exception as exc: # pylint: disable=broad-except
_LOGGER.error('[E] %s', exc)
await asyncio.sleep(wait)
def send_command(self, command, message=None):
"""Send command."""
msg = 'heos://' + command
if message:
msg += '?' + '&'.join("{}={}".format(key, val)
for (key, val) in message.items())
msg += '\r\n'
_LOGGER.debug(msg)
self._writer.write(msg.encode('ascii'))
@staticmethod
def _parse_message(message):
"""Parse message."""
result = {}
if message:
for elem in message.split('&'):
parts = elem.split('=')
if len(parts) == 2:
result[parts[0]] = parts[1]
elif len(parts) == 1:
result[parts[0]] = True
else:
_LOGGER.warning('[W] No parts found in %s', message)
return result
def _handle_error(self, message):
eid = message['eid']
if eid == '2':
pid = message['pid']
player = self.get_player(pid)
player.play_state = None
raise AioHeosException('Player {} is offline'.format(pid))
else:
raise AioHeosException(message)
    def _dispatcher(self, command, message, payload):
        """Call parser functions.

        Routes a parsed reply/event to its _parse_* handler; commands listed
        in *commands_ignored* are expected and silently skipped, anything else
        is logged as unhandled.
        """
        _LOGGER.debug('DISPATCHER')
        _LOGGER.debug('[D] %s %s %s', command, message, payload)
        # command string -> handler; replies and events share one table
        callbacks = {
            GET_PLAYERS:
                self._parse_players,
            GET_GROUPS:
                self._parse_groups,
            SET_GROUP:
                self._parse_set_group,
            GET_PLAY_STATE:
                self._parse_play_state,
            SET_PLAY_STATE:
                self._parse_play_state,
            GET_MUTE_STATE:
                self._parse_mute_state,
            SET_MUTE_STATE:
                self._parse_mute_state,
            GET_VOLUME:
                self._parse_volume,
            SET_VOLUME:
                self._parse_volume,
            GET_NOW_PLAYING_MEDIA:
                self._parse_now_playing_media,
            EVENT_PLAYER_VOLUME_CHANGED:
                self._parse_player_volume_changed,
            EVENT_GROUP_VOLUME_CHANGED:
                self._parse_group_volume_changed,
            EVENT_PLAYER_STATE_CHANGED:
                self._parse_player_state_changed,
            EVENT_PLAYERS_CHANGED:
                self._parse_players_changed,
            EVENT_PLAYER_NOW_PLAYING_CHANGED:
                self._parse_player_now_playing_changed,
            EVENT_PLAYER_NOW_PLAYING_PROGRESS:
                self._parse_player_now_playing_progress,
            EVENT_GROUPS_CHANGED:
                self._parse_groups_changed,
            SYSTEM_SIGNIN:
                self._parse_system_signin,
            BROWSE_MUSIC_SOURCES:
                self._parse_browse_music_source,
            BROWSE_BROWSE:
                self._parse_browse_browse,
        }
        commands_ignored = (SYSTEM_PRETTIFY, SYSTEM_REGISTER_FOR_EVENTS,
                            EVENT_PLAYER_QUEUE_CHANGED, EVENT_SOURCES_CHANGED,
                            EVENT_USER_CHANGED, EVENT_SHUTTLE_MODE_CHANGED,
                            EVENT_REPEAT_MODE_CHANGED)
        if command in callbacks:
            callbacks[command](payload, message)
        elif command in commands_ignored:
            _LOGGER.debug('[D] command "%s" is ignored.', command)
        else:
            _LOGGER.debug('[D] command "%s" is not handled.', command)
    def _parse_command(self, data):
        """Parse one decoded JSON reply/event and route it to _dispatcher.

        Raises AioHeosException on device-reported failures or malformed
        replies; unexpected errors are logged and re-raised as
        AioHeosException.
        """
        try:
            data_heos = data['heos']
            command = data_heos['command']
            message = {}
            if 'message' in data_heos:
                # interim 'command under process' replies carry no result yet
                if data_heos['message'].startswith('command under process'):
                    return None
                message = self._parse_message(data_heos['message'])
            if 'result' in data_heos.keys() and data_heos['result'] == 'fail':
                self._handle_error(message)
            if 'payload' in data.keys():
                self._dispatcher(command, message, data['payload'])
            elif 'message' in data_heos.keys():
                self._dispatcher(command, message, None)
            elif 'command' in data_heos.keys():
                self._dispatcher(command, None, None)
            else:
                raise AioHeosException(
                    'No message or payload in reply. payload {}'.format(data))
        # pylint: disable=bare-except
        except AioHeosException as exc:
            # already a domain error: propagate unchanged
            raise exc
        except Exception:
            _LOGGER.exception("Unexpected error for msg '%s'", data)
            raise AioHeosException('Problem parsing command.')
        return None
async def _callback_wrapper(self, callback):
if callback:
try:
await callback()
except Exception: # pylint: disable=broad-except
pass
async def _async_subscribe(self, callback=None):
""" event loop """
# pylint: disable=too-many-branches,logging-too-many-args
while not self._close_requested:
try:
msg = await self._reader.readline()
except TimeoutError:
_LOGGER.warning(
'[W] Connection got timed out, try to reconnect...',
exc_info=True)
await self._connect()
continue
except ConnectionResetError:
_LOGGER.warning(
'[W] Peer reset our connection, try to reconnect...',
exc_info=True)
await self._connect()
continue
except (GeneratorExit, CancelledError):
_LOGGER.debug('[I] Cancelling event loop...', exc_info=True)
return
except Exception: # pylint: disable=broad-except
_LOGGER.debug('[E] Ignoring', exc_info=True)
continue
_LOGGER.debug(msg.decode())
# simplejson doesnt need to decode from byte to ascii
data = json.loads(msg.decode())
_LOGGER.debug('DATA:')
_LOGGER.debug(data)
try:
self._parse_command(data)
except AioHeosException as exc:
_LOGGER.error('[E]', exc)
_LOGGER.debug('MSG', msg)
_LOGGER.debug('MSG decoded', msg.decode())
_LOGGER.debug('MSG json', data)
continue
if callback:
_LOGGER.debug('TRIGGER CALLBACK')
self._loop.create_task(self._callback_wrapper(callback))
    def new_device_callback(self, callback):
        """Register *callback* to be invoked for each newly discovered player/group."""
        self._new_device_callback = callback
    async def close(self):
        """Shut down: stop the read loop, close the socket, await cancellation."""
        _LOGGER.info('[I] Closing down...')
        self._close_requested = True
        if self._writer:
            self._writer.close()
        if self._subscribtion_task:
            self._subscribtion_task.cancel()
            try:
                await self._subscribtion_task
            except asyncio.CancelledError:
                # expected: the task was cancelled on purpose
                pass
def register_for_change_events(self):
" register for change events "
self.send_command(SYSTEM_REGISTER_FOR_EVENTS, {'enable': 'on'})
def register_pretty_json(self, enable=False):
" register for pretty json "
set_enable = 'off'
if enable:
set_enable = 'on'
self.send_command(SYSTEM_PRETTIFY, {'enable': set_enable})
    def request_players(self):
        """Request the player list; the reply is handled asynchronously."""
        self.send_command(GET_PLAYERS)
def login(self):
" login "
self.send_command(SYSTEM_SIGNIN, {
'un': self._username,
'pw': self._password
})
    def _parse_players(self, payload, _message):
        """Handle a get_players reply: refresh known players, create new ones."""
        _players_json = payload
        if not self._players:
            self._players = []
        for player in _players_json:
            old_player = self.get_player(player['pid'])
            if not old_player:
                new_player = aioheosplayer.AioHeosPlayer(self, player)
                self._players.append(new_player)
                if self._new_device_callback:
                    # announce the newly discovered player
                    self._new_device_callback(new_player)
            else:
                # already known: just refresh its info
                old_player.player_info = player
def _parse_groups(self, payload, _message):
_groups_json = payload
if not self._groups:
self._groups = []
group_copy = self._groups
for group in _groups_json:
old_group = self.get_group(group['gid'])
if not old_group:
new_group = aioheosgroup.AioHeosGroup(self, group)
self._groups.append(new_group)
if self._new_device_callback:
self._new_device_callback(new_group)
else:
old_group.player_info = group
group_copy.remove(group)
for remove_group in group_copy:
# Make group offline
remove_group.play_state = None
    def _parse_set_group(self, _payload, _message):
        """A group was (un)set; re-request the group list to stay in sync."""
        self.request_groups()
    def _parse_system_signin(self, _payload, _message):
        """Successful sign-in reply: stop waiting for login."""
        self._need_login = False
def get_players(self):
""" get players array """
return self._players
def get_groups(self):
""" get groups array """
return self._groups
def get_player(self, pid):
""" get player from array """
for player in self._players:
if player.player_id == pid:
return player
return None
def get_group(self, pid):
"""Get group from array."""
for group in self._groups:
if group.player_id == pid:
return group
return None
def request_player_info(self, pid):
" request player info "
self.send_command(GET_PLAYER_INFO, {'pid': pid})
def request_play_state(self, pid):
" request play state "
self.send_command(GET_PLAY_STATE, {'pid': pid})
def _parse_play_state(self, _payload, message):
self.get_player(message['pid']).play_state = message['state']
if self.get_group(message['pid']):
self.get_group(message['pid']).play_state = message['state']
def request_mute_state(self, pid):
" request mute state "
self.send_command(GET_MUTE_STATE, {'pid': pid})
def _parse_mute_state(self, _payload, message):
self.get_player(message['pid']).mute = message['state']
if self.get_group(message['pid']):
self.get_group(message['pid']).mute = message['state']
def request_volume(self, pid):
" request volume "
self.send_command(GET_VOLUME, {'pid': pid})
def set_volume(self, volume_level, pid):
" set volume "
volume = min(100, max(0, volume_level))
self.send_command(SET_VOLUME, {'pid': pid, 'level': volume})
def _parse_volume(self, _payload, message):
self.get_player(message['pid']).volume = float(message['level'])
if self.get_group(message['pid']):
self.get_group(message['pid']).volume = float(message['level'])
def _set_play_state(self, state, pid):
" set play state "
if state not in ('play', 'pause', 'stop'):
AioHeosException('Not an accepted play state {}.'.format(state))
self.send_command(SET_PLAY_STATE, {
'pid': pid,
'state': state
})
def stop(self, pid=None):
" stop player "
self._set_play_state('stop', pid)
def play(self, pid=None):
" play "
self._set_play_state('play', pid)
def pause(self, pid=None):
" pause "
self._set_play_state('pause', pid)
def request_now_playing_media(self, pid):
" get playing media "
self.send_command(GET_NOW_PLAYING_MEDIA,
{'pid': pid})
def _parse_now_playing_media(self, payload, message):
player = self.get_player(message["pid"])
player.reset_now_playing()
player.media_artist = | |
found in the"
print self.keyerror[e]
continue
# if there exist a startmarker in the wdef resolve to id
if wdef.startmarker != None:
try:
startid = self.data_client.markerids[wdef.startmarker]
except KeyError, e:
e=str(e)
if not self.keyerror.has_key(e):
self.keyerror[e]=wdef
print
print "windowdef warning: Startmarker ", e, "not found in the"
print self.keyerror[e]
continue
# check if startmarker id has been seen in current buffer scope
if self.buffermarkers.has_key(startid) and \
self.buffermarkers[startid][0] < self.data_client.stdblocksize:
# if the startmarker is found we delete it from the window
# definition because from now on windows can be cut
wdef.startmarker = None
# in addition a start_flag is set and the min markeroffset
# for markers in this current block
self.start = True
self.min_markeroffset = self.buffermarkers[startid][0]
else:
continue
# check if corresponding marker id has been seen in current
# buffer scope or if the stopmarker is already True
if not self.buffermarkers.has_key(markerid) or self.end==True:
continue
# now prepare extraction windows for markers in the ``current'' block
# check if includedefs and excludedefs are fulfilled
for markeroffset in self.buffermarkers[markerid]:
if self.min_markeroffset <= markeroffset < self.data_client.stdblocksize and \
self._check_exclude_defs_ok(markeroffset, wdef.excludedefs) and \
self._check_include_defs_ok(markeroffset, wdef.includedefs):
try:
(extractwindow, start_time, end_time, markers_cur_win) = \
self._extractwindow(
markeroffset,
self._mstosamples(wdef.startoffsetms),
self._mstosamples(wdef.endoffsetms))
if self.data_consistency_check:
# test if extracted window has std zero
std = numpy.std(extractwindow,axis=1)
if sum(std < 10**-9): #can be considered as zero
# filter the channel names where std equals zero
zero_channels = [self.data_client.channelNames[index]
for (index,elem) in enumerate(std)
if elem < 10**-9]
print "Warning: Standard deviation of channel(s) " \
" %s in time interval [%.1f,%.1f] is zero!" \
% (str(zero_channels), start_time, end_time)
if wdef.skipfirstms is None or \
start_time > wdef.skipfirstms:
self.cur_extract_windows.append((wdef.windef_name,
extractwindow, wdef.classname, start_time,
end_time, markers_cur_win))
except MarkerWindowerException, e:
if warnings:
print >>sys.stderr, "warning:", e
# if this was the first window, adjust min_markeroffset before we
# move to the next block
if self.start:
self.min_markeroffset = 0
self.start = False
# check if the end of the stream is reached
if wdef.endmarker != None:
try:
endid = self.data_client.markerids[wdef.endmarker]
except KeyError, e:
e=str(e)
if not self.keyerror.has_key(e):
self.keyerror[e]=wdef
print
print "windowdef warning: Endmarker ", e, "not found in the"
print self.keyerror[e]
continue
# check if endmarker id has been seen in current buffer scope
if self.buffermarkers.has_key(endid):
if self.buffermarkers[endid][0] < 0 and not self.end:
# if the endmarker is reached we set the end-flag for window
# cutting to True
self.end = True
print "Endmarker found!"
raise StopIteration
    def _check_exclude_defs_ok(self, markeroffset, excludedefs):
        """ Check whether the exclude definitions match

        Returns True iff *markeroffset* (sample offset of the candidate
        marker within the current block) is not blocked by any of the
        given exclude definitions, i.e. a window may be cut here.

        .. note::
            Changes in this section need to be checked also
            in the following _check_include_defs_ok method,
            because they are very similar.
        """
        # Nothing to do if there are no excludedefs
        if excludedefs is None or len(excludedefs)==0:
            return True
        # Check each exclude definition
        for exc in excludedefs:
            # Exclusion interval around the candidate marker, converted
            # from milliseconds to samples.
            preexclude = markeroffset - self._mstosamples(exc.preexcludems)
            if self.no_overlap:
                postexclude = markeroffset + self._mstosamples(exc.postexcludems)
            else:
                # +1 makes the interval end-inclusive when overlap is allowed
                postexclude = markeroffset + 1 + self._mstosamples(exc.postexcludems)
            # Get markerid and skip if it does not exist.
            try:
                excmarkerid = self.data_client.markerids[exc.markername]
            except KeyError, e:
                # Warn only once per unknown marker name; self.keyerror
                # remembers the names that were already reported.
                e=str(e)
                if not self.keyerror.has_key(e):
                    self.keyerror[e]=exc
                    print
                    print "exclude warning: Marker ", e, "not found in the ..."
                    print self.keyerror[e]
                continue
            # Skip if no proximal exclude marker seen
            if not self.buffermarkers.has_key(excmarkerid):
                continue
            # Not ok, if exclude marker falls into exclude range
            # This is the important part of this check!
            # Question: Why not exc_marker <=postexclude or exc_marker > preexclude?
            # Answer: Before one added
            for exc_marker in self.buffermarkers[excmarkerid]:
                # The inequation lets you exclude the same marker
                # only a few seconds before or after the current marker,
                # to deal with unwanted marker repetitions.
                if preexclude <= exc_marker < postexclude and \
                   exc_marker != markeroffset:
                    return False
        return True #if all excludedefs are fulfilled
    def _check_include_defs_ok(self, markeroffset, includedefs):
        """Check whether all the include definitions match

        Returns True iff *every* include definition has at least one
        marker inside its include interval around *markeroffset*.
        """
        #Code adapted from the previous exclude-check
        #Checks if there are includedefs
        if includedefs is None or len(includedefs)==0:
            return True
        # Check each include definition
        for inc in includedefs:
            # Include interval around the candidate marker, converted
            # from milliseconds to samples.
            preinclude = markeroffset - self._mstosamples(inc.preincludems)
            if self.no_overlap:
                postinclude = markeroffset + self._mstosamples(inc.postincludems)
            else:
                # +1 makes the interval end-inclusive when overlap is allowed
                postinclude = markeroffset + 1 + self._mstosamples(inc.postincludems)
            # Check allways breaks if the neccessary marker does not exist.
            try:
                incmarkerid = self.data_client.markerids[inc.markername]
            except KeyError, e:
                # Warn only once per unknown marker name, then fail the check.
                e=str(e)
                if not self.keyerror.has_key(e):
                    self.keyerror[e]=inc
                    print
                    print "include warning: Marker ", e, "not found in the ..."
                    print self.keyerror[e]
                return False
            # Break if no proximal include marker seen (different to exclude,
            # because include markers need to be proximal.)
            if not self.buffermarkers.has_key(incmarkerid):
                return False
            # Not ok, if no include marker falls into include range
            # It is important to remark that no includedefs using the current
            # marker are allowed!
            check = False # remembers if a check succeeded
            for inc_marker in self.buffermarkers[incmarkerid]:
                # inequality to use he same marker name for include def
                if preinclude <= inc_marker < postinclude and \
                   inc_marker != markeroffset:
                    check = True
            if not check:
                return False
        return True # If all includedefs are fulfilled
    def _extractwindow(self, cur_sample_block_offset, start_offset, end_offset,
                       debug=False):
        """ Extracts a sample window from the ring buffer and consolidates it
        into a single numpy array object.

        :param cur_sample_block_offset: marker offset (in samples) within
            the newest block of the ring buffer
        :param start_offset: window start relative to the marker, in samples
        :param end_offset: window end relative to the marker, in samples
        :returns: tuple (ndsamplewin, start_time, end_time,
            markers_cur_win) with times in milliseconds
        :raises MarkerWindowerException: if the buffered history does not
            reach back far enough for the requested start offset
        """
        # calculate current position with respect to prebuffer start
        cur_sample_buf_offset = self.prebuflen * self.data_client.stdblocksize \
                                + cur_sample_block_offset
        buf_extract_start = cur_sample_buf_offset + start_offset
        if self.no_overlap:
            buf_extract_end = cur_sample_buf_offset + end_offset
        else:
            # +1 includes the end sample when overlapping windows are allowed
            buf_extract_end = cur_sample_buf_offset + 1 + end_offset
        if debug:
            print "buf_extract_start", buf_extract_start
            print "buf_extract_end", buf_extract_end
        if buf_extract_start < 0:
            raise MarkerWindowerException,"not enough history data available" \
                " to extract window with start offset of %d samples" \
                % start_offset
        assert buf_extract_end >= 0
        assert buf_extract_end <= self.buflen * self.data_client.stdblocksize
        # absolute end time with respect to the total stream read so far
        end_time_samples = \
            (self.nblocks_read_total * self.data_client.stdblocksize) - \
            (self.buflen * self.data_client.stdblocksize - buf_extract_end)
        end_time = self._samplestoms(end_time_samples)
        start_time_samples = end_time_samples - \
            (buf_extract_end - buf_extract_start) + 1
        start_time = self._samplestoms(start_time_samples)
        # copy ring buffer into one long array and extract subwindow
        ndsamplewin = numpy.hstack(self.samplebuf.get())[:,buf_extract_start:buf_extract_end]
        markers_cur_window = self._extract_markers_cur_window(buf_extract_start, buf_extract_end)
        return (ndsamplewin, start_time, end_time, markers_cur_window)
    def _extract_markers_cur_window(self, buf_extract_start, buf_extract_end):
        """ Filter out all markers that lie in the current window
        to store this information. The markers are stored with their clear name
        and temporal offset.

        Returns a dict mapping marker name -> list of offsets (in ms,
        relative to the window start).  Nullmarkers and markers outside
        the extracted range are ignored.
        """
        markers_cur_window = dict()
        for marker_id in self.buffermarkers:
            for offset in self.buffermarkers[marker_id]:
                # offset < 0 means the marker lies before the current
                # buffer scope; the nullmarker is a mere placeholder
                if offset >= 0 \
                        and buf_extract_start <= offset < buf_extract_end \
                        and marker_id != self.nullmarker_id:
                    marker = self.data_client.markerNames[marker_id]
                    if not markers_cur_window.has_key(marker):
                        markers_cur_window[marker] = list()
                    markers_cur_window[marker].append(self._samplestoms(offset-buf_extract_start))
        return markers_cur_window
# =====================
# = Exception classes =
# =====================
class MarkerWindowerException(Exception):
    """Raised when window extraction from the streamed data fails."""
    def __init__(self, arg):
        super(MarkerWindowerException, self).__init__(arg)
# ==================================================
# = Support classes for definitions of constraints =
# ==================================================
class LabeledWindowDef(object):
    """Labeled window definition that is to be extracted from EEG stream."""
    def __init__(self, windef_name, classname, markername, startoffsetms,
                 endoffsetms, excludedefs=None,includedefs=None,
                 skipfirstms=None, jitter=None,startmarker=None,endmarker=None):
        """Store the parameters of one window definition.

        :param windef_name: identifier of this window definition
        :param classname: class label attached to extracted windows
        :param markername: marker the windows are anchored to (may be None)
        :param startoffsetms: window start relative to the marker, in ms
        :param endoffsetms: window end relative to the marker, in ms
        :param excludedefs: optional list of exclude constraints
        :param includedefs: optional list of include constraints
        :param skipfirstms: skip windows that start before this time
        :param jitter: accepted but not stored here -- presumably consumed
            elsewhere; TODO confirm intended use
        :param startmarker: marker name that enables window cutting
        :param endmarker: marker name that stops window cutting
        """
        super(LabeledWindowDef, self).__init__()
        self.windef_name = windef_name
        self.classname = classname
        self.markername = markername # can be None
        self.excludedefs = excludedefs
        self.includedefs = includedefs
        self.startoffsetms = startoffsetms
        self.endoffsetms = endoffsetms
        self.skipfirstms = skipfirstms
        self.startmarker = startmarker
        self.endmarker = endmarker
    def __str__(self):
        """Render the definition, appending its include/exclude defs."""
        d = {'wdef' : self.windef_name, 'cls' : self.classname,
             'skip_first' : self.skipfirstms, 'marker' : self.markername,
             'start' : self.startoffsetms, 'end' : self.endoffsetms}
        # show the empty marker name explicitly
        if d['marker'] == '':
            d['marker'] = "''"
        str_ = 'LabeledWindowDef %(wdef)s\n class: %(cls)s\n skip first: '\
               '%(skip_first)s\n marker: %(marker)s\n start: %(start)d ms\n'\
               ' end: %(end)d ms\n' % d
        # append exclude definitions if any
        if self.excludedefs:
            for exc in self.excludedefs:
                for line in str(exc).splitlines():
                    str_ += "  %s\n" % line
        # append include definitions if any
        if self.includedefs:
            for inc in self.includedefs:
                for line in str(inc).splitlines():
                    str_ += "  %s\n" % line
        return str_
class ExcludeDef(object):
"""Definition of exclude constraints for window extraction."""
def __init__(self, markername, preexcludems, postexcludems):
super(ExcludeDef, self).__init__()
self.markername = markername
self.preexcludems = preexcludems
self.postexcludems = postexcludems
def __str__(self):
d = {'name' : self.markername, 'pre' : self.preexcludems,
'post' : self.postexcludems}
str_ | |
"""This module contains all the classes relating to Checks."""
from json import dumps
from . import decorators
from . import models
class CheckPullRequest(models.GitHubCore):
    """Minimal pull-request representation embedded in Checks API payloads.

    .. versionadded:: 1.3.0

    .. note::

        Refreshing this object returns a :class:`~github3.pulls.PullRequest`.

    This object has the following attributes:

    .. attribute:: id

        Unique id of this pull request across all of GitHub.

    .. attribute:: number

        Number of this pull request within its repository.

    .. attribute:: head

        Minimal dict, taken from the Check data, describing the source
        (head) of the pull request.

    .. attribute:: base

        Minimal dict, taken from the Check data, describing the
        destination (base) of the pull request.
    """

    def _update_attributes(self, pull):
        # All four attributes map 1:1 onto payload keys.
        for field in ("id", "number", "base", "head"):
            setattr(self, field, pull[field])
        self._api = self.url = pull["url"]

    def _repr(self):
        return "<CheckPullRequest [#{}]>".format(self.number)

    def to_pull(self):
        """Fetch the complete pull request behind this minimal object.

        :returns:
            The full information about this pull request.
        :rtype:
            :class:`~github3.pulls.PullRequest`
        """
        from . import pulls

        json = self._json(self._get(self.url), 200)
        return self._instance_or_null(pulls.PullRequest, json)

    refresh = to_pull
class CheckApp(models.GitHubCore):
    """Representation of an App returned in Checks APIs.

    .. versionadded:: 1.3.0

    .. note::
        Refreshing this object returns a :class:`~github3.apps.App`.

    This object has the following attributes:

    .. attribute:: description
        The description of the App provided by the owner.

    .. attribute:: external_url
        The URL provided for the App by the owner.

    .. attribute:: html_url
        The HTML URL provided for the App by the owner.

    .. attribute:: id
        The unique identifier for the App. This is useful in cases where you
        may want to authenticate either as an App or as a specific
        installation of an App.

    .. attribute:: name
        The display name of the App that the user sees.

    .. attribute:: owner
        A dict of minimal user information retrieved from the Check data
        representing the app owner
    """

    # Media type required while the Apps API is in preview.
    CUSTOM_HEADERS = {
        "Accept": "application/vnd.github.machine-man-preview+json"
    }
    def _update_attributes(self, app):
        self.description = app["description"]
        self.external_url = app["external_url"]
        self.html_url = app["html_url"]
        self.id = app["id"]
        self.name = app["name"]
        self.owner = app["owner"]
        # The Apps API addresses apps by slug, which is the last path
        # segment of the app's HTML URL.
        _, slug = app["html_url"].rsplit("/", 1)
        self._api = self.url = self._build_url("apps", slug)
    def _repr(self):
        return '<App ["{}" by {}]>'.format(
            self.name, str(self.owner["login"])
        )
    def to_app(self):
        """Retrieve a full App object for this CheckApp.

        :returns:
            The full information about this App.
        :rtype:
            :class:`~github3.apps.App`
        """
        from . import apps

        # Preview header is required for the Apps endpoint.
        headers = getattr(self, "CUSTOM_HEADERS", None)
        json = self._json(self._get(self._api, headers=headers), 200)
        return self._instance_or_null(apps.App, json)
    refresh = to_app
class CheckSuite(models.GitHubCore):
    """The :class:`CheckSuite <CheckSuite>` object.

    .. versionadded:: 1.3.0

    Please see GitHub's `CheckSuite Documentation`_ for more information.

    .. attribute:: status
        The status of the Check Suite

    .. attribute:: conclusion
        The highest priority check run conclusion. If it has not completed this
        will be None

    .. attribute:: head_sha
        The sha of the commit at the head of the branch the check was run
        against (the source of the pull request)

    .. attribute:: head_branch
        The branch checked

    .. attribute:: before
        The sha of the pull request target branch at the time of the checks

    .. attribute:: after
        The sha of the target branch after the change is applied

    .. attribute:: repository
        A representation of the repository the suite belongs to as
        :class:`~github3.repos.repo.ShortRepository`.

    .. attribute:: original_pull_requests
        A list of representations of the pull requests the suite belongs to as
        :class:`~github3.checks.CheckPullRequest`.

        .. note::
            This may be empty.

    .. attribute:: id
        The unique GitHub assigned numerical id of this check suite.

    .. attribute:: app
        A :class:`~github3.checks.CheckApp` representing the App
        this suite belongs to.

    .. CheckSuite Documentation:
        http://developer.github.com/v3/checks/suites/
    """

    class_name = "CheckSuite"
    # Media type required while the Checks API is in preview.
    CUSTOM_HEADERS = {"Accept": "application/vnd.github.antiope-preview+json"}
    def _update_attributes(self, suite):
        # Import here, because a toplevel import causes an import loop
        from . import repos

        self._api = suite["url"]
        self.status = suite["status"]
        self.conclusion = suite["conclusion"]
        self.head_branch = suite["head_branch"]
        self.head_sha = suite["head_sha"]
        self.before = suite["before"]
        self.after = suite["after"]
        # "pull_requests" may be absent from some payloads; default to [].
        prs = suite.get("pull_requests", [])
        self.original_pull_requests = [CheckPullRequest(p, self) for p in prs]
        self.repository = repos.ShortRepository(suite["repository"], self)
        self.id = suite["id"]
        self.app = CheckApp(suite["app"], self)
    def _repr(self):
        return "<{s.class_name} [{s.id}:{s.status}]>".format(s=self)
    @decorators.requires_auth
    def rerequest(self):
        """Rerequest the check suite.

        :returns:
            True if successful, False otherwise
        :rtype:
            bool
        """
        url = self._build_url("rerequest", base_url=self._api)
        # presumably _boolean maps a 201 response to True and 404 to
        # False -- TODO confirm against models.GitHubCore._boolean.
        return self._boolean(
            self._post(url, headers=CheckSuite.CUSTOM_HEADERS), 201, 404
        )
    def check_runs(self):
        """Retrieve the check runs for this suite.

        :returns:
            the check runs for this commit
        :rtype:
            :class:`~github3.checks.CheckRun`
        """
        url = self._build_url("check-runs", base_url=self._api)
        # -1 presumably means "iterate over all pages" -- TODO confirm
        # against models.GitHubCore._iter.
        return self._iter(
            -1,
            url,
            CheckRun,
            headers=CheckRun.CUSTOM_HEADERS,
            list_key="check_runs",
        )
class CheckRunAnnotation(models.GitHubCore):
    """A single annotation attached to a check run.

    .. versionadded:: 1.3.0

    .. attribute:: path

        Path of the file the annotation refers to.

    .. attribute:: start_line

        First line covered by the annotation.

    .. attribute:: end_line

        Last line covered by the annotation.

    .. attribute:: start_column

        First column covered by the annotation.

    .. attribute:: end_column

        Last column covered by the annotation.

    .. attribute:: annotation_level

        Severity of the annotation: one of 'notice', 'warning'
        or 'failure'.

    .. attribute:: title

        Title representing the annotation.

    .. attribute:: message

        Short feedback message for the annotated lines of code.

    .. attribute:: raw_details

        Full details behind this annotation.
    """

    class_name = "CheckRunAnnotation"
    CUSTOM_HEADERS = {"Accept": "application/vnd.github.antiope-preview+json"}

    def _repr(self):
        span = "{s.path}:{s.start_line}-{s.end_line}".format(s=self)
        return "<{} [{}]>".format(self.class_name, span)

    def _update_attributes(self, note):
        # Every attribute maps 1:1 onto a key of the annotation payload.
        for field in (
            "path",
            "start_line",
            "end_line",
            "start_column",
            "end_column",
            "annotation_level",
            "title",
            "message",
            "raw_details",
        ):
            setattr(self, field, note[field])
class CheckRunOutput(models.GitHubCore):
    """The output produced by a check run.

    .. versionadded:: 1.3.0

    .. attribute:: title

        Title of the check run output.

    .. attribute:: summary

        Summary text of the check run output.

    .. attribute:: text

        Body text of the check run output.

    .. attribute:: annotations_count

        Number of annotations attached to this check run.

    .. attribute:: annotations_url

        URL from which the annotations can be retrieved.
    """

    class_name = "CheckRunOutput"

    def _update_attributes(self, output):
        # Mirror payload keys onto attributes of the same name.
        for field in ("title", "summary", "text",
                      "annotations_count", "annotations_url"):
            setattr(self, field, output[field])

    def _repr(self):
        return "<{} [{}]>".format(self.class_name, self.title)

    def annotations(self):
        """Iterate over the annotations of this check run.

        :returns:
            the annotations for this check run
        :rtype:
            :class:`~github3.checks.CheckRunAnnotations`
        """
        url = self._build_url(base_url=self.annotations_url)
        return self._iter(
            -1,
            url,
            CheckRunAnnotation,
            headers=CheckRunAnnotation.CUSTOM_HEADERS,
        )
class CheckRun(models.GitHubCore):
"""The :class:`CheckRun <CheckRun>` object.
.. versionadded:: 1.3.0
Please see GitHub's `CheckRun Documentation`_ for more information.
.. attribute:: status
The current status of the check.
.. attribute:: conclusion
The final conclusion of the check. If the run has not concluded
this will be None.
.. attribute:: head_sha
The sha of the commit at the head of the branch checked.
.. attribute:: name
The name of the check.
.. attribute:: started_at
A :class:`~datetime.datetime` object representing the date and time
when this check run started.
.. attribute:: completed_at
A :class:`~datetime.datetime` object representing the date and time
when this check run completed. If this run is not completed it will
be ``None``.
.. attribute:: original_pull_requests
A list of representations of the pull requests the run belongs to as
:class:`~github3.checks.CheckPullRequest`.
.. note::
This may be empty.
.. attribute:: id
The unique GitHub assigned numerical id of this check run.
.. attribute:: external_id
A reference for the run on the integrator's system. This may be None.
.. attribute:: html_url
The URL one would use to view this check run in the browser.
.. attribute:: check_suite
The ID of the check suite this run belongs to.
.. attribute:: output
A :class:`~github3.checks.CheckRunOutput` representing the output
of this check run.
.. attribute:: app
A :class:`~github3.checks.CheckApp` representing the App
this run belongs to.
.. CheckRun Documentation:
http://developer.github.com/v3/checks/runs/
"""
class_name = "CheckRun"
CUSTOM_HEADERS = {"Accept": "application/vnd.github.antiope-preview+json"}
def _update_attributes(self, run):
self._api = run["url"]
self.html_url = run["html_url"]
self.status = run["status"]
self.conclusion = run["conclusion"]
self.started_at = self._strptime(run["started_at"])
self.completed_at = self._strptime(run["completed_at"])
self.head_sha = run["head_sha"]
self.name = run["name"]
prs = run.get("pull_requests", [])
self.original_pull_requests = [CheckPullRequest(p, self) for p in prs]
self.id = run["id"]
self.external_id = run["external_id"]
self.app = CheckApp(run["app"], self)
self.check_suite = run["check_suite"]["id"]
self.output = CheckRunOutput(run["output"], self)
def _repr(self):
return "<{s.class_name} [{s.name}:{s.status}]>".format(s=self)
@decorators.requires_app_installation_auth
def update(
self,
name=None,
details_url=None,
external_id=None,
started_at=None,
status=None,
conclusion=None,
completed_at=None,
output=None,
actions=None,
):
"""Update this check run.
All parameters are optional.
:param str name:
(optional), new name of the check
:param str | |
<reponame>Umlamulankunzi/Zim_ID_Codes<filename>main.py
"""Main Script of Zim Code Application
Main module script imports relevant modules and launches the
application
"""
import os
import difflib
from tkinter import *
from tkinter import ttk, messagebox
import database_init
__author__ = "<NAME> (<NAME>)"
__copyright__ = "Copyright 2019, Umlamulankunzi Dev"
__credits__ = ["Umlamulankunzi"]
__license__ = "Apache License-2.0"
__version__ = "1.04.25"
__maintainer__ = "U<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "20/02/2019"
def start_app():
    '''Entry point: build the GUI and run the Tk event loop.'''
    root_window = Tk()
    Application(master=root_window)
    root_window.mainloop()
class Application:
'''This class configures and populates the main root window. master
is the main container root window.'''
    def __init__(self, master=None):
        """Build and lay out the main application window.

        Parameters
        -------------------------
        master : tkinter.Tk() instance
            The main root window
        """
        # database initialisation
        self.db = database_init.Data()
        # Shared palette and font used across all widgets below.
        _bgcolor = '#d9d9d9' # X11 color: 'gray85'
        _fgcolor = '#000000' # X11 color: 'black'
        _compcolor = '#d9d9d9' # X11 color: 'gray85'
        _ana1color = '#d9d9d9' # X11 color: 'gray85'
        _ana2color = '#d9d9d9' # X11 color: 'gray85'
        font10 = "-family {Tahoma} -size 10 -weight normal -slant" \
            " roman -underline 0 -overstrike 0"
        # --- ttk styles -------------------------------------------------
        self.style = ttk.Style()
        self.style.configure('.', background=_bgcolor)
        self.style.configure('.', foreground=_fgcolor)
        self.style.configure('.', font="TkDefaultFont")
        self.style.map('.', background=[('selected', _compcolor),
                                        ('active', _ana2color)])
        # self.style.configure("Treeview.Heading", background="#7aafff",
        # font=("calibri", 8, "bold"))
        self.style.configure("TButton", background=_bgcolor,
                             foreground="green",
                             font=("Tahoma", 10, "bold"))
        self.style.configure("bk.TButton", background=_bgcolor,
                             foreground="red",
                             font=("Tahoma", 10, "bold"))
        # Creating custom style for Treeview heading
        # (flat look, with groove/sunken relief on hover and press)
        self.custom_style = ttk.Style()
        self.custom_style.element_create("Custom.Treeheading.border",
                                         "from", "default")
        self.custom_style.layout("Custom.Treeview.Heading", [
            ("Custom.Treeheading.cell", {'sticky': 'nswe'}),
            ("Custom.Treeheading.border", {'sticky': 'nswe', 'children': [
                ("Custom.Treeheading.padding", {'sticky': 'nswe', 'children': [
                    ("Custom.Treeheading.image", {'side': 'right', 'sticky': ''}),
                    ("Custom.Treeheading.text", {'sticky': 'we'})
                ]})
            ]}),
        ])
        self.custom_style.configure("Custom.Treeview.Heading",
                                    background=_bgcolor, foreground="black",
                                    relief="flat", font=("calibri", 11, "bold"))
        self.custom_style.map("Custom.Treeview.Heading",
                              relief=[('active', 'groove'),
                                      ('pressed', 'sunken')])
        # --- root window configuration ----------------------------------
        self.master = master
        self.master.geometry("600x334+451+152")
        self.master.title("Zim National ID Codes")
        self.master.resizable(0, 0)
        self.master.configure(background="#d9d9d9")
        self.master.configure(height="300")
        self.master.iconbitmap('main_icon_small.ico')
        # Pressing Return anywhere triggers a search.
        self.master.bind('<Return>', lambda e: self.search())
        # creating menus on the menu bar
        self.menu_bar = Menu(self.master)
        self.master.config(menu=self.menu_bar)
        # Shortcuts
        self.kb_shortcut_menu = Menu(self.menu_bar, tearoff=0)
        self.kb_shortcut_menu.add_command(label='Clear entry field -Backspace')
        self.kb_shortcut_menu.add_command(label='\nSearch -Enter')
        #'\n')
        self.menu_bar.add_cascade(label='Shortcuts', menu=self.kb_shortcut_menu)
        # Help menu
        self.help_menu = Menu(self.menu_bar, tearoff=0)
        self.help_menu.add_command(label='How to use', command=
                                   lambda:self.help("how_to"))
        self.help_menu.add_command(label='Terms and Conditions', command=
                                   lambda:self.help("t&cs"))
        self.help_menu.add_separator()
        self.help_menu.add_command(label='About', command=
                                   lambda:self.help("about"))
        self.menu_bar.add_cascade(label='Help', menu=self.help_menu)
        # --- widgets ----------------------------------------------------
        main_label = Label(master, bg="#d9d9d9", disabledforeground="#a3a3a3",
                           fg="green", text="Zim ID Codes Finder",
                           font=("Calibri", 20, "bold"), width=214)
        dev_label = Label(master, bg="#d9d9d9", disabledforeground="#a3a3a3",
                          fg="gray",
                          text="Developed by\nUMLAMULANKUNZI\n\u00a9 2019", )
        # Label that hosts the animated gif (see change_img).
        self.image_lb = Label(master, background="#d9d9d9",
                              disabledforeground="#a3a3a3", fg="#000000",
                              width=114)
        self.search_var = StringVar()
        self.search_entry = Entry(master, bg="white", font=font10, fg="#000000",
                                  disabledforeground="#a3a3a3",
                                  width=234, insertbackground="black",
                                  justify="center",
                                  textvariable=self.search_var)
        self.search_entry.focus()
        # Validate the entry content on every keystroke.
        self.search_var.trace_add("write", self.validate_command)
        # Radio buttons select what the search term is matched against.
        self.radio_var = StringVar()
        self.radio_var.set("0")
        self.radio_1 = ttk.Radiobutton(master, value="0", text="Province",
                                       variable=self.radio_var, takefocus='0',
                                       command=self.rad_select)
        self.radio_2 = ttk.Radiobutton(master, text="District", value="1",
                                       variable=self.radio_var, takefocus='0',
                                       command=self.rad_select)
        self.radio_3 = ttk.Radiobutton(master, text="ID Code", value="2",
                                       variable=self.radio_var, takefocus='0',
                                       command=self.rad_select, width=75)
        self.btn_txt = StringVar()
        self.sech_btn = ttk.Button(master, command=self.search, takefocus='0',
                                   width=137, textvariable=self.btn_txt)
        self.browse_frm = LabelFrame(master, relief=GROOVE, fg="black",
                                     text="Browse ID Codes", bg="#d9d9d9",
                                     width=270, font=("Tahoma", 10))
        self.browse_lstbx = ScrolledListBox(self.browse_frm, bg="white",
                                            disabledforeground="#a3a3a3",
                                            font=font10, fg="black",
                                            highlightbackground="#d9d9d9",
                                            highlightcolor="#d9d9d9",
                                            selectbackground="#42e5f4",
                                            selectforeground="black", width=10)
        self.update(self.browse_lstbx, action="update browse listbox")
        # Double-click drills into the selected province.
        self.browse_lstbx.bind("<Double-Button-1>", self.browse_select)
        self.back_btn = ttk.Button(self.browse_frm, text=" Back ",
                                   takefocus='0', style="bk.TButton",
                                   command=lambda: self.update(self.browse_treevw,
                                                               action="back to listbox"),
                                   state=DISABLED)
        self.style.configure('Treeview.Heading', font="TkDefaultFont")
        self.browse_treevw = ScrolledTreeView(self.browse_frm,
                                              columns=["Dist", "Code"],
                                              show="headings",
                                              style="Custom.Treeview")
        # handle_click suppresses column resizing via the separators.
        self.browse_treevw.bind('<Button-1>', self.handle_click)
        self.browse_treevw.heading("Dist", text="District", anchor="w")
        self.browse_treevw.heading("Code", text="ID Code", anchor="center")
        self.browse_treevw.column("Dist", width="171", minwidth="20",
                                  stretch="1", anchor="w")
        self.browse_treevw.column("Code", width="60", minwidth="20",
                                  stretch="1", anchor="center")
        self.results_frm = LabelFrame(master, relief=GROOVE, fg="black",
                                      text="Search Results", bg="#d9d9d9",
                                      width=240, font=("Tahoma", 10))
        self.search_treevw = ScrolledTreeView(self.results_frm,
                                              columns=["Dist", "Code"],
                                              show="headings",
                                              style="Custom.Treeview")
        self.search_treevw.bind('<Button-1>', self.handle_click)
        self.search_treevw.heading("Dist", text="District", anchor="w")
        self.search_treevw.heading("Code", text="ID Code", anchor="center")
        self.search_treevw.column("Dist", width="146", minwidth="146",
                                  stretch="1", anchor="w")
        self.search_treevw.column("Code", width="55", minwidth="55",
                                  stretch="1", anchor="center")
        # --- geometry: absolute placement of all widgets ----------------
        main_label.place(relx=0.32, rely=0.03, height=41, width=222)
        dev_label.place(relx=0.75, rely=0.03, height=41)
        self.image_lb.place(relx=0.03, rely=0.03, height=41, width=56)
        self.search_entry.place(relx=0.05, rely=0.3, height=30, relwidth=0.39)
        self.radio_1.place(relx=0.05, rely=0.21, relwidth=0.12, relheight=0.0,
                           height=21)
        self.radio_2.place(relx=0.18, rely=0.21, relwidth=0.1, relheight=0.0,
                           height=21)
        self.radio_3.place(relx=0.32, rely=0.21, relwidth=0.13,
                           relheight=0.0, height=21)
        self.sech_btn.place(relx=0.13, rely=0.41, height=26, width=140)
        self.browse_frm.place(relx=0.52, rely=0.18, relheight=0.76,
                              relwidth=0.45)
        self.browse_lstbx.place(relx=0.03, rely=0.03, relheight=0.8,
                                relwidth=0.95)
        self.back_btn.place(relx=0.68, rely=0.85, relwidth=0.3)
        self.results_frm.place(relx=0.05, rely=0.57, relheight=0.37,
                               relwidth=0.4)
        self.search_treevw.place(relx=0.02, rely=0.05, relheight=0.85
                                 , relwidth=0.94)
        self.rad_select()
        # Using os module to get list of images to be used by
        # change_img method
        self.counter = 0
        self.image_names = []
        # images contained in seperate resource folder the basepath
        # images named alphabetically for changing in correct order
        basepath = 'gif_resource/'
        with os.scandir(basepath) as entries:
            for entry in entries:
                if entry.is_file():
                    self.image_names.append(entry.name)
        self.display_imgs = [PhotoImage(file=basepath + x) for x in self.image_names]
        # Kick off the gif animation loop after 100 ms.
        master.after(100, self.change_img)
@staticmethod
def handle_click(event):
_widget = event.widget
if _widget.identify_region(event.x, event.y) == "separator":
return "break"
    @staticmethod
    def help(action):
        """Show info about the application.

        Parameters
        -------------------------
        action : str
            One of 'about', 'how_to' or 't&cs'; selects which info
            dialog is displayed.

        NOTE(review): this method shadows the builtin ``help``; renaming
        would break the menu callbacks, so it is left as-is.  If *action*
        is not one of the three handled values, the variables below are
        never bound and showinfo raises NameError.
        """
        if action == 'about':
            title_txt = 'About Zim ID Codes'
            info_text = ' ZIM ID CODES'
            detail_txt = 'Ver ' + __version__ +'\nDeveloped by Umlamulankunzi' \
                + '\nPowered by Open Source\n\u00a9 2019'
        elif action == 'how_to':
            title_txt = 'Zim ID Codes Help'
            info_text = '\t\tZIM ID CODES HELP'
            detail_txt = '\n--------------------------------------------------------------------------\n' + \
                '1. Browse ID Codes:\n' + \
                '\t- Double Click on Province name\n' + \
                '\t- View District and codes of province\n' + \
                '\t- Click Back button to go back to province list\n' + \
                '\n--------------------------------------------------------------------------\n' + \
                '2. Search:\n' + \
                '\t- Search by Province, District or Code\n' + \
                '\t- By selecting appropriate type above search entry\n' + \
                '\t- Enter search term in search Entry box\n' + \
                '\t- View Search results in Search results box' +\
                '\n--------------------------------------------------------------------------'
        elif action == 't&cs':
            title_txt = "Zim ID Codes T & Cs"
            info_text ="Copyright 2019 Umlamulankunzi Dev (PD Jele)"
            detail_txt = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the
License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to
in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the
specific language governing permissions and
limitations under the License."""
        messagebox.showinfo(title=title_txt, message=info_text, detail=detail_txt)
def change_img(self):
"""Changes image of image label in main window
Creates gif animation by changing the image of image label
after 100ms.
"""
self.image_lb.configure(image=self.display_imgs[self.counter])
self.counter += 1
if self.counter == len(self.image_names):
self.counter = 0
self.master.after(100, self.change_img)
    def validate_command(self, *_args):
        """Validate/sanitise the search-entry text as the user types.

        Behaviour depends on the selected radio option:
          - ID-code mode (radio_var == '2'): strips whitespace, then trims
            the last character whenever the text is non-numeric or reaches
            length 3 (codes are effectively capped at 2 digits).
          - All modes: title-cases the text, collapses whitespace-only
            input to "", and trims the text back once it reaches 30 chars.
        """
        if self.radio_var.get() == '2':
            self.search_var.set(self.search_var.get().strip())
            try:
                # int() raises ValueError for non-numeric text; length 3 is
                # converted into the same ValueError so both cases share the
                # single trim in the except clause below.
                int(self.search_var.get())
                if len(self.search_var.get()) == 3:
                    raise ValueError
            except ValueError:
                self.search_var.set(self.search_var.get()[:len(self.search_var.get()) - 1])
        self.search_var.set(self.search_var.get().title())
        if self.search_var.get().strip() == "":
            self.search_var.set("")
        # Drop one character when the text ends in two consecutive spaces,
        # so runs of spaces cannot accumulate in the entry.
        elif self.search_var.get()[-2:] == "  ":
            self.search_var.set(self.search_var.get()[:len(self.search_var.get()) - 1])
        try:
            # Enforce a 30-character ceiling via the same trim-on-ValueError
            # pattern used above.
            if len(self.search_var.get()) == 30:
                raise ValueError
        except ValueError:
            self.search_var.set(self.search_var.get()[:len(self.search_var.get()) - 1])
def update(self, w, **kwargs):
"""Update information on Treeview widgets or listbox widget
Parameters
----------
w : tkinter.TreeView or tkinter.Listbox Widget
The widget to be updated
keyword Args:
action: str
defines action to be done by function valid options
include:
- update browse listbox: updates listbox
- update search treeview: updates search treewview
widget
- back to listbox: deletes info on browse treeview
widget and places back the list box
data: list
defines data to be to be updated on widget w,
optional for listbox
prov: str
defines the province for selected data
optional for listbox
"""
if kwargs["action"] == "update browse listbox":
# data = self.db.query(col="all")
provs = self.db.get("prov")
for prov in provs:
w.insert("end", " " + prov)
elif kwargs["action"] == "update search treeview":
self.results_frm.configure(text=kwargs["prov"], foreground="blue",
font=("Tahoma", 12, "bold"))
self.clear_treeview(w)
for info in kwargs["data"]:
w.insert("", "end", values=(info[1:]))
elif kwargs["action"] == "back to listbox":
self.clear_treeview(w)
w.place_forget()
self.browse_lstbx.place(relx=0.03, rely=0.03, relheight=0.8,
relwidth=0.95)
self.back_btn.configure(state=DISABLED)
self.browse_frm.configure(text="Browse ID Codes", fg="black",
font=("Tahoma", 10))
@staticmethod
def clear_treeview(treeview_widget):
"""Clear treeview widget
Parameters
----------
treeview_widget: tkinter.Treeview Widget instance
"""
for row in treeview_widget.get_children():
treeview_widget.delete(row)
def browse_select(self, event):
widget = event.widget
selection = widget.curselection()
prov = widget.get(selection[0])
# populating with selected province districts
info = self.db.query(prov, "province", col1="district", col2="code")
for dist in info:
self.browse_treevw.insert('', 'end', values=dist)
self.browse_lstbx.place_forget()
self.browse_treevw.place(relx=0.03, rely=0.03, relheight=0.8,
relwidth=0.95)
self.back_btn.configure(state=NORMAL)
self.browse_frm.configure(text=prov)
self.browse_frm.configure(foreground="blue", font=("Tahoma", 12, "bold"))
def rad_select(self):
"""Change search button text on radio button select"""
if self.radio_var.get() == "0":
self.btn_txt.set("Search by Province")
elif self.radio_var.get() == "1":
self.btn_txt.set("Search by District")
elif self.radio_var.get() == "2":
self.btn_txt.set("Search by ID Code")
self.search_var.set("")
self.results_frm.configure(text="Search Results", fg="black",
bg="#d9d9d9", font=("Tahoma", 10))
def close_match(self, word, search_type=None):
"""Find close matches
Find possible match for incorrectly spelled user input
Parameters
----------
word: str
User input, used to find possible match
search_type: str
Search type used to define possibilities list from which
to find correct match
"""
possibilities = []
if search_type == "Province":
possibilities = self.db.get("prov")
elif search_type == "District":
possibilities = self.db.get("dist")
elif search_type | |
import ROOT as root
import numpy as np
import uncertainties.unumpy as unp
from uncertainties import ufloat
from uncertainties.unumpy import nominal_values as noms
from uncertainties.unumpy import std_devs as stds
from array import array
import sys
############### Readout command line argument
# argv[1]: data-folder name (required); argv[2]: optional plot style flag.
try:
    name_of_folder = sys.argv[1]
    try:
        plot_style = sys.argv[2]
    except IndexError:
        plot_style = None
except IndexError:
    # Bug fix: previously only printed and fell through, so the script went
    # on to crash with a NameError on name_of_folder below.  Exit instead.
    print('No Argument given Or other Index out of Range Er')
    sys.exit(1)
sys.path.insert(0, './' + name_of_folder + '/')
########################## import pyData.py ######################################
# pyData defines the ROOT histogram objects (e.g. qMap_Ag_C0_V0) used below.
from pyData import *
##################################################### Set Canvas Style #############################
# Global ROOT plot style: no auto title, show fit box, enlarge axis fonts.
root.gStyle.SetOptTitle(0)
root.gStyle.SetOptFit(1)
root.gStyle.SetLabelSize(.05, "XY");
root.gStyle.SetTitleSize(.05, "XY");
root.gStyle.SetTitleOffset(1, "XY");
root.gStyle.SetStatFontSize(.08)
########################### Def Gaus function ######################
# Explicit Gaussian: [0]=amplitude, [1]=mean, [2]=sigma.
personal_gaus = root.TF1("personal_gaus", " [0] * exp( -0.5 * ( (x - [1]) / [2] ) * ( (x - [1]) / [2] ) ) ")
# Labels used when writing fit parameters to the output text files.
name_params = [ "amplitude/[MeanVcal]", "mean/[Col]", "sigma/[Col]"]
personal_gaus.SetParName(0,'Amplitude')
personal_gaus.SetParName(2,'Sigma')
# German parameter label for thesis-style plots, English otherwise.
if plot_style == 'thesis':
    personal_gaus.SetParName(1,'Mittelwert')
else :
    personal_gaus.SetParName(1,'Mean')
############################### Save Data in list #######################################
mean_value_col_list = []
mean_error_col_list = []
x_value = []
x_error = []
##############################################################################################################################
################################### Getting the mean hit value of all columns near the laserspot #############################
###############################################################################################################################
################################## Set sum area, size of sensetive area ###############################
# Bin-index window around the laser spot: columns [xmin, xmax), rows [ymin, ymax).
xmin = 20
xmax = 26
ymin = 62
ymax = 72
#################################### calculating mean of each coloum ################################
for i in range(xmin,xmax): # going thru all col
    content = []
    error = []
    x_value.append(i)
    x_error.append(0.5)  # half a bin width as the x uncertainty
    test_error = []
    for j in range(ymin,ymax): # going thru all rows
        # Only occupied bins contribute to the column sum.
        if qMap_Ag_C0_V0.GetBinContent(i,j) != 0:
            content.append( qMap_Ag_C0_V0.GetBinContent(i,j))
            # N = number of entries behind this profile bin.
            N = qMap_Ag_C0_V0.GetBinEntries( qMap_Ag_C0_V0.GetBin(i,j))
            if N == 1:
                # NOTE(review): for N == 1 the expression is
                # sqrt((c - c/1)^2) == 0, i.e. a zero error -- confirm
                # whether that is the intended single-entry behaviour.
                new_error = np.sqrt( ( qMap_Ag_C0_V0.GetBinContent(i,j) - qMap_Ag_C0_V0.GetBinContent(i,j)/N) **2)
            else:
                new_error = np.sqrt( 1/(N-1) * ( qMap_Ag_C0_V0.GetBinContent(i,j) - qMap_Ag_C0_V0.GetBinContent(i,j)/N) **2)
            #error.append( 1/N * np.sqrt(qMap_Ag_C0_V0.GetBinContent(i,j) *N ) ) # earlier error estimate, kept for reference
            error.append( new_error )
        else:
            pass
    content_bin = unp.uarray( content, error)
    # NOTE(review): despite the surrounding comments saying "mean", this is
    # the SUM of the bin contents in the column (error-propagated via unumpy).
    mean_content_col = content_bin.sum()
    # Saving values in lists
    mean_value_col_list.append( noms(mean_content_col))
    mean_error_col_list.append( stds(mean_content_col) )
########################### Create errorbar plot #####################################
errorbar_plot_col = root.TGraphErrors( len(x_value), array( 'f', x_value- np.ones(len(x_value))), array( 'f', mean_value_col_list), array( 'f', x_error), array( 'f', mean_error_col_list) )
# Shift x to match the plot above (list - ndarray rebinds x_value to an ndarray).
x_value -= np.ones(len(x_value))
############################## Set axis label and range of errobar plot ##################################
# Axis titles: German for the thesis style, English otherwise.
if plot_style == 'thesis':
    _x_title, _y_title = "Spalte", "Summe Hits / Vcal"
else:
    _x_title, _y_title = "Col", "Mean Hit / Vcal"
errorbar_plot_col.GetXaxis().SetTitle(_x_title)
errorbar_plot_col.GetYaxis().SetTitle(_y_title)
# Start the y axis at zero and leave 30 % headroom above the tallest point.
_col_peak = max(mean_value_col_list)
errorbar_plot_col.SetMinimum(0)
errorbar_plot_col.SetMaximum( _col_peak + 0.3 * _col_peak )
####################### create Canvas and FIT ##########################################
c1 = root.TCanvas("c1", "c1", 1980, 1080)
c1.SetGrid()
# Hand-tuned fit-parameter windows per measurement folder:
# par 0 = amplitude, par 1 = mean, par 2 = sigma.
if name_of_folder == '7_mm':
    personal_gaus.SetParLimits(0, max(mean_value_col_list) * .2, max(mean_value_col_list) * 1.5 )
    personal_gaus.SetParLimits(1, np.mean(x_value) * .7, np.mean(x_value) * 1.2 )
    personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * 0.03, np.std(np.array(x_value)) * 1.4 )
elif name_of_folder == '14_mm':
    personal_gaus.SetParLimits(0, max(mean_value_col_list) * .4, max(mean_value_col_list) * 1.5 )
    personal_gaus.SetParLimits(1, np.mean(x_value) * .8, np.mean(x_value) * 1.1 )
    personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * 0.03, np.std(np.array(x_value))*1.1 )
else:
    # Default window for all other laser distances.
    personal_gaus.SetParLimits(0, max(mean_value_col_list) * .5, max(mean_value_col_list) * 1.8 )
    personal_gaus.SetParLimits(1, np.mean(x_value) * .7, np.mean(x_value) * 1.2 )
    personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * 0.03, np.std(np.array(x_value)) * 1.2 )
# Fit over the full column window, padded by half a bin on each side.
errorbar_plot_col.Fit(personal_gaus, "", "", min(x_value) -0.5 , max( x_value) +0.5 )
#errorbar_plot_col.Fit("gaus", "", "", min(x_value) -0.5 , max( x_value) +0.5 )
errorbar_plot_col.Draw("ap*")
############################### Create legend ####################################
# Legend geometry and labels differ between thesis (German) and default styles.
if plot_style == 'thesis':
    _legend_box = (0.15, 0.71, 0.37, 0.93)
    _legend_text_size = 0.055
    _data_label, _fit_label = "Summe Hits", "Fit"
else:
    _legend_box = (0.65, 0.47, 0.98, 0.7)
    _legend_text_size = 0.04
    _data_label, _fit_label = "Row sum hit value", "Gaussian Fit"
legend = root.TLegend(*_legend_box)
legend.SetTextSize(_legend_text_size)
legend.AddEntry(errorbar_plot_col, _data_label, "lep")
legend.AddEntry(personal_gaus, _fit_label, "l")
legend.Draw()
######## Transfer Sigma from Bin to mumeter ############################
# Convert the fitted sigma from column units to micrometres; 150 um is the
# pixel pitch along this axis (original comment said "y direction" for both
# the 150 um and 100 um conversions -- TODO confirm which axis is which).
sigma_mu_meter_col = ufloat(personal_gaus.GetParameter(2), personal_gaus.GetParError(2)) * 150
#############################################################################
############################### Save parameter and plot ###########################################
with open( f'./fit_params/{name_of_folder}_fit_parameters_col_xaxis.txt', 'w') as file:
    for i in range(0,3):
        file.write( name_params[i] + ' ' + str( personal_gaus.GetParameter(i) ) + ' ' + str(personal_gaus.GetParError(i)) + '\n')
with open( f'./fit_parameters_col_xaxis.txt', 'a') as file:
    file.write( name_of_folder + 'Amplitude/Sigma/Mean:' + ' ' + str( personal_gaus.GetParameter(0) ) + ' ' + str(personal_gaus.GetParError(0)) + ' ' + str( personal_gaus.GetParameter(1) ) + ' ' + str(personal_gaus.GetParError(1)) + ' ' + str( personal_gaus.GetParameter(2) ) + ' ' + str(personal_gaus.GetParError(2)) + '\n')
# Bug fix: the two writes below previously indexed name_params with the
# loop variable `i` leaked from the loop above; use the explicit sigma
# index 2 (same value, no reliance on loop leakage).
with open( f'./sigma_col_xaxis.txt', 'a') as file:
    file.write( name_params[2] + '_' + name_of_folder + ' ' + str( personal_gaus.GetParameter(2) ) + ' ' + str(personal_gaus.GetParError(2)) + '\n')
with open( f'./sigma_col_in_mumeter_xaxis.txt', 'a') as file:
    file.write( name_params[2] +'_' + name_of_folder + ' ' + str( noms(sigma_mu_meter_col) ) + ' ' + str( stds(sigma_mu_meter_col) ) + '\n')
# NOTE(review): "erorbar" typo kept in the output filename so existing
# consumers of the plot path keep working.
c1.SaveAs(f'./plots/{name_of_folder}_erorbar_plot_col.pdf')
##############################################################################################################################
################################### Getting the mean hit value of all rows near the laserspot #############################
###############################################################################################################################
############################Reset lists###########################################
mean_value_row_list = []
mean_error_row_list = []
x_value = []
x_error = []
row_with_hits = []
#################################### calculating mean of each row #####################################
for i in range(ymin,ymax): # going thru all rows
    content = []
    error = []
    x_value.append(i)
    x_error.append(0.5)  # half a bin width as the x uncertainty
    for j in range(xmin,xmax): # going thru all col
        if qMap_Ag_C0_V0.GetBinContent(j,i) != 0:
            content.append( qMap_Ag_C0_V0.GetBinContent(j,i))
            N = qMap_Ag_C0_V0.GetBinEntries( qMap_Ag_C0_V0.GetBin(j,i))
            # Bug fix: the error terms below previously read bin (i,j)
            # while the content and entry count use bin (j,i); in this row
            # loop the axes are swapped relative to the column loop above,
            # so the error was computed from the wrong bin.  Use (j,i)
            # consistently.
            if N == 1:
                new_error = np.sqrt( ( qMap_Ag_C0_V0.GetBinContent(j,i) - qMap_Ag_C0_V0.GetBinContent(j,i)/N )**2)
            else:
                new_error = np.sqrt( 1/(N-1) * ( qMap_Ag_C0_V0.GetBinContent(j,i) - qMap_Ag_C0_V0.GetBinContent(j,i)/N) **2)
            #error.append( 1/N * np.sqrt(qMap_Ag_C0_V0.GetBinContent(j,i) * N ) )
            error.append( new_error)
        else:
            pass
    content_bin = unp.uarray( content, error)
    # Sum of the occupied bins in this row (error-propagated via unumpy).
    mean_content_row = content_bin.sum()
    # Saving values in lists
    mean_value_row_list.append( noms(mean_content_row))
    mean_error_row_list.append( stds(mean_content_row))
############################# Create new errorbar plot ####################################
errorbar_plot_rows = root.TGraphErrors( len(x_value), array( 'f', x_value - np.ones(len(x_value))), array( 'f', mean_value_row_list), array( 'f', x_error), array( 'f', mean_error_row_list) )
# Shift x to match the plot (list - ndarray rebinds x_value to an ndarray).
x_value -= np.ones(len(x_value))
# Finer x-axis tick granularity for the row plot.
errorbar_plot_rows.GetXaxis().SetNdivisions(20)
############################### create Canvas ########################################
c2 = root.TCanvas("c2", "c2", 1980, 1080);
c2.SetGrid()
############################## Set axis label of errobar plot ##################################
# Axis titles: German for the thesis style, English otherwise.
if plot_style == 'thesis':
    errorbar_plot_rows.GetXaxis().SetTitle("Zeile")
    errorbar_plot_rows.GetYaxis().SetTitle("Summe Hits / Vcal")
else:
    errorbar_plot_rows.GetXaxis().SetTitle("Row")
    errorbar_plot_rows.GetYaxis().SetTitle("Mean Hit / Vcal")
errorbar_plot_rows.SetMinimum(0)
# Per-dataset headroom above the tallest point (hand-tuned so the legend
# and fit box do not overlap the data).
if name_of_folder == '10-5_mm':
    errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.15 * max(mean_value_row_list) )
elif name_of_folder == '11_mm':
    errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.9 * max(mean_value_row_list) )
elif name_of_folder == '9_mm':
    errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.4 * max(mean_value_row_list) )
else:
    errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.3 * max(mean_value_row_list) )
############################### Fit parameter limits and fit #############################################
# Hand-tuned fit windows per laser-distance dataset:
# par 0 = amplitude, par 1 = mean, par 2 = sigma.
_row_peak = max(mean_value_row_list)
_row_mean = np.mean(x_value)
_row_std = np.std(np.array(x_value))
if name_of_folder == '10-5_mm':
    print(_row_std)
    personal_gaus.SetParLimits(0, _row_peak * .5, _row_peak * 1.5)
    personal_gaus.SetParLimits(1, _row_mean * .9, _row_mean * 1.12)
    personal_gaus.SetParLimits(2, _row_std * .1, _row_std * 0.6)
elif name_of_folder == '11_mm':
    personal_gaus.SetParLimits(0, _row_peak * .5, _row_peak * 1.8)
    personal_gaus.SetParLimits(1, _row_mean * .9, _row_mean * 1.12)
    personal_gaus.SetParLimits(2, _row_std * .05, _row_std * 0.8)
elif name_of_folder == '7_mm':
    personal_gaus.SetParLimits(0, _row_peak * .2, _row_peak * 1.2)
    personal_gaus.SetParLimits(1, _row_mean * .7, _row_mean * 1.3)
    personal_gaus.SetParLimits(2, _row_std * .1, _row_std * 1.05)
elif name_of_folder == '6_mm':
    personal_gaus.SetParLimits(0, _row_peak * .2, _row_peak * 1.31)
    personal_gaus.SetParLimits(1, _row_mean - 3, _row_mean + 3)
    personal_gaus.SetParLimits(2, _row_std * .1, _row_std * 1.05)
elif name_of_folder == '9-5_mm':
    personal_gaus.SetParLimits(0, _row_peak * .4, _row_peak * 1.3)
    personal_gaus.SetParLimits(1, _row_mean - 1/2, _row_mean + 1)
    personal_gaus.SetParLimits(2, _row_std * .2, _row_std)
elif name_of_folder in ('9_mm', '12_mm', '13_mm', '14_mm', '5_mm'):
    # These five datasets used byte-identical limits; folded into one branch.
    personal_gaus.SetParLimits(0, _row_peak * .4, _row_peak * 1.3)
    personal_gaus.SetParLimits(1, _row_mean - 1/2, _row_mean + 1)
    personal_gaus.SetParLimits(2, _row_std * .1, _row_std)
elif name_of_folder == '10_mm':
    personal_gaus.SetParameter(0, _row_peak)
    personal_gaus.SetParLimits(0, _row_peak * .4, _row_peak * 1.8)
    personal_gaus.SetParLimits(1, _row_mean * .7, _row_mean * 1.3)
    personal_gaus.SetParLimits(2, _row_std * .1, _row_std * 1.4)
else:
    # Default window for any other dataset (including 15_mm).
    personal_gaus.SetParLimits(0, _row_peak * .4, _row_peak * 1.5)
    personal_gaus.SetParLimits(1, _row_mean * .8, _row_mean * 1.2)
    personal_gaus.SetParLimits(2, _row_std * .2, _row_std * 1.1)
# Fit over the full row window, padded by half a bin on each side.
errorbar_plot_rows.Fit(personal_gaus, "", "", min(x_value) - 0.5, max(x_value) + 0.5)
errorbar_plot_rows.Draw("ap*")
##################################### create legend ################################################
if plot_style == 'thesis':
    legend = root.TLegend(0.15,0.71,0.37,0.93)
    legend.SetTextSize(0.055)
    legend.AddEntry(errorbar_plot_rows,"Summe Hits","lep")
    legend.AddEntry( personal_gaus,"Fit","l")
    legend.Draw()
else:
    legend = root.TLegend(0.65,0.47,0.98,0.7)
    legend.SetTextSize(0.04)
    # Bug fix: this legend belongs to the ROW plot on canvas c2, but the
    # entry previously referenced errorbar_plot_col (copy-paste from the
    # column section above).
    legend.AddEntry(errorbar_plot_rows,"Row sum hit value","lep")
    legend.AddEntry( personal_gaus,"Gaussian Fit","l")
    legend.Draw()
######## Transfer Sigma from Bin to mumeter ############################
sigma_mu_meter_row = ufloat(personal_gaus.GetParameter(2), personal_gaus.GetParError(2)) * 100 # 100 um pixel pitch along this axis -- NOTE(review): the original said "y direction" for BOTH the 150 um and 100 um conversions; confirm which axis each belongs to
#############################################################################
########################################### saveplot and fit params ########################################
with open( f'./fit_params/{name_of_folder}_fit_parameters_row_yaxis.txt', 'w') as file:
    for i in range(0,3):
        file.write( name_params[i] + ' ' + str( personal_gaus.GetParameter(i) ) + ' ' + str(personal_gaus.GetParError(i)) + '\n')
# NOTE(review): name_params[i] below relies on `i` leaking from the loop
# above (i == 2, the sigma entry); an explicit index 2 would be safer.
with open( f'./sigma_row_yaxis.txt', 'a') as file:
    file.write( name_params[i] +'_' + name_of_folder + ' ' + str( personal_gaus.GetParameter(2) ) + ' ' + str(personal_gaus.GetParError(2)) + '\n')
with open( f'./sigma_row_in_mumeter_yaxis.txt', 'a') as file:
file.write( name_params[i] +'_' + name_of_folder + ' ' + str( noms(sigma_mu_meter_row) ) + ' ' + str( stds(sigma_mu_meter_row) ) + | |
<filename>ino/commands/build.py
# -*- coding: utf-8; -*-
import re
import os.path
import inspect
import subprocess
import platform
import jinja2
import shlex
from jinja2.runtime import StrictUndefined
import ino.filters
from ino.commands.base import Command
from ino.environment import Version
from ino.filters import colorize
from ino.utils import SpaceList, list_subdirs
from ino.exc import Abort
class Build(Command):
    """
    Build a project in the current directory and produce a ready-to-upload
    firmware file.
    The project is expected to have a `src' subdirectory where all its sources
    are located. This directory is scanned recursively to find
    *.[c|cpp|pde|ino] files. They are compiled and linked into resulting
    firmware hex-file.
    Also any external library dependencies are tracked automatically. If a
    source file includes any library found among standard Arduino libraries or
    a library placed in `lib' subdirectory of the project, the library gets
    built too.
    Build artifacts are placed in `.build' subdirectory of the project.
    """
    name = 'build'  # CLI sub-command name
    help_line = "Build firmware from the current directory project"
    # Defaults for the corresponding command-line options below.
    default_make = 'make'
    default_cppflags = ''
    default_cflags = ''
    default_cxxflags = ''
    default_ldflags = ''
    def setup_arg_parser(self, parser):
        """Register the build sub-command's options on `parser`.

        Adds board-model and Arduino-distribution options (via the shared
        environment helpers), toolchain overrides (--make/--cc/--cxx/--ar/
        --objcopy) and compiler/linker flag options.  Note the option order
        here is the order shown in --help output.
        """
        super(Build, self).setup_arg_parser(parser)
        self.e.add_board_model_arg(parser)
        self.e.add_arduino_dist_arg(parser)
        parser.add_argument('--make', metavar='MAKE',
                            default=self.default_make,
                            help='Specifies the make tool to use. If '
                            'a full path is not given, searches in Arduino '
                            'directories before PATH. Default: %(default)s".')
        parser.add_argument('--cc', metavar='COMPILER',
                            default=None,
                            help='Specifies the compiler used for C files. If '
                            'a full path is not given, searches in Arduino '
                            'directories before PATH for the architecture '
                            'specific compiler.')
        parser.add_argument('--cxx', metavar='COMPILER',
                            default=None,
                            help='Specifies the compiler used for C++ files. '
                            'If a full path is not given, searches in Arduino '
                            'directories before PATH for the architecture '
                            'specific compiler.')
        parser.add_argument('--ar', metavar='AR',
                            default=None,
                            help='Specifies the AR tool to use. If a full path '
                            'is not given, searches in Arduino directories '
                            'before PATH for the architecture specific ar tool.')
        parser.add_argument('--objcopy', metavar='OBJCOPY',
                            default=None,
                            help='Specifies the OBJCOPY to use. If a full path '
                            'is not given, searches in Arduino directories '
                            'before PATH for the architecture specific objcopy '
                            'tool.')
        parser.add_argument('-f', '--cppflags', metavar='FLAGS',
                            default=self.default_cppflags,
                            help='Flags that will be passed to the compiler. '
                            'Note that multiple (space-separated) flags must '
                            'be surrounded by quotes, e.g. '
                            '`--cppflags="-DC1 -DC2"\' specifies flags to define '
                            'the constants C1 and C2. Default: "%(default)s".')
        parser.add_argument('--cflags', metavar='FLAGS',
                            default=self.default_cflags,
                            help='Like --cppflags, but the flags specified are '
                            'only passed to compilations of C source files. '
                            'Default: "%(default)s".')
        parser.add_argument('--cxxflags', metavar='FLAGS',
                            default=self.default_cxxflags,
                            help='Like --cppflags, but the flags specified '
                            'are only passed to compilations of C++ source '
                            'files. Default: "%(default)s".')
        parser.add_argument('--ldflags', metavar='FLAGS',
                            default=self.default_ldflags,
                            help='Like --cppflags, but the flags specified '
                            'are only passed during the linking stage. Note '
                            'these flags should be specified as if `ld\' were '
                            'being invoked directly (i.e. the `-Wl,\' prefix '
                            'should be omitted). Default: "%(default)s".')
        parser.add_argument('-v', '--verbose', default=False, action='store_true',
                            help='Verbose make output')
def discover(self, args):
board = self.e.board_model(args.board_model)
self.e.find_arduino_dir('arduino_core_dir',
['hardware', 'arduino', board['arch'], 'cores', 'arduino'],
['Arduino.h'] if self.e.arduino_lib_version.major else ['WProgram.h'],
'Arduino core library ({})'.format(board['arch']))
self.e.find_arduino_dir('arduino_libraries_dir', ['libraries'],
human_name='Arduino standard libraries')
self.e.find_arduino_dir('arduino_hardware_libraries_dir', ['hardware', 'arduino', board['arch'], 'libraries'],
human_name='Arduino harware libraries')
if board['arch'] in ['sam']:
self.e.find_arduino_dir('arduino_system_dir', ['hardware', 'arduino', board['arch'], 'system'],
human_name='Arduino system libraries')
if self.e.arduino_lib_version.major:
self.e.find_arduino_dir('arduino_variants_dir',
['hardware', 'arduino', board['arch'], 'variants'],
human_name='Arduino variants directory ({})'.format(board['arch']))
toolset = [
('make', args.make, 'avr'),
('cc', args.cc, None),
('cxx', args.cxx, None),
('ar', args.ar, None),
('ld', None, None),
('objcopy', args.objcopy, None),
]
tools_arch_mapping = {
'avr': {
'dirname': 'avr',
'tool_prefix': 'avr-',
'tools': {
'make': 'make',
'cc': 'gcc',
'cxx': 'g++',
'ar': 'ar',
'ld': 'gcc',
'objcopy': 'objcopy'
}
},
'sam': {
'dirname': 'g++_arm_none_eabi',
'tool_prefix': 'arm-none-eabi-',
'tools': {
'cc': 'gcc',
'cxx': 'g++',
'ar': 'ar',
'ld': 'g++',
'objcopy': 'objcopy'
}
}
}
if board['arch'] not in tools_arch_mapping:
raise Abort('Unknown architecture "{}"'.format(board['arch']))
arch_info = tools_arch_mapping[board['arch']]
for tool_key, tool_binary, arch_override in toolset:
actual_tool_binary = tool_binary if tool_binary else arch_info['tool_prefix'] + arch_info['tools'][tool_key]
self.e.find_arduino_tool(
tool_key, ['hardware', 'tools', arch_info['dirname'] if not arch_override else arch_override, 'bin'],
items=[actual_tool_binary], human_name=tool_binary)
def setup_flags(self, args):
board = self.e.board_model(args.board_model)
mcu_key = '-mcpu=' if board['arch'] in ['sam'] else <KEY>
mcu = mcu_key + board['build']['mcu']
# Hard-code the flags that are essential to building the sketch
self.e['cppflags'] = SpaceList([
mcu,
'-DF_CPU=' + board['build']['f_cpu'],
'-DARDUINO=' + str(self.e.arduino_lib_version.as_int()),
'-DARDUINO_' + board['build']['board'],
'-DARDUINO_ARCH_' + board['arch'].upper(),
'-I' + self.e['arduino_core_dir'],
])
# Add additional flags as specified
self.e['cppflags'] += SpaceList(shlex.split(args.cppflags))
platform_settings = self.e.platform_settings()[board['arch']]
self.e['cppflags'] += SpaceList(platform_settings['compiler']['cpp']['flags'].split(' '))
self.e['objcopyflags'] = SpaceList(platform_settings['compiler']['elf2hex']['flags'].split(' '))
# SAM boards have a pre-built system library
if board['arch'] in ['sam']:
system_dir = self.e.arduino_system_dir
self.e['cppflags'] += [p.replace("{build.system.path}", system_dir) for p in
["-I{build.system.path}/libsam",
"-I{build.system.path}/CMSIS/CMSIS/Include/",
"-I{build.system.path}/CMSIS/Device/ATMEL/"]]
if 'vid' in board['build']:
self.e['cppflags'].append('-DUSB_VID=%s' % board['build']['vid'])
if 'pid' in board['build']:
self.e['cppflags'].append('-DUSB_PID=%s' % board['build']['pid'])
if board['arch'] in ['sam']:
self.e['cppflags'].append('-DUSBCON')
if 'extra_flags' in board['build']:
flags = [f.strip() for f in board['build']['extra_flags'].split(' ')]
flags = filter(lambda f: f not in ['{build.usb_flags}'], flags)
self.e['cppflags'].extend(flags)
if self.e.arduino_lib_version.major:
variant_dir = os.path.join(self.e.arduino_variants_dir,
board['build']['variant'])
self.e.cppflags.append('-I' + variant_dir)
self.e['cflags'] = SpaceList(shlex.split(args.cflags))
self.e['cxxflags'] = SpaceList(shlex.split(args.cxxflags))
# Again, hard-code the flags that are essential to building the sketch
self.e['ldflags'] = SpaceList([mcu])
self.e['ldflags'] += SpaceList([
'-Wl,' + flag for flag in shlex.split(args.ldflags)
])
self.e['ld_pre'] = ''
self.e['ld_post'] = ''
if board['arch'] in ['sam']:
self.e['ldflags'] += SpaceList(['-mthumb', '-lgcc'])
self.e['ld_pre'] = SpaceList([
'-Wl,--check-sections',
'-Wl,--gc-sections',
'-Wl,--entry=Reset_Handler',
'-Wl,--unresolved-symbols=report-all',
'-Wl,--warn-common',
'-Wl,--warn-section-align',
'-Wl,--warn-unresolved-symbols',
'-Wl,--start-group'
])
# The order of linking is very specific in the SAM build.
# This .o must come first, then the variant system lib,
# then the project object files.
self.e['ld_pre'] += SpaceList([
os.path.join(self.e.build_dir, 'arduino', 'syscalls_sam3.o')
])
self.e['ld_post'] = SpaceList([
'-Wl,--end-group'
])
if 'variant_system_lib' in board['build']:
variant_system_lib = os.path.join(self.e.arduino_variants_dir,
board['build']['variant'],
board['build']['variant_system_lib'])
self.e['ld_variant_system_lib'] = variant_system_lib
else:
self.e['ld_variant_system_lib'] = ''
if 'ldscript' in board['build']:
ldscript = os.path.join(self.e.arduino_variants_dir,
board['build']['variant'],
board['build']['ldscript'])
self.e['ldflags'] += SpaceList([
'-T' + ldscript
])
self.e['names'] = {
'obj': '%s.o',
'lib': 'lib%s.a',
'cpp': '%s.cpp',
'deps': '%s.d',
}
def create_jinja(self, verbose):
templates_dir = os.path.join(os.path.dirname(__file__), '..', 'make')
self.jenv = jinja2.Environment(
loader=jinja2.FileSystemLoader(templates_dir),
undefined=StrictUndefined, # bark on Undefined render
extensions=['jinja2.ext.do'])
# inject @filters from ino.filters
for name, f in inspect.getmembers(ino.filters, lambda x: getattr(x, 'filter', False)):
self.jenv.filters[name] = f
# inject globals
self.jenv.globals['e'] = self.e
self.jenv.globals['v'] = '' if verbose else '@'
self.jenv.globals['slash'] = os.path.sep
self.jenv.globals['SpaceList'] = SpaceList
def render_template(self, source, target, **ctx):
template = self.jenv.get_template(source)
contents = template.render(**ctx)
out_path = os.path.join(self.e.build_dir, target)
with open(out_path, 'wt') as f:
f.write(contents)
return out_path
def make(self, makefile, **kwargs):
makefile = self.render_template(makefile + '.jinja', makefile, **kwargs)
ret = subprocess.call([self.e.make, '-f', makefile, 'all'])
if ret != 0:
raise Abort("Make failed with code %s" % ret)
    def recursive_inc_lib_flags(self, libdirs, board_arch):
        """Build '-I<dir>' include flags for every library dir and its subdirs.

        For Arduino 1.5-style libraries an ``arch`` subdirectory holds
        architecture-specific sources: the subdir matching ``board_arch`` is
        preferred, falling back to ``default``. Returns a SpaceList of flags.
        """
        # These directories are not used in a build. For more info see
        # https://github.com/arduino/Arduino/wiki/Arduino-IDE-1.5:-Library-specification
        ignore_architectures = set(board['arch'] for board in self.e.board_models().itervalues()) - set([board_arch])
        # NOTE(review): ignore_architectures is computed but never read in
        # this method body — confirm whether it is dead code.
        lib_excludes = ['extras', 'examples']
        flags = SpaceList()
        for d in libdirs:
            flags.append('-I' + d)
            for subdir in list_subdirs(d, exclude=lib_excludes):
                # This dir requires special handling as it is architecture specific.
                # It is explained in more detail in the link above, but the expected
                # behavior is to prefer a subdir that matches the board architecture,
                # or if none is found, use the 'default' unoptimized architecture.
                if os.path.basename(subdir) == 'arch':
                    arch_subdir = list_subdirs(subdir, include=[board_arch])
                    if not arch_subdir:
                        arch_subdir = list_subdirs(subdir, include=['default'])
                    if arch_subdir:
                        # Include the chosen arch dir and everything below it.
                        flags.append('-I' + arch_subdir[0])
                        flags.extend('-I' + subd for subd in list_subdirs(arch_subdir[0], recursive=True, exclude=lib_excludes))
                else:
                    # Regular (non-arch) subdir: include it and all nested dirs.
                    flags.append('-I' + subdir)
                    flags.extend('-I' + subd for subd in list_subdirs(subdir, recursive=True, exclude=lib_excludes))
        return flags
def _scan_dependencies(self, dir, lib_dirs, inc_flags):
output_filepath = os.path.join(self.e.build_dir, os.path.basename(dir), 'dependencies.d')
self.make('Makefile.deps', inc_flags=inc_flags, src_dir=dir, output_filepath=output_filepath)
self.e['deps'].append(output_filepath)
# search for dependencies on libraries
# for this scan dependency file generated by make
# with regexes to find entries that start with
# libraries dirname
regexes = dict((lib, re.compile(r'\s' + lib + re.escape(os.path.sep))) for lib in lib_dirs)
used_libs = set()
with open(output_filepath) as f:
for line in f:
for lib, regex in regexes.iteritems():
if regex.search(line) and lib != dir:
used_libs.add(lib)
return used_libs
def scan_dependencies(self, args):
board = self.e.board_model(args.board_model)
board_arch = board['arch']
self.e['deps'] = SpaceList()
lib_dirs = [self.e.arduino_core_dir]
lib_dirs += list_subdirs(self.e.lib_dir)
lib_dirs += list_subdirs(self.e.arduino_libraries_dir)
lib_dirs += list_subdirs(self.e.arduino_hardware_libraries_dir)
lib_dirs += [os.path.join(self.e.arduino_variants_dir, board['build']['variant'])]
inc_flags = self.recursive_inc_lib_flags(lib_dirs, board_arch)
# If lib A depends on lib B it have to | |
#
# ENVISIoN
#
# Copyright (c) 2017-2021 <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##############################################################################################
#
# Alterations to this file by <NAME>, <NAME>, <NAME>,
# <NAME> and <NAME>
#
# To the extent possible under law, the person who associated CC0 with
# the alterations to this file has waived all copyright and related
# or neighboring rights to the alterations made to this file.
#
# You should have received a copy of the CC0 legalcode along with
# this work. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
##############################################################################################
#
# Alterations to this file by <NAME>
#
# To the extent possible under law, the person who associated CC0 with
# the alterations to this file has waived all copyright and related
# or neighboring rights to the alterations made to this file.
#
# You should have received a copy of the CC0 legalcode along with
# this work. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
##############################################################################################
#
# Alterations to this file by <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>
#
# To the extent possible under law, the person who associated CC0 with
# the alterations to this file has waived all copyright and related
# or neighboring rights to the alterations made to this file.
#
# You should have received a copy of the CC0 legalcode along with
# this work. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
import numpy as np
import h5py
import array as arr
import scipy.ndimage
def _write_coordinates(h5file, atom_count, coordinates_list, elements, path):
    """Write one coordinate dataset per atom species under ``<path>/Atoms``.

    ``coordinates_list`` is flat; ``atom_count[n]`` rows belong to species n.
    Each dataset is tagged with its element symbol.
    """
    with h5py.File(h5file, "a") as h5:
        offset = 0
        for index, count in enumerate(atom_count):
            name = '{}/Atoms/{:04d}'.format(path, index)
            h5.create_dataset(
                name,
                data=np.asarray(coordinates_list[offset:offset + count]),
                dtype=np.float32
            )
            h5[name].attrs["element"] = elements[index]
            offset += count
def _write_forces(h5file, atom_count, force_list, path):
    """Write one force dataset per atom species under ``<path>/Atoms``.

    Dataset names carry an 'F' suffix to distinguish them from coordinates.
    """
    with h5py.File(h5file, "a") as h5:
        offset = 0
        for index, count in enumerate(atom_count):
            name = '{}/Atoms/{:04d}F'.format(path, index)
            h5.create_dataset(
                name,
                data=np.asarray(force_list[offset:offset + count]),
                dtype=np.float32
            )
            offset += count
def _write_basis(h5file, basis):
    """Store the lattice basis under /basis; no-op when already present."""
    with h5py.File(h5file, "a") as h5:
        if "/basis" not in h5:
            h5.create_dataset('/basis', data=basis, dtype=np.float32)
def _write_scaling_factor(h5file, scaling_factor):
    """Write /scaling_factor (currently hard-wired to 1; see note below)."""
    # NOTE(review): the parsed scaling_factor argument is immediately
    # overwritten with 1, so the stored value is always 1.0 regardless of
    # what the caller passes. Looks like a deliberate override (or a
    # leftover hack) — confirm before relying on the stored value.
    scaling_factor = 1
    with h5py.File(h5file, "a") as h5:
        # Only written once; an existing dataset is left untouched.
        if not "/scaling_factor" in h5:
            h5.create_dataset('/scaling_factor', data=scaling_factor, dtype=np.float32)
    return
def _write_md(h5file, atom_count, coordinates_list, elements, step):
    """Write one MD step: a coordinates dataset per element under
    ``/MD/Atoms/<step>/<element>``, annotated with element and atom count.
    """
    with h5py.File(h5file, "a") as h5:
        offset = 0
        for index, count in enumerate(atom_count):
            name = '/MD/Atoms/{:04d}/{}'.format(step, elements[index])
            dset = h5.create_dataset(
                name,
                data=np.asarray(coordinates_list[offset:offset + count]),
                dtype=np.float32,
                maxshape=(None, 3)  # resizable along the atom axis
            )
            dset.attrs["element"] = elements[index]
            dset.attrs["atoms"] = count
            offset += count
#def _write_md(h5file, atom_count, coordinates_list, elements, step):
# with h5py.File(h5file, "a") as h5:
# p=0
# for n in range(0,len(atom_count)):
# dataset_name = '/MD/Atoms/'+format(n,'04d')
# if step == 0:
# h5.create_dataset(
# dataset_name,
# data=np.asarray(coordinates_list[p:atom_count[n]+p]),
# dtype=np.float32,
# maxshape=(None, 3)
# )
# h5[dataset_name].attrs["element"] = elements[n]
# h5[dataset_name].attrs["atoms"] = atom_count[n]
# p=p+atom_count[n]
# else:
# dataset = h5[dataset_name]
# dataset.resize((step+1)*atom_count[n],axis=0)
# start = step*atom_count[n]
# dataset[start:] = np.asarray(coordinates_list[p:atom_count[n]+p])
# p=p+atom_count[n]
# return
def _write_steps(h5file, steps):
    """Record the number of MD steps as an attribute on the /MD group."""
    with h5py.File(h5file, "a") as h5:
        h5['/MD'].attrs["steps"] = steps
def _write_bandstruct(h5file, band_data, kval_list, parsed_symbols, parsed_coords):
    """Write band-structure data: the k-point list, one energy dataset per
    band, and the high-symmetry point symbols/coordinates.
    """
    with h5py.File(h5file, "a") as h5:
        # NOTE(review): k-points go under 'BandStructure/...' while bands go
        # under 'Bandstructure/...' (different capitalization) — these are
        # two distinct HDF5 groups. Looks accidental; confirm what readers
        # expect before unifying the spelling.
        h5.create_dataset('BandStructure/KPoints', data=np.array(kval_list), dtype = np.float32)
        for i, band in enumerate(band_data):
            dataset = h5.create_dataset('Bandstructure/Bands/{}/{}'.format(i, 'Energy'), data=np.array(band), dtype = np.float32)
            dataset.attrs['Unit'] = 'eV'
            dataset.attrs['QuantitySymbol'] = '$E$'
            dataset.attrs['QuantityName'] = 'Energy'
            dataset.attrs['VariableName'] = 'Band {}'.format(i)
            dataset.attrs['VariableSymbol'] = '$B_{{{}}}$'.format(i)
        for i in range(0, len(parsed_symbols)):
            # Variable-length string dtype so symbols of any length fit.
            dataset = h5.create_dataset('/Highcoordinates/{}/Symbol'.format(i),
                data = np.asarray(parsed_symbols[i],dtype=h5py.special_dtype(vlen=str)))
            dataset.attrs['Unit'] = 'NoUnit'
            dataset.attrs['QuantitySymbol'] = '$Character$'.format(i)
            dataset.attrs['QuantityName'] = 'Symbol of highcoordinate'
            dataset.attrs['VariableName'] = 'Symbol of coordinate {}'.format(i)
            dataset.attrs['VariableSymbol'] = '$S_{{{}}}$'.format(i)
            h5.create_dataset('/Highcoordinates/{}/Coordinates'.format(i),
                data = np.array(parsed_coords[i]),
                dtype = np.float32)
def _write_fermi_energy(h5file, fermi_energy):
    """Write the Fermi energy (eV) to /FermiEnergy as a float32 scalar.

    Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``float`` is the documented drop-in replacement.
    """
    with h5py.File(h5file, "a") as h5:
        h5.create_dataset('/FermiEnergy',
                          data=float(fermi_energy),
                          dtype=np.float32)
def _write_fermisurface(h5file, kval_list, fermi_energy, reciprocal_lattice_vectors):
    """Write Fermi-surface data: per-k-point energies and coordinates, the
    Fermi energy (unless already stored), and the reciprocal lattice vectors.
    """
    with h5py.File(h5file, "a") as h5:
        for idx, kpoint in enumerate(kval_list):
            energies = h5.create_dataset(
                '/FermiSurface/KPoints/{}/Energy'.format(idx),
                data=np.array(kpoint.energies),
                dtype=np.float32)
            energies.attrs['Unit'] = 'eV'
            energies.attrs['QuantitySymbol'] = '$E$'
            energies.attrs['QuantityName'] = 'Energy'
            energies.attrs['VariableName'] = 'Band {}'.format(idx)
            energies.attrs['VariableSymbol'] = '$B_{{{}}}$'.format(idx)
            h5.create_dataset(
                '/FermiSurface/KPoints/{}/Coordinates'.format(idx),
                data=np.array(kpoint.coordinates),
                dtype=np.float32)
        if '/FermiEnergy' in h5:
            print('Fermi energy already parsed. Skipping.')
        else:
            _write_fermi_energy(h5file, fermi_energy)
        for idx, vector in enumerate(reciprocal_lattice_vectors):
            h5.create_dataset(
                '/FermiSurface/ReciprocalLatticeVectors/{}'.format(idx),
                data=np.array([float(component) for component in vector]),
                dtype=np.float32)
def _write_dos(h5file, total, partial, total_data, partial_list, fermi_energy):
    """Write total and partial density-of-states data plus the Fermi energy."""
    def _annotate(dset, variable_name='', variable_symbol='',
                  quantity_name='', quantity_symbol='', unit=''):
        # Attach the standard plotting metadata attributes to a dataset.
        dset.attrs.update({
            'VariableName': variable_name,
            'VariableSymbol': variable_symbol,
            'QuantityName': quantity_name,
            'QuantitySymbol': quantity_symbol,
            'Unit': unit
        })

    def _annotate_dos(dset, name):
        # Energy columns, integrated DOS, and plain DOS use different units.
        if name == 'Energy':
            _annotate(dset, 'Energy', '$E$', 'Energy', '$E$', 'eV')
        elif name.startswith('Integrated'):
            _annotate(dset, name, '', 'Integrated Density of States',
                      '$\\int D$', 'states')
        else:
            _annotate(dset, name, '', 'Density of States', '$D$', 'states/eV')

    with h5py.File(h5file, "a") as h5:
        fermi = h5.create_dataset('FermiEnergy', data=np.array(fermi_energy),
                                  dtype=np.float32)
        _annotate(fermi, 'Fermi Energy', '$E_f$', 'Energy', '$E$', 'eV')
        for name, data in zip(total, total_data):
            dset = h5.create_dataset('DOS/Total/{}'.format(name),
                                     data=np.array(data), dtype=np.float32)
            _annotate_dos(dset, name)
        for index, partial_data in enumerate(partial_list):
            # Write all available DOS. zip() stops at the shorter sequence,
            # so e.g. directories for f-DOS without corresponding data are
            # silently skipped.
            for name, data in zip(partial, partial_data):
                dset = h5.create_dataset('DOS/Partial/{}/{}'.format(index, name),
                                         data=np.array(data), dtype=np.float32)
                _annotate_dos(dset, name)
def _write_volume(h5file, i, array, data_dim, hdfgroup):
    """Write volume data ``array`` as ``<hdfgroup>/<i>`` in the HDF5 file.

    The flat ``array`` is normalized to [0, 1], reshaped to ``data_dim``
    (stored z, y, x), and interpolated up when small so Inviwo detects it.
    When ``array`` is empty, the previous volume (index ``i``-1) is aliased
    as ``<hdfgroup>/final`` instead. Returns True on success, False when no
    volume could be written.

    Fix: a constant-valued array previously divided by zero during
    normalization, filling the volume with NaN/inf; that scaling step is
    now skipped when the value range is zero.
    """
    with h5py.File(h5file, "a") as h5:
        if array:
            # Normalize array.
            array = np.array(array)
            array -= array.min()  # lowest value becomes 0
            value_range = array.max()
            if value_range:  # constant field: skip scaling (avoid 0/0 -> NaN)
                array /= value_range  # highest value becomes 1
            # Turn into 3 dimensional array
            volumeArray = np.reshape(array, (data_dim[2], data_dim[1], data_dim[0]))
            # Inviwo requires arrays to be above a certain size.
            # Volumes in hdf5 below 48x48x48 will not be detected.
            # Larger interpolated volume dimensions make slice look better;
            # 128 seems to be a good choice between size and looks.
            scale = 128 / min(data_dim)
            if scale > 1:
                volumeArray = scipy.ndimage.zoom(volumeArray, scale, None, 3, 'wrap')
            h5.create_dataset('{}/{}'.format(hdfgroup, i), data=volumeArray, dtype=np.float32)
        else:
            try:
                h5['{}/final'.format(hdfgroup)] = h5['{}/{}'.format(hdfgroup, i - 1)]
            except KeyError:
                print('Not able to write volume')
                return False
    return True
def _write_incar(h5file, incar_data):
    """Store each parsed INCAR key/value pair as ``/incar/<key>``."""
    with h5py.File(h5file, 'a') as h5:
        for tag, setting in incar_data.items():
            h5.create_dataset("/incar/{}".format(tag), data=setting)
def _write_parcharges(h5file, array_tot, data_dim_tot, array_mag, data_dim_mag, band_nr):
    """Write total (and, when present, magnetic) partial-charge volumes for
    one band under ``PARCHG/Bands/<band_nr>``.

    Volumes smaller than 48 in any dimension are repeatedly doubled via
    interpolation so Inviwo detects them. Note that the dimension lists are
    doubled in place, so the caller's lists are mutated accordingly.
    """
    with h5py.File(h5file, "a") as h5:
        total_volume = np.reshape(
            array_tot, (data_dim_tot[2], data_dim_tot[1], data_dim_tot[0]))
        while any(dim < 48 for dim in data_dim_tot):
            for axis in range(3):
                data_dim_tot[axis] *= 2
            total_volume = scipy.ndimage.zoom(total_volume, 2)
        h5.create_dataset('PARCHG/Bands/{}/total'.format(band_nr),
                          data=total_volume, dtype=np.float32)
        if np.size(array_mag) != 0:
            magnetic_volume = np.reshape(
                array_mag, (data_dim_mag[2], data_dim_mag[1], data_dim_mag[0]))
            while any(dim < 48 for dim in data_dim_mag):
                for axis in range(3):
                    data_dim_mag[axis] *= 2
                magnetic_volume = scipy.ndimage.zoom(magnetic_volume, 2)
            h5.create_dataset('PARCHG/Bands/{}/magnetic'.format(band_nr),
                              data=magnetic_volume, dtype=np.float32)
def _write_pcdat_multicol(h5file, pcdat_data, APACO_val, NPACO_val):
# The function is called to write data from PCDAT to HDF5-file. A dataset is created for each element in the system.
# The function is either called in the case of a system with one element.
# Or for a system of multiple elements and an average PCF is calculated for each element.
# PCF is abbreviation for Paircorrelation function.
# Parameters
# __________
# h5file: str
# String containing path to HDF5-file.
# pcdat_data:
# Is a dictionary with the structure {'element_type':[PCF_values]}. If the system has elements 'Si', 'Au' and 'K', the dictionary will be {'Si':float[x], 'Au':float[x], 'K':float[x]} where the float[x] is a list with the PKF values.
#
# APACO_val:
# The value of APACO in INCAR if set, otherwise set with default value 16 (Å). It sets the maximum | |
params.get('hideLegend', len(self.data) > 10):
elements = []
hideNull = params.get('hideNullFromLegend', False)
for series in self.data:
if series.name:
if not(hideNull and all(v is None for v in list(series))):
elements.append((
unquote_plus(series.name),
series.color,
series.options.get('secondYAxis')))
if len(elements) > 0:
self.drawLegend(elements, params.get('uniqueLegend', False))
# Setup axes, labels, and grid
# First we adjust the drawing area size to fit X-axis labels
if (
not self.params.get('hideAxes', False) and
not self.params.get('hideXAxis', False)
):
self.area['ymax'] -= self.getExtents()['maxAscent'] * 2
self.startTime = min([series.start for series in self.data])
if (
self.lineMode == 'staircase' or
set([len(series) for series in self.data]) == set([2])
):
self.endTime = max([series.end for series in self.data])
else:
self.endTime = max([
(series.end - series.step) for series in self.data])
self.timeRange = self.endTime - self.startTime
# Now we consolidate our data points to fit in the currently estimated
# drawing area
self.consolidateDataPoints()
self.encodeHeader('axes')
# Now its time to fully configure the Y-axis and determine the space
# required for Y-axis labels. Since we'll probably have to squeeze the
# drawing area to fit the Y labels, we may need to reconsolidate our
# data points, which in turn means re-scaling the Y axis, this process
# will repeat until we have accurate Y labels and enough space to fit
# our data points
currentXMin = self.area['xmin']
currentXMax = self.area['xmax']
if self.secondYAxis:
self.setupTwoYAxes()
else:
self.setupYAxis()
while (
currentXMin != self.area['xmin'] or
currentXMax != self.area['xmax']
): # see if the Y-labels require more space
# this can cause the Y values to change
self.consolidateDataPoints()
# so let's keep track of the previous Y-label space requirements
currentXMin = self.area['xmin']
currentXMax = self.area['xmax']
if self.secondYAxis: # and recalculate their new requirements
self.setupTwoYAxes()
else:
self.setupYAxis()
# Now that our Y-axis is finalized, let's determine our X labels (this
# won't affect the drawing area)
self.setupXAxis()
if not self.params.get('hideAxes', False):
self.drawLabels()
if not self.params.get('hideGrid', False):
# hideAxes implies hideGrid
self.encodeHeader('grid')
self.drawGridLines()
# Finally, draw the graph lines
self.encodeHeader('lines')
self.drawLines()
def drawVTitle(self, text, rightAlign=False):
lineHeight = self.getExtents()['maxHeight']
if rightAlign:
self.encodeHeader('vtitleRight')
x = self.area['xmax'] - lineHeight
y = self.height / 2
for line in text.split('\n'):
self.drawText(line, x, y, align='center', valign='baseline',
rotate=90)
x -= lineHeight
self.area['xmax'] = x - self.margin - lineHeight
else:
self.encodeHeader('vtitle')
x = self.area['xmin'] + lineHeight
y = self.height / 2
for line in text.split('\n'):
self.drawText(line, x, y, align='center', valign='baseline',
rotate=270)
x += lineHeight
self.area['xmin'] = x + self.margin + lineHeight
def getYCoord(self, value, side=None):
if "left" == side:
yLabelValues = self.yLabelValuesL
yTop = self.yTopL
yBottom = self.yBottomL
elif "right" == side:
yLabelValues = self.yLabelValuesR
yTop = self.yTopR
yBottom = self.yBottomR
else:
yLabelValues = self.yLabelValues
yTop = self.yTop
yBottom = self.yBottom
try:
highestValue = max(yLabelValues)
lowestValue = min(yLabelValues)
except ValueError:
highestValue = yTop
lowestValue = yBottom
pixelRange = self.area['ymax'] - self.area['ymin']
relativeValue = value - lowestValue
valueRange = highestValue - lowestValue
if self.logBase:
if value <= 0:
return None
relativeValue = (
math.log(value, self.logBase) -
math.log(lowestValue, self.logBase))
valueRange = math.log(highestValue, self.logBase) - math.log(
lowestValue, self.logBase)
pixelToValueRatio = pixelRange / valueRange
valueInPixels = pixelToValueRatio * relativeValue
return self.area['ymax'] - valueInPixels
    def drawLines(self, width=None, dash=None, linecap='butt',
                  linejoin='miter'):
        """Draw every series in self.data onto the cairo context.

        Handles stacking (areaMode 'stacked'/'first'/'all'), area alpha
        (by duplicating each stacked series as an opaque stroke series),
        per-series line width/dash options, the three line modes
        ('staircase', 'slope', 'connected'), drawAsInfinite series, and
        None/NaN gaps. Mutates self.data (stacking accumulates values in
        place and stroke copies are appended).
        """
        if not width:
            width = self.lineWidth
        self.ctx.set_line_width(width)
        originalWidth = width
        # NOTE(review): width is reduced to a 0/0.5 fraction here but never
        # read again below (originalWidth is used instead) — presumably a
        # pixel-alignment remnant; confirm before removing.
        width = float(int(width) % 2) / 2
        if dash:
            self.ctx.set_dash(dash, 1)
        else:
            self.ctx.set_dash([], 0)
        self.ctx.set_line_cap({
            'butt': cairo.LINE_CAP_BUTT,
            'round': cairo.LINE_CAP_ROUND,
            'square': cairo.LINE_CAP_SQUARE,
        }[linecap])
        self.ctx.set_line_join({
            'miter': cairo.LINE_JOIN_MITER,
            'round': cairo.LINE_JOIN_ROUND,
            'bevel': cairo.LINE_JOIN_BEVEL,
        }[linejoin])
        # check whether there is an stacked metric
        singleStacked = False
        for series in self.data:
            if 'stacked' in series.options:
                singleStacked = True
        if singleStacked:
            self.data = sort_stacked(self.data)
        # stack the values
        if self.areaMode == 'stacked' and not self.secondYAxis:
            # TODO Allow stacked area mode with secondYAxis
            total = []
            for series in self.data:
                if 'drawAsInfinite' in series.options:
                    continue
                series.options['stacked'] = True
                for i in range(len(series)):
                    if len(total) <= i:
                        total.append(0)
                    if series[i] is not None:
                        # Each series is raised on top of the running total
                        # so areas stack visually.
                        original = series[i]
                        series[i] += total[i]
                        total[i] += original
        elif self.areaMode == 'first':
            self.data[0].options['stacked'] = True
        elif self.areaMode == 'all':
            for series in self.data:
                if 'drawAsInfinite' not in series.options:
                    series.options['stacked'] = True
        # apply alpha channel and create separate stroke series
        if self.params.get('areaAlpha'):
            try:
                alpha = float(self.params['areaAlpha'])
            except ValueError:
                alpha = 0.5
            strokeSeries = []
            for series in self.data:
                if 'stacked' in series.options:
                    series.options['alpha'] = alpha
                    # Duplicate the series so the outline can be drawn at
                    # full opacity on top of the translucent area.
                    newSeries = TimeSeries(
                        series.name, series.start, series.end,
                        series.step * series.valuesPerPoint,
                        [x for x in series])
                    newSeries.xStep = series.xStep
                    newSeries.color = series.color
                    if 'secondYAxis' in series.options:
                        newSeries.options['secondYAxis'] = True
                    strokeSeries.append(newSeries)
            self.data += strokeSeries
        # setup the clip region
        self.ctx.set_line_width(1.0)
        self.ctx.rectangle(self.area['xmin'], self.area['ymin'],
                           self.area['xmax'] - self.area['xmin'],
                           self.area['ymax'] - self.area['ymin'])
        self.ctx.clip()
        self.ctx.set_line_width(originalWidth)
        # save clip to restore once stacked areas are drawn
        self.ctx.save()
        clipRestored = False
        for series in self.data:
            if 'stacked' not in series.options:
                # stacked areas are always drawn first. if this series is not
                # stacked, we finished stacking. reset the clip region so
                # lines can show up on top of the stacked areas.
                if not clipRestored:
                    clipRestored = True
                    self.ctx.restore()
            if 'lineWidth' in series.options:
                self.ctx.set_line_width(series.options['lineWidth'])
            if 'dashed' in series.options:
                self.ctx.set_dash([series.options['dashed']], 1)
            else:
                self.ctx.set_dash([], 0)
            # Shift the beginning of drawing area to the start of the series
            # if the graph itself has a larger range
            missingPoints = (series.start - self.startTime) / series.step
            startShift = series.xStep * (missingPoints / series.valuesPerPoint)
            x = float(self.area['xmin']) + startShift + (self.lineWidth / 2.0)
            y = float(self.area['ymin'])
            startX = x
            if series.options.get('invisible'):
                self.setColor(series.color, 0, True)
            else:
                self.setColor(series.color,
                              series.options.get('alpha') or 1.0)
            # The number of preceding datapoints that had a None value.
            consecutiveNones = 0
            for index, value in enumerate(series):
                if value != value: # convert NaN to None
                    value = None
                if value is None and self.params.get('drawNullAsZero'):
                    value = 0.0
                if value is None:
                    if consecutiveNones == 0:
                        self.ctx.line_to(x, y)
                        if 'stacked' in series.options:
                            # Close off and fill area before unknown interval
                            if self.secondYAxis:
                                if 'secondYAxis' in series.options:
                                    self.fillAreaAndClip(
                                        x, y, startX,
                                        self.getYCoord(0, "right"))
                                else:
                                    self.fillAreaAndClip(
                                        x, y, startX,
                                        self.getYCoord(0, "left"))
                            else:
                                self.fillAreaAndClip(x, y, startX,
                                                     self.getYCoord(0))
                    x += series.xStep
                    consecutiveNones += 1
                else:
                    if self.secondYAxis:
                        if 'secondYAxis' in series.options:
                            y = self.getYCoord(value, "right")
                        else:
                            y = self.getYCoord(value, "left")
                    else:
                        y = self.getYCoord(value)
                    if y is None:
                        # getYCoord returns None for non-positive values on
                        # a log axis; treat the point as missing.
                        value = None
                    elif y < 0:
                        y = 0
                    if 'drawAsInfinite' in series.options and value > 0:
                        self.ctx.move_to(x, self.area['ymax'])
                        self.ctx.line_to(x, self.area['ymin'])
                        self.ctx.stroke()
                        x += series.xStep
                        continue
                    if consecutiveNones > 0:
                        startX = x
                    if self.lineMode == 'staircase':
                        if consecutiveNones > 0:
                            self.ctx.move_to(x, y)
                        else:
                            self.ctx.line_to(x, y)
                        x += series.xStep
                        self.ctx.line_to(x, y)
                    elif self.lineMode == 'slope':
                        if consecutiveNones > 0:
                            self.ctx.move_to(x, y)
                        self.ctx.line_to(x, y)
                        x += series.xStep
                    elif self.lineMode == 'connected':
                        # If the gap is larger than the connectedLimit or
                        # if this is the first non-None datapoint in the
                        # series, start drawing from that datapoint.
                        if (
                            consecutiveNones > self.connectedLimit or
                            consecutiveNones == index
                        ):
                            self.ctx.move_to(x, y)
                        self.ctx.line_to(x, y)
                        x += series.xStep
                    consecutiveNones = 0
            if 'stacked' in series.options:
                if self.lineMode == 'staircase':
                    xPos = x
                else:
                    xPos = x-series.xStep
                if self.secondYAxis:
                    if 'secondYAxis' in series.options:
                        areaYFrom = self.getYCoord(0, "right")
                    else:
                        areaYFrom = self.getYCoord(0, "left")
                else:
                    areaYFrom = self.getYCoord(0)
                self.fillAreaAndClip(xPos, y, startX, areaYFrom)
            else:
                self.ctx.stroke()
            # return to the original line width
            self.ctx.set_line_width(originalWidth)
            if 'dashed' in series.options:
                # if we changed the dash setting before, change it back now
                if dash:
                    self.ctx.set_dash(dash, 1)
                else:
                    self.ctx.set_dash([], 0)
def fillAreaAndClip(self, x, y, startX=None, areaYFrom=None):
startX = (startX or self.area['xmin'])
areaYFrom = (areaYFrom or self.area['ymax'])
pattern = self.ctx.copy_path()
# fill
self.ctx.line_to(x, areaYFrom) # bottom endX
self.ctx.line_to(startX, areaYFrom) # bottom startX
self.ctx.close_path()
if self.areaMode == 'all':
self.ctx.fill_preserve()
else:
self.ctx.fill()
# clip above y axis
self.ctx.append_path(pattern)
self.ctx.line_to(x, areaYFrom) # yZero endX
self.ctx.line_to(self.area['xmax'], areaYFrom) # yZero right
self.ctx.line_to(self.area['xmax'], self.area['ymin']) # top right
self.ctx.line_to(self.area['xmin'], self.area['ymin']) # top left
self.ctx.line_to(self.area['xmin'], areaYFrom) # yZero left
self.ctx.line_to(startX, areaYFrom) # yZero startX
# clip below y axis
self.ctx.line_to(x, areaYFrom) # yZero endX
self.ctx.line_to(self.area['xmax'], areaYFrom) # yZero right
self.ctx.line_to(self.area['xmax'], self.area['ymax']) # bottom right
self.ctx.line_to(self.area['xmin'], self.area['ymax']) # bottom left
self.ctx.line_to(self.area['xmin'], areaYFrom) # yZero left
self.ctx.line_to(startX, areaYFrom) # | |
except StopIteration:
tst.assertEqual(0, i)
else:
tst.assertEqual("(Unavailable because no Python graph has been loaded)",
next(line_iter))
def check_syntax_error_output(tst, out, command_prefix):
    """Check RichTextLines output for valid command prefix but invalid syntax."""
    expected_lines = [
        "Syntax error for command: %s" % command_prefix,
        "For help, do \"help %s\"" % command_prefix
    ]
    tst.assertEqual(expected_lines, out.lines)
def check_error_output(tst, out, command_prefix, args):
    """Check RichTextLines output from invalid/erroneous commands.

    Args:
      tst: A test_util.TensorFlowTestCase instance.
      out: The RichTextLines object to be checked.
      command_prefix: The command prefix of the command that caused the error.
      args: The arguments (excluding prefix) of the command that caused the error.
    """
    # The error header plus at least a traceback body is expected.
    tst.assertGreater(len(out.lines), 2)
    expected_header = ("Error occurred during handling of command: %s %s" %
                       (command_prefix, " ".join(args)))
    tst.assertStartsWith(out.lines[0], expected_header)
def check_main_menu(tst,
                    out,
                    list_tensors_enabled=False,
                    node_info_node_name=None,
                    print_tensor_node_name=None,
                    list_inputs_node_name=None,
                    list_outputs_node_name=None):
    """Check the main menu annotation of an output.

    Args:
      tst: A test_util.TensorFlowTestCase instance.
      out: The RichTextLines object whose main-menu annotation is checked.
      list_tensors_enabled: Expected enabled state of the list_tensors item.
      node_info_node_name: If set, node_info must be enabled and end with
        this node name; otherwise node_info must be disabled. The remaining
        *_node_name arguments behave the same for their menu items.

    Fix: the four identical per-item stanzas are collapsed into one
    data-driven loop (same assertions, same order).
    """
    tst.assertIn(debugger_cli_common.MAIN_MENU_KEY, out.annotations)
    menu = out.annotations[debugger_cli_common.MAIN_MENU_KEY]
    tst.assertEqual(list_tensors_enabled,
                    menu.caption_to_item("list_tensors").is_enabled())
    # Each node-specific item is enabled iff a node name was supplied, in
    # which case its command must end with that node name.
    node_name_items = (
        ("node_info", node_info_node_name),
        ("print_tensor", print_tensor_node_name),
        ("list_inputs", list_inputs_node_name),
        ("list_outputs", list_outputs_node_name),
    )
    for caption, node_name in node_name_items:
        menu_item = menu.caption_to_item(caption)
        if node_name:
            tst.assertTrue(menu_item.is_enabled())
            tst.assertTrue(menu_item.content.endswith(node_name))
        else:
            tst.assertFalse(menu_item.is_enabled())
    # run_info and help are always enabled.
    tst.assertTrue(menu.caption_to_item("run_info").is_enabled())
    tst.assertTrue(menu.caption_to_item("help").is_enabled())
def check_menu_item(tst, out, line_index, expected_begin, expected_end,
                    expected_command):
    """Assert that line *line_index* of *out* carries a MenuItem attribute
    spanning [expected_begin, expected_end) with the expected command."""
    attr_segs = out.font_attr_segs[line_index]
    found_menu_item = False
    for begin, end, attribute in attr_segs:
        # An attribute entry may be a single object or a list of them.
        candidates = attribute if isinstance(attribute, list) else [attribute]
        menu_items = [attr for attr in candidates
                      if isinstance(attr, debugger_cli_common.MenuItem)]
        if menu_items:
            tst.assertEqual(expected_begin, begin)
            tst.assertEqual(expected_end, end)
            tst.assertEqual(expected_command, menu_items[0].content)
            found_menu_item = True
            break
    tst.assertTrue(found_menu_item)
def create_analyzer_cli(dump):
    """Create an analyzer CLI.

    Args:
      dump: A `DebugDumpDir` object to base the analyzer CLI on.

    Returns:
      1) A `DebugAnalyzer` object created based on `dump`.
      2) A `CommandHandlerRegistry` that is based on the `DebugAnalyzer` object
         and has the common tfdbg commands, e.g., lt, ni, li, lo, registered.

    Fix: the eight duplicated register_command_handler stanzas are replaced
    by one loop over a (command, handler, alias) table — same commands, same
    help text, same aliases, same registration order.
    """
    # Construct the analyzer.
    analyzer = analyzer_cli.DebugAnalyzer(dump, _cli_config_from_temp_file())
    # Construct the handler registry.
    registry = debugger_cli_common.CommandHandlerRegistry()
    # Register command handlers: (command name, handler, shorthand alias).
    command_handlers = (
        ("list_tensors", analyzer.list_tensors, "lt"),
        ("node_info", analyzer.node_info, "ni"),
        ("list_inputs", analyzer.list_inputs, "li"),
        ("list_outputs", analyzer.list_outputs, "lo"),
        ("print_tensor", analyzer.print_tensor, "pt"),
        ("print_source", analyzer.print_source, "ps"),
        ("list_source", analyzer.list_source, "ls"),
        ("eval", analyzer.evaluate_expression, "ev"),
    )
    for command, handler, alias in command_handlers:
        registry.register_command_handler(
            command,
            handler,
            analyzer.get_help(command),
            prefix_aliases=[alias])
    return analyzer, registry
@test_util.run_v1_only("b/120545219")
class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
    @classmethod
    def setUpClass(cls):
        """Build a small graph (x = (u*v) + (u*v)), run it once with tfdbg
        file-dump watches enabled, then construct the analyzer CLI over the
        resulting dump directory.

        WARNING: do not insert lines between an op-creating statement and
        its line_number_above() call below — the recorded line numbers are
        asserted against source annotations in the tests.
        """
        cls._dump_root = tempfile.mkdtemp()
        cls._dump_root_for_unique = tempfile.mkdtemp()
        cls._is_gpu_available = test.is_gpu_available()
        if cls._is_gpu_available:
            gpu_name = test_util.gpu_device_name()
            cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
        else:
            cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
        # Absolute path of this test file, used by source-listing tests.
        cls._curr_file_path = os.path.abspath(
            tf_inspect.getfile(tf_inspect.currentframe()))
        cls._sess = session.Session(config=no_rewrite_session_config())
        with cls._sess as sess:
            u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
            v_init_val = np.array([[2.0], [-1.0]])
            u_name = "simple_mul_add/u"
            v_name = "simple_mul_add/v"
            u_init = constant_op.constant(u_init_val, shape=[2, 2], name="u_init")
            u = variables.VariableV1(u_init, name=u_name)
            cls._u_line_number = line_number_above()
            v_init = constant_op.constant(v_init_val, shape=[2, 1], name="v_init")
            v = variables.VariableV1(v_init, name=v_name)
            cls._v_line_number = line_number_above()
            w = math_ops.matmul(u, v, name="simple_mul_add/matmul")
            cls._w_line_number = line_number_above()
            x = math_ops.add(w, w, name="simple_mul_add/add")
            cls._x_line_number = line_number_above()
            a = variables.VariableV1([1, 3, 3, 7], name="a")
            u.initializer.run()
            v.initializer.run()
            a.initializer.run()
            run_options = config_pb2.RunOptions(output_partition_graphs=True)
            debug_utils.watch_graph(
                run_options,
                sess.graph,
                debug_ops=["DebugIdentity"],
                debug_urls="file://%s" % cls._dump_root)
            # Invoke Session.run().
            run_metadata = config_pb2.RunMetadata()
            sess.run([x], options=run_options, run_metadata=run_metadata)
        cls._debug_dump = debug_data.DebugDumpDir(
            cls._dump_root, partition_graphs=run_metadata.partition_graphs)
        cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
file_io.delete_recursively(cls._dump_root)
file_io.delete_recursively(cls._dump_root_for_unique)
def testMeasureTensorListColumnWidthsGivesRightAnswerForEmptyData(self):
timestamp_col_width, dump_size_col_width, op_type_col_width = (
self._analyzer._measure_tensor_list_column_widths([]))
self.assertEqual(len("t (ms)") + 1, timestamp_col_width)
self.assertEqual(len("Size (B)") + 1, dump_size_col_width)
self.assertEqual(len("Op type") + 1, op_type_col_width)
def testMeasureTensorListColumnWidthsGivesRightAnswerForData(self):
    """Column widths track the longer of header text and cell content."""
    first_dump = self._debug_dump.dumped_tensor_data[0]
    self.assertLess(first_dump.dump_size_bytes, 1000)
    self.assertEqual(
        "VariableV2", self._debug_dump.node_op_type(first_dump.node_name))
    widths = self._analyzer._measure_tensor_list_column_widths([first_dump])
    _, size_width, type_width = widths
    # str(dump_size_bytes) is shorter than the "Size (B)" header (8 chars),
    # so the header dictates the column width.
    self.assertEqual(len("Size (B)") + 1, size_width)
    # "VariableV2" is longer than the "Op type" header, so the cell content
    # dictates the column width.
    self.assertEqual(len("VariableV2") + 1, type_width)
def testListTensors(self):
    """`lt` (shorthand for list_tensors) lists every dumped tensor."""
    output = self._registry.dispatch_command("lt", [])
    tensor_names = [
        "simple_mul_add/u:0", "simple_mul_add/v:0", "simple_mul_add/u/read:0",
        "simple_mul_add/v/read:0", "simple_mul_add/matmul:0",
        "simple_mul_add/add:0"]
    op_types = ["VariableV2", "VariableV2", "Identity", "Identity",
                _matmul_op_name(), "Add"]
    assert_listed_tensors(self, output, tensor_names, op_types)
    # The main menu should not offer "list tensors" again.
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorsInReverseTimeOrderWorks(self):
    """`lt -s timestamp -r` lists tensors by timestamp, descending."""
    output = self._registry.dispatch_command("lt", ["-s", "timestamp", "-r"])
    tensor_names = [
        "simple_mul_add/u:0", "simple_mul_add/v:0",
        "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
        "simple_mul_add/matmul:0", "simple_mul_add/add:0"]
    op_types = ["VariableV2", "VariableV2", "Identity", "Identity",
                _matmul_op_name(), "Add"]
    assert_listed_tensors(self, output, tensor_names, op_types,
                          sort_by="timestamp", reverse=True)
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorsInDumpSizeOrderWorks(self):
    """`lt -s dump_size` lists tensors ordered by dump size."""
    output = self._registry.dispatch_command("lt", ["-s", "dump_size"])
    tensor_names = [
        "simple_mul_add/u:0", "simple_mul_add/v:0",
        "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
        "simple_mul_add/matmul:0", "simple_mul_add/add:0"]
    op_types = ["VariableV2", "VariableV2", "Identity", "Identity",
                _matmul_op_name(), "Add"]
    assert_listed_tensors(self, output, tensor_names, op_types,
                          sort_by="dump_size")
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorsInReverseDumpSizeOrderWorks(self):
    """`lt -s dump_size -r` lists tensors by dump size, descending."""
    output = self._registry.dispatch_command("lt", ["-s", "dump_size", "-r"])
    tensor_names = [
        "simple_mul_add/u:0", "simple_mul_add/v:0",
        "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
        "simple_mul_add/matmul:0", "simple_mul_add/add:0"]
    op_types = ["VariableV2", "VariableV2", "Identity", "Identity",
                _matmul_op_name(), "Add"]
    assert_listed_tensors(self, output, tensor_names, op_types,
                          sort_by="dump_size", reverse=True)
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorsWithInvalidSortByFieldGivesError(self):
    """An unsupported -s key surfaces a ValueError message in the output."""
    output = self._registry.dispatch_command("lt", ["-s", "foobar"])
    self.assertIn("ValueError: Unsupported key to sort tensors by: foobar",
                  output.lines)
def testListTensorsInOpTypeOrderWorks(self):
    """`lt -s op_type` lists tensors ordered by op type, ascending."""
    output = self._registry.dispatch_command("lt", ["-s", "op_type"])
    tensor_names = [
        "simple_mul_add/u:0", "simple_mul_add/v:0",
        "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
        "simple_mul_add/matmul:0", "simple_mul_add/add:0"]
    op_types = ["VariableV2", "VariableV2", "Identity", "Identity",
                _matmul_op_name(), "Add"]
    assert_listed_tensors(self, output, tensor_names, op_types,
                          sort_by="op_type", reverse=False)
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorsInReverseOpTypeOrderWorks(self):
    """`lt -s op_type -r` lists tensors ordered by op type, descending."""
    output = self._registry.dispatch_command("lt", ["-s", "op_type", "-r"])
    tensor_names = [
        "simple_mul_add/u:0", "simple_mul_add/v:0",
        "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
        "simple_mul_add/matmul:0", "simple_mul_add/add:0"]
    op_types = ["VariableV2", "VariableV2", "Identity", "Identity",
                _matmul_op_name(), "Add"]
    assert_listed_tensors(self, output, tensor_names, op_types,
                          sort_by="op_type", reverse=True)
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorsInTensorNameOrderWorks(self):
    """`lt -s tensor_name` lists tensors ordered by name, ascending."""
    output = self._registry.dispatch_command("lt", ["-s", "tensor_name"])
    tensor_names = [
        "simple_mul_add/u:0", "simple_mul_add/v:0",
        "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
        "simple_mul_add/matmul:0", "simple_mul_add/add:0"]
    op_types = ["VariableV2", "VariableV2", "Identity", "Identity",
                _matmul_op_name(), "Add"]
    assert_listed_tensors(self, output, tensor_names, op_types,
                          sort_by="tensor_name", reverse=False)
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorsInReverseTensorNameOrderWorks(self):
    """`lt -s tensor_name -r` lists tensors ordered by name, descending."""
    output = self._registry.dispatch_command("lt", ["-s", "tensor_name", "-r"])
    tensor_names = [
        "simple_mul_add/u:0", "simple_mul_add/v:0",
        "simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
        "simple_mul_add/matmul:0", "simple_mul_add/add:0"]
    op_types = ["VariableV2", "VariableV2", "Identity", "Identity",
                _matmul_op_name(), "Add"]
    assert_listed_tensors(self, output, tensor_names, op_types,
                          sort_by="tensor_name", reverse=True)
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorsFilterByNodeNameRegex(self):
    """-n/--node_name_filter restricts the listing by node-name regex."""
    output = self._registry.dispatch_command(
        "list_tensors", ["--node_name_filter", ".*read.*"])
    assert_listed_tensors(
        self, output,
        ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
        ["Identity", "Identity"],
        node_name_regex=".*read.*")
    # A regex that matches no node yields an empty listing.
    output = self._registry.dispatch_command("list_tensors", ["-n", "^read"])
    assert_listed_tensors(self, output, [], [], node_name_regex="^read")
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorFilterByOpTypeRegex(self):
    """-t/--op_type_filter restricts the listing by op-type regex."""
    output = self._registry.dispatch_command(
        "list_tensors", ["--op_type_filter", "Identity"])
    assert_listed_tensors(
        self, output,
        ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
        ["Identity", "Identity"],
        op_type_regex="Identity")
    # An alternation regex matches several op types at once.
    pattern = "(Add|" + _matmul_op_name() + ")"
    output = self._registry.dispatch_command("list_tensors", ["-t", pattern])
    assert_listed_tensors(
        self, output,
        ["simple_mul_add/add:0", "simple_mul_add/matmul:0"],
        ["Add", _matmul_op_name()],
        op_type_regex=pattern)
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorFilterByNodeNameRegexAndOpTypeRegex(self):
    """-n and -t combine conjunctively: both regexes must match."""
    output = self._registry.dispatch_command(
        "list_tensors", ["-t", "(Add|MatMul)", "-n", ".*add$"])
    assert_listed_tensors(
        self, output,
        ["simple_mul_add/add:0"], ["Add"],
        node_name_regex=".*add$",
        op_type_regex="(Add|MatMul)")
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorWithFilterAndNodeNameExclusionWorks(self):
    """--filter_exclude_node_names drops filter matches by node-name regex."""
    # First, create and register the filter.
    def matches_2x1_vector(datum, tensor):
        del datum  # Unused.
        return list(tensor.shape) == [2, 1]
    self._analyzer.add_tensor_filter("is_2x1_vector", matches_2x1_vector)
    # Use shorthand alias for the command prefix.
    output = self._registry.dispatch_command(
        "lt", ["-f", "is_2x1_vector", "--filter_exclude_node_names", ".*v.*"])
    # Without --filter_exclude_node_names the filter would match:
    #   simple_mul_add/v:0, simple_mul_add/v/read:0,
    #   simple_mul_add/matmul:0 and simple_mul_add/add:0.
    # The exclusion regex removes the first two, leaving only the last two.
    assert_listed_tensors(
        self, output,
        ["simple_mul_add/matmul:0", "simple_mul_add/add:0"],
        [_matmul_op_name(), "Add"],
        tensor_filter_name="is_2x1_vector")
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorsFilterNanOrInf(self):
    """Registering and invoking a tensor filter via -f works."""
    # First, register the filter.
    self._analyzer.add_tensor_filter("has_inf_or_nan",
                                     debug_data.has_inf_or_nan)
    # Use shorthand alias for the command prefix.
    output = self._registry.dispatch_command("lt", ["-f", "has_inf_or_nan"])
    # This TF graph run did not generate any bad numerical values, so the
    # filtered listing is empty.
    assert_listed_tensors(
        self, output, [], [], tensor_filter_name="has_inf_or_nan")
    # TODO(cais): A test with some actual bad numerical values.
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorNonexistentFilter(self):
    """Using an unregistered filter name yields a single error line."""
    output = self._registry.dispatch_command("lt", ["-f", "foo_filter"])
    self.assertEqual(
        ['ERROR: There is no tensor filter named "foo_filter".'],
        output.lines)
    check_main_menu(self, output, list_tensors_enabled=False)
def testListTensorsInvalidOptions(self):
    """An unknown flag triggers the standard syntax-error output."""
    output = self._registry.dispatch_command("list_tensors", ["--bar"])
    check_syntax_error_output(self, output, "list_tensors")
def testNodeInfoByNodeName(self):
    """node_info shows inputs/recipients and bolds the node name."""
    node_name = "simple_mul_add/matmul"
    output = self._registry.dispatch_command("node_info", [node_name])
    inputs = [("Identity", "simple_mul_add/u/read"),
              ("Identity", "simple_mul_add/v/read")]
    # The matmul output feeds both operands of the add, hence two entries.
    recipients = [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")]
    assert_node_attribute_lines(self, output, node_name, _matmul_op_name(),
                                self._main_device, inputs, [],
                                recipients, [])
    check_main_menu(
        self,
        output,
        list_tensors_enabled=True,
        list_inputs_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    # Verify that the node name is bold at the end of the first line.
    first_line = output.lines[0]
    self.assertEqual(
        [(len(first_line) - len(node_name), len(first_line), "bold")],
        output.font_attr_segs[0])
def testNodeInfoShowAttributes(self):
    """node_info -a additionally lists the node's attributes."""
    node_name = "simple_mul_add/matmul"
    output = self._registry.dispatch_command("node_info", ["-a", node_name])
    expected_attrs = [("transpose_a", "b: false"),
                      ("transpose_b", "b: false"),
                      ("T", "type: DT_DOUBLE")]
    if test_util.IsMklEnabled():
        # MKL builds register the op under a renamed kernel.
        expected_attrs.append(("_kernel", 's: "MklNameChangeOp"'))
    assert_node_attribute_lines(
        self,
        output,
        node_name,
        _matmul_op_name(),
        self._main_device,
        [("Identity", "simple_mul_add/u/read"),
         ("Identity", "simple_mul_add/v/read")],
        [],
        [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")],
        [],
        attr_key_val_pairs=expected_attrs)
    check_main_menu(
        self,
        output,
        list_tensors_enabled=True,
        list_inputs_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
def testNodeInfoShowDumps(self):
node_name = "simple_mul_add/matmul"
out = self._registry.dispatch_command("node_info", ["-d", node_name])
assert_node_attribute_lines(
| |
search).splitlines()
# Find the Server Entry for the given name and list the ports
for entry in entries:
if entry.startswith(name + "("):
namedEndPoints = AdminConfig.list("NamedEndPoint", entry).splitlines()
for namedEndPoint in namedEndPoints:
endPoint = AdminConfig.showAttribute(namedEndPoint, "endPoint" )
html = html + " <tr><td class=\"left\">" + AdminConfig.showAttribute(namedEndPoint, "endPointName") + "</td><td>" + AdminConfig.showAttribute(endPoint, "port" ) + "</td></tr>" + lineSeparator
# Return generated HTML
return html
# Function - Get Thread Pool settings for a given Server
def getJVMThreadPoolsXML(level, serverid):
    """Return an XML fragment describing the WebContainer thread pool of a server.

    level    -- indentation level passed to indent()
    serverid -- wsadmin config id of the server
    """
    xml = indent(level) + "<threadpools>" + lineSeparator
    # Get the Thread Pools for this given Server
    threadPoolMgrId = AdminConfig.list('ThreadPoolManager', serverid)
    threadPoolIds = AdminConfig.list('ThreadPool', threadPoolMgrId).splitlines()
    # Report only the Web Container thread pool
    for threadPoolId in threadPoolIds:
        tpName = AdminConfig.showAttribute(threadPoolId, 'name')
        if tpName == "WebContainer":
            # Bug fix: the <pool> element used to be opened and closed inside
            # the per-property loop, producing one single-value <pool> per
            # property (all with the same name). Open it once around all of
            # the pool's <value> children instead.
            xml = xml + indent(level + 1) + "<pool name=\"" + tpName + "\">" + lineSeparator
            for prop, desc in jvmThreadPoolProperties.items():
                value = AdminConfig.showAttribute(threadPoolId, prop)
                xml = xml + indent(level + 2) + "<value description=\"" + desc + "\">" + value + "</value>" + lineSeparator
            xml = xml + indent(level + 1) + "</pool>" + lineSeparator
    xml = xml + indent(level) + "</threadpools>" + lineSeparator
    return xml
# Function - Get Thread Pool settings for a given Server
def getJVMThreadPools(serverid):
    """Return HTML table rows describing the server's WebContainer thread pool."""
    parts = [
        " <tr>" + lineSeparator,
        " <th class=\"name\" colspan=2>Thread Pools" + lineSeparator,
        " <div class=\"top\">" + lineSeparator,
        " <a href=\"#top\">Top</a>" + lineSeparator,
        " </div>" + lineSeparator,
        " </th>" + lineSeparator,
        " </tr>" + lineSeparator,
    ]
    # Get the Thread Pools for this given Server
    threadPoolMgrId = AdminConfig.list('ThreadPoolManager', serverid)
    threadPoolIds = AdminConfig.list('ThreadPool', threadPoolMgrId).splitlines()
    # Only the Web Container pool is reported
    for threadPoolId in threadPoolIds:
        tpName = AdminConfig.showAttribute(threadPoolId, 'name')
        if tpName == "WebContainer":
            for prop, desc in jvmThreadPoolProperties.items():
                value = AdminConfig.showAttribute(threadPoolId, prop)
                parts.append(" <tr><td class=\"left\">" + tpName + ": " + desc + "</td><td>" + value + "</td></tr>" + lineSeparator)
    return "".join(parts)
# Function - Get Session Management settings for a given Server
def getSessionManagementXML(level, serverid):
    """Return an XML fragment with the server's session-tuning parameters."""
    # Get the Session Management settings for this server
    tuningParamsId = AdminConfig.list("TuningParams", serverid)
    lines = [indent(level) + "<sessionmgnt>" + lineSeparator]
    # One <value> element per tuning parameter
    for prop, desc in tuningParamsProperties.items():
        value = AdminConfig.showAttribute(tuningParamsId, prop)
        lines.append(indent(level + 1) + "<value description=\"" + desc + "\">" + value + "</value>" + lineSeparator)
    lines.append(indent(level) + "</sessionmgnt>" + lineSeparator)
    return "".join(lines)
# Function - Get Session Management settings for a given Server
def getSessionManagement(serverid):
    """Return HTML table rows listing the server's session tuning parameters."""
    parts = [
        " <tr>" + lineSeparator,
        " <th class=\"name\" colspan=2>Session Management" + lineSeparator,
        " <div class=\"top\">" + lineSeparator,
        " <a href=\"#top\">Top</a>" + lineSeparator,
        " </div>" + lineSeparator,
        " </th>" + lineSeparator,
        " </tr>" + lineSeparator,
    ]
    # Get the Session Management settings for this server
    tuningParamsId = AdminConfig.list("TuningParams", serverid)
    # One table row per tuning parameter
    for prop, desc in tuningParamsProperties.items():
        value = AdminConfig.showAttribute(tuningParamsId, prop)
        parts.append(" <tr><td class=\"left\">" + desc + "</td><td>" + value + "</td></tr>" + lineSeparator)
    return "".join(parts)
# Function - Get Trace Settings for a given Server
def getTraceSettingsXML(level, serverid):
    """Return an XML <trace> element with the server's startup trace spec."""
    traceServiceId = AdminConfig.list('TraceService', serverid)
    spec = AdminConfig.showAttribute(traceServiceId, 'startupTraceSpecification')
    return indent(level) + "<trace property=\"string\">" + spec + "</trace>" + lineSeparator
# Function - Get Trace Settings for a given Server
def getTraceSettings(serverid):
    """Return HTML table rows showing the server's startup trace specification."""
    traceServiceId = AdminConfig.list('TraceService', serverid)
    spec = AdminConfig.showAttribute(traceServiceId, 'startupTraceSpecification')
    parts = [
        " <tr>" + lineSeparator,
        " <th class=\"name\" colspan=2>Trace Settings" + lineSeparator,
        " <div class=\"top\">" + lineSeparator,
        " <a href=\"#top\">Top</a>" + lineSeparator,
        " </div>" + lineSeparator,
        " </th>" + lineSeparator,
        " </tr>" + lineSeparator,
        " <tr><td class=\"left\">Trace String</td><td>" + spec + "</td></tr>" + lineSeparator,
    ]
    return "".join(parts)
# Function - Get Process Definition Environment Entries for a given Server
def getProcessDefEnvEntries(serverid):
    """Return HTML table rows for the server's process-definition environment entries."""
    # Retrieve the Process Definition for the Server
    proc_def = AdminConfig.list('JavaProcessDef', serverid)
    # showAttribute returns e.g. "[entry1(...) entry2(...)]"; strip the
    # surrounding brackets and split into individual config ids.
    entries = AdminConfig.showAttribute(proc_def, 'environment')[1:-1].split(") ")
    # No entries -> no table section at all
    if not entries[0]:
        return ""
    parts = [
        " <tr>" + lineSeparator,
        " <th class=\"name\" colspan=2>Environment Entries" + lineSeparator,
        " <div class=\"top\">" + lineSeparator,
        " <a href=\"#top\">Top</a>" + lineSeparator,
        " </div>" + lineSeparator,
        " </th>" + lineSeparator,
        " </tr>" + lineSeparator,
    ]
    for entry in entries:
        if entry.find(")") == -1:
            entry = entry + ")"  # restore the paren consumed by split()
        name = entry.split("(")[0].replace('"', '')
        value = AdminConfig.showAttribute(entry, 'value')
        if not value:
            value = " "
        parts.append(" <tr><td class=\"left\">" + name + "</td><td>" + value + "</td></tr>" + lineSeparator)
    return "".join(parts)
# Function - Get Custom Properties for a given JVM
def getCustomPropertiesXML(level, jvmid):
    """Return an XML fragment with the JVM's custom system properties."""
    # showAttribute returns "[p1(...) p2(...)]"; strip the brackets and split
    # into individual config ids.
    props = AdminConfig.showAttribute(jvmid, 'systemProperties')[1:-1].split(") ")
    # No custom properties -> empty fragment
    if not props[0]:
        return ""
    xml = indent(level) + "<customprops>" + lineSeparator
    for prop in props:
        if prop.find(")") == -1:
            prop = prop + ")"  # restore the paren consumed by split()
        name = prop.split("(")[0].replace('"', '')
        value = AdminConfig.showAttribute(prop, 'value')
        if not value:
            value = ""
        xml = xml + indent(level + 1) + "<property name=\"" + name + "\">" + value + "</property>" + lineSeparator
    xml = xml + indent(level) + "</customprops>" + lineSeparator
    return xml
# Function - Get Custom Properties for a given JVM
def getCustomProperties(jvmid):
    """Return HTML table rows for the JVM's custom system properties."""
    # showAttribute returns "[p1(...) p2(...)]"; strip the brackets and split
    # into individual config ids.
    props = AdminConfig.showAttribute(jvmid, 'systemProperties')[1:-1].split(") ")
    # No custom properties -> no table section at all
    if not props[0]:
        return ""
    parts = [
        " <tr>" + lineSeparator,
        " <th class=\"name\" colspan=2>Custom Properties" + lineSeparator,
        " <div class=\"top\">" + lineSeparator,
        " <a href=\"#top\">Top</a>" + lineSeparator,
        " </div>" + lineSeparator,
        " </th>" + lineSeparator,
        " </tr>" + lineSeparator,
    ]
    for prop in props:
        if prop.find(")") == -1:
            prop = prop + ")"  # restore the paren consumed by split()
        name = prop.split("(")[0].replace('"', '')
        value = AdminConfig.showAttribute(prop, 'value')
        if not value:
            value = " "
        parts.append(" <tr><td class=\"left\">" + name + "</td><td>" + value + "</td></tr>" + lineSeparator)
    return "".join(parts)
# Function - Build Server Configuration table
def buildServerConfigXML(level, scope):
type = scope.split(":")[0]
name = scope.split(":")[1]
if type == "app_server":
name = name.split(",")[0]
node = scope.split(":")[2]
name = name + ":" + node
elif type == "web_server":
return
# Build the title
xml = indent(level) + "<serverconfig>" + lineSeparator
# List JVM Properties, Custom Properties, Thread Pools, Session Management and
# Trace Settings for Application Server
serverid, jvmid, jvm_xml = getJVMPropertiesXML(level + 1, name)
xml = xml + jvm_xml
#xml = xml + getProcessDefEnvEntries(serverid)
xml = xml + getCustomPropertiesXML(level + 1, jvmid)
xml = xml + getJVMThreadPoolsXML(level + 1, serverid)
xml = xml + getSessionManagementXML(level + 1, serverid)
xml = xml + getTraceSettingsXML(level + 1, serverid)
# List ports for this server
xml = xml + getPortsXML(level | |
dim but when multiple clusters are being
binned it will be 2 dim.
In that case np.apply_along_axis functionality is applied.
The spike weights in that case might be created like so:
>>> spk_W = np.zeros(shape=[len(trial.nClusters), trial.npos])
>>> for i, cluster in enumerate(trial.clusters):
>>> x1 = trial.getClusterIdx(cluster)
>>> spk_W[i, :] = np.bincount(x1, minlength=trial.npos)
This can then be fed into this fcn something like so:
>>> rng = np.array((np.ma.min(
trial.POS.xy, 1).data, np.ma.max(rial.POS.xy, 1).data))
>>> h = _binData(
var=trial.POS.xy, bin_edges=np.array([64, 64]),
weights=spk_W, rng=rng)
Returned will be a tuple containing the binned up data and
the bin edges for x and y (obv this will be the same for all
entries of h)
"""
if weights is None:
weights = np.ones_like(var)
dims = weights.ndim
if (dims == 1 and var.ndim == 1):
var = var[np.newaxis, :]
bin_edges = bin_edges[np.newaxis, :]
elif (dims > 1 and var.ndim == 1):
var = var[np.newaxis, :]
bin_edges = bin_edges[np.newaxis, :]
else:
var = np.flipud(var)
ndhist = np.apply_along_axis(lambda x: np.histogramdd(
var.T, weights=x, bins=bin_edges), 0, weights.T)
return ndhist
def _circPadSmooth(self, var, n=3, ny=None):
"""
Smooths a vector by convolving with a gaussian
Mirror reflects the start and end of the vector to
deal with edge effects
Parameters
----------
var : array_like
The vector to smooth
n, ny : int
Size of the smoothing (sigma in gaussian)
Returns
-------
res : array_like
The smoothed vector with shape the same as var
"""
tn = len(var)
t2 = int(np.floor(tn / 2))
var = np.concatenate((var[t2:tn], var, var[0:t2]))
if ny is None:
ny = n
x, y = np.mgrid[-n:n+1, 0-ny:ny+1]
g = np.exp(-(x**2/float(n) + y**2/float(ny)))
if np.ndim(var) == 1:
g = g[n, :]
g = g / g.sum()
improc = signal.convolve(var, g, mode='same')
improc = improc[tn-t2:tn-t2+tn]
return improc
def _circularStructure(self, radius):
    """
    Generate a circular binary structure for morphological operations
    (e.g. ndimage.binary_dilation).

    In this implementation it is only used to adaptively bin ratemaps
    for information-theoretic measures (Skaggs etc).

    Parameters
    ----------
    radius : int
        the size of the circular structure

    Returns
    -------
    res : array_like
        Binary structure with shape [(radius*2) + 1, (radius*2) + 1]

    See Also
    --------
    RateMap.__adpativeMap
    """
    import skimage.morphology as morphology
    return morphology.disk(radius)
def getAdaptiveMap(self, pos_binned, spk_binned, alpha=200):
    """
    Produces a ratemap that has been adaptively binned according to the
    algorithm described in Skaggs et al., 1996) [1]_.

    Parameters
    ----------
    pos_binned : array_like
        The binned positional data. For example that returned from getMap
        above with mapType as 'pos'
    spk_binned : array_like
        The binned spikes
    alpha : int, optional, default = 200
        A scaling parameter determining the amount of occupancy to aim at
        in each bin

    Returns
    -------
    Returns adaptively binned spike and pos maps. Use to generate Skaggs
    information measure

    Notes
    -----
    Positions with high rates mean proportionately less error than those
    with low rates, so this tries to even the playing field. This type
    of binning should be used for calculations of spatial info
    as with the skaggs_info method in the fieldcalcs class (see below)
    alpha is a scaling parameter that might need tweaking for different
    data sets.
    From the paper:
        The data [are] first binned
        into a 64 X 64 grid of spatial locations, and then the firing rate
        at each point in this grid was calculated by expanding a circle
        around the point until the following criterion was met:
            Nspks > alpha / (Nocc^2 * r^2)
        where Nspks is the number of spikes emitted in a circle of radius
        r (in bins), Nocc is the number of occupancy samples, alpha is the
        scaling parameter
        The firing rate in the given bin is then calculated as:
            sample_rate * (Nspks / Nocc)

    NOTE(review): the inclusion test implemented below is
    alpha / (sqrt(Nspks) * Nocc) <= r, which is a rearrangement of the
    paper's criterion -- confirm against the original derivation.

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME> & <NAME>
        "An Information-Theoretic Approach to Deciphering the Hippocampal
        Code"
        Neural Information Processing Systems, 1993.
    """
    # assign output arrays
    smthdPos = np.zeros_like(pos_binned)
    smthdSpk = np.zeros_like(spk_binned)
    smthdRate = np.zeros_like(pos_binned)
    # Unvisited bins (zero occupancy) are excluded from the expansion.
    # NOTE(review): this mutates the caller's pos_binned/spk_binned in place.
    idx = pos_binned == 0
    pos_binned[idx] = np.nan
    spk_binned[idx] = np.nan
    visited = np.zeros_like(pos_binned)
    visited[pos_binned > 0] = 1
    # array to check which bins have made it
    binCheck = np.isnan(pos_binned)
    # r is the expanding circle radius, in bins
    r = 1
    while np.any(~binCheck):
        # create the filter kernel: binarised disc of radius r
        h = self._circularStructure(r)
        h[h >= np.max(h) / 3.0] = 1
        h[h != 1] = 0
        # stop once the kernel no longer fits inside the map
        if h.shape >= pos_binned.shape:
            break
        # filter the arrays using astropys convolution
        filtPos = convolution.convolve(pos_binned, h, boundary=None)
        filtSpk = convolution.convolve(spk_binned, h, boundary=None)
        filtVisited = convolution.convolve(visited, h, boundary=None)
        # get the bins which made it through this iteration
        trueBins = alpha / (np.sqrt(filtSpk) * filtPos) <= r
        # only accept bins not already satisfied at a smaller radius
        trueBins = np.logical_and(trueBins, ~binCheck)
        # insert values where true, normalised by the visited-bin count
        smthdPos[trueBins] = filtPos[trueBins] / filtVisited[trueBins]
        smthdSpk[trueBins] = filtSpk[trueBins] / filtVisited[trueBins]
        binCheck[trueBins] = True
        r += 1
    smthdRate = smthdSpk / smthdPos
    # restore NaN in the originally unvisited bins
    smthdRate[idx] = np.nan
    smthdSpk[idx] = np.nan
    smthdPos[idx] = np.nan
    return smthdRate, smthdSpk, smthdPos
def autoCorr2D(self, A, nodwell, tol=1e-10):
    """
    Performs a spatial autocorrelation on the array A

    Parameters
    ----------
    A : array_like
        Either 2 or 3D. In the former it is simply the binned up ratemap
        where the two dimensions correspond to x and y.
        If 3D then the first two dimensions are x
        and y and the third (last dimension) is 'stack' of ratemaps
    nodwell : array_like
        A boolean array corresponding the bins in the ratemap that
        weren't visited. See Notes below.
    tol : float, optional
        Values below this are set to zero to deal with v small values
        thrown up by the fft. Default 1e-10

    Returns
    -------
    sac : array_like
        The spatial autocorrelation in the relevant dimensionality

    Notes
    -----
    The nodwell input can usually be generated by:

    >>> nodwell = ~np.isfinite(A)

    NOTE(review): despite the docstring mentioning 3D input, the assert
    below only admits 2D arrays; a singleton third axis is added
    internally -- confirm whether 3D support is intended.
    """
    assert(np.ndim(A) == 2)
    m, n = np.shape(A)
    o = 1
    # Work in 3D with a singleton stack axis so the axis arithmetic below
    # is uniform.
    x = np.reshape(A, (m, n, o))
    nodwell = np.reshape(nodwell, (m, n, o))
    # Zero out unvisited bins so they contribute nothing to the sums.
    x[nodwell] = 0
    # [Step 1] Obtain FFTs of x, the sum of squares and bins visited
    # (zero-padded to 2m-1 x 2n-1 for full linear correlation).
    Fx = np.fft.fft(np.fft.fft(x, 2*m-1, axis=0), 2*n-1, axis=1)
    FsumOfSquares_x = np.fft.fft(np.fft.fft(
        np.power(x, 2), 2*m-1, axis=0), 2*n-1, axis=1)
    Fn = np.fft.fft(np.fft.fft(
        np.invert(nodwell).astype(int), 2*m-1, axis=0), 2*n-1, axis=1)
    # [Step 2] Multiply the relevant transforms and invert to obtain the
    # equivalent convolutions
    rawCorr = np.fft.fftshift(np.real(np.fft.ifft(np.fft.ifft(
        Fx * np.conj(Fx), axis=1), axis=0)), axes=(0, 1))
    sums_x = np.fft.fftshift(np.real(np.fft.ifft(np.fft.ifft(
        np.conj(Fx) * Fn, axis=1), axis=0)), axes=(0, 1))
    sumOfSquares_x = np.fft.fftshift(np.real(np.fft.ifft(np.fft.ifft(
        Fn * np.conj(FsumOfSquares_x), axis=1), axis=0)), axes=(0, 1))
    N = np.fft.fftshift(np.real(np.fft.ifft(np.fft.ifft(
        Fn * np.conj(Fn), axis=1), axis=0)), axes=(0, 1))
    # [Step 3] Account for rounding errors.
    rawCorr[np.abs(rawCorr) < tol] = 0
    sums_x[np.abs(sums_x) < tol] = 0
    sumOfSquares_x[np.abs(sumOfSquares_x) < tol] = 0
    N = np.round(N)
    # Lags supported by at most one overlapping bin are undefined.
    N[N <= 1] = np.nan
    # [Step 4] Compute correlation matrix (Pearson r per spatial lag).
    mapStd = np.sqrt((sumOfSquares_x * N) - sums_x**2)
    mapCovar = (rawCorr * N)-sums_x*sums_x[::-1, :, :][:, ::-1, :][:, :, :]
    return np.squeeze(
        mapCovar / mapStd / mapStd[::-1, :, :][:, ::-1, :][:, :, :])
def crossCorr2D(self, A, B, A_nodwell, B_nodwell, tol=1e-10):
"""
Performs a spatial crosscorrelation between the arrays A and B
Parameters
----------
A, B : array_like
Either 2 or 3D. In the former it is simply the binned up ratemap
where the two dimensions correspond to x and y.
If 3D then the first two dimensions are x
and y and the third (last dimension) is 'stack' of ratemaps
nodwell_A, nodwell_B : array_like
A boolean array corresponding the bins in the ratemap that
weren't visited. See Notes below.
tol : float, optional
Values below this are set to zero to deal with v small values
thrown up by the fft. Default 1e-10
Returns
-------
sac : array_like
The spatial crosscorrelation in the relevant dimensionality
Notes
-----
The nodwell input can usually be generated by:
>>> nodwell = ~np.isfinite(A)
"""
if np.ndim(A) != np.ndim(B):
raise ValueError('Both arrays must have the same dimensionality')
assert(np.ndim(A) == 2)
ma, na = np.shape(A)
mb, nb = np.shape(B)
oa = ob = 1
A = np.reshape(A, (ma, na, oa))
B = np.reshape(B, (mb, nb, ob))
A_nodwell = np.reshape(A_nodwell, (ma, na, oa))
B_nodwell = np.reshape(B_nodwell, (mb, nb, | |
<filename>apps/challenges/forms.py
from datetime import datetime
from django import forms
from django.db.models import Q
from django.forms import widgets
from django.forms.formsets import BaseFormSet
from django.forms.models import inlineformset_factory, ModelChoiceField, BaseInlineFormSet
from django.forms.util import ErrorDict
from challenges.models import (Submission, ExternalLink, Category,
Judgement, JudgingCriterion, JudgingAnswer,
PhaseRound, SubmissionHelp, Phase)
from challenges.widgets import CustomRadioSelect
# Widget overrides shared by the Submission entry forms below; each
# aria-describedby id points at help text rendered in the entry templates.
entry_widgets = {
    'title': forms.TextInput(attrs={'aria-describedby': 'info_title'}),
    'brief_description': forms.TextInput(attrs={'aria-describedby': 'info_brief_description'}),
    'sketh_note': forms.FileInput(attrs={'aria-describedby': 'info_sketh_note'}),
    'description': forms.Textarea(attrs={'aria-describedby': 'info_description',
                                         'id': 'wmd-input', }),
    'life_improvements': forms.Textarea(attrs={
        'aria-describedby': 'info_life_improvements',
    }),
    'take_advantage': forms.Textarea(attrs={
        'aria-describedby': 'info_take_advantage',
    }),
    # NOTE(review): 'info_intertest_making' looks like a typo for
    # 'info_interest_making' -- confirm against the template ids before
    # changing it, since the help text lookup depends on an exact match.
    'interest_making': forms.Textarea(attrs={
        'aria-describedby': 'info_intertest_making',
    }),
    'team_members': forms.Textarea(attrs={
        'aria-describedby': 'info_team_members'
    }),
    'collaborators': forms.Textarea(),
    'is_draft': forms.CheckboxInput(attrs={'aria-describedby': 'info_is_draft'}),
}
# Fields shown on the standard Submission entry form, in display order.
# Bug fix: 'is_draft', 'sketh_note' and 'category' were accidentally listed
# twice (copy-paste); the duplicates are removed. The set of fields exposed
# to the form is unchanged.
entry_fields = (
    'title',
    'brief_description',
    'description',
    'collaborators',
    'is_draft',
    'sketh_note',
    'category',
    'life_improvements',
    'take_advantage',
    'interest_making',
    'team_members',
)
# List new fields for the Submission during the Development phase.
# Bug fix: 'is_draft' was accidentally listed twice; the duplicate is
# removed. The set of fields exposed to the form is unchanged.
development_entry_fields = (
    'title',
    'brief_description',  # What problem are you intending to solve?
    'description',  # What is the technological approach, or development roadmap?
    'is_draft',
    # Following three to support additional resource
    'sketh_note',
    'repository_url',
    'blog_url',
    'category',  # Which Priority Area(s) does the app address?
    'life_improvements',  # How will end users interact with it, and how will they benefit?
    'take_advantage',  # How will your app leverage the 1Gbps, sliceable and deeply programmable network?
    'required_effort',  # How much effort do you expect this work to take?
    'interest_making',  # Will your work be beta-ready by the end of the Development Challenge?
    'team_members',  # Describe yourself and your Team
    'collaborators',  # Do you need help?
)
class EntryForm(forms.ModelForm):
    """Form for creating/editing a challenge Submission."""
    # Need to specify this explicitly here to remove the empty option
    category = ModelChoiceField(queryset=Category.objects.all(),
                                empty_label=None,
                                widget=CustomRadioSelect())

    class Meta:
        model = Submission
        widgets = entry_widgets
        fields = entry_fields

    def clean(self):
        """Run model-form cleaning, discarding the uploaded image on error."""
        super(EntryForm, self).clean()
        if self.errors:
            # Either something is wrong with the image, or there was another
            # error on the form. In the former case, we don't want the image any
            # more; in the latter, we've already lost it and it'll need
            # re-uploading.
            self.files.pop(self.add_prefix('sketh_note'), None)
        return self.cleaned_data
class NewEntryForm(EntryForm):
    """New Entries require to accept the Terms and Conditions"""
    # Required checkbox: the form does not validate unless it is ticked.
    terms_and_conditions = forms.BooleanField()

    class Meta:
        model = Submission
        fields = entry_fields + ('terms_and_conditions',)
        widgets = entry_widgets
class DevelopmentEntryForm(EntryForm):
    """Fields for a new Submission during the Development phase"""
    # Any required field should be described here and called the same as
    # it is called on the Submission model
    # e.g. to make the stkety_note required:
    # sketh_note = forms.ImageField()
    #repository_url = forms.URLField()
    #blog_url = forms.URLField()
    # Declaring these CharFields here makes them required (the model-derived
    # defaults would be optional).
    take_advantage = forms.CharField(
        widget=forms.Textarea)
    interest_making = forms.CharField(
        widget=forms.Textarea)
    team_members = forms.CharField(
        widget=forms.Textarea)
    collaborators = forms.CharField(
        widget=forms.Textarea)
    required_effort = forms.CharField(
        widget=forms.Textarea)

    class Meta:
        model = Submission
        fields = development_entry_fields
        widgets = entry_widgets
class NewDevelopmentEntryForm(DevelopmentEntryForm):
    """New entries during the Development phase require a Terms and conditions
    flag"""
    # Required checkbox: the form does not validate unless it is ticked.
    terms_and_conditions = forms.BooleanField()

    class Meta:
        model = Submission
        fields = development_entry_fields + ('terms_and_conditions',)
        widgets = entry_widgets
class AutoDeleteForm(forms.ModelForm):
    """Form class which deletes its instance if all fields are empty."""

    def is_blank(self):
        """Return True if the user left every visible field empty."""
        # Using base_fields here to ignore any foreign key or ID fields added
        for name, field in self.base_fields.iteritems():
            field_value = field.widget.value_from_datadict(
                self.data, self.files, self.add_prefix(name))
            if field_value:
                return False
        return True

    def full_clean(self):
        """Validate the form; completely blank forms are always valid."""
        if self.is_blank():
            # Blank forms are always valid
            self._errors = ErrorDict()
            self.cleaned_data = {}
            return
        super(AutoDeleteForm, self).full_clean()

    def save(self, commit=True):
        """Save the contents of this form.

        A blank form deletes its existing instance (or does nothing when
        there is none) and returns None.

        Note that this form will fail if the commit argument is set to False
        and all fields are empty.
        """
        if self.is_blank() and self.instance.pk:
            if not commit:
                raise RuntimeError('Auto-deleting forms do not support '
                                   'uncommitted saves.')
            self.instance.delete()
            return None
        if self.is_blank() and not self.instance.pk:
            # Nothing to do
            return None
        # Bug fix: forward the commit flag to the parent. Previously
        # super().save() was called with no arguments, so save(commit=False)
        # on a non-blank form still wrote to the database.
        return super(AutoDeleteForm, self).save(commit=commit)
class EntryLinkForm(AutoDeleteForm):
    """Auto-deleting form for a single external link on a submission."""

    class Meta:
        model = ExternalLink
        fields = ('name', 'url')
# Single shared error message raised by the link formsets below in place
# of their per-form validation errors.
url_error = u'Please provide a valid URL and name for each link provided'
class BaseExternalLinkFormSet(BaseFormSet):
    """Link formset that collapses all per-form errors into one message."""

    def clean(self):
        """Raise a single summary ValidationError if any form has errors."""
        if not any(self.errors):
            return
        raise forms.ValidationError(url_error)
class BaseExternalLinkInlineFormSet(BaseInlineFormSet):
    """Inline link formset that collapses all per-form errors into one message."""

    def clean(self):
        """Raise a single summary ValidationError if any form has errors."""
        if not any(self.errors):
            return
        raise forms.ValidationError(url_error)
# Inline formset tying ExternalLink rows to a Submission. Deletion is handled
# by EntryLinkForm's blank-means-delete behaviour, so the formset's own
# delete checkboxes are disabled.
InlineLinkFormSet = inlineformset_factory(Submission, ExternalLink,
                                          can_delete=False, form=EntryLinkForm,
                                          formset=BaseExternalLinkInlineFormSet)
class JudgingForm(forms.ModelForm):
    """A form for judges to rate submissions.

    The form is generated dynamically using a list of JudgingCriterion objects,
    each of which is a question about some aspect of the submission. Each of
    these criteria has a numeric range (0 to 10 by default).
    """

    def __init__(self, *args, **kwargs):
        """Build one rating field per criterion.

        Expects ``criteria`` in kwargs; existing ratings on ``instance``
        are used to pre-populate ``initial``.
        """
        criteria = kwargs.pop('criteria')
        initial = kwargs.pop('initial', {})
        instance = kwargs.get('instance')
        # Having to do this a bit backwards because we need to retrieve any
        # existing ratings to pass into the superclass constructor, but can't
        # add the extra fields until after the constructor has been called
        new_fields = {}
        for criterion in criteria:
            key = 'criterion_%s' % criterion.pk
            new_fields[key] = self._field_from_criterion(criterion)
            if instance:
                try:
                    answer = instance.answers.get(criterion=criterion)
                    initial[key] = answer.rating
                except JudgingAnswer.DoesNotExist:
                    # No answer for this question yet
                    pass
        super(JudgingForm, self).__init__(*args, initial=initial, **kwargs)
        self.fields.update(new_fields)
        # Keep 'notes' as the last field. BUG FIX: the previous expression
        # ``a not in 'notes'`` performed a substring test, silently dropping
        # any field whose name is a substring of "notes" (e.g. 'note', 'no').
        self.fields.keyOrder = [k for k in self.fields.keyOrder
                                if k != 'notes']
        self.fields.keyOrder.append('notes')

    def _field_from_criterion(self, criterion):
        """Build the integer rating field for a single criterion."""
        return MinMaxIntegerField(label=criterion.question,
                                  min_value=criterion.min_value,
                                  max_value=criterion.max_value)

    @property
    def answer_data(self):
        """The cleaned data from this form related to criteria answers."""
        # criterion_15 -> 15
        # criterion_foo_bang -> foo_bang, if you're feeling so inclined
        extract_key = lambda k: k.split('_', 1)[1]
        return dict((extract_key(k), v) for k, v in self.cleaned_data.items()
                    if k.startswith('criterion_'))

    def save(self):
        """Persist the judgement and upsert one JudgingAnswer per criterion."""
        judgement = super(JudgingForm, self).save()
        for key, value in self.answer_data.items():
            # If this fails, we want to fall over fairly horribly
            criterion = JudgingCriterion.objects.get(pk=key)
            kwargs = {'judgement': judgement, 'criterion': criterion}
            try:
                answer = JudgingAnswer.objects.get(**kwargs)
            except JudgingAnswer.DoesNotExist:
                answer = JudgingAnswer(**kwargs)
            answer.rating = value
            answer.save()
        return judgement

    class Meta:
        model = Judgement
        exclude = ('submission', 'judge')
class NumberInput(widgets.Input):
    """Widget rendered as an HTML5 ``<input type="number">``."""

    input_type = 'number'
class RangeInput(widgets.Input):
    """Widget rendered as an HTML5 ``<input type="range">`` slider."""

    input_type = 'range'
class MinMaxIntegerField(forms.ChoiceField):
    """An integer field that supports passing min/max values to its widget.

    Rendered as radio buttons, one choice per integer in the inclusive
    range ``[min_value, max_value]``.
    """
    widget = widgets.RadioSelect

    def __init__(self, *args, **kwargs):
        lo = kwargs.pop('min_value')
        hi = kwargs.pop('max_value')
        # Upper bound is inclusive so a 1..10 scale actually shows the 10.
        kwargs['choices'] = [(value, value) for value in range(lo, hi + 1)]
        super(MinMaxIntegerField, self).__init__(*args, **kwargs)
class PhaseRoundAdminForm(forms.ModelForm):
    """Admin form validating the ``PhaseRound`` dates."""

    class Meta:
        model = PhaseRound

    def clean(self):
        """Check the round's date consistency.

        - the end date must not precede the start date,
        - the round must lie inside the phase it is associated with,
        - the round must not overlap any other round.
        """
        cleaned = self.cleaned_data
        # Skip the cross-field checks (and their non_field_errors) when a
        # required field is missing from the cleaned data.
        if any(key not in cleaned for key in ('start_date', 'end_date', 'phase')):
            return cleaned
        start_date = cleaned['start_date']
        end_date = cleaned['end_date']
        phase = cleaned['phase']
        if end_date < start_date:
            raise forms.ValidationError('Start date must be before the end date')
        # The selected phase must fully contain this round.
        contained = phase.start_date <= start_date and phase.end_date >= end_date
        if not contained:
            raise forms.ValidationError(
                'Dates should be inside the %s phase. Between %s and %s'
                % (phase.name, phase.start_date, phase.end_date))
        # When updating an existing round, exclude it from the overlap check.
        query_args = [~Q(id=self.instance.id)] if self.instance.id else []
        # Reject rounds that overlap, contain, or are contained by another.
        colliding = PhaseRound.objects.filter(
            (Q(start_date__lte=start_date) & Q(end_date__gte=start_date)) |
            (Q(start_date__lte=end_date) & Q(end_date__gte=end_date)) |
            (Q(start_date__lte=start_date) & Q(end_date__gte=end_date)) |
            (Q(start_date__gte=start_date) & Q(end_date__lte=end_date)),
            *query_args)
        if colliding:
            raise forms.ValidationError(
                'This round dates overlap with other rounds')
        return self.cleaned_data
class SubmissionHelpForm(forms.ModelForm):
    """Form for editing the help notes and status of a submission."""

    class Meta:
        model = SubmissionHelp
        fields = ('notes', 'status')
def get_judging_phase_choices():
    """Return a list of (value, label) choices for the judgeable phases.

    A phase without rounds contributes a single ``phase-<id>`` entry;
    otherwise one ``round-<id>`` entry is added per round.
    """
    choices = [('', '- Select Phase or Round -')]
    for phase in Phase.objects.all():
        if phase.phase_rounds:
            for phase_round in phase.phase_rounds:
                label = 'Phase: %s. %s' % (phase.name, phase_round.name)
                choices.append(('round-%s' % phase_round.id, label))
        else:
            choices.append(('phase-%s' % phase.id, 'Phase: %s' % phase.name))
    return choices
class JudgingAssignmentAdminForm(forms.Form):
judging_phase = forms.ChoiceField(choices=get_judging_phase_choices())
judges_per_submission = forms.IntegerField(required=False,
help_text='Leave empty to '
'assign all submissions to '
'all judges')
    def __init__(self, *args, **kwargs):
        # ``judge_profiles`` is a required keyword argument supplied by the
        # caller: the pool of judges available for assignment. It is popped
        # before delegating so the base Form never sees it.
        self.judge_profiles = kwargs.pop('judge_profiles')
        super(JudgingAssignmentAdminForm, self).__init__(*args, **kwargs)
    def _validate_phase(self, phase):
        """Makes sure the ``Phase`` is closed"""
        # NOTE(review): ``datetime.utcnow()`` implies the module does
        # ``from datetime import datetime`` — confirm against the file's
        # imports, since a plain ``import datetime`` would make this raise.
        if phase.is_open or phase.end_date > datetime.utcnow():
            raise forms.ValidationError('The Phase/Round must be finished to '
                                        'assign the judges to the Submissions')
def clean_judges_per_submission(self):
judges_submission = self.cleaned_data.get('judges_per_submission')
if judges_submission and len(self.judge_profiles) < judges_submission:
raise forms.ValidationError("You don't have enough judges "
"assigned: you only have %d" %
len(judges_submission))
if not judges_submission:
judges_submission = len(self.judge_profiles)
return judges_submission
def clean(self):
if 'judging_phase' in self.cleaned_data:
model_slug, pki = self.cleaned_data['judging_phase'].split('-')
# Fail as loud as possible if the id | |
busy; %i/%i jobs complete\r" %
(time.ctime(), nbusy, (WQ.stats.total_workers_joined - WQ.stats.total_workers_removed),
WQ.stats.total_tasks_complete, WQ.stats.total_tasks_dispatched), newline=False)
if time.time() - wq_reactor.t0 > newline_time:
wq_reactor.t0 = time.time()
logger.info('')
if task:
exectime = task.cmd_execution_time / 1000000
if task.result != 0:
oldid = task.id
oldhost = task.hostname
taskid = WQ.submit(task)
if hasattr(task, 'calc'):
task.calc.wqids.append(taskid)
logger.warning("Task '%s' (id %i) failed on host %s (%i seconds), resubmitted:"
"id %i" % (task.tag, oldid, oldhost, exectime, taskid))
else:
logger.info("Task '%s' (id %i) returned from %s (%i seconds)"
% (task.tag, task.id, task.hostname, exectime),
printlvl=(1 if exectime > success_time else 2))
# Launch the next calculation!
if hasattr(task, 'calc'):
task.calc.saveStatus('ready', display=False)
task.calc.wqids.remove(task.id)
task.calc.launch()
del task
elif (niter >= iters):
break
if iters == np.inf:
logger.info("Reactor loop has no more tasks!")
else:
logger.info("\n")
wq_reactor.t0 = time.time()
def make_task(cmd, cwd, inputs=None, outputs=None, tag=None, calc=None, verbose=0, priority=None):
    """
    Run a task locally or submit it to the Work Queue.

    Parameters
    ----------
    cmd : str
        Command to be executed using a system call.
    cwd : str
        Working directory for the calculation.
    inputs : list or str, optional
        (For WQ) Names of input files inside the working directory, to be
        sent to the worker. A bare string is treated as a single file name.
    outputs : list or str, optional
        (For WQ) Names of output files, to be written back to the working
        directory. A bare string is treated as a single file name.
    tag : str, optional
        (For WQ) Descriptive name for the task, if None then use the command.
    calc : Calculation, optional
        (For WQ) The calculation object is an attribute of the WQ task object,
        and will allow a completed task to execute the next step.
    verbose : int
        Print information out to the screen.
    priority : int, optional
        The priority of this task when waiting in the queue.
    """
    global WQ
    # BUG FIX: the previous [] defaults were mutable default arguments,
    # shared across calls; use None sentinels instead.
    if inputs is None:
        inputs = []
    elif isinstance(inputs, str):
        inputs = [inputs]
    if outputs is None:
        outputs = []
    elif isinstance(outputs, str):
        outputs = [outputs]
    # Record the exact command in the output folder for reproducibility. :)
    with open(os.path.join(cwd, 'command.sh'), 'w') as f:
        print(cmd, file=f)
    if WQ is not None:
        # Create and submit Work Queue Task object.
        task = work_queue.Task(cmd)
        # The task priority is either an argument or a field of the
        # calculation object, defaulting to zero.
        if priority is None:
            priority = calc.priority if calc is not None else 0
        task.specify_priority(priority)
        input_paths = [os.path.abspath(os.path.join(cwd, f)) for f in inputs]
        output_paths = [os.path.abspath(os.path.join(cwd, f)) for f in outputs]
        for f in input_paths:
            task.specify_input_file(f, os.path.basename(f), cache=False)
        for f in output_paths:
            task.specify_output_file(f, os.path.basename(f), cache=False)
        task.specify_algorithm(work_queue.WORK_QUEUE_SCHEDULE_FCFS)
        task.specify_tag(tag if tag is not None else cmd)
        taskid = WQ.submit(task)
        # Keep track of the work queue task IDs belonging to each calculation.
        if calc is not None:
            task.calc = calc
            task.calc.wqids.append(taskid)
            task.calc.saveStatus('launch')
        logger.info("\x1b[94mWQ task\x1b[0m '%s'; taskid %i priority %i" % (task.tag, taskid, priority), printlvl=3)
    else:
        # Run the calculation locally.
        if calc is not None:
            calc.saveStatus('launch')
        _exec(cmd, print_command=(verbose >= 3), persist=True, cwd=cwd)
        # After executing the task, run launch() again
        # because launch() is designed to be multi-pass.
        if calc is not None:
            calc.launch()
class Calculation(object):
"""
Class representing a general refinement calculation in the workflow.
"""
calctype = "Calculation"
statlvl = 1
    def __init__(self, initial, home, **kwargs):
        """
        Initialize the calculation. This function is intended to be
        called at the end of the constructor of the derived class
        (which simply figures out the home folder).

        Parameters
        ----------
        initial : str or Molecule object
            Initial Molecule object or file name.
        home : str
            Home folder for the calculation.
        fast_restart : bool
            Read calculation status from disk and skip over ones marked as complete, failed, etc.
        parent : Calculation or None
            The calculation may contain a reference to its parent calculation
        charge : int
            Net charge of the atoms in the xyz file. If provided this takes highest priority.
        mult : int
            Spin multiplicity of the atoms in the xyz file (2*<Sz>+1). If provided this takes highest priority.

        Other recognized keyword arguments (see the inline comments below):
        name, read_only, verbose, priority, draw, gsmax, images, spectators,
        trivial, ts_branch, and the required methods/bases lists.
        """
        # Initial Molecule object or file name.
        self.initial = initial
        # Specify and create the home folder.
        self.home = os.path.abspath(home)
        if not os.path.exists(self.home):
            os.makedirs(self.home)
        # Get the calculation name (default: home folder relative to cwd).
        self.name = kwargs.pop('name', self.home.replace(os.getcwd(), '').strip('/'))
        # If we created the object using a file from another folder,
        # write the location of this file to source.txt.
        if isinstance(initial, str) and self.home not in os.path.abspath(initial):
            with open(os.path.join(self.home, 'source.txt'), 'w') as f:
                print(os.path.abspath(initial), file=f)
        # Calculations have the ability to access their parent.
        self.parent = kwargs.pop('parent', None)
        # Set charge and multiplicity.
        self.charge = kwargs.pop('charge', None)
        self.mult = kwargs.pop('mult', None)
        # If set to False, calculations marked as complete will be skipped.
        self.fast_restart = kwargs.get('fast_restart', False)
        # Read calculations only; don't run them.
        self.read_only = kwargs.get('read_only', False)
        # Set the verbosity level.
        self.verbose = kwargs.get('verbose', 0)
        # Set the priority of the calculation in Work Queue.
        self.priority = kwargs.pop('priority', 0)
        # Drawing level (see analyze_path).
        self.draw = kwargs.get('draw', 2)
        # Maximum number of growing string cycles before termination.
        self.gsmax = kwargs.get('gsmax', 600)
        # Number of images in a growing string / NEB calculation.
        self.images = kwargs.get('images', 21)
        # Whether to keep spectators in the pathway.
        self.spectators = kwargs.get('spectators', 0)
        # Whether to include trivial rearrangements (e.g. H3N + H4N+ -> H4N+ + H3N)
        self.trivial = kwargs.get('trivial', 0)
        # The sequential path for running GS and TS calculations looks like this -
        # TS calculations are launched after GS if below a certain threshold:
        #
        # GS->GS->TS->GS->TS->GS->TS(Conv)
        #
        # The parallel path for running GS and TS calculations looks like this -
        # typically there are two calculations running in parallel. This is faster
        # for completing individual pathways but less efficient in general:
        #
        # TS   TS   TS(Conv)
        # /    /    /
        # GS->GS->GS->GS->GS(Ter)
        #
        self.ts_branch = kwargs.get('ts_branch', 0)
        # Store list of methods and bases for different calculations
        # (copied so later padding does not mutate the caller's lists).
        self.methods = kwargs['methods'][:]
        self.bases = kwargs['bases'][:]
        # Save a list of Work Queue IDs belonging to this calculation.
        self.wqids = []
        # If more methods are provided than bases, then assume the biggest basis
        # is used for the later calculations (and vice versa).
        if len(self.methods) > len(self.bases):
            self.bases += [self.bases[-1] for i in range(len(self.methods) - len(self.bases))]
        elif len(self.bases) > len(self.methods):
            self.methods += [self.methods[-1] for i in range(len(self.bases) - len(self.methods))]
        # Certain keyword arguments like fast_restart get passed onto calculations
        # that they create.
        # NOTE(review): kwargs consumed with .pop() above (name, parent, charge,
        # mult, priority) are therefore NOT inherited by child calculations,
        # while the .get() ones (fast_restart, read_only, verbose, ...) are.
        self.kwargs = deepcopy(kwargs)
        # Read calculation status from file (if exists).
        self.initStatus()
def initStatus(self):
"""
Read calculation status from the .status file
which lives in the home folder of the calculation.
Possible states on disk are:
complete: Calculation is complete, don't descend into this branch unless forced
continue: Calculation may continue, run "make" on this branch
busy.12345: Calculation is running on process ID 12345, don't interfere
Variables set here:
self.status: Internal status of this calculation that affects behavior of action()
self.message: Optional informative 1-line message
"""
self.message = ''
statpath = os.path.join(self.home, '.status.%s' % self.calctype.lower())
if os.path.exists(statpath) and os.path.getsize(statpath) > 0:
statline = open(statpath).readlines()[0].strip()
else:
statline = 'ready'
statword = statline.split(None, 1)[0]
message = statline.split(None, 1)[1] if len(statline.split()) > 1 else ''
if statword.startswith('busy.'):
busypid = int(statword.replace('busy.', ''))
if busypid in pid_table():
self.saveStatus('busy')
else:
self.saveStatus('ready', display=(self.verbose >= 1))
elif self.fast_restart:
display = (self.verbose >= 3) if statword == 'ready' else 1
self.saveStatus(statword, message=message, display=display)
else:
self.saveStatus('ready', display=(self.verbose >= 3))
def saveStatus(self, status, ansi="\x1b[93m", message=None, display=True, to_disk=True):
"""
Set calculation status and also write to status file
which lives in the home folder of the calculation.
"""
statpath = os.path.join(self.home, '.status.%s' % self.calctype.lower())
statout = status
if (hasattr(self, 'status') and self.status == status):
display = False
self.status = status
# If calculation is busy, append the pid to the status.
if status == 'busy':
statout += '.%i' % os.getpid()
# Append a status message if desired.
if message != None:
self.message = message
statout += ' ' + message
# Save status to disk if desired.
if self.read_only: to_disk = False
if | |
<reponame>ipashchenko/jetsim
import math
import numpy as np
# Milliarcseconds to radians and the inverse conversion.
mas_to_rad = 4.8481368 * 1E-09
rad_to_mas = 1. / mas_to_rad
# Parsec [cm]
pc = 3.0857 * 10 ** 18
# Mass of electron [g]
m_e = 9.109382 * 10 ** (-28)
# Mass of proton [g]
m_p = 1.672621 * 10 ** (-24)
# Charge of electron [C]
# NOTE(review): this is the SI value in coulombs while the masses above are
# CGS grams; the Gaussian/CGS electron charge would be 4.803e-10 esu.
# Confirm the intended unit system before relying on absolute magnitudes.
q_e = 1.602176 * 10 ** (-19)
# Charge of proton [C] (same magnitude as the electron charge)
q_p = 1.602176 * 10 ** (-19)
# Speed of light [cm / s]
c = 3. * 10 ** 10
class AlongBorderException(Exception):
    """Domain-specific error signalling the 'along border' condition."""
# Plasma frequency; the defaults describe electrons/positrons.
def nu_plasma(n, q=q_e, m=m_e):
    """Return the plasma frequency [Hz].

    :param n:
        Concentration [cm ** (-3)]
    :param q (optional):
        Particle's charge. Default is ``q_e``.
    :param m (optional):
        Particle's mass. Default is ``m_e``.
    :return:
        Plasma frequency [Hz].
    """
    numerator = n * q ** 2.
    return np.sqrt(numerator / (math.pi * m))
# Larmor (gyro) frequency; the defaults describe electrons/positrons.
def nu_b(B, q=q_e, m=m_e):
    """Return the Larmor frequency [Hz].

    :param B:
        Magnetic field [G]
    :param q (optional):
        Particle's charge. Default is ``q_e``.
    :param m (optional):
        Particle's mass. Default is ``m_e``.
    :return:
        Larmor frequency [Hz].
    """
    gyro_denominator = 2. * math.pi * m * c
    return q * B / gyro_denominator
# TODO: redundant — equivalent to nu_b(B) * sin(n, B); kept for API
# compatibility. Defaults describe electrons/positrons.
def nu_b_tr(n, B, q=q_e, m=m_e):
    """Return the Larmor frequency [Hz] scaled by sin of the (n, B) angle.

    :param n:
        Direction of emission.
    :param B:
        Magnetic field vector [G]
    :param q (optional):
        Particle's charge. Default is ``q_e``.
    :param m (optional):
        Particle's mass. Default is ``m_e``.
    :return:
        Larmor frequency [Hz].
    """
    cross_magnitude = abs(np.cross(n, B))
    denominator = 2. * np.linalg.norm(B) * math.pi * m * c
    return q * cross_magnitude / denominator
# eta_0 normalization; the defaults describe electrons/positrons.
def eta_0(n, B, q=q_e, m=m_e):
    """Return the coefficient ``eta_0`` used in the emission coefficient.

    :param n:
        Concentration [cm ** (-3)]
    :param B:
        Magnetic field [G]
    :param q (optional):
        Particle's charge. Default is ``q_e``.
    :param m (optional):
        Particle's mass. Default is ``m_e``.
    :return:
        Coefficient ``eta_0`` used in expression for emission coefficient.
    """
    plasma_sq = nu_plasma(n, q=q, m=m) ** 2.
    larmor = nu_b(B, q=q, m=m)
    return math.pi * plasma_sq * larmor * m / c
# k_0 normalization; the defaults describe electrons/positrons.
def k_0(nu, n, B, q=q_e, m=m_e):
    """Return the coefficient ``k_0`` used in the absorption coefficient.

    :param nu:
        Frequency of radiation [Hz].
    :param n:
        Concentration [cm ** (-3)]
    :param B:
        Magnetic field [G]
    :param q (optional):
        Particle's charge. Default is ``q_e``.
    :param m (optional):
        Particle's mass. Default is ``m_e``.
    :return:
        Coefficient ``k_0`` used in expression for absorption coefficient.
    """
    plasma_sq = nu_plasma(n, q=q, m=m) ** 2.
    larmor = nu_b(B, q=q, m=m)
    return math.pi * plasma_sq * larmor / (c * nu ** 2.)
def eta_I(nu, n, B, sin_theta, s=2.5, q=q_e, m=m_e):
    """Return the synchrotron emission coefficient.

    :param nu:
        Frequency of radiation [Hz].
    :param n:
        Concentration [cm ** (-3)]
    :param B:
        Magnetic field [G]
    :param sin_theta:
        Sin of angle between direction of emission and magnetic field.
    :param s (optional):
        Power law index of electron energy distribution. Default is 2.5
    :param q (optional):
        Particle's charge. Default is ``q_e``.
    :param m (optional):
        Particle's mass. Default is ``m_e``.
    :return:
    """
    prefactor = eta_0(n, B, q=q, m=m) * sin_theta
    ratio = nu_b(B, q=q, m=m) * sin_theta / nu
    spectral = ratio ** ((s - 1.) / 2.)
    norm = 3. ** (s / 2.) / (2. * (s + 1.))
    return (prefactor * spectral * norm
            * math.gamma(s / 4. + 19. / 12.)
            * math.gamma(s / 4. - 1. / 12.))
def k_I(nu, n, B, sin_theta, s=2.5, q=q_e, m=m_e):
    """Return the synchrotron absorption coefficient.

    :param nu:
        Frequency of radiation [Hz].
    :param n:
        Concentration [cm ** (-3)]
    :param B:
        Magnetic field [G]
    :param sin_theta:
        Sin of angle between direction of emission and magnetic field.
    :param s (optional):
        Power law index of electron energy distribution. Default is 2.5
    :param q (optional):
        Particle's charge. Default is ``q_e``.
    :param m (optional):
        Particle's mass. Default is ``m_e``.
    :return:
    """
    # BUG FIX: the first gamma argument was s/4 + 11/16. The power-law
    # synchrotron absorption coefficient (Rybicki & Lightman, eq. 6.53)
    # carries Gamma((3s+2)/12) * Gamma((3s+22)/12), i.e.
    # Gamma(s/4 + 1/6) * Gamma(s/4 + 11/6) — consistent with the correct
    # constants used in ``eta_I`` above.
    return k_0(nu, n, B, q=q, m=m) * sin_theta *\
           (nu_b(B, q=q, m=m) * sin_theta / nu) ** (s / 2.) *\
           (3. ** ((s + 1.) / 2.) / 4.) *\
           math.gamma(s / 4. + 11. / 6.) * math.gamma(s / 4. + 1. / 6.)
def source_func(nu, n, B, sin_theta, s=2.5, q=q_e, m=m_e):
    """Return the source function (emission over absorption coefficient).

    :param nu:
        Frequency of radiation [Hz].
    :param n:
        Concentration [cm ** (-3)]
    :param B:
        Magnetic field [G]
    :param sin_theta:
        Sin of angle between direction of emission and magnetic field.
    :param s (optional):
        Power law index of electron energy distribution. Default is 2.5
    :param q (optional):
        Particle's charge. Default is ``q_e``.
    :param m (optional):
        Particle's mass. Default is ``m_e``.
    :return:
    """
    # BUG FIX: ``k_I`` was previously called with ``m=q``, passing the
    # particle charge where its mass belongs.
    return eta_I(nu, n, B, sin_theta, s=s, q=q, m=m) / k_I(nu, n, B, sin_theta,
                                                           s=s, q=q, m=m)
def velsum(v, u):
    """Return the relativistic sum of two 3-velocities ``v`` and ``u``.

    Both velocities are numpy 3-vectors in units of c.
    """
    # BUG FIX: the Lorentz factor is 1/sqrt(1 - |v|^2); the previous code
    # used np.linalg.norm(v) (i.e. |v|, not |v|^2) under the square root.
    # Every other gamma factor in this module is computed from v.dot(v).
    gamma_v = 1. / math.sqrt(1. - v.dot(v))
    return (1. / (1. + v.dot(u))) * (v + (1. / gamma_v) * u +
                                     (gamma_v / (1. + gamma_v)) * v.dot(u) * v)
def boost_direction(v1, v2, n1):
    """Aberrate a propagation direction between two moving frames.

    :param v1:
        Velocity of first frame relative to observer frame.
    :param v2:
        Velocity of second frame relative to observer frame.
    :param n1:
        Direction of propagation in first RF that moves with velocity ``v1``.
    :return:
        Direction in RF that moves with velocity ``v2``.
    """
    # Velocity of the second frame as seen from the first one.
    rel_v = velsum(v2, -v1)
    gamma = 1. / math.sqrt(1. - rel_v.dot(rel_v))
    projection = n1.dot(rel_v)
    # Relativistic aberration of the propagation direction.
    numerator = n1 + gamma * rel_v * (gamma * projection / (gamma + 1.) - 1.)
    return numerator / (gamma * (1. - projection))
def doppler_factor(v1, v2, n1):
    """Return the Doppler factor of frame 2 relative to frame 1.

    :param v1:
        Velocity of first frame relative to observer frame.
    :param v2:
        Velocity of second frame relative to observer frame.
    :param n1:
        Direction of propagation in first RF.
    :return:
        Value of Doppler factor.

    :note:
        To find Doppler factor for emission boosted by jet moving with velocity
        v_jet relative to observer (observer has velocity v_obs=0) use:
        >>>doopler_factor(0, v_jet, n_obs)
        To find Doppler factor of emission deboosted (in jet RF):
        >>>n_jet = boost_direction(v_jet, 0, n_obs)
        >>>doppler_factor(v_jet, 0, n_jet)
    """
    # Velocity of the second frame as seen from the first one.
    rel_v = velsum(v2, -v1)
    gamma = 1. / math.sqrt(1. - rel_v.dot(rel_v))
    return 1. / (gamma * (1. - n1.dot(rel_v)))
# G = 10.
# v2 = np.array([0, 0, math.sqrt(G**2-1)/G])
# v1 = np.array([0.0, 0, 0])
# n1 = np.array([-sin(1/G), 0, cos(1/G)])
# stokes1 = array([1., 0, 0, 0])
# TODO: add optional arg ``n2`` - direction in final rest frame. Thus make
# ``n1`` also optional.
def transfer_stokes(stokes1, v1, v2, n1, bf2):
"""
Transfer stokes vector from frame (1) that has velocity v1 in observer frame
to frame (2) that has velocity v2 in observer frame. Index 2 means value in
second (final) rest frame. Index 1 means value in first (initial) rest
frame.
:param stokes1:
Stokes vector in RF that has velocity v1 relative to observer frame.
:param v1:
Velocity of first frame relative to observer frame.
:param v2:
Velocity of second frame relative to observer frame.
:param n1:
Direction of propagation in first RF.
:param bf2:
Direction of B-field in second RF.
:return:
Stokes vector in second rest frame.
"""
# Find Doppler factor of v2 relative to v1 and direction n1 in first RF.
v2r1 = velsum(v2, -v1)
G2r1 = 1. / math.sqrt(1. - v2r1.dot(v2r1))
# Direction of propagation in second RF.
# array([-0.9999986 , 0. , 0.00167561])
n2 = (n1 + G2r1 * v2r1 * (G2r1 * n1.dot(v2r1) / (G2r1 + 1.) - 1.)) / \
(G2r1 * (1. - n1.dot(v2r1)))
D2r1 = 1. / (G2r1 * (1. - n1.dot(v2r1)))
# print "D = ", D2r1
I1, Q1, U1, V1 = stokes1
LP1 = math.sqrt(Q1 ** 2. + U1 ** 2.)
chi1 = math.atan2(U1, Q1)
# Polarization angle in first RF
# array([ 0., 1., 0.])
e1 = np.array([n1[2] * math.sin(chi1),
math.cos(chi1),
-n1[0] * math.sin(chi1)])
# Polarization angle in second RF
# | |
<reponame>kiranmusze/deutschland<filename>deutschland/jobsuche/model/job_search_response_aggregierungen_plzebene2.py<gh_stars>0
"""
Bundesagentur für Arbeit: Jobsuche API
Die größte Stellendatenbank Deutschlands durchsuchen, Details zu Stellenanzeigen und Informationen über Arbeitgeber abrufen. <br><br> Die Authentifizierung funktioniert per OAuth 2 Client Credentials mit JWTs. Folgende Client-Credentials können dafür verwendet werden:<br><br> **ClientID:** c003a37f-024f-462a-b36d-b001be4cd24a <br> **ClientSecret:** 32a39620-32b3-4307-9aa1-511e3d7f48a8 # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from deutschland.jobsuche.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from deutschland.jobsuche.exceptions import ApiAttributeError
class JobSearchResponseAggregierungenPlzebene2(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"_10": (int,), # noqa: E501
"_12": (int,), # noqa: E501
"_13": (int,), # noqa: E501
"_14": (int,), # noqa: E501
"_15": (int,), # noqa: E501
"_16": (int,), # noqa: E501
"_17": (int,), # noqa: E501
"_18": (int,), # noqa: E501
"_19": (int,), # noqa: E501
"_20": (int,), # noqa: E501
"_21": (int,), # noqa: E501
"_22": (int,), # noqa: E501
"_23": (int,), # noqa: E501
"_24": (int,), # noqa: E501
"_25": (int,), # noqa: E501
"_26": (int,), # noqa: E501
"_27": (int,), # noqa: E501
"_28": (int,), # noqa: E501
"_29": (int,), # noqa: E501
"_30": (int,), # noqa: E501
"_31": (int,), # noqa: E501
"_32": (int,), # noqa: E501
"_33": (int,), # noqa: E501
"_34": (int,), # noqa: E501
"_35": (int,), # noqa: E501
"_36": (int,), # noqa: E501
"_37": (int,), # noqa: E501
"_38": (int,), # noqa: E501
"_39": (int,), # noqa: E501
"_40": (int,), # noqa: E501
"_41": (int,), # noqa: E501
"_42": (int,), # noqa: E501
"_44": (int,), # noqa: E501
"_45": (int,), # noqa: E501
"_46": (int,), # noqa: E501
"_47": (int,), # noqa: E501
"_48": (int,), # noqa: E501
"_49": (int,), # noqa: E501
"_50": (int,), # noqa: E501
"_51": (int,), # noqa: E501
"_52": (int,), # noqa: E501
"_53": (int,), # noqa: E501
"_54": (int,), # noqa: E501
"_55": (int,), # noqa: E501
"_56": (int,), # noqa: E501
"_57": (int,), # noqa: E501
"_58": (int,), # noqa: E501
"_59": (int,), # noqa: E501
"_60": (int,), # noqa: E501
"_61": (int,), # noqa: E501
"_63": (int,), # noqa: E501
"_64": (int,), # noqa: E501
"_65": (int,), # noqa: E501
"_66": (int,), # noqa: E501
"_67": (int,), # noqa: E501
"_68": (int,), # noqa: E501
"_69": (int,), # noqa: E501
"_70": (int,), # noqa: E501
"_71": (int,), # noqa: E501
"_72": (int,), # noqa: E501
"_73": (int,), # noqa: E501
"_74": (int,), # noqa: E501
"_75": (int,), # noqa: E501
"_76": (int,), # noqa: E501
"_77": (int,), # noqa: E501
"_78": (int,), # noqa: E501
"_79": (int,), # noqa: E501
"_80": (int,), # noqa: E501
"_81": (int,), # noqa: E501
"_82": (int,), # noqa: E501
"_83": (int,), # noqa: E501
"_84": (int,), # noqa: E501
"_85": (int,), # noqa: E501
"_86": (int,), # noqa: E501
"_87": (int,), # noqa: E501
"_88": (int,), # noqa: E501
"_89": (int,), # noqa: E501
"_90": (int,), # noqa: E501
"_91": (int,), # noqa: E501
"_92": (int,), # noqa: E501
"_93": (int,), # noqa: E501
"_94": (int,), # noqa: E501
"_95": (int,), # noqa: E501
"_96": (int,), # noqa: E501
"_97": (int,), # noqa: E501
"_98": (int,), # noqa: E501
"_99": (int,), # noqa: E501
"_01": (int,), # noqa: E501
"_02": (int,), # noqa: E501
"_03": (int,), # noqa: E501
"_04": (int,), # noqa: E501
"_06": (int,), # noqa: E501
"_07": (int,), # noqa: E501
"_08": (int,), # noqa: E501
"_09": (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"_10": "10", # noqa: E501
"_12": "12", # noqa: E501
"_13": "13", # noqa: E501
"_14": "14", # noqa: E501
"_15": "15", # noqa: E501
"_16": "16", # noqa: E501
"_17": "17", # noqa: E501
"_18": "18", # noqa: E501
"_19": "19", # noqa: E501
"_20": "20", # noqa: E501
"_21": "21", # noqa: E501
"_22": "22", # noqa: E501
"_23": "23", # noqa: E501
"_24": "24", # noqa: E501
"_25": "25", # noqa: E501
"_26": "26", # noqa: E501
"_27": "27", # noqa: E501
"_28": "28", # noqa: E501
"_29": "29", # noqa: E501
"_30": "30", # noqa: E501
"_31": "31", # noqa: E501
"_32": "32", # noqa: E501
"_33": "33", # noqa: E501
"_34": "34", # noqa: E501
"_35": "35", # noqa: E501
"_36": "36", # noqa: E501
"_37": "37", # noqa: E501
"_38": "38", # noqa: E501
"_39": "39", # noqa: E501
"_40": "40", # noqa: E501
"_41": "41", # noqa: E501
"_42": "42", # noqa: E501
"_44": "44", # noqa: E501
"_45": "45", # noqa: E501
"_46": "46", # noqa: E501
"_47": "47", # noqa: E501
"_48": "48", # noqa: E501
"_49": "49", # noqa: E501
"_50": "50", # noqa: E501
"_51": "51", # noqa: E501
"_52": "52", # noqa: E501
"_53": "53", # noqa: E501
"_54": "54", # noqa: E501
"_55": "55", # noqa: E501
"_56": "56", # noqa: E501
"_57": "57", # noqa: E501
"_58": "58", # noqa: E501
"_59": "59", # noqa: E501
"_60": "60", # noqa: E501
"_61": "61", # noqa: E501
"_63": "63", # noqa: E501
"_64": "64", # noqa: E501
"_65": "65", # noqa: E501
"_66": "66", # noqa: E501
"_67": "67", # noqa: E501
"_68": "68", # noqa: E501
"_69": "69", # noqa: E501
"_70": "70", # noqa: E501
"_71": "71", # noqa: E501
"_72": "72", # noqa: E501
"_73": "73", # noqa: E501
"_74": "74", # noqa: E501
"_75": "75", # noqa: E501
"_76": "76", # noqa: E501
"_77": "77", # noqa: E501
"_78": "78", # noqa: E501
"_79": "79", # noqa: E501
"_80": "80", # noqa: E501
"_81": "81", # noqa: E501
"_82": "82", # noqa: E501
"_83": "83", # noqa: E501
"_84": "84", # noqa: E501
"_85": "85", # noqa: E501
"_86": "86", # noqa: E501
"_87": "87", # noqa: E501
"_88": "88", # noqa: E501
"_89": "89", # noqa: E501
"_90": "90", # noqa: E501
"_91": "91", # noqa: E501
"_92": "92", # noqa: E501
"_93": "93", # noqa: E501
"_94": "94", # noqa: E501
"_95": "95", # noqa: E501
"_96": "96", # noqa: E501
"_97": "97", # noqa: E501
"_98": "98", # noqa: E501
"_99": "99", # noqa: E501
"_01": "01", # noqa: E501
"_02": "02", # noqa: E501
"_03": "03", # noqa: E501
"_04": "04", # noqa: E501
"_06": "06", # noqa: E501
"_07": "07", # noqa: E501
"_08": "08", # noqa: E501
"_09": | |
<reponame>hellojoko/autoparse<filename>autoparse/automaton_fitter.py
import networkx as nx
import matplotlib.pyplot as plt
from autoparse.automaton import preprocess, Automaton
class Transition:
    """A weighted, directed edge between two automaton states.

    Carries the word it consumes, the set of raw transition ids merged into
    it, and per-variable match counts used later to decide whether the word
    should be generalized into a variable placeholder (``<$var>``).
    """
    def __init__(
        self,
        word: str,
        state_in,
        state_out,
        transition_ids=None,
        weight: int = 1,
        variables=None,
    ):
        """
        Parameters
        ----------
        word : str
            Token consumed by this transition.
        state_in, state_out :
            Source and destination states.
        transition_ids : iterable of int, optional
            Ids of raw transitions merged into this one (must be non-empty).
        weight : int
            Number of documents that used this transition.
        variables : dict, optional
            Maps variable name -> number of matches observed for `word`.
        """
        self.word = word
        self.state_in = state_in
        self.state_out = state_out
        self.weight = weight
        # FIX: previous signature used mutable defaults ([] / {}); the
        # default dict was shared and mutated across instances by
        # TransitionSet.add. Create fresh objects instead.
        self.variables = {} if variables is None else variables
        self.transitions_ids = set() if transition_ids is None else set(transition_ids)
        # representative id; raises StopIteration when no ids were given,
        # exactly like the previous implementation
        self.tid = next(iter(self.transitions_ids))
        self.p = {}
    def make_generic(self):
        """Replace the word with the most frequently matched variable
        placeholder (``<$var>``), or ``*`` when no variable matched.
        Return the new word."""
        generic = "*"
        best_count = 0
        for var, count in self.variables.items():
            if count > best_count:
                generic = "<$" + var + ">"
                best_count = count
        self.word = generic
        return generic
    def __eq__(self, other):
        # Transitions are equal when they connect the same states with the
        # same word; ids, weight and variables are ignored.
        if not isinstance(other, self.__class__):
            return False
        return (
            self.word == other.word
            and self.state_in == other.state_in
            and self.state_out == other.state_out
        )
    def __hash__(self):
        # Coarser than __eq__ (word not hashed) -- valid, since equal
        # transitions still hash equally.
        return hash(str(self.state_in) + str(self.state_out))
    def __repr__(self):
        return " {:6d} --{:^20}--> {:6d} ".format(
            self.state_in.id, self.word, self.state_out.id
        )
class TransitionSet:
    """A set implementation that adds weights when adding a transition multiple times"""
    def __init__(self):
        # maps transition -> canonical stored transition
        # (relies on Transition's __hash__/__eq__)
        self._dict = {}
    def __contains__(self, item):
        return item in self._dict
    def __iter__(self):
        return iter(self._dict.keys())
    def __len__(self):
        return len(self._dict)
    def __repr__(self):
        return repr(self._dict)
    def _add(self, item):
        """Insert or replace without accumulating weight (internal use)."""
        self._dict[item] = item
    def add(self, item):
        """Insert `item`; when an equal transition already exists, fold the
        item's weight, transition ids and per-variable counts into it."""
        if item not in self._dict:
            self._dict[item] = item
        else:
            transition = self._dict[item]
            transition.weight += item.weight
            transition.transitions_ids |= item.transitions_ids
            for var, count in item.variables.items():
                transition.variables[var] = transition.variables.get(var, 0) + count
    def remove(self, item):
        """Discard `item` if present; missing items are ignored."""
        if item in self._dict:
            del self._dict[item]
class State:
    """A node of the automaton graph.

    Edges are Transition objects kept in weight-accumulating TransitionSets
    for both directions (transitions_in / transitions_out).
    """
    def __init__(self, node_id: int, word: str):
        self.id = node_id
        self.transitions_in = TransitionSet()   # edges arriving at this state
        self.transitions_out = TransitionSet()  # edges leaving this state
        self.word = word
    @property
    def weight(self):
        # Total traffic through this state: sum of incoming edge weights.
        total_weight = 0
        for t in self.transitions_in:
            total_weight += t.weight
        return total_weight
    @property
    def child(self):
        # Generator over direct successor states.
        for t in self.transitions_out:
            yield t.state_out
    @property
    def parents(self):
        # Generator over direct predecessor states.
        for t in self.transitions_in:
            yield t.state_in
    def merge_on(self, state):
        """Redirect every edge of this state onto `state`, detaching self.

        Incoming edges are re-created with `state` as destination, outgoing
        edges with `state` as source; the originals are then removed from
        both of their endpoints.
        """
        transitions_to_delete = []
        for t in self.transitions_in:
            new_transition = Transition(
                t.word,
                t.state_in,
                state,
                transition_ids=t.transitions_ids,
                weight=t.weight,
                variables=t.variables,
            )
            state.add_transition_in(new_transition)
            transitions_to_delete.append(t)
        for t in self.transitions_out:
            new_transition = Transition(
                t.word,
                state,
                t.state_out,
                transition_ids=t.transitions_ids,
                weight=t.weight,
                variables=t.variables,
            )
            state.add_transition_out(new_transition)
            transitions_to_delete.append(t)
        for t in transitions_to_delete:
            # removes t from its destination's in-set and, via the private
            # helper, from its source's out-set
            t.state_out.remove_transition_in(t)
    def generify(self, limit_weight):
        # Rarely-visited states become generic: '*' by default, or '<$var>'
        # when some incoming transition matched a variable's examples.
        if self.weight <= limit_weight:
            self.word = "*"
            for t in self.transitions_in:
                generic = t.make_generic()
                if generic != "*":
                    self.word = generic
    def get_generic_ancestors(self):
        """Return (ancestors, intermediary_states): the closest non-generic
        ancestors, and the chain of generic ('*') states between them and
        self. The transitions themselves are dropped later, by
        merge_generic_parents."""
        if self.id == 0 or not self.word == "*":
            return [self], []
        else:
            ancestors = []
            intermediary_states = [self]
            for transition in self.transitions_in:
                new_ancestors, new_intermediary_states = (
                    transition.state_in.get_generic_ancestors()
                )
                ancestors += new_ancestors
                intermediary_states += new_intermediary_states
            return ancestors, intermediary_states
    def merge_generic_parents(self):
        # Collapse chains of generic parents: drop the intermediate edges and
        # reconnect this state directly to the non-generic ancestors, carrying
        # the union of the removed transitions' ids.
        if self.id == 0 or not self.word == "*":
            return
        ancestors, intermediary_states = self.get_generic_ancestors()
        transitions_ids = set()
        for state in intermediary_states:
            transitions_to_remove = list(state.transitions_in)
            for transition in transitions_to_remove:
                transitions_ids |= transition.transitions_ids
                state.remove_transition_in(transition)
        for ancestor in ancestors:
            self.add_transition_in(
                Transition(self.word, ancestor, self, transitions_ids)
            )
    def get_trivial_group(self):
        # Parents whose single outgoing edge leads here can safely be merged
        # together; only meaningful when this state has several parents.
        if len(self.transitions_in) <= 1:
            return set()
        merge_group = set()
        for parent in self.parents:
            if len(parent.transitions_out) == 1:
                merge_group.add(parent.id)
        return merge_group
    # The four public methods below keep BOTH endpoints' transition sets in
    # sync. The name-mangled private ones update a single side only; note
    # they are called across instances (legal inside the class) and that
    # _add does not accumulate weight a second time.
    def add_transition_in(self, transition):
        self.transitions_in.add(transition)
        transition.state_in.__add_transition_out(transition)
    def add_transition_out(self, transition):
        self.transitions_out.add(transition)
        transition.state_out.__add_transition_in(transition)
    def remove_transition_in(self, transition):
        self.transitions_in.remove(transition)
        transition.state_in.__remove_transition_out(transition)
    def remove_transition_out(self, transition):
        self.transitions_out.remove(transition)
        transition.state_out.__remove_transition_in(transition)
    def __add_transition_in(self, transition):
        self.transitions_in._add(transition)
    def __add_transition_out(self, transition):
        self.transitions_out._add(transition)
    def __remove_transition_in(self, transition):
        self.transitions_in.remove(transition)
    def __remove_transition_out(self, transition):
        self.transitions_out.remove(transition)
class AutomatonFitter:
    """A class that fits an automaton on a list of documents.

    The documents are assumed to be produced by a small number of templates
    that include both fixed and variable words (produced with str.format()
    for instance). The fitted automaton will guess which transitions hold
    variables and can extract them from new documents.

    Methods
    -------
    fit:
        Fit the automaton
    build:
        Return an executable automaton, should be called after fit
    pprint:
        Pretty printer using Networkx and matplotlib
    print:
        Regular printer in string format
    """
def __init__(self, docs, variables={}, order: int = 3):
"""Initialize the automaton
Parameters
----------
docs : str[]
Documents to fit the automaton on
variables: {str: str[]}
keys are the name of variables (e.g. city) an values list of examples (e.g. ["Paris", "London", ...])
order: int
The memory size of the internal markov model used to predict path probability.
"""
self.nb_docs = len(docs)
self.start_state = State(0, "<start>")
self.stop_state = State(1, "<stop>")
self.states = {0: self.start_state, 1: self.stop_state}
self.stateCounter = 2
self.transitionCounter = 1
self.transitions_sequences = []
self.order = order
for var in variables.keys():
variables[var] = set([v.lower() for v in variables[var]])
for doc in docs:
transition_sequence = []
previous = self.stop_state
doc = preprocess(doc)
doc = " ".join(doc.split("/"))
for word in doc.split(" ")[::-1]:
state = self.create_state(word)
var_count = self.get_variables(previous.word, variables)
transition_out, tid = self.create_transition(state, previous, var_count)
transition_sequence.append(tid)
state.add_transition_out(transition_out)
self.states[state.id] = state
previous = state
transition_out, tid = self.create_transition(self.start_state, state, {})
transition_sequence.append(tid)
self.start_state.add_transition_out(transition_out)
transition_sequence = (transition_sequence + [0] * order)[::-1]
self.transitions_sequences.append(transition_sequence)
@staticmethod
def get_variables(word, variables):
"""
Return the list of variables this word is matching based on examples
word: string
variables: {string: set()}
return: {string: int}
"""
var_count = {}
for var, examples in variables.items():
if word in examples:
var_count[var] = 1
return var_count
def create_transition(self, state_in, state_out, variables_count):
tid = self.transitionCounter
new_transition = Transition(
state_out.word, state_in, state_out, [tid], variables=variables_count
)
self.transitionCounter += 1
return new_transition, tid
def create_state(self, word):
new_state = State(self.stateCounter, word)
self.stateCounter += 1
return new_state
def iterate_states(self, f, acc=None):
"""Apply `acc = f(state, acc)` on each state, return acc"""
done = set()
stack = [self.stop_state]
while len(stack) > 0:
state = stack.pop()
if state.id in done:
continue
done.add(state.id)
acc = f(state, acc)
stack.extend(state.parents)
return acc
def count_word(self):
def add_word(state, word_count):
if not state.word in word_count:
word_count[state.word] = 0
word_count[state.word] += 1
return word_count
return self.iterate_states(add_word, {})
def count_variables(self):
def add_vars(state, vars_count):
for t in state.transitions_in:
for var, count in t.variables.items():
var = "<$" + var + ">"
if not var in vars_count:
vars_count[var] = 0
vars_count[var] += count
return vars_count
return self.iterate_states(add_vars, {})
def make_state_generic(self, threshold: float = 0):
limit_weight = threshold * self.nb_docs
def generify(state, limit_weight):
state.generify(limit_weight)
return limit_weight
self.iterate_states(generify, limit_weight)
def simplify_generic_chains(self):
def merge_generics(state, acc):
state.merge_generic_parents()
return acc
self.iterate_states(merge_generics)
def merge_trivial_groups(self):
def trivial_group(state, group_list):
group_list.append(state.get_trivial_group())
return group_list
merge_group_list = self.iterate_states(trivial_group, [])
for group in merge_group_list:
self.merge_group(group, 0)
def remove_rare_transitions(self, freq: float):
limit_weight = freq * self.nb_docs
def remove_rare_out_transitions(state, limit_weight):
transitions_to_remove = []
for t in state.transitions_out:
if t.weight <= limit_weight:
transitions_to_remove.append(t)
for t in transitions_to_remove:
state.remove_transition_out(t)
return limit_weight
self.iterate_states(remove_rare_out_transitions, limit_weight)
def merge_group(self, merge_group, threshold):
if (
not len(merge_group) >= 2
or not len(merge_group) >= threshold * self.nb_docs
):
return False
merge_state = self.states[next(iter(merge_group))]
merge_group.remove(merge_state.id)
def merge(state, acc):
if state.id in merge_group:
state.merge_on(merge_state)
return acc
self.iterate_states(merge)
return True
    def find_merge_group(self, word: str):
        """Collect the ids of states carrying `word` that may be merged.

        Walks the graph backwards from the stop state. A state is pushed to
        its parents only once all of its outgoing edges were processed, so
        each candidate sees the full set of matching descendants. Two
        matching states on the same path (ancestor/descendant) cannot be
        merged; such pairs are recorded and resolved by
        remove_incompatibles.
        """
        incompatibles = set()
        merge_group = set()
        stack = [(self.stop_state, set())]  # (state, set of descendants)
        visited = {}  # state -> [nb_visit, set of descendants]
        while len(stack) > 0:
            state, descendants = stack.pop()
            new_descendant = set()
            if state.word == word:
                new_descendant.add(state.id)
                merge_group.add(state.id)
                # record the conflict in both orientations
                for descendant_id in descendants:
                    incompatibles.add((descendant_id, state.id))
                    incompatibles.add((state.id, descendant_id))
            if not state in visited:
                visited[state] = [0, set()]
            visited[state][0] += 1
            visited[state][1] |= descendants
            visited[state][1] |= new_descendant
            # propagate upwards only after every outgoing edge was seen
            if visited[state][0] >= len(state.transitions_out):
                descendants = visited[state][1]
                for parent in state.parents:
                    stack.append((parent, descendants))
        return self.remove_incompatibles(merge_group, incompatibles)
def remove_incompatibles(self, merge_group, incompatibles):
incompatible_count = {}
for state1, state2 in incompatibles:
if not state1 in incompatible_count:
incompatible_count[state1] = 0
if not state2 in incompatible_count:
incompatible_count[state2] = 0
incompatible_count[state1] += 1
incompatible_count[state2] += 1
for state1, state2 in incompatibles:
if state1 in merge_group and state2 in merge_group:
if incompatible_count[state1] > incompatible_count[state2]:
merge_group.remove(state1)
else:
merge_group.remove(state2)
return merge_group
def merge_word(self, word: str, threshold: float = 0):
return self.merge_group(self.find_merge_group(word), threshold)
def reduce(self, threshold: float = 0, variables: bool = False, word_black_list=[]):
"""
Merge either on words or on variables. Should merge on variable only after
`self.make_state_generic` has been called.
"""
count_function = self.count_word
if variables == True:
count_function = self.count_variables
done = False
black_list = set([w.lower() for w in word_black_list])
for word, nb_occurrences in self.count_word().items():
if nb_occurrences < threshold * self.nb_docs:
black_list.add(word)
while not done:
transition_count = [
(word, nb_occurrences)
for word, nb_occurrences in count_function().items()
if word not in black_list
]
if len(transition_count) == 0:
done = True
break
transition_count.sort(key=lambda x: x[1])
word, count = transition_count.pop()
if count | |
import sys, os
import re
import math
import datetime as dt
import numpy as np
from scipy import stats
import urepr
from loguniform import LogUniform, ModifiedLogUniform
from kumaraswamy import kumaraswamy
# CONSTANTS
# conversion factor: Jupiter masses -> Earth masses
mjup2mearth = 317.8284065946748 # 1 Mjup in Mearth
template_setup = """
[kima]
GP: false
GP_kernel: 0
MA: false
hyperpriors: false
trend: false
degree: 0
multi_instrument: false
known_object: false
n_known_object: 0
studentt: false
indicator_correlations: false
indicators:
file: filename.txt
units: kms
skip: 0
multi: false
files:
M0_epoch: 0.0
[priors.general]
[priors.planets]
"""
def need_model_setup(exception):
    """Print instructions to create kima_model_setup.txt, then raise `exception`."""
    messages = (
        "",
        "[FATAL] Couldn't find the file kima_model_setup.txt",
        "Probably didn't include a call to save_setup() in the",
        "RVModel constructor (this is the recommended solution).",
        "As a workaround, create a file called `kima_model_setup.txt`,",
        "and add to it (after editting!) the following options:",
        template_setup,
    )
    for message in messages:
        print(message)
    # suppress the traceback so only the advice above is visible
    sys.tracebacklimit = 0
    raise exception
def read_datafile(datafile, skip):
    """
    Read data from `datafile` for multiple instruments.

    Can be str, in which case the 4th column is assumed to contain an integer
    identifier of the instrument.
    Or list, in which case each element will be one different filename
    containing three columns each.
    """
    if isinstance(datafile, list):
        # one file per instrument: stack the rows and tag each file's rows
        # with a 1-based instrument id
        all_data = np.empty((0, 3))
        all_obs = np.empty((0,))
        for index, fname in enumerate(datafile):
            chunk = np.loadtxt(fname, usecols=(0, 1, 2), skiprows=skip, ndmin=2)
            all_data = np.append(all_data, chunk, axis=0)
            all_obs = np.append(all_obs, np.full(chunk.shape[0], float(index + 1)))
        return all_data, all_obs
    # single file: instrument ids are in the 4th column; remap them to a
    # contiguous 1..n range (assumes rows are grouped by instrument)
    data = np.loadtxt(datafile, usecols=(0, 1, 2), skiprows=skip)
    obs = np.loadtxt(datafile, usecols=(3,), skiprows=skip, dtype=int)
    unique_ids = np.unique(obs)
    position = 0
    for row, identifier in enumerate(obs):
        if identifier != unique_ids[position]:
            position += 1
        obs[row] = position + 1
    return data, obs
def show_tips():
    """ Show a few tips on how to use kima """
    tips = (
        "Press Ctrl+C in any of kima's plots to copy the figure.",
        "Run 'kima-showresults all' to plot every figure.",
        "Use the 'kima-template' script to create a new bare-bones directory.")
    # only sometimes, otherwise it's annoying :)
    if np.random.rand() < 0.2:
        print('[kima TIP] ' + np.random.choice(tips))
def rms(array):
    """ Root mean square of array """
    return np.sqrt((array ** 2).sum() / array.size)
def wrms(array, weights):
    """ Weighted root mean square of array, given weights """
    mu = np.average(array, weights=weights)
    squared_dev = (array - mu) ** 2
    return np.sqrt((weights * squared_dev).sum() / weights.sum())
def apply_argsort(arr1, arr2, axis=-1):
    """
    Apply arr1.argsort() on arr2, along `axis`.
    """
    # check matching shapes
    assert arr1.shape == arr2.shape, "Shapes don't match!"
    i = list(np.ogrid[[slice(x) for x in arr1.shape]])
    i[axis] = arr1.argsort(axis)
    # FIX: index with a tuple -- indexing with a list of arrays was
    # deprecated in NumPy and is an error in recent versions
    return arr2[tuple(i)]
def percentile68_ranges(a, min=None, max=None):
    """Median and 68% upper/lower ranges of `a`, restricted to (min, max).

    Returns (median, up - median, median - low) from the 16th/50th/84th
    percentiles of the masked values.
    """
    if min is None:
        mask = np.ones_like(a, dtype=bool) if max is None else (a < max)
    elif max is None:
        mask = a > min
    else:
        mask = (a > min) & (a < max)
    low, mid, up = np.percentile(a[mask], [16, 50, 84])
    return (mid, up - mid, mid - low)
def percentile68_ranges_latex(a, min=None, max=None):
    """LaTeX-formatted median of `a` with 68% upper/lower uncertainties."""
    med, up, low = percentile68_ranges(a, min, max)
    return '$' + urepr.core.uformatul(med, up, low, 'L') + '$'
def percentile_ranges(a, percentile=68, min=None, max=None):
    """Median and `percentile`% upper/lower ranges of `a` within (min, max)."""
    if min is None:
        mask = np.ones_like(a, dtype=bool) if max is None else (a < max)
    elif max is None:
        mask = a > min
    else:
        mask = (a > min) & (a < max)
    half = percentile / 2
    low, mid, up = np.percentile(a[mask], [50 - half, 50, 50 + half])
    return (mid, up - mid, mid - low)
def percentile_ranges_latex(a, percentile, min=None, max=None):
    """LaTeX-formatted median of `a` with `percentile`% uncertainties."""
    med, up, low = percentile_ranges(a, percentile, min, max)
    return '$' + urepr.core.uformatul(med, up, low, 'L') + '$'
def clipped_mean(arr, min, max):
    """ Mean of `arr` between `min` and `max` """
    keep = (arr > min) & (arr < max)
    return np.mean(arr[keep])
def clipped_std(arr, min, max):
    """ std of `arr` between `min` and `max` """
    keep = (arr > min) & (arr < max)
    return np.std(arr[keep])
def get_planet_mass(P, K, e, star_mass=1.0, full_output=False, verbose=False):
    """
    Calculate the planet (minimum) mass Msini given orbital period `P`,
    semi-amplitude `K`, eccentricity `e`, and stellar mass. If star_mass is a
    tuple with (estimate, uncertainty), this (Gaussian) uncertainty will be
    taken into account in the calculation.
    Units:
        P [days]
        K [m/s]
        e []
        star_mass [Msun]
    Returns:
        if P is float:
            if star_mass is float:
                Msini [Mjup], Msini [Mearth]
            if star_mass is tuple:
                (Msini, error_Msini) [Mjup], (Msini, error_Msini) [Mearth]
        if P is array:
            if full_output: mean Msini [Mjup], std Msini [Mjup], Msini [Mjup] (array)
            else: mean Msini [Mjup], std Msini [Mjup], mean Msini [Mearth], std Msini [Mearth]
    """
    if verbose: print('Using star mass = %s solar mass' % star_mass)
    # EAFP dispatch: float(P) succeeds for scalars, raises TypeError for arrays
    try:
        P = float(P)
        # calculate for one value of the orbital period
        # then K, e, and star_mass should also be floats
        assert isinstance(K, float) and isinstance(e, float)
        uncertainty_star_mass = False
        if isinstance(star_mass, tuple) or isinstance(star_mass, list):
            # Monte Carlo over the stellar mass (5000 draws) to propagate
            # its Gaussian uncertainty into the mass estimate
            star_mass = np.random.normal(star_mass[0], star_mass[1], 5000)
            uncertainty_star_mass = True
        # standard RV mass-function approximation, result in Jupiter masses
        m_mj = 4.919e-3 * star_mass**(2. / 3) * P**(1. / 3) * K * np.sqrt(1 - e**2)
        m_me = m_mj * mjup2mearth
        if uncertainty_star_mass:
            return (m_mj.mean(), m_mj.std()), (m_me.mean(), m_me.std())
        else:
            return m_mj, m_me
    except TypeError:
        # float(P) failed: calculate for an array of periods
        if isinstance(star_mass, tuple) or isinstance(star_mass, list):
            # include (Gaussian) uncertainty on the stellar mass
            star_mass = np.random.normal(star_mass[0], star_mass[1], P.size)
        m_mj = 4.919e-3 * star_mass**(2. / 3) * P**(1. / 3) * K * np.sqrt(1 - e**2)
        m_me = m_mj * mjup2mearth
        if full_output:
            return m_mj.mean(), m_mj.std(), m_mj
        else:
            return (m_mj.mean(), m_mj.std(), m_me.mean(), m_me.std())
def get_planet_mass_latex(P, K, e, star_mass=1.0, earth=False, **kargs):
    """LaTeX string of the planet mass, in Mearth if `earth` else Mjup."""
    out = get_planet_mass(P, K, e, star_mass, full_output=True, verbose=False)
    if isinstance(P, float):
        return '$%f$' % (out[1] if earth else out[0])
    samples = out[2] * mjup2mearth if earth else out[2]
    return percentile68_ranges_latex(samples)
def get_planet_semimajor_axis(P, K, star_mass=1.0, full_output=False,
                              verbose=False):
    """
    Calculate the semi-major axis of the planet's orbit given the orbital
    period `P`, semi-amplitude `K`, and stellar mass.
    Units:
        P [days]
        K [m/s]
        star_mass [Msun]
    Returns:
        if P is float: a [AU]
        if P is array:
            if full_output: mean a [AU], std a [AU], a [AU] (array)
            else: mean a [AU], std a [AU]
    """
    if verbose: print('Using star mass = %s solar mass' % star_mass)
    # gravitational constant G in AU**3 / (Msun * day**2), to the power of 1/3
    f = 0.0666378476025686
    if isinstance(P, float):
        # scalar case: K and star_mass must be plain floats as well
        assert isinstance(K, float)
        assert isinstance(star_mass, float)
        return f * star_mass**(1. / 3) * (P / (2 * np.pi))**(2. / 3)  # in AU
    if isinstance(star_mass, (tuple, list)):
        # propagate the Gaussian stellar-mass uncertainty
        star_mass = star_mass[0] + star_mass[1] * np.random.randn(P.size)
    a = f * star_mass**(1. / 3) * (P / (2 * np.pi))**(2. / 3)
    if full_output:
        return a.mean(), a.std(), a
    return a.mean(), a.std()
def get_planet_semimajor_axis_latex(P, K, star_mass=1.0, earth=False, **kargs):
    """LaTeX string of the orbital semi-major axis in AU."""
    out = get_planet_semimajor_axis(P, K, star_mass, full_output=True,
                                    verbose=False)
    value = out if isinstance(P, float) else out[0]
    return '$%f$' % value
def lighten_color(color, amount=0.5):
    """
    Lightens the given color by multiplying (1-luminosity) by the given amount.
    Input can be matplotlib color string, hex string, or RGB tuple.
    Examples:
    >> lighten_color('g', 0.3)
    >> lighten_color('#F034A3', 0.6)
    >> lighten_color((.3,.55,.1), 0.5)
    """
    import matplotlib.colors as mc
    import colorsys
    try:
        c = mc.cnames[color]
    except (KeyError, TypeError):
        # FIX: was a bare `except:` (also caught KeyboardInterrupt etc.);
        # not a named color -- hex string or RGB tuple/list, use it directly
        c = color
    c = colorsys.rgb_to_hls(*mc.to_rgb(c))
    return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def _prior_to_dist():
    """ Convert a prior name to a prior object """
    return {
        'Uniform': stats.uniform,
        'LogUniform': stats.reciprocal,
        'ModifiedLogUniform': ModifiedLogUniform,
        'Gaussian': stats.norm,
        'TruncatedGaussian': stats.truncnorm,
        'Exponential': stats.expon,
        'Kumaraswamy': kumaraswamy,
        'Laplace': stats.laplace,
        'Cauchy': stats.cauchy,
    }
def _get_prior_parts(prior):
if not isinstance(prior, str):
raise ValueError('`prior` should be a string, got', prior)
try:
inparens = re.search(r'\((.*?)\)', prior).group(1)
except AttributeError:
raise ValueError('Cannot decode "%s", seems badly formed' % prior)
try:
truncs = re.search(r'\[(.*?)\]', prior).group(1)
except AttributeError:
truncs = ''
name = prior[:prior.find('(')]
return inparens, truncs, name
def find_prior_limits(prior):
"""
Find lower and upper limits of a prior from the kima_model_setup.txt file.
"""
inparens, truncs, name = _get_prior_parts(prior)
if 'Truncated' in name:
return tuple(float(v) for v in truncs.split(','))
if name == 'ModifiedLogUniform':
return (0.0, float(inparens.split(';')[1]))
if name == 'Uniform':
v1, v2 = inparens.split(';')
return (float(v1), float(v2) - float(v1))
if name == 'LogUniform':
return tuple(float(v) for | |
<reponame>sidorenkom/specter-diy<filename>src/apps/wallets/manager.py
from app import BaseApp
from gui.screens import Menu, InputScreen, Prompt, TransactionScreen
from .screens import WalletScreen, ConfirmWalletScreen
import platform
import os
from binascii import hexlify, unhexlify, a2b_base64
from bitcoin import script, bip32, compact
from bitcoin.psbt import DerivationPath
from bitcoin.psbtview import PSBTView, read_write, PSBTError
from bitcoin.networks import NETWORKS
from bitcoin.transaction import SIGHASH
from .wallet import WalletError, Wallet
from .commands import DELETE, EDIT
from io import BytesIO
from bcur import bcur_decode_stream, bcur_encode_stream
from helpers import a2b_base64_stream, b2a_base64_stream
import gc
import json
# host-command identifiers understood by WalletManager
SIGN_PSBT = 0x01
ADD_WALLET = 0x02
# verify address from the address itself and its index
VERIFY_ADDRESS = 0x03
# show address with certain derivation path or descriptor
DERIVE_ADDRESS = 0x04
# sign psbt transaction encoded in bc-ur format
SIGN_BCUR = 0x05
# list wallet names
LIST_WALLETS = 0x06
# stream-encoding markers used when signing PSBTs
BASE64_STREAM = 0x64
RAW_STREAM = 0xFF
# human-readable names for the base sighash flags
SIGHASH_NAMES = {
    SIGHASH.ALL: "ALL",
    SIGHASH.NONE: "NONE",
    SIGHASH.SINGLE: "SINGLE",
}
# add sighash | anyonecanpay combinations for every base flag
# (iterate over a snapshot because the dict grows during the loop)
for sh in list(SIGHASH_NAMES):
    SIGHASH_NAMES[sh | SIGHASH.ANYONECANPAY] = SIGHASH_NAMES[sh] + " | ANYONECANPAY"
class WalletManager(BaseApp):
    """
    WalletManager class manages your wallets.
    It stores public information about the wallets
    in the folder and signs it with keystore's id key
    """
    button = "Wallets"
    # host-command prefixes this app responds to
    prefixes = [b"addwallet", b"sign", b"showaddr", b"listwallets"]
    name = "wallets"
    # Class constants for inheritance
    PSBTViewClass = PSBTView
    # first characters of a base64-encoded PSBT (checked in parse_stream)
    B64PSBT_PREFIX = b"cHNi"
    # wallet class
    WalletClass = Wallet
    # supported networks
    Networks = NETWORKS
    DEFAULT_SIGHASH = SIGHASH.ALL
    def __init__(self, path):
        """Create the manager rooted at `path` (folder created if missing)."""
        self.root_path = path
        platform.maybe_mkdir(path)
        # set in init() once keystore fingerprint and network are known
        self.path = None
        self.wallets = []
    def init(self, keystore, network, *args, **kwargs):
        """Loads or creates default wallets for new keystore or network"""
        super().init(keystore, network, *args, **kwargs)
        self.keystore = keystore
        # add fingerprint dir
        path = self.root_path + "/" + hexlify(self.keystore.fingerprint).decode()
        platform.maybe_mkdir(path)
        if network not in self.Networks:
            raise WalletError("Invalid network")
        self.network = network
        # add network dir
        path += "/" + network
        platform.maybe_mkdir(path)
        self.path = path
        self.wallets = self.load_wallets()
        # make sure at least one (default) wallet always exists
        if self.wallets is None or len(self.wallets) == 0:
            w = self.create_default_wallet(path=self.path + "/0")
            self.wallets = [w]
    def get_address(self, psbtout):
        """Helper function to get an address for every output"""
        network = self.Networks[self.network]
        # finally just return bitcoin address or unconfidential
        try:
            return psbtout.script_pubkey.address(network)
        except Exception as e:
            # use hex if script doesn't have address representation
            return hexlify(psbtout.script_pubkey.data).decode()
    async def menu(self, show_screen):
        """Show the wallet list; handle per-wallet delete / rename commands.
        Returns False when the user exits, True to show the menu again."""
        buttons = [(None, "Your wallets")]
        buttons += [(w, w.name) for w in self.wallets if not w.is_watchonly]
        # add a separate section only when watch-only wallets exist
        if len(buttons) != (len(self.wallets)+1):
            buttons += [(None, "Watch only wallets")]
            buttons += [(w, w.name) for w in self.wallets if w.is_watchonly]
        menuitem = await show_screen(Menu(buttons, last=(255, None)))
        if menuitem == 255:
            # we are done
            return False
        else:
            w = menuitem
            # pass wallet and network
            self.show_loader(title="Loading wallet...")
            cmd = await w.show(self.network, show_screen)
            if cmd == DELETE:
                scr = Prompt(
                    "Delete wallet?",
                    'You are deleting wallet "%s".\n'
                    "Are you sure you want to do it?" % w.name,
                )
                conf = await show_screen(scr)
                if conf:
                    self.delete_wallet(w)
            elif cmd == EDIT:
                scr = InputScreen(
                    title="Enter new wallet name",
                    note="",
                    suggestion=w.name,
                    min_length=1, strip=True
                )
                name = await show_screen(scr)
                # rename only on a non-empty name that actually changed
                if name is not None and name != w.name and name != "":
                    w.name = name
                    w.save(self.keystore)
        return True
def can_process(self, stream):
cmd, stream = self.parse_stream(stream)
return cmd is not None
    def parse_stream(self, stream):
        """Detect the command type carried by `stream`.
        Returns (command_constant, stream positioned at the payload) or
        (None, None) when the data is not recognized."""
        prefix = self.get_prefix(stream)
        # if we have prefix
        if prefix is not None:
            if prefix == b"sign":
                return SIGN_PSBT, stream
            elif prefix == b"showaddr":
                return DERIVE_ADDRESS, stream
            elif prefix == b"addwallet":
                return ADD_WALLET, stream
            elif prefix == b"listwallets":
                return LIST_WALLETS, stream
            else:
                return None, None
        # if not - we get data any without prefix
        # trying to detect type from a 40-byte sample:
        # probably base64-encoded PSBT
        data = stream.read(40)
        if data[:9] == b"UR:BYTES/":
            # rewind
            stream.seek(0)
            return SIGN_BCUR, stream
        if data[:len(self.PSBTViewClass.MAGIC)] == self.PSBTViewClass.MAGIC:
            stream.seek(0)
            return SIGN_PSBT, stream
        if data[:len(self.B64PSBT_PREFIX)] == self.B64PSBT_PREFIX:
            try:
                # decode the sample and double-check the raw PSBT magic
                psbt = a2b_base64(data)
                if psbt[:len(self.PSBTViewClass.MAGIC)] != self.PSBTViewClass.MAGIC:
                    return None, None
                # rewind
                stream.seek(0)
                return SIGN_PSBT, stream
            except:
                # not valid base64 - fall through to the other detectors
                pass
        # probably wallet descriptor
        if b"&" in data and b"?" not in data:
            # rewind
            stream.seek(0)
            return ADD_WALLET, stream
        # probably verifying address
        if data.startswith(b"bitcoin:") or data.startswith(b"BITCOIN:") or b"index=" in data:
            if data.startswith(b"bitcoin:") or data.startswith(b"BITCOIN:"):
                # skip the URI scheme prefix
                stream.seek(8)
            else:
                stream.seek(0)
            return VERIFY_ADDRESS, stream
        return None, None
    async def process_host_command(self, stream, show_screen):
        """Dispatch a host command read from `stream`.
        Returns (response stream or temp-file path, meta dict) when there is
        something to send back, otherwise None."""
        platform.delete_recursively(self.tempdir)
        cmd, stream = self.parse_stream(stream)
        if cmd == SIGN_PSBT:
            # detect raw vs base64 encoding from the magic bytes, then rewind
            magic = stream.read(len(self.PSBTViewClass.MAGIC))
            if magic == self.PSBTViewClass.MAGIC:
                encoding = RAW_STREAM
            elif magic.startswith(self.B64PSBT_PREFIX):
                encoding = BASE64_STREAM
            else:
                raise WalletError("Invalid PSBT magic!")
            stream.seek(-len(magic), 1)
            res = await self.sign_psbt(stream, show_screen, encoding)
            if res is not None:
                obj = {
                    "title": "Transaction is signed!",
                    "message": "Scan it with your wallet",
                }
                return res, obj
            return
        if cmd == SIGN_BCUR:
            # move to the end of UR:BYTES/
            stream.seek(9, 1)
            # move to the end of hash if it's there
            d = stream.read(70)
            if b"/" in d:
                pos = d.index(b"/")
                stream.seek(pos-len(d)+1, 1)
            else:
                stream.seek(-len(d), 1)
            # bcur-decode the payload into a raw temp file, then sign it
            with open(self.tempdir+"/raw", "wb") as f:
                bcur_decode_stream(stream, f)
            gc.collect()
            with open(self.tempdir+"/raw", "rb") as f:
                res = await self.sign_psbt(f, show_screen, encoding=RAW_STREAM)
            if res is not None:
                # bcur-encode to temp data file
                with open(self.tempdir+"/bcur_data", "wb") as fout:
                    if isinstance(res, str):
                        with open(res, "rb") as fin:
                            l, hsh = bcur_encode_stream(fin, fout, upper=True)
                    else:
                        l, hsh = bcur_encode_stream(res, fout, upper=True)
                # add prefix and hash
                with open(self.tempdir+"/bcur_full", "wb") as fout:
                    fout.write(b"UR:BYTES/")
                    fout.write(hsh)
                    fout.write(b"/")
                    with open(self.tempdir+"/bcur_data", "rb") as fin:
                        b = bytearray(100)
                        while True:
                            l = fin.readinto(b)
                            if l == 0:
                                break
                            # NOTE(review): two-argument write(buf, length) is
                            # MicroPython stream API, not CPython -- confirm
                            fout.write(b, l)
                obj = {
                    "title": "Transaction is signed!",
                    "message": "Scan it with your wallet",
                }
                gc.collect()
                return self.tempdir+"/bcur_full", obj
            return
        elif cmd == LIST_WALLETS:
            wnames = json.dumps([w.name for w in self.wallets])
            return BytesIO(wnames.encode()), {}
        elif cmd == ADD_WALLET:
            # read content, it's small
            desc = stream.read().decode().strip()
            w = self.parse_wallet(desc)
            res = await self.confirm_new_wallet(w, show_screen)
            if res:
                self.add_wallet(w)
            return
        elif cmd == VERIFY_ADDRESS:
            data = stream.read().decode().replace("bitcoin:", "")
            # should be of the form addr?index=N or similar
            if "index=" not in data or "?" not in data:
                raise WalletError("Can't verify address with unknown index")
            addr, rest = data.split("?")
            args = rest.split("&")
            idx = None
            for arg in args:
                if arg.startswith("index="):
                    idx = int(arg[6:])
                    break
            w, _ = self.find_wallet_from_address(addr, index=idx)
            await show_screen(WalletScreen(w, self.network, idx))
            return
        elif cmd == DERIVE_ADDRESS:
            # payload: b"script_type path[,path...] [redeem_script]"
            arr = stream.read().split(b" ")
            redeem_script = None
            if len(arr) == 2:
                script_type, path = arr
            elif len(arr) == 3:
                script_type, path, redeem_script = arr
            else:
                raise WalletError("Too many arguments")
            paths = [p.decode() for p in path.split(b",")]
            if len(paths) == 0:
                raise WalletError("Invalid path argument")
            res = await self.showaddr(
                paths, script_type, redeem_script, show_screen=show_screen
            )
            return BytesIO(res), {}
        else:
            raise WalletError("Unknown command")
    async def sign_psbt(self, stream, show_screen, encoding=BASE64_STREAM):
        """Parse, confirm with the user, and sign a PSBT read from `stream`.
        Returns the temp-file path of the signed transaction (base64 or raw,
        per `encoding`), or None if the user cancelled."""
        if encoding == BASE64_STREAM:
            # decode base64 into a raw temp file, recurse, then re-encode
            with open(self.tempdir+"/raw", "wb") as f:
                # read in chunks, write to ram file
                a2b_base64_stream(stream, f)
            with open(self.tempdir+"/raw", "rb") as f:
                res = await self.sign_psbt(f, show_screen, encoding=RAW_STREAM)
            if res:
                with open(self.tempdir+"/signed_b64", "wb") as fout:
                    with open(res, "rb") as fin:
                        b2a_base64_stream(fin, fout)
                return self.tempdir+"/signed_b64"
            return
        # preprocess stream - parse psbt, check wallets in inputs and outputs,
        # get metadata to display, default sighash for signing,
        # fill missing metadata and store it in temp file:
        with open(self.tempdir + "/filled_psbt", "wb") as fout:
            try:
                wallets, meta = self.preprocess_psbt(stream, fout)
            except PSBTError as e:
                raise WalletError("Invalid PSBT:\n\n%s" % e)
        # now we can work with completely filled psbt:
        with open(self.tempdir + "/filled_psbt", "rb") as f:
            psbtv = self.PSBTViewClass.view(f, compress=True)
            # ask user for everything, if None is returned - user cancelled at some point
            options = await self.confirm_transaction(wallets, meta, show_screen)
            if options is None:
                return
            # free the metadata before the memory-heavy signing step
            del meta
            gc.collect()
            # sign transaction if the user confirmed
            self.show_loader(title="Signing transaction...")
            with open(self.tempdir+"/signed_raw", "wb") as f:
                sig_count = self.sign_psbtview(psbtv, f, wallets, **options)
            return self.tempdir+"/signed_raw"
async def confirm_transaction(self, wallets, meta, show_screen):
"""
Checks parsed metadata, asks user about unclear options:
- sign with provided sighashes or only with default?
- sign if unknown wallet in inputs?
- final tx confirmation
Returns dict with options to pass to sign_psbtview function.
"""
# ask the user if he wants to sign with custom sighashes
sighash = await self.confirm_sighashes(meta, show_screen)
if sighash == False:
return
# ask if we want to continue with unknown wallets
if not await self.confirm_wallets(wallets, show_screen):
return
if not await self.confirm_transaction_final(wallets, meta, show_screen):
return
return dict(sighash=sighash)
async def | |
from math import ceil
from pathlib import Path
import random
import numpy as np
import pandas as pd
def _samples_per_set_size(split, n_samples, n_set_sizes):
"""evenly divide samples for dataset split across visual search set sizes"""
# below, divide by two for target present / absent
samples_per_set_size = (n_samples / n_set_sizes) / 2
if samples_per_set_size.is_integer():
samples_per_set_size = int(samples_per_set_size)
samples_per_set_size = [samples_per_set_size for _ in range(n_set_sizes)]
return samples_per_set_size
else:
raise TypeError(f'{split}_size_per_set_size, {samples_per_set_size}, is is not a whole number.\n'
f'It is calculated as: ({split}_size_per_stim_type / len(set_sizes)) / 2\n'
'(2 is for target present or absent).\n'
'Adjust total number of samples, or number of set sizes.')
def split(csv_file_in,
train_size,
dataset_type='searchstims',
csv_file_out=None,
stim_types=None,
val_size=None,
test_size=None,
train_size_per_set_size=None,
val_size_per_set_size=None,
test_size_per_set_size=None,
shard_train=False,
shard_size=None):
"""split a dataset into training, validation, and test sets.
Takes a .csv file representing the dataset, and adds a 'split' field
with string values that specify which split each sample belongs to, one of {'train', 'val', 'test'}.
Parameters
----------
csv_file_in : str
path to .csv file generated by searchstims package. Typically in root of train_dir.
train_size : int
number of samples to put in training data set.
Note that if there is more than on stimulus type in the source data set, then the number of stimuli from
each type will be train_size / number of types.
dataset_type : str
one of {'searchstims', 'VSD'}. Specifies whether dataset is images generated by searchstims package, or
images from Pascal-VOC dataset that were used to create the Visual Search Difficulty 1.0 dataset.
csv_file_out : str
name of .csv file that this function will save. Default is None, in which case the suffix '_split' is added
to csv_file_in.
stim_types : list
of strings; specifies which visual search stimulus types to use when creating dataset. Strings must be keys in
.json file within train_dir. Default is None, in which case all types found in .csv file will be used.
val_size : int
number of samples to put in validation data set.
Default is None, in which case there will be no validation set.
Note that if there is more than on stimulus type in the source data set, then the number of stimuli from
each type will be val_size / number of types.
test_size : int
number of samples to put in test data set.
Default is None, in which case all samples not used in training and validation sets are used for test set.
Note that if there is more than on stimulus type in the source data set, then the number of stimuli from
each type will be test_size / number of types.
train_size_per_set_size, val_size_per_set_size, test_size_per_set_size : list
number of samples in split per visual search set size. Default is None, in which case the total number of
samples for each stimulus type will be divided by the number of set sizes, so that an equal number is used
for each set size.
shard_train : bool
if True, split the training set into shards. This will add an additional column to the .csv generated, 'shard'
of integer values representing the shard that the sample in each row of the .csv belongs to.
shard_size : int
number of samples per shard
Returns
-------
None
"""
if csv_file_out is None:
# if no name declared for csv_file_out, then add '_split' to csv_file_in and use that for name
csv_file_in_stem = Path(csv_file_in).stem
csv_file_out = csv_file_in_stem + '_split.csv'
csv_file_out = Path(csv_file_in).parent.joinpath(csv_file_out)
df = pd.read_csv(csv_file_in)
if dataset_type == 'VSD':
if any([not type(size) == float for size in [train_size, val_size, test_size]]):
raise ValueError(
'when using Visual Difficulty Score dataset, sizes of splits (train_size, val_size, and test_size) '
'must be specified as proportions of dataset (e.g. 0.5, 0.25, 0.25).'
)
df.columns = ['img', 'difficulty_score']
inds = df.index.values
n_samples = inds.shape[0]
train_size = np.round(n_samples * train_size).astype(int)
val_size = np.round(n_samples * val_size).astype(int)
test_size = np.round(n_samples * test_size).astype(int)
# shuffle then grab indices
inds = np.random.permutation(inds)
train_inds = inds[0: train_size]
val_inds = inds[train_size: train_size + val_size]
test_inds = inds[train_size + val_size: train_size + val_size + test_size]
leftover_inds = inds[train_size + val_size + test_size:]
split_col = np.asarray([''] * inds.shape[0], dtype='object')
split_col[train_inds] = 'train'
split_col[val_inds] = 'val'
split_col[test_inds] = 'test'
if leftover_inds:
split_col[leftover_inds] = 'None'
df['split'] = split_col
elif dataset_type == 'searchstims':
if any([not type(size) == int for size in [train_size, val_size, test_size]]):
raise ValueError(
'when using searchstims dataset, sizes of splits (train_size, val_size, and test_size) '
'must be specified as number of samples from dataset (e.g. 6400, 1200, 800).'
)
df_stim_types = df['stimulus'].unique().tolist()
if stim_types:
if type(stim_types) != list or not all([type(stim_type) == str for stim_type in stim_types]):
raise TypeError('stim_types must be a list of strings')
# check that all stim_types are actually in the DataFrame
if not all([stim_type in df_stim_types
for stim_type in stim_types]):
not_in_fnames_dict = [stim_type for stim_type in stim_types if stim_type not in df_stim_types]
raise ValueError(
f'the following stimulus types were not found in {csv_file_in}: {not_in_fnames_dict}'
)
# if all stim_types are valid, filter by them
df = df[df['stimulus'].isin(stim_types)]
# reset index to zero, don't keep the original indices
# this is necessary if we don't use all stimuli, because old
# indices if used for indexing might not correspond to
# new rows of the shorter DataFrame
df = df.reset_index(drop=True)
else:
stim_types = df_stim_types
num_stim_types = len(stim_types)
# split is the column we will add to the dataframe. The value in each row will be the split that row belongs to,
# i.e., it will be one of {'train', 'val', 'test'}
split_col = np.asarray(['' for _ in range(len(df))], dtype='object')
if shard_train:
# shard is the column we will add to the dataframe that indicates which shard each sample belongs to. I.e.,
# the value in each row will be an integer 0, 1, ..., n-1 where n is the number of shards
shard_col = np.zeros(shape=(len(df),), dtype=int)
# ----- (1) divide the split size by the number of visual search stimulus types --------------------------------
train_size_per_stim_type = train_size / num_stim_types
if train_size_per_stim_type.is_integer():
train_size_per_stim_type = int(train_size_per_stim_type)
else:
raise TypeError(f'train_size_per_stim_type, {train_size_per_stim_type}, is is not a whole number.\n'
'It is calculated as: (train_size / number of visual search stimulus types))\n'
'Adjust total number of samples, or number of stimulus types.')
if train_size_per_set_size:
total_train_size_from_per_set_size = sum(train_size_per_set_size)
if total_train_size_from_per_set_size != train_size_per_stim_type:
raise ValueError(
f'total number of training samples specified in '
f'train_size_per_set_size, {total_train_size_from_per_set_size} does not equal number determined '
f'by dividing train_size up by number of stim_types: {train_size_per_stim_type}'
)
if val_size:
val_size_per_stim_type = val_size / num_stim_types
if val_size_per_stim_type.is_integer():
val_size_per_stim_type = int(val_size_per_stim_type)
else:
raise TypeError('val_size_per_set_size is not a whole number, adjust '
'total number of samples, or number of set sizes.')
else:
val_size_per_stim_type = 0
if test_size:
test_size_per_stim_type = test_size / num_stim_types
if test_size_per_stim_type.is_integer():
test_size_per_stim_type=int(test_size_per_stim_type)
else:
raise TypeError('test_size_per_set_size is not a whole number, adjust '
'total number of samples, or number of set sizes.')
else:
# "-1" means "use the remaining samples for the test set"
test_size_per_stim_type = -1
# ----- (2) determine the number of samples in each split *for each set visual search set size* ----------------
for stim_type in stim_types:
# and this will be set sizes declared by user for this stimulus (could be diff't for each stimulus type).
# First have to convert set size from char to int
set_sizes = df[df['stimulus'] == stim_type]['set_size'].unique()
n_set_sizes = len(set_sizes)
# if user did not specify train_size_per_set_size, divide evenly among set sizes
if train_size_per_set_size is None:
train_size_per_set_size_this_stim = _samples_per_set_size('train',
train_size_per_stim_type,
n_set_sizes)
else:
# if train_size_per_set_size is not None, divide each element in two (for target present / absent)
train_size_per_set_size_this_stim = [item // 2 if item % 2 == 0 else item / 2
for item in train_size_per_set_size]
if val_size:
# if user did not specify val_size_per_set_size, divide evenly among set sizes
if val_size_per_set_size is None:
val_size_per_set_size_this_stim = _samples_per_set_size('val',
val_size_per_stim_type,
n_set_sizes)
else:
# if val_size_per_set_size is not None, divide each element in two (for target present / absent)
val_size_per_set_size_this_stim = [item // | |
<gh_stars>0
#author: <NAME>
#guid: 2210049p
#!/usr/bin/env python
#coding: utf-8
#
# In[1]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
from scipy.stats import stats
import math
import random
from matplotlib import pyplot as plt
import numpy as np
import matplotlib.backends.backend_pdf
import random
import math
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import sys
from pyecharts.charts import Bar
from pyecharts import options as opts
from pyecharts.globals import ThemeType
from pyecharts.charts import Bar
from pyecharts import options as opts
import dataframe_image as dfi
from jupyterthemes import get_themes
import jupyterthemes as jt
from jupyterthemes.stylefx import set_nb_theme
from IPython.core.display import display, HTML
from IPython.display import display, Markdown, clear_output
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual
import time
# Notebook setup: render matplotlib figures inline and apply seaborn's
# default plot styling. Requires an IPython/Jupyter environment.
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set()
# In[2]:
#Load the dataset with the calculated differences Y[t], ommit the first value because difference is NaN and print the head()
def file(fileinput):
    """Load a load-series CSV and compute first differences.

    If `fileinput` does not contain '.csv' it is treated as a bare dataset
    name and resolved to 'data/<name>.csv'.

    Side effect: rebinds the module-level `df`, which the model functions
    in this script read.

    Returns
    -------
    pandas.DataFrame
        indexed by date, with columns 'X[t]' (raw load) and 'Y[t]' (first
        difference X[t] - X[t-1]); the first row is dropped because its
        difference is NaN.
    """
    if ".csv" not in fileinput:
        fileinput = "data/" + fileinput + ".csv"
    global df
    df = pd.read_csv(fileinput, skiprows=0)
    # Y[t] = X[t] - X[t-1]; the first difference is NaN, so drop row 0.
    df['difference'] = df.iloc[:, 1].diff()
    df = df.iloc[1:]
    df.columns = ['date', 'X[t]', 'Y[t]']
    df.date = pd.to_datetime(df.date)
    df.set_index('date', inplace=True)
    return df
# # Code for the Random(p) Problem
# In[3]:
def random_prob_model(counter, probability):
    """Random(p) stopping rule over the global load series `df`.

    Splits the series (excluding the last 31 observations) into windows of
    `counter` samples. Within each window a uniform(0, 1) draw is made at
    every step; the model stops at the first step whose draw falls below
    `probability`, or at step `counter - 1` if none does.

    Reads the module-level `df` created by file() — call file() first.
    Output is random; seed the `random` module for reproducibility.

    Returns
    -------
    tuple
        (minimums, load_list, load_differences,
         minimums_times, time_list, times_differences), comparing the
        achieved stop in each window with that window's optimum (minimum).
    """
    #From the dataframe these are the load values and the dates columns stored in arrays to be processed
    distribution = df.reset_index(drop=False).iloc[:,1].values
    distribution = distribution[:len(distribution)-31]
    dates = df.reset_index(drop=False).iloc[:,0].values  # NOTE(review): unused in this function
    #Empty lists to store times we stopped, loads at each stop and the minimum values of each run and times
    load_list = []
    time_list = []
    minimums = []
    minimums_times = []
    load_data = []
    # Chunk the series into windows and record each window's optimum
    # (minimum load) together with the time index where it occurs.
    for i in range(0, len(distribution), counter):
        chunked_data = distribution[i:i + counter]
        min_value = min(chunked_data)
        minimums.append(min_value)
        min_index = np.where(chunked_data == min_value)[0][0]
        minimums_times.append(min_index)
        load_data.append(chunked_data)
    for chunk in load_data:
        best = 0
        index = 0
        #Find the best candidate for the model to stop in the first run by comparing the random generated value with the fixed probability x
        #If less STOP and offload, else keep looking until the last observation of the window
        while index <= len(chunk)-1:
            x = random.uniform(0, 1)
            if x < probability:
                best = index
                # NOTE(review): np.where(chunk == chunk[best]) yields the FIRST
                # index holding this load value; with duplicate loads in a
                # window this may differ from `best` itself — confirm intended.
                time_list.append(np.where(chunk == chunk[best])[0][0])
                load_list.append(chunk[best])
                break
            elif index == counter-1:
                # Reached the end of the window without a stop: offload here.
                best = index
                time_list.append(np.where(chunk == chunk[best])[0][0])
                load_list.append(chunk[best])
                break
            index += 1
    # Differences between achieved stops and the per-window optima.
    load_differences = np.asarray(load_list) - np.asarray(minimums)
    times_differences = np.array(time_list) - np.array(minimums_times)
    return minimums, load_list, load_differences, minimums_times, time_list, times_differences
# # Code for Secretary Model
# In[4]:
def secretary_model(counter):
    """Secretary-problem stopping rule over the global load series `df`.

    Splits the series (excluding the last 31 observations) into windows of
    `counter` samples. In each window, the first ~1/e fraction is observed
    only to set a benchmark (its minimum); the model then stops at the first
    later value at or below that benchmark, or at the end of the window.

    Reads the module-level `df` created by file() — call file() first.

    Returns
    -------
    tuple
        (minimums, load_list, load_differences,
         minimums_times, time_list, times_differences).
    """
    #This is the code for the Secretary Problem
    #From the dataframe these are the load values and the dates columns stored in arrays to be processed
    distribution = df.reset_index(drop=False).iloc[:,1].values
    distribution = distribution[:len(distribution)-31]
    dates = df.reset_index(drop=False).iloc[:,0].values  # NOTE(review): unused in this function
    #Empty lists to store times we stopped, loads at each stop and the minimum values of each run
    time_list = []
    load_list = []
    minimums = []
    minimums_times = []
    load_data = []
    # Chunk the series into windows and record each window's optimum
    # (minimum load) together with the time index where it occurs.
    for i in range(0, len(distribution), counter):
        chunked_data = distribution[i:i + counter]
        min_value = min(chunked_data)
        minimums.append(min_value)
        min_index = np.where(chunked_data == min_value)[0][0]
        minimums_times.append(min_index)
        load_data.append(chunked_data)
    for chunk in load_data:
        # Observe the first ~1/e of the window purely to set the benchmark.
        samplesize = round(len(chunk) * math.exp(-1))
        sample = chunk[ : samplesize]
        #Compute the benchmark of the sample (minimum value of the sample)
        benchmark = min(sample)
        best = 0
        index = samplesize
        #Find the first post-sample value at or below the benchmark
        while index <= len(chunk)-1:
            if chunk[index] <= benchmark:
                best = index
                break
            index += 1
        #Once we observe the first value below the benchmark, STOP there and offload. Store the value
        if(chunk[best] <= benchmark):
            # NOTE(review): np.where picks the FIRST occurrence of this load
            # value; duplicate loads in the window may shift the recorded time.
            time_list.append(np.where(chunk == chunk[best])[0][0])
            load_list.append(chunk[best])
        #If we dont then go to the end of the observations, stop there and offload
        elif index == counter:
            # NOTE(review): a final window shorter than `counter` ends the
            # while-loop with index == len(chunk) != counter; neither branch
            # fires then, so nothing is appended for that window and the
            # result lists become misaligned — confirm against data length.
            best = index
            time_list.append(np.where(chunk == chunk[best-1])[0][0])
            load_list.append(chunk[best-1])
    # Differences between achieved stops and the per-window optima.
    load_differences = np.asarray(load_list) - np.asarray(minimums)
    times_differences = np.array(time_list) - np.array(minimums_times)
    time_delays = [x - 37 for x in time_list]  # NOTE(review): unused; 37 looks like a leftover magic constant
    return minimums, load_list, load_differences, minimums_times, time_list, times_differences
# # Code for the House Selling Model
# In[5]:
#Without Dataset
# A = [[2,6,7,10,4,7,4,8,9,3], [8,5,3,9,1,7,9,10,4,3]]
def house_selling_model(counter, r):
    """House-selling optimal stopping (discount rate `r`) over the global
    load series `df`.

    Splits the series (excluding the last 31 observations) into windows of
    `counter` samples. Loads are min-max scaled within each window; the model
    stops at the first index i where the scaled value reaches the
    backward-induction threshold d[i].

    Reads the module-level `df` created by file() — call file() first.

    Returns
    -------
    tuple
        (minimums, load_list, load_differences,
         minimums_times, time_list, times_differences).
    """
    distribution = df.reset_index(drop=False).iloc[:, 1].values
    distribution = distribution[:len(distribution) - 31]
    N = counter
    # Lists for stop times, stopped loads, and per-window minima and their times
    scaled_list = []
    load_list = []
    time_list = []
    minimums = []
    minimums_times = []
    A = []
    for i in range(0, len(distribution), counter):
        chunked_data = distribution[i:i + counter]
        min_value = min(chunked_data)
        minimums.append(min_value)
        min_index = np.where(chunked_data == min_value)[0][0]
        minimums_times.append(min_index)
        A.append(chunked_data)
    # Scale the availability values into [0, 1] within each window.
    # np.float was removed from NumPy (1.20+); the builtin float is the
    # equivalent dtype. max/min are loop-invariant, so hoist them.
    # NOTE(review): a constant window (hi == lo) would divide by zero — the
    # original had the same behavior; confirm input data excludes this.
    for each in A:
        scaled_availability = np.full(shape=len(each), fill_value=0, dtype=float)
        hi = max(each)
        lo = min(each)
        for k in range(1, len(each)):
            scaled_availability[k] = (hi - each[k]) / (hi - lo)
        scaled_list.append(scaled_availability)
    # Backward induction for acceptance thresholds d[i]; d[N-1] stays 0, so a
    # full window is always stopped by its last observation at the latest.
    d = np.full(shape=counter, fill_value=0, dtype=float)
    for i in range(N - 2, -1, -1):
        d[i] = (1 / (1 + r)) * ((1 + (d[i + 1]) ** 2) / 2)
    c = 0
    for each_list in scaled_list:
        # range(len(...)) fixes an off-by-one in the original
        # (range(len + 1)) that could index past the end of a short
        # final window.
        for i in range(0, len(each_list)):
            if each_list[i] >= d[i]:
                load_list.append(A[c][i])
                time_list.append(i)
                break
        else:
            # Short final window with no threshold crossing: stop at its
            # last observation so all result lists stay aligned.
            load_list.append(A[c][len(each_list) - 1])
            time_list.append(len(each_list) - 1)
        c += 1
    load_differences = np.asarray(load_list) - np.asarray(minimums)
    times_differences = np.array(time_list) - np.array(minimums_times)
    return minimums, load_list, load_differences, minimums_times, time_list, times_differences
# # RUNS AND VALUES SIMULATIONS FOR MODELS
# In[15]:
#Simulate the random prob model by defining the rpb to be executed
#Define chunk_func as rpb_200[0-4] and chunks N
def randomP_simulation_run(chunk_func, N):
    """Plot and tabulate one Random(P) simulation run.

    Parameters
    ----------
    chunk_func : tuple
        result tuple returned by random_prob_model(); indices used here are
        0 (optimal loads), 1 (achieved loads), 2 (load differences) and
        5 (time differences).
    N : int
        window size, used only in plot titles and output file names.

    Side effects: saves two .png figures and one .csv under
    'randomp_figures/' (the directories must already exist) and display()s
    the run DataFrame — requires an IPython environment.
    """
    n_groups = len(chunk_func[0])
    # create plot for loads
    plt.figure(figsize=(30,25))
    index = np.arange(n_groups)
    bar_width = 0.4
    opacity = 0.8
    #Loads Plot
    #Plot the achieved values of each observed sample
    rects2 = plt.bar(index, chunk_func[1], bar_width,alpha=opacity,color='black',label='Achieved')
    #Plot the minimum values of each observed sample
    rects1 = plt.bar(index + bar_width, chunk_func[0], bar_width,alpha=opacity,color='darkred',label='Optimal')
    #Label
    plt.xlabel('Stops', size = 50)
    plt.ylabel('Load Values', size = 50)
    plt.title('Loads in each Run with N = {} for Random(P) Model'.format(N), size = 60)
    plt.xticks(index + (bar_width/2), tuple(range(1,n_groups+1)))
    plt.xticks(fontsize= 30)
    plt.yticks(fontsize= 30)
    plt.xlim([0-bar_width/2,index.size])
    plt.plot()
    plt.legend(prop={'size': 25})
    # Timestamped file name so repeated runs do not overwrite each other.
    plt.savefig('randomp_figures/random(p)_{}_'.format(N) + time.strftime("%Y-%m-%d %H%M%S") + '.png')
    plt.figure(figsize=(30,25))
    #Times Plot
    rects2 = plt.bar(index, np.absolute(chunk_func[5]), bar_width,alpha=opacity,color='darkblue',label='Time instance difference from optimal')
    #Label
    plt.xlabel('Stops', size = 50)
    plt.ylabel('Time Instances', size = 50)
    plt.title('Times in each Run with N = {} for Random(P) Model'.format(N), size = 60)
    plt.xticks(index, tuple(range(1,n_groups+1)))
    plt.xticks(fontsize= 30)
    plt.yticks(fontsize= 30)
    plt.xlim([0-bar_width/2,index.size])
    plt.plot()
    plt.legend(prop={'size': 25})
    plt.savefig('randomp_figures/random(p)_times_{}_'.format(N) + time.strftime("%Y-%m-%d %H%M%S") + '.png')
    #Display the dataframe
    runs_data = {'Run': list(range(1,len(chunk_func[0])+1)),'Optimal': chunk_func[0],'Load when Offloading': chunk_func[1],
                 'Load Difference': chunk_func[2],}
    runs_frame = pd.DataFrame(runs_data, columns = ['Run','Optimal','Load when Offloading', 'Load Difference'])
    runs_frame.index += 1
    display(runs_frame)
    runs_frame.to_csv('randomp_figures/dataframes/randomp_data_{}_'.format(N) + time.strftime("%Y-%m-%d %H%M%S") + '.csv')
#Simulate the secretary model by defining the chunk size to be executed
def secretary_simulation_run(chunks):
    """Plot and tabulate one Secretary-model simulation run.

    Fix: the original called secretary_model(chunks) seven separate times,
    recomputing the whole simulation for every plot/table access; it is now
    run once and the result tuple reused (indices: 0 optimal loads,
    1 achieved loads, 2 load differences, 5 time differences).

    Parameters
    ----------
    chunks : int
        window size passed to secretary_model(); also used in titles and
        output file names.

    Side effects: saves two .png figures and one .csv under
    'secretary_figures/' (directories must already exist) and display()s
    the run DataFrame — requires an IPython environment.
    """
    results = secretary_model(chunks)
    # data to plot
    n_groups_secretary = len(results[0])
    # create plot for loads
    plt.figure(figsize=(30,25))
    index = np.arange(n_groups_secretary)
    bar_width = 0.4
    opacity = 0.8
    # Loads Plot
    #Plot the achieved values of each observed sample
    rects2 = plt.bar(index, results[1], bar_width,alpha=opacity,color='black',label='Achieved')
    #Plot the minimum values of each observed sample
    rects1 = plt.bar(index + bar_width, results[0], bar_width, alpha=opacity,color='darkred',label='Optimal')
    #Label
    plt.xlabel('Stops', size = 50)
    plt.ylabel('Load Values', size = 50)
    plt.title('Loads in each Run with N = {} for Secretary Model'.format(chunks), size = 60)
    plt.xticks(index + (bar_width/2), tuple(range(1,n_groups_secretary+1)))
    plt.xticks(fontsize= 30)
    plt.yticks(fontsize= 30)
    plt.xlim([0-bar_width/2,index.size])
    plt.plot()
    plt.legend(prop={'size': 25})
    plt.savefig('secretary_figures/secretary_{}_'.format(chunks) + time.strftime("%Y-%m-%d %H%M%S") + '.png')
    #Time Plot
    plt.figure(figsize=(30,25))
    #Plot the time differences from the optimal stop of each window
    rects1 = plt.bar(index + bar_width, results[5], bar_width, alpha=opacity,color='darkblue',label='Time instance difference from optimal')
    #Label
    plt.xlabel('Stops', size = 50)
    plt.ylabel('Time Instances', size = 50)
    plt.title('Times in each Run with N = {} for Secretary Model'.format(chunks), size = 60)
    plt.xticks(index + (bar_width), tuple(range(1,n_groups_secretary+1)))
    plt.xticks(fontsize= 30)
    plt.yticks(fontsize= 30)
    plt.xlim([0-bar_width/2,index.size])
    plt.plot()
    plt.legend(prop={'size': 25})
    plt.savefig('secretary_figures/secretary_times_{}_'.format(chunks) + time.strftime("%Y-%m-%d %H%M%S") + '.png')
    #Display the dataframe
    runs_data = {'Run': list(range(1,len(results[0])+1)),'Optimal': results[0],'Load when Offloading': results[1],
                 'Load Difference': results[2],}
    runs_frame = pd.DataFrame(runs_data, columns = ['Run','Optimal','Load when Offloading', 'Load Difference'])
    runs_frame.index += 1
    display(runs_frame)
    runs_frame.to_csv('secretary_figures/dataframes/secretary_data_{}_'.format(chunks) + time.strftime("%Y-%m-%d %H%M%S") + '.csv')
def house_selling_simulation_run(chunks, r):
n_groups_house = len(house_selling_model(chunks, r)[0])
# create plot for loads
plt.figure(figsize=(30,25))
index = np.arange(n_groups_house)
bar_width = 0.4
opacity = 0.8
# Loads Plot
#Plot the achieved values of each | |
"""Used by the CLI or any UI to deliver services to our lovely users
"""
from functools import wraps
import logging
from dodola.core import (
apply_bias_correction,
build_xesmf_weights_file,
xesmf_regrid,
standardize_gcm,
xclim_remove_leapdays,
apply_downscaling,
apply_wet_day_frequency_correction,
train_quantiledeltamapping,
adjust_quantiledeltamapping_year,
train_analogdownscaling,
adjust_analogdownscaling,
)
import dodola.repository as storage
logger = logging.getLogger(__name__)
def log_service(func):
    """Decorator for dodola.services to log service start and stop.

    The wrapped callable's return value is propagated (the original wrapper
    discarded it), so decorated services can return results to callers.
    """

    @wraps(func)
    def service_logger(*args, **kwargs):
        servicename = func.__name__
        logger.info(f"Starting {servicename} dodola service")
        out = func(*args, **kwargs)
        logger.info(f"dodola service {servicename} done")
        return out

    return service_logger
@log_service
def train_qdm(historical, reference, out, variable, kind):
    """Train a quantile delta mapping model and write it to `out`.

    Parameters
    ----------
    historical : str
        fsspec-compatible URL to historical simulation store.
    reference : str
        fsspec-compatible URL to store to use as model reference.
    out : str
        fsspec-compatible URL to store trained model.
    variable : str
        Name of target variable in input and output stores.
    kind : {"additive", "multiplicative"}
        Kind of QDM scaling.
    """
    sim_hist = storage.read(historical)
    sim_ref = storage.read(reference)
    kind_map = {"additive": "+", "multiplicative": "*"}
    if kind not in kind_map:
        # Helpful message listing the accepted kwargs.
        raise ValueError(f"kind must be {set(kind_map.keys())}, got {kind}")
    model = train_quantiledeltamapping(
        reference=sim_ref, historical=sim_hist, variable=variable, kind=kind_map[kind]
    )
    storage.write(out, model.ds)
@log_service
def apply_qdm(simulation, qdm, year, variable, out, include_quantiles=False):
    """Adjust one year of a simulation with a trained QDM model, write NetCDF.

    Dumping to NetCDF is a feature likely to change in the near future.

    Parameters
    ----------
    simulation : str
        fsspec-compatible URL containing simulation data to be adjusted.
    qdm : str
        fsspec-compatible URL pointing to a Zarr Store holding a trained
        ``xclim.sdba.adjustment.QuantileDeltaMapping`` Dataset.
    year : int
        Target year to adjust, with rolling years and day grouping.
    variable : str
        Target variable in `simulation` to adjust; the adjusted output keeps
        the same name.
    out : str
        fsspec-compatible path or URL of the NetCDF4 file the QDM-adjusted
        simulation data is written to.
    include_quantiles : bool
        Whether bias-corrected quantiles are carried through to the output.
    """
    sim_ds = storage.read(simulation)
    trained_qdm = storage.read(qdm)
    adjusted = adjust_quantiledeltamapping_year(
        simulation=sim_ds,
        qdm=trained_qdm,
        year=int(year),
        variable=str(variable),
        include_quantiles=include_quantiles,
    )
    # NetCDF on local disk (pooled and "fanned-in" by the workflow/repository
    # layer) is currently faster and more reliable than Zarr Stores.
    logger.debug(f"Writing to {out}")
    adjusted.to_netcdf(out, compute=True)
    logger.info(f"Written {out}")
@log_service
def train_aiqpd(coarse_reference, fine_reference, out, variable, kind):
    """Train analog-inspired quantile preserving downscaling, write to `out`.

    Parameters
    ----------
    coarse_reference : str
        fsspec-compatible URL to resampled coarse reference store.
    fine_reference : str
        fsspec-compatible URL to fine-resolution reference store.
    out : str
        fsspec-compatible URL to store adjustment factors.
    variable : str
        Name of target variable in input and output stores.
    kind : {"additive", "multiplicative"}
        Kind of AIQPD downscaling.
    """
    coarse_ds = storage.read(coarse_reference)
    fine_ds = storage.read(fine_reference)
    kind_map = {"additive": "+", "multiplicative": "*"}
    if kind not in kind_map:
        # Helpful message listing the accepted kwargs.
        raise ValueError(f"kind must be {set(kind_map.keys())}, got {kind}")
    model = train_analogdownscaling(
        coarse_reference=coarse_ds,
        fine_reference=fine_ds,
        variable=variable,
        kind=kind_map[kind],
    )
    storage.write(out, model.ds)
@log_service
def apply_aiqpd(simulation, aiqpd, variable, out):
    """Downscale a simulation with AIQPD adjustment factors, write NetCDF.

    Dumping to NetCDF is a feature likely to change in the near future.

    Parameters
    ----------
    simulation : str
        fsspec-compatible URL containing simulation data to be adjusted.
    aiqpd : str
        fsspec-compatible URL pointing to a Zarr Store holding a trained
        ``xclim.sdba.adjustment.AnalogQuantilePreservingDownscaling`` Dataset.
    variable : str
        Target variable in `simulation` to downscale; the downscaled output
        keeps the same name.
    out : str
        fsspec-compatible path or URL of the NetCDF4 file the
        AIQPD-downscaled simulation data is written to.
    """
    sim_ds = storage.read(simulation)
    factors_ds = storage.read(aiqpd)
    # The downscaling adjustment requires fully loaded (unchunked) data.
    sim_ds = sim_ds.load()
    factors_ds = factors_ds.load()
    downscaled = adjust_analogdownscaling(
        simulation=sim_ds, aiqpd=factors_ds, variable=str(variable)
    )
    # NetCDF on local disk (pooled and "fanned-in" by the workflow/repository
    # layer) is currently faster and more reliable than Zarr Stores.
    logger.debug(f"Writing to {out}")
    downscaled.to_netcdf(out, compute=True, engine="netcdf4")
    logger.info(f"Written {out}")
@log_service
def bias_correct(x, x_train, train_variable, y_train, out, out_variable, method):
    """Bias-correct input model data, reading from and writing to storage.

    Parameters
    ----------
    x : str
        Storage URL to input data to bias correct.
    x_train : str
        Storage URL to biased input data used for training the
        bias-correction model.
    train_variable : str
        Variable name used in training and obs data.
    y_train : str
        Storage URL to 'true' data or observations used for training the
        bias-correction model.
    out : str
        Storage URL to write bias-corrected output to.
    out_variable : str
        Variable name used as output variable name.
    method : str
        Bias correction method to be used.
    """
    model_train_ds = storage.read(x_train)
    obs_train_ds = storage.read(y_train)
    predict_ds = storage.read(x)
    corrected = apply_bias_correction(
        model_train_ds,
        obs_train_ds,
        predict_ds,
        train_variable,
        out_variable,
        method,
    )
    storage.write(out, corrected)
@log_service
def downscale(
    x,
    y_climo_coarse,
    y_climo_fine,
    out,
    train_variable,
    out_variable,
    method,
    domain_file,
    adjustmentfactors=None,
    weights_path=None,
):
    """Downscale bias corrected model data with IO to storage

    Parameters
    ----------
    x : str
        Storage URL to bias corrected input data to downscale.
    y_climo_coarse : str
        Storage URL to input coarse-res obs climatology to use for computing adjustment factors.
    y_climo_fine : str
        Storage URL to input fine-res obs climatology to use for computing adjustment factors.
    out : str
        Storage URL to write downscaled output to.
    train_variable : str
        Variable name used in training and obs data.
    out_variable : str
        Variable name used as output variable name.
    method : {"BCSD"}
        Downscaling method to be used.
    domain_file : str
        Storage URL to input grid for regridding adjustment factors.
    adjustmentfactors : str or None, optional
        Storage URL to write fine-resolution adjustment factors to.
        Default None means the factors are not written.
    weights_path : str or None, optional
        Storage URL for input weights for regridding.
    """
    bc_ds = storage.read(x)
    obs_climo_coarse = storage.read(y_climo_coarse)
    obs_climo_fine = storage.read(y_climo_fine)
    domain_fine = storage.read(domain_file)
    adjustment_factors, downscaled_ds = apply_downscaling(
        bc_ds,
        obs_climo_coarse=obs_climo_coarse,
        obs_climo_fine=obs_climo_fine,
        train_variable=train_variable,
        out_variable=out_variable,
        method=method,
        domain_fine=domain_fine,
        weights_path=weights_path,
    )
    storage.write(out, downscaled_ds)
    # Writing the adjustment factors is optional.
    if adjustmentfactors is not None:
        storage.write(adjustmentfactors, adjustment_factors)
@log_service
def build_weights(x, method, domain_file, outpath=None):
    """Generate a local NetCDF weights file for regridding climate data.

    Parameters
    ----------
    x : str
        Storage URL of the input xr.Dataset that will be regridded.
    method : str
        Method of regridding. Passed to ``xesmf.Regridder``.
    domain_file : str
        Storage URL of the input xr.Dataset domain file to regrid to.
    outpath : optional
        Local file path name the regridding weights file is written to.
    """
    source_ds = storage.read(x)
    target_domain = storage.read(domain_file)
    build_xesmf_weights_file(source_ds, target_domain, method=method, filename=outpath)
@log_service
def rechunk(x, target_chunks, out):
    """Rechunk data to specification

    Parameters
    ----------
    x : str
        Storage URL to input data.
    target_chunks : dict
        Mapping {coordinate_name: chunk_size} showing how data is
        to be rechunked.
    out : str
        Storage URL to write rechunked output to.
    """
    ds = storage.read(x)
    # Simple, stable, but not for more specialized rechunking needs.
    # In that case use "rechunker" package, or similar.
    ds = ds.chunk(target_chunks)
    # Hack to get around issue with writing chunks to zarr in xarray ~v0.17.0
    # https://github.com/pydata/xarray/issues/2300
    # pop() with a default instead of del: a variable read from a non-zarr
    # source may have no "chunks" entry in its encoding, and del would
    # raise KeyError there.
    for v in ds.data_vars.keys():
        ds[v].encoding.pop("chunks", None)
    storage.write(out, ds)
@log_service
def regrid(
    x, out, method, domain_file, weights_path=None, astype=None, add_cyclic=None
):
    """Regrid climate data onto a target domain grid.

    Parameters
    ----------
    x : str
        Storage URL of the input xr.Dataset that will be regridded.
    out : str
        Storage URL the regridded output is written to.
    method : str
        Method of regridding. Passed to ``xesmf.Regridder``.
    domain_file : str
        Storage URL of the input xr.Dataset domain file to regrid to.
    weights_path : optional
        Local file path name of a regridding weights file.
    astype : str, numpy.dtype, or None, optional
        Typecode or data-type the regridded output is cast to.
    add_cyclic : str, or None, optional
        Dimension to wrap cyclically (add wrap-around values to) before
        regridding; useful for avoiding dateline artifacts along longitude
        in global datasets.
    """
    source_ds = storage.read(x)
    target_domain = storage.read(domain_file)
    result = xesmf_regrid(
        source_ds,
        target_domain,
        method=method,
        weights_path=weights_path,
        astype=astype,
        add_cyclic=add_cyclic,
    )
    storage.write(out, result)
@log_service
def clean_cmip6(x, out, leapday_removal):
"""Cleans and standardizes CMIP6 GCM
Parameters
----------
x : str
Storage URL to input xr.Dataset that will be cleaned.
out : str
| |
3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
# Space group 136 (P 42/m n m): each symmetry operation is stored as a
# (rotation matrix, translation numerator, translation denominator) triple,
# with the translation expressed as the elementwise fraction num/den.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,1,1],    [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,1],    [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],    [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0],    [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,-1,-1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [-1,-1,-1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],    [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0],    [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0],    [1,1,1]),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
# Space group 137 (P 42/n m c :2, origin choice 2): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,1],    [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,1],    [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,0],    [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,0],    [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],    [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,1],    [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1],    [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,-1],  [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,-1],  [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,0],   [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,0],   [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0],  [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,-1],   [1,1,2]),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
# Space group 138 (P 42/n c m :2, origin choice 2): symmetry operations as
# (rotation, translation numerator, translation denominator) triples.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,1],   [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,1],   [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,1],   [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],   [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,0],   [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,-1], [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0],   [1,1,1]),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
# Space group 139 (I 4/m m m): body-centered, so the 16 point-group rotations
# appear twice — once with zero translation and once shifted by the centering
# vector (1/2, 1/2, 1/2). Order matters: all zero-translation ops first.
transformations = []
_rots = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
]
for _num, _den in (([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])):
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = | |
snippetsAddSnippet(info, word, abbr)
completion['abbr'] = abbr
completion['menu'] = menu
completion['info'] = info
completion['dup'] = 1
# Replace the number that represents a specific kind with a better
# textual representation.
completion['kind'] = kinds[result.cursorKind]
return completion
class CompleteThread(threading.Thread):
  """Background worker that parses/completes via libclang.

  Runs the libclang work off the vim UI thread so the editor stays
  responsive. All libclang access is serialized through the global
  libclangLock. A line of -1 means "warm up the translation-unit cache
  only"; otherwise code-completion results for (line, column) are stored
  in self.result.
  """
  def __init__(self, line, column, currentFile, fileName, params, timer):
    threading.Thread.__init__(self)
    self.line = line                  # completion line, or -1 for cache warm-up
    self.column = column              # completion column (ignored for warm-up)
    self.currentFile = currentFile    # buffer contents snapshot
    self.fileName = fileName          # path of the buffer being completed
    self.result = None                # filled by run() with completion results
    self.args = params['args']        # clang command-line arguments
    self.cwd = params['cwd']          # working dir for relative include paths
    self.timer = timer                # CodeCompleteTimer for debug profiling
  def run(self):
    with workingDir(self.cwd):
      with libclangLock:
        if self.line == -1:
          # Warm up the caches. For this it is sufficient to get the
          # current translation unit. No need to retrieve completion
          # results. This short pause is necessary to allow vim to
          # initialize itself. Otherwise we would get: E293: block was
          # not locked The user does not see any delay, as we just pause
          # a background thread.
          time.sleep(0.1)
          getCurrentTranslationUnit(self.args, self.currentFile, self.fileName,
                                    self.timer)
        else:
          self.result = getCurrentCompletionResults(self.line, self.column,
                                                    self.args, self.currentFile,
                                                    self.fileName, self.timer)
def WarmupCache():
  """Kick off a background parse of the current buffer.

  Uses the special line == -1 mode of CompleteThread so a later completion
  request finds the translation unit already cached. Fire-and-forget: the
  thread is not joined.
  """
  params = getCompileParams(vim.current.buffer.name)
  timer = CodeCompleteTimer(0, "", -1, -1, params)
  warmup_worker = CompleteThread(-1, -1, getCurrentFile(),
                                 vim.current.buffer.name, params, timer)
  warmup_worker.start()
def getCurrentCompletions(base):
  """Compute completion candidates for the word prefix `base`.

  Runs completion on a background CompleteThread while polling vim's
  complete_check() so the user can cancel. Returns a (string, timer)
  tuple where the string is the repr of the candidate list (vim eval's
  it back into a vimscript list).

  NOTE: this file is Python 2 — `print` statements, Thread.isAlive(),
  filter/map returning lists, and the 3-argument sorted(seq, cmp, key).
  """
  global debug
  debug = int(vim.eval("g:clang_debug")) == 1
  sorting = vim.eval("g:clang_sort_algo")
  line, _ = vim.current.window.cursor
  column = int(vim.eval("b:col"))
  params = getCompileParams(vim.current.buffer.name)
  timer = CodeCompleteTimer(debug, vim.current.buffer.name, line, column,
                            params)
  t = CompleteThread(line, column, getCurrentFile(), vim.current.buffer.name,
                     params, timer)
  t.start()
  # Poll with a short join timeout so complete_check() can abort us when the
  # user keeps typing.
  while t.isAlive():
    t.join(0.01)
    cancel = int(vim.eval('complete_check()'))
    if cancel != 0:
      return (str([]), timer)
  cr = t.result
  if cr is None:
    print "Cannot parse this source file. The following arguments " \
        + "are used for clang: " + " ".join(params['args'])
    return (str([]), timer)
  results = cr.results
  timer.registerEvent("Count # Results (%s)" % str(len(results)))
  if base != "":
    # Keep only candidates whose typed text starts with the prefix.
    results = filter(lambda x: getAbbr(x.string).startswith(base), results)
  timer.registerEvent("Filter")
  if sorting == 'priority':
    # Python 2 sorted(seq, cmp, key): cmp is None, key extracts priority.
    getPriority = lambda x: x.string.priority
    results = sorted(results, None, getPriority)
  if sorting == 'alpha':
    getAbbrevation = lambda x: getAbbr(x.string).lower()
    results = sorted(results, None, getAbbrevation)
  timer.registerEvent("Sort")
  result = map(formatResult, results)
  timer.registerEvent("Format")
  return (str(result), timer)
def getAbbr(strings):
  """Return the spelling of the first TypedText chunk, or '' if none."""
  return next((chunk.spelling for chunk in strings if chunk.isKindTypedText()),
              "")
def jumpToLocation(filename, line, column):
  """Move the cursor to (line, column) in filename, opening it if needed.

  When jumping within the current buffer, the current position is pushed
  onto vim's jump list first (normal m') so Ctrl-O can return.
  """
  if filename == vim.current.buffer.name:
    vim.command("normal m'")
  else:
    try:
      vim.command("edit %s" % filename)
    except:
      # For some unknown reason, whenever an exception occurs in
      # vim.command, vim goes crazy and output tons of useless python
      # errors, catch those.
      return
  vim.current.window.cursor = (line, column - 1)
def gotoDefinition():
  """Jump to the location referenced by the cursor (definition-ish).

  Resolves the libclang cursor under the vim cursor and jumps to its
  `referenced` location when that differs from the current one.
  NOTE: Python 2 file (print statement).
  """
  global debug
  debug = int(vim.eval("g:clang_debug")) == 1
  params = getCompileParams(vim.current.buffer.name)
  line, col = vim.current.window.cursor
  timer = CodeCompleteTimer(debug, vim.current.buffer.name, line, col, params)
  with workingDir(params['cwd']):
    with libclangLock:
      tu = getCurrentTranslationUnit(params['args'], getCurrentFile(),
                                     vim.current.buffer.name, timer,
                                     update = True)
      if tu is None:
        print "Couldn't get the TranslationUnit"
        return
      f = File.from_name(tu, vim.current.buffer.name)
      # libclang columns are 1-based; vim's are 0-based, hence col + 1.
      loc = SourceLocation.from_position(tu, f, line, col + 1)
      cursor = Cursor.from_location(tu, loc)
      if cursor.referenced is not None and loc != cursor.referenced.location:
        loc = cursor.referenced.location
        jumpToLocation(loc.file.name, loc.line, loc.column)
  timer.finish()
def gotoDeclaration():
  """Jump to the declaration/definition of the symbol under the cursor.

  Tries the cursor's get_definition() first, then its referenced cursor,
  jumping to the first candidate whose location differs from the current
  position. NOTE: Python 2 file (print statement).
  """
  global debug
  debug = int(vim.eval("g:clang_debug")) == 1
  params = getCompileParams(vim.current.buffer.name)
  line, col = vim.current.window.cursor
  timer = CodeCompleteTimer(debug, vim.current.buffer.name, line, col, params)
  with workingDir(params['cwd']):
    with libclangLock:
      tu = getCurrentTranslationUnit(params['args'], getCurrentFile(),
                                     vim.current.buffer.name, timer,
                                     update = True)
      if tu is None:
        print "Couldn't get the TranslationUnit"
        return
      f = File.from_name(tu, vim.current.buffer.name)
      # libclang columns are 1-based; vim's are 0-based, hence col + 1.
      loc = SourceLocation.from_position(tu, f, line, col + 1)
      cursor = Cursor.from_location(tu, loc)
      # Prefer the definition; fall back to the referenced declaration.
      defs = [cursor.get_definition(), cursor.referenced]
      for d in defs:
        if d is not None and loc != d.location:
          loc = d.location
          jumpToLocation(loc.file.name, loc.line, loc.column)
          break
  timer.finish()
# Manually extracted from Index.h
# Doing it by hand is long, error prone and horrible, we must find a way
# to do that automatically.
kinds = dict({ \
# Declarations \
1 : 't', # CXCursor_UnexposedDecl (A declaration whose specific kind is not \
# exposed via this interface) \
2 : 't', # CXCursor_StructDecl (A C or C++ struct) \
3 : 't', # CXCursor_UnionDecl (A C or C++ union) \
4 : 't', # CXCursor_ClassDecl (A C++ class) \
5 : 't', # CXCursor_EnumDecl (An enumeration) \
6 : 'm', # CXCursor_FieldDecl (A field (in C) or non-static data member \
# (in C++) in a struct, union, or C++ class) \
7 : 'e', # CXCursor_EnumConstantDecl (An enumerator constant) \
8 : 'f', # CXCursor_FunctionDecl (A function) \
9 : 'v', # CXCursor_VarDecl (A variable) \
10 : 'a', # CXCursor_ParmDecl (A function or method parameter) \
11 : '11', # CXCursor_ObjCInterfaceDecl (An Objective-C @interface) \
12 : '12', # CXCursor_ObjCCategoryDecl (An Objective-C @interface for a \
# category) \
13 : '13', # CXCursor_ObjCProtocolDecl (An Objective-C @protocol declaration) \
14 : '14', # CXCursor_ObjCPropertyDecl (An Objective-C @property declaration) \
15 : '15', # CXCursor_ObjCIvarDecl (An Objective-C instance variable) \
16 : '16', # CXCursor_ObjCInstanceMethodDecl (An Objective-C instance method) \
17 : '17', # CXCursor_ObjCClassMethodDecl (An Objective-C class method) \
18 : '18', # CXCursor_ObjCImplementationDec (An Objective-C @implementation) \
19 : '19', # CXCursor_ObjCCategoryImplDecll (An Objective-C @implementation \
# for a category) \
20 : 't', # CXCursor_TypedefDecl (A typedef) \
21 : 'f', # CXCursor_CXXMethod (A C++ class method) \
22 : 'n', # CXCursor_Namespace (A C++ namespace) \
23 : '23', # CXCursor_LinkageSpec (A linkage specification, e.g. 'extern "C"') \
24 : '+', # CXCursor_Constructor (A C++ constructor) \
25 : '~', # CXCursor_Destructor (A C++ destructor) \
26 : '26', # CXCursor_ConversionFunction (A C++ conversion function) \
27 : 'a', # CXCursor_TemplateTypeParameter (A C++ template type parameter) \
28 : 'a', # CXCursor_NonTypeTemplateParameter (A C++ non-type template \
# parameter) \
29 : 'a', # CXCursor_TemplateTemplateParameter (A C++ template template \
# parameter) \
30 : 'f', # CXCursor_FunctionTemplate (A C++ function template) \
31 : 'p', # CXCursor_ClassTemplate (A C++ class template) \
32 : '32', # CXCursor_ClassTemplatePartialSpecialization (A C++ class template \
# partial specialization) \
33 : 'n', # CXCursor_NamespaceAlias (A C++ namespace alias declaration) \
34 : '34', # CXCursor_UsingDirective (A C++ using directive) \
35 : '35', # CXCursor_UsingDeclaration (A C++ using declaration) \
36 : 't', # CXCursor_TypeAliasDecl (A C++ alias declaration) \
37 : '37', # CXCursor_ObjCSynthesizeDecl (An Objective-C synthesize definition)\
38 : '38', # CXCursor_ObjCDynamicDecl (An Objective-C dynamic definition) \
39 : '39', # CXCursor_CXXAccessSpecifier (An access specifier) \
\
# References \
40 : '40', # CXCursor_ObjCSuperClassRef \
41 : '41', # CXCursor_ObjCProtocolRef \
42 : '42', # CXCursor_ObjCClassRef \
43 : '43', # CXCursor_TypeRef \
44 : '44', # CXCursor_CXXBaseSpecifier \
45 : '45', # CXCursor_TemplateRef (A reference to a class template, function \
# template, template template parameter, or class template partial \
# specialization) \
46 : '46', # CXCursor_NamespaceRef (A reference to a namespace or namespace \
# alias) \
47 : '47', # CXCursor_MemberRef (A reference to a member of a struct, union, \
# or class that occurs in some non-expression context, e.g., a \
# designated initializer) \
48 : '48', # CXCursor_LabelRef (A reference to a labeled statement) \
49 : '49', # CXCursor_OverloadedDeclRef (A reference to a set of overloaded \
# functions or function templates that has not yet been resolved to \
# a specific function or function template) \
50 : '50', # CXCursor_VariableRef \
\
# Error conditions \
#70 : '70', # CXCursor_FirstInvalid \
70 : '70', # CXCursor_InvalidFile \
71 : '71', # CXCursor_NoDeclFound \
72 : 'u', # CXCursor_NotImplemented \
73 : '73', # CXCursor_InvalidCode \
\
# Expressions \
100 : '100', # CXCursor_UnexposedExpr (An expression whose specific kind is \
# not exposed via this interface) \
101 : '101', # CXCursor_DeclRefExpr (An expression that refers to some value \
# declaration, such as a function, varible, or enumerator) \
102 : '102', # CXCursor_MemberRefExpr (An expression that refers to a member \
# of a struct, union, class, Objective-C class, etc) \
103 : '103', # CXCursor_CallExpr (An expression that calls a function) \
104 : '104', # CXCursor_ObjCMessageExpr (An expression that sends a message \
# to an Objective-C object or class) \
105 : '105', # CXCursor_BlockExpr (An expression that represents a block \
# literal) \
106 : '106', # CXCursor_IntegerLiteral (An integer literal) \
107 : '107', # CXCursor_FloatingLiteral (A floating point number literal) \
108 : '108', # CXCursor_ImaginaryLiteral (An imaginary number literal) \
109 : '109', # CXCursor_StringLiteral (A string literal) \
110 : '110', # CXCursor_CharacterLiteral | |
return True
    def stationCount(self):
        """Return the number of stations on the current line."""
        return len(self.line.stations)
    def clearLineStationInfo(self):
        """Clear the line's data (delegates to Line.clear)."""
        self.line.clear()
    def stationExisted(self, name: str)->bool:
        """Return True if a station named `name` exists on the line."""
        return self.line.stationExisted(name)
    def addStationDict(self, info: dict):
        """Append a station record (dict) at the end of the line."""
        self.line.stations.append(info)
def adjustLichengTo0(self):
if self.isEmpty():
return
start_mile = self.line.stations[0]["licheng"]
for st in self.line.stations:
st["licheng"] = st["licheng"] - start_mile
    def trainCount(self):
        """Return the number of trains held in the graph."""
        return len(self._trains)
def rulers(self):
for ruler in self.line.rulers:
yield ruler
    def gapBetween(self, st1: str, st2: str)->float:
        """
        Return the distance between two stations (delegates to Line).
        Added 2020.01.23: in the up direction the counter-mileage is tried.
        Counter-mileage is applied point-to-point: only the two endpoints'
        counter-mileage data are used, intermediate stations are ignored.
        """
        return self.line.gapBetween(st1,st2)
def lineSplited(self):
"""
返回本线是否存在上下行分设站的情况
"""
if self.line.isSplited():
return True
return False
def rulerNameExisted(self, name, ignore: Ruler = None):
for r in self.line.rulers:
if r is not ignore and r.name() == name:
return True
return False
def validRulerName(self)->str:
s = "新标尺0"
i = 0
while self.rulerNameExisted(s):
i+=1
s = f"新标尺{i}"
return s
def circuitNameExisted(self, name, ignore: Circuit = None):
for c in self.circuits():
if c is not ignore and c.name() == name:
return True
return False
def validNewCircuitName(self)->str:
"""
以新建x的格式返回一个有效的新建标尺名称。
"""
s = "新建0"
i = 0
while self.circuitNameExisted(s):
i+=1
s = f"新建{i}"
return s
def isNewRuler(self, ruler: Ruler):
for r in self.line.rulers:
if ruler is r:
return False
return True
    def stationDirection(self, name: str):
        """Return the via-direction flag of station `name` (delegates to Line)."""
        return self.line.stationViaDirection(name)
    def lineStationBusiness(self, name: str, passenger: int, default=False) -> bool:
        """
        Added in 2.0.2. Return whether the station handles business for a
        train. `passenger` is the enum constant defined on Train marking
        whether the train carries passengers. Returns `default` when the
        station is not found; missing per-station flags default to True.
        """
        dct = self.line.stationDictByName(name)
        if dct is None:
            # print("graph::lineStationBusiness: no such station! return",default,name)
            return default
        if passenger == Train.PassengerTrue:
            # print("graph::lineStationBusiness passengerTrue",dct.get('passenger',"无数据"))
            return dct.get('passenger', True)
        else:
            # print("graph::lineStationBusiness passengerFalse",dct.get("freight","无数据"))
            return dct.get("freight", True)
    def formerBothStation(self, name: str):
        """
        Find the nearest station BEFORE `name` (exclusive) that is passed
        in both directions (direction flag == 0x3). Raises if `name` has no
        such predecessor (or is not on the line).
        TODO 2019.02.02: linear algorithm kept for now. Same for the next
        method.
        """
        former_dict = None
        for st in self.line.stations:
            if st["zhanming"] == name:
                return former_dict
            if st["direction"] == 0x3:
                former_dict = st
        raise Exception("No former station")
def latterBothStation(self, name: str):
start = False
for st in self.line.stations:
if st["zhanming"] == name:
start = True
if start and st["direction"] == 0x3:
return st
raise Exception("No latter station")
    def stationLevel(self, name: str):
        """
        Return the station's level ('dengji'). None if the station does not
        exist; if the field is missing, it is SET to 4 (mutates the station
        dict via setdefault) and 4 is returned. Strict lookup: scope
        qualifiers in names are not resolved.
        """
        st = self.line.stationDictByName(name, strict=True)
        if st is None:
            return None
        return st.setdefault('dengji', 4)
def setNotShowTypes(self, not_show):
self.UIConfigData()["not_show_types"] = not_show
for train in self.trains():
if train.type in not_show:
train.setIsShow(False, affect_item=False)
else:
train.setIsShow(True, affect_item=False)
def setDirShow(self, down, show, itemWise=False):
    """Show or hide trains travelling in the given direction.

    :param down: direction being toggled (True = down direction).
    :param show: desired visibility for that direction.
    :param itemWise: when True, organize by the direction of each drawn
        segment (a train may be partially shown); when False, use only the
        train's direction at its first station.
    """
    for train in self.trains():
        if train.type not in self.UIConfigData()['not_show_types']:
            if itemWise:
                # If showing and the train has never been drawn, force it
                # visible so it is not missed.
                if show and not train._itemInfo:
                    train.setIsShow(True, affect_item=False)
                elif show:
                    # Show every train that has at least one segment in the
                    # requested direction.
                    if not train.isShow():
                        for info in train.itemInfo():
                            if info['down'] == down:
                                train.setIsShow(True, affect_item=False)
                                break
                else:  # not show
                    if train.isShow():
                        foundNonThis = False
                        for info in train.itemInfo():
                            if info['down'] != down:
                                foundNonThis = True
                                break
                        if not foundNonThis:
                            # Hide only trains whose segments ALL run in the
                            # toggled direction.
                            train.setIsShow(False, affect_item=False)
            else:
                if train.firstDown() == down:
                    train.setIsShow(show, affect_item=False)
def trainExisted(self, train: Train, ignore: Train = None):
    """Identity-compare *train* against the stored trains (linear scan)."""
    return any(stored is train and stored is not ignore
               for stored in self._trains)
def checiExisted(self, checi: str, ignore: Train = None):
    """Full train-number lookup via ``fullCheciMap`` (constant time; the
    linear scan was replaced on 2019.02.03)."""
    found = self.fullCheciMap.get(checi)
    return found is not None and found is not ignore
def rulerCount(self):
    """Number of rulers configured on the line."""
    return len(self.line.rulers)
def checiType(self, checi: str) -> str:
    """Added in 2.0.2.  Classify a train number against the configured
    ``type_regex`` rules (triples of name, pattern, passenger-flag) and
    return the first matching type name, or ``'其他'`` (other).

    Invalid patterns are reported and skipped.
    """
    for type_name, pattern, _ in self.UIConfigData()['type_regex']:
        try:
            compiled = re.compile(pattern)
        except re.error:
            # Was a bare ``except:``; only regex syntax errors are expected
            # here, and a bare except also swallows KeyboardInterrupt etc.
            print("Invalid Regex! ", pattern)
            continue
        if compiled.match(checi):
            return type_name
    return '其他'
def checiTypePassenger(self, checi: str) -> (str, int):
    """Classify *checi* and report whether it is a passenger train, using
    the ``Train.Passenger*`` constants.

    Returns ``('其他', Train.PassengerAuto)`` when no rule matches.
    Invalid patterns are skipped — consistent with ``checiType``, which
    already guarded against them while this method previously raised.
    """
    for type_name, pattern, is_passenger in self.UIConfigData()['type_regex']:
        try:
            matched = re.match(pattern, checi)
        except re.error:
            continue
        if matched:
            if is_passenger:
                return type_name, Train.PassengerTrue
            return type_name, Train.PassengerFalse
    return '其他', Train.PassengerAuto
def typePassenger(self, tp: str, default=Train.PassengerAuto) -> int:
    """Map a type name to ``Train.PassengerTrue``/``PassengerFalse`` using
    the configured rules; return *default* when the type is unknown."""
    for type_name, _, is_passenger in self.UIConfigData()['type_regex']:
        if type_name == tp:
            return Train.PassengerTrue if is_passenger else Train.PassengerFalse
    return default
def stationTimeTable(self, name: str):
    """Return the scheduled timetable at station *name*, sorted by arrival.

    Returns list<dict> with keys:
        station_name: str   resolved station name
        ddsj / cfsj: datetime   arrival / departure time
        down: bool          direction of the train at this station
        note: str           remark (empty string when absent)
        train: Train        owning train object
        track: str or None  track assignment (added 2020.01.24)
    """
    timetable = []
    for train in self.trains():
        st_dict = train.stationDict(name)
        if st_dict is None:
            continue
        timetable.append({
            "ddsj": st_dict["ddsj"],
            "cfsj": st_dict["cfsj"],
            "station_name": st_dict["zhanming"],
            "down": train.stationDown(st_dict['zhanming'], self),
            "note": st_dict.get("note", ''),
            "train": train,
            "track": st_dict.get("track", None),
        })
    # Stable O(n log n) sort by arrival time; replaces the original
    # hand-written O(n^2) selection sort.
    timetable.sort(key=lambda node: node["ddsj"])
    return timetable
def reverse(self):
    """Mirror the whole diagram (swap the up/down orientation).

    The line-side logic was moved into the Line class on 2020-01-24.
    """
    self.line.reverse()
    # Per-train adjustments: flip direction flags and swap train numbers.
    for train in self._trains:
        # Flip the up/down flag on every drawn segment.
        train.reverseAllItemDown()
        # Swap the direction-specific train numbers (up <-> down).
        train.setCheci(train.fullCheci(), train.upCheci(), train.downCheci())
def downTrainCount(self):
    """Number of trains whose first drawn segment runs in the down direction."""
    return sum(1 for train in self.trains() if train.firstDown() is True)
def upTrainCount(self):
    """Number of trains whose first drawn segment runs in the up direction."""
    return sum(1 for train in self.trains() if train.firstDown() is False)
def loadTrcGraph(self, filename):
    """Import a diagram from the legacy trc file format.

    The file starts with a line-information section, followed by train
    sections separated by ``===Train===`` markers; circuit information
    embedded in the train headers is collected into ``circuit_dict`` and
    resolved at the end.  A line starting with ``-`` terminates parsing.
    """
    self.line.forbid.setDifferent(False)
    self.line.forbid.setShow(True, True)
    inTrainArea = False
    now_list = []
    last_name = None
    circuit_dict = {}
    # Context manager guarantees the handle is closed even on parse errors;
    # the original opened the file without ever closing it.
    with open(filename, encoding='utf-8', errors='ignore') as fp:
        for i, line in enumerate(fp):
            line = line.strip()
            if not line:
                continue
            if not inTrainArea and line == "===Train===":
                inTrainArea = True
            if line[0] == '-':
                break
            # Line-information section.
            if not inTrainArea:
                if line == "***Circuit***":
                    continue
                elif i == 1:
                    self.setLineName(line)
                else:
                    try:
                        splited = line.split(',')
                        st_name = splited[0]
                        self.line.addStation_by_info(splited[0], int(splited[1]), int(splited[2]))
                        if last_name is not None:
                            try:
                                start_str, end_str = splited[9].split('-', 1)
                                # NOTE(review): if the module-level import is
                                # ``import datetime`` this raises and the
                                # forbid entry is silently skipped; it only
                                # works with ``from datetime import datetime``
                                # — confirm the import style of this file.
                                begin = datetime.strptime(start_str, '%H:%M')
                                end = datetime.strptime(end_str, '%H:%M')
                                self.line.forbid.addForbid(last_name, st_name, begin, end)
                            except Exception:
                                # Best-effort: forbid data is optional.
                                pass
                        last_name = st_name
                    except Exception:
                        # Malformed station line — skip it (best-effort import).
                        pass
            # Train section: accumulate lines until the next marker.
            else:
                if line != '===Train===':
                    now_list.append(line)
                else:
                    self._decodeTrcTrain(now_list, circuit_dict)
                    now_list = []
    self._decodeTrcTrain(now_list, circuit_dict)
    self._decodeTrcCircuit(circuit_dict)
    self.setGraphFileName('')
def _decodeTrcTrain(self, now_list: list, circuit_dict: dict):
    """Decode one train's trc lines (without the ``===Train===`` header) and
    add the train to the graph.

    Circuit references found in the header line are accumulated into
    *circuit_dict*, whose structure is::

        Dict<List<Tuple<int, Train>>>
        e.g. {
            "CRH380-2081": [
                (0, Train<G1>),
                (1, Train<G4>),
                ...
            ],
            ...
        }

    A circuit token contains exactly one underscore ``_`` and the part after
    it parses as an integer (the ordering index).
    """
    train = Train(self)
    for i, line in enumerate(now_list):
        if i == 0:
            # Header: full / down / up train numbers, optional circuit token.
            splited = line.split(',')
            train.setCheci(splited[1], splited[2], splited[3])
            if len(splited) >= 5:
                circuit_str = splited[4]
                try:
                    num = int(circuit_str.split('_')[-1])
                    name = circuit_str.split('_')[0]
                except ValueError:
                    # 'NA' and '' mean "no circuit"; anything else is noise.
                    if circuit_str not in ('NA', ''):
                        print("Graph::decodeTrcTrain: Unexpected circuit info:", circuit_str)
                else:
                    circuit_dict.setdefault(name, []).append((num, train))
        elif i == 1:
            # Origin station.
            train.setStartEnd(sfz=line)
        elif i == 2:
            # Terminus station.
            train.setStartEnd(zdz=line)
        else:
            # Timetable row: station, arrival, departure.
            splited = line.split(',')
            train.addStation(splited[0], splited[1], splited[2])
    train.autoTrainType()
    # Trains with an empty timetable are dropped silently.
    if train.timetable:
        self.addTrain(train)
def _decodeTrcCircuit(self, circuit_dict: dict):
    """Resolve the circuit data collected during train decoding into
    Circuit objects, adding trains in ascending order of their index."""
    for name, pairs in circuit_dict.items():
        # Sort on the numeric index only.  Sorting the raw (num, Train)
        # tuples falls back to comparing Train objects when two indices
        # collide, which raises TypeError on unorderable types.
        pairs.sort(key=lambda pair: pair[0])
        circuit = Circuit(self, name)
        for _, train in pairs:
            circuit.addTrain(train)
        self.addCircuit(circuit)
def jointGraph(self, graph, former: bool, reverse: bool, line_only: bool):
    """Join another diagram onto this one.

    :param graph: the other diagram.
    :param former: whether *graph* attaches in front of this diagram.
    :param reverse: whether *graph* must be mirrored first.
    :param line_only: when True, only the lines are joined, not the trains.
    """
    if reverse:
        graph.reverse()
    if not line_only:
        # Merge trains from the other diagram.
        for train_append in graph.trains():
            if self.checiExisted(train_append.fullCheci()):
                # Same train number exists locally: splice the timetables.
                train_main: Train = self.trainFromCheci(train_append.fullCheci())
                train_main.delNonLocal(self)
                train_append.delNonLocal(graph)
                # Direction is decided by this diagram when possible.
                # *down* describes the train near the junction of the lines.
                if former:
                    down = train_main.firstDown()
                else:
                    down = train_main.lastDown()
                if down is None:
                    # Undecidable locally (e.g. a train with a single local
                    # stop) — fall back to the other diagram's direction.
                    if former:
                        down = train_append.lastDown()
                    else:
                        down = train_append.firstDown()
                if down is None:
                    # Still undecidable: default to the down direction.
                    print("cannot judge down. use default.", train_main.fullCheci())
                    down = True
                # Whether the appended segment precedes the local one.
                train_former = not (down ^ former)
                train_main.jointTrain(train_append, train_former, graph)  # junction stop already merged
            else:
                self.addTrain(train_append)
    # Join the physical lines.
    self.line.jointLine(graph.line, former, reverse)
def resetAllItems(self):
    """Detach every train from its drawn graphics item."""
    for train in self.trains():
        train.setItem(None)
def stationMile(self, name: str):
    """Mileage ('licheng') of station *name*; -1 when it does not exist.

    Supports the domain-resolution syntax (since 2019.02.23); the linear
    scan was removed 2019.02.03.
    """
    info = self.line.stationDictByName(name)
    return -1 if info is None else info["licheng"]
def adjacentStation(self, name: str, ignore: list):
    """Return the name of a station adjacent to *name* that is not listed
    in *ignore* (previous station preferred), or None when neither
    neighbour qualifies."""
    index = self.stationIndex(name)
    if index > 0:
        if self.line.stations[index - 1]['zhanming'] not in ignore:
            # Fixed 2019.02.23: the 'zhanming' key was missing, which made
            # the ``not in`` test meaningless.
            return self.line.stations[index - 1]["zhanming"]
    if index < len(self.line.stations) - 1:
        if self.line.stations[index + 1]['zhanming'] not in ignore:
            return self.line.stations[index + 1]["zhanming"]
    print("no adj")
    return None
def stationIndex(self, name: str):
    """Index of station *name* on the line.

    Constant-time path added 2019.07.12 via the line's ``numberMap``;
    falls back to the brute-force scan when the map is missing or the
    name is not found.  The name is expected to exist.
    """
    if self.line.numberMap is None:
        return self.stationIndex_bf(name)
    else:
        try:
            return self.line.numberMap[self.nameMapToLine(name)]
        except KeyError:
            print("Graph::stationIndex: Unexpected station name:", name)
            return self.stationIndex_bf(name)
def stationIndex_bf(self, name: str):
    """Brute-force index lookup; kept as a separate function so that its
    use can be counted.  Raises StationNotInLineException when absent."""
    hit = next((idx for idx, station in enumerate(self.line.stations)
                if stationEqual(station["zhanming"], name)), None)
    if hit is None:
        raise StationNotInLineException(name)
    return hit
def stationByDict(self, name: str, strict=False) -> LineStation:
    """Return the station dict for *name* (the function name is a known
    historical misnomer).  Supports the domain-resolution syntax; the
    linear scan was removed 2019.02.02."""
    return self.line.stationDictByName(name, strict)
def passedStationCount(self, st1: str, st2: str, down: bool) -> int:
"""
检查以st1为发站,st2为到站,方向为down的区间内有多少个站。2.0新增。
"""
s1 = self.stationIndex(st1)
s2 = self.stationIndex(st2)
dir_ = 0b1 if down else 0b10
cnt = 0
t1 = min((s1, s2))
t2 = max((s1, s2))
# print("t1 t2",t1,t2)
for i in range(t1 + 1, t2):
dct = self.line.stationDictByIndex(i)
if dir_ & dct.get('direction', 0b11):
cnt | |
on a node in the node pool.
name: The name of this resource. Node pool names are formatted as
`projects//locations//awsClusters//awsNodePools/`. For more details on
Google Cloud resource names, see [Resource
Names](https://cloud.google.com/apis/design/resource_names)
reconciling: Output only. If set, there are currently changes in flight to
the node pool.
state: Output only. The lifecycle state of the node pool.
subnetId: Required. The subnet where the node pool node run.
uid: Output only. A globally unique identifier for the node pool.
updateTime: Output only. The time at which this node pool was last
updated.
version: Required. The Kubernetes version to run on this node pool (e.g.
`1.19.10-gke.1000`). You can list all supported versions on a given
Google Cloud region by calling GetAwsServerConfig.
"""
class StateValueValuesEnum(_messages.Enum):
    r"""Output only. The lifecycle state of the node pool.

    Values:
      STATE_UNSPECIFIED: Not set.
      PROVISIONING: The PROVISIONING state indicates the node pool is being
        created.
      RUNNING: The RUNNING state indicates the node pool has been created and
        is fully usable.
      RECONCILING: The RECONCILING state indicates that the node pool is being
        reconciled.
      STOPPING: The STOPPING state indicates the node pool is being deleted.
      ERROR: The ERROR state indicates the node pool is in a broken
        unrecoverable state.
      DEGRADED: The DEGRADED state indicates the node pool requires user
        action to restore full functionality.
    """
    # Generated code: numbers mirror the proto enum values — do not renumber.
    STATE_UNSPECIFIED = 0
    PROVISIONING = 1
    RUNNING = 2
    RECONCILING = 3
    STOPPING = 4
    ERROR = 5
    DEGRADED = 6
# Decorator lets unknown JSON keys round-trip through additionalProperties.
@encoding.MapUnrecognizedFields('additionalProperties')
class AnnotationsValue(_messages.Message):
    r"""Optional. Annotations on the node pool. This field has the same
    restrictions as Kubernetes annotations. The total size of all keys and
    values combined is limited to 256k. Key can have 2 segments: prefix
    (optional) and name (required), separated by a slash (/). Prefix must be a
    DNS subdomain. Name must be 63 characters or less, begin and end with
    alphanumerics, with dashes (-), underscores (_), dots (.), and
    alphanumerics between.

    Messages:
      AdditionalProperty: An additional property for a AnnotationsValue
        object.

    Fields:
      additionalProperties: Additional properties of type AnnotationsValue
    """

    class AdditionalProperty(_messages.Message):
        r"""An additional property for a AnnotationsValue object.

        Fields:
          key: Name of the additional property.
          value: A string attribute.
        """
        key = _messages.StringField(1)
        value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
# Generated proto message fields: the numeric arguments are wire-format
# tags and must not be changed by hand.
annotations = _messages.MessageField('AnnotationsValue', 1)
autoscaling = _messages.MessageField('GoogleCloudGkemulticloudV1AwsNodePoolAutoscaling', 2)
config = _messages.MessageField('GoogleCloudGkemulticloudV1AwsNodeConfig', 3)
createTime = _messages.StringField(4)
etag = _messages.StringField(5)
maxPodsConstraint = _messages.MessageField('GoogleCloudGkemulticloudV1MaxPodsConstraint', 6)
name = _messages.StringField(7)
reconciling = _messages.BooleanField(8)
state = _messages.EnumField('StateValueValuesEnum', 9)
subnetId = _messages.StringField(10)
uid = _messages.StringField(11)
updateTime = _messages.StringField(12)
version = _messages.StringField(13)
class GoogleCloudGkemulticloudV1AwsNodePoolAutoscaling(_messages.Message):
    r"""AwsNodePoolAutoscaling contains information required by cluster
    autoscaler to adjust the size of the node pool to the current cluster usage.

    Fields:
      maxNodeCount: Required. Maximum number of nodes in the node pool. Must be
        greater than or equal to min_node_count and less than or equal to 50.
      minNodeCount: Required. Minimum number of nodes in the node pool. Must be
        greater than or equal to 1 and less than or equal to max_node_count.
    """
    # Generated code: field numbers are proto wire tags — do not edit by hand.
    maxNodeCount = _messages.IntegerField(1, variant=_messages.Variant.INT32)
    minNodeCount = _messages.IntegerField(2, variant=_messages.Variant.INT32)
class GoogleCloudGkemulticloudV1AwsOpenIdConfig(_messages.Message):
    r"""AwsOpenIdConfig is an OIDC discovery document for the cluster. See the
    OpenID Connect Discovery 1.0 specification for details.

    Fields:
      claims_supported: Supported claims.
      grant_types: Supported grant types.
      id_token_signing_alg_values_supported: supported ID Token signing
        Algorithms.
      issuer: OIDC Issuer.
      jwks_uri: JSON Web Key uri.
      response_types_supported: Supported response types.
      subject_types_supported: Supported subject types.
    """
    # Generated code: snake_case names mirror the OIDC discovery document keys.
    claims_supported = _messages.StringField(1, repeated=True)
    grant_types = _messages.StringField(2, repeated=True)
    id_token_signing_alg_values_supported = _messages.StringField(3, repeated=True)
    issuer = _messages.StringField(4)
    jwks_uri = _messages.StringField(5)
    response_types_supported = _messages.StringField(6, repeated=True)
    subject_types_supported = _messages.StringField(7, repeated=True)
class GoogleCloudGkemulticloudV1AwsProxyConfig(_messages.Message):
    r"""Details of a proxy config stored in AWS Secret Manager.

    Fields:
      secretArn: The ARN of the AWS Secret Manager secret that contains the
        HTTP(S) proxy configuration.
      secretVersion: The version string of the AWS Secret Manager secret that
        contains the HTTP(S) proxy configuration.
    """
    # Generated code: field numbers are proto wire tags — do not edit by hand.
    secretArn = _messages.StringField(1)
    secretVersion = _messages.StringField(2)
class GoogleCloudGkemulticloudV1AwsServerConfig(_messages.Message):
    r"""AwsServerConfig is the configuration of GKE cluster on AWS.

    Fields:
      name: The resource name of the config.
      supportedAwsRegions: The list of supported AWS regions.
      validVersions: List of valid Kubernetes versions.
    """
    # Generated code: field numbers are proto wire tags — do not edit by hand.
    name = _messages.StringField(1)
    supportedAwsRegions = _messages.StringField(2, repeated=True)
    validVersions = _messages.MessageField('GoogleCloudGkemulticloudV1AwsK8sVersionInfo', 3, repeated=True)
class GoogleCloudGkemulticloudV1AwsServicesAuthentication(_messages.Message):
    r"""Authentication configuration for the management of AWS resources.

    Fields:
      roleArn: Required. The Amazon Resource Name (ARN) of the role that the
        Anthos Multi-Cloud API will assume when managing AWS resources on your
        account.
      roleSessionName: Optional. An identifier for the assumed role session.
        When unspecified, it defaults to `multicloud-service-agent`.
    """
    # Generated code: field numbers are proto wire tags — do not edit by hand.
    roleArn = _messages.StringField(1)
    roleSessionName = _messages.StringField(2)
class GoogleCloudGkemulticloudV1AwsSshConfig(_messages.Message):
    r"""SSH configuration for AWS resources.

    Fields:
      ec2KeyPair: Required. The name of the EC2 key pair used to login into
        cluster machines.
    """
    # Generated code: field number is the proto wire tag — do not edit by hand.
    ec2KeyPair = _messages.StringField(1)
class GoogleCloudGkemulticloudV1AwsVolumeTemplate(_messages.Message):
    r"""Configuration template for AWS EBS volumes.

    Enums:
      VolumeTypeValueValuesEnum: Optional. Type of the EBS volume. When
        unspecified, it defaults to GP2 volume.

    Fields:
      iops: Optional. The number of I/O operations per second (IOPS) to
        provision for GP3 volume.
      kmsKeyArn: Optional. The Amazon Resource Name (ARN) of the Customer
        Managed Key (CMK) used to encrypt AWS EBS volumes. If not specified, the
        default Amazon managed key associated to the AWS region where this
        cluster runs will be used.
      sizeGib: Optional. The size of the volume, in GiBs. When unspecified, a
        default value is provided. See the specific reference in the parent
        resource.
      volumeType: Optional. Type of the EBS volume. When unspecified, it
        defaults to GP2 volume.
    """

    class VolumeTypeValueValuesEnum(_messages.Enum):
        r"""Optional. Type of the EBS volume. When unspecified, it defaults to GP2
        volume.

        Values:
          VOLUME_TYPE_UNSPECIFIED: Not set.
          GP2: GP2 (General Purpose SSD volume type).
          GP3: GP3 (General Purpose SSD volume type).
        """
        VOLUME_TYPE_UNSPECIFIED = 0
        GP2 = 1
        GP3 = 2

    # Generated code: field numbers are proto wire tags — do not edit by hand.
    iops = _messages.IntegerField(1, variant=_messages.Variant.INT32)
    kmsKeyArn = _messages.StringField(2)
    sizeGib = _messages.IntegerField(3, variant=_messages.Variant.INT32)
    volumeType = _messages.EnumField('VolumeTypeValueValuesEnum', 4)
class GoogleCloudGkemulticloudV1AzureAuthorization(_messages.Message):
    r"""Configuration related to the cluster RBAC settings.

    Fields:
      adminUsers: Required. Users that can perform operations as a cluster
        admin. A new ClusterRoleBinding will be created to grant the cluster-
        admin ClusterRole to the users. At most one user can be specified. For
        more info on RBAC, see https://kubernetes.io/docs/reference/access-
        authn-authz/rbac/#user-facing-roles
    """
    # Generated code: field number is the proto wire tag — do not edit by hand.
    adminUsers = _messages.MessageField('GoogleCloudGkemulticloudV1AzureClusterUser', 1, repeated=True)
class GoogleCloudGkemulticloudV1AzureClient(_messages.Message):
r"""`AzureClient` resources hold client authentication information needed by
the Anthos Multi-Cloud API to manage Azure resources on your Azure
subscription. When an AzureCluster is created, an `AzureClient` resource
needs to be provided and all operations on Azure resources associated to
that cluster will authenticate to Azure services using the given client.
`AzureClient` resources are immutable and cannot be modified upon creation.
Each `AzureClient` resource is bound to a single Azure Active Directory
Application and tenant.
Messages:
AnnotationsValue: Optional. Annotations on the resource. This field has
the same restrictions as Kubernetes annotations. The total size of all
keys and values combined is limited to 256k. Keys can have 2 segments:
prefix (optional) and name (required), separated by a slash (/). Prefix
must be a DNS subdomain. Name must be 63 characters or less, begin and
end with alphanumerics, with dashes (-), underscores (_), dots (.), and
alphanumerics between.
Fields:
annotations: Optional. Annotations on the resource. This field has the
same restrictions as Kubernetes annotations. The total size of all keys
and values combined is limited to 256k. Keys can have 2 segments: prefix
(optional) and name (required), separated by a slash (/). Prefix must be
a DNS subdomain. Name must be 63 characters or less, begin and end with
alphanumerics, with dashes (-), underscores (_), dots (.), and
alphanumerics between.
applicationId: Required. The Azure Active Directory Application ID.
createTime: Output only. The time at which this resource was created.
name: The name of this resource. `AzureClient` resource names are
formatted as `projects//locations//azureClients/`. See [Resource
Names](https://cloud.google.com/apis/design/resource_names) for more
details on Google Cloud resource names.
pemCertificate: Output only. The PEM encoded x509 certificate.
tenantId: Required. The Azure Active Directory Tenant ID.
uid: Output only. A globally unique identifier for the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class AnnotationsValue(_messages.Message):
r"""Optional. Annotations on the resource. This field has the same
restrictions as Kubernetes annotations. The total size of all keys and
values combined | |
"""
Python Tests for the ETCD Model Base Class
MIT License
(C) Copyright [2020] Hewlett Packard Enterprise Development LP
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE."""
from time import sleep
from queue import Empty
from etcd3_model import (
create_instance,
Etcd3Attr,
Etcd3Model,
DELETING,
UPDATING,
READY
)
# Shared (mocked) etcd3 instance used by every test model class below.
ETCD = create_instance()
def test_basic_etcd3_locking():
    """Make sure the underlying locking for etcd3 works as expected. This
    is mostly here to test the mocking of using a raw etcd3 lock as a
    context manager because the etcd3_model code won't test that.
    """
    # A lock is held while inside its managed context and released on exit.
    with ETCD.lock("foobar") as lock_handle:
        assert lock_handle.is_acquired()
    assert not lock_handle.is_acquired()

    # A lock whose TTL expires mid-context drops before the context ends.
    with ETCD.lock("foo", ttl=1) as lock_handle:
        sleep(2)
        assert not lock_handle.is_acquired()
    assert not lock_handle.is_acquired()
def test_basic_instantiation():
    """Create a basic Etcd3Model derived class instance using a legal
    class definition.
    """
    class MyModel(Etcd3Model):
        """ Test Model"""
        etcd_instance = ETCD
        model_prefix = "/testing/etcd3model/%s " % ("MyModel")

        # The Object ID used to locate each instance
        my_model_id = Etcd3Attr(is_object_id=True)

        # Some fields...
        stuff = Etcd3Attr(default="")
        more_stuff = Etcd3Attr(default="")
        even_more_stuff = Etcd3Attr(default=0)

    # Constructor keyword arguments override the attribute defaults.
    my_model = MyModel(stuff="here is some stuff",
                       more_stuff="here is some more stuff",
                       even_more_stuff="here is even more stuff")
    assert my_model.my_model_id
    assert my_model.stuff == "here is some stuff"
    assert my_model.more_stuff == "here is some more stuff"
    assert my_model.even_more_stuff == "here is even more stuff"
    assert my_model.get_id() == my_model.my_model_id

    # Store it to ETCD
    my_model.put()

    # Get it back again in a different instance and compare the
    # instances
    retrieved = MyModel.get(my_model.my_model_id)
    assert retrieved is not None
    assert retrieved.my_model_id == my_model.my_model_id
    assert retrieved.stuff == my_model.stuff
    assert retrieved.more_stuff == my_model.more_stuff
    assert retrieved.even_more_stuff == my_model.even_more_stuff
    assert retrieved.get_id() == my_model.my_model_id

    # Get all MyModel instances and make sure ours is (the only one)
    # there
    all_models = MyModel.get_all()
    assert isinstance(all_models, type([]))
    assert len(all_models) == 1
    retrieved = all_models[0]
    assert retrieved is not None
    assert retrieved.my_model_id == my_model.my_model_id
    assert retrieved.stuff == my_model.stuff
    assert retrieved.more_stuff == my_model.more_stuff
    assert retrieved.even_more_stuff == my_model.even_more_stuff

    # Post a message to it and make sure the message gets posted
    msg = "hello world!"
    my_model.post_message(msg)
    found = False
    for message in my_model.messages:
        if msg in message:
            found = True
    assert found
    retrieved = MyModel.get(my_model.my_model_id)
    found = False
    for message in retrieved.messages:
        if msg in message:
            found = True
    assert found

    # Post a one-time message several times and make sure it only
    # shows up once
    msg = "should only appear once"
    my_model.post_message_once(msg)
    my_model.post_message_once(msg)
    my_model.post_message_once(msg)
    my_model.post_message_once(msg)
    my_model.post_message_once(msg)
    found = 0
    for message in my_model.messages:
        if msg in message:
            found += 1
    assert found == 1
    retrieved = MyModel.get(my_model.my_model_id)
    found = 0
    for message in retrieved.messages:
        if msg in message:
            found += 1
    assert found == 1

    # Try some locking to make sure the locking mechanisms work
    with my_model.lock(ttl=2) as my_lock:
        assert my_lock.is_acquired()
        # Try a nested non-blockng lock and show that it cannot be acquired
        # and that it returns without locking.
        with my_model.lock(timeout=0) as my_second_lock:
            assert not my_second_lock.is_acquired()
        # Make sure adding the second lock didn't break the first one...
        assert my_lock.is_acquired()
    # Make sure coming out of the managed context, the lock is released
    assert not my_lock.is_acquired()

    # Try some more locking with ttl exhaustion this one is a bit
    # weird because the lock times out in the middle of the managed
    # context so it is not held at the end.
    with my_model.lock(ttl=2) as my_lock:
        assert my_lock.is_acquired()
        with my_model.lock(ttl=2) as my_second_lock:
            assert not my_lock.is_acquired()
            assert my_second_lock.is_acquired()
        assert not my_second_lock.is_acquired()
    assert not my_lock.is_acquired()

    # Set it to READY and make sure the messages go away and the state
    # goes to READY.  (Not done under lock — the locking scenarios above
    # already cover lock behavior.)
    my_model.set_ready()
    assert my_model.state == READY
    assert my_model.messages == []
    retrieved = MyModel.get(my_model.my_model_id)
    assert retrieved.state == READY
    assert retrieved.messages == []

    # Delete it and make sure its state goes to DELETING
    msg = "Goodbye cruel world"
    my_model.delete(msg)
    assert my_model.state == DELETING
    retrieved = MyModel.get(my_model.my_model_id)
    assert retrieved.state == DELETING
    found = False
    for message in retrieved.messages:
        if msg in message:
            found = True
    assert found

    # Remove it and make sure it is gone
    my_model.remove()
    retrieved = MyModel.get(my_model.my_model_id)
    assert retrieved is None

    # And, for good measure, make sure it doesn't show up in the list
    # either
    all_models = MyModel.get_all()
    assert isinstance(all_models, type([]))
    assert all_models == []
# pylint: disable=redefined-outer-name
def test_field_defaults():
    """Verify that Etcd3Attr defaults are applied when a model is
    instantiated with no explicit field values.  (Docstring previously
    copy-pasted from the object-id generator test.)
    """
    class MyModel(Etcd3Model):
        """ Test Model"""
        etcd_instance = ETCD
        model_prefix = "/testing/etcd3model/%s " % ("MyModel")

        # The Object ID used to locate each instance
        my_model_id = Etcd3Attr(is_object_id=True)

        # Some fields...
        stuff = Etcd3Attr(default="default stuff")
        more_stuff = Etcd3Attr(default="more default stuff")
        even_more_stuff = Etcd3Attr(default="even more default stuff")

    my_model = MyModel()
    assert my_model.my_model_id
    assert my_model.stuff == "default stuff"
    assert my_model.more_stuff == "more default stuff"
    assert my_model.even_more_stuff == "even more default stuff"
# pylint: disable=redefined-outer-name
def test_object_id_default():
    """Test defining a model with a non-standard object id generator and
    show that the generator generates the expected sequence of
    object-ids.
    """
    next_obj_id = 0

    def object_id_gen():
        """Counter-based object-id generator: yields 0, 1, 2, ... in turn."""
        nonlocal next_obj_id
        current = next_obj_id
        next_obj_id += 1
        return current

    class MyModel(Etcd3Model):
        """ Test Model"""
        etcd_instance = ETCD
        model_prefix = "/testing/etcd3model/%s " % ("MyModel")

        # The Object ID used to locate each instance
        my_model_id = Etcd3Attr(is_object_id=True, default=object_id_gen)

        # Some fields...
        stuff = Etcd3Attr(default="")
        more_stuff = Etcd3Attr(default="")
        even_more_stuff = Etcd3Attr(default=0)

    # Each new instance must receive the next id in sequence.
    for expected_id in range(10):
        instance = MyModel(stuff="here is some stuff",
                           more_stuff="here is some more stuff",
                           even_more_stuff="here is even more stuff")
        assert instance.my_model_id == expected_id
# pylint: disable=redefined-outer-name
def test_watch_and_learn():
"""Test watching and learning of ETCD model objects. Verify that
watching an object of a given type causes put events to flow down
the queue associated with the object class but not down any queue
associated with another object class. Also show that delete
events are ignored as are put events on READY objects. Finally,
show that the 'learn' method on a class causes all objects of that
class to show up on the queue.
"""
class MyWatchModel(Etcd3Model):
""" Test Model"""
etcd_instance = ETCD
model_prefix = "/testing/etcd3model/%s" % ("MyWatchModel")
# The Object ID used to locate each instance
my_model_id = Etcd3Attr(is_object_id=True)
class MyOtherModel(Etcd3Model):
""" Test Model"""
etcd_instance = ETCD
model_prefix = "/testing/etcd3model/%s" % ("MyOtherModel")
# The Object ID used to locate each instance
my_model_id = Etcd3Attr(is_object_id=True)
# Set up watching on MyWatchModel()
queue = MyWatchModel.watch()
second_queue = MyWatchModel.watch()
# Create a bunch of MyWatchModel instances
instances = [MyWatchModel() for i in range(0, 5)]
# Actually put the new objects into ETCD, they should flow down
# 'queue' as they are created.
for instance in instances:
instance.put()
# Set up watching on MyOtherModel
other_queue = MyOtherModel.watch()
# Create a bunch of MyOtherModel instances, these should flow down
# 'other_queue' as they are created.
other_instances = [MyOtherModel() for i in range(0, 5)]
# Actually put the new objects into ETCD, they should flow down
# 'other_queue' as they are created.
for instance in other_instances:
instance.put()
# Check that | |
the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_volume(
    self,
    request: Union[baremetalsolution.UpdateVolumeRequest, dict] = None,
    *,
    volume: baremetalsolution.Volume = None,
    update_mask: field_mask_pb2.FieldMask = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
    r"""Update details of a single storage volume.

    .. code-block:: python

        from google.cloud import bare_metal_solution_v2

        async def sample_update_volume():
            # Create a client
            client = bare_metal_solution_v2.BareMetalSolutionAsyncClient()

            # Initialize request argument(s)
            request = bare_metal_solution_v2.UpdateVolumeRequest(
            )

            # Make the request
            operation = client.update_volume(request=request)

            print("Waiting for operation to complete...")

            response = await operation.result()

            # Handle the response
            print(response)

    Args:
        request (Union[google.cloud.bare_metal_solution_v2.types.UpdateVolumeRequest, dict]):
            The request object. Message for updating a volume.
        volume (:class:`google.cloud.bare_metal_solution_v2.types.Volume`):
            Required. The volume to update.
            The ``name`` field is used to identify the volume to
            update. Format:
            projects/{project}/locations/{location}/volumes/{volume}

            This corresponds to the ``volume`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
            The list of fields to update. The only currently
            supported fields are: ``snapshot_auto_delete_behavior``
            ``snapshot_schedule_policy_name``

            This corresponds to the ``update_mask`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation_async.AsyncOperation:
            An object representing a long-running operation.

            The result type for the operation will be
            :class:`google.cloud.bare_metal_solution_v2.types.Volume`
            A storage volume.
    """
    # Create or coerce a protobuf request object.
    # Quick check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    # (``request`` and the flattened ``volume``/``update_mask`` arguments are
    # mutually exclusive by gapic convention.)
    has_flattened_params = any([volume, update_mask])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    request = baremetalsolution.UpdateVolumeRequest(request)

    # If we have keyword arguments corresponding to fields on the
    # request, apply these.
    if volume is not None:
        request.volume = volume
    if update_mask is not None:
        request.update_mask = update_mask

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.update_volume,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Certain fields should be provided within the metadata header;
    # add these here.  The routing header tells the backend which resource
    # the request targets.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("volume.name", request.volume.name),)
        ),
    )

    # Send the request.
    response = await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Wrap the response in an operation future so callers can await the
    # long-running operation's final Volume result.
    response = operation_async.from_gapic(
        response,
        self._client._transport.operations_client,
        baremetalsolution.Volume,
        metadata_type=baremetalsolution.OperationMetadata,
    )

    # Done; return the response.
    return response
async def list_networks(
self,
request: Union[baremetalsolution.ListNetworksRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListNetworksAsyncPager:
r"""List network in a given project and location.
.. code-block:: python
from google.cloud import bare_metal_solution_v2
async def sample_list_networks():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionAsyncClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.ListNetworksRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_networks(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.ListNetworksRequest, dict]):
The request object. Message for requesting a list of
networks.
parent (:class:`str`):
Required. Parent value for
ListNetworksRequest.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.services.bare_metal_solution.pagers.ListNetworksAsyncPager:
Response message containing the list
of networks.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = baremetalsolution.ListNetworksRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_networks,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListNetworksAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def get_network(
self,
request: Union[baremetalsolution.GetNetworkRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> baremetalsolution.Network:
r"""Get details of a single network.
.. code-block:: python
from google.cloud import bare_metal_solution_v2
async def sample_get_network():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionAsyncClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.GetNetworkRequest(
name="name_value",
)
# Make the request
response = await client.get_network(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.GetNetworkRequest, dict]):
The request object. Message for requesting network
information.
name (:class:`str`):
Required. Name of the resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.types.Network:
A Network.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = baremetalsolution.GetNetworkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_network,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_snapshot_schedule_policies(
self,
request: Union[
baremetalsolution.ListSnapshotSchedulePoliciesRequest, dict
] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSnapshotSchedulePoliciesAsyncPager:
r"""List snapshot schedule policies in a given project
and location.
.. code-block:: python
from google.cloud import bare_metal_solution_v2
async def sample_list_snapshot_schedule_policies():
# Create a client
client = bare_metal_solution_v2.BareMetalSolutionAsyncClient()
# Initialize request argument(s)
request = bare_metal_solution_v2.ListSnapshotSchedulePoliciesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_snapshot_schedule_policies(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Union[google.cloud.bare_metal_solution_v2.types.ListSnapshotSchedulePoliciesRequest, dict]):
The request object. Message for requesting a list of
snapshot schedule policies.
parent (:class:`str`):
Required. The parent project
containing the Snapshot Schedule
Policies.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bare_metal_solution_v2.services.bare_metal_solution.pagers.ListSnapshotSchedulePoliciesAsyncPager:
Response message containing the list
of snapshot schedule policies.
Iterating over this object will yield
results and resolve additional pages
| |
import gym
from gym.utils import seeding
import numpy as np
import pybullet as pb
import pybullet_data
import os
import yaml
import time
import matplotlib.pyplot as plt
class AslaugBaseEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, version, params, gui=False, init_seed=None,
free_cam=False, easy_bookcases=False):
self.free_cam = free_cam
self.version = version
self.gui = gui
self.done_info = None
if params is None:
print("No env params specified, using default.")
with open("params.yaml") as f:
params_all = yaml.load(f)
params = params_all["environment_params"]
params = self.numpyfy_dict(params)
self.p = params
self.viewer = None
self.fixed_joint_states = self.p["joints"]["init_states"]
self.tau = self.p["world"]["tau"]
self.metadata["video.frames_per_second"] = int(round(1.0/self.tau))
self.seed(init_seed)
self.n_joints = len(self.p["joints"]["joint_names"])
self.n_links = len(self.p["joints"]["link_names"])
self.timeout_steps = (self.p["world"]["timeout"]
/ self.p["world"]["tau"])
self.step_no = 0
self.valid_buffer_scan = False
self.valid_line_buffer_scan = False
# Set up simulation
self.setup_simulation(gui=gui, easy_bookcases=easy_bookcases)
self.setup_action_observation_spaces()
    def step(self, action_d):
        '''
        Executes one step.

        Applies the (possibly discretized) acceleration action to the
        mobile base and arm joints, clips the resulting velocities to
        their magnitude limits, advances the physics simulation by one
        time step and returns the usual gym transition tuple.

        Params:
            action_d: Agent action. Discrete indices when
                action_discretization > 0, otherwise continuous
                accelerations, optionally followed by two stop flags.
        Returns:
            tuple: (observation, reward, done, info) as in gym.Env.step.
        '''
        t0=time.time()
        self.step_no += 1

        # Extract current state
        state_c = self.state
        mb_vel_c_r = state_c["base_vel"]
        joint_vel_c = state_c["joint_vel"]

        # Obtain actions
        self.action_d = action_d
        joint_actions = np.zeros(7)
        if self.p["world"]["action_discretization"] > 0:
            # Discrete mode: indices pick accelerations out of self.actions.
            mb_actions = np.choose(action_d[:3], self.actions[:, :3])
            act_joint_actions = np.choose(action_d[3:], self.actions[:, 3:])
            joint_actions[self.actuator_selection] = act_joint_actions
        else:
            # Continuous mode: the action vector holds accelerations directly.
            mb_actions = action_d[:3]
            lim_up = self.n_joints + 3
            joint_actions[self.actuator_selection] = action_d[3:lim_up]
            if self.p["world"]["use_stop_action"]:
                stop_base = action_d[lim_up] > 0.5
                stop_arm = action_d[lim_up+1] > 0.5
                if stop_base:
                    # Brake: counteract current base velocity, limited by
                    # the allowed acceleration range.
                    mb_actions[:3] = np.clip(-mb_vel_c_r,
                                             self.action_space.low[:3],
                                             self.action_space.high[:3])
                if stop_arm:
                    joint_actions[self.actuator_selection] = (
                        np.clip(-joint_vel_c[self.actuator_selection],
                                self.action_space.low[3:lim_up],
                                self.action_space.high[3:lim_up])
                    )

        # Add noise to base accelerations (multiplicative, mean 1)
        std_lin = self.p["base"]["std_acc_lin"]
        std_ang = self.p["base"]["std_acc_ang"]
        mb_noise_fac_lin = self.np_random.normal(1, std_lin, 2)
        mb_noise_fac_ang = self.np_random.normal(1, std_ang, 1)
        mb_actions[0:2] *= mb_noise_fac_lin
        mb_actions[2:3] *= mb_noise_fac_ang

        # Add noise to joint accelerations
        j_std = self.p["joints"]["std_acc"]
        joint_noise_fac = self.np_random.normal(1, j_std, joint_actions.shape)
        joint_actions *= joint_noise_fac

        # Calculate new velocities and clip limits
        mb_vel_n_r = mb_vel_c_r + mb_actions
        mb_vel_abs_lin = np.linalg.norm(mb_vel_n_r[0:2])
        mb_vel_abs_ang = np.linalg.norm(mb_vel_n_r[2])
        if mb_vel_abs_lin > 0.0:
            # Rescale to keep the linear speed within its magnitude limit.
            cut_vel = min(mb_vel_abs_lin, self.p['base']['vel_mag_lin'])
            mb_vel_n_r[0:2] = mb_vel_n_r[0:2] / mb_vel_abs_lin * cut_vel
        if mb_vel_abs_ang > 0.0:
            cut_vel = min(mb_vel_abs_ang, self.p['base']['vel_mag_ang'])
            mb_vel_n_r[2] = mb_vel_n_r[2] / mb_vel_abs_ang * cut_vel
        joint_vel_n = np.clip(joint_vel_c + joint_actions,
                              -self.p["joints"]["vel_mag"],
                              +self.p["joints"]["vel_mag"])

        # Apply new velocity commands to robot
        self.set_velocities(mb_vel_n_r, joint_vel_n)

        # Ensure that fixed joints do not move at all
        for i in range(len(self.actuator_selection)):
            if not self.actuator_selection[i]:
                pb.resetJointState(self.robotId, self.joint_mapping[i],
                                   self.fixed_joint_states[i], 0.0,
                                   self.clientId)

        # Advance humans; respawn finished ones on a fresh random
        # start/end pair near the setpoint.
        for human in self.humans:
            human_done = human.step()
            if human_done:
                h_s_x = self.np_random.uniform(self.sp_init_pos[0]-7.5, self.sp_init_pos[0]+7.5)
                h_s_y = self.np_random.uniform(-0.5, self.corridor_width+0.5)
                h_e_x = self.np_random.uniform(self.sp_init_pos[0]-7.5, self.sp_init_pos[0]+7.5)
                h_e_y = self.np_random.uniform(-0.5, self.corridor_width+0.5)
                human.set_start_end([h_s_x, h_s_y], [h_e_x, h_e_y])
                human.setEnabled(self.np_random.uniform() <= self.p['world']['p_spawn_human'])

        # Execute one step in simulation
        pb.stepSimulation(self.clientId)
        # Cached lidar scans are stale after stepping the simulation.
        self.valid_buffer_scan = False
        self.valid_line_buffer_scan = False

        # Update internal state
        self.state = {"base_vel": mb_vel_n_r, "joint_vel": joint_vel_n}

        # Calculate reward
        reward, done, info = self.calculate_reward()
        if done:
            self.done_info = info
        else:
            self.done_info = None

        # Obtain observation
        obs = self.calculate_observation()

        #print(time.time()-t0)
        return obs, reward, done, info
def render(self, mode='human', w=1280, h=720):
'''
Renders the environment. Currently does nothing.
'''
if mode == 'rgb_array' or mode == 'human_fast' or not self.free_cam:
camDistance = 4
dis, _ = self.calculate_goal_distance()
x1, y1 = 0.5, 1.5
x2, y2 = 2.0, 4.0
f = lambda x: min(y2, max(y1, (y2-y1)/(x2-x1)*x+y1-(y2-y1)/(x2-x1)*x1))
camDistance = f(dis)
x1, y1 = 0.5, -55
x2, y2 = 2.0, -80
f = lambda x: min(max(y1, y2), max(min(y1, y2), (y2-y1)/(x2-x1)*x+y1-(y2-y1)/(x2-x1)*x1))
pitch = f(dis)
nearPlane = 0.01
farPlane = 15
fov = 60
cam_pos, rpy = self.get_camera_pose()
viewMatrix = pb.computeViewMatrixFromYawPitchRoll(cam_pos,
camDistance,
rpy[2], pitch,
rpy[0], 2,
self.clientId)
if not self.free_cam:
pb.resetDebugVisualizerCamera(camDistance, rpy[2], pitch, cam_pos,
self.clientId)
if mode == 'rgb_array' or mode == 'human_fast':
aspect = w / h
projectionMatrix = pb.computeProjectionMatrixFOV(fov, aspect,
nearPlane,
farPlane,
self.clientId)
img_arr = pb.getCameraImage(w,
h,
viewMatrix,
projectionMatrix,
shadow=1,
lightDirection=[0.5, 0.3, 1],
renderer=pb.ER_BULLET_HARDWARE_OPENGL,
physicsClientId=self.clientId)
img = np.array(img_arr[2])[:, :, 0:3]
if mode == 'rgb_array':
return img
if mode == 'human_fast':
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(img)
return img
# elif mode == 'human':
# assert self.gui, "Must use GUI for render mode human!"
    def setup_simulation(self, gui=False, easy_bookcases=False, clientId=None):
        '''
        Initializes the simulation by setting up the environment and spawning
        all objects used later.

        Params:
            gui (bool): Specifies if a GUI should be spawned.
            easy_bookcases (bool): Forwarded to bookcase spawning (unused
                directly here).
            clientId (int): Reuse an existing pybullet client; None
                connects a new one (GUI or DIRECT depending on `gui`).
        '''
        # Setup simulation parameters
        if clientId is None:
            mode = pb.GUI if gui else pb.DIRECT
            self.clientId = pb.connect(mode)
        else:
            self.clientId = clientId
        # NOTE(review): gravity is set to zero in all three axes —
        # presumably intentional for this kinematic setup; confirm.
        pb.setGravity(0.0, 0.0, 0.0, self.clientId)
        pb.setPhysicsEngineParameter(fixedTimeStep=self.p["world"]["tau"],
                                     physicsClientId=self.clientId)
        pb.setAdditionalSearchPath(pybullet_data.getDataPath())
        # Disable GUI panels and preview windows of the visualizer.
        pb.configureDebugVisualizer(pb.COV_ENABLE_GUI, 0)
        pb.configureDebugVisualizer(pb.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 0)
        pb.configureDebugVisualizer(pb.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 0)
        pb.configureDebugVisualizer(pb.COV_ENABLE_RGB_BUFFER_PREVIEW, 0)
        pb.setPhysicsEngineParameter(enableFileCaching=0)

        # Setup humans
        self.humans = [Human(self.clientId, self.tau) for _ in range(self.p['world']['n_humans'])]

        # Spawn robot
        self.robotId = self.spawn_robot()

        # Spawn setpoint
        self.spId = self.spawn_setpoint()

        # Spawn all objects in the environment
        self.additionalIds = self.spawn_additional_objects()

        # Enable collision of base and all objects
        # for id in self.additionalIds:
        #     pb.setCollisionFilterPair(self.robotId, id, -1, -1, True,
        #                               self.clientId)

        # Spawn bookcases
        self.spawn_kallax()

        # Figure out joint mapping: self.joint_mapping maps as in
        # desired_mapping list.
        self.joint_mapping = np.zeros(7, dtype=int)
        self.link_mapping = np.zeros(self.n_links, dtype=int)
        self.joint_limits = np.zeros((7, 2), dtype=float)
        self.eeLinkId = None
        self.baseLinkId = None
        self.lidarLinkId1 = None
        self.lidarLinkId2 = None

        joint_names = ["panda_joint{}".format(x) for x in range(1, 8)]
        link_names = self.p["joints"]["link_names"]

        # Scan all robot joints once, recording pybullet indices of the
        # joints/links named in the parameter file plus special links
        # (end effector, base, the two lidar mounts).
        for j in range(pb.getNumJoints(self.robotId,
                                       physicsClientId=self.clientId)):
            info = pb.getJointInfo(self.robotId, j,
                                   physicsClientId=self.clientId)
            j_name, l_name = info[1].decode("utf-8"), info[12].decode("utf-8")
            idx = info[0]
            if j_name in joint_names:
                map_idx = joint_names.index(j_name)
                self.joint_mapping[map_idx] = idx
                self.joint_limits[map_idx, :] = info[8:10]
            if l_name in link_names:
                self.link_mapping[link_names.index(l_name)] = idx
            if l_name == self.p["joints"]["ee_link_name"]:
                self.eeLinkId = idx
            if l_name == self.p["joints"]["base_link_name"]:
                self.baseLinkId = idx
            if l_name == self.p["sensors"]["lidar"]["link_id1"]:
                self.lidarLinkId1 = idx
            if l_name == self.p["sensors"]["lidar"]["link_id2"]:
                self.lidarLinkId2 = idx

        # Locate the grasp link on the setpoint body.
        for j in range(pb.getNumJoints(self.spId,
                                       physicsClientId=self.clientId)):
            info = pb.getJointInfo(self.spId, j,
                                   physicsClientId=self.clientId)
            link_name = info[12].decode("utf-8")
            idx = info[0]
            if link_name == "grasp_loc":
                self.spGraspLinkId = idx

        # Boolean mask over the 7 arm joints: True where the joint is
        # actively controlled (listed in params), False where fixed.
        self.actuator_selection = np.zeros(7, bool)
        for i, name in enumerate(joint_names):
            if name in self.p["joints"]["joint_names"]:
                self.actuator_selection[i] = 1

        # Prepare lidar
        n_scans = self.p["sensors"]["lidar"]["n_scans"]
        mag_ang = self.p["sensors"]["lidar"]["ang_mag"]
        scan_range = self.p["sensors"]["lidar"]["range"]
        # Evenly spaced beam angles centered around zero.
        angs = ((np.array(range(n_scans))
                 - (n_scans-1)/2.0)*2.0/n_scans*mag_ang)
        r_uv = np.vstack((np.cos(angs), np.sin(angs),
                          np.zeros(angs.shape[0])))
        # Rays start slightly away from the sensor origin.
        r_from = r_uv * 0.1
        r_to = r_uv * scan_range

        self.rays = (r_from, r_to)

        # Register human leg collisions with the robot's collision links.
        for human in self.humans:
            self.configure_ext_collisions(human.leg_l, self.robotId, self.collision_links)
            self.configure_ext_collisions(human.leg_r, self.robotId, self.collision_links)
def seed(self, seed=None):
'''
Initializes numpy's random package with a given seed.
Params:
seed (int): Seed to use. None means a random seed.
Returns:
list: The seed packed in a list.
'''
self.np_random, seed = seeding.np_random(seed)
return [seed]
def rotation_matrix(self, ang):
'''
Calculates a rotation matrix around z-axis.
Params:
ang (float): Angle to rotate.
Returns:
numpy.array: 3x3 rotation matrix
'''
return np.array([[np.cos(ang), -np.sin(ang), 0],
[np.sin(ang), +np.cos(ang), 0],
[0, 0, 1]])
def homography(self, linkId):
parent_link_state = pb.getLinkState(
self.robotId, linkId,
False, False, self.clientId)
parent_pos = np.array(parent_link_state[0])
parent_ori = parent_link_state[1]
R = np.array(pb.getMatrixFromQuaternion(parent_ori))
T = np.array([
[R[0], R[1], R[2], parent_pos[0]],
[R[3], R[4], R[5], parent_pos[1]],
[R[6], R[7], R[8], parent_pos[2]],
[0.0,0.0,0.0,1.0]
])
return T
def homography_vector(self, v):
v_h = np.ones((4,v.shape[1]))
v_h[:3,:] = v[:,:]
return v_h
def get_ee_velocity(self):
state_ee = pb.getLinkState(self.robotId, self.eeLinkId, True, False,
self.clientId)
return np.array(state_ee[6])
def get_ee_sp_transform(self):
'''
Calculates pose of setpoint w.r.t. end effector frame.
Returns:
numpy.array: 6D pose of setpoint in end effector frame.
'''
state_ee = pb.getLinkState(self.robotId, self.eeLinkId,
False, False, self.clientId)
ee_pos_w, ee_ori_w = state_ee[4:6]
w_pos_ee, w_ori_ee = pb.invertTransform(ee_pos_w, ee_ori_w,
self.clientId)
state_sp = pb.getLinkState(self.spId, self.spGraspLinkId,
False, False, self.clientId)
sp_pos_w, sp_ori_w = state_sp[4:6]
sp_pos_ee, sp_ori_ee = pb.multiplyTransforms(w_pos_ee, w_ori_ee,
sp_pos_w, sp_ori_w,
self.clientId)
sp_eul_ee = pb.getEulerFromQuaternion(sp_ori_ee, self.clientId)
return np.array(sp_pos_ee + sp_eul_ee)
def get_base_sp_transform(self):
'''
Calculates pose of setpoint w.r.t. base frame.
Returns:
numpy.array: 6D pose of setpoint in base frame.
'''
state_ee = pb.getLinkState(self.robotId, self.baseLinkId,
False, False, self.clientId)
ee_pos_w, ee_ori_w = state_ee[4:6]
w_pos_ee, w_ori_ee = pb.invertTransform(ee_pos_w, ee_ori_w,
self.clientId)
state_sp = pb.getLinkState(self.spId, self.spGraspLinkId,
False, False, self.clientId)
sp_pos_w, sp_ori_w = state_sp[4:6]
sp_pos_ee, sp_ori_ee = pb.multiplyTransforms(w_pos_ee, w_ori_ee,
sp_pos_w, sp_ori_w,
self.clientId)
sp_eul_ee = pb.getEulerFromQuaternion(sp_ori_ee, self.clientId)
return np.array(sp_pos_ee + sp_eul_ee)
def get_link_states(self, links_idx):
'''
Obtain matrix with 6D poses of links specified.
Args:
links_idx (list): Indices of links from link_names list in params.
Returns:
numpy.array: 3D poses for all link indices.
Shape [len(links_idx, 6)] where second dim. (x,y,z,r,p,y)
'''
# NOTE: Using euler angles, might this be a problem?
link_poses = np.zeros((len(links_idx), 6))
states = pb.getLinkStates(self.robotId, links_idx, False, False,
self.clientId)
states_mb = pb.getLinkState(self.robotId, self.baseLinkId, False,
False, self.clientId)
mb_pos_w, mb_ori_w = states_mb[4:6]
w_pos_mb, w_ori_mb = pb.invertTransform(mb_pos_w, mb_ori_w,
self.clientId)
for i, state in enumerate(states):
link_pos_w, link_ori_w = state[4:6]
link_pos_r, link_ori_r = pb.multiplyTransforms(w_pos_mb, w_ori_mb,
link_pos_w,
| |
<filename>Cryptogram.py
import os
import colorama
from termcolor import colored
# Replaces letters in cryptogram and records if they should be considered changed or not
def replacer(old, new, changed=False, red=False):
    """Substitute matching letters in the global cryptogram and reprint it.

    Only cells whose changed-flag equals `changed` are replaced, so changed
    and unchanged occurrences of the same letter can be targeted separately.
    The replacement cell is flagged True (printed red) only when `red` is
    True.
    """
    print()
    for idx in range(len(cryptogram)):
        letter, flag = cryptogram[idx]
        # Only touch cells in the requested category (changed/unchanged).
        if letter == old and flag is changed:
            cryptogram[idx] = [new, red is True]
        printing(cryptogram[idx])
    print()
# Prints cryptogram, making changed letters appear as red
def printing(curr):
    """Print a single cryptogram cell; changed letters appear in red."""
    letter, was_changed = curr
    if was_changed is True:
        print(colored(letter, 'red'), end=' ')
    elif letter == '\n':
        # Newline characters get no trailing space.
        print(letter)
    else:
        print(letter, end=' ')
# Used to check user inputs, whether they be responses to prompts or letters for substituting
def input_check(prompt, sec=False, responses=None, letter=False, revert=False):
    """Validate user input, re-prompting (recursively) until acceptable.

    Params:
        prompt (str): Text shown to the user.
        sec (bool): False when reading the letter to be replaced, True
            when reading the letter that replaces it.
        responses (list): Accepted answers for plain prompts; None when a
            letter is being read instead.
        letter (bool): True when a single cryptogram letter is expected.
        revert (bool): True when reverting a letter to its original value.

    Returns:
        For letter prompts, a [letter, flag] pair (flag meaning depends on
        `sec`); for plain prompts, the matched response string.

    Relies on the module-level globals `cryptogram`, `alphaold` and `data`.
    """
    if responses is None:  # If a letter is being substituted, a list of responses is unneeded
        responses = []
    if letter is True:  # Checks letters in cryptogram for replacing
        if revert is False:  # If there's no reverting, a substituted letter is needed
            string = input(prompt).upper()
        else:
            string = alphaold[0]  # The string can be anything that passes upcoming checks
        if string.isalpha() is False:  # String is not a letter
            print("Please pick a letter from A-Z.")
            return input_check(prompt, letter=True, sec=sec)
        elif len(string) != 1:  # Multiple letters are given
            print("Please pick only one letter.")
            return input_check(prompt, letter=True, sec=sec)
        elif sec is False:  # If this is the letter to be replaced
            # Keeps track of whether the letter to be changed falls under two states: changed and unchanged
            difference = [False, False]
            for i in range(len(cryptogram)):
                if string == cryptogram[i][0]:  # Match is found
                    if cryptogram[i][1] is True:  # The letter is changed
                        difference[0] = True
                    else:
                        difference[1] = True  # The letter is unchanged
            # Since there can't be two falses, the letter must be present in both changed and unchanged forms
            if difference[0] is difference[1]:
                # Asks the user if they want to change the changed or unchanged string
                return [string,
                        input_check(f"\nWould you like to replace the changed {string} or the unchanged "
                                    f"{string}? [Changed/Unchanged/C/U] ", responses=["changed", "unchanged", "c", "u"])
                        in ['changed', 'c']]
            # The letter is only present in changed form, so no user input is required
            elif difference[0] is True:
                return [string, True]
            # The letter is only present in unchanged form, so no user input is required
            return [string, False]
        elif sec is True:  # If this is the letter for replacing
            if revert is False and string == alphaold[0]:  # The letter is replacing itself
                print("You cannot substitute a letter with itself.")
                return input_check(f"\nWhat letter do you want to replace {alphaold[0]} with? ",
                                   sec=True, letter=True)
            og = False  # Will track if the letter for replacing is from the original cryptogram
            substituted = [False, '']
            for i in range(len(cryptogram)):
                if string == cryptogram[i][0]:  # Match is found
                    if cryptogram[i][1] is True:  # The letter for replacing is already substituted
                        substituted = [True, i]
                # Matches with the letter to be replaced, to determine if the replacing letter is an original letter
                elif alphaold == cryptogram[i]:
                    if data[1][i] == string:  # If the original letter equals the letter for replacing
                        og = True
            if revert is True:
                return[data[1][substituted[1]], True]  # Return the original letter and that it's an original
            # The letter for replacing is substituted and it's not meant to be an original
            if og is False and substituted[0] is True:
                print(f"{string} is already substituted. Please undo '{string}' to '{data[1][substituted[1]]}' "
                      "or substitute a different letter.")  # Tells the user how to undo the substituted letter
                return input_check(f"\nWhat letter do you want to replace {alphaold[0]} with? ",
                                   sec=True, letter=True)
            else:
                return [string, og]
    else:  # Regular input checker
        rawinput = input(prompt).rstrip().lower()
        for response in responses:
            if rawinput == response:  # The user put an accepted response
                return response
        # The user didn't put an accepted response
        print("Please respond with {}.". format(' or '.join(map(lambda x: f'"{x}"', responses))))
        return input_check(prompt, responses=responses)  # Re-prompt them
# Prompts the user to enter their cryptogram
def creation():
    """Prompt the user for a cryptogram, line by line.

    Re-prompts iteratively (the previous recursive retry could exhaust the
    recursion limit on repeated bad input) until a whole number of lines is
    given, then reads that many lines.

    Returns:
        str: The uppercased cryptogram with lines separated by newlines
        and no trailing newline.
    """
    while True:
        try:
            lines = int(input("\nHow many lines is your cryptogram? "))
            break
        except ValueError:  # The user didn't enter an int
            print("Please type a whole number.")
    print()
    # join avoids manual bookkeeping of the trailing newline and the
    # quadratic cost of repeated string concatenation.
    return '\n'.join(input(f"Line {i + 1}: ").rstrip().upper()
                     for i in range(lines))
# Sets up the program by opening and handling files
def setup(dir):
    """Load (or create) the cryptogram save file and return its contents.

    Ensures the file at `dir` exists and starts with an alphabetic
    character, prompting the user to enter a cryptogram otherwise, then
    handles the continue-or-start-over choice for saved progress.

    Params:
        dir (str): Path to the save file. (Name shadows the builtin `dir`;
            kept for interface compatibility with existing callers.)

    Returns:
        list | bool: [changed, unchanged] cryptogram strings, or False when
        the user discarded the file so setup() must be called again.
    """
    try:
        # Context managers guarantee the handle is closed on every path;
        # the previous version leaked the handle when the file already
        # held valid letters.
        with open(dir, 'r') as file:
            reader = file.read().rstrip()
        # Empty file or non-alphabetic first character: have the user fill it
        if not reader or reader[0].isalpha() is False:
            with open(dir, 'w') as file:
                file.write(creation())
    except FileNotFoundError:  # If the file doesn't exist, create one and have the user fill it
        with open(dir, 'w') as file:
            file.write(creation())
    with open(dir, 'r') as file:  # Reads cryptogram into memory
        rawcryptogram = file.read()
    # If the program has been run before, changed and unchanged data will be separated by the string "##"
    data = rawcryptogram.split("##")
    if len(data) == 1:  # The program has not been run before, so the cryptogram is still in an unchanged state.
        data.append(data[0])  # Makes a copy of the unchanged data for later use
    else:  # The program has been run before
        # Prompts the user if they'd like to continue where they left off or start over
        if input_check("\nWould you like to continue where you left off or start over? [Left off/Start over] ",
                       responses=["left off", "start over"]) == 'start over':
            # They can clear the current cryptogram, or make a new one
            action = input_check("\nWould you like to work on a new cryptogram or clear your current one? [New/Clear] ",
                                 responses=["new", "clear"])
            # Extra confirmation check
            if 'y' in input_check("\nAre you sure? Your progress will be deleted and this can't be undone. [Y/N] ",
                                  responses=["yes", "no", 'y', 'n']):
                # If the user would like to start over, delete the save file and return false
                if action == "new":
                    os.remove(dir)
                    return False
                else:  # The user would like to clear their changes
                    data[0] = data[1]  # Set the changed cryptogram equal to the unchanged one
    print()
    return data
colorama.init()
dir = os.path.dirname(os.path.realpath(__file__)) + "\input.txt" # Path to save file
data = setup(dir) # Sets up program
while data is False: # Calls setup until a new cryptogram is entered
data = setup(dir)
cryptogram = [] # Will store the user changed cryptogram
# Finds differences between changed and unchanged cryptograms and notates the changed letters
for x, y in zip(data[0], data[1]):
if x != y:
itm = [x, True]
else:
itm = [x, False]
cryptogram.append(itm)
printing(itm) # Print the cryptogram
print()
while True:
alphaold = input_check("\nWhat letter do you want to replace? ", letter=True)
rev = False
alphanew = []
if alphaold[1] is True:
if input_check(f"\nWould you like to revert {alphaold[0]} to its original letter?[Y/N] ",
responses=["yes", "no", 'y', 'n']) in ["y", "yes"]:
alphanew = input_check("\n", sec=True, letter=True, revert=True)
rev = True
if rev is False:
alphanew = input_check(f"\nWhat letter do you want to replace {alphaold[0]} with? ", sec=True, letter=True)
# Replaces letters, notating if old one is the changed version or not and if the new one is an original
replacer(alphaold[0], alphanew[0], changed=alphaold[1], red=not alphanew[1])
# Gives the user an option to undo their change
undo = input_check("\nDo you want to undo?[Y/N] ", responses=["yes", "no", 'y', 'n'])
if 'y' in undo:
# Reverses the first replacement, retaining if the original letter was changed
# and noting | |
duration(self):
"""
Returns a ``string`` of the game's duration in the format 'H:MM'.
"""
return self._duration
@property
def time_of_day(self):
"""
Returns a ``string`` constant indicated whether the game was played
during the day or at night.
"""
if 'night' in self._time_of_day.lower():
return NIGHT
return DAY
@property
def winner(self):
"""
Returns a ``string`` constant indicating whether the home or away team
won.
"""
if self.home_runs > self.away_runs:
return HOME
return AWAY
@property
def winning_name(self):
"""
Returns a ``string`` of the winning team's name, such as 'Houston
Astros'.
"""
if self.winner == HOME:
return self._home_name.text()
return self._away_name.text()
@property
def winning_abbr(self):
"""
Returns a ``string`` of the winning team's abbreviation, such as 'HOU'
for the Houston Astros.
"""
if self.winner == HOME:
return utils._parse_abbreviation(self._home_name)
return utils._parse_abbreviation(self._away_name)
@property
def losing_name(self):
"""
Returns a ``string`` of the losing team's name, such as '<NAME>
Dodgers'.
"""
if self.winner == HOME:
return self._away_name.text()
return self._home_name.text()
@property
def losing_abbr(self):
"""
Returns a ``string`` of the losing team's abbreviation, such as 'LAD'
for the Los Angeles Dodgers.
"""
if self.winner == HOME:
return utils._parse_abbreviation(self._away_name)
return utils._parse_abbreviation(self._home_name)
@int_property_decorator
def away_at_bats(self):
"""
Returns an ``int`` of the number of at bats the away team had.
"""
return self._away_at_bats
@int_property_decorator
def away_runs(self):
"""
Returns an ``int`` of the number of runs the away team scored.
"""
return self._away_runs
@int_property_decorator
def away_hits(self):
"""
Returns an ``int`` of the number of hits the away team had.
"""
return self._away_hits
@int_property_decorator
def away_rbi(self):
"""
Returns an ``int`` of the number of runs batted in the away team
registered.
"""
return self._away_rbi
@float_property_decorator
def away_earned_runs(self):
"""
Returns a ``float`` of the number of runs the away team earned.
"""
return self._away_earned_runs
@int_property_decorator
def away_bases_on_balls(self):
"""
Returns an ``int`` of the number of bases the away team registerd as a
result of balls.
"""
return self._away_bases_on_balls
@int_property_decorator
def away_strikeouts(self):
"""
Returns an ``int`` of the number of times the away team was struck out.
"""
return self._away_strikeouts
@int_property_decorator
def away_plate_appearances(self):
"""
Returns an ``int`` of the number of plate appearances the away team
made.
"""
return self._away_plate_appearances
@float_property_decorator
def away_batting_average(self):
"""
Returns a ``float`` of the batting average for the away team.
"""
return self._away_batting_average
@float_property_decorator
def away_on_base_percentage(self):
"""
Returns a ``float`` of the percentage of at bats that result in the
batter getting on base.
"""
return self._away_on_base_percentage
@float_property_decorator
def away_slugging_percentage(self):
"""
Returns a ``float`` of the slugging percentage for the away team based
on the number of bases gained per at-bat with bigger plays getting more
weight.
"""
return self._away_slugging_percentage
@float_property_decorator
def away_on_base_plus(self):
"""
Returns a ``float`` of the on base percentage plus the slugging
percentage. Percentage ranges from 0-1.
"""
return self._away_on_base_plus
@int_property_decorator
def away_pitches(self):
    """
    Returns an ``int`` of the number of pitches the away team faced.
    """
    return self._away_pitches
@int_property_decorator
def away_strikes(self):
    """
    Returns an ``int`` of the number of times a strike was called against
    the away team.
    """
    return self._away_strikes
@float_property_decorator
def away_win_probability_for_offensive_player(self):
    """
    Returns a ``float`` of the overall influence the away team's offense
    had on the outcome of the game where 0.0 denotes no influence and 1.0
    denotes the offense was solely responsible for the outcome.
    """
    return self._away_win_probability_for_offensive_player
@float_property_decorator
def away_average_leverage_index(self):
    """
    Returns a ``float`` of the amount of pressure the away team's pitcher
    faced during the game. 1.0 denotes average pressure while numbers less
    than 1.0 denote lighter pressure.
    """
    return self._away_average_leverage_index
@float_property_decorator
def away_win_probability_added(self):
    """
    Returns a ``float`` of the total positive influence the away team's
    offense had on the outcome of the game.
    """
    return self._away_win_probability_added
@float_property_decorator
def away_win_probability_subtracted(self):
    """
    Returns a ``float`` of the total negative influence the away team's
    offense had on the outcome of the game.
    """
    return self._away_win_probability_subtracted
@float_property_decorator
def away_base_out_runs_added(self):
    """
    Returns a ``float`` of the number of base out runs added by the away
    team.
    """
    return self._away_base_out_runs_added
@int_property_decorator
def away_putouts(self):
    """
    Returns an ``int`` of the number of putouts the away team registered.
    """
    return self._away_putouts
@int_property_decorator
def away_assists(self):
    """
    Returns an ``int`` of the number of assists the away team registered.
    """
    return self._away_assists
@float_property_decorator
def away_innings_pitched(self):
    """
    Returns a ``float`` of the number of innings the away team pitched.
    """
    return self._away_innings_pitched
@int_property_decorator
def away_home_runs(self):
    """
    Returns an ``int`` of the number of times the away team gave up a home
    run.
    """
    return self._away_home_runs
@int_property_decorator
def away_strikes_by_contact(self):
    """
    Returns an ``int`` of the number of times the away team struck out a
    batter who made contact with the pitch.
    """
    return self._away_strikes_by_contact
@int_property_decorator
def away_strikes_swinging(self):
    """
    Returns an ``int`` of the number of times the away team struck out a
    batter who was swinging.
    """
    return self._away_strikes_swinging
@int_property_decorator
def away_strikes_looking(self):
    """
    Returns an ``int`` of the number of times the away team struck out a
    batter who was looking.
    """
    return self._away_strikes_looking
@int_property_decorator
def away_grounded_balls(self):
    """
    Returns an ``int`` of the number of grounded balls the away team
    allowed.
    """
    return self._away_grounded_balls
@int_property_decorator
def away_fly_balls(self):
    """
    Returns an ``int`` of the number of fly balls the away team allowed.
    """
    return self._away_fly_balls
@int_property_decorator
def away_line_drives(self):
    """
    Returns an ``int`` of the number of line drives the away team allowed.
    """
    return self._away_line_drives
@int_property_decorator
def away_unknown_bat_type(self):
    """
    Returns an ``int`` of the number of away at bats that were not properly
    tracked and therefore cannot be safely placed in another statistical
    category.
    """
    return self._away_unknown_bat_type
@int_property_decorator
def away_game_score(self):
    """
    Returns an ``int`` of the starting away pitcher's score determined by
    many factors, such as number of runs scored against, number of strikes,
    etc.
    """
    return self._away_game_score
@int_property_decorator
def away_inherited_runners(self):
    """
    Returns an ``int`` of the number of runners a pitcher inherited when he
    entered the game.
    """
    return self._away_inherited_runners
@int_property_decorator
def away_inherited_score(self):
    """
    Returns an ``int`` of the number of scorers a pitcher inherited when he
    entered the game.
    """
    return self._away_inherited_score
@float_property_decorator
def away_win_probability_by_pitcher(self):
    """
    Returns a ``float`` of the amount of influence the away pitcher had on
    the game's result with 0.0 denoting zero influence and 1.0 denoting he
    was solely responsible for the team's win.
    """
    return self._away_win_probability_by_pitcher
@float_property_decorator
def away_base_out_runs_saved(self):
    """
    Returns a ``float`` of the number of runs saved by the away pitcher
    based on the number of players on bases. 0.0 denotes an average value.
    """
    return self._away_base_out_runs_saved
@int_property_decorator
def home_at_bats(self):
    """
    Returns an ``int`` of the number of at bats the home team had.
    """
    return self._home_at_bats
@int_property_decorator
def home_runs(self):
    """
    Returns an ``int`` of the number of runs the home team scored.
    """
    return self._home_runs
@int_property_decorator
def home_hits(self):
    """
    Returns an ``int`` of the number of hits the home team had.
    """
    return self._home_hits
@int_property_decorator
def home_rbi(self):
    """
    Returns an ``int`` of the number of runs batted in the home team
    registered.
    """
    return self._home_rbi
@float_property_decorator
def home_earned_runs(self):
    """
    Returns a ``float`` of the number of runs the home team earned.
    """
    return self._home_earned_runs
@int_property_decorator
def home_bases_on_balls(self):
    """
    Returns an ``int`` of the number of bases the home team registered as
    a result of balls.
    """
    return self._home_bases_on_balls
@int_property_decorator
def home_strikeouts(self):
    """
    Returns an ``int`` of the number of times the home team was struck out.
    """
    return self._home_strikeouts
@int_property_decorator
def home_plate_appearances(self):
    """
    Returns an ``int`` of the number of plate appearances the home team
    made.
    """
    return self._home_plate_appearances
@float_property_decorator
def home_batting_average(self):
    """
    Returns a ``float`` of the batting average for the home team.
    """
    return self._home_batting_average
@float_property_decorator
def home_on_base_percentage(self):
    """
    Returns a ``float`` of the percentage of at bats that result in the
    batter getting on base.
    """
    return self._home_on_base_percentage
@float_property_decorator
def home_slugging_percentage(self):
    """
    Returns a ``float`` of the slugging percentage for the home team based
    on the number of bases gained per at-bat with bigger plays getting more
    weight.
    """
    return self._home_slugging_percentage
@float_property_decorator
def home_on_base_plus(self):
"""
Returns a ``float`` of the on base percentage plus | |
print('\nconfiguration file: %s\n' % fn)
f = open(fn, 'r')
s = f.read()
f.close()
return s
else:
print('\nconfiguration file not found: %s' % fn)
return ''
#@+node:ekr.20160317054700.130: *4* msf.init_parser
def init_parser(self, s):
    '''Add double back-slashes to all patterns starting with '['.

    Config lines that merely *start* with '[' (but are not section
    names) are regex patterns; escape them so the config parser does
    not mistake them for section headers. scan_patterns reverses the
    escape. The munged text is then loaded into self.parser.
    '''
    if not s:
        return
    escaped = []
    for line in s.split('\n'):
        if self.is_section_name(line):
            escaped.append(line)
        elif line.strip().startswith('['):
            # Escape a pattern line so it does not parse as a section.
            escaped.append(r'\\' + line[1:])
        else:
            escaped.append(line)
    file_object = io.StringIO('\n'.join(escaped) + '\n')
    # ConfigParser.readfp was deprecated in Python 3.2 and removed in
    # 3.12; prefer read_file, falling back for old interpreters.
    read = getattr(self.parser, 'read_file', None) or self.parser.readfp
    read(file_object)
#@+node:ekr.20160317054700.131: *4* msf.is_section_name
def is_section_name(self, s):
    '''Return True if s is a bracketed header naming a known section.

    Comparison is case-insensitive and ignores internal spaces.
    '''
    def canon(name):
        # Normalized form used for comparison.
        return name.strip().lower().replace(' ', '')
    stripped = s.strip()
    if not (stripped.startswith('[') and stripped.endswith(']')):
        return False
    target = canon(stripped[1:-1])
    return any(target == canon(known) for known in self.section_names)
#@+node:ekr.20160317054700.132: *4* msf.make_patterns_dict
def make_patterns_dict(self):
    '''Assign all patterns to the appropriate ast.Node.

    Patterns whose find-string contains operators are registered in
    self.patterns_dict under every matching ast node name; plain-name
    patterns become simple substitutions in self.names_dict instead.
    '''
    trace = self.trace_patterns
    for pattern in self.general_patterns:
        ops = self.find_pattern_ops(pattern)
        if ops:
            for op in ops:
                # Add the pattern to op's list.
                op_names = self.op_name_dict.get(op)
                for op_name in op_names:
                    aList = self.patterns_dict.get(op_name, [])
                    aList.append(pattern)
                    self.patterns_dict[op_name] = aList
        else:
            # Enter the name in self.names_dict.
            name = pattern.find_s
            # Special case for 'number': it is matched against ast.Num
            # nodes, not against plain names.
            if name == 'number':
                aList = self.patterns_dict.get('Num', [])
                aList.append(pattern)
                self.patterns_dict['Num'] = aList
            elif name in self.names_dict:
                g.trace('duplicate pattern', pattern)
            else:
                self.names_dict [name] = pattern.repl_s
    if trace:
        g.trace('names_dict...')
        for z in sorted(self.names_dict):
            print('  %s: %s' % (z, self.names_dict.get(z)))
    if trace:
        g.trace('patterns_dict...')
        for z in sorted(self.patterns_dict):
            aList = self.patterns_dict.get(z)
            print(z)
            for pattern in sorted(aList):
                print('  '+repr(pattern))
    # Note: retain self.general_patterns for use in argument lists.
#@+node:ekr.20160317054700.133: *4* msf.scan_patterns
def scan_patterns(self, section_name):
    '''Parse the config section into a list of patterns, preserving order.

    Returns a list of Pattern(key, value) objects, one per option in
    the section. Duplicate keys are reported via g.trace and skipped.
    '''
    trace = self.trace_patterns
    parser = self.parser
    aList = []
    if parser.has_section(section_name):
        seen = set()
        for key in parser.options(section_name):
            value = parser.get(section_name, key)
            # A kludge: strip leading \\ from patterns.
            # (init_parser added the escape so '[...'-style keys would
            # not be read as section headers.)
            if key.startswith(r'\\'):
                key = '[' + key[2:]
                if trace: g.trace('removing escapes', key)
            if key in seen:
                g.trace('duplicate key', key)
            else:
                seen.add(key)
                aList.append(Pattern(key, value))
    if trace:
        g.trace('%s...\n' % section_name)
        for z in aList:
            print(z)
        print('')
    return aList
#@-others
#@+node:ekr.20160317054700.134: ** class Stub(object)
class Stub:
    '''
    A class representing all the generated stub for a class or def.
    stub.full_name should represent the complete context of a def.
    '''
    #@+others
    #@+node:ekr.20160317054700.135: *3* stub.ctor
    def __init__(self, kind, name, parent=None, stack=None):
        '''Stub ctor. Equality depends only on full_name and kind.

        kind:   e.g. 'class' or 'def'.
        name:   the bare name of the class or def.
        parent: the enclosing Stub, if any; self is appended to its children.
        stack:  the enclosing context names (StubTraverser.context_stack);
                its last entry must be parent's name.
        '''
        self.children = []
        self.full_name = '%s.%s' % ('.'.join(stack), name) if stack else name
        self.kind = kind
        self.name = name
        self.out_list = []
        self.parent = parent
        self.stack = stack # StubTraverser.context_stack.
        if stack:
            assert stack[-1] == parent.name, (stack[-1], parent.name)
        if parent:
            assert isinstance(parent, Stub)
            parent.children.append(self)
    #@+node:ekr.20160317054700.136: *3* stub.__eq__ and __ne__
    def __eq__(self, obj):
        '''
        Stub.__eq__. Return whether two stubs refer to the same method.
        Do *not* test parent links. That would interfere with --update logic.
        '''
        if isinstance(obj, Stub):
            return self.full_name == obj.full_name and self.kind == obj.kind
        # Bug fix: return the NotImplemented singleton, not the
        # NotImplementedError exception class (which is always truthy),
        # so Python correctly falls back to the reflected comparison.
        return NotImplemented
    def __ne__(self, obj):
        """Stub.__ne__: negate __eq__, propagating NotImplemented."""
        result = self.__eq__(obj)
        if result is NotImplemented:
            return result
        return not result
    #@+node:ekr.20160317054700.137: *3* stub.__hash__
    def __hash__(self):
        '''Stub.__hash__. Equality depends *only* on full_name and kind.'''
        return len(self.kind) + sum([ord(z) for z in self.full_name])
    #@+node:ekr.20160317054700.138: *3* stub.__repr__and __str__
    def __repr__(self):
        '''Stub.__repr__: include the object id for debugging.'''
        return 'Stub: %s %s' % (id(self), self.full_name)
    def __str__(self):
        '''Stub.__str__: the dotted full name only.'''
        return 'Stub: %s' % self.full_name
    #@+node:ekr.20160317054700.139: *3* stub.parents and level
    def level(self):
        '''Return the number of parents.'''
        return len(self.parents())
    def parents(self):
        '''Return a list of this stub's parents (ancestor names).'''
        return self.full_name.split('.')[:-1]
    #@-others
#@+node:ekr.20160317054700.140: ** class StubFormatter (AstFormatter)
class StubFormatter (AstFormatter):
'''
Formats an ast.Node and its descendants,
making pattern substitutions in Name and operator nodes.
'''
#@+others
#@+node:ekr.20160317054700.141: *3* sf.ctor
def __init__(self, controller, traverser):
    '''Ctor for StubFormatter class.

    Mirrors the controller's pattern tables and trace flags as local
    attributes; the traverser gives access to the class_stack.
    '''
    self.controller = controller
    self.traverser = traverser
        # 2016/02/07: to give the formatter access to the class_stack.
    self.def_patterns = controller.def_patterns
    self.general_patterns = controller.general_patterns
    self.names_dict = controller.names_dict
    self.patterns_dict = controller.patterns_dict
    self.raw_format = AstFormatter().format
    self.regex_patterns = controller.regex_patterns
    # Trace flags, copied from the controller's command-line options.
    self.trace_matches = controller.trace_matches
    self.trace_patterns = controller.trace_patterns
    self.trace_reduce = controller.trace_reduce
    self.trace_visitors = controller.trace_visitors
    self.verbose = controller.verbose
#@+node:ekr.20160317054700.142: *3* sf.match_all
matched_d = {}  # trace-only: node-type name -> patterns that have matched
def match_all(self, node, s, trace=False):
    '''Match all the patterns for the given node.

    Applies every pattern registered for node's class (plus the regex
    patterns) to s, stopping at the first match. Returns s, possibly
    rewritten by that pattern.
    '''
    trace = False or trace or self.trace_matches
    # verbose = True
    d = self.matched_d
    name = node.__class__.__name__
    s1 = truncate(s, 40)
    caller = g.callers(2).split(',')[1].strip()
        # The direct caller of match_all.
    patterns = self.patterns_dict.get(name, []) + self.regex_patterns
    for pattern in patterns:
        found, s = pattern.match(s,trace=False)
        if found:
            if trace:
                # Record and report the first time each pattern matches
                # for this node type.
                aList = d.get(name, [])
                if pattern not in aList:
                    aList.append(pattern)
                    d [name] = aList
                    print('match_all:    %-12s %26s %40s ==> %s' % (caller, pattern, s1, s))
            break
    return s
#@+node:ekr.20160317054700.143: *3* sf.visit
def visit(self, node):
    '''StubFormatter.visit: supports --verbose tracing.'''
    # Delegate to the base-class visitor; kept as a hook for tracing.
    s = AstFormatter.visit(self, node)
    return s
#@+node:ekr.20160317054700.144: *3* sf.trace_visitor
def trace_visitor(self, node, op, s):
    '''Trace node's visitor.

    When --trace-visitors is in effect, prints one line per visited
    operator node: the calling visitor, the operator, the raw source
    text and the formatted result.
    '''
    if self.trace_visitors:
        caller = g.callers(2).split(',')[1]
        s1 = AstFormatter().format(node).strip()
        print('%12s op %-6s: %s ==> %s' % (caller, op.strip(), s1, s))
#@+node:ekr.20160317054700.145: *3* sf.Operands
# StubFormatter visitors for operands...
#@+node:ekr.20160317054700.146: *4* sf.Attribute
# Attribute(expr value, identifier attr, expr_context ctx)
attrs_seen = []  # trace-only: dotted names already reported
def do_Attribute(self, node):
    '''StubFormatter.do_Attribute: format 'value.attr', substituting the
    whole dotted name via names_dict when a replacement exists.'''
    trace = False
    s = '%s.%s' % (
        self.visit(node.value),
        node.attr) # Don't visit node.attr: it is always a string.
    s2 = self.names_dict.get(s)
    if trace and s2 and s2 not in self.attrs_seen:
        self.attrs_seen.append(s2)
        g.trace(s, '==>', s2)
    return s2 or s
#@+node:ekr.20160317054700.147: *4* sf.Constants: Bytes, Num, Str
# Return generic markers to allow better pattern matches.
def do_Bytes(self, node): # Python 3.x only.
    '''Return the generic 'bytes' marker for any bytes constant.'''
    return 'bytes' # return str(node.s)
def do_Num(self, node):
    '''Return the generic 'number' marker for any numeric constant.'''
    # make_patterns_dict treats 'number' as a special case.
    # return self.names_dict.get('number', 'number')
    return 'number' # return repr(node.n)
def do_Str(self, node):
    '''This represents a string constant: return the generic 'str' marker.'''
    return 'str' # return repr(node.s)
#@+node:ekr.20160317054700.148: *4* sf.Dict
def do_Dict(self, node):
    '''Format a Dict node as 'Dict[{k1:v1, ...}]'.

    If the keys/values lists are mismatched (malformed AST), an error
    is printed and the inner braces are omitted.
    '''
    keys = [self.visit(key) for key in node.keys]
    values = [self.visit(value) for value in node.values]
    pieces = []
    if len(keys) == len(values):
        pairs = ('%s:%s' % (key, value) for key, value in zip(keys, values))
        pieces.extend(('{', ', '.join(pairs), '}'))
    else:
        print('Error: f.Dict: len(keys) != len(values)\nkeys: %s\nvals: %s' % (
            repr(keys), repr(values)))
    return 'Dict[%s]' % ''.join(pieces)
#@+node:ekr.20160317054700.149: *4* sf.List
def do_List(self, node):
    '''StubFormatter.List: format as 'List[e1, e2, ...]'.'''
    rendered = [self.visit(element) for element in node.elts]
    rendered = list(filter(None, rendered))  # Defensive: drop empty renderings.
    return 'List[%s]' % ', '.join(rendered)
#@+node:ekr.20160317054700.150: *4* sf.Name
seen_names = []  # trace-only: ids already reported
def do_Name(self, node):
    '''StubFormatter ast.Name visitor: apply names_dict substitutions
    and map True/False to 'bool'.'''
    trace = False
    mapping = self.names_dict
    resolved = mapping.get(node.id, node.id)
    result = 'bool' if resolved in ('True', 'False') else resolved
    if trace and node.id not in self.seen_names:
        # Debug-only bookkeeping; 'trace' is hard-wired off above.
        self.seen_names.append(node.id)
        if mapping.get(node.id):
            g.trace(node.id, '==>', mapping.get(node.id))
        elif node.id == 'aList':
            g.trace('**not found**', node.id)
    return result
#@+node:ekr.20160317054700.151: *4* sf.Tuple
def do_Tuple(self, node):
    '''StubFormatter.Tuple: format as 'Tuple[e1, e2, ...]'.'''
    # An alternative '(e1, e2)' + match_all rendering exists upstream
    # but is permanently disabled; keep the Tuple[...] form.
    rendered = [self.visit(element) for element in node.elts]
    return 'Tuple[%s]' % ', '.join(rendered)
#@+node:ekr.20160317054700.152: *3* sf.Operators
# StubFormatter visitors for operators...
#@+node:ekr.20160317054700.153: *4* sf.BinOp
# BinOp(expr left, operator op, expr right)
def do_BinOp(self, node):
    '''StubFormatter.BinOp visitor.

    Infers a result type for binary expressions: comparison operators
    yield 'bool'; identical operand types yield that type; mixed
    numeric types are reduced; 'str' with %, + or * yields 'str'.
    Otherwise falls back to the raw 'lhs op rhs' text. Pattern
    substitutions are applied to the result before returning.
    '''
    trace = False or self.trace_reduce ; verbose = False
    numbers = ['number', 'complex', 'float', 'long', 'int',]
    op = self.op_name(node.op)
    lhs = self.visit(node.left)
    rhs = self.visit(node.right)
    if op.strip() in ('is', 'is not', 'in', 'not in'):
        s = 'bool'
    elif lhs == rhs:
        s = lhs
            # Perhaps not always right,
            # but it is correct for Tuple, List, Dict.
    elif lhs in numbers and rhs in numbers:
        s = reduce_types([lhs, rhs], trace=trace)
            # reduce_numbers would be wrong: it returns a list.
    elif lhs == 'str' and op in '%+*':
        # str + any implies any is a string.
        s = 'str'
    else:
        if trace and verbose and lhs == 'str':
            g.trace('***** unknown string op', lhs, op, rhs)
        # Fall back to the base-class behavior.
        s = '%s%s%s' % (
            self.visit(node.left),
            op,
            self.visit(node.right))
    s = self.match_all(node, s)
    self.trace_visitor(node, op, s)
    return s
#@+node:ekr.20160317054700.154: *4* sf.BoolOp
# BoolOp(boolop op, expr* values)
def do_BoolOp(self, node): # Python 2.x only.
    '''StubFormatter.BoolOp visitor for 'and' and 'or'.

    The result type is the reduction of all operand types, with
    pattern substitutions applied afterwards.
    '''
    trace = False or self.trace_reduce
    op = self.op_name(node.op)
    values = [self.visit(z).strip() for z in node.values]
    s = reduce_types(values, trace=trace)
    s = self.match_all(node, s)
    self.trace_visitor(node, op, s)
    return s
#@+node:ekr.20160317054700.155: *4* sf.Call & sf.keyword
# Call(expr func, | |
"item 14",
"item 15",
"item 16",
],
manager=default_ui_manager,
container=test_container,
allow_multi_select=True,
anchors={
"left": "left",
"right": "left",
"top": "top",
"bottom": "top",
},
)
selection_list.set_position((20, 20))
assert selection_list.relative_rect.topleft == (10, 10)
assert selection_list.relative_rect.size == (50, 50)
assert selection_list.relative_rect.bottomright == (60, 60)
selection_list = UISelectionList(
relative_rect=pygame.Rect(0, 0, 50, 50),
item_list=[
"item 1",
"item 2",
"item 3",
"item 4",
"item 5",
"item 6",
"item 7",
"item 8",
"item 9",
"item 10",
"item 11",
"item 12",
"item 13",
"item 14",
"item 15",
"item 16",
],
manager=default_ui_manager,
container=test_container,
allow_multi_select=True,
anchors={
"left": "right",
"right": "right",
"top": "top",
"bottom": "top",
},
)
selection_list.set_position((280, 120))
assert selection_list.relative_rect.topleft == (-30, 110)
assert selection_list.relative_rect.size == (50, 50)
assert selection_list.relative_rect.bottomright == (20, 160)
selection_list = UISelectionList(
relative_rect=pygame.Rect(0, 0, 50, 50),
item_list=[
"item 1",
"item 2",
"item 3",
"item 4",
"item 5",
"item 6",
"item 7",
"item 8",
"item 9",
"item 10",
"item 11",
"item 12",
"item 13",
"item 14",
"item 15",
"item 16",
],
manager=default_ui_manager,
container=test_container,
allow_multi_select=True,
anchors={
"left": "right",
"right": "right",
"top": "bottom",
"bottom": "bottom",
},
)
selection_list.set_position((230, 230))
assert selection_list.relative_rect.topleft == (-80, -80)
assert selection_list.relative_rect.size == (50, 50)
assert selection_list.relative_rect.bottomright == (-30, -30)
selection_list = UISelectionList(
relative_rect=pygame.Rect(0, 0, 50, 50),
item_list=[
"item 1",
"item 2",
"item 3",
"item 4",
"item 5",
"item 6",
"item 7",
"item 8",
"item 9",
"item 10",
"item 11",
"item 12",
"item 13",
"item 14",
"item 15",
"item 16",
],
manager=default_ui_manager,
container=test_container,
allow_multi_select=True,
anchors={
"left": "left",
"right": "left",
"top": "bottom",
"bottom": "bottom",
},
)
selection_list.set_position((130, 230))
assert selection_list.relative_rect.topleft == (120, -80)
assert selection_list.relative_rect.size == (50, 50)
assert selection_list.relative_rect.bottomright == (170, -30)
selection_list = UISelectionList(
relative_rect=pygame.Rect(0, 0, 50, 50),
item_list=[
"item 1",
"item 2",
"item 3",
"item 4",
"item 5",
"item 6",
"item 7",
"item 8",
"item 9",
"item 10",
"item 11",
"item 12",
"item 13",
"item 14",
"item 15",
"item 16",
],
manager=default_ui_manager,
container=test_container,
allow_multi_select=True,
anchors={
"left": "left",
"right": "right",
"top": "top",
"bottom": "bottom",
},
)
assert selection_list.relative_right_margin == 250
assert selection_list.relative_bottom_margin == 250
selection_list.set_position((20, 20))
assert selection_list.relative_rect.topleft == (10, 10)
assert selection_list.relative_rect.size == (50, 50)
assert selection_list.relative_rect.bottomright == (60, 60)
assert selection_list.relative_right_margin == 240
assert selection_list.relative_bottom_margin == 240
def test_set_dimensions(
    self, _init_pygame, default_ui_manager, _display_surface_return_none
):
    """
    Resizing a list should update both its relative and absolute rects
    and keep the anchor margins consistent for every anchor
    configuration (top-left, top-right, bottom-right, bottom-left and
    fully-stretched).
    """
    test_container = UIContainer(
        relative_rect=pygame.Rect(10, 10, 300, 300),
        manager=default_ui_manager,
    )
    # All five scenarios share the same 16 items; only the rect and
    # anchors vary, so build the lists through one local helper.
    item_list = ["item %d" % index for index in range(1, 17)]

    def make_list(relative_rect, anchors):
        # Helper: build a multi-select list inside the shared container.
        return UISelectionList(
            relative_rect=relative_rect,
            item_list=item_list,
            manager=default_ui_manager,
            container=test_container,
            allow_multi_select=True,
            anchors=anchors,
        )

    # Anchored to the top-left: no right/bottom margins are tracked.
    selection_list = make_list(
        pygame.Rect(30, 30, 50, 50),
        {"left": "left", "right": "left", "top": "top", "bottom": "top"},
    )
    assert selection_list.relative_right_margin is None
    assert selection_list.relative_bottom_margin is None
    selection_list.set_dimensions((20, 20))
    assert selection_list.relative_rect.topleft == (30, 30)
    assert selection_list.relative_rect.size == (20, 20)
    assert selection_list.relative_rect.bottomright == (50, 50)
    assert selection_list.rect.topleft == (40, 40)
    assert selection_list.rect.size == (20, 20)
    assert selection_list.rect.bottomright == (60, 60)
    assert selection_list.relative_right_margin is None
    assert selection_list.relative_bottom_margin is None

    # Anchored to the top-right: the right margin tracks the resize.
    selection_list = make_list(
        pygame.Rect(-60, 10, 50, 50),
        {"left": "right", "right": "right", "top": "top", "bottom": "top"},
    )
    assert selection_list.relative_right_margin == 10
    assert selection_list.relative_bottom_margin is None
    selection_list.set_dimensions((60, 60))
    assert selection_list.relative_rect.topleft == (-60, 10)
    assert selection_list.relative_rect.size == (60, 60)
    assert selection_list.relative_rect.bottomright == (0, 70)
    assert selection_list.rect.topleft == (250, 20)
    assert selection_list.rect.size == (60, 60)
    assert selection_list.rect.bottomright == (310, 80)
    assert selection_list.relative_right_margin == 0
    assert selection_list.relative_bottom_margin is None

    # Anchored to the bottom-right: both margins track the resize.
    selection_list = make_list(
        pygame.Rect(-70, -70, 50, 50),
        {"left": "right", "right": "right", "top": "bottom", "bottom": "bottom"},
    )
    assert selection_list.relative_right_margin == 20
    assert selection_list.relative_bottom_margin == 20
    selection_list.set_dimensions((30, 30))
    assert selection_list.relative_rect.topleft == (-70, -70)
    assert selection_list.relative_rect.size == (30, 30)
    assert selection_list.relative_rect.bottomright == (-40, -40)
    assert selection_list.rect.topleft == (240, 240)
    assert selection_list.rect.size == (30, 30)
    assert selection_list.rect.bottomright == (270, 270)
    assert selection_list.relative_right_margin == 40
    assert selection_list.relative_bottom_margin == 40

    # Anchored to the bottom-left: only the bottom margin is tracked.
    selection_list = make_list(
        pygame.Rect(50, -50, 50, 50),
        {"left": "left", "right": "left", "top": "bottom", "bottom": "bottom"},
    )
    assert selection_list.relative_right_margin is None
    assert selection_list.relative_bottom_margin == 0
    selection_list.set_dimensions((100, 100))
    assert selection_list.relative_rect.topleft == (50, -50)
    assert selection_list.relative_rect.size == (100, 100)
    assert selection_list.relative_rect.bottomright == (150, 50)
    assert selection_list.rect.topleft == (60, 260)
    assert selection_list.rect.size == (100, 100)
    assert selection_list.rect.bottomright == (160, 360)
    assert selection_list.relative_right_margin is None
    assert selection_list.relative_bottom_margin == -50

    # Stretched to all four sides: both margins shrink as the list grows.
    selection_list = make_list(
        pygame.Rect(10, 10, 50, 50),
        {"left": "left", "right": "right", "top": "top", "bottom": "bottom"},
    )
    assert selection_list.relative_right_margin == 240
    assert selection_list.relative_bottom_margin == 240
    selection_list.set_dimensions((90, 90))
    assert selection_list.relative_rect.topleft == (10, 10)
    assert selection_list.relative_rect.size == (90, 90)
    assert selection_list.relative_rect.bottomright == (100, 100)
    assert selection_list.rect.topleft == (20, 20)
    assert selection_list.rect.size == (90, 90)
    assert selection_list.rect.bottomright == (110, 110)
    assert selection_list.relative_right_margin == 200
    assert selection_list.relative_bottom_margin == 200
def test_kill(
    self,
    _init_pygame,
    default_ui_manager: IUIManagerInterface,
    _display_surface_return_none,
):
    """Killing a list should remove it and all its children from the manager."""
    selection_list = UISelectionList(
        relative_rect=pygame.Rect(0, 0, 50, 80),
        item_list=["item %d" % index for index in range(1, 17)],
        manager=default_ui_manager,
        allow_multi_select=True,
    )
    root = default_ui_manager.get_root_container()
    assert len(root.elements) == 2
    assert len(default_ui_manager.get_sprite_group().sprites()) == 13
    # The sprite group should hold the root, the list and every child
    # element of the list (containers, scroll bar and its buttons).
    expected_sprites = [
        root,
        selection_list,
        selection_list.list_and_scroll_bar_container,
        selection_list.item_list_container,
        selection_list.scroll_bar,
        selection_list.scroll_bar.button_container,
        *selection_list.item_list_container.elements,
        selection_list.scroll_bar.top_button,
        selection_list.scroll_bar.bottom_button,
        selection_list.scroll_bar.sliding_button,
    ]
    assert default_ui_manager.get_sprite_group().sprites() == expected_sprites
    selection_list.kill()
    # Only the root container should survive.
    assert len(default_ui_manager.get_root_container().elements) == 0
    assert len(default_ui_manager.get_sprite_group().sprites()) == 1
    assert default_ui_manager.get_sprite_group().sprites() == [
        default_ui_manager.get_root_container()
    ]
def test_rebuild_from_changed_theme_data_non_default(
    self, _init_pygame, _display_surface_return_none
):
    """A list built with a non-default theme should still produce an image."""
    theme_path = os.path.join(
        "tests", "data", "themes", "ui_selection_list_non_default.json"
    )
    manager = UIManager((800, 600), theme_path)
    selection_list = UISelectionList(
        relative_rect=pygame.Rect(0, 0, 50, 80),
        item_list=["item %d" % index for index in range(1, 17)],
        manager=manager,
        allow_multi_select=True,
    )
    assert selection_list.image is not None
@pytest.mark.filterwarnings("ignore:Invalid value")
@pytest.mark.filterwarnings("ignore:Colour hex code")
@pytest.mark.filterwarnings("ignore:Invalid gradient")
@pytest.mark.filterwarnings("ignore:Unable to load")
def test_rebuild_from_changed_theme_data_bad_values(
    self, _init_pygame, _display_surface_return_none
):
    """A list built with a broken theme should still produce an image."""
    theme_path = os.path.join(
        "tests", "data", "themes", "ui_selection_list_bad_values.json"
    )
    manager = UIManager((800, 600), theme_path)
    selection_list = UISelectionList(
        relative_rect=pygame.Rect(0, 0, 50, 80),
        item_list=["item %d" % index for index in range(1, 17)],
        manager=manager,
        allow_multi_select=True,
    )
    assert selection_list.image is not None
def test_disable(
    self,
    _init_pygame: None,
    default_ui_manager: UIManager,
    _display_surface_return_none: None,
):
    """A disabled list should ignore click events entirely."""
    selection_list = UISelectionList(
        relative_rect=pygame.Rect(50, 50, 150, 400),
        item_list=["green", "eggs", "and", "ham"],
        manager=default_ui_manager,
    )
    assert selection_list.get_single_selection() is None
    selection_list.disable()
    assert selection_list.is_enabled is False
    assert selection_list.item_list_container.is_enabled is False
    # Simulate a full click (button down then up) on the first item.
    list_button = selection_list.item_list_container.elements[0]
    for event_type in (pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP):
        list_button.process_event(
            pygame.event.Event(
                event_type, {"button": 1, "pos": list_button.rect.center}
            )
        )
    for event in pygame.event.get():
        default_ui_manager.process_events(event)
    # Nothing should have been selected while disabled.
    assert selection_list.get_single_selection() is None
def test_enable(
    self,
    _init_pygame: None,
    default_ui_manager: UIManager,
    _display_surface_return_none: None,
):
    """Re-enabling a disabled list should make clicks select items again."""
    selection_list = UISelectionList(
        relative_rect=pygame.Rect(50, 50, 150, 400),
        item_list=["green", "eggs", "and", "ham"],
        manager=default_ui_manager,
    )
    assert selection_list.get_single_selection() is None
    selection_list.disable()
    selection_list.enable()
    assert selection_list.is_enabled is True
    assert selection_list.item_list_container.is_enabled is True
    # Simulate a full click (button down then up) on the first item.
    list_button = selection_list.item_list_container.elements[0]
    for event_type in (pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP):
        list_button.process_event(
            pygame.event.Event(
                event_type, {"button": 1, "pos": list_button.rect.center}
            )
        )
    for event in pygame.event.get():
        default_ui_manager.process_events(event)
    # The click on the first item should now register.
    assert selection_list.get_single_selection() == "green"
def test_show(
self, _init_pygame, | |
- timedelta(minutes=5):
request.user.profile.Refiner_Illuminator_TagPostCounter += 1
request.user.profile.save()
TagBadge.objects.get_or_create(
awarded_to_user=request.user,
badge_type="BRONZE",
tag_name="Explainer",
bade_position="BADGE")
PrivRepNotification.objects.get_or_create(
for_user=request.user,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Explainer",
description="Edit and answer 1 question (both actions within 12 hours, answer score > 0)"
)
if request.user.profile.Refiner_Illuminator_TagPostCounter >= 50:
TagBadge.objects.get_or_create(
awarded_to_user=request.user,
badge_type="SILVER",
tag_name="Refiner",
bade_position="BADGE")
PrivRepNotification.objects.get_or_create(
for_user=request.user,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Refiner",
description="Edit and answer 50 questions (both actions within 12 hours, answer score > 0)"
)
if request.user.profile.Refiner_Illuminator_TagPostCounter >= 500:
TagBadge.objects.get_or_create(
awarded_to_user=request.user,
badge_type="GOLD",
tag_name="Illuminator",
bade_position="BADGE")
PrivRepNotification.objects.get_or_create(
for_user=request.user,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Illuminator",
description="Edit and answer 500 questions (both actions within 12 hours, answer score > 0)"
)
if post.revival_stage_one and post.a_vote_ups.all().count() >= 2:
TagBadge.objects.get_or_create(
awarded_to_user=post.answer_owner,
badge_type="Bronze",
tag_name="Revival",
bade_position="BADGE",
answerIf_TagOf_A=post)
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Revival",
description="Answer more than 30 days after a question was asked as first answer scoring 2 or more"
)
if post.necromancer_check and post.a_vote_ups.all().count() >= 5:
TagBadge.objects.get_or_create(
awarded_to_user=post.answer_owner,
badge_type="SILVER",
tag_name="Necromancer",
bade_position="BADGE",
answerIf_TagOf_A=post)
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Necromancer",
description="Answer a question more than 60 days later with score of 5 or more"
)
last_24_hours = timezone.now() - timedelta(hours=24)
getQ_Votes_in_24_Hours = QUpvote.objects.filter(
date__gt=last_24_hours).count()
getQ_DownVotes_in_24_Hours = QDownvote.objects.filter(
downvote_by_q=request.user, date__gt=last_24_hours).count()
getA_Votes_in_24_Hours = Answer.objects.filter(
a_vote_ups=request.user, date__gt=last_24_hours).count()
getA_DownVotes_in_24_Hours = Answer.objects.filter(
a_vote_downs=request.user, date__gt=last_24_hours).count()
totalIs = getQ_Votes_in_24_Hours + getQ_DownVotes_in_24_Hours + \
getA_Votes_in_24_Hours + getA_DownVotes_in_24_Hours
if totalIs >= 30:
TagBadge.objects.get_or_create(
awarded_to_user=post.answer_owner,
badge_type="BRONZE",
tag_name="Suffrage",
bade_position="BADGE")
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Suffrage",
description="Use 30 votes in a day"
)
if post.answer_owner == post.questionans.post_owner and post.a_vote_ups.count() >= 3:
TagBadge.objects.get_or_create(
awarded_to_user=post.answer_owner,
badge_type="SILVER",
tag_name="Self-Learner",
bade_position="BADGE",
answerIf_TagOf_A=post)
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Self-Learner",
description="Answer your own question with score of 3 or more"
)
if post.a_vote_ups.count() >= 1:
TagBadge.objects.get_or_create(
awarded_to_user=post.answer_owner,
badge_type="SILVER",
tag_name="Teacher",
bade_position="BADGE",
answerIf_TagOf_A=post)
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Teacher",
description="Answer a question with score of 1 or more"
)
if post.accepted and post.a_vote_ups.count() >= 40:
TagBadge.objects.get_or_create(
awarded_to_user=post.answer_owner,
badge_type="SILVER",
tag_name="Guru",
bade_position="BADGE",
answerIf_TagOf_A=post)
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Guru",
description="Accepted answer and score of 40 or more"
)
# subquery = ''
# for s in data.answer_set.all():
subquery = Tag.objects.filter(
question__answer__answer_owner=post.answer_owner).annotate(
num_name=Count('name'))
for s in subquery:
gettingAnswers = Answer.objects.filter(questionans__tags__name=s)
# print(gettingAnswers)
# print(s)
for s5 in gettingAnswers:
# print(s5)
# print(s5.a_reputation)
if s.num_name >= 2 and s5.a_reputation >= 15:
# createTag = Tag.objects.get_or_create(name=s)
# print("\nTag Awarding Worked\n")
TagBadge.objects.get_or_create(
awarded_to_user=post.answer_owner, badge_type="GOLD", tag_name=s)
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if=s,
description="N/A"
)
# if s.num_name >= 2 and s2.a_reputation >= 50:
# print("Worked")
# GoldBadge.objects.get_or_create(awarded_to=request.user,badge_type="Gold",tag_name=s)
if request.GET.get('submit') == 'like':
if request.user in post.a_vote_downs.all():
# REMOVE DOWOVOTE AND UPVOTE
post.a_vote_downs.remove(request.user)
print("First Statement is Excecuting")
post.a_vote_ups.add(request.user)
# Check if user downvoted the post if then delete that downvote
# reputation (-2) and add new (+10) reputation
if Reputation.objects.filter(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=-2,
reputation_on_what="DOWN_VOTE_ANSWER_REP_M").exists():
Reputation.objects.filter(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=-2,
reputation_on_what="DOWN_VOTE_ANSWER_REP_M").delete()
Reputation.objects.get_or_create(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=10,
reputation_on_what="MY_ANSWER_UPVOTE_REP_P")
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="MY_ANSWER_UPVOTE_REP_P",
url=question_URL,
for_if="",
description="",
answer_priv_noti=post,
)
rewardPrivielege(request, post.answer_owner)
return JsonResponse({'action': 'unDownVoteAndLike'})
elif request.user in post.a_vote_ups.all():
# REMOVE UPVOTE
post.a_reputation -= 10
print("Second Statement is Excecuting")
post.save()
post.a_vote_ups.remove(request.user)
if Reputation.objects.filter(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=10,
reputation_on_what="MY_ANSWER_UPVOTE_REP_P").exists():
Reputation.objects.filter(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=10,
reputation_on_what="MY_ANSWER_UPVOTE_REP_P").delete()
if PrivRepNotification.objects.filter(
for_user=post.answer_owner,
is_read=False,
url=question_URL,
type_of_PrivNotify="ANSWER_ACCEPT_REP_P",
missingReputation=10).exists():
PrivRepNotification.objects.update(
for_user=post.answer_owner,
is_read=True,
url=question_URL,
type_of_PrivNotify="ANSWER_ACCEPT_REP_P",
missingReputation=10)
rewardPrivielege(request, post.answer_owner)
return JsonResponse({'action': 'unlikeAnswer'})
elif request.user == post.answer_owner:
return JsonResponse({'action': 'cannotLikeOwnPost'})
else:
# UPVOTE
if request.user.profile.voteUpPriv:
post.a_reputation += 10
# post.date = timezone.now()
post.save()
post.a_vote_ups.add(request.user)
if getQuestion.reversal_monitor and post.a_vote_ups.all().count() >= 20:
TagBadge.objects.get_or_create(
awarded_to_user=post.answer_owner,
badge_type="GOLD",
tag_name="Reversal",
bade_position="BADGE",
answerIf_TagOf_A=post)
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Reversal",
description="Provide an answer of +20 score to a question of -5 score")
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
is_read=False,
url=question_URL,
type_of_PrivNotify="ANSWER_ACCEPT_REP_P",
missingReputation=10)
Reputation.objects.get_or_create(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=10,
reputation_on_what="MY_ANSWER_UPVOTE_REP_P")
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="MY_ANSWER_UPVOTE_REP_P",
url=question_URL,
for_if="",
description="",
# question_priv_noti=post,
answer_priv_noti=post,
)
if post.a_vote_ups.all().count() >= 10:
TagBadge.objects.get_or_create(
awarded_to_user=post.answer_owner,
badge_type="BRONZE",
tag_name="Nice Answer",
bade_position="BADGE",
answerIf_TagOf_A=post)
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Nice Answer",
description="Answer score of 10 or more"
)
if post.a_vote_ups.all().count() >= 25:
TagBadge.objects.get_or_create(
awarded_to_user=post.answer_owner,
badge_type="SILVER",
tag_name="Good Answer",
bade_position="BADGE",
answerIf_TagOf_A=post)
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Good Answer",
description="Answer score of 25 or more"
)
if post.a_vote_ups.all().count() >= 100:
TagBadge.objects.get_or_create(
awarded_to_user=post.answer_owner,
badge_type="GOLD",
tag_name="Great Answer",
bade_position="BADGE",
answerIf_TagOf_A=post)
PrivRepNotification.objects.get_or_create(
for_user=post.answer_owner,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Great Answer",
description="Answer score of 100 or more"
)
if post == Answer.objects.filter(
a_vote_ups=request.user).first():
TagBadge.objects.get_or_create(
awarded_to_user=request.user,
badge_type="BRONZE",
tag_name="Supporter",
bade_position="BADGE")
PrivRepNotification.objects.get_or_create(
for_user=request.user,
type_of_PrivNotify="BADGE_EARNED",
url=question_URL,
for_if="Supporter",
description="First up vote"
)
rewardPrivielege(request, post.answer_owner)
return JsonResponse({'action': 'upv'})
else:
return JsonResponse({'action': 'lackOfPrivelege'})
elif request.GET.get('submit') == 'ansDownVote':
# Remove Upvote and Downvote
if request.user in post.a_vote_ups.all():
post.a_vote_ups.remove(request.user)
post.a_vote_downs.add(request.user)
if Reputation.objects.filter(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=10,
reputation_on_what="MY_ANSWER_UPVOTE_REP_P").exists():
Reputation.objects.filter(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=10,
reputation_on_what="MY_ANSWER_UPVOTE_REP_P").delete()
Reputation.objects.get_or_create(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=-2,
reputation_on_what="DOWN_VOTE_ANSWER_REP_M")
rewardPrivielege(request, post.answer_owner)
return JsonResponse({'action': 'unUpvoteAndDownVote'})
elif request.user in post.a_vote_downs.all():
# Remove DownVote
post.a_vote_downs.remove(request.user)
if Reputation.objects.filter(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=-2,
reputation_on_what="DOWN_VOTE_ANSWER_REP_M").exists():
Reputation.objects.filter(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=-2,
reputation_on_what="DOWN_VOTE_ANSWER_REP_M").delete()
rewardPrivielege(request, post.answer_owner)
return JsonResponse({'action': 'undislike'})
elif request.user == post.answer_owner:
return JsonResponse({'action': 'cannotLikeOwnPost'})
else:
# Down Vote
if request.user.profile.voteDownPriv:
print("Sixth Statement is Excecuting")
post.a_vote_downs.add(request.user)
# post.date = timezone.now()
post.save()
Reputation.objects.get_or_create(
awarded_to=post.answer_owner,
answer_O=post,
answer_rep_C=-2,
reputation_on_what="DOWN_VOTE_ANSWER_REP_M")
rewardPrivielege(request, post.answer_owner)
return JsonResponse({'action': 'downVoteOnly'})
else:
return JsonResponse({'action': 'lackOfPrivelege'})
else:
messages.error(request, 'Something went wrong')
return redirect('profile:posts')
def upvote_comment(request, commentq_id):
    """
    Toggle the requesting user's upvote on a question comment.

    Removing an existing upvote answers with 'unVoteUp'; adding one answers
    with 'voteUp' and, when the user has accumulated ten comments with five
    or more upvotes each, awards the silver Pundit badge (with its private
    notification). Any other submit value redirects back to the question
    list with an error message.
    """
    com = get_object_or_404(CommentQ, pk=commentq_id)
    if request.GET.get('submit') != 'upVoteCommentIt':
        messages.error(request, 'Something went wrong')
        return redirect('qa:questions')

    if request.user in com.com_upvote.all():
        # Second click on an already-upvoted comment undoes the vote.
        com.com_upvote.remove(request.user)
        return JsonResponse({'action': 'unVoteUp'})

    com.com_upvote.add(request.user)
    # Comments by this user that have gathered at least five upvotes.
    well_received_comments = (
        CommentQ.objects.filter(commented_by=request.user)
        .annotate(num_comment_upvote=Count('com_upvote'))
        .filter(num_comment_upvote__gte=5)
    )
    if well_received_comments.count() >= 10:
        TagBadge.objects.get_or_create(
            awarded_to_user=request.user,
            badge_type="SILVER",
            tag_name="Pundit",
            bade_position="BADGE")
        PrivRepNotification.objects.get_or_create(
            for_user=request.user,
            type_of_PrivNotify="BADGE_EARNED",
            url="#",
            for_if="Pundit",
            description="Leave 10 comments with score of 5 or more"
        )
    return JsonResponse({'action': 'voteUp'})
@login_required
def edit_answer(request, answer_id):
    """
    Handle editing of an existing answer.

    On a valid POST: stamps the answer's edit/activity timestamps, updates
    the editor's profile stats, routes the edit through peer review when
    the editor lacks the direct-edit privilege (self-edits are
    auto-approved), awards edit-count badges (Editor / Strunk & White /
    Copy Editor), bumps the Refiner/Illuminator counter for quick
    edit-plus-answer combos, saves the form and records the edit reason.
    On GET (or an invalid form) it (re-)renders the edit page.

    :param request: current HTTP request
    :param answer_id: primary key of the Answer being edited
    """
    post = Answer.objects.get(id=answer_id)
    post_owner = post.answer_owner
    # Same row fetched again (404-safe); its pre-save timestamps feed the
    # inactivity check below.
    data = get_object_or_404(Answer, id=answer_id)
    active_time = data.active_time
    edited_time = data.a_edited_time
    # NEED TO CHECK THE EDITING 6_MON ALGORITHM IS WORKING in Answering Also.
    if request.method == 'POST':
        form = EditAnswerForm(instance=post,
                              data=request.POST,
                              files=request.FILES)
        if form.is_valid():
            formWhyEditing = form.cleaned_data['why_editing_answer']
            # commit=False: the instance is only persisted by form.save()
            # near the end, after the metadata below is set.
            form.save(commit=False)
            post.a_edited_time = timezone.now()
            post.active_time = timezone.now()
            post.a_edited_by = request.user
            request.user.profile.editPostTimeOfUser = timezone.now()
            request.user.profile.save()
            # ! ARCHAEOLOGIST BADGE - EDIT 100 POST WHICH ARE INACTIVE FOR 6-MONTHS
            # Question Old Algorithm - START
            # NOTE(review): despite the variable name, the threshold is 20
            # seconds, not 6 months — looks like a leftover debug value;
            # confirm before shipping.
            was_inactive_for_six_months = timezone.now() - timedelta(seconds=20)
            if active_time < was_inactive_for_six_months:
                is_in = True
            else:
                is_in = False
            # I CAN ALSO DO THIS WITH :-: if active_time < was_inactive_for_six_months
            # and data.date < edited_time: BUT HAVEN'T TRIED YET.
            if is_in and data.date < edited_time:
                is_boolean_true = True
                if is_boolean_true:
                    # Count edits of long-inactive posts; flag the
                    # Archaeologist badge at exactly five of them.
                    request.user.profile.post_edit_inactive_for_six_month += 1
                    request.user.profile.save()
                    if request.user.profile.post_edit_inactive_for_six_month == 5:
                        request.user.profile.archaeologist_S = True
                        request.user.profile.save()
            else:
                is_boolean_true = False
            data_url = request.build_absolute_uri(
                post.questionans.get_absolute_url())
            if not request.user.profile.edit_questions_answers:
                # Editor lacks the direct-edit privilege: queue the edit
                # for peer review and notify the answer's owner with a
                # link to the review page.
                sendForReview = QuestionEditVotes.objects.create(
                    edit_suggested_by=request.user, edited_answer=post)
                reviewInstance = ReviewQuestionEdit.objects.create(
                    queue_of=sendForReview, answer_to_view_if=post, is_reviewed=False)
                reviewInstance.edit_reviewed_by.add(request.user)
                request.user.profile.posts_edited_counter += 1
                request.user.profile.save()
                getReviewingInstance = ReviewQuestionEdit.objects.filter(
                    queue_of=sendForReview, answer_to_view_if=post, is_reviewed=False).first()
                # data_url = request.build_absolute_uri(getReviewingInstance.get_absolute_url())
                data_url = request.build_absolute_uri(
                    reverse(
                        'review:reviewSuggesstedEdit', args=(
                            getReviewingInstance.pk, )))
                Notification.objects.create(
                    noti_receiver=post_owner,
                    type_of_noti="answer_edit",
                    url=data_url,
                    answer_noti=post)
            elif request.user == post.answer_owner:
                # Owners editing their own answer: auto-approve, no edit
                # counter increment and no notification.
                QuestionEditVotes.objects.create(
                    edit_suggested_by=request.user,
                    edited_answer=post,
                    rev_Action="Approve")
            else:
                # Privileged editor: apply directly, notify the owner and
                # record an approved edit entry.
                request.user.profile.posts_edited_counter += 1
                request.user.profile.save()
                data_url = request.build_absolute_uri(
                    post.questionans.get_absolute_url())
                Notification.objects.create(
                    noti_receiver=post_owner,
                    type_of_noti="answer_edit",
                    url=data_url,
                    answer_noti=post)
                QuestionEditVotes.objects.create(
                    edit_suggested_by=request.user,
                    edited_answer=post,
                    rev_Action="Approve")
            # Edit-count badges: Editor (first edit), Strunk & White (80th),
            # Copy Editor (500th).
            if request.user.profile.posts_edited_counter <= 1:
                TagBadge.objects.get_or_create(
                    awarded_to_user=request.user,
                    badge_type="BRONZE",
                    tag_name="Editor",
                    bade_position="BADGE")
                PrivRepNotification.objects.get_or_create(
                    for_user=request.user,
                    type_of_PrivNotify="BADGE_EARNED",
                    url="#",
                    for_if="Editor",
                    description="First Edit"
                )
            if request.user.profile.posts_edited_counter == 80:
                TagBadge.objects.get_or_create(
                    awarded_to_user=request.user,
                    badge_type="SILVER",
                    tag_name="Strunk & White",
                    bade_position="BADGE")
                PrivRepNotification.objects.get_or_create(
                    for_user=request.user,
                    type_of_PrivNotify="BADGE_EARNED",
                    url=data_url,
                    for_if="Strunk & White",
                    description="Edit 80 posts"
                )
            if request.user.profile.posts_edited_counter == 500:
                TagBadge.objects.get_or_create(
                    awarded_to_user=request.user,
                    badge_type="GOLD",
                    tag_name="Copy Editor",
                    bade_position="BADGE")
                PrivRepNotification.objects.get_or_create(
                    for_user=request.user,
                    type_of_PrivNotify="BADGE_EARNED",
                    url=data_url,
                    for_if="Copy Editor",
                    description="Edit 500 posts (excluding own or deleted posts and tag edits)")
            # getEditingTime = request.user.profile.editPostTimeOfUser
            # getRecentAnswer = Answer.objects.filter(answer_owner=request.user).last()
            # if getEditingTime >= timezone.now() - timedelta(minutes=5) and getRecentAnswer.date >= timezone.now() - timedelta(minutes=5) and getRecentAnswer.a_vote_ups.all().count() >= 1:
            # request.user.profile.Refiner_Illuminator_TagPostCounter += 1
            # request.user.profile.save()
            # TagBadge.objects.get_or_create(awarded_to_user=request.user,badge_type="BRONZE", tag_name="Explainer",bade_position="BADGE")
            # print("Explainer Awarded")
            # if request.user.profile.Refiner_Illuminator_TagPostCounter >= 50:
            # TagBadge.objects.get_or_create(awarded_to_user=request.user,badge_type="SILVER", tag_name="Refiner",bade_position="BADGE")
            # if request.user.profile.Refiner_Illuminator_TagPostCounter >= 500:
            # TagBadge.objects.get_or_create(awarded_to_user=request.user,badge_type="GOLD", tag_name="Illuminator",bade_position="BADGE")
            # Editing within 5 minutes of posting one's latest answer
            # counts towards the Refiner/Illuminator tag badges.
            getEditingTime = request.user.profile.editPostTimeOfUser
            getRecentAnswer = Answer.objects.filter(
                answer_owner=request.user).last()
            if getRecentAnswer:
                if getEditingTime >= timezone.now() - \
                        timedelta(minutes=5) and getRecentAnswer.date >= timezone.now() - timedelta(minutes=5):
                    request.user.profile.Refiner_Illuminator_TagPostCounter += 1
                    request.user.profile.save()
            form.save()
            # Persist the editor-supplied reason with the history record.
            update_change_reason(post, formWhyEditing)
            # sendForReview = QuestionEditVotes.objects.create(edit_suggested_by=request.user, edited_answer=post)
            # reviewInstance = ReviewQuestionEdit.objects.create(queue_of=sendForReview,answer_to_view_if=post, is_reviewed=False)
            # data_url = request.build_absolute_uri(reverse('review:reviewSuggesstedEdit', args=(reviewInstance.pk, )))
            # Notification.objects.create(noti_receiver=post_owner, type_of_noti="answer_comment", url=data_url)
            return redirect('profile:home')
        else:
            messages.error(request, 'Form is Not Valid for some reason')
    else:
        form = EditAnswerForm(request.POST or None,
                              request.FILES or None,
                              instance=post)
    context = {'post': post, 'form': form, 'post_owner': post_owner}
    return render(request, 'qa/edit_answer.html', context)
@login_required
def edit_question(request, question_id):
post = Question.objects.get(id=question_id)
allComments = post.commentq_set.all()
post_owner = post.post_owner
data = get_object_or_404(Question, id=question_id)
active_time = data.active_date
edited_time = data.q_edited_time
if request.method != 'POST':
form = UpdateQuestion(request.POST or None,
request.FILES or None, instance=post)
else:
form = UpdateQuestion(
instance=post, data=request.POST, files=request.FILES)
if form.is_valid():
form.save(commit=False)
formWhyEditing = form.cleaned_data['why_editing_question']
post.q_edited_time = timezone.now()
post.active_date = timezone.now()
post.q_edited_by = request.user
request.user.profile.editPostTimeOfUser | |
# <gh_stars>0
from data_generation import *
from plots import *
from em import *
from sklearn.model_selection import train_test_split
import json
# Experiment configuration: sample count per generated data set and the
# number of mixture components used by each of the three synthetic models.
NUM_SAMPLES = 1000
NUM_COMPONENTS_M1 = 3
NUM_COMPONENTS_M2 = 4
NUM_COMPONENTS_M3 = 8
# Shared LaTeX-styled plotting helper; every figure is saved under ./images.
TexFigure.configure_plots()
tex_figure = TexFigure('./images')
def plot_and_save_data():
    """
    Draws NUM_SAMPLES points from every model and writes two figures per
    model: the samples coloured by generating component (with the true
    gaussian parameters) and the raw, unlabelled samples.
    :return:
    """
    for model_id in Model.__members__.values():
        _, means, covariances = get_parameters(model_id)
        samples, samples_per_component = get_data(model_id, NUM_SAMPLES)
        component_plot = 'true_gmm_m{}.pdf'.format(model_id.value)
        sample_plot = 'samples_m{}.pdf'.format(model_id.value)
        plot_and_save_samples_per_components(tex_figure, component_plot, means,
                                             covariances, samples_per_component)
        plot_and_save_samples(tex_figure, sample_plot, samples)
def get_rmse(estimated_value, true_value):
    """
    Computes the root mean square error between an estimated parameter and its true value.

    The mean is taken over every element of the parameter, so a matrix
    (e.g. a covariance) is averaged over all of its entries rather than
    only its first dimension.

    :param estimated_value: estimated parameter (e.g. weight, mean, covariance)
    :param true_value: true value of the parameter
    :return: root mean square error
    """
    # np.size counts all elements and works for scalars, vectors and
    # matrices alike; len() on a 2-d array would only count its rows,
    # which made the previous version divide by the wrong n for matrices.
    n = np.size(estimated_value)
    rmse = np.sqrt(np.sum((estimated_value - true_value) ** 2) / n)
    return rmse
def run_single_batch_em(models, num_components_per_model, filename_prefix, i_max, seed=42,
                        batch_size=None, step_size=1.0, known_covariance=False):
    """
    Runs (mini-)batch EM once per model from a random initialization given by
    `seed`, saving the log-likelihood curve and the fitted GMM layout of each.

    :param models: models to evaluate
    :param num_components_per_model: number of components per model
    :param filename_prefix: prefix of the filename of generated plots
    :param i_max: max number of iterations
    :param seed: random seed
    :param batch_size: mini-batch size (None runs full-batch EM)
    :param step_size: step size for mini-batch updates
    :param known_covariance: whether covariances are fixed to their true values
    :return: dicts (keyed by model id) of parameters and responsibilities over iterations
    """
    params_per_model = {}
    resps_per_model = {}
    for model_idx, model_id in enumerate(models):
        if model_idx > 0:
            print('\n')
        print('Model {}'.format(model_id.value))
        num_components = num_components_per_model[model_idx]
        data, samples_per_component = get_data(model_id, NUM_SAMPLES, unique_variance=known_covariance)
        true_weights, true_means, true_covariances = get_parameters(model_id, known_covariance)
        true_ll = get_log_likelihood(data, true_weights, true_means, true_covariances)
        weights, means, covariances, _, lls, params, resps = em(
            data, num_components, i_max, seed,
            known_covariances=known_covariance,
            true_covariances=true_covariances,
            batch_size=batch_size,
            step_size=step_size)
        ll_plot = '{}_ll_m{}.pdf'.format(filename_prefix, model_id.value)
        gmm_plot = '{}_gmm_m{}.pdf'.format(filename_prefix, model_id.value)
        plot_and_save_log_likelihoods(tex_figure, ll_plot, [lls], true_ll, ['Batch EM'])
        plot_and_save_samples_per_components(tex_figure, gmm_plot, means, covariances,
                                             samples_per_component)
        params_per_model[model_id.value] = params
        resps_per_model[model_id.value] = resps
    return params_per_model, resps_per_model
def run_multi_batch_em(models, num_components_per_model, i_max, num_runs, seeds=None, batch_size=None, step_size=1):
    """
    Performs batch EM for several runs and stores the configurations of the runs that yielded the smallest error
    and highest log-likelihood. Plots are saved for the best configurations and a JSON summary is written to eval/.

    :param models: models to evaluate
    :param num_components_per_model: number of components per model
    :param i_max: max number of iterations
    :param num_runs: number of runs
    :param seeds: optional list with one random seed per run (None lets each run initialize randomly)
    :param batch_size: mini-batch size (None runs full-batch EM)
    :param step_size: step size used for mini-batch updates
    :return:
    """
    results_per_model = {}
    for model_idx, model_id in enumerate(models):
        if model_idx > 0:
            print('\n')
        print('Model {}'.format(model_id.value))
        k = num_components_per_model[model_idx]
        data, samples_per_component = get_data(model_id, NUM_SAMPLES)
        true_params = get_parameters(model_id)
        true_ll = get_log_likelihood(data, *true_params)
        results = get_results_multi_em(data, true_params, k, i_max, num_runs, seeds, batch_size, step_size)
        # Mini-batch runs encode the batch/step configuration in the file names.
        filename_suffix = str(model_id.value)
        if batch_size:
            filename_suffix = '{}_m_{}_sz_{}'.format(model_id.value, batch_size, step_size)
        plot_and_save_samples_per_components(tex_figure, 'min_error_gmm_m{}.pdf'.format(filename_suffix),
                                             results['min_error_means'], results['min_error_covariances'],
                                             samples_per_component)
        plot_and_save_samples_per_components(tex_figure, 'max_ll_gmm_m{}.pdf'.format(filename_suffix),
                                             results['max_ll_means'], results['max_ll_covariances'],
                                             samples_per_component)
        plot_and_save_log_likelihoods(tex_figure, 'min_error_ll_m{}.pdf'.format(filename_suffix),
                                      [results['ll_curve_at_min_error']], true_ll)
        plot_and_save_log_likelihoods(tex_figure, 'max_ll_ll_m{}.pdf'.format(filename_suffix),
                                      [results['ll_curve_at_max_ll']], true_ll)
        # Strip/convert non-JSON-serializable entries before dumping.
        results = get_writable_results(results)
        results_per_model[model_id.value] = results
    if batch_size:
        json.dump(results_per_model, open('eval/multi_em_m_{}_sz_{}.json'.format(batch_size, step_size), 'w'), indent=4)
    else:
        json.dump(results_per_model, open('eval/multi_em.json', 'w'), indent=4)
def get_results_multi_em(data, true_params, num_components, i_max, num_runs, seeds, batch_size=None, step_size=1):
    """
    Runs EM `num_runs` times on the same data and aggregates the outcomes.

    Tracks two "best" runs — the one with the smallest total parameter error
    and the one with the highest final log-likelihood — plus the average
    per-component errors across all runs.

    :param data: samples to fit
    :param true_params: (weights, means, covariances) of the generating model
    :param num_components: number of mixture components to fit
    :param i_max: max number of EM iterations per run
    :param num_runs: number of independent runs
    :param seeds: optional list with one seed per run; None draws random inits
    :param batch_size: mini-batch size (None runs full-batch EM)
    :param step_size: step size used for mini-batch updates
    :return: dictionary with best-run parameters, errors and log-likelihood curves
    """
    weights_avg_error = np.zeros(num_components)
    means_avg_error = np.zeros(num_components)
    covariances_avg_error = np.zeros(num_components)
    # The np.float alias was removed in NumPy 1.24; the builtin float yields
    # the same float64 limits from np.finfo.
    min_error = np.finfo(float).max
    min_error_error_per_param = []
    min_error_weights, min_error_means, min_error_covariances = None, None, None
    min_error_run = 0
    ll_curve_at_min_error = []
    ll_curve_at_max_ll = [np.finfo(float).min]
    max_ll_error_per_param = []
    max_ll_weights, max_ll_means, max_ll_covariances = None, None, None
    max_ll_run = 0
    min_error_at_max_ll = 0
    for run in range(num_runs):
        seed = seeds[run] if seeds else None
        (weights, means, covariances, _, lls, *_) = em(data, num_components, i_max, seed, batch_size, step_size, False)
        # To match the estimated parameters with the ones from the true components we compare components that
        # possess the smallest error because of permutation.
        estimated_params = (weights, means, covariances)
        error_weights, error_means, error_covariances = get_parameter_error_per_component(estimated_params,
                                                                                          true_params)
        weights_avg_error += error_weights / num_runs
        means_avg_error += error_means / num_runs
        covariances_avg_error += error_covariances / num_runs
        total_error = get_total_error(error_weights, error_means, error_covariances)
        if total_error < min_error:
            min_error = total_error
            min_error_run = run
            # Store the parameters of the configuration with smallest error
            min_error_weights = weights
            min_error_means = means
            min_error_covariances = covariances
            # Store the error per parameter across all components
            min_error_error_per_param = [np.sum(error_weights), np.sum(error_means), np.sum(error_covariances)]
            # Store the log-likelihood curve
            ll_curve_at_min_error = lls
        if lls[-1] > ll_curve_at_max_ll[-1]:
            min_error_at_max_ll = total_error
            max_ll_run = run
            # Store the parameters of the configuration with largest final log-likelihood
            max_ll_weights = weights
            max_ll_means = means
            max_ll_covariances = covariances
            # Store the error per parameter across all components
            max_ll_error_per_param = [np.sum(error_weights), np.sum(error_means), np.sum(error_covariances)]
            # Store the log-likelihood curve
            ll_curve_at_max_ll = lls
    results = {
        'min_error_run': min_error_run,
        'min_error_total_error': min_error,
        'min_error_weights': min_error_weights,
        'min_error_means': min_error_means,
        'min_error_covariances': min_error_covariances,
        'min_error_error_per_parameter': min_error_error_per_param,
        'min_error_num_iterations': len(ll_curve_at_min_error),
        'll_curve_at_min_error': ll_curve_at_min_error,
        'max_ll_run': max_ll_run,
        'max_ll_total_error': min_error_at_max_ll,
        'max_ll_weights': max_ll_weights,
        'max_ll_means': max_ll_means,
        'max_ll_covariances': max_ll_covariances,
        'max_ll_num_iterations': len(ll_curve_at_max_ll),
        'll_curve_at_max_ll': ll_curve_at_max_ll,
        'max_ll_error_per_parameter': max_ll_error_per_param,
        'avg_error_weights': weights_avg_error,
        'avg_error_means': means_avg_error,
        'avg_error_covariances': covariances_avg_error
    }
    return results
def get_permutation(estimated_params, true_params):
    """
    Greedily matches each estimated component to the closest not-yet-taken
    true component (by summed RMSE of weight, mean and covariance).

    :param estimated_params: (weights, means, covariances) from EM
    :param true_params: (weights, means, covariances) of the generating model
    :return: list with, per estimated component, the index of its true counterpart
    """
    weights, means, covariances = estimated_params
    true_weights, true_means, true_covariances = true_params
    num_components = len(weights)
    permutation = []
    available_components = set(range(num_components))
    for component in range(num_components):
        # The np.float alias was removed in NumPy 1.24; the builtin float
        # yields the same float64 limits from np.finfo.
        min_error = np.finfo(float).max
        true_component_idx = 0
        for available_component in list(available_components):
            error_weight = get_rmse(weights[component], true_weights[available_component])
            error_mean = get_rmse(means[component], true_means[available_component])
            error_covariance = get_rmse(covariances[component], true_covariances[available_component])
            total_error = get_total_error(error_weight, error_mean, error_covariance)
            if total_error < min_error:
                true_component_idx = available_component
                min_error = total_error
        # Claim the best match so no true component is used twice.
        permutation.append(true_component_idx)
        available_components.remove(true_component_idx)
    return permutation
def get_parameter_error_per_component(estimated_params, true_params):
    """
    Matches estimated components to true components (see get_permutation) and
    returns per-component RMSEs for weights, means and covariances.

    :param estimated_params: (weights, means, covariances) from EM
    :param true_params: (weights, means, covariances) of the generating model
    :return: (error_weights, error_means, error_covariances) arrays
    """
    weights, means, covariances = estimated_params
    true_weights, true_means, true_covariances = true_params
    num_components = len(weights)
    error_weights = np.zeros(num_components)
    error_means = np.zeros(num_components)
    error_covariances = np.zeros(num_components)
    matching = get_permutation(estimated_params, true_params)
    for estimated_idx, true_idx in enumerate(matching):
        error_weights[estimated_idx] = get_rmse(weights[estimated_idx], true_weights[true_idx])
        error_means[estimated_idx] = get_rmse(means[estimated_idx], true_means[true_idx])
        error_covariances[estimated_idx] = get_rmse(covariances[estimated_idx], true_covariances[true_idx])
    return error_weights, error_means, error_covariances
def get_total_error(error_weights, error_means, error_covariances):
    """Aggregates the three parameter errors into one scalar by summing all of them."""
    return sum(np.sum(err) for err in (error_weights, error_means, error_covariances))
def get_writable_results(results):
    """
    Makes a results dictionary JSON-serializable in place: replaces the full
    log-likelihood curves with their final values and converts numpy
    containers to plain lists. Returns the same (mutated) dictionary.

    :param results: results dictionary as produced by get_results_multi_em
    :return: the mutated, JSON-serializable results dictionary
    """
    # Keep only the final log-likelihood of each stored curve.
    results['final_ll_at_min_error'] = results['ll_curve_at_min_error'][-1]
    results['final_ll_at_max_ll'] = results['ll_curve_at_max_ll'][-1]
    del results['ll_curve_at_min_error']
    del results['ll_curve_at_max_ll']
    # numpy arrays -> plain lists.
    for key in ('min_error_weights', 'max_ll_weights', 'avg_error_weights',
                'avg_error_means', 'avg_error_covariances'):
        results[key] = results[key].tolist()
    # lists of numpy arrays -> lists of plain lists.
    for key in ('min_error_means', 'min_error_covariances',
                'max_ll_means', 'max_ll_covariances'):
        results[key] = [entry.tolist() for entry in results[key]]
    return results
def run_batch_size_per_error_ll_and_convergence(models, num_components_per_model, i_max, num_runs, seeds=None):
    """
    Evaluates mini-batch EM over a range of batch sizes (fixed step size 0.5)
    and saves, per model, plots of batch size vs. total error, final
    log-likelihood and number of iterations, both for the minimum-error run
    and the maximum-likelihood run.

    :param models: models to evaluate
    :param num_components_per_model: number of components per model
    :param i_max: max number of iterations
    :param num_runs: number of runs per batch size
    :param seeds: optional list with one random seed per run
    """
    batch_sizes = [32, 64, 128, 256]
    step_size = 0.5
    for model_idx, model_id in enumerate(models):
        if model_idx > 0:
            print('\n')
        print('Model {}'.format(model_id.value))
        num_components = num_components_per_model[model_idx]
        data, samples_per_component = get_data(model_id, NUM_SAMPLES)
        true_params = get_parameters(model_id)
        min_error_errors = []
        max_ll_errors = []
        min_error_lls = []
        max_ll_lls = []
        min_error_iters = []
        max_ll_iters = []
        for batch_size in batch_sizes:
            print('batch size = {}'.format(batch_size))
            results = get_results_multi_em(data, true_params, num_components, i_max, num_runs, seeds,
                                           batch_size, step_size)
            min_error_errors.append(results['min_error_total_error'])
            max_ll_errors.append(results['max_ll_total_error'])
            min_error_lls.append(results['ll_curve_at_min_error'][-1])
            max_ll_lls.append(results['ll_curve_at_max_ll'][-1])
            min_error_iters.append(results['min_error_num_iterations'])
            max_ll_iters.append(results['max_ll_num_iterations'])
        plot_and_save_batch_size_vs_error(tex_figure, 'min_error_batch_size_vs_error_m{}.pdf'.format(model_id.value),
                                          batch_sizes, min_error_errors, step_size)
        plot_and_save_batch_size_vs_error(tex_figure, 'max_ll_batch_size_vs_error_m{}.pdf'.format(model_id.value),
                                          batch_sizes, max_ll_errors, step_size)
        plot_and_save_batch_size_vs_log_likelihood(tex_figure,
                                                   'min_error_batch_size_vs_ll_m{}.pdf'.format(model_id.value),
                                                   batch_sizes, min_error_lls, step_size)
        plot_and_save_batch_size_vs_log_likelihood(tex_figure, 'max_ll_batch_size_vs_ll_m{}.pdf'.format(model_id.value),
                                                   batch_sizes, max_ll_lls, step_size)
        plot_and_save_batch_size_vs_num_iterations(tex_figure,
                                                   'min_error_batch_size_vs_iter_m{}.pdf'.format(model_id.value),
                                                   batch_sizes, min_error_iters, step_size)
        plot_and_save_batch_size_vs_num_iterations(tex_figure,
                                                   'max_ll_batch_size_vs_iter_m{}.pdf'.format(model_id.value),
                                                   batch_sizes, max_ll_iters, step_size)
def run_step_size_per_error_ll_and_convergence(models, num_components_per_model, i_max, num_runs, seeds=None):
    """Sweep the mini-batch EM step size for every model at a fixed batch size.

    For each model, runs multi-start EM at each step size in a fixed grid and
    plots, against the step size: the total parameter error, the final
    log-likelihood and the number of iterations until convergence — once for
    the minimum-error run and once for the maximum-likelihood run.
    """
    step_sizes = [0.1, 0.3, 0.5, 0.7, 0.9]
    batch_size = 64
    for idx, model_id in enumerate(models):
        if idx > 0:
            print('\n')
        print('Model {}'.format(model_id.value))
        k = num_components_per_model[idx]
        data, _ = get_data(model_id, NUM_SAMPLES)
        true_params = get_parameters(model_id)
        # One accumulator per (selection criterion, metric) pair.
        me_errors, ml_errors = [], []
        me_lls, ml_lls = [], []
        me_iters, ml_iters = [], []
        for step_size in step_sizes:
            print('step size = {}'.format(step_size))
            run = get_results_multi_em(data, true_params, k, i_max, num_runs, seeds, batch_size, step_size)
            me_errors.append(run['min_error_total_error'])
            ml_errors.append(run['max_ll_total_error'])
            # Only the last point of each LL curve is tracked here.
            me_lls.append(run['ll_curve_at_min_error'][-1])
            ml_lls.append(run['ll_curve_at_max_ll'][-1])
            me_iters.append(run['min_error_num_iterations'])
            ml_iters.append(run['max_ll_num_iterations'])
        plot_and_save_step_size_vs_error(tex_figure, 'min_error_step_size_vs_error_m{}.pdf'.format(model_id.value),
                                         step_sizes, me_errors, batch_size)
        plot_and_save_step_size_vs_error(tex_figure, 'max_ll_step_size_vs_error_m{}.pdf'.format(model_id.value),
                                         step_sizes, ml_errors, batch_size)
        plot_and_save_step_size_vs_log_likelihood(tex_figure,
                                                  'min_error_step_size_vs_ll_m{}.pdf'.format(model_id.value),
                                                  step_sizes, me_lls, batch_size)
        plot_and_save_step_size_vs_log_likelihood(tex_figure,
                                                  'max_ll_step_size_vs_ll_m{}.pdf'.format(model_id.value),
                                                  step_sizes, ml_lls, batch_size)
        plot_and_save_step_size_vs_num_iterations(tex_figure,
                                                  'min_error_step_size_vs_iter_m{}.pdf'.format(model_id.value),
                                                  step_sizes, me_iters, batch_size)
        plot_and_save_step_size_vs_num_iterations(tex_figure,
                                                  'max_ll_step_size_vs_iter_m{}.pdf'.format(model_id.value),
                                                  step_sizes, ml_iters, batch_size)
def run_batch_em_vs_minibatch_em(models, num_components_per_model, i_max, num_runs, seeds, batch_size=None,
                                 step_size=1.0):
    """Compare log-likelihood curves of vanilla (full-batch) EM vs mini-batch EM.

    For each model, both variants are run with the same seeds, and the
    log-likelihood curves of the minimum-error and maximum-likelihood runs are
    plotted side by side with the true log-likelihood as a reference line.
    """
    for idx, model_id in enumerate(models):
        if idx > 0:
            print('\n')
        print('Model {}'.format(model_id.value))
        k = num_components_per_model[idx]
        data, _ = get_data(model_id, NUM_SAMPLES)
        true_params = get_parameters(model_id)
        # Reference value: likelihood of the data under the true parameters.
        true_ll = get_log_likelihood(data, *true_params)
        print('Vanilla EM')
        results_vanilla = get_results_multi_em(data, true_params, k, i_max, num_runs, seeds)
        print('Mini-batch EM')
        results_mini_batch = get_results_multi_em(data, true_params, k, i_max, num_runs, seeds, batch_size, step_size)
        plot_and_save_log_likelihoods(tex_figure, 'min_error_ll_batch_vs_mb_m{}.pdf'.format(model_id.value),
                                      [results_vanilla['ll_curve_at_min_error'],
                                       results_mini_batch['ll_curve_at_min_error']],
                                      true_ll, ['Batch EM', 'Mini-batch EM'])
        plot_and_save_log_likelihoods(tex_figure, 'max_ll_ll_batch_vs_mb_m{}.pdf'.format(model_id.value),
                                      [results_vanilla['ll_curve_at_max_ll'],
                                       results_mini_batch['ll_curve_at_max_ll']],
                                      true_ll, ['Batch EM', 'Mini-batch EM'])
def run_em_on_hold_out(models, num_components_per_model, i_max, num_runs, seeds, batch_size=None, step_size=1.0):
true_nll_per_model = []
min_error_estimated_nll_per_model = []
max_ll_estimated_nll_per_model = []
for model_idx, model_id in enumerate(models):
if model_idx > 0:
print('\n')
print('Model {}'.format(model_id.value))
k = num_components_per_model[model_idx]
data, samples_per_component = get_data(model_id, NUM_SAMPLES)
true_params = get_parameters(model_id)
training_data, test_data_per_component = split_data(samples_per_component, 0.2)
results = get_results_multi_em(training_data, true_params, k, i_max, num_runs, seeds, batch_size, step_size)
min_error_weights = results['min_error_weights']
min_error_means = results['min_error_means']
min_error_covariances = results['min_error_covariances']
max_ll_weights = results['max_ll_weights']
max_ll_means = results['max_ll_means']
max_ll_covariances = results['max_ll_covariances']
test_data = np.vstack(test_data_per_component)
true_nll_per_model.append(-get_log_likelihood(test_data, *true_params))
min_error_estimated_nll_per_model.append(
-get_log_likelihood(test_data, min_error_weights, min_error_means, | |
GetCustomerHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetCustomerRequest(TeaModel):
    """Request parameters for the get-customer API."""

    def __init__(self, code: str = None):
        # Customer code.
        self.code = code

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        result = {}
        if self.code is not None:
            result['code'] = self.code
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        if m.get('code') is not None:
            self.code = m['code']
        return self
class GetCustomerResponseBody(TeaModel):
    """Payload returned by the get-customer API."""

    def __init__(self, code: str = None, name: str = None, description: str = None,
                 create_time: int = None, status: str = None):
        # Customer code.
        self.code = code
        # Customer name.
        self.name = name
        # Customer description.
        self.description = description
        # Creation time (in milliseconds).
        self.create_time = create_time
        # Status: enabled (valid), disabled (invalid) or deleted (deleted).
        self.status = status

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        # Serialize only the fields that are set.
        result = {}
        for key, value in (
            ('code', self.code),
            ('name', self.name),
            ('description', self.description),
            ('createTime', self.create_time),
            ('status', self.status),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        # Copy each present wire-format key onto its snake_case attribute.
        for key, attr in (
            ('code', 'code'),
            ('name', 'name'),
            ('description', 'description'),
            ('createTime', 'create_time'),
            ('status', 'status'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m[key])
        return self
class GetCustomerResponse(TeaModel):
    """Response wrapper carrying HTTP headers plus the deserialized body."""

    def __init__(self, headers: Dict[str, str] = None, body: GetCustomerResponseBody = None):
        self.headers = headers
        self.body = body

    def validate(self):
        # Both headers and body are mandatory on a response.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        result = {}
        if self.headers is not None:
            result['headers'] = self.headers
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        if m.get('headers') is not None:
            self.headers = m['headers']
        if m.get('body') is not None:
            self.body = GetCustomerResponseBody().from_map(m['body'])
        return self
class GetCategoryHeaders(TeaModel):
    """Request headers for the get-category API."""

    def __init__(self, common_headers: Dict[str, str] = None,
                 x_acs_dingtalk_access_token: str = None):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        result = {}
        for key, value in (
            ('commonHeaders', self.common_headers),
            ('x-acs-dingtalk-access-token', self.x_acs_dingtalk_access_token),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key, attr in (
            ('commonHeaders', 'common_headers'),
            ('x-acs-dingtalk-access-token', 'x_acs_dingtalk_access_token'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m[key])
        return self
class GetCategoryRequest(TeaModel):
    """Request parameters for the get-category API."""

    def __init__(self, code: str = None):
        # Category code.
        self.code = code

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        result = {}
        if self.code is not None:
            result['code'] = self.code
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        if m.get('code') is not None:
            self.code = m['code']
        return self
class GetCategoryResponseBody(TeaModel):
    """Payload returned by the get-category API."""

    def __init__(self, code: str = None, type: str = None, name: str = None,
                 is_dir: bool = None, parent_code: str = None, status: str = None):
        # Category code.
        self.code = code
        # Category type: 'income' or 'expense'.
        self.type = type
        # Category name.
        self.name = name
        # Whether this category is a directory.
        self.is_dir = is_dir
        # Code of the parent category.
        self.parent_code = parent_code
        # Status: valid, invalid or deleted.
        self.status = status

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        # Serialize only the fields that are set.
        result = {}
        for key, value in (
            ('code', self.code),
            ('type', self.type),
            ('name', self.name),
            ('isDir', self.is_dir),
            ('parentCode', self.parent_code),
            ('status', self.status),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        # Copy each present wire-format key onto its snake_case attribute.
        for key, attr in (
            ('code', 'code'),
            ('type', 'type'),
            ('name', 'name'),
            ('isDir', 'is_dir'),
            ('parentCode', 'parent_code'),
            ('status', 'status'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m[key])
        return self
class GetCategoryResponse(TeaModel):
    """Response wrapper carrying HTTP headers plus the deserialized body."""

    def __init__(self, headers: Dict[str, str] = None, body: GetCategoryResponseBody = None):
        self.headers = headers
        self.body = body

    def validate(self):
        # Both headers and body are mandatory on a response.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        result = {}
        if self.headers is not None:
            result['headers'] = self.headers
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        if m.get('headers') is not None:
            self.headers = m['headers']
        if m.get('body') is not None:
            self.body = GetCategoryResponseBody().from_map(m['body'])
        return self
class GetFinanceAccountHeaders(TeaModel):
    """Request headers for the get-finance-account API."""

    def __init__(self, common_headers: Dict[str, str] = None,
                 x_acs_dingtalk_access_token: str = None):
        self.common_headers = common_headers
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        result = {}
        for key, value in (
            ('commonHeaders', self.common_headers),
            ('x-acs-dingtalk-access-token', self.x_acs_dingtalk_access_token),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key, attr in (
            ('commonHeaders', 'common_headers'),
            ('x-acs-dingtalk-access-token', 'x_acs_dingtalk_access_token'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m[key])
        return self
class GetFinanceAccountRequest(TeaModel):
    """Request parameters for the get-finance-account API."""

    def __init__(self, account_code: str = None):
        # Account code.
        self.account_code = account_code

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        result = {}
        if self.account_code is not None:
            result['accountCode'] = self.account_code
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        if m.get('accountCode') is not None:
            self.account_code = m['accountCode']
        return self
class GetFinanceAccountResponseBody(TeaModel):
    """Payload returned by the get-finance-account API."""

    def __init__(self, account_code: str = None, account_id: str = None,
                 account_type: str = None, account_name: str = None,
                 account_remark: str = None, amount: str = None,
                 creator: str = None, create_time: int = None):
        # Account code.
        self.account_code = account_code
        # Id of the linked fund account.
        self.account_id = account_id
        # Account type: ALIPAY, BANKCARD, CASH, WECHAT.
        self.account_type = account_type
        # Account name.
        self.account_name = account_name
        # Remark.
        self.account_remark = account_remark
        # Account total amount, kept to 2 decimal places.
        self.amount = amount
        # Employee id of the creator.
        self.creator = creator
        # Creation time.
        self.create_time = create_time

    def validate(self):
        pass

    def to_map(self):
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        # Serialize only the fields that are set.
        result = {}
        for key, value in (
            ('accountCode', self.account_code),
            ('accountId', self.account_id),
            ('accountType', self.account_type),
            ('accountName', self.account_name),
            ('accountRemark', self.account_remark),
            ('amount', self.amount),
            ('creator', self.creator),
            ('createTime', self.create_time),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        # Copy each present wire-format key onto its snake_case attribute.
        for key, attr in (
            ('accountCode', 'account_code'),
            ('accountId', 'account_id'),
            ('accountType', 'account_type'),
            ('accountName', 'account_name'),
            ('accountRemark', 'account_remark'),
            ('amount', 'amount'),
            ('creator', 'creator'),
            ('createTime', 'create_time'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m[key])
        return self
class GetFinanceAccountResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetFinanceAccountResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body | |
  helpful in ONE of the following cases:
- UAC is disabled, account is admin OR SeCreateSymbolicLinkPrivilege was
manually granted.
- UAC is enabled, account is NOT admin AND SeCreateSymbolicLinkPrivilege was
manually granted.
If running Windows 10 and the following is true, then enable_symlink() is
unnecessary.
- Windows 10 with build 14971 or later
- Admin account
- UAC enabled
- Developer mode enabled (not the default)
Returns:
- True if symlink support is enabled.
"""
return enable_privilege(u'SeCreateSymbolicLinkPrivilege')
def kill_children_processes(root):
  """Try to kill all children processes indiscriminately and prints updates to
  stderr.

  Arguments:
    root: directory used by _get_children_processes_win() to also catch stray
        processes whose executable lives under it.

  Returns:
    True if at least one child process was found.
  """
  processes = _get_children_processes_win(root)
  if not processes:
    return False
  logging.debug('Enumerating processes:\n')
  for _, proc in sorted(processes.items()):
    logging.debug('- pid %d; Handles: %d; Exe: %s; Cmd: %s\n', proc.ProcessId,
                  proc.HandleCount, proc.ExecutablePath, proc.CommandLine)
  logging.debug('Terminating %d processes:\n', len(processes))
  for pid in sorted(processes):
    try:
      # Killing is asynchronous.
      # NOTE(review): on Windows, os.kill() with a non-CTRL signal calls
      # TerminateProcess with 9 as the exit code — confirm this is intended.
      os.kill(pid, 9)
      logging.debug('- %d killed\n', pid)
    except OSError as e:
      # Best effort: a process may have already exited or be inaccessible.
      logging.error('- failed to kill %s, error %s\n', pid, e)
  return True
## Windows private code.
def _enum_processes_win():
  """Returns all processes on the system that are accessible to this process.

  Returns:
    Win32_Process COM objects. See
    http://msdn.microsoft.com/library/aa394372.aspx for more details.
  """
  # Imported lazily; win32com (pywin32) is presumably only available on
  # Windows, hence the import-error suppression.
  import win32com.client # pylint: disable=F0401
  wmi_service = win32com.client.Dispatch('WbemScripting.SWbemLocator')
  # Connect to the local WMI service and query every process.
  wbem = wmi_service.ConnectServer('.', 'root\\cimv2')
  return [proc for proc in wbem.ExecQuery('SELECT * FROM Win32_Process')]
def _filter_processes_dir_win(processes, root_dir):
  """Returns all processes which have their main executable located inside
  root_dir.
  """
  def normalize_path(filename):
    # Resolve the long-form path and lowercase it so that comparisons are
    # case-insensitive; fall back to plain lowercasing on failure.
    try:
      return GetLongPathName(six.text_type(filename)).lower()
    except: # pylint: disable=W0702
      return six.text_type(filename).lower()
  root_dir = normalize_path(root_dir)
  def process_name(proc):
    # Prefer the executable path reported by WMI.
    if proc.ExecutablePath:
      return normalize_path(proc.ExecutablePath)
    # proc.ExecutablePath may be empty if the process hasn't finished
    # initializing, but the command line may be valid.
    if proc.CommandLine is None:
      return None
    parsed_line = shlex.split(proc.CommandLine)
    if len(parsed_line) >= 1 and os.path.isabs(parsed_line[0]):
      return normalize_path(parsed_line[0])
    return None
  # Keep processes whose normalized executable path falls under root_dir.
  long_names = ((process_name(proc), proc) for proc in processes)
  return [
    proc for name, proc in long_names
    if name is not None and name.startswith(root_dir)
  ]
def _filter_processes_tree_win(processes):
  """Returns all the processes under the current process."""
  # Convert to dict.
  processes = dict((p.ProcessId, p) for p in processes)
  root_pid = os.getpid()
  out = {root_pid: processes[root_pid]}
  # Expand the descendant set to a fixed point: each pass adds every process
  # whose parent pid is already known, until no new pid is discovered.
  while True:
    found = set()
    for pid in out:
      found.update(
        p.ProcessId for p in processes.values()
        if p.ParentProcessId == pid)
    found -= set(out)
    if not found:
      break
    out.update((p, processes[p]) for p in found)
  return out.values()
def _get_children_processes_win(root):
  """Returns a list of processes.

  Enumerates both:
  - all child processes from this process.
  - processes where the main executable is inside 'root'. The reason is that
    the ancestry may be broken so stray grand-children processes could be
    undetected by the first technique.

  This technique is not fool-proof but gets mostly there.
  """
  processes = _enum_processes_win()
  tree_processes = _filter_processes_tree_win(processes)
  dir_processes = _filter_processes_dir_win(processes, root)
  # Convert to dict to remove duplicates.
  processes = dict((p.ProcessId, p) for p in tree_processes)
  processes.update((p.ProcessId, p) for p in dir_processes)
  # Never include the current process in the result.
  processes.pop(os.getpid())
  return processes
elif sys.platform == 'darwin':
# On non-windows, keep the stdlib behavior.
isabs = os.path.isabs
  def find_item_native_case(root_path, item):
    """Gets the native path case of a single item based at root_path.

    There is no API to get the native path case of symlinks on OSX. So it
    needs to be done the slow way.

    Returns the directory entry with its on-disk case, or None if no entry
    matches case-insensitively.
    """
    if item == '..':
      return item
    # Linear scan of the directory, comparing case-insensitively.
    item = item.lower()
    for element in fs.listdir(root_path):
      if element.lower() == item:
        return element
    return None
  @tools.profile
  @tools.cached
  def get_native_path_case(path):
    """Returns the native path case for an existing file.

    Technically, it's only HFS+ on OSX that is case preserving and
    insensitive. It's the default setting on HFS+ but can be changed.

    Arguments:
      path: absolute unicode path; raises ValueError otherwise.
    """
    assert isinstance(path, six.text_type), repr(path)
    if not isabs(path):
      raise ValueError(
          'get_native_path_case(%r): Require an absolute path' % path, path)
    if path.startswith('/dev'):
      # /dev is not visible from Carbon, causing an exception.
      return path
    # Starts assuming there is no symlink along the path.
    resolved = _native_case(path)
    # NOTE(review): the "+ './'" variant looks like it accounts for a
    # trailing-separator artifact of _native_case — confirm.
    if path.lower() in (resolved.lower(), resolved.lower() + './'):
      # This code path is incredibly faster.
      #logging.debug('get_native_path_case(%s) = %s' % (path, resolved))
      return resolved
    # There was a symlink, process it.
    base, symlink, rest = _split_at_symlink_native(None, path)
    if not symlink:
      # TODO(maruel): This can happen on OSX because we use stale APIs on OSX.
      # Fixing the APIs usage will likely fix this bug. The bug occurs due to
      # hardlinked files, where the API may return one file path or the other
      # depending on how it feels.
      return base
    # Rebuild the path segment by segment, fixing the case of each component
    # without resolving the symlinks themselves.
    prev = base
    base = safe_join(_native_case(base), symlink)
    assert len(base) > len(prev)
    while rest:
      prev = base
      relbase, symlink, rest = _split_at_symlink_native(base, rest)
      base = safe_join(base, relbase)
      assert len(base) > len(prev), (prev, base, symlink)
      if symlink:
        base = safe_join(base, symlink)
        assert len(base) > len(prev), (prev, base, symlink)
    # Make sure no symlink was resolved.
    assert base.lower() == path.lower(), (base, path)
    #logging.debug('get_native_path_case(%s) = %s' % (path, base))
    return base
  def enable_symlink():
    # Nothing to enable outside Windows; symlink creation needs no privilege.
    return True
## OSX private code.
  def _native_case(p):
    """Gets the native path case. Warning: this function resolves symlinks."""
    try:
      out = macos.native_case(p)
      # Preserve a trailing separator that macos.native_case() may drop.
      if p.endswith(os.path.sep) and not out.endswith(os.path.sep):
        return out + os.path.sep
      return out
    except macos.Error as e:
      if macos.get_errno(e) in (-43, -120):
        # The path does not exist. Try to recurse and reconstruct the path.
        # -43 means file not found.
        # -120 means directory not found.
        base = os.path.dirname(p)
        rest = os.path.basename(p)
        return os.path.join(_native_case(base), rest)
      raise OSError(
          macos.get_errno(e), 'Failed to get native path for %s' % p, p, str(e))
  def _split_at_symlink_native(base_path, rest):
    """Returns the native path for a symlink.

    Like split_at_symlink() but also fixes the case of the symlink component
    itself via find_item_native_case().
    """
    base, symlink, rest = split_at_symlink(base_path, rest)
    if symlink:
      if not base_path:
        base_path = base
      else:
        base_path = safe_join(base_path, base)
      symlink = find_item_native_case(base_path, symlink)
    return base, symlink, rest
else: # OSes other than Windows and OSX.
# On non-windows, keep the stdlib behavior.
isabs = os.path.isabs
  def find_item_native_case(root, item):
    """Gets the native path case of a single item based at root_path."""
    if item == '..':
      return item
    # Normalize the root first, then extract the normalized basename.
    root = get_native_path_case(root)
    return os.path.basename(get_native_path_case(os.path.join(root, item)))
  @tools.profile
  @tools.cached
  def get_native_path_case(path):
    """Returns the native path case for an existing file.

    On OSes other than OSX and Windows, assume the file system is
    case-sensitive.

    TODO(maruel): This is not strictly true. Implement if necessary.
    """
    assert isinstance(path, six.text_type), repr(path)
    if not isabs(path):
      raise ValueError(
          'get_native_path_case(%r): Require an absolute path' % path, path)
    # Give up on cygwin, as GetLongPathName() can't be called.
    # Linux traces tend to not be normalized so use this occasion to normalize
    # it. This function implementation already normalizes the path on the other
    # OS so this needs to be done here to be coherent between OSes.
    out = os.path.normpath(path)
    # normpath() strips a trailing separator; put it back to keep the input
    # shape.
    if path.endswith(os.path.sep) and not out.endswith(os.path.sep):
      out = out + os.path.sep
    # In 99.99% of cases on Linux out == path. Since a return value is cached
    # forever, reuse (also cached) |path| object. It saves approx 7MB of ram
    # when isolating Chromium tests. It's important on memory constrained
    # systems running ARM.
    return path if out == path else out
  def enable_symlink():
    # Nothing to enable outside Windows; symlink creation needs no privilege.
    return True
if sys.platform != 'win32': # All non-Windows OSes.
  def safe_join(*args):
    """Joins path elements like os.path.join() but doesn't abort on absolute
    path.

    os.path.join('foo', '/bar') == '/bar'
    but safe_join('foo', '/bar') == 'foo/bar'.
    """
    # NOTE(review): as written, a relative first element is prefixed with a
    # separator (out == '' never ends with os.path.sep), which seems to
    # contradict the docstring example — confirm against callers.
    out = ''
    for element in args:
      if element.startswith(os.path.sep):
        if out.endswith(os.path.sep):
          out += element[1:]
        else:
          out += element
      else:
        if out.endswith(os.path.sep):
          out += element
        else:
          out += os.path.sep + element
    return out
@tools.profile
def split_at_symlink(base_dir, relfile):
"""Scans each component of relfile and cut the string at the symlink if
there is any.
Returns a tuple (base_path, symlink, rest), with symlink == rest == None if
    no symlink was found.
"""
if base_dir:
assert relfile
assert os.path.isabs(base_dir)
index = 0
else:
assert os.path.isabs(relfile)
index = 1
def at_root(rest):
if base_dir:
return safe_join(base_dir, rest)
return rest
while True:
try:
index = relfile.index(os.path.sep, index)
except ValueError:
index = len(relfile)
full = at_root(relfile[:index])
if fs.islink(full):
# A symlink!
base = os.path.dirname(relfile[:index])
symlink = os.path.basename(relfile[:index])
rest = relfile[index:]
logging.debug(
'split_at_symlink(%s, %s) -> (%s, %s, %s)' %
(base_dir, relfile, base, symlink, | |
[BMT__Hash__HPS]>]
>>> sorted (ET_Rat.children)
[]
>>> prepr (sorted (apt.etypes))
['BMT.Beaver', 'BMT.Location', 'BMT.Mouse', 'BMT.Otter', 'BMT.Person', 'BMT.Person_owns_Trap', 'BMT.Person_sets_Trap', 'BMT.Rat', 'BMT.Rodent', 'BMT.Rodent_in_Trap', 'BMT.Rodent_is_sick', 'BMT.Supertrap', 'BMT.Trap', 'MOM.An_Entity', 'MOM.Date_Interval', 'MOM.Date_Interval_C', 'MOM.Date_Interval_N', 'MOM.Entity', 'MOM.Float_Interval', 'MOM.Frequency_Interval', 'MOM.Id_Entity', 'MOM.Int_Interval', 'MOM.Int_Interval_C', 'MOM.Link', 'MOM.Link1', 'MOM.Link2', 'MOM.Link3', 'MOM.MD_Change', 'MOM.MD_Entity', 'MOM.Named_Object', 'MOM.Object', 'MOM._Interval_', 'MOM._Link_n_']
>>> prepr ([t.type_name for t in apt._T_Extension])
['MOM.Entity', 'MOM.An_Entity', 'MOM.Id_Entity', 'MOM.MD_Entity', 'MOM.MD_Change', 'MOM.Link', 'MOM.Link1', 'MOM._Link_n_', 'MOM.Link2', 'MOM.Link3', 'MOM.Object', 'MOM.Date_Interval', 'MOM.Date_Interval_C', 'MOM.Date_Interval_N', 'MOM._Interval_', 'MOM.Float_Interval', 'MOM.Frequency_Interval', 'MOM.Int_Interval', 'MOM.Int_Interval_C', 'MOM.Named_Object', 'BMT.Location', 'BMT.Person', 'BMT.Rodent', 'BMT.Mouse', 'BMT.Rat', 'BMT.Beaver', 'BMT.Otter', 'BMT.Trap', 'BMT.Supertrap', 'BMT.Rodent_is_sick', 'BMT.Rodent_in_Trap', 'BMT.Person_owns_Trap', 'BMT.Person_sets_Trap']
>>> for t in apt._T_Extension [2:] :
... print ("%%-35s %%s" %% (t.type_name, t.epk_sig))
MOM.Id_Entity ()
MOM.MD_Entity ()
MOM.MD_Change ()
MOM.Link ('left',)
MOM.Link1 ('left',)
MOM._Link_n_ ('left', 'right')
MOM.Link2 ('left', 'right')
MOM.Link3 ('left', 'middle', 'right')
MOM.Object ()
MOM.Date_Interval ()
MOM.Date_Interval_C ()
MOM.Date_Interval_N ()
MOM._Interval_ ()
MOM.Float_Interval ()
MOM.Frequency_Interval ()
MOM.Int_Interval ()
MOM.Int_Interval_C ()
MOM.Named_Object ('name',)
BMT.Location ('lon', 'lat')
BMT.Person ('last_name', 'first_name', 'middle_name')
BMT.Rodent ('name',)
BMT.Mouse ('name',)
BMT.Rat ('name',)
BMT.Beaver ('name',)
BMT.Otter ('name',)
BMT.Trap ('name', 'serial_no')
BMT.Supertrap ('name', 'serial_no')
BMT.Rodent_is_sick ('left', 'sick_leave')
BMT.Rodent_in_Trap ('left', 'right')
BMT.Person_owns_Trap ('left', 'right')
BMT.Person_sets_Trap ('left', 'right', 'location')
>>> for t in apt._T_Extension [2:] :
... print ("%%s%%s %%s" %% (t.type_name, NL, portable_repr (t.sorted_by.criteria)))
MOM.Id_Entity
('type_name', 'pid')
MOM.MD_Entity
()
MOM.MD_Change
('-cid',)
MOM.Link
('left',)
MOM.Link1
('left',)
MOM._Link_n_
('left', 'right')
MOM.Link2
('left', 'right')
MOM.Link3
('left', 'middle', 'right')
MOM.Object
('type_name', 'pid')
MOM.Date_Interval
('start', 'finish')
MOM.Date_Interval_C
('start', 'finish')
MOM.Date_Interval_N
('start', 'finish')
MOM._Interval_
('lower', 'upper')
MOM.Float_Interval
('lower', 'upper')
MOM.Frequency_Interval
('lower', 'upper')
MOM.Int_Interval
('lower', 'upper')
MOM.Int_Interval_C
('lower', 'upper')
MOM.Named_Object
('name',)
BMT.Location
('lon', 'lat')
BMT.Person
('last_name', 'first_name', 'middle_name')
BMT.Rodent
('name',)
BMT.Mouse
('name',)
BMT.Rat
('name',)
BMT.Beaver
('name',)
BMT.Otter
('name',)
BMT.Trap
('name', 'serial_no')
BMT.Supertrap
('name', 'serial_no')
BMT.Rodent_is_sick
('left.name', 'sick_leave.start', 'sick_leave.finish')
BMT.Rodent_in_Trap
('left.name', 'right.name', 'right.serial_no')
BMT.Person_owns_Trap
('left.last_name', 'left.first_name', 'left.middle_name', 'right.name', 'right.serial_no')
BMT.Person_sets_Trap
('left.last_name', 'left.first_name', 'left.middle_name', 'right.name', 'right.serial_no', 'location.lon', 'location.lat')
>>> show_ref_map (ET_Person, "Ref_Req_Map")
BMT.Person
('BMT.Person_owns_Trap', ['left'])
('BMT.Person_sets_Trap', ['left'])
>>> show_ref_map (ET_Trap, "Ref_Req_Map")
BMT.Trap
('BMT.Person_owns_Trap', ['right'])
('BMT.Person_sets_Trap', ['right'])
('BMT.Rodent_in_Trap', ['right'])
>>> show_ref_map (ET_Person, "Ref_Opt_Map")
>>> show_ref_map (ET_Trap, "Ref_Opt_Map")
Inheritance introspection
+++++++++++++++++++++++++++
Each entity_type knows about its children. :attr:`children` maps type_names
to the direct children of the entity_type in question; :attr:`children_np`
maps type_names to the non-partial descendants of the entity_type::
>>> show_children (ET_Entity)
MOM.Entity
MOM.An_Entity
MOM.Date_Interval
MOM.Date_Interval_C
MOM.Date_Interval_N
MOM._Interval_
MOM.Float_Interval
MOM.Frequency_Interval
MOM.Int_Interval
MOM.Int_Interval_C
MOM.Id_Entity
MOM.Link
MOM.Link1
BMT.Rodent_is_sick
MOM._Link_n_
MOM.Link2
BMT.Rodent_in_Trap
BMT.Person_owns_Trap
BMT.Person_sets_Trap
MOM.Link3
MOM.Object
MOM.Named_Object
BMT.Rodent
BMT.Mouse
BMT.Beaver
BMT.Otter
BMT.Rat
BMT.Trap
BMT.Supertrap
BMT.Location
BMT.Person
MOM.MD_Entity
MOM.MD_Change
>>> for et in apt._T_Extension :
... if et.children and et.children != et.children_np :
... print (et.type_name)
... print (" ", sorted (et.children))
... print (" ", sorted (et.children_np))
MOM.Entity
['MOM.An_Entity', 'MOM.Id_Entity', 'MOM.MD_Entity']
['BMT.Location', 'BMT.Mouse', 'BMT.Person', 'BMT.Person_owns_Trap', 'BMT.Person_sets_Trap', 'BMT.Rat', 'BMT.Rodent_in_Trap', 'BMT.Rodent_is_sick', 'BMT.Trap', 'MOM.Date_Interval', 'MOM.Float_Interval', 'MOM.Frequency_Interval', 'MOM.Int_Interval', 'MOM.MD_Change']
MOM.An_Entity
['MOM.Date_Interval', 'MOM._Interval_']
['MOM.Date_Interval', 'MOM.Float_Interval', 'MOM.Frequency_Interval', 'MOM.Int_Interval']
MOM.Id_Entity
['MOM.Link', 'MOM.Object']
['BMT.Location', 'BMT.Mouse', 'BMT.Person', 'BMT.Person_owns_Trap', 'BMT.Person_sets_Trap', 'BMT.Rat', 'BMT.Rodent_in_Trap', 'BMT.Rodent_is_sick', 'BMT.Trap']
MOM.Link
['MOM.Link1', 'MOM._Link_n_']
['BMT.Person_owns_Trap', 'BMT.Person_sets_Trap', 'BMT.Rodent_in_Trap', 'BMT.Rodent_is_sick']
MOM._Link_n_
['MOM.Link2', 'MOM.Link3']
['BMT.Person_owns_Trap', 'BMT.Person_sets_Trap', 'BMT.Rodent_in_Trap']
MOM.Object
['BMT.Location', 'BMT.Person', 'MOM.Named_Object']
['BMT.Location', 'BMT.Mouse', 'BMT.Person', 'BMT.Rat', 'BMT.Trap']
MOM.Named_Object
['BMT.Rodent', 'BMT.Trap']
['BMT.Mouse', 'BMT.Rat', 'BMT.Trap']
Scope
-----
A :class:`scope<_MOM.Scope.Scope>` manages the instances of essential
object and link types.
Specifying `None` as `db_url` will create an in memory database::
>>> db_scheme = "hps://"
>>> scope = MOM.Scope.new (apt, db_url = db_scheme)
For each :attr:`~_MOM.Entity.PNS` defining essential
classes, the `scope` provides an object holding
:class:`object managers<_MOM.E_Type_Manager.Object>` and
:class:`link managers<_MOM.E_Type_Manager.Link>`
that support instance creation and queries::
>>> scope.MOM.Id_Entity
<E_Type_Manager for MOM.Id_Entity of scope BMT__Hash__HPS>
>>> scope.BMT.Person
<E_Type_Manager for BMT.Person of scope BMT__Hash__HPS>
>>> scope.BMT.Person_owns_Trap
<E_Type_Manager for BMT.Person_owns_Trap of scope BMT__Hash__HPS>
Object and link creation
-------------------------
One creates objects or links by calling the etype manager of the
appropriate class::
>>> with expect_except (MOM.Error.Partial_Type) :
... scope.MOM.Named_Object ("foo")
Partial_Type: Named_Object
>>> p = scope.BMT.Person ("luke", "lucky")
>>> p
BMT.Person ('luke', 'lucky', '')
>>> q = scope.BMT.Person ("dog", "snoopy")
>>> l1 = scope.BMT.Location (-16.268799, 48.189956)
>>> l2 = scope.BMT.Location (-16.740770, 48.463313)
>>> m = scope.BMT.Mouse ("mighty_mouse")
>>> b = scope.BMT.Beaver ("toothy_beaver")
>>> r = scope.BMT.Rat ("rutty_rat")
>>> axel = scope.BMT.Rat ("axel")
>>> t1 = scope.BMT.Trap ("x", 1)
>>> t2 = scope.BMT.Trap ("x", 2)
>>> t3 = scope.BMT.Trap ("y", 1)
>>> t4 = scope.BMT.Trap ("y", 2)
>>> t5 = scope.BMT.Trap ("z", 3)
>>> Ris = scope.BMT.Rodent_is_sick
>>> RiT = scope.BMT.Rodent_in_Trap
>>> PoT = scope.BMT.Person_owns_Trap
>>> PTL = scope.BMT.Person_sets_Trap
>>> m == m, m != m, m == b, m != b, m == "", m != ""
(True, False, False, True, False, True)
>>> with expect_except (MOM.Error.Wrong_Type) :
... RiT (p, t4)
Wrong_Type: Person 'luke, lucky' not eligible for attribute left,
must be instance of Rodent
>>> rit1 = RiT (m, t1)
>>> rit1
BMT.Rodent_in_Trap (('mighty_mouse', ), ('x', 1))
>>> with expect_except (MOM.Error.Multiplicity) :
... RiT (m, t2)
Multiplicity: The new definition of Rodent in Trap (BMT.Mouse ('mighty_mouse'), BMT.Trap ('x', 2)) would exceed the maximum number [1] of links allowed for BMT.Mouse ('mighty_mouse',).
Already existing:
BMT.Rodent_in_Trap (('mighty_mouse', 'BMT.Mouse'), ('x', '1', 'BMT.Trap'))
>>> RiT (r, t3)
BMT.Rodent_in_Trap (('rutty_rat', ), ('y', 1))
>>> RiT (axel, t2)
BMT.Rodent_in_Trap (('axel', ), ('x', 2))
>>> PoT (p, t1)
BMT.Person_owns_Trap (('luke', 'lucky', ''), ('x', 1))
>>> PoT (p, t2)
BMT.Person_owns_Trap (('luke', 'lucky', ''), ('x', 2))
>>> PoT (q, t3)
BMT.Person_owns_Trap (('dog', 'snoopy', ''), ('y', 1))
>>> PoT (("tin", "tin"), t4)
BMT.Person_owns_Trap (('tin', 'tin', ''), ('y', 2))
Creating a link will automatically change `auto_rev_ref` attributes of the
objects participating of the link, like `Trap.setter`::
>>> t1.attr_prop ("setter")
Role_Ref `setter`
>>> t1.attr_prop ("setter_links")
Link_Ref_List `setter_links`
>>> prepr (t1.setter) ### before creation of Person_sets_Trap for t1
None
>>> PTL (p, t1, l1)
BMT.Person_sets_Trap (('luke', 'lucky', ''), ('x', 1), (-16.268799, 48.189956))
>>> t1.setter ### after creation of Person_sets_Trap for t1
BMT.Person ('luke', 'lucky', '')
>>> t1.setter_links
[BMT.Person_sets_Trap (('luke', 'lucky', ''), ('x', 1), (-16.268799, 48.189956))]
>>> prepr (t2.setter) ### before creation of Person_sets_Trap for t2
None
>>> PTL (p, t2, l2)
BMT.Person_sets_Trap (('luke', 'lucky', ''), ('x', 2), (-16.74077, 48.463313))
>>> t2.setter ### after creation of Person_sets_Trap for t2
BMT.Person ('luke', 'lucky', '')
>>> t2.setter_links
[BMT.Person_sets_Trap (('luke', 'lucky', ''), ('x', 2), (-16.74077, 48.463313))]
>>> prepr (t3.setter) ### before creation of Person_sets_Trap for t3
None
>>> PTL (p, t3, l2)
BMT.Person_sets_Trap (('luke', 'lucky', ''), ('y', 1), (-16.74077, 48.463313))
>>> t3.setter ### after creation of Person_sets_Trap for t3
BMT.Person ('luke', 'lucky', '')
>>> t3.setter_links
[BMT.Person_sets_Trap (('luke', 'lucky', ''), ('y', 1), (-16.74077, 48.463313))]
Queries
-------
One queries the object model by calling query methods of the
appropriate etype manager. Strict queries return only instances
of the essential class in question,
but not instances of derived classes. Non-strict queries are
transitive, i.e., they return instances of the essential class in
question and all its descendants. For partial types, strict queries
return nothing. By default, queries are non-strict (transitive).
Passing `strict = True` to a query makes it strict.
The query :meth:`instance<_MOM.E_Type_Manager.E_Type_Manager.instance>` can
only be applied to `E_Type_Managers` for essential types that are, or
inherit from, a `relevant_root`::
>>> scope.MOM.Object.instance ("mighty_mouse")
Traceback (most recent call last):
...
TypeError: Object needs the arguments (), got ('mighty_mouse',) instead
>>> scope.MOM.Named_Object.instance ("mighty_mouse")
BMT.Mouse ('mighty_mouse')
>>> scope.BMT.Rodent.instance ("mighty_mouse")
BMT.Mouse ('mighty_mouse')
>>> prepr (scope.BMT.Rat.instance ("mighty_mouse"))
None
>>> prepr (scope.BMT.Rat.query (name = "mighty_mouse").all ())
[]
>>> PoT.query_s ().all ()
[BMT.Person_owns_Trap (('dog', 'snoopy', ''), ('y', 1)), BMT.Person_owns_Trap (('luke', 'lucky', ''), ('x', 1)), BMT.Person_owns_Trap (('luke', 'lucky', ''), ('x', 2)), BMT.Person_owns_Trap (('tin', 'tin', ''), ('y', 2))]
>>> PoT.instance (('dog', 'snoopy'), ('y', 1))
BMT.Person_owns_Trap (('dog', 'snoopy', ''), ('y', 1))
>>> PoT.instance (('dog', 'snoopy', ''), ('x', 2))
>>> prepr (PoT.instance (("Man", "tin"), t4))
None
The query :meth:`exists<_MOM.E_Type_Manager.E_Type_Manager.exists>`
returns a list of all `E_Type_Managers` for which an object or link
with the specified `epk` exists::
>>> scope.MOM.Named_Object.exists ("mighty_mouse")
[<E_Type_Manager for BMT.Mouse of scope BMT__Hash__HPS>]
>>> scope.BMT.Mouse.exists ("mighty_mouse")
[<E_Type_Manager for BMT.Mouse of scope BMT__Hash__HPS>]
>>> scope.BMT.Rat.exists ("mighty_mouse")
[]
>>> PoT.exists (('dog', 'snoopy'), ('y', 1))
[<E_Type_Manager for BMT.Person_owns_Trap of scope BMT__Hash__HPS>]
>>> PoT.exists (("Man", "tin"), t4)
[]
The queries :attr:`~_MOM.E_Type_Manager.E_Type_Manager.count_strict`,
:attr:`~_MOM.E_Type_Manager.E_Type_Manager.count`,
:meth:`~_MOM.E_Type_Manager.E_Type_Manager.query`, and
:meth:`~_MOM.E_Type_Manager.E_Type_Manager.r_query` return the
number, or list, of instances of the specified
etype::
>>> scope.BMT.Mouse.count_strict
1
>>> list (scope.BMT.Mouse.query_s (strict = True))
[BMT.Mouse ('mighty_mouse')]
>>> scope.BMT.Mouse.count
2
>>> list (scope.BMT.Mouse.query_s ())
[BMT.Mouse ('mighty_mouse'), BMT.Beaver ('toothy_beaver')]
>>> scope.BMT.Rat.count_strict
2
>>> list (scope.BMT.Rat.query_s (strict = True))
[BMT.Rat ('axel'), BMT.Rat ('rutty_rat')]
>>> scope.BMT.Rat.count
2
>>> list (scope.BMT.Rat.query_s ())
[BMT.Rat ('axel'), BMT.Rat ('rutty_rat')]
>>> scope.BMT.Rodent.count_strict
| |
states include waiting, flight, and memory
* **who_has**: ``{dep: {worker}}``
Workers that we believe have this data
* **has_what**: ``{worker: {deps}}``
The data that we care about that we think a worker has
* **pending_data_per_worker**: ``{worker: [dep]}``
The data on each worker that we still want, prioritized as a deque
* **in_flight_tasks**: ``{task: worker}``
All dependencies that are coming to us in current peer-to-peer
connections and the workers from which they are coming.
* **in_flight_workers**: ``{worker: {task}}``
The workers from which we are currently gathering data and the
dependencies we expect from those connections
* **comm_bytes**: ``int``
The total number of bytes in flight
* **suspicious_deps**: ``{dep: int}``
The number of times a dependency has not been where we expected it
* **nbytes**: ``{key: int}``
The size of a particular piece of data
* **types**: ``{key: type}``
The type of a particular piece of data
* **threads**: ``{key: int}``
The ID of the thread on which the task ran
* **exceptions**: ``{key: exception}``
The exception caused by running a task if it erred
* **tracebacks**: ``{key: traceback}``
The exception caused by running a task if it erred
* **startstops**: ``{key: [(str, float, float)]}``
Log of transfer, load, and compute times for a task
* **priorities**: ``{key: tuple}``
The priority of a key given by the scheduler. Determines run order.
* **durations**: ``{key: float}``
Expected duration of a task
* **resource_restrictions**: ``{key: {str: number}}``
Abstract resources required to run a task
Parameters
----------
scheduler_ip: str
scheduler_port: int
ip: str, optional
ncores: int, optional
loop: tornado.ioloop.IOLoop
local_dir: str, optional
Directory where we place local resources
name: str, optional
heartbeat_interval: int
Milliseconds between heartbeats to scheduler
memory_limit: int
Number of bytes of data to keep in memory before using disk
executor: concurrent.futures.Executor
resources: dict
Resources that this worker has, like ``{'GPU': 2}``
Examples
--------
Use the command line to start a worker::
$ dask-scheduler
Start scheduler at 127.0.0.1:8786
$ dask-worker 127.0.0.1:8786
Start worker at: 127.0.0.1:1234
Registered with scheduler at: 127.0.0.1:8786
See Also
--------
distributed.scheduler.Scheduler
distributed.nanny.Nanny
"""
def __init__(self, *args, **kwargs):
    """Set up all per-worker bookkeeping, then delegate to WorkerBase."""
    # -- task bookkeeping --------------------------------------------------
    self.tasks = {}
    self.task_state = {}
    self.dep_state = {}
    self.dependencies = {}
    self.dependents = {}
    self.waiting_for_data = {}
    # -- peer-data tracking ------------------------------------------------
    self.who_has = {}
    self.has_what = defaultdict(set)
    self.pending_data_per_worker = defaultdict(deque)
    self.extensions = {}
    self.data_needed = deque()  # TODO: replace with heap?
    # -- in-flight transfers -----------------------------------------------
    self.in_flight_tasks = {}
    self.in_flight_workers = {}
    self.total_connections = 50
    self.total_comm_nbytes = 10e6
    self.comm_nbytes = 0
    self.suspicious_deps = defaultdict(int)
    self._missing_dep_flight = set()
    # -- per-key metadata --------------------------------------------------
    self.nbytes = {}
    self.types = {}
    self.threads = {}
    self.exceptions = {}
    self.tracebacks = {}
    self.priorities = {}
    self.priority_counter = 0
    self.durations = {}
    self.startstops = defaultdict(list)
    self.resource_restrictions = {}
    # -- execution state ---------------------------------------------------
    self.ready = []
    self.constrained = deque()
    self.executing = set()
    self.executed_count = 0
    self.long_running = set()
    self.batched_stream = None
    self.target_message_size = 200e6  # 200 MB
    self.log = deque(maxlen=100000)
    self.validate = kwargs.pop('validate', False)
    # -- transition tables: (start, finish) -> handler ---------------------
    self._transitions = {
        ('waiting', 'ready'): self.transition_waiting_ready,
        ('waiting', 'memory'): self.transition_waiting_memory,
        ('ready', 'executing'): self.transition_ready_executing,
        ('ready', 'memory'): self.transition_ready_memory,
        ('constrained', 'executing'): self.transition_constrained_executing,
        ('executing', 'memory'): self.transition_executing_done,
        ('executing', 'error'): self.transition_executing_done,
        ('executing', 'long-running'): self.transition_executing_long_running,
        ('long-running', 'error'): self.transition_executing_done,
        ('long-running', 'memory'): self.transition_executing_done,
    }
    self._dep_transitions = {
        ('waiting', 'flight'): self.transition_dep_waiting_flight,
        ('waiting', 'memory'): self.transition_dep_waiting_memory,
        ('flight', 'waiting'): self.transition_dep_flight_waiting,
        ('flight', 'memory'): self.transition_dep_flight_memory,
    }
    # -- transfer logs -----------------------------------------------------
    self.incoming_transfer_log = deque(maxlen=100000)
    self.incoming_count = 0
    self.outgoing_transfer_log = deque(maxlen=100000)
    self.outgoing_count = 0
    WorkerBase.__init__(self, *args, **kwargs)
def __str__(self):
    """One-line status summary used in logs and the REPL.

    Reads only already-tracked counters; cheap enough to call on every
    log line.
    """
    return "<%s: %s, %s, stored: %d, running: %d/%d, ready: %d, comm: %d, waiting: %d>" % (
        self.__class__.__name__, self.address, self.status,
        len(self.data), len(self.executing), self.ncores,
        len(self.ready), len(self.in_flight_tasks),
        len(self.waiting_for_data))

# repr intentionally shares the compact human-readable form above
__repr__ = __str__
################
# Update Graph #
################
@gen.coroutine
def compute_stream(self, comm):
    """Main scheduler->worker message loop over `comm`.

    Wraps `comm` in a BatchedSend (stored on self.batched_stream) for
    outgoing messages, then reads batches of incoming messages until a
    'close' op or the connection drops.  Recognized ops: 'close',
    'compute-task', 'release-task', 'delete-data'; anything else is
    logged and ignored.  After each batch, communication and computation
    are re-triggered via ensure_communicating / ensure_computing.
    """
    try:
        self.batched_stream = BatchedSend(interval=2, loop=self.loop)
        self.batched_stream.start(comm)

        def on_closed():
            # Reconnect unless we are deliberately shutting down.
            if self.reconnect and self.status not in ('closed', 'closing'):
                logger.info("Connection to scheduler broken. Reregistering")
                self._register_with_scheduler()
            else:
                self._close(report=False)
        #stream.set_close_callback(on_closed)

        closed = False
        while not closed:
            try:
                # One read yields a *batch* of messages.
                msgs = yield comm.read()
            except CommClosedError:
                on_closed()
                break
            except EnvironmentError as e:
                # Low-level socket error; give up on this stream.
                break
            start = time()
            for msg in msgs:
                op = msg.pop('op', None)
                if 'key' in msg:
                    validate_key(msg['key'])
                if op == 'close':
                    closed = True
                    self._close()
                    break
                elif op == 'compute-task':
                    priority = msg.pop('priority')
                    self.add_task(priority=priority, **msg)
                elif op == 'release-task':
                    self.log.append((msg['key'], 'release-task'))
                    self.release_key(**msg)
                elif op == 'delete-data':
                    self.delete_data(**msg)
                else:
                    logger.warning("Unknown operation %s, %s", op, msg)
            # Kick off any transfers/executions unblocked by this batch.
            self.ensure_communicating()
            self.ensure_computing()
            end = time()
            if self.digests is not None:
                self.digests['handle-messages-duration'].add(end - start)

        yield self.batched_stream.close()
        logger.info('Close compute stream')
    except Exception as e:
        logger.exception(e)
        raise
def add_task(self, key, function=None, args=None, kwargs=None, task=None,
             who_has=None, nbytes=None, priority=None, duration=None,
             resource_restrictions=None, **kwargs2):
    """Register a task received from the scheduler.

    Deserializes the run-spec, records priority/duration/resource
    metadata, wires up dependency bookkeeping (dependencies, dependents,
    who_has, has_what, pending_data_per_worker), and either transitions
    the task to 'ready' (all deps already in memory) or queues it on
    data_needed.  Parameters mirror the scheduler's 'compute-task'
    message; unknown message fields are absorbed by **kwargs2.

    Fixes: ``logger.warn`` (deprecated alias) replaced by
    ``logger.warning``; unused local ``raw`` removed.
    """
    try:
        if key in self.tasks:
            state = self.task_state[key]
            if state in ('memory', 'error'):
                # Nothing to compute; just (re-)report the result state.
                if state == 'memory':
                    assert key in self.data
                logger.debug("Asked to compute pre-existing result: %s: %s",
                             key, state)
                self.send_task_state_to_scheduler(key)
                return
            if state in IN_PLAY:
                return

        if self.dep_state.get(key) == 'memory':
            # We already hold this value as someone else's dependency;
            # adopt it as a completed task.
            self.task_state[key] = 'memory'
            self.send_task_state_to_scheduler(key)
            self.tasks[key] = None
            self.log.append((key, 'new-task-already-in-memory'))
            self.priorities[key] = priority
            self.durations[key] = duration
            return

        self.log.append((key, 'new'))
        try:
            self.tasks[key] = self._deserialize(function, args, kwargs, task)
        except Exception as e:
            # Bad run-spec: report the error to the scheduler instead of
            # crashing the worker.  (logger.warn is a deprecated alias.)
            logger.warning("Could not deserialize task", exc_info=True)
            emsg = error_message(e)
            emsg['key'] = key
            emsg['op'] = 'task-erred'
            self.batched_stream.send(emsg)
            self.log.append((key, 'deserialize-error'))
            return

        self.priorities[key] = priority
        self.durations[key] = duration
        if resource_restrictions:
            self.resource_restrictions[key] = resource_restrictions
        self.task_state[key] = 'waiting'

        if nbytes is not None:
            self.nbytes.update(nbytes)

        who_has = who_has or {}
        self.dependencies[key] = set(who_has)
        self.waiting_for_data[key] = set()

        for dep in who_has:
            if dep not in self.dependents:
                self.dependents[dep] = set()
            self.dependents[dep].add(key)

            if dep not in self.dep_state:
                if self.task_state.get(dep) == 'memory':
                    self.dep_state[dep] = 'memory'
                else:
                    self.dep_state[dep] = 'waiting'

            if self.dep_state[dep] != 'memory':
                self.waiting_for_data[key].add(dep)

        for dep, workers in who_has.items():
            assert workers
            if dep not in self.who_has:
                self.who_has[dep] = set(workers)
            self.who_has[dep].update(workers)

            for worker in workers:
                self.has_what[worker].add(dep)
                if self.dep_state[dep] != 'memory':
                    self.pending_data_per_worker[worker].append(dep)

        if self.waiting_for_data[key]:
            self.data_needed.append(key)
        else:
            self.transition(key, 'ready')

        if self.validate:
            if who_has:
                assert all(dep in self.dep_state for dep in who_has)
                assert all(dep in self.nbytes for dep in who_has)
                for dep in who_has:
                    self.validate_dep(dep)
                self.validate_key(key)
    except Exception as e:
        logger.exception(e)
        if LOG_PDB:
            import pdb
            pdb.set_trace()
        raise
###############
# Transitions #
###############
def transition_dep(self, dep, finish, **kwargs):
    """Move dependency `dep` toward state `finish` via the dispatch table.

    Unknown deps and no-op transitions are silently ignored; the handler
    may override the final state by returning one.
    """
    if dep not in self.dep_state:
        return
    start = self.dep_state[dep]
    if start == finish:
        return
    handler = self._dep_transitions[start, finish]
    new_state = handler(dep, **kwargs) or finish
    self.log.append(('dep', dep, start, new_state))
    # The handler may have released the dep entirely.
    if dep in self.dep_state:
        self.dep_state[dep] = new_state
        if self.validate:
            self.validate_dep(dep)
def transition_dep_waiting_flight(self, dep, worker=None):
    """Mark `dep` as in flight from `worker`."""
    try:
        if self.validate:
            assert dep not in self.in_flight_tasks
            assert self.dependents[dep]
        self.in_flight_tasks[dep] = worker
    except Exception as exc:
        logger.exception(exc)
        if LOG_PDB:
            import pdb
            pdb.set_trace()
        raise
def transition_dep_flight_waiting(self, dep, worker=None):
    """A transfer of `dep` from `worker` failed; fall back to waiting.

    Forgets that `worker` holds `dep`; if no other holder is known,
    schedules handle_missing_dep.  Tasks still waiting on `dep` are
    pushed to the front of data_needed so they are retried first.
    """
    try:
        if self.validate:
            assert dep in self.in_flight_tasks
        del self.in_flight_tasks[dep]
        # This worker let us down; drop it from both side tables.
        try:
            self.who_has[dep].remove(worker)
        except KeyError:
            pass
        try:
            self.has_what[worker].remove(dep)
        except KeyError:
            pass
        if not self.who_has[dep]:
            # No known holder left; escalate (deduplicated via
            # _missing_dep_flight).
            if dep not in self._missing_dep_flight:
                self._missing_dep_flight.add(dep)
                self.loop.add_callback(self.handle_missing_dep, dep)
        # Re-prioritize tasks blocked on this dep.
        for key in self.dependents.get(dep, ()):
            if self.task_state[key] == 'waiting':
                self.data_needed.appendleft(key)
        if not self.dependents[dep]:
            # Nobody needs it any more; drop all state for it.
            self.release_dep(dep)
    except Exception as e:
        logger.exception(e)
        if LOG_PDB:
            import pdb; pdb.set_trace()
        raise
def transition_dep_flight_memory(self, dep, value=None):
    """Transfer of `dep` completed: take it out of flight and store `value`."""
    try:
        if self.validate:
            assert dep in self.in_flight_tasks
        del self.in_flight_tasks[dep]
        self.dep_state[dep] = 'memory'
        self.put_key_in_memory(dep, value)
    except Exception as exc:
        logger.exception(exc)
        if LOG_PDB:
            import pdb
            pdb.set_trace()
        raise
def transition_dep_waiting_memory(self, dep, value=None):
    """A waiting dependency turned out to be locally computed and in memory.

    No state change is needed here; under ``validate`` we only sanity-check
    that the data really is present.  (`value` is unused on this path —
    the data is already stored; presumably kept for handler-signature
    symmetry — TODO confirm.)

    Fix: the validation-failure breakpoint was an *unguarded*
    ``pdb.set_trace()``, unlike every sibling handler; it would hang a
    non-interactive worker.  It is now guarded by LOG_PDB.  The failure
    is still swallowed after logging, preserving the original
    best-effort behavior.
    """
    try:
        if self.validate:
            try:
                assert dep in self.data
                assert dep in self.nbytes
                assert dep in self.types
                assert self.task_state[dep] == 'memory'
            except Exception as e:
                logger.exception(e)
                if LOG_PDB:
                    import pdb
                    pdb.set_trace()
    except Exception as e:
        logger.exception(e)
        if LOG_PDB:
            import pdb
            pdb.set_trace()
        raise
def transition(self, key, finish, **kwargs):
    """Move task `key` to state `finish` via the registered handler.

    The handler may override the final state by returning one.
    """
    start = self.task_state[key]
    if start == finish:
        return
    handler = self._transitions[start, finish]
    new_state = handler(key, **kwargs) or finish
    self.log.append((key, start, new_state))
    self.task_state[key] = new_state
    if self.validate:
        self.validate_key(key)
def transition_waiting_ready(self, key):
    """All dependencies of `key` are in memory; queue it for execution.

    Resource-restricted tasks go to the `constrained` deque (and the
    state becomes 'constrained' via the return value); everything else
    is pushed onto the `ready` heap ordered by priority.
    """
    try:
        if self.validate:
            assert self.task_state[key] == 'waiting'
            assert key in self.waiting_for_data
            assert not self.waiting_for_data[key]
            assert all(dep in self.data for dep in self.dependencies[key])
            assert key not in self.executing
            assert key not in self.ready
        del self.waiting_for_data[key]
        if key in self.resource_restrictions:
            self.constrained.append(key)
            # Returning a state overrides the default 'ready' in transition().
            return 'constrained'
        else:
            # self.ready is a plain list used as a heap.
            heapq.heappush(self.ready, (self.priorities[key], key))
    except Exception as e:
        logger.exception(e)
        if LOG_PDB:
            import pdb; pdb.set_trace()
        raise
def transition_waiting_memory(self, key, value=None):
    """A waiting task's result arrived by other means: stop waiting, report."""
    try:
        if self.validate:
            assert self.task_state[key] == 'waiting'
            assert key in self.waiting_for_data
            assert key not in self.executing
            assert key not in self.ready
        del self.waiting_for_data[key]
        self.send_task_state_to_scheduler(key)
    except Exception as exc:
        logger.exception(exc)
        if LOG_PDB:
            import pdb
            pdb.set_trace()
        raise
def transition_ready_executing(self, key):
try:
if self.validate:
assert key not in | |
<filename>tests/test_edgeql_ddl.py
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
import re
import textwrap
import uuid
import edgedb
from edb.testbase import server as tb
from edb.tools import test
class TestEdgeQLDDL(tb.DDLTestCase):
async def test_edgeql_ddl_04(self):
    """A required link to a base type merges with a link to a subtype
    under multiple inheritance."""
    await self.con.execute("""
        CREATE TYPE A;
        CREATE TYPE B EXTENDING A;
        CREATE TYPE Object1 {
            CREATE REQUIRED LINK a -> A;
        };
        CREATE TYPE Object2 {
            CREATE LINK a -> B;
        };
        CREATE TYPE Object_12
            EXTENDING Object1, Object2;
    """)
async def test_edgeql_ddl_type_05(self):
    """ALTER ... SET OPTIONAL clears `required` on both a link and a property."""
    await self.con.execute("""
        CREATE TYPE A5;
        CREATE TYPE Object5 {
            CREATE REQUIRED LINK a -> A5;
            CREATE REQUIRED PROPERTY b -> str;
        };
    """)
    # Both members start out required.
    await self.assert_query_result(
        r"""
            SELECT schema::ObjectType {
                links: {
                    name,
                    required,
                }
                FILTER .name = 'a'
                ORDER BY .name,
                properties: {
                    name,
                    required,
                }
                FILTER .name = 'b'
                ORDER BY .name
            }
            FILTER .name = 'default::Object5';
        """,
        [{
            'links': [{
                'name': 'a',
                'required': True,
            }],
            'properties': [{
                'name': 'b',
                'required': True,
            }],
        }],
    )
    await self.con.execute("""
        ALTER TYPE Object5 {
            ALTER LINK a SET OPTIONAL;
        };
        ALTER TYPE Object5 {
            ALTER PROPERTY b SET OPTIONAL;
        };
    """)
    # ... and are optional afterwards.
    await self.assert_query_result(
        r"""
            SELECT schema::ObjectType {
                links: {
                    name,
                    required,
                }
                FILTER .name = 'a'
                ORDER BY .name,
                properties: {
                    name,
                    required,
                }
                FILTER .name = 'b'
                ORDER BY .name
            }
            FILTER .name = 'default::Object5';
        """,
        [{
            'links': [{
                'name': 'a',
                'required': False,
            }],
            'properties': [{
                'name': 'b',
                'required': False,
            }],
        }],
    )
async def test_edgeql_ddl_type_06(self):
    """SET MULTI and SET SINGLE ... USING round-trip cardinality and
    migrate existing data correctly in both directions."""
    await self.con.execute("""
        CREATE TYPE A6 {
            CREATE PROPERTY name -> str;
        };
        CREATE TYPE Object6 {
            CREATE SINGLE LINK a -> A6;
            CREATE SINGLE PROPERTY b -> str;
        };
        INSERT A6 { name := 'a6' };
        INSERT Object6 {
            a := (SELECT A6 LIMIT 1),
            b := 'foo'
        };
    """)
    # Initially single cardinality.
    await self.assert_query_result(
        r"""
            SELECT schema::ObjectType {
                links: {
                    name,
                    cardinality,
                }
                FILTER .name = 'a'
                ORDER BY .name,
                properties: {
                    name,
                    cardinality,
                }
                FILTER .name = 'b'
                ORDER BY .name
            }
            FILTER .name = 'default::Object6';
        """,
        [{
            'links': [{
                'name': 'a',
                'cardinality': 'One',
            }],
            'properties': [{
                'name': 'b',
                'cardinality': 'One',
            }],
        }],
    )
    await self.assert_query_result(
        r"""
            SELECT Object6 {
                a: {name},
                b,
            }
        """,
        [{
            'a': {'name': 'a6'},
            'b': 'foo',
        }]
    )
    await self.con.execute("""
        ALTER TYPE Object6 {
            ALTER LINK a SET MULTI;
        };
        ALTER TYPE Object6 {
            ALTER PROPERTY b SET MULTI;
        };
    """)
    await self.assert_query_result(
        """
            SELECT schema::ObjectType {
                links: {
                    name,
                    cardinality,
                }
                FILTER .name = 'a'
                ORDER BY .name,
                properties: {
                    name,
                    cardinality,
                }
                FILTER .name = 'b'
                ORDER BY .name
            }
            FILTER .name = 'default::Object6';
        """,
        [{
            'links': [{
                'name': 'a',
                'cardinality': 'Many',
            }],
            'properties': [{
                'name': 'b',
                'cardinality': 'Many',
            }],
        }],
    )
    # Check that the data has been migrated correctly.
    await self.assert_query_result(
        r"""
            SELECT Object6 {
                a: {name},
                b,
            }
        """,
        [{
            'a': [{'name': 'a6'}],
            'b': ['foo'],
        }]
    )
    # Change it back.
    await self.con.execute("""
        ALTER TYPE Object6 {
            ALTER LINK a SET SINGLE USING (SELECT .a LIMIT 1);
        };
        ALTER TYPE Object6 {
            ALTER PROPERTY b SET SINGLE USING (SELECT .b LIMIT 1);
        };
    """)
    await self.assert_query_result(
        """
            SELECT schema::ObjectType {
                links: {
                    name,
                    cardinality,
                }
                FILTER .name = 'a'
                ORDER BY .name,
                properties: {
                    name,
                    cardinality,
                }
                FILTER .name = 'b'
                ORDER BY .name
            }
            FILTER .name = 'default::Object6';
        """,
        [{
            'links': [{
                'name': 'a',
                'cardinality': 'One',
            }],
            'properties': [{
                'name': 'b',
                'cardinality': 'One',
            }],
        }],
    )
    # Check that the data has been migrated correctly.
    await self.assert_query_result(
        r"""
            SELECT Object6 {
                a: {name},
                b,
            }
        """,
        [{
            'a': {'name': 'a6'},
            'b': 'foo',
        }]
    )
async def test_edgeql_ddl_rename_type_and_add_01(self):
    """RENAME followed by CREATE of new members works inside one ALTER block."""
    await self.con.execute("""
        CREATE TYPE Foo {
            CREATE PROPERTY x -> str;
        };
    """)
    await self.con.execute("""
        ALTER TYPE Foo {
            DROP PROPERTY x;
            RENAME TO Bar;
            CREATE PROPERTY a -> str;
            CREATE LINK b -> Object;
            CREATE CONSTRAINT expression ON (true);
            CREATE ANNOTATION description := 'hello';
        };
    """)
    await self.assert_query_result(
        r"""
            SELECT schema::ObjectType {
                links: {name} ORDER BY .name,
                properties: {name} ORDER BY .name,
                constraints: {name},
                annotations: {name}
            }
            FILTER .name = 'default::Bar';
        """,
        [
            {
                "annotations": [{"name": "std::description"}],
                "constraints": [{"name": "std::expression"}],
                "links": [{"name": "__type__"}, {"name": "b"}],
                "properties": [{"name": "a"}, {"name": "id"}],
            }
        ],
    )
    # The renamed type's new members can all be dropped again.
    await self.con.execute("""
        ALTER TYPE Bar {
            DROP PROPERTY a;
            DROP link b;
            DROP CONSTRAINT expression ON (true);
            DROP ANNOTATION description;
        };
    """)
async def test_edgeql_ddl_rename_type_and_add_02(self):
    """CREATE of new members followed by RENAME works inside one ALTER block."""
    await self.con.execute("""
        CREATE TYPE Foo;
    """)
    await self.con.execute("""
        ALTER TYPE Foo {
            CREATE PROPERTY a -> str;
            CREATE LINK b -> Object;
            CREATE CONSTRAINT expression ON (true);
            CREATE ANNOTATION description := 'hello';
            RENAME TO Bar;
        };
    """)
    await self.assert_query_result(
        r"""
            SELECT schema::ObjectType {
                links: {name} ORDER BY .name,
                properties: {name} ORDER BY .name,
                constraints: {name},
                annotations: {name}
            }
            FILTER .name = 'default::Bar';
        """,
        [
            {
                "annotations": [{"name": "std::description"}],
                "constraints": [{"name": "std::expression"}],
                "links": [{"name": "__type__"}, {"name": "b"}],
                "properties": [{"name": "a"}, {"name": "id"}],
            }
        ],
    )
    await self.con.execute("""
        ALTER TYPE Bar {
            DROP PROPERTY a;
            DROP link b;
            DROP CONSTRAINT expression ON (true);
            DROP ANNOTATION description;
        };
    """)
async def test_edgeql_ddl_rename_type_and_drop_01(self):
    """RENAME followed by DROP of existing members works in one ALTER block."""
    await self.con.execute("""
        CREATE TYPE Foo {
            CREATE PROPERTY a -> str;
            CREATE LINK b -> Object;
            CREATE CONSTRAINT expression ON (true);
            CREATE ANNOTATION description := 'hello';
        };
    """)
    await self.con.execute("""
        ALTER TYPE Foo {
            RENAME TO Bar;
            DROP PROPERTY a;
            DROP link b;
            DROP CONSTRAINT expression ON (true);
            DROP ANNOTATION description;
        };
    """)
    # Only the implicit members should remain.
    await self.assert_query_result(
        r"""
            SELECT schema::ObjectType {
                links: {name} ORDER BY .name,
                properties: {name} ORDER BY .name,
                constraints: {name},
                annotations: {name}
            }
            FILTER .name = 'default::Bar';
        """,
        [
            {
                "annotations": [],
                "constraints": [],
                "links": [{"name": "__type__"}],
                "properties": [{"name": "id"}],
            }
        ],
    )
    await self.con.execute("""
        DROP TYPE Bar;
    """)
async def test_edgeql_ddl_rename_type_and_drop_02(self):
    """DROP of existing members followed by RENAME works in one ALTER block."""
    await self.con.execute("""
        CREATE TYPE Foo {
            CREATE PROPERTY a -> str;
            CREATE LINK b -> Object;
            CREATE CONSTRAINT expression ON (true);
            CREATE ANNOTATION description := 'hello';
        };
    """)
    await self.con.execute("""
        ALTER TYPE Foo {
            DROP PROPERTY a;
            DROP link b;
            DROP CONSTRAINT expression ON (true);
            DROP ANNOTATION description;
            RENAME TO Bar;
        };
    """)
    # Only the implicit members should remain.
    await self.assert_query_result(
        r"""
            SELECT schema::ObjectType {
                links: {name} ORDER BY .name,
                properties: {name} ORDER BY .name,
                constraints: {name},
                annotations: {name}
            }
            FILTER .name = 'default::Bar';
        """,
        [
            {
                "annotations": [],
                "constraints": [],
                "links": [{"name": "__type__"}],
                "properties": [{"name": "id"}],
            }
        ],
    )
    await self.con.execute("""
        DROP TYPE Bar;
    """)
async def test_edgeql_ddl_rename_type_and_prop_01(self):
    """Renaming the type and its property/link in the same ALTER block works,
    and the renamed members are addressable afterwards."""
    await self.con.execute(r"""
        CREATE TYPE Note {
            CREATE PROPERTY note -> str;
            CREATE LINK friend -> Object;
        };
    """)
    await self.con.execute(r"""
        ALTER TYPE Note {
            RENAME TO Remark;
            ALTER PROPERTY note RENAME TO remark;
            ALTER LINK friend RENAME TO enemy;
        };
    """)
    await self.con.execute(r"""
        ALTER TYPE Remark {
            DROP PROPERTY remark;
            DROP LINK enemy;
        };
    """)
async def test_edgeql_ddl_11(self):
    """A property with an array type can be created via DDL."""
    await self.con.execute(r"""
        CREATE TYPE TestContainerLinkObjectType {
            CREATE PROPERTY test_array_link -> array<std::str>;
            # FIXME: for now dimension specs on the array are
            # disabled pending a syntax change
            # CREATE PROPERTY test_array_link_2 ->
            #     array<std::str[10]>;
        };
    """)
async def test_edgeql_ddl_12(self):
    """Backtick-quoted dunder names (`__subject__`) are rejected with a
    syntax error."""
    with self.assertRaisesRegex(
            edgedb.EdgeQLSyntaxError,
            r"backtick-quoted names surrounded by double underscores "
            r"are forbidden"):
        await self.con.execute(r"""
            CREATE TYPE TestBadContainerLinkObjectType {
                CREATE PROPERTY foo -> std::str {
                    CREATE CONSTRAINT expression
                        ON (`__subject__` = 'foo');
                };
            };
        """)
async def test_edgeql_ddl_13(self):
    """Backtick-quoting `self` does not make it a keyword; it resolves as
    an (unknown) object reference."""
    with self.assertRaisesRegex(
            edgedb.InvalidReferenceError,
            "object type or alias 'default::self' does not exist"):
        await self.con.execute(r"""
            CREATE TYPE TestBadContainerLinkObjectType {
                CREATE PROPERTY foo -> std::str {
                    CREATE CONSTRAINT expression ON (`self` = 'foo');
                };
            };
        """)
async def test_edgeql_ddl_14(self):
with self.assertRaisesRegex(
edgedb.QueryError,
f'__source__ cannot be used in this expression'):
await self.con.execute("""
CREATE TYPE TestSelfLink1 {
CREATE PROPERTY | |
<reponame>mrtop/TiledMapReadWritePy
# coding:utf-8
# TMX library
# Copyright (c) 2016 wboy <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""=========================================
TMX Map Format (TiledMapEditor 0.17.0)
http://doc.mapeditor.org/reference/tmx-map-format/#tmx-map-format
========================================="""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import logging
import six
import os
import json
import base64
import gzip
import zlib
import array
from itertools import chain, product
from collections import defaultdict, namedtuple, OrderedDict
#from xml.etree.ElementTree import *
from ElementTree import *
from six.moves import map
logger = logging.getLogger(__name__)
streamHandler = logging.StreamHandler()
streamHandler.setLevel(logging.INFO)
logger.addHandler(streamHandler)
logger.setLevel(logging.INFO)
# Public API of the TMX reader/writer.
# Fix: 'TiledProperty' and 'TiledProperties' were each listed twice;
# the duplicates are removed (harmless for `import *`, but misleading).
__all__ = ['TiledObjectType',
           'TiledProperty',
           'TiledProperties',
           'TiledMap',
           'TiledTileset',
           'TiledTileoffset',
           'TiledTerraintypes',
           'TiledTerrain',
           'TiledTile',
           'TiledImage',
           'TiledAnimation',
           'TiledFrame',
           'TiledLayer',
           'TiledData',
           'TiledImagelayer',
           'TiledObjectgroup',
           'TiledObject',
           'TiledEllipse',
           'TiledPolygon',
           'TiledPolyline']
# Converter table: TMX attribute name -> callable used to coerce the raw
# XML string.  Unknown attributes default to `str` (the defaultdict factory
# returns the str *type*, i.e. an identity-ish converter).
types = defaultdict(lambda: str)
types.update({
    "version": str,
    "orientation": str,
    "renderorder": str,
    "width": int,
    "height": int,
    "tilewidth": int,
    "tileheight": int,
    "hexsidelength": int,
    "staggeraxis": str,
    "staggerindex": str,
    "backgroundcolor": str,
    "nextobjectid": int,
    "firstgid": int,
    "source": str,
    "name": str,
    "spacing": int,
    "margin": int,
    "tilecount": int,
    "columns": int,
    "x": int,
    "y": int,
    "format": str,
    "trans": str,
    "tile": int,
    "id": int,
    "terrain" : str,
    "probability": float,
    "tileid": int,
    "duration": int,
    "opacity": float,
    "visible": int,
    "offsetx": float,
    "offsety": float,
    "encoding": str,
    "compression": str,
    "gid": int,
    "color": str,
    "draworder": str,
    "type": str,
    "rotation": float,
    "points": str,
    "value": str,
})
# Default values for each TMX attribute (per the TMX 0.17 spec where one
# exists, otherwise None meaning "attribute absent").  NOTE(review): the
# defaultdict factory returns the `str` type itself for unknown keys, which
# looks like a copy-paste from `types` above — verify intent.
typesdefaultvalue = defaultdict(lambda: str)
typesdefaultvalue.update({
    "version": "1.0",
    "orientation": "orthogonal",
    "renderorder": "right-down",
    "width": 10,
    "height": 10,
    "tilewidth": 32,
    "tileheight": 32,
    "hexsidelength": 0,
    "staggeraxis": "y",
    "staggerindex": "odd",
    "backgroundcolor": None,
    "nextobjectid": 0,
    "firstgid": 1,
    "source": None,
    "name": None,
    "spacing": None,
    "margin": None,
    "tilecount": None,
    "columns": None,
    "x": None,
    "y": None,
    "format": None,
    "trans": None,
    "tile": None,
    "id": None,
    "terrain" : None,
    "probability": None,
    "tileid": None,
    "duration": None,
    "opacity": None,
    "visible": None,
    "offsetx": None,
    "offsety": None,
    "encoding": None,
    "compression": None,
    "gid": None,
    "color": None,
    "draworder": None,
    "type": None,
    "rotation": None,
    "points": None,
    "value": None,
})
# Wrapper-class name -> TMX element tag it serializes to/from.
classtypesnodename = defaultdict(lambda: str)
classtypesnodename.update({
    "TiledMap" : "map",
    "TiledProperty" : "property",
    "TiledProperties" : "properties",
    "TiledTileset" : "tileset",
    "TiledTileoffset" : "tileoffset",
    "TiledTerraintypes" : "terraintypes",
    "TiledTerrain" : "terrain",
    "TiledTile" : "tile",
    "TiledImage" : "image",
    "TiledAnimation" : "animation",
    "TiledFrame" : "frame",
    "TiledLayer" : "layer",
    "TiledData" : "data",
    "TiledData_Tile" : "tile",
    "TiledImagelayer" : "imagelayer",
    "TiledObjectgroup" : "objectgroup",
    "TiledObject" : "object",
    "TiledEllipse" : "ellipse",
    "TiledPolygon" : "polygon",
    "TiledPolyline" : "polyline",
})
def get_class_node_name(classname):
    """Map a Tiled* wrapper class name to its TMX element tag.

    :param classname: name of a Tiled* wrapper class (e.g. "TiledMap")
    :raises Exception: if `classname` is not a known TMX element class

    Fixes: Py2-only ``dict.has_key`` replaced with ``in`` (also avoids
    triggering the defaultdict factory); the bare ``raise Exception``
    now carries a message.
    """
    if classname not in classtypesnodename:
        logger.error("classname : %s not is standard tmx data format", classname)
        raise Exception("unknown TMX class: %s" % classname)
    return classtypesnodename[classname]
def indent(elem, level=0):
    """Pretty-print helper: insert newline/indent whitespace in-place.

    Sets `text` on elements with children and `tail` on nested elements
    so that serializing `elem` yields human-readable, two-space-indented
    XML.  Existing non-whitespace text/tails are left untouched.
    Returns `elem` for chaining.
    """
    pad = "\n" + "  " * level
    if len(elem):
        if not (elem.text and elem.text.strip()):
            elem.text = pad + "  "
        for child in elem:
            indent(child, level + 1)
            if not (child.tail and child.tail.strip()):
                child.tail = pad
    if level and not (elem.tail and elem.tail.strip()):
        elem.tail = pad
    return elem
def read_positions(text):
    """Parse a TMX points string "x1,y1 x2,y2 ..." into ((x, y), ...).

    Returns None when `text` is None.  Each coordinate goes through
    format_value, so integral values come back as int, others as float.
    """
    if text is not None:
        return tuple(tuple(map(format_value, pair.split(',')))
                     for pair in text.split())
    return None


def write_positions(positions):
    """Inverse of read_positions: render [(x, y), ...] as " x1,y1 x2,y2".

    Note the leading space — kept for byte-compatibility with files
    written by earlier versions.  Returns "" for None.
    """
    result = ""
    if positions is not None:
        for x, y in positions:
            result = "%s %s,%s" % (result, x, y)
    return result


def format_value(value, typestr=None):
    """Coerce an XML attribute string to a Python value.

    With `typestr` ('bool'/'float'/'int') the conversion is explicit;
    'float' collapses integral floats to int.  Without `typestr` the
    conversion is best-effort: "true"/"false" become bools, numeric
    strings become int/float, anything else is returned unchanged.

    Fix: the "false" branch previously returned True (copy-paste bug),
    so every boolean attribute parsed as True.
    """
    if typestr is not None:
        if typestr == "bool":
            return value == str(True).lower()
        elif typestr == "float":
            f = float(value)
            return int(f) if f == int(f) else f
        elif typestr == "int":
            return int(value)
        return value
    lowered = str(value).lower()
    if lowered == str(True).lower():
        return True
    if lowered == str(False).lower():
        return False  # was `return True` — bug fix
    try:
        f = float(value)
        return int(f) if f == int(f) else f
    except (TypeError, ValueError):
        return value
def convert_to_bool(text):
    """Convert a few common variations of "true" and "false" to boolean.

    Accepts anything int() can parse (truthiness of the number), plus the
    strings "true"/"yes" and "false"/"no", case-insensitively.

    :param text: value to test
    :return: boolean
    :raises ValueError: if the value is not recognisably boolean
    """
    try:
        return bool(int(text))
    except (TypeError, ValueError):
        pass
    text = str(text).lower()
    if text in ("true", "yes"):
        return True
    if text in ("false", "no"):
        return False
    # The old `text == 0` / `text == 1` comparisons were dead code: text is
    # always a str at this point, and numeric inputs were already handled by
    # the int() conversion above.
    raise ValueError("cannot interpret %r as a boolean" % text)
def float_to_int(value):
    """Collapse a float with an integral value (e.g. 2.0) to an int.

    Non-floats (including int subclasses and strings) pass through unchanged;
    so do nan/inf, which cannot be truncated.
    """
    if type(value) is not float:
        return value
    try:
        truncated = int(value)
    except (ValueError, OverflowError):  # nan / inf
        return value
    return truncated if truncated == value else value
class Enum(set):
    """Set-backed enum: attribute access returns the member's own name.

    Enum(["A", "B"]).A == "A"; missing members raise AttributeError.
    """

    def __getattr__(self, name):
        if name not in self:
            raise AttributeError
        return name
# Closed set of TMX <object> shape kinds (ellipse/polygon/polyline children, tile, plain rectangle).
TiledObjectType = Enum(["NONE", "TILE", "RECTANGLE", "ELLIPSE", "POLYGON", "POLYLINE"])
class BaseObject(object):
    """Common base for Tiled TMX entities.

    Provides generic XML/JSON (de)serialization: public attributes (names
    without a leading underscore) map to XML attributes / JSON keys, and
    attributes whose type name is registered in classtypesnodename (or lists
    of such objects) are recursed into as child elements.
    """

    def __init__(self, tiledmap=None, parent=None):
        """Initialize public attributes from the typesdefaultvalue table.

        :param tiledmap: owning TiledMap instance (or None)
        :param parent: parent entity in the TMX tree (or None)
        """
        for key in list(vars(self).keys()):
            if key.startswith('_'):
                continue
            # dict.has_key() was removed in Python 3; `in` works in both.
            if key in typesdefaultvalue:
                setattr(self, key, typesdefaultvalue[key])
        self._tiledmap = tiledmap
        self._parent = parent

    def __str__(self):
        try:
            return tostring(self.write_xml())
        except Exception:
            raise ValueError

    def read_xml(self, node, clearlevel=1):
        """read the xml attributes to self
        :param node: etree element
        :param clearlevel: clear attribute level
            0, Don't Clear
            1, Clear public
            2, Clear public and protect
            3, All Clear (public and protect and private)
        :rtype : BaseObject instance
        """
        # BUG FIX: clearlevel == 0 used to `return` from inside the clearing
        # loop, which aborted read_xml before any attribute was read (and
        # returned None instead of self).  It now only skips the clearing step.
        if clearlevel != 0:
            for key in list(vars(self).keys()):
                if clearlevel == 1 and key.startswith('_'):
                    continue
                if clearlevel == 2 and key.startswith('__'):
                    continue
                setattr(self, key, None)
        classname = self.__class__.__name__
        classnodename = get_class_node_name(classname)
        if classnodename != node.tag:
            logger.error("classnodename != node.tag. classnodename:%s, node.tag:%s", classnodename, node.tag)
            raise Exception
        for key, value in node.items():
            # `types` is the module-level attribute-name -> caster table.
            setattr(self, key, types[key](value))
        return self

    def write_xml(self, outattrorder=None):
        """write the attributes to xml
        :param outattrorder: list() out attr in order;
            None (default) emits all attributes in dict order
        :rtype : Element instance
        """
        classname = self.__class__.__name__
        element = Element(get_class_node_name(classname))
        dictattr = self.__dict__
        orderdictattr = OrderedDict()
        if dictattr:
            keys = outattrorder if outattrorder else dictattr.keys()
            for key in keys:
                if key.startswith('_') or key not in dictattr:
                    continue
                value = dictattr[key]
                if value is None:
                    continue
                if type(value) is list:
                    element = self._child_list_attr_write_xml(element, value)
                elif type(value).__name__ in classtypesnodename:
                    element = self._child_attr_write_xml(element, value)
                else:
                    orderdictattr[key] = ("%s" % float_to_int(value))
        if orderdictattr:
            element.attrib = orderdictattr
        return element

    def write_json(self):
        """write the attributes to json
        :rtype : dict, or None when nothing serializable
        """
        dictattr = self.__dict__
        if dictattr is None:
            return None
        dic = {}
        for key, value in dictattr.items():
            if key.startswith('_'):
                continue
            if value is None:
                continue
            if type(value) is list:
                dic.update(self._child_list_attr_write_json(key, value))
            elif type(value).__name__ in classtypesnodename:
                dic = self._child_attr_write_json(dic, value)
            else:
                dic[key] = format_value(value)
        return dic if dic else None

    def _child_attr_read_xml(self, parentelement, type, parent):
        # Read one child element into a new `type` instance, or None.
        childattrelement = parentelement.find(get_class_node_name(type.__name__))
        if childattrelement is not None:
            return type(self._tiledmap, parent).read_xml(childattrelement)
        return None

    def _child_list_attr_read_xml(self, parentelement, type, parent):
        # Read all matching child elements into a list of `type` instances,
        # or None when there are none.
        childlistattrelement = parentelement.findall(get_class_node_name(type.__name__))
        if childlistattrelement is not None:
            ls = list()
            for childelement in childlistattrelement:
                if childelement is not None:
                    ls.append(type(self._tiledmap, parent).read_xml(childelement))
            if ls:
                return ls
        return None

    def _child_attr_write_xml(self, parentelement, childattr):
        # Append the child's serialized element to parentelement (if any).
        if childattr:
            childattrelement = childattr.write_xml()
            if childattrelement is not None:
                parentelement.append(childattrelement)
        return parentelement

    def _child_list_attr_write_xml(self, parentelement, childattr):
        # Append every list member's serialized element to parentelement.
        if childattr:
            for attr in childattr:
                parentelement = self._child_attr_write_xml(parentelement, attr)
        return parentelement

    def _child_attr_write_json(self, parentdict, childattr):
        # Merge the child's JSON dict into parentdict (if any).
        if childattr:
            childattrdict = childattr.write_json()
            if childattrdict is not None:
                parentdict.update(childattrdict)
        return parentdict

    def _child_list_attr_write_json(self, parentname, childattr):
        # Serialize a list attribute as {parentname: [child dicts...]}.
        ls = list()
        if childattr:
            for attr in childattr:
                ls.append(attr.write_json())
        return {parentname: ls}
class TiledMap(BaseObject):
"""TileMap Data. Contains the layers, objects, images, and others
<map>
This class is meant to handle most of the work you need to do to use a map.
Can contain: properties, tileset, layer, objectgroup, imagelayer
"""
def __init__(self, filepath = None):
self.version = "1.0"
self.orientation= "orthogonal"
self.renderorder = "right-down"
self.width = 10
self.height = 10
self.tilewidth = 32
self.tileheight = 32
self.hexsidelength = 0
self.staggeraxis = "y"
self.staggerindex = "odd"
self.backgroundcolor = None
self.nextobjectid = 0
self.properties = None
self.tilesets = None
self.layers = None
super(TiledMap, self).__init__(self, None)
self.__encoding = None
self.__compression = None
self.__unfoldtsx = False
self.__filepath = filepath
if filepath:
elementTree = parse(filepath).getroot()
self.read_xml(elementTree)
def read_xml(self, node):
super(TiledMap, self).read_xml(node)
self.properties = self._child_attr_read_xml(node, TiledProperties, self)
self.tilesets = self._child_list_attr_read_xml(node, | |
import os
import pandas as pd
import numpy as np
import datetime as dt
import sys
from datetime import datetime
import rasterio
import geopandas as gpd
pkg_dir = os.path.join(os.path.dirname(__file__),'..')
sys.path.insert(0, pkg_dir)
from ela.textproc import *
from ela.spatial import *
from ela.classification import *
from ela.io import GeotiffExporter
from ela.utils import flip
from shapely.geometry import Point
def test_create_meshgrid():
    """A meshgrid over [0, 1.1] x [1, 1.51] at 0.5 resolution is 3 x 2."""
    xx, yy = create_meshgrid_cartesian(x_min=0.0, x_max=1.1, y_min=1.0, y_max=1.51, grid_res=0.5)
    for grid in (xx, yy):
        assert grid.shape == (3, 2)
class MockSlicePredictor:
    """Stub predictor returning the plane a*x + b*y + c for 2D samples."""

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

    def f(self, x, y):
        """Evaluate the plane equation at (x, y)."""
        return self.a * x + self.b * y + self.c

    def predict_one_sample(self, sample):
        """Predict for one sample, taken as (x, y) from its first two entries."""
        return self.f(sample[0], sample[1])

    def predict(self, X):
        """Predict for every row of X; returns a numpy array of values."""
        return np.array([self.predict_one_sample(row) for row in X])
def test_interpolate_slice():
    """interpolate_over_meshgrid applies the predictor at every grid node."""
    mesh = create_meshgrid_cartesian(x_min=0.0, x_max=1.1, y_min=1.0, y_max=1.51, grid_res=0.5)
    xx, yy = mesh
    predictor = MockSlicePredictor(1.0, 0.1, 0.01)

    def expected(xi, yi):
        return predictor.f(xx[xi, yi], yy[xi, yi])

    predicted = interpolate_over_meshgrid(predictor, mesh)
    assert predicted.shape == (3, 2)
    for xi in range(3):
        for yi in range(2):
            assert predicted[xi, yi] == expected(xi, yi)
    # work around scikit behavior: a None predictor yields NaN in the grid
    predicted = interpolate_over_meshgrid(None, mesh)
    assert predicted.shape == (3, 2)
    assert np.isnan(predicted[1, 1])
def test_height_coordinate_functor():
    """With b=+100, AHD height h maps to volume z-index h + 100."""
    to_index = z_index_for_ahd_functor(b=+100)
    for height, expected_index in [(-100, 0), (-99, 1), (0, 100), (+50, 150)]:
        assert to_index(height) == expected_index
def test_burn_volume():
    """burn_volume NaNs-out voxels above the DEM surface (below=False).

    The volume is filled with 0..59 so every voxel is distinct; with
    z_index_for_ahd_functor(b=+1), volume index z corresponds to datum
    height z - 1.
    """
    dims = (3, 4, 5)
    dim_x, dim_y, dim_z = dims
    x = np.arange(0.0, dim_x * dim_y * dim_z, 1.0)
    test_vol = np.reshape(x, dims)
    z_index_for_ahd = z_index_for_ahd_functor(b=+1)  # z = 0 is datum height -1, z = 4 is datum height 3
    xx, yy = create_meshgrid_cartesian(x_min=0.0, x_max=0.51, y_min=0.0, y_max=0.76, grid_res=0.25)
    # Surface height increases along the grid diagonal.
    dem = xx + yy
    assert dem[0, 0] == 0.0
    assert dem[1, 1] == 0.5
    assert dem[2, 2] == 1.0
    burnt = test_vol.copy()
    # Non-inclusive: only voxels strictly above the surface are burnt.
    burn_volume(burnt, dem, z_index_for_ahd, below=False, inclusive=False)
    # dem[0,0] == 0.0 -> surface at z index 1: index 1 survives, index 2 is NaN.
    assert not np.isnan(burnt[0, 0, 0])
    assert not np.isnan(burnt[0, 0, 1])
    assert np.isnan(burnt[0, 0, 2])
    # dem[2,2] == 1.0 -> surface at z index 2: index 2 survives, index 3 is NaN.
    assert not np.isnan(burnt[2, 2, 0])
    assert not np.isnan(burnt[2, 2, 1])
    assert not np.isnan(burnt[2, 2, 2])
    assert np.isnan(burnt[2, 2, 3])
    burnt = test_vol.copy()
    # Inclusive: the voxel at the surface itself is burnt as well.
    burn_volume(burnt, dem, z_index_for_ahd, below=False, inclusive=True)
    assert not np.isnan(burnt[0, 0, 0])
    assert np.isnan(burnt[0, 0, 1])
    assert np.isnan(burnt[0, 0, 2])
    assert not np.isnan(burnt[2, 2, 0])
    assert not np.isnan(burnt[2, 2, 1])
    assert np.isnan(burnt[2, 2, 2])
    assert np.isnan(burnt[2, 2, 3])
def test_slice_volume():
    """slice_volume / SliceOperation extract horizontal slices at DEM-relative heights.

    The volume is filled with 0..59 so test_vol[i, j, k] uniquely identifies
    a voxel; DEM cells below/above the volume range, or NaN, must yield NaN.
    """
    dims = (3, 4, 5)
    dim_x, dim_y, dim_z = dims
    x = np.arange(0.0, dim_x * dim_y * dim_z, 1.0)
    test_vol = np.reshape(x, dims)
    dem = np.empty((3, 4))
    z_index_for_ahd = z_index_for_ahd_functor(b=+1)  # z = 0 is datum height -1, z = 4 is datum height 3
    # DEM heights: [0,0] maps below the volume, [0,1] above it, [2,2] is NaN;
    # every other cell (-1.0) maps to z index 0.
    dem[0, 0] = -2.0
    dem[0, 1] = +5.0
    dem[0, 2] = -1.0
    dem[0, 3] = -1.0
    dem[1, 0] = -1.0
    dem[1, 1] = -1.0
    dem[1, 2] = -1.0
    dem[1, 3] = -1.0
    dem[2, 0] = -1.0
    dem[2, 1] = -1.0
    dem[2, 2] = np.nan
    dem[2, 3] = -1.0
    # TODO: I do not really like using volume_value_at. Make sure this is unit tested itself.
    def f(x, y):
        # Value of the voxel at the DEM surface above cell (x, y).
        return volume_value_at(test_vol, dem, z_index_for_ahd, x, y)
    assert np.isnan(f(0, 0))
    assert np.isnan(f(0, 1))
    assert f(0, 2) == test_vol[0, 2, 0]
    assert f(0, 3) == test_vol[0, 3, 0]
    assert f(1, 0) == test_vol[1, 0, 0]
    assert f(1, 1) == test_vol[1, 1, 0]
    assert f(1, 2) == test_vol[1, 2, 0]
    assert f(1, 3) == test_vol[1, 3, 0]
    assert f(2, 0) == test_vol[2, 0, 0]
    assert f(2, 1) == test_vol[2, 1, 0]
    assert np.isnan(f(2, 2))
    assert f(2, 3) == test_vol[2, 3, 0]
    # slice_volume must agree cell-by-cell with volume_value_at.
    s = slice_volume(test_vol, dem, z_index_for_ahd)
    assert np.isnan(s[0, 0])
    assert np.isnan(s[0, 1])
    assert f(0, 2) == s[0, 2]
    assert f(0, 3) == s[0, 3]
    assert f(1, 0) == s[1, 0]
    assert f(1, 1) == s[1, 1]
    assert f(1, 2) == s[1, 2]
    assert f(1, 3) == s[1, 3]
    assert f(2, 0) == s[2, 0]
    assert f(2, 1) == s[2, 1]
    assert np.isnan(s[2, 2])
    assert f(2, 3) == s[2, 3]
    sops = SliceOperation(dem, z_index_for_ahd)
    test_slices = sops.from_ahd_to_depth_below_ground_level(test_vol, from_depth=-1, to_depth=+1)
    s = test_slices
    assert s.shape[0] == dim_x
    assert s.shape[1] == dim_y
    assert s.shape[2] == 3
    index_ground_lvl = 1  # the top level is for depth=0 (dem), but it is at index 1 in the resulting volume s. one metre below ground level is what is at index 0 for the third dimension.
    assert np.isnan(s[0, 0, index_ground_lvl])
    assert np.isnan(s[0, 1, index_ground_lvl])
    assert f(0, 2) == s[0, 2, index_ground_lvl]
    assert f(0, 3) == s[0, 3, index_ground_lvl]
    assert f(1, 0) == s[1, 0, index_ground_lvl]
    assert f(1, 1) == s[1, 1, index_ground_lvl]
    assert f(1, 2) == s[1, 2, index_ground_lvl]
    assert f(1, 3) == s[1, 3, index_ground_lvl]
    assert f(2, 0) == s[2, 0, index_ground_lvl]
    assert f(2, 1) == s[2, 1, index_ground_lvl]
    assert np.isnan(s[2, 2, index_ground_lvl])
    assert f(2, 3) == s[2, 3, index_ground_lvl]
    averaged_slices = sops.reduce_slices_at_depths(test_vol, from_depth=-1, to_depth=0, reduce_func=SliceOperation.arithmetic_average)
    s = averaged_slices
    assert np.isnan(s[0, 0])
    assert np.isnan(s[0, 1])
    # test_vol was constructed such that Z values increase by one at a given X/Y location, so the slicing/averaging result is like offsetting by 1/2:
    assert f(0, 2) + 0.5 == s[0, 2]
    assert f(0, 3) + 0.5 == s[0, 3]
    assert f(1, 0) + 0.5 == s[1, 0]
    assert f(1, 1) + 0.5 == s[1, 1]
    assert f(1, 2) + 0.5 == s[1, 2]
    assert f(1, 3) + 0.5 == s[1, 3]
    assert f(2, 0) + 0.5 == s[2, 0]
    assert f(2, 1) + 0.5 == s[2, 1]
    assert np.isnan(s[2, 2])
    assert f(2, 3) + 0.5 == s[2, 3]
def get_test_bore_df():
    """Four synthetic bore records around the test raster's origin.

    The first record sits west of the raster origin (x_min - 0.5), so raster
    drilling is expected to return NaN for it (see test_raster_drill).
    """
    origin_x = 383200
    origin_y = 6422275
    return pd.DataFrame({
        EASTING_COL: np.array([origin_x - .5, origin_x + .5, origin_x + 1.1, origin_x + 1.1]),
        NORTHING_COL: np.array([origin_y - 0.1, origin_y - 0.1, origin_y - 0.9, origin_y - 1.1]),
        'fake_obs': np.array([.1, .2, .3, .4]),
        DEPTH_FROM_COL: np.array([1.11, 2.22, 3.33, 4.44]),
        DEPTH_TO_COL: np.array([2.22, 3.33, 4.44, 5.55]),
    })
def create_test_slice(ni=3, nj=2, start=0.0, incr_1=1.0):
    """Build an (nj, ni) test array where cell [row, col] = start + incr_1 * (col + ni * row)."""
    return np.array([
        [start + incr_1 * (col + ni * row) for col in range(ni)]
        for row in range(nj)
    ])
def get_slices_stack(n=2, ni=3, nj=2, start=0.0, incr_1=1.0, incr_2=0.1):
    """Return n copies of the base test slice, the k-th offset by k * incr_2."""
    base = create_test_slice(ni, nj, start, incr_1)
    return [base + incr_2 * offset_index for offset_index in range(n)]
def create_test_raster(x_min=383200, y_max=6422275, grid_res=1, ni=2, nj=2, start=1.0, incr_1=1.0, output_file='c:/tmp/test_raster_drill.tif'):
    """Write a small synthetic GeoTIFF fixture to output_file.

    Cell values come from create_test_slice; the raster origin (upper-left
    corner) is (x_min, y_max).
    """
    from rasterio.transform import from_origin
    # UTM zone 50 south, GRS80 ellipsoid, metric units.
    crs = rasterio.crs.CRS({'proj': 'utm', 'zone': 50, 'south': True, 'ellps': 'GRS80', 'units': 'm', 'no_defs': True})
    cell_values = create_test_slice(ni, nj, start, incr_1)
    exporter = GeotiffExporter(crs, from_origin(x_min, y_max, grid_res, grid_res))
    exporter.export_geotiff(cell_values, output_file, None)
def test_raster_drill():
    """Drilling the 2x2 test raster at each bore yields the cell value, NaN off-raster."""
    # Fixture generated with create_test_raster defaults (x_min=383200, y_max=6422275, grid_res=1).
    bore_df = get_test_bore_df()
    dem = rasterio.open(os.path.join(pkg_dir, 'tests', 'data', 'test_raster_drill.tif'))
    converter = HeightDatumConverter(dem)
    heights = converter.raster_drill_df(bore_df)
    assert np.isnan(heights[0])
    for index, expected_height in [(1, 1.0), (2, 2.0), (3, 4.0)]:
        assert heights[index] == expected_height
def test_add_ahd():
    """add_height turns depth-below-ground columns into AHD heights via the DEM."""
    bore_df = get_test_bore_df()
    dem = rasterio.open(os.path.join(pkg_dir, 'tests', 'data', 'test_raster_drill.tif'))
    converter = HeightDatumConverter(dem)
    with_ahd = converter.add_height(bore_df)
    from_ahd = with_ahd[DEPTH_FROM_AHD_COL]
    to_ahd = with_ahd[DEPTH_TO_AHD_COL]
    # Surface heights at the four bores are nan, 1.0, 2.0, 4.0 (as asserted
    # in test_raster_drill); AHD depth = surface height - depth below ground.
    assert np.isnan(from_ahd[0])
    assert np.isnan(to_ahd[0])
    surface = {1: 1.0, 2: 2.0, 3: 4.0}
    depth_from = {1: 2.22, 2: 3.33, 3: 4.44}
    depth_to = {1: 3.33, 2: 4.44, 3: 5.55}
    for i in (1, 2, 3):
        assert from_ahd[i] == surface[i] - depth_from[i]
        assert to_ahd[i] == surface[i] - depth_to[i]
def test_average_slices():
    """Averaging the two stacked slices (offset by incr_2) shifts the base by incr_2 / 2."""
    incr_2 = 0.1
    averaged = average_slices(get_slices_stack())
    half_offset = incr_2 / 2
    assert averaged[0, 0] == half_offset
    assert averaged[0, 1] == half_offset + 1.0
    assert averaged[1, 0] == half_offset + 3.0
# create_test_raster(x_min = 383200, y_max = 6422275, grid_res = 25 , ni = 10, nj = 10, start=0.0, incr_1 = 1.0, output_file=os.path.join(pkg_dir, 'tests', 'data', 'test_raster_25m.tif'))
def test_surface_array():
    """surface_array resamples the 25 m test DEM onto a coarser 100 m grid."""
    dem = rasterio.open(os.path.join(pkg_dir, 'tests', 'data', 'test_raster_25m.tif'))
    grid_res = 100
    x_min = 383200 + 5   # falls within the first column of the source raster
    y_max = 6422275 - 5  # falls within the first row/band from the top
    # 200 m offsets over the 25 m DEM resolution: lands in the 9th row/column.
    y_min = y_max - 2 * grid_res
    x_max = x_min + 2 * grid_res
    surf_dem = surface_array(dem, x_min, y_min, x_max, y_max, grid_res)
    assert surf_dem.shape == (2, 2)
    assert surf_dem[0, 0] == 80 + 0.0
    assert surf_dem[0, 1] == 80 + -4 * 10.0
    assert surf_dem[1, 0] == 80 + 4.0
    assert surf_dem[1, 1] == 80 + -4 * 10.0 + 4.0
def test_flip():
    """flip reverses exactly the requested axis of a 3D array."""
    volume = np.zeros([2, 3, 4])
    volume[1, 2, 3] = 3.14
    # Flipping one axis moves the marker to the mirrored index on that axis only.
    for axis, mirrored_index in ((0, (0, 2, 3)), (1, (1, 0, 3)), (2, (1, 2, 0))):
        assert flip(volume, axis)[mirrored_index] == 3.14
def test_get_coords_from_gpd_shape():
easting_values = np.array([.1, .2, .3, .3 ])
northing_values = np.array([.12, .22, .32, .32 ])
coords = get_unique_coordinates(easting_values, northing_values)
assert coords.shape[0] == 3
assert coords.shape[1] == 2
ptsdf = pd.DataFrame({ 'Coordinates' : list(zip(coords[:,0], coords[:,1])) })
ptsdf['Coordinates'] = ptsdf['Coordinates'].apply(Point)
gdf = gpd.GeoDataFrame(ptsdf, geometry='Coordinates')
gdf.crs = "+proj=utm +zone=56 +ellps=GRS80 +south +units=m +no_defs"
geoloc = get_coords_from_gpd_shape(gdf, colname='Coordinates', out_colnames=['xx','yy'])
| |
__setattr__(self, name, value):
self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.Routes.Route.ExtAttributesList.UnknownAttributes, ['attribute_type', 'attribute_length', 'attribute_value'], name, value)
@staticmethod
def _meta_info():
    """Return YDK meta-info for the UnknownAttributes list entry."""
    # Function-scoped import defers loading the large generated _meta module.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
    return meta._meta_table['OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.Routes.Route.ExtAttributesList.UnknownAttributes']['meta_info']

@staticmethod
def _meta_info():
    """Return YDK meta-info for ExtAttributesList."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
    return meta._meta_table['OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.Routes.Route.ExtAttributesList']['meta_info']
class LastModifiedDate(_Entity_):
    """
    LastModifiedDate

    .. attribute:: time_value

        TimeValue

        **type**\: str

        **config**\: False

    """

    _prefix = 'ipv4-bgp-oc-oper'
    _revision = '2017-09-07'

    def __init__(self):
        # Zero-argument super() is Python 3 only; the explicit form keeps
        # Python 2 compatibility.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.Routes.Route.LastModifiedDate, self).__init__()

        self.yang_name = "last-modified-date"
        self.yang_parent_name = "route"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Single string leaf: "time-value".
        self._leafs = OrderedDict([
            ('time_value', (YLeaf(YType.str, 'time-value'), ['str'])),
        ])
        self.time_value = None
        self._segment_path = lambda: "last-modified-date"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK's validating setattr.
        self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.Routes.Route.LastModifiedDate, ['time_value'], name, value)

    @staticmethod
    def _meta_info():
        # Function-scoped import defers loading the generated meta tables.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
        return meta._meta_table['OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.Routes.Route.LastModifiedDate']['meta_info']
class LastUpdateRecieved(_Entity_):
    """
    LastUpdateRecieved

    .. attribute:: time_value

        TimeValue

        **type**\: str

        **config**\: False

    """

    # NOTE(review): "Recieved" is misspelled in the YANG model itself and is
    # therefore part of the generated API name; it cannot be corrected here.
    _prefix = 'ipv4-bgp-oc-oper'
    _revision = '2017-09-07'

    def __init__(self):
        # Zero-argument super() is Python 3 only; the explicit form keeps
        # Python 2 compatibility.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.Routes.Route.LastUpdateRecieved, self).__init__()

        self.yang_name = "last-update-recieved"
        self.yang_parent_name = "route"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Single string leaf: "time-value".
        self._leafs = OrderedDict([
            ('time_value', (YLeaf(YType.str, 'time-value'), ['str'])),
        ])
        self.time_value = None
        self._segment_path = lambda: "last-update-recieved"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK's validating setattr.
        self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.Routes.Route.LastUpdateRecieved, ['time_value'], name, value)

    @staticmethod
    def _meta_info():
        # Function-scoped import defers loading the generated meta tables.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
        return meta._meta_table['OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.Routes.Route.LastUpdateRecieved']['meta_info']
@staticmethod
def _meta_info():
    """Return YDK meta-info for the Route list entry."""
    # Function-scoped import defers loading the large generated _meta module.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
    return meta._meta_table['OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.Routes.Route']['meta_info']

@staticmethod
def _meta_info():
    """Return YDK meta-info for the Routes container."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
    return meta._meta_table['OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.Routes']['meta_info']
class NumRoutes(_Entity_):
    """
    Number of routes in adjacency rib out\-bound
    post\-policy table

    .. attribute:: num_routes

        NumRoutes

        **type**\: int

        **range:** 0..18446744073709551615

        **config**\: False

    """

    # NOTE(review): the generated docstring says "out-bound post-policy" but
    # this node's yang_parent_name is "adj-rib-in-pre" — the description in
    # the source YANG model looks copy-pasted; verify against the model.
    _prefix = 'ipv4-bgp-oc-oper'
    _revision = '2017-09-07'

    def __init__(self):
        # Zero-argument super() is Python 3 only; the explicit form keeps
        # Python 2 compatibility.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.NumRoutes, self).__init__()

        self.yang_name = "num-routes"
        self.yang_parent_name = "adj-rib-in-pre"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Single uint64 leaf: "num-routes".
        self._leafs = OrderedDict([
            ('num_routes', (YLeaf(YType.uint64, 'num-routes'), ['int'])),
        ])
        self.num_routes = None
        self._segment_path = lambda: "num-routes"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK's validating setattr.
        self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.NumRoutes, ['num_routes'], name, value)

    @staticmethod
    def _meta_info():
        # Function-scoped import defers loading the generated meta tables.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
        return meta._meta_table['OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre.NumRoutes']['meta_info']
@staticmethod
def _meta_info():
    """Return YDK meta-info for the AdjRibInPre container."""
    # Function-scoped import defers loading the large generated _meta module.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
    return meta._meta_table['OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor.AdjRibInPre']['meta_info']

@staticmethod
def _meta_info():
    """Return YDK meta-info for the OpenConfigNeighbor list entry."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
    return meta._meta_table['OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors.OpenConfigNeighbor']['meta_info']

@staticmethod
def _meta_info():
    """Return YDK meta-info for the OpenConfigNeighbors container."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
    return meta._meta_table['OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast.OpenConfigNeighbors']['meta_info']

@staticmethod
def _meta_info():
    """Return YDK meta-info for the Ipv4Unicast container."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_bgp_oc_oper as meta
    return meta._meta_table['OcBgp.BgpRib.AfiSafiTable.Ipv4Unicast']['meta_info']
class Ipv6Unicast(_Entity_):
"""
IPv6 Unicast
.. attribute:: loc_rib
Local rib route table
**type**\: :py:class:`LocRib <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib>`
**config**\: False
.. attribute:: open_config_neighbors
Neighbor list
**type**\: :py:class:`OpenConfigNeighbors <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.OpenConfigNeighbors>`
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast, self).__init__()
self.yang_name = "ipv6-unicast"
self.yang_parent_name = "afi-safi-table"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("loc-rib", ("loc_rib", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib)), ("open-config-neighbors", ("open_config_neighbors", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.OpenConfigNeighbors))])
self._leafs = OrderedDict()
self.loc_rib = OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib()
self.loc_rib.parent = self
self._children_name_map["loc_rib"] = "loc-rib"
self.open_config_neighbors = OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.OpenConfigNeighbors()
self.open_config_neighbors.parent = self
self._children_name_map["open_config_neighbors"] = "open-config-neighbors"
self._segment_path = lambda: "ipv6-unicast"
self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/bgp-rib/afi-safi-table/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast, [], name, value)
class LocRib(_Entity_):
"""
Local rib route table
.. attribute:: routes
routes table
**type**\: :py:class:`Routes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes>`
**config**\: False
.. attribute:: num_routes
Number of routes in adjacency rib out\-bound post\-policy table
**type**\: :py:class:`NumRoutes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.NumRoutes>`
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib, self).__init__()
self.yang_name = "loc-rib"
self.yang_parent_name = "ipv6-unicast"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("routes", ("routes", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes)), ("num-routes", ("num_routes", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.NumRoutes))])
self._leafs = OrderedDict()
self.routes = OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes()
self.routes.parent = self
self._children_name_map["routes"] = "routes"
self.num_routes = OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.NumRoutes()
self.num_routes.parent = self
self._children_name_map["num_routes"] = "num-routes"
self._segment_path = lambda: "loc-rib"
self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/bgp-rib/afi-safi-table/ipv6-unicast/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib, [], name, value)
class Routes(_Entity_):
"""
routes table
.. attribute:: route
route entry
**type**\: list of :py:class:`Route <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route>`
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes, self).__init__()
self.yang_name = "routes"
self.yang_parent_name = "loc-rib"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("route", ("route", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route))])
self._leafs = OrderedDict()
self.route = YList(self)
self._segment_path = lambda: "routes"
self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/bgp-rib/afi-safi-table/ipv6-unicast/loc-rib/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes, [], name, value)
class Route(_Entity_):
"""
route entry
.. attribute:: route
Network in prefix/length format
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
**config**\: False
.. attribute:: neighbor_address
Neighbor address
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
**config**\: False
.. attribute:: path_id
Path ID
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: prefix_name
Prefix
**type**\: :py:class:`PrefixName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.PrefixName>`
**config**\: False
.. attribute:: route_attr_list
RouteAttributesList
**type**\: :py:class:`RouteAttrList <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.RouteAttrList>`
**config**\: False
.. attribute:: ext_attributes_list
ExtAttributesList
**type**\: :py:class:`ExtAttributesList <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.ExtAttributesList>`
**config**\: False
.. attribute:: last_modified_date
LastModifiedDate
**type**\: :py:class:`LastModifiedDate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.LastModifiedDate>`
**config**\: False
.. attribute:: last_update_recieved
LastUpdateRecieved
**type**\: :py:class:`LastUpdateRecieved <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.LastUpdateRecieved>`
**config**\: False
.. attribute:: valid_route
ValidRoute
**type**\: bool
**config**\: False
.. attribute:: invalid_reason
IndentityRef
**type**\: :py:class:`BgpOcInvalidRouteReason <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.BgpOcInvalidRouteReason>`
**config**\: False
.. attribute:: best_path
BestPath
**type**\: bool
**config**\: False
"""
_prefix = 'ipv4-bgp-oc-oper'
_revision = '2017-09-07'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route, self).__init__()
self.yang_name = "route"
self.yang_parent_name = "routes"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("prefix-name", ("prefix_name", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.PrefixName)), ("route-attr-list", ("route_attr_list", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.RouteAttrList)), ("ext-attributes-list", ("ext_attributes_list", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.ExtAttributesList)), ("last-modified-date", ("last_modified_date", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.LastModifiedDate)), ("last-update-recieved", ("last_update_recieved", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.LastUpdateRecieved))])
self._leafs = OrderedDict([
('route', (YLeaf(YType.str, 'route'), ['str','str'])),
('neighbor_address', (YLeaf(YType.str, 'neighbor-address'), ['str','str'])),
('path_id', (YLeaf(YType.uint32, 'path-id'), ['int'])),
('valid_route', (YLeaf(YType.boolean, 'valid-route'), ['bool'])),
('invalid_reason', (YLeaf(YType.enumeration, 'invalid-reason'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper', 'BgpOcInvalidRouteReason', '')])),
('best_path', (YLeaf(YType.boolean, 'best-path'), ['bool'])),
])
self.route = None
self.neighbor_address = None
self.path_id = None
self.valid_route = None
self.invalid_reason = None
self.best_path = None
self.prefix_name = OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.PrefixName()
self.prefix_name.parent = self
self._children_name_map["prefix_name"] = "prefix-name"
self.route_attr_list = OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.RouteAttrList()
self.route_attr_list.parent = self
self._children_name_map["route_attr_list"] = "route-attr-list"
self.ext_attributes_list = OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.ExtAttributesList()
self.ext_attributes_list.parent = self
self._children_name_map["ext_attributes_list"] = "ext-attributes-list"
self.last_modified_date = OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.LastModifiedDate()
self.last_modified_date.parent = self
self._children_name_map["last_modified_date"] = "last-modified-date"
self.last_update_recieved = OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.LastUpdateRecieved()
self.last_update_recieved.parent = self
self._children_name_map["last_update_recieved"] = "last-update-recieved"
self._segment_path = lambda: "route"
self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/bgp-rib/afi-safi-table/ipv6-unicast/loc-rib/routes/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
    # Generated YDK hook: route attribute writes go through _perform_setattr
    # so that, once the entity is frozen, only the declared leafs are settable.
    self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route, ['route', 'neighbor_address', 'path_id', 'valid_route', 'invalid_reason', 'best_path'], name, value)
class PrefixName(_Entity_):
    """
    Prefix
    .. attribute:: prefix
    Prefix
    **type**\: :py:class:`Prefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.PrefixName.Prefix>`
    **config**\: False
    .. attribute:: prefix_length
    Prefix length
    **type**\: int
    **range:** 0..255
    **config**\: False
    """
    # YANG module prefix and revision this generated class was built from.
    _prefix = 'ipv4-bgp-oc-oper'
    _revision = '2017-09-07'
    def __init__(self):
        # Generated initializer: registers YANG metadata, the single
        # prefix-length leaf, and the nested prefix container.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.PrefixName, self).__init__()
        self.yang_name = "prefix-name"
        self.yang_parent_name = "route"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("prefix", ("prefix", OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.PrefixName.Prefix))])
        self._leafs = OrderedDict([
            ('prefix_length', (YLeaf(YType.uint8, 'prefix-length'), ['int'])),
        ])
        self.prefix_length = None
        self.prefix = OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.PrefixName.Prefix()
        self.prefix.parent = self
        self._children_name_map["prefix"] = "prefix"
        self._segment_path = lambda: "prefix-name"
        self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/bgp-rib/afi-safi-table/ipv6-unicast/loc-rib/routes/route/%s" % self._segment_path()
        # Freeze the entity: further attribute writes are validated by __setattr__.
        self._is_frozen = True
    def __setattr__(self, name, value):
        # Generated YDK hook: only the prefix_length leaf is directly assignable.
        self._perform_setattr(OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.PrefixName, ['prefix_length'], name, value)
class Prefix(_Entity_):
    """
    Prefix
    .. attribute:: afi
    AFI
    **type**\: :py:class:`BgpOcAfi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper.BgpOcAfi>`
    **config**\: False
    .. attribute:: ipv4_address
    IPv4 Addr
    **type**\: str
    **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
    **config**\: False
    .. attribute:: ipv6_address
    IPv6 Addr
    **type**\: str
    **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
    **config**\: False
    """
    # YANG module prefix and revision this generated class was built from.
    _prefix = 'ipv4-bgp-oc-oper'
    _revision = '2017-09-07'
    def __init__(self):
        # Generated initializer: leaf-only container (afi + one of the two
        # address leafs); no child classes.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(OcBgp.BgpRib.AfiSafiTable.Ipv6Unicast.LocRib.Routes.Route.PrefixName.Prefix, self).__init__()
        self.yang_name = "prefix"
        self.yang_parent_name = "prefix-name"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('afi', (YLeaf(YType.enumeration, 'afi'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_bgp_oc_oper', 'BgpOcAfi', '')])),
            ('ipv4_address', (YLeaf(YType.str, 'ipv4-address'), ['str'])),
            ('ipv6_address', (YLeaf(YType.str, 'ipv6-address'), ['str'])),
        ])
        self.afi = None
        self.ipv4_address = None
        self.ipv6_address = None
        self._segment_path = lambda: "prefix"
        self._absolute_path = lambda: "Cisco-IOS-XR-ipv4-bgp-oc-oper:oc-bgp/bgp-rib/afi-safi-table/ipv6-unicast/loc-rib/routes/route/prefix-name/%s" % self._segment_path()
        # Freeze the entity: further attribute writes are validated by __setattr__.
        self._is_frozen = True
def | |
from gfracture.functions import convert_geo_list_to_geoseries
from gfracture.functions import make_vertical_segments
from gfracture.functions import make_horizontal_segments
from gfracture.functions import make_polygon_from_tuple
import geopandas as gpd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from shapely import geometry
from shapely.geometry import Point, LineString, Polygon
from pathlib import Path
class FractureTrace(object):
    """A class to contain the results of fracture trace analysis from
    a vector file (shp, dxf, etc.) or FractureImage object results."""
    # Figure display / persistence behaviour.
    show_figures = False
    output_path = './output/'
    save_figures = False
    # Restrict analysis to one scanline direction.
    limit_direction_to = None #'horizontal', 'vertical', or 'None'
    # Moving-segment parameters, in metres.
    segment_width_m = 1
    segment_step_increment_m = 0.2
    # Spacing between parallel scanlines, in metres.
    scanline_distance_m = 0.5
    # Pixel-to-metre conversion factor applied by scale().
    scale_m_px = 1
    # Moving-window parameters, in metres.
    window_width_m = 1
    window_step_increment_m = 0.2
def __init__(self):
[]
def set_output_path(self, path):
self.output_path = path
Path(self.output_path).mkdir(parents=True, exist_ok=True)
def list_params(self):
""" Print a list of object parameters """
print('show_figures: ' + str(self.show_figures))
print('save_figures: ' + str(self.save_figures))
print('segment_width_m: ' + str(self.window_width_m))
print('segment_step_increment_m: ' + str(self.window_step_increment_m))
print('scanline_distance_m: ' + str(self.scanline_distance_m))
def load_vert_traces(self, file_path):
    """Load vertical fracture traces from a vector file into ``vert_traces``."""
    self.vert_traces = gpd.GeoDataFrame(gpd.read_file(file_path))
    print('Traces loaded')
def load_horiz_traces(self, file_path):
    """Load horizontal fracture traces from a vector file into ``horiz_traces``."""
    self.horiz_traces = gpd.GeoDataFrame(gpd.read_file(file_path))
    print('Traces loaded')
def combine_vert_horiz_traces(self):
    """Concatenate the separately loaded vertical and horizontal traces into
    the single ``traces`` table, optionally plotting/saving the result."""
    self.traces = pd.concat([self.horiz_traces, self.vert_traces])
    print('Traces combined from vertical and horizontal')
    if self.show_figures:
        self.traces.plot()
        if self.save_figures:
            plt.savefig(self.output_path+'traces.pdf')
            plt.savefig(self.output_path+'traces.png')
        plt.show(block=False)
def load_traces(self, file_path):
    """Load fracture traces from a vector file (shp, dxf, ...), dropping
    records with missing geometry, and optionally plot/save them."""
    self.traces = gpd.GeoDataFrame(gpd.read_file(file_path))
    # Filter out records whose geometry is missing/None.
    self.traces=self.traces[~self.traces.geom_type.isna()]
    print('Traces loaded')
    if self.show_figures:
        self.traces.plot()
        if self.save_figures:
            plt.savefig(self.output_path+'traces.pdf')
            plt.savefig(self.output_path+'traces.png')
        plt.show(block=False)
def load_masks(self, file_path):
    """ Loads mask, selects only polygons """
    self.masks = gpd.GeoDataFrame(gpd.read_file(file_path))
    # Keep polygon features only; point/line features cannot act as masks.
    self.masks = self.masks[self.masks.geometry.geom_type == 'Polygon']
    self.masks = self.masks.reset_index()
    print('Masks loaded')
    if self.show_figures:
        self.masks.plot()
        plt.show(block=False)
def scale(self, scale_m_px):
    """Scale traces (and masks, if loaded) from pixels to metres.

    NOTE(review): ``affine_transform`` returns a GeoSeries, so ``traces``
    and ``masks`` change type from GeoDataFrame to GeoSeries here, losing
    any attribute columns — confirm downstream code expects that.
    """
    self.scale_m_px = scale_m_px
    # Diagonal affine matrix: uniform scaling, no rotation or translation.
    matrix = [self.scale_m_px, 0, 0, self.scale_m_px, 0, 0]
    self.traces = self.traces.affine_transform(matrix)
    print('Scaling and overwritting traces')
    if hasattr(self, 'masks'):
        self.masks = self.masks.affine_transform(matrix)
        print('Scaling and overwritting masks')
    if self.show_figures:
        self.traces.plot(color = 'k')
        if hasattr(self, 'masks'): self.masks.plot(color = 'r')
        plt.show(block=False)
def mask_traces(self):
    """Remove the parts of traces falling inside any mask polygon.

    The pre-mask traces are kept in ``traces_orig``; ``traces`` is
    overwritten with the clipped, non-empty result.
    """
    self.traces_orig = self.traces
    # NOTE(review): iterating self.masks yields geometries only when masks
    # is a GeoSeries (e.g. after scale()); iterating a GeoDataFrame would
    # yield column names instead — confirm scale()/the expected type.
    for mask in self.masks:
        trace_diff = self.traces.difference(mask)
        self.traces = trace_diff[~trace_diff.is_empty]
    print('Masking traces (saved & overwritten)')
    if self.show_figures:
        _,ax = plt.subplots(1, 1)
        self.traces_orig.plot(color = 'k', ax=ax, alpha = 0.5)
        self.traces.plot(color = 'r', ax=ax)
        for mask in self.masks:
            ax.plot(*mask.exterior.xy, color = 'b')
        if self.save_figures:
            plt.savefig(self.output_path+'masked_traces.pdf')
            plt.savefig(self.output_path+'masked_traces.png')
        plt.show(block=False)
def make_horizontal_scanlines(self):
    """ Generate horizontal scanlines """
    # Bounding box of all traces: indices 1,3 = miny,maxy; 0,2 = minx,maxx.
    vert_limits = list(self.traces.total_bounds[i] for i in [1,3])
    horiz_limits = list(self.traces.total_bounds[i] for i in [0,2])
    # y positions spaced scanline_distance_m apart, offset half a spacing
    # in from the bottom edge.
    vert_splits = np.arange(
        min(vert_limits) + self.scanline_distance_m/2,
        max(vert_limits), self.scanline_distance_m
    )
    # Each scanline spans the full x extent at a fixed y.
    start = list(zip(np.repeat(min(horiz_limits),len(vert_splits)), vert_splits))
    end = list(zip(np.repeat(max(horiz_limits),len(vert_splits)), vert_splits))
    lines = list(zip(start,end))
    names = ['scan_h_' + str(i) for i in np.arange(0,len(lines))+1]
    self.horizontal_scanlines = gpd.GeoDataFrame({
        'name': names,
        'y_coord': vert_splits},
        geometry = gpd.GeoSeries(map(LineString, lines))
    )
    # Keep original length/geometry so later masking/trimming can be compared.
    self.horizontal_scanlines['orig_length'] = self.horizontal_scanlines.length
    self.horizontal_scanlines['orig_geom'] = self.horizontal_scanlines['geometry']
    print('Horizontal scanlines generated')
def make_vertical_scanlines(self):
    """Generate vertical scanlines spanning the traces' bounding box.

    Scanlines are spaced ``scanline_distance_m`` apart in x, offset half a
    spacing in from the left edge, and stored in ``vertical_scanlines``
    together with their original lengths/geometries for later comparison.
    """
    # Bounding box of all traces: indices 1,3 = miny,maxy; 0,2 = minx,maxx.
    vert_limits = list(self.traces.total_bounds[i] for i in [1,3])
    horiz_limits = list(self.traces.total_bounds[i] for i in [0,2])
    horiz_splits = np.arange(
        min(horiz_limits) + self.scanline_distance_m/2,
        max(horiz_limits), self.scanline_distance_m
    )
    # Each scanline spans the full y extent at a fixed x.
    start = list(zip(horiz_splits, np.repeat(min(vert_limits),len(horiz_splits))))
    end = list(zip(horiz_splits, np.repeat(max(vert_limits),len(horiz_splits))))
    lines = list(zip(start,end))
    # FIX: use 'scan_v_' so names match the 'scan_h_' convention used by
    # make_horizontal_scanlines (previously produced 'scan_v1', 'scan_v2', ...).
    names = ['scan_v_' + str(i) for i in np.arange(0,len(lines))+1]
    self.vertical_scanlines = gpd.GeoDataFrame({
        'name': names,
        'x_coord': horiz_splits},
        geometry = gpd.GeoSeries(map(LineString, lines))
    )
    self.vertical_scanlines['orig_length'] = self.vertical_scanlines.length
    self.vertical_scanlines['orig_geom'] = self.vertical_scanlines['geometry']
    print('Vertical scanlines generated')
def make_scanlines(self):
if self.limit_direction_to != 'vertical':
self.make_horizontal_scanlines()
if self.limit_direction_to != 'horizontal':
self.make_vertical_scanlines()
print('Scanlines intersected with convex hull')
if self.show_figures:
_, ax = plt.subplots(1, 1)
self.traces.plot(color = 'k', ax=ax)
if self.limit_direction_to != 'vertical':
self.horizontal_scanlines.plot(color = 'b', ax=ax)
if self.limit_direction_to != 'horizontal':
self.vertical_scanlines.plot(color = 'r', ax=ax)
if self.save_figures:
plt.savefig(self.output_path+'scanlines.pdf')
plt.savefig(self.output_path+'scanlines.png')
plt.show(block=False)
def mask_horizontal_scanlines(self):
    """Clip horizontal scanlines by the mask polygons.

    A snapshot of the unmasked scanlines is kept in
    ``horizontal_scanlines_orig``; the masked geometry and its length are
    also stored as extra columns.
    """
    # FIX: take a real copy — the geometries below are modified in place,
    # so a bare reference would alias the "saved" original and the snapshot
    # would silently track the masked result.
    self.horizontal_scanlines_orig = self.horizontal_scanlines.copy()
    for mask in self.masks:
        for (i,_) in enumerate(self.horizontal_scanlines.geometry):
            self.horizontal_scanlines.geometry[i] = self.horizontal_scanlines.geometry[i].difference(mask)
    self.horizontal_scanlines['masked_geom'] = self.horizontal_scanlines.geometry
    self.horizontal_scanlines['masked_length'] = self.horizontal_scanlines.length
    print('Masking horizontal scanlines (saved & overwritten)')
def mask_vertical_scanlines(self):
    """Clip vertical scanlines by the mask polygons.

    A snapshot of the unmasked scanlines is kept in
    ``vertical_scanlines_orig``; the masked geometry and its length are
    also stored as extra columns.
    """
    # FIX: take a real copy — the geometries below are modified in place,
    # so a bare reference would alias the "saved" original and the snapshot
    # would silently track the masked result.
    self.vertical_scanlines_orig = self.vertical_scanlines.copy()
    for mask in self.masks:
        for (i,_) in enumerate(self.vertical_scanlines.geometry):
            self.vertical_scanlines.geometry[i] = self.vertical_scanlines.geometry[i].difference(mask)
    self.vertical_scanlines['masked_geom'] = self.vertical_scanlines.geometry
    self.vertical_scanlines['masked_length'] = self.vertical_scanlines.length
    print('Masking vertical scanlines (saved & overwritten)')
def mask_scanlines(self):
    """Clip scanlines in the enabled direction(s) by the mask polygons,
    optionally plotting originals (faded) against the masked results."""
    if self.limit_direction_to != 'vertical':
        self.mask_horizontal_scanlines()
    if self.limit_direction_to != 'horizontal':
        self.mask_vertical_scanlines()
    if self.show_figures:
        _, ax = plt.subplots(1, 1)
        self.traces.plot(color = 'k', ax=ax)
        if self.limit_direction_to != 'vertical':
            # Faded originals under the masked (red) horizontal scanlines.
            (self
             .horizontal_scanlines_orig[~self.horizontal_scanlines_orig.is_empty]
             .plot(color = 'k', ax=ax, alpha = 0.5)
            )
            (self
             .horizontal_scanlines[~self.horizontal_scanlines.is_empty]
             .plot(color = 'r', ax=ax)
            )
        if self.limit_direction_to != 'horizontal':
            # Faded originals under the masked (blue) vertical scanlines.
            (self
             .vertical_scanlines_orig[~self.vertical_scanlines_orig.is_empty]
             .plot(color = 'k', ax=ax, alpha = 0.5)
            )
            (self
             .vertical_scanlines[~self.vertical_scanlines.is_empty]
             .plot(color = 'b', ax=ax)
            )
        for mask in self.masks:
            ax.plot(*mask.exterior.xy, color = 'k')
        if self.save_figures:
            plt.savefig(self.output_path+'masked_scanlines.pdf')
            plt.savefig(self.output_path+'masked_scanlines.png')
        plt.show(block=False)
def hull_horizontal_scanlines(self):
    """Trim horizontal scanlines to the convex hull of all traces."""
    self.horizontal_scanlines.geometry = [
        self.traces.unary_union.convex_hull.intersection(x)
        for x
        in self.horizontal_scanlines.geometry
    ]
    # Record the trimmed geometry and length alongside the originals.
    self.horizontal_scanlines['hull_trimmed'] = self.horizontal_scanlines.geometry
    self.horizontal_scanlines['trimmed_length'] = self.horizontal_scanlines.length
def hull_vertical_scanlines(self):
    """Trim vertical scanlines to the convex hull of all traces."""
    self.vertical_scanlines.geometry = [
        self.traces.unary_union.convex_hull.intersection(x)
        for x
        in self.vertical_scanlines.geometry
    ]
    # Record the trimmed geometry and length alongside the originals.
    self.vertical_scanlines['hull_trimmed'] = self.vertical_scanlines.geometry
    self.vertical_scanlines['trimmed_length'] = self.vertical_scanlines.length
def hull_scanlines(self):
    """Trim scanlines in the enabled direction(s) to the traces' convex hull,
    optionally plotting the hull and the trimmed scanlines."""
    if self.limit_direction_to != 'vertical':
        self.hull_horizontal_scanlines()
    if self.limit_direction_to != 'horizontal':
        self.hull_vertical_scanlines()
    print('Scanlines intersected with convex hull')
    if self.show_figures:
        _, ax = plt.subplots(1, 1)
        self.traces.plot(color = 'k', ax=ax)
        # Outline of the convex hull used for trimming.
        ax.plot(*self.traces.unary_union.convex_hull.exterior.xy, color = 'k')
        if self.limit_direction_to != 'vertical':
            (self
             .horizontal_scanlines[~self.horizontal_scanlines.is_empty]
             .plot(color = 'r', ax=ax))
        if self.limit_direction_to != 'horizontal':
            (self
             .vertical_scanlines[~self.vertical_scanlines.is_empty]
             .plot(color = 'b', ax=ax))
        if self.save_figures:
            plt.savefig(self.output_path+'hulled_scanlines.pdf')
            plt.savefig(self.output_path+'hulled_scanlines.png')
        plt.show(block=False)
def intersect_horizontal_scanlines(self):
    """Intersect traces with each horizontal scanline.

    Builds three parallel lists, one entry per scanline: the raw
    intersection results, the traces that were hit, and the intersection
    points (pure Point geometries only).
    """
    # FIX: removed the dead `if ~scanline.is_empty else geometry.Linestring()`
    # guard.  `~` on a Python bool is bitwise (~True == -2, ~False == -1,
    # both truthy), so the else branch could never run — and would have
    # crashed anyway since shapely's class is LineString, not Linestring.
    # Intersecting with an empty scanline already yields empty geometries,
    # which matches intersect_vertical_scanlines.
    self.horiz_scanline_intersections = [
        self.traces.intersection(other = scanline)
        for scanline
        in self.horizontal_scanlines.geometry
    ]
    self.horiz_scanline_intersected_traces = [
        self.traces[np.invert(intersection.geometry.is_empty)]
        for intersection
        in self.horiz_scanline_intersections
    ]
    self.horiz_scanline_intersected_points = [
        intersection[np.invert(intersection.is_empty)]
        for intersection
        in self.horiz_scanline_intersections
    ]
    # Keep only pure Point hits; collinear/overlapping results are dropped.
    point_bool = [
        x.geom_type == 'Point'
        for x in self.horiz_scanline_intersected_points
    ]
    self.horiz_scanline_intersected_points = [
        x.loc[y]
        for x,y in zip(self.horiz_scanline_intersected_points,point_bool)
    ]
    print('Horizontal scanlines and traces intersected')
def intersect_vertical_scanlines(self):
    """Intersect traces with each vertical scanline.

    Mirrors intersect_horizontal_scanlines: builds, per scanline, the raw
    intersection results, the traces that were hit, and the intersection
    points (pure Point geometries only).
    """
    self.vert_scanline_intersections = [
        self.traces.intersection(other = scanline)
        for scanline in self.vertical_scanlines.geometry
    ]
    self.vert_scanline_intersected_traces = [
        self.traces[np.invert(intersection.geometry.is_empty)]
        for intersection in self.vert_scanline_intersections
    ]
    self.vert_scanline_intersected_points = [
        intersection[np.invert(intersection.is_empty)]
        for intersection in self.vert_scanline_intersections
    ]
    # Keep only pure Point hits; collinear/overlapping results are dropped.
    point_bool = [
        x.geom_type == 'Point'
        for x in self.vert_scanline_intersected_points
    ]
    self.vert_scanline_intersected_points = [
        x.loc[y]
        for x,y in zip(self.vert_scanline_intersected_points,point_bool)
    ]
    print('Vertical scanlines and traces intersected')
def intersect_scanlines(self):
    """Intersect traces with scanlines in the enabled direction(s),
    optionally plotting the resulting intersection points."""
    if self.limit_direction_to != 'vertical':
        self.intersect_horizontal_scanlines()
    if self.limit_direction_to != 'horizontal':
        self.intersect_vertical_scanlines()
    if self.show_figures:
        _, ax = plt.subplots(1, 1)
        self.traces.plot(color = 'k', ax=ax, alpha=0.5)
        if self.limit_direction_to != 'vertical':
            (self
             .horizontal_scanlines[~self.horizontal_scanlines.is_empty]
             .plot(color = 'k', ax=ax, alpha = 0.5)
            )
            # Flatten the per-scanline point lists into one plottable series.
            points_series = convert_geo_list_to_geoseries(
                self.horiz_scanline_intersected_points
            )
            points_series.plot(color = 'r', ax=ax, markersize=10)
        if self.limit_direction_to != 'horizontal':
            (self
             .vertical_scanlines[~self.vertical_scanlines.is_empty]
             .plot(color = 'k', ax=ax, alpha = 0.5)
            )
            points_series = convert_geo_list_to_geoseries(
                self.vert_scanline_intersected_points
            )
            points_series.plot(color = 'b', ax=ax, markersize=10)
        if self.save_figures:
            plt.savefig(self.output_path+'intersected_scanlines.pdf')
            plt.savefig(self.output_path+'intersected_scanlines.png')
        plt.show(block=False)
def make_horiz_scanline_spacing_df(self):
    """Tabulate fracture-intersection spacing along each horizontal scanline.

    For every non-empty scanline that intersected at least one trace,
    records per point: x position, distance from the first fracture,
    spacing to the previous fracture, and the vertical extent ("height")
    of each hit, accumulating rows into ``horiz_scanline_spacing_df``.
    """
    for (i,scanline) in self.horizontal_scanlines.iterrows():
        if scanline.geometry.is_empty:
            continue
        if len(self.horiz_scanline_intersected_traces[i]) == 0:
            continue
        out_df = gpd.GeoDataFrame(
            {'x' : self.horiz_scanline_intersected_points[i].x},
            geometry = self.horiz_scanline_intersected_points[i]
        ).sort_values('x').reset_index()
        out_df['name'] = scanline['name']
        out_df['frac_num'] = np.array(out_df.index) + 1
        out_df['distance'] = out_df['x'] - out_df['x'].min()
        # First fracture gets spacing 0, then successive x differences.
        out_df['spacing'] = np.append(0,np.diff(out_df['x']))
        # Vertical extent of each hit: bounds columns 3 (maxy) - 1 (miny).
        out_df['height'] = np.array(
            self.horiz_scanline_intersected_points[i].bounds.iloc[:,3]
            - self.horiz_scanline_intersected_points[i].bounds.iloc[:,1]
        )
        # FIX: DataFrame.append was deprecated and removed in pandas 2.0;
        # under the old try/except AttributeError pattern, its removal would
        # silently reset the accumulator on every iteration.  Use pd.concat
        # with an explicit attribute check instead.
        if hasattr(self, 'horiz_scanline_spacing_df'):
            self.horiz_scanline_spacing_df = pd.concat(
                [self.horiz_scanline_spacing_df, out_df],
                ignore_index = True, sort = True
            )
        else:
            self.horiz_scanline_spacing_df = out_df
    print('Horizontal scanline spacing dataframe generated')
def make_vert_scanline_spacing_df(self):
    """Tabulate fracture-intersection spacing along each vertical scanline.

    For every non-empty scanline that intersected at least one trace,
    records per point: y position, distance from the first fracture,
    spacing to the previous fracture, and the horizontal extent of each
    hit, accumulating rows into ``vert_scanline_spacing_df``.
    """
    for (i,scanline) in self.vertical_scanlines.iterrows():
        if scanline.geometry.is_empty:
            continue
        if len(self.vert_scanline_intersected_traces[i]) == 0:
            continue
        out_df = gpd.GeoDataFrame(
            {'y' : self.vert_scanline_intersected_points[i].y},
            geometry = self.vert_scanline_intersected_points[i]
        ).sort_values('y').reset_index()
        out_df['name'] = scanline['name']
        out_df['frac_num'] = np.array(out_df.index) + 1
        out_df['distance'] = out_df['y'] - out_df['y'].min()
        # First fracture gets spacing 0, then successive y differences.
        out_df['spacing'] = np.append(0,np.diff(out_df['y']))
        # Horizontal extent of each hit: bounds columns 2 (maxx) - 0 (minx).
        # NOTE(review): the column is still called 'height' to mirror the
        # horizontal variant, though it measures width here — confirm.
        out_df['height'] = np.array(
            self.vert_scanline_intersected_points[i].bounds.iloc[:,2]
            - self.vert_scanline_intersected_points[i].bounds.iloc[:,0]
        )
        # FIX: DataFrame.append was deprecated and removed in pandas 2.0;
        # under the old try/except AttributeError pattern, its removal would
        # silently reset the accumulator on every iteration.  Use pd.concat
        # with an explicit attribute check instead.
        if hasattr(self, 'vert_scanline_spacing_df'):
            self.vert_scanline_spacing_df = pd.concat(
                [self.vert_scanline_spacing_df, out_df],
                ignore_index = True, sort = True
            )
        else:
            self.vert_scanline_spacing_df = out_df
    print('Vertical scanline spacing dataframe generated')
def make_scanline_spacing_dfs(self):
if self.limit_direction_to != 'vertical':
self.make_horiz_scanline_spacing_df()
if self.limit_direction_to != 'horizontal':
self.make_vert_scanline_spacing_df()
def calc_horizontal_scanline_stats(self):
self.horizontal_scanlines['frac_to_frac_length'] = [
max(points.x) - min(points.x)
if len(points) > 0
else np.nan
for points
in self.horiz_scanline_intersected_points
]
point_frac_list = list(
zip([len(point)
for point
in self.horiz_scanline_intersected_points],
self.horizontal_scanlines['frac_to_frac_length'])
)
self.horizontal_scanlines['p10_frac'] = (
[x[0]/x[1] if x[1] > 0 else np.nan
for x in point_frac_list]
)
point_trimmed_list = list(
zip([len(point)
for point
in self.horiz_scanline_intersected_points],
self.horizontal_scanlines['trimmed_length'])
)
self.horizontal_scanlines['p10_trimmed'] = (
| |
pnp.Vendor("Microcom", "MNP", datetime.date(1996, 11, 29)),
"MDX": pnp.Vendor("MicroDatec GmbH", "MDX", datetime.date(1999, 9, 13)),
"MRD": pnp.Vendor("MicroDisplay Corporation", "MRD", datetime.date(2007, 6, 14)),
"MDY": pnp.Vendor("Microdyne Inc", "MDY", datetime.date(1996, 12, 18)),
"MFG": pnp.Vendor("MicroField Graphics Inc", "MFG", datetime.date(1996, 11, 29)),
"MPJ": pnp.Vendor("Microlab", "MPJ", datetime.date(1997, 5, 23)),
"LAF": pnp.Vendor("Microline", "LAF", datetime.date(1999, 9, 13)),
"MLG": pnp.Vendor("Micrologica AG", "MLG", datetime.date(1998, 10, 6)),
"MMD": pnp.Vendor("Micromed Biotecnologia Ltd", "MMD", datetime.date(1996, 12, 11)),
"MMA": pnp.Vendor("Micromedia AG", "MMA", datetime.date(1997, 4, 24)),
"MCN": pnp.Vendor("Micron Electronics Inc", "MCN", datetime.date(1997, 2, 20)),
"MCI": pnp.Vendor("Micronics Computers", "MCI", datetime.date(1996, 11, 29)),
"MIP": pnp.Vendor("micronpc.com", "MIP", datetime.date(2000, 8, 10)),
"MYX": pnp.Vendor("Micronyx Inc", "MYX", datetime.date(1996, 11, 29)),
"MPX": pnp.Vendor("Micropix Technologies, Ltd.", "MPX", datetime.date(2001, 10, 8)),
"MSL": pnp.Vendor("MicroSlate Inc.", "MSL", datetime.date(1999, 5, 16)),
"PNP": pnp.Vendor("Microsoft", "PNP", datetime.date(2004, 3, 5)),
"MSH": pnp.Vendor("Microsoft", "MSH", datetime.date(1996, 11, 29)),
"PNG": pnp.Vendor("Microsoft", "PNG", datetime.date(1996, 11, 29)),
"WBN": pnp.Vendor("MicroSoftWare", "WBN", datetime.date(1998, 1, 14)),
"MSI": pnp.Vendor("Microstep", "MSI", datetime.date(1996, 11, 29)),
"MCT": pnp.Vendor("Microtec", "MCT", datetime.date(1996, 11, 29)),
"MTH": pnp.Vendor("Micro-Tech Hearing Instruments", "MTH", datetime.date(1997, 12, 15)),
"MKT": pnp.Vendor("MICROTEK Inc.", "MKT", datetime.date(2005, 7, 14)),
"MTK": pnp.Vendor("Microtek International Inc.", "MTK", datetime.date(2002, 2, 25)),
"MSY": pnp.Vendor("MicroTouch Systems Inc", "MSY", datetime.date(2000, 8, 10)),
"MVS": pnp.Vendor("Microvision", "MVS", datetime.date(2009, 2, 13)),
"MVD": pnp.Vendor("Microvitec PLC", "MVD", datetime.date(1996, 11, 29)),
"MWY": pnp.Vendor("Microway Inc", "MWY", datetime.date(1996, 11, 29)),
"MDC": pnp.Vendor("Midori Electronics", "MDC", datetime.date(1996, 11, 29)),
"SFT": pnp.Vendor("Mikroforum Ring 3", "SFT", datetime.date(2004, 11, 2)),
"MLS": pnp.Vendor("Milestone EPE", "MLS", datetime.date(1998, 8, 11)),
"MLM": pnp.Vendor("Millennium Engineering Inc", "MLM", datetime.date(1996, 11, 29)),
"MLL": pnp.Vendor("Millogic Ltd.", "MLL", datetime.date(2014, 1, 9)),
"MCX": pnp.Vendor("Millson Custom Solutions Inc.", "MCX", datetime.date(2013, 10, 17)),
"VTM": pnp.Vendor("Miltope Corporation", "VTM", datetime.date(2009, 9, 23)),
"MIM": pnp.Vendor("Mimio – A Newell Rubbermaid Company", "MIM", datetime.date(2012, 7, 31)),
"MTD": pnp.Vendor("MindTech Display Co. Ltd", "MTD", datetime.date(2007, 6, 14)),
"FTW": pnp.Vendor("MindTribe Product Engineering, Inc.", "FTW", datetime.date(2011, 2, 14)),
"MNC": pnp.Vendor("Mini Micro Methods Ltd", "MNC", datetime.date(1996, 11, 29)),
"MIN": pnp.Vendor("Minicom Digital Signage", "MIN", datetime.date(2010, 8, 13)),
"MMN": pnp.Vendor("MiniMan Inc", "MMN", datetime.date(1996, 11, 29)),
"MMF": pnp.Vendor("Minnesota Mining and Manufacturing", "MMF", datetime.date(2001, 3, 15)),
"MRA": pnp.Vendor("Miranda Technologies Inc", "MRA", datetime.date(1996, 11, 29)),
"MRL": pnp.Vendor("Miratel", "MRL", datetime.date(1998, 10, 16)),
"MIR": pnp.Vendor("Miro Computer Prod.", "MIR", datetime.date(1996, 11, 29)),
"MID": pnp.Vendor("miro Displays", "MID", datetime.date(1999, 3, 20)),
"MSP": pnp.Vendor("Mistral Solutions [P] Ltd.", "MSP", datetime.date(1998, 9, 23)),
"MII": pnp.Vendor("Mitec Inc", "MII", datetime.date(1996, 11, 29)),
"MTL": pnp.Vendor("Mitel Corporation", "MTL", datetime.date(1997, 8, 1)),
"MTR": pnp.Vendor("Mitron computer Inc", "MTR", datetime.date(1996, 11, 29)),
"MEL": pnp.Vendor("Mitsubishi Electric Corporation", "MEL", datetime.date(1996, 11, 29)),
"MEE": pnp.Vendor("Mitsubishi Electric Engineering Co., Ltd.", "MEE", datetime.date(2005, 10, 3)),
"KMC": pnp.Vendor("Mitsumi Company Ltd", "KMC", datetime.date(1996, 11, 29)),
"MJS": pnp.Vendor("MJS Designs", "MJS", datetime.date(1996, 11, 29)),
"MKS": pnp.Vendor("MK Seiko Co., Ltd.", "MKS", datetime.date(2013, 6, 18)),
"OHW": pnp.Vendor("M-Labs Limited", "OHW", datetime.date(2013, 11, 27)),
"MMS": pnp.Vendor("MMS Electronics", "MMS", datetime.date(1998, 2, 24)),
"FST": pnp.Vendor("Modesto PC Inc", "FST", datetime.date(1997, 2, 27)),
"MDD": pnp.Vendor("MODIS", "MDD", datetime.date(1999, 11, 8)),
"MIS": pnp.Vendor("Modular Industrial Solutions Inc", "MIS", datetime.date(1996, 11, 29)),
"MOD": pnp.Vendor("Modular Technology", "MOD", datetime.date(1997, 6, 9)),
"MOM": pnp.Vendor("Momentum Data Systems", "MOM", datetime.date(2008, 1, 18)),
"MNL": pnp.Vendor("Monorail Inc", "MNL", datetime.date(1997, 2, 18)),
"MYA": pnp.Vendor("Monydata", "MYA", datetime.date(1996, 11, 29)),
"MBV": pnp.Vendor("Moreton Bay", "MBV", datetime.date(2000, 1, 13)),
"MOS": pnp.Vendor("Moses Corporation", "MOS", datetime.date(1996, 11, 29)),
"MSV": pnp.Vendor("Mosgi Corporation", "MSV", datetime.date(1996, 11, 29)),
"MCO": pnp.Vendor("Motion Computing Inc.", "MCO", datetime.date(2002, 5, 30)),
"MTM": pnp.Vendor("Motium", "MTM", datetime.date(2012, 6, 19)),
"MSU": pnp.Vendor("motorola", "MSU", datetime.date(2001, 3, 15)),
"MCL": pnp.Vendor("Motorola Communications Israel", "MCL", datetime.date(2002, 7, 2)),
"MCG": pnp.Vendor("Motorola Computer Group", "MCG", datetime.date(1997, 8, 14)),
"MOT": pnp.Vendor("Motorola UDS", "MOT", datetime.date(1996, 11, 29)),
"MSC": pnp.Vendor("Mouse Systems Corporation", "MSC", datetime.date(1996, 11, 29)),
"MPC": pnp.Vendor("M-Pact Inc", "MPC", datetime.date(1996, 11, 29)),
"MPS": pnp.Vendor("mps Software GmbH", "MPS", datetime.date(1996, 11, 29)),
"MST": pnp.Vendor("MS Telematica", "MST", datetime.date(1997, 4, 28)),
"MEX": pnp.Vendor("MSC Vertriebs GmbH", "MEX", datetime.date(2012, 6, 4)),
"MSG": pnp.Vendor("MSI GmbH", "MSG", datetime.date(1999, 9, 13)),
"MSF": pnp.Vendor("M-Systems Flash Disk Pioneers", "MSF", datetime.date(1997, 12, 17)),
"MTN": pnp.Vendor("Mtron Storage Technology Co., Ltd.", "MTN", datetime.date(2008, 6, 17)),
"MUD": pnp.Vendor("Multi-Dimension Institute", "MUD", datetime.date(2000, 10, 23)),
"MMI": pnp.Vendor("Multimax", "MMI", datetime.date(1996, 11, 29)),
"MTS": pnp.Vendor("Multi-Tech Systems", "MTS", datetime.date(1996, 11, 29)),
"MWI": pnp.Vendor("Multiwave Innovation Pte Ltd", "MWI", datetime.date(1996, 11, 29)),
"MAI": pnp.Vendor("Mutoh America Inc", "MAI", datetime.date(1999, 9, 13)),
"MWR": pnp.Vendor("mware", "MWR", datetime.date(2001, 4, 24)),
"MLX": pnp.Vendor("Mylex Corporation", "MLX", datetime.date(1996, 11, 29)),
"MYR": pnp.Vendor("Myriad Solutions Ltd", "MYR", datetime.date(1996, 11, 29)),
"WYS": pnp.Vendor("Myse Technology", "WYS", datetime.date(1996, 11, 29)),
"NBL": pnp.Vendor("N*Able Technologies Inc", "NBL", datetime.date(1998, 4, 28)),
"NAD": pnp.Vendor("NAD Electronics", "NAD", datetime.date(2007, 6, 14)),
"NDK": pnp.Vendor("Naitoh Densei CO., LTD.", "NDK", datetime.date(2006, 4, 12)),
"NCP": pnp.Vendor("Najing CEC Panda FPD Technology CO. ltd", "NCP", datetime.date(2015, 2, 24)),
"NAK": pnp.Vendor("Nakano Engineering Co.,Ltd.", "NAK", datetime.date(2009, 7, 22)),
"NYC": pnp.Vendor("Nakayo Relecommunications, Inc.", "NYC", datetime.date(2000, 8, 10)),
"SCS": pnp.Vendor("<NAME>", "SCS", datetime.date(1996, 11, 29)),
"ADR": pnp.Vendor("Nasa Ames Research Center", "ADR", datetime.date(1996, 11, 29)),
"NDC": pnp.Vendor("National DataComm Corporaiton", "NDC", datetime.date(1996, 11, 29)),
"NDI": pnp.Vendor("National Display Systems", "NDI", datetime.date(2003, 8, 8)),
"NIC": pnp.Vendor("National Instruments Corporation", "NIC", datetime.date(1996, 11, 29)),
"NBS": pnp.Vendor("National Key Lab. on ISN", "NBS", datetime.date(1998, 7, 16)),
"NSC": pnp.Vendor("National Semiconductor Corporation", "NSC", datetime.date(1996, 11, 29)),
"TTB": pnp.Vendor("National Semiconductor Japan Ltd", "TTB", datetime.date(1997, 4, 14)),
"NTL": pnp.Vendor("National Transcomm. Ltd", "NTL", datetime.date(1996, 11, 29)),
"ZIC": pnp.Vendor("Nationz Technologies Inc.", "ZIC", datetime.date(2009, 3, 12)),
"NMS": pnp.Vendor("Natural Micro System", "NMS", datetime.date(1996, 11, 29)),
"NAT": pnp.Vendor("NaturalPoint Inc.", "NAT", datetime.date(2010, 9, 3)),
"NVT": pnp.Vendor("Navatek Engineering Corporation", "NVT", datetime.date(1998, 3, 2)),
"NME": pnp.Vendor("Navico, Inc.", "NME", datetime.date(2012, 11, 28)),
"NAV": pnp.Vendor("Navigation Corporation", "NAV", datetime.date(1999, 2, 22)),
"NAX": pnp.Vendor("Naxos Tecnologia", "NAX", datetime.date(1997, 12, 12)),
"DUN": pnp.Vendor("NCR Corporation", "DUN", datetime.date(2002, 4, 25)),
"NCC": pnp.Vendor("NCR Corporation", "NCC", datetime.date(1996, 11, 29)),
"NCR": pnp.Vendor("NCR Electronics", "NCR", datetime.date(1996, 11, 29)),
"NDF": pnp.Vendor("NDF Special Light Products B.V.", "NDF", datetime.date(2014, 9, 18)),
"DMV": pnp.Vendor("NDS Ltd", "DMV", datetime.date(1997, 6, 25)),
"NEC": pnp.Vendor("NEC Corporation", "NEC", datetime.date(2000, 5, 24)),
"NCT": pnp.Vendor("NEC CustomTechnica, Ltd.", "NCT", datetime.date(2002, 10, 23)),
"NMV": pnp.Vendor("NEC-Mitsubishi Electric Visual Systems Corporation", "NMV", datetime.date(2002, 2, 25)),
"NEO": pnp.Vendor("NEO TELECOM CO.,LTD.", "NEO", datetime.date(1999, 11, 8)),
"NMX": pnp.Vendor("Neomagic", "NMX", datetime.date(1996, 11, 29)),
"NTC": pnp.Vendor("NeoTech S.R.L", "NTC", datetime.date(1997, 11, 11)),
"NTX": pnp.Vendor("Netaccess Inc", "NTX", datetime.date(1997, 2, 7)),
"NCL": pnp.Vendor("NetComm Ltd", "NCL", datetime.date(1996, 11, 29)),
"NVC": pnp.Vendor("NetVision Corporation", "NVC", datetime.date(1996, 11, 29)),
"NAL": pnp.Vendor("Network Alchemy", "NAL", datetime.date(1997, 9, 30)),
"NDL": pnp.Vendor("Network Designers", "NDL", datetime.date(1996, 11, 29)),
"NGC": pnp.Vendor("Network General", "NGC", datetime.date(1997, 8, 26)),
"NIT": pnp.Vendor("Network Info Technology", "NIT", datetime.date(1996, 11, 29)),
"NPI": pnp.Vendor("Network Peripherals Inc", "NPI", datetime.date(1996, 11, 29)),
"NST": pnp.Vendor("Network Security Technology Co", "NST", datetime.date(1999, 2, 22)),
"NTW": pnp.Vendor("Networth Inc", "NTW", datetime.date(1996, 11, 29)),
"NSA": pnp.Vendor("NeuroSky, Inc.", "NSA", datetime.date(2013, 8, 28)),
"NEU": pnp.Vendor("NEUROTEC - EMPRESA DE PESQUISA E DESENVOLVIMENTO EM BIOMEDICINA", "NEU", datetime.date(2001, 3, 15)),
"NTI": pnp.Vendor("New Tech Int'l Company", "NTI", datetime.date(1996, 11, 29)),
"NCI": pnp.Vendor("NewCom Inc", "NCI", datetime.date(1997, 1, 9)),
"NWS": pnp.Vendor("Newisys, Inc.", "NWS", datetime.date(2002, 10, 8)),
"NSS": pnp.Vendor("Newport Systems Solutions", "NSS", datetime.date(1996, 11, 29)),
"NXG": pnp.Vendor("Nexgen", "NXG", datetime.date(1996, 11, 29)),
"NEX": pnp.Vendor("Nexgen Mediatech Inc.,", "NEX", datetime.date(2003, 11, 11)),
"NXQ": pnp.Vendor("Nexiq Technologies, Inc.", "NXQ", datetime.date(2001, 10, 8)),
"NLC": pnp.Vendor("Next Level Communications", "NLC", datetime.date(1996, 11, 29)),
"NXC": pnp.Vendor("NextCom K.K.", "NXC", datetime.date(1996, 11, 29)),
"NBT": pnp.Vendor("NingBo Bestwinning Technology CO., Ltd", "NBT", datetime.date(2006, 9, 5)),
"BOI": pnp.Vendor("NINGBO BOIGLE DIGITAL TECHNOLOGY CO.,LTD", "BOI", datetime.date(2009, 11, 25)),
"AVI": pnp.Vendor("Nippon Avionics Co.,Ltd", "AVI", datetime.date(2000, 10, 23)),
"GSB": pnp.Vendor("NIPPONDENCHI CO,.LTD", "GSB", datetime.date(2000, 5, 24)),
"NSI": pnp.Vendor("NISSEI ELECTRIC CO.,LTD", "NSI", datetime.date(2000, 1, 13)),
"NIS": pnp.Vendor("Nissei Electric Company", "NIS", datetime.date(1996, 11, 29)),
"NTS": pnp.Vendor("Nits Technology Inc.", "NTS", datetime.date(2006, 12, 19)),
"NCA": pnp.Vendor("Nixdorf Company", "NCA", datetime.date(1996, 11, 29)),
"NNC": pnp.Vendor("NNC", "NNC", datetime.date(1996, 11, 29)),
"NDS": pnp.Vendor("Nokia Data", "NDS", datetime.date(1996, 11, 29)),
"NOK": pnp.Vendor("Nokia Display Products", "NOK", datetime.date(1996, 11, 29)),
"NMP": pnp.Vendor("Nokia Mobile Phones", "NMP", datetime.date(1996, 11, 29)),
"NOR": pnp.Vendor("Norand Corporation", "NOR", datetime.date(1997, 3, 19)),
"NCE": pnp.Vendor("Norcent Technology, Inc.", "NCE", datetime.date(2007, 6, 20)),
"NOE": pnp.Vendor("NordicEye AB", "NOE", datetime.date(2009, 9, 23)),
"NOI": pnp.Vendor("North Invent A/S", "NOI", datetime.date(2010, 5, 4)),
"NCS": pnp.Vendor("Northgate Computer Systems", "NCS", datetime.date(1996, 11, 29)),
"NOT": pnp.Vendor("Not Limited Inc", "NOT", datetime.date(1998, 1, 30)),
"NWP": pnp.Vendor("NovaWeb Technologies Inc", "NWP", datetime.date(1998, 6, 12)),
"NVL": pnp.Vendor("Novell Inc", "NVL", datetime.date(1996, 11, 29)),
"NSP": pnp.Vendor("Nspire System Inc.", "NSP", datetime.date(2007, 2, 13)),
"NTR": pnp.Vendor("N-trig Innovative Technologies, Inc.", "NTR", datetime.date(2005, 10, 3)),
"NTT": pnp.Vendor("NTT Advanced Technology Corporation", "NTT", datetime.date(2004, 8, 19)),
| |
r"""*Direct expect-good API tests for* ``sphobjinv``.
``sphobjinv`` is a toolkit for manipulation and inspection of
Sphinx |objects.inv| files.
**Author**
<NAME> (<EMAIL>)
**File Created**
20 Mar 2019
**Copyright**
\(c) <NAME> 2016-2020
**Source Repository**
http://www.github.com/bskinn/sphobjinv
**Documentation**
http://sphobjinv.readthedocs.io
**License**
The MIT License; see |license_txt|_ for full license terms
**Members**
"""
import itertools as itt
import re
import pytest
import sphobjinv as soi
pytestmark = [pytest.mark.api, pytest.mark.local]
def no_op(val):
    """Identity passthrough; keeps Path arguments untouched in path-mode tests."""
    return val
# Callables applied to test paths before they reach the API, so each
# parametrized test runs once with a pathlib.Path and once with a str.
PATH_FXNS = (no_op, str)
PATH_FXN_IDS = ("no_op", "str")
@pytest.mark.parametrize(
    ["actual", "expect"],
    tuple(
        itt.zip_longest(
            soi.SourceTypes,  # actual
            [  # expect
                soi.SourceTypes.Manual,
                soi.SourceTypes.BytesPlaintext,
                soi.SourceTypes.BytesZlib,
                soi.SourceTypes.FnamePlaintext,
                soi.SourceTypes.FnameZlib,
                soi.SourceTypes.DictJSON,
                soi.SourceTypes.URL,
            ],
            # zip_longest + fillvalue=None makes any length mismatch between
            # the enum and the expected list fail loudly inside the test
            # (AttributeError on None.value).
            fillvalue=None,
        )
    ),
    ids=(lambda a: a.value if a else a),
)
def test_source_types_iteration(actual, expect):
    """Confirm that SourceTypes iterates in the expected order."""
    assert actual.value == expect.value
@pytest.mark.parametrize("path_fxn", PATH_FXNS, ids=PATH_FXN_IDS)
def test_api_compress(path_fxn, scratch_path, misc_info, sphinx_load_test):
    """Check that a compress attempt via API throws no errors.

    Reads the decompressed scratch inventory, zlib-compresses it, writes
    it back out, and checks the result loads (via the sphinx_load_test
    fixture).  ``path_fxn`` exercises both Path and str path arguments.
    """
    # Source: decompressed inventory; destination: compressed output.
    src_path = scratch_path / (
        misc_info.FNames.INIT.value + misc_info.Extensions.DEC.value
    )
    dest_path = scratch_path / (
        misc_info.FNames.MOD.value + misc_info.Extensions.CMP.value
    )
    b_dec = soi.readbytes(path_fxn(src_path))
    b_cmp = soi.compress(b_dec)
    soi.writebytes(path_fxn(dest_path), b_cmp)
    assert dest_path.is_file()
    sphinx_load_test(dest_path)
@pytest.mark.parametrize("path_fxn", PATH_FXNS, ids=PATH_FXN_IDS)
def test_api_decompress(path_fxn, scratch_path, misc_info, decomp_cmp_test):
    """Check that a decompress attempt via API throws no errors.

    Reads the compressed scratch inventory, decompresses it, writes the
    plaintext out, and compares against the reference decompressed file
    (via the decomp_cmp_test fixture).  ``path_fxn`` exercises both Path
    and str path arguments.
    """
    src_path = scratch_path / (
        misc_info.FNames.INIT.value + misc_info.Extensions.CMP.value
    )
    dest_path = scratch_path / (
        misc_info.FNames.MOD.value + misc_info.Extensions.DEC.value
    )
    b_cmp = soi.readbytes(path_fxn(src_path))
    b_dec = soi.decompress(b_cmp)
    soi.writebytes(path_fxn(dest_path), b_dec)
    assert dest_path.is_file()
    decomp_cmp_test(dest_path)
@pytest.mark.parametrize(
    ["element", "datadict"],
    (
        [
            0,
            {  # attr.Attribute py:class 1 api.html#$ -
                soi.DataFields.Name: b"attr.Attribute",
                soi.DataFields.Domain: b"py",
                soi.DataFields.Role: b"class",
                soi.DataFields.Priority: b"1",
                soi.DataFields.URI: b"api.html#$",
                soi.DataFields.DispName: b"-",
            },
        ],
        [
            -3,
            {  # slots std:label -1 examples.html#$ Slots
                soi.DataFields.Name: b"slots",
                soi.DataFields.Domain: b"std",
                soi.DataFields.Role: b"label",
                soi.DataFields.Priority: b"-1",
                soi.DataFields.URI: b"examples.html#$",
                soi.DataFields.DispName: b"Slots",
            },
        ],
    ),
)
def test_api_data_regex(element, datadict, bytes_txt, misc_info):
    """Confirm the regex for loading data lines is working properly.

    ``element`` indexes into the matches found in the reference
    inventory; ``datadict`` holds the expected regex group contents.
    (The redundant function-local ``import sphobjinv as soi``, which
    shadowed the module-level import, has been removed.)
    """
    # Prelim approximate check to be sure we're working with the
    # correct file/data.
    assert len(soi.re.pb_data.findall(bytes_txt)) == 56
    mchs = list(soi.re.pb_data.finditer(bytes_txt))
    assert mchs[element].groupdict() == {_.value: datadict[_] for _ in datadict}
@pytest.mark.xfail(
    reason="Will fail until .as_xxx properties are removed from attrs cmp"
)
def test_api_dataobjbytes_init(bytes_txt):  # pragma: no cover
    """Confirm the DataObjBytes type functions correctly.

    Builds one DataObjBytes from bytes regex groups and one from the
    same groups decoded to str; the two should compare equal, and the
    .as_str view should round-trip each field back to bytes.
    """
    mch = soi.pb_data.search(bytes_txt)
    # Same match groups, once as bytes and once decoded to str.
    b_mchdict = {_: mch.group(_) for _ in mch.groupdict()}
    s_mchdict = {_: b_mchdict[_].decode(encoding="utf-8") for _ in b_mchdict}
    b_dob = soi.DataObjBytes(**b_mchdict)
    s_dob = soi.DataObjBytes(**s_mchdict)
    assert b_dob == s_dob
    assert all(
        getattr(b_dob, _) == getattr(b_dob.as_str, _).encode("utf-8") for _ in b_mchdict
    )
@pytest.mark.xfail(
    reason="Will fail until .as_xxx properties are removed from attrs cmp"
)
def test_api_dataobjstr_init(bytes_txt):  # pragma: no cover
    """Confirm the DataObjStr type functions correctly.

    Mirror of the DataObjBytes test: bytes- and str-constructed
    instances should compare equal, and the .as_bytes view should
    round-trip each field back to str.
    """
    mch = soi.pb_data.search(bytes_txt)
    b_mchdict = {_: mch.group(_) for _ in mch.groupdict()}
    s_mchdict = {_: b_mchdict[_].decode(encoding="utf-8") for _ in b_mchdict}
    b_dos = soi.DataObjStr(**b_mchdict)
    s_dos = soi.DataObjStr(**s_mchdict)
    assert b_dos == s_dos
    assert all(
        getattr(s_dos, _) == getattr(b_dos.as_bytes, _).decode("utf-8")
        for _ in s_mchdict
    )
def test_api_dataobjbytes_flatdictfxn(bytes_txt):
    """Confirm DataObjBytes.json_dict() reproduces the raw regex groups."""
    match = soi.pb_data.search(bytes_txt)
    expected = {name: match.group(name) for name in match.groupdict()}
    assert soi.DataObjBytes(**expected).json_dict() == expected
def test_api_dataobjstr_flatdictfxn(bytes_txt):
    """Confirm DataObjStr.json_dict() matches the decoded regex groups."""
    match = soi.pb_data.search(bytes_txt)
    raw_groups = {name: match.group(name) for name in match.groupdict()}
    decoded = {name: val.decode("utf-8") for name, val in raw_groups.items()}
    assert soi.DataObjStr(**raw_groups).json_dict() == decoded
@pytest.mark.parametrize(
    ["dataobjtype", "regex", "lines"],
    (
        [soi.DataObjBytes, soi.pb_data, "byte_lines"],
        [soi.DataObjStr, soi.p_data, "str_lines"],
    ),
    # isinstance() instead of the type(i) == str anti-pattern; non-str
    # params (classes, patterns) get empty ids.
    ids=(lambda i: i if isinstance(i, str) else ""),
)
@pytest.mark.parametrize("dataline_arg", (True, False))
@pytest.mark.parametrize("init_expanded", (True, False))
def test_api_dataobj_datalinefxn(
    dataobjtype, regex, lines, init_expanded, dataline_arg, misc_info, subtests
):
    """Confirm that data line formatting function works.

    Test both str and bytes versions of the DataObj.
    Also provides further testing of flat_dict.
    """
    lines_obj = getattr(misc_info, lines)
    dobj = dataobjtype(**regex.search(lines_obj[init_expanded]).groupdict())
    # If dataline_arg is False, should match the value of init_expanded.
    # If dataline_arg is True, should match the True (expanded) value.
    # Thus, the only False (contracted) situation is with both values False.
    with subtests.test(msg="expand"):
        dl = dobj.data_line(expand=dataline_arg)
        assert dl == lines_obj[dataline_arg or init_expanded]
    # If dataline_arg is False, should match the value of init_expanded.
    # If dataline_arg is True, should match the False (contracted) value.
    # Thus, the only True (expanded) situation is when init_expanded == True
    # and dataline_arg == False.
    with subtests.test(msg="contract"):
        dl = dobj.data_line(contract=dataline_arg)
        assert dl == lines_obj[init_expanded and not dataline_arg]
@pytest.mark.xfail(
    reason="Will fail until .as_xxx properties are removed from attrs cmp"
)
@pytest.mark.parametrize(
    "use_bytes", (True, False), ids=(lambda b: "use_bytes_" + str(b))
)
def test_api_dataobj_evolvename(use_bytes, res_cmp):  # pragma: no cover
    """Confirm evolving new DataObj instances works properly.

    evolve() must produce an object carrying the new name, and evolving
    back to the old name must restore equality with the original.
    """
    inv = soi.Inventory(res_cmp)
    obj = inv.objects[5].as_bytes if use_bytes else inv.objects[5]  # Arbitrary choice
    oldname = obj.name
    newname = b"foo" if use_bytes else "foo"
    obj2 = obj.evolve(name=newname)
    obj3 = obj2.evolve(name=oldname)
    assert obj == obj3
    assert obj2.name == newname
def test_api_inventory_default_none_instantiation(subtests):
    """Confirm a bare Inventory() comes up empty with the Manual source type."""
    inv = soi.Inventory()
    checks = (
        ("project", lambda: inv.project is None),
        ("version", lambda: inv.version is None),
        ("count", lambda: inv.count == 0),
        ("source_type", lambda: inv.source_type is soi.SourceTypes.Manual),
    )
    for label, passes in checks:
        with subtests.test(msg=label):
            assert passes()
@pytest.mark.parametrize(
    ["source_type", "inv_arg"],
    [
        (soi.SourceTypes.BytesPlaintext, "plaintext"),
        (soi.SourceTypes.BytesZlib, "zlib"),
        (soi.SourceTypes.FnamePlaintext, "fname_plain"),
        (soi.SourceTypes.FnameZlib, "fname_zlib"),
    ],
    # isinstance() instead of the type(v) == str anti-pattern; enum
    # members get empty ids.
    ids=(lambda v: v if isinstance(v, str) else ""),
)
@pytest.mark.parametrize("path_fxn", PATH_FXNS, ids=PATH_FXN_IDS)
def test_api_inventory_bytes_fname_instantiation(
    source_type, inv_arg, path_fxn, res_path, misc_info, attrs_inventory_test, subtests
):
    """Check bytes and filename modes for Inventory instantiation.

    Each source type is tried both positionally and via its dedicated
    keyword argument; plaintext bytes additionally get a decoded-str run.
    """
    fname = misc_info.FNames.RES.value
    if source_type in (soi.SourceTypes.BytesPlaintext, soi.SourceTypes.FnamePlaintext):
        fname += misc_info.Extensions.DEC.value
    else:
        fname += misc_info.Extensions.CMP.value
    source = path_fxn(res_path / fname)
    if source_type in (soi.SourceTypes.BytesPlaintext, soi.SourceTypes.BytesZlib):
        # Passing in the actual inventory contents, and not just the location
        source = soi.readbytes(source)
    # General import, without a specified kwarg
    with subtests.test(msg="general"):
        attrs_inventory_test(soi.Inventory(source), source_type)
    # Importing with the respective kwarg for each source type
    with subtests.test(msg="specific"):
        inv = soi.Inventory(**{inv_arg: source})
        attrs_inventory_test(inv, source_type)
    # Special case for plaintext bytes, try decoding it
    if source_type is soi.SourceTypes.BytesPlaintext:
        with subtests.test(msg="plaintext_bytes"):
            inv = soi.Inventory(**{inv_arg: source.decode("utf-8")})
            attrs_inventory_test(inv, source_type)
@pytest.mark.parametrize("prop", ("none", "expand", "contract"))
def test_api_inventory_flatdict_jsonvalidate(prop, res_cmp):
    """Confirm that the flat_dict properties generate valid JSON."""
    import jsonschema

    inv = soi.Inventory(res_cmp)
    val = jsonschema.Draft4Validator(soi.json_schema)
    # 'none' means call json_dict() with no kwargs; otherwise pass
    # expand=True or contract=True respectively.
    kwarg = {} if prop == "none" else {prop: True}
    val.validate(inv.json_dict(**kwarg))
def test_api_inventory_flatdict_reimport(res_dec, attrs_inventory_test):
    """Confirm an Inventory can be rebuilt from its own json_dict() output."""
    original = soi.Inventory(res_dec)
    rebuilt = soi.Inventory(original.json_dict())
    attrs_inventory_test(rebuilt, soi.SourceTypes.DictJSON)
@pytest.mark.parametrize(
    "metadata",
    ["test string", {"this": "foo", "that": "bar"}, 42],
    # ids: extract the bare type name ('str', 'dict', 'int') from repr(type(v)).
    ids=(lambda v: re.search("'([^']+)'", str(type(v))).group(1)),
)
def test_api_inventory_flatdict_reimportwithmetadata(
    metadata, res_dec, attrs_inventory_test
):
    """Confirm re-import of a generated flat_dict with metadata."""
    inv = soi.Inventory(res_dec)
    d = inv.json_dict()
    # An arbitrary 'metadata' payload must not break the round-trip.
    d.update({"metadata": metadata})
    inv = soi.Inventory(d)
    attrs_inventory_test(inv, soi.SourceTypes.DictJSON)
def test_api_inventory_toosmallflatdict_importbutignore(res_dec):
    """Confirm no error when flat dict passed w/too few objs w/ignore.

    With count_error=False, a json_dict whose 'count' exceeds the number
    of object entries actually present should import without raising.
    """
    inv = soi.Inventory(res_dec)
    d = inv.json_dict()
    # Remove one object entry, leaving the stored count stale.
    d.pop("12")
    inv2 = soi.Inventory(d, count_error=False)
    # 55 b/c the loop continues past missing elements
    assert inv2.count == 55
def test_api_inventory_namesuggest(res_cmp, subtests):
    """Confirm object name suggestion is nominally working.

    Exercises suggest() with all four combinations of the
    with_index / with_score flags against a known match.
    """
    from numbers import Number

    rst = ":py:function:`attr.evolve`"
    idx = 6
    inv = soi.Inventory(str(res_cmp))
    # No test on the exact fuzzywuzzy match score in these since
    # it could change as fw continues development
    with subtests.test(msg="basic"):
        assert inv.suggest("evolve")[0] == rst
    with subtests.test(msg="index"):
        assert inv.suggest("evolve", with_index=True)[0] == (rst, idx)
    with subtests.test(msg="score"):
        rec = inv.suggest("evolve", with_score=True)
        assert rec[0][0] == rst
        assert isinstance(rec[0][1], Number)
    with subtests.test(msg="index_and_score"):
        rec = inv.suggest("evolve", with_index=True, with_score=True)
        assert rec[0][0] == rst
        assert isinstance(rec[0][1], Number)
        assert rec[0][2] == idx
# Must be run first, otherwise the fuzzywuzzy warning is consumed
# inappropriately
@pytest.mark.first
def test_api_fuzzywuzzy_warningcheck(misc_info):
    """Confirm only the Levenshtein warning is raised, if any are."""
    import warnings

    if misc_info.IN_PYPY:
        pytest.skip("Don't test warnings in PyPy")  # pragma: no cover
    with warnings.catch_warnings(record=True) as wc:
        warnings.simplefilter("always")
        from fuzzywuzzy import process  # noqa: F401
    # Try to import, and adjust tests accordingly
    try:
        import Levenshtein  # noqa: F401
    except ImportError:
        lev_present = False
    else:
        # Standard testing setup is WITHOUT python-Levenshtein, so this
        # branch is normally unreached (hence the no-cover pragmas below).
        lev_present = True  # pragma: no cover
    if lev_present:
        assert len(wc) == 0, "Warning unexpectedly raised"  # pragma: no cover
    else:
        assert len(wc) == 1, "Warning unexpectedly not raised"
        # 'message' will be a Warning instance, thus 'args[0]'
        # to retrieve the warning message as str.
        assert (
            "levenshtein" in wc[0].message.args[0].lower()
        ), "Warning raised for unexpected reason"
@pytest.mark.testall
def test_api_inventory_datafile_gen_and_reimport(
testall_inv_path,
res_path,
scratch_path,
misc_info,
sphinx_load_test,
pytestconfig,
subtests,
):
"""Confirm integrated data_file export/import behavior."""
fname = testall_inv_path.name
scr_fpath = scratch_path / fname
# Drop most unless testall
if not pytestconfig.getoption("--testall") and fname != "objects_attrs.inv":
pytest.skip("'--testall' not specified")
# Make Inventory
inv1 = soi.Inventory(str(res_path / fname))
# Generate new zlib file and reimport
data = inv1.data_file()
cmp_data = soi.compress(data)
soi.writebytes(str(scr_fpath), cmp_data)
inv2 = soi.Inventory(str(scr_fpath))
# Test the things
with subtests.test(msg="content"):
assert inv1.project == inv2.project
| |
<reponame>DiffEqML/neurodiffeq<filename>neurodiffeq/monitors.py
import math
import torch
import warnings
import matplotlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import seaborn as sns
from abc import ABC, abstractmethod
from ._version_utils import deprecated_alias
from .function_basis import RealSphericalHarmonics as _RealSphericalHarmonics
from .generators import Generator1D as _Generator1D
from .generators import Generator2D as _Generator2D
from .generators import Generator3D as _Generator3D
from .conditions import IrregularBoundaryCondition as _IrregularBC
from .operators import grad
def _updatable_contour_plot_available():
    """Return True when the installed matplotlib (>= 3.3.0) supports updatable contour plots."""
    from packaging.version import parse as vparse
    from matplotlib import __version__
    minimum_required = vparse('3.3.0')
    return vparse(__version__) >= minimum_required
class BaseMonitor(ABC):
    r"""A tool for checking the status of the neural network during training.

    A monitor keeps track of a matplotlib.figure.Figure instance and updates the plot
    whenever its ``check()`` method is called (usually by a ``neurodiffeq.solvers.BaseSolver`` instance).

    .. note::
        Currently, the ``check()`` method can only run synchronously.
        It blocks the training / validation process, so don't call the ``check()`` method too often.
    """

    def __init__(self, check_every=None):
        # Default to checking every 100 epochs when unspecified (or falsy).
        self.check_every = check_every or 100
        # Placeholder (Ellipsis); concrete subclasses create the Figure lazily.
        self.fig = ...
        # 'agg' is non-interactive, so plt.pause() calls are skipped for it.
        self.using_non_gui_backend = (matplotlib.get_backend() == 'agg')
        if matplotlib.get_backend() == 'module://ipykernel.pylab.backend_inline':
            warnings.warn(
                "You seem to be using jupyter notebook with '%matplotlib inline' "
                "which can lead to monitor plots not updating. "
                "Consider using '%matplotlib notebook' or '%matplotlib widget' instead.",
                UserWarning)

    @abstractmethod
    def check(self, nets, conditions, history):
        # Subclasses redraw their plots here.
        pass  # pragma: no cover

    def to_callback(self, fig_dir=None, format=None, logger=None):
        r"""Return a callback that updates the monitor plots, which will be run

        1. Every ``self.check_every`` epochs; and
        2. After the last local epoch.

        :param fig_dir: Directory for saving monitor figs; if not specified, figs will not be saved.
        :type fig_dir: str
        :param format: Format for saving figures: {'jpg', 'png' (default), ...}.
        :type format: str
        :param logger: The logger (or its name) to be used for the returned callback. Defaults to the 'root' logger.
        :type logger: str or ``logging.Logger``
        :return: The callback that updates the monitor plots.
        :rtype: neurodiffeq.callbacks.BaseCallback
        """
        # to avoid circular import
        from .callbacks import MonitorCallback, PeriodLocal, OnLastLocal
        action_cb = MonitorCallback(self, fig_dir=fig_dir, format=format, logger=logger)
        # Always fire on the last local epoch; additionally fire periodically
        # when check_every is set (callback `|` composes the conditions).
        condition_cb = OnLastLocal(logger=logger)
        if self.check_every:
            condition_cb = condition_cb | PeriodLocal(self.check_every, logger=logger)
        return condition_cb.set_action_callback(action_cb)
class MonitorSpherical(BaseMonitor):
r"""A monitor for checking the status of the neural network during training.
:param r_min:
The lower bound of radius,
i.e., radius of interior boundary.
:type r_min: float
:param r_max:
The upper bound of radius,
i.e., radius of exterior boundary.
:type r_max: float
:param check_every:
The frequency of checking the neural network represented by the number of epochs between two checks.
Defaults to 100.
:type check_every: int, optional
:param var_names:
Names of dependent variables.
If provided, shall be used for plot titles.
Defaults to None.
:type var_names: list[str]
:param shape:
Shape of mesh for visualizing the solution.
Defaults to (10, 10, 10).
:type shape: tuple[int]
:param r_scale:
'linear' or 'log'.
Controls the grid point in the :math:`r` direction.
Defaults to 'linear'.
:type r_scale: str
:param theta_min:
The lower bound of polar angle.
Defaults to :math:`0`.
:type theta_min: float
:param theta_max:
The upper bound of polar angle.
Defaults to :math:`\pi`.
:type theta_max: float
:param phi_min:
The lower bound of azimuthal angle.
Defaults to :math:`0`.
:type phi_min: float
:param phi_max:
The upper bound of azimuthal angle.
Defaults to :math:`2\pi`.
:type phi_max: float
"""
    def __init__(self, r_min, r_max, check_every=None, var_names=None, shape=(10, 10, 10), r_scale='linear',
                 theta_min=0.0, theta_max=math.pi, phi_min=0.0, phi_max=math.pi * 2):
        """Initializer method

        Builds the fixed (r, theta, phi) evaluation mesh once; ``check()``
        re-evaluates the networks on this same mesh at every call.
        """
        super(MonitorSpherical, self).__init__(check_every=check_every)
        self.contour_plot_available = _updatable_contour_plot_available()
        if not self.contour_plot_available:
            warnings.warn("Warning: contourf plot only available for matplotlib version >= v3.3.0 "
                          "switching to matshow instead")
        self.fig = None
        self.axs = []  # subplots
        self.ax_metrics = None
        self.ax_loss = None
        self.cbs = []  # color bars
        self.names = var_names
        self.shape = shape
        # input for neural network
        # For 'log' scaling, sample equally spaced points in log(r) space,
        # then map back with exp below.
        if r_scale == 'log':
            r_min, r_max = np.log(r_min), np.log(r_max)
        gen = _Generator3D(
            grid=shape,
            xyz_min=(r_min, theta_min, phi_min),
            xyz_max=(r_max, theta_max, phi_max),
            method='equally-spaced'
        )
        rs, thetas, phis = gen.get_examples()  # type: torch.Tensor, torch.Tensor, torch.Tensor
        if r_scale == 'log':
            rs = torch.exp(rs)
        # Column tensors fed to the networks ...
        self.r_tensor = rs.reshape(-1, 1)
        self.theta_tensor = thetas.reshape(-1, 1)
        self.phi_tensor = phis.reshape(-1, 1)
        # ... and flat numpy copies used as plot labels/coordinates.
        self.r_label = rs.reshape(-1).detach().cpu().numpy()
        self.theta_label = thetas.reshape(-1).detach().cpu().numpy()
        self.phi_label = phis.reshape(-1).detach().cpu().numpy()
        self.n_vars = None
@staticmethod
def _longitude_formatter(value, count):
value = int(round(value / math.pi * 180)) - 180
if value == 0 or abs(value) == 180:
marker = ''
elif value > 0:
marker = 'E'
else:
marker = 'W'
return f'{abs(value)}°{marker}'
@staticmethod
def _latitude_formatter(value, count):
value = int(round(value / math.pi * 180)) - 90
if value == 0:
marker = ''
elif value > 0:
marker = 'N'
else:
marker = 'S'
return f'{abs(value)}°{marker}'
    def _compute_us(self, nets, conditions):
        # Evaluate each network on the monitor's fixed (r, theta, phi) mesh,
        # with its boundary/initial condition enforced; return numpy arrays.
        r, theta, phi = self.r_tensor, self.theta_tensor, self.phi_tensor
        return [
            cond.enforce(net, r, theta, phi).detach().cpu().numpy()
            for net, cond in zip(nets, conditions)
        ]
    @deprecated_alias(loss_history='history')
    def check(self, nets, conditions, history, analytic_mse_history=None):
        r"""Draw (3n + 2) plots

        1. For each function :math:`u_i(r, \phi, \theta)`, there are 3 axes:

           - one ax for :math:`u`-:math:`r` curves grouped by :math:`\phi`
           - one ax for :math:`u`-:math:`r` curves grouped by :math:`\theta`
           - one ax for :math:`u`-:math:`\theta`-:math:`\phi` contour heat map

        2. Additionally, one ax for training and validation loss, another for the rest of the metrics

        :param nets:
            The neural networks that approximates the PDE.
        :type nets: list [`torch.nn.Module`]
        :param conditions:
            The initial/boundary condition of the PDE.
        :type conditions: list [`neurodiffeq.conditions.BaseCondition`]
        :param history:
            A dict of history of training metrics and validation metrics,
            where keys are metric names (str) and values are list of metrics values (list[float]).
            It must contain a 'train_loss' key and a 'valid_loss' key.
        :type history: dict[str, list[float]]
        :param analytic_mse_history:
            **[DEPRECATED]**
            Include 'train_analytic_mse' and 'valid_analytic_mse' in ``history`` instead.
        :type analytic_mse_history: dict['train': list[float], 'valid': list[float]], deprecated

        .. note::
            ``check`` is meant to be called by ``neurodiffeq.solvers.BaseSolver``.
        """
        # Migrate legacy 'train'/'valid' history keys to the '_loss' names.
        for key in ['train', 'valid']:
            if key in history:
                warnings.warn(f'`{key}` is deprecated, use `{key}_loss` instead', FutureWarning)
                history[key + '_loss'] = history.pop(key)
        if ('train_loss' not in history) or ('valid_loss' not in history):
            raise ValueError("Either 'train_loss' or 'valid_loss' not present in `history`.")
        # initialize the figure and axes here so that the Monitor knows the number of dependent variables and
        # shape of the figure, number of the subplots, etc.
        n_vars = len(nets) if self.n_vars is None else self.n_vars
        # One 3-plot row per variable, plus a loss row (and a metrics row
        # when `history` contains anything beyond the two loss series).
        n_row = (n_vars + 2) if len(history) > 2 else (n_vars + 1)
        n_col = 3
        if analytic_mse_history is not None:
            warnings.warn(
                "`analytic_mse_history` is deprecated. "
                "Include 'train_analytic_mse' and 'valid_analytic_mse' in ``history`` instead.",
                FutureWarning,
            )
            history['train_analytic_mse'] = analytic_mse_history['train']
            history['valid_analytic_mse'] = analytic_mse_history['valid']
        if not self.fig:
            # Lazily create the figure on the first check() call.
            self.fig = plt.figure(figsize=(24, 6 * n_row))
            self.fig.tight_layout()
            self.axs = self.fig.subplots(nrows=n_row, ncols=n_col, gridspec_kw={'width_ratios': [1, 1, 2]})
            # remove 1-1-2 empty axes, which will be replaced by ax_loss and ax_metrics
            for row in self.axs[n_vars:]:
                for ax in row:
                    ax.remove()
            self.cbs = [None] * n_vars
            if len(history) > 2:
                self.ax_loss = self.fig.add_subplot(n_row, 1, n_row - 1)
                self.ax_metrics = self.fig.add_subplot(n_row, 1, n_row)
            else:
                self.ax_loss = self.fig.add_subplot(n_row, 1, n_row)
        us = self._compute_us(nets, conditions)
        for i, u in enumerate(us):
            try:
                var_name = self.names[i]
            except (TypeError, IndexError):
                # var_names is None or shorter than us: use a generic name.
                var_name = f"u[{i}]"
            # prepare data for plotting
            u_across_r = u.reshape(*self.shape).mean(0)
            df = pd.DataFrame({
                '$r$': self.r_label,
                '$\\theta$': self.theta_label,
                '$\\phi$': self.phi_label,
                'u': u.reshape(-1),
            })
            # u-r curve grouped by phi
            ax = self.axs[i][0]
            self._update_r_plot_grouped_by_phi(var_name, ax, df)
            # u-r curve grouped by theta
            ax = self.axs[i][1]
            self._update_r_plot_grouped_by_theta(var_name, ax, df)
            # u-theta-phi heatmap/contourf depending on matplotlib version
            ax = self.axs[i][2]
            self._update_contourf(var_name, ax, u_across_r, colorbar_index=i)
        self._refresh_history(
            self.ax_loss,
            {name: history[name] for name in history if name in ['train_loss', 'valid_loss']},
            x_label='Epochs',
            y_label='Loss Value',
            title='Loss (Mean Squared Residual)',
        )
        if len(history) > 2:
            self._refresh_history(
                self.ax_metrics,
                {name: history[name] for name in history if name not in ['train_loss', 'valid_loss']},
                x_label='Epochs',
                y_label='Metric Values',
                title='Other metrics',
            )
        self.customization()
        self.fig.canvas.draw()
        # for command-line, interactive plots, not pausing can lead to graphs not being displayed at all
        # see https://stackoverflow.com/questions/
        # 19105388/python-2-7-mac-osx-interactive-plotting-with-matplotlib-not-working
        if not self.using_non_gui_backend:
            plt.pause(0.05)
def customization(self):
"""Customized tweaks can be implemented by overwriting this method.
"""
pass
    @staticmethod
    def _update_r_plot_grouped_by_phi(var_name, ax, df):
        # Redraw the u(r) line plot on `ax`, one hue per phi value.
        ax.clear()
        sns.lineplot(x='$r$', y='u', hue='$\\phi$', data=df, ax=ax)
        ax.set_title(f'{var_name}($r$) grouped by $\\phi$')
        ax.set_ylabel(var_name)
    @staticmethod
    def _update_r_plot_grouped_by_theta(var_name, ax, df):
        # Redraw the u(r) line plot on `ax`, one hue per theta value.
        ax.clear()
        sns.lineplot(x='$r$', y='u', hue='$\\theta$', data=df, ax=ax)
        ax.set_title(f'{var_name}($r$) grouped by $\\theta$')
        ax.set_ylabel(var_name)
# _update_contourf cannot be defined as a static method since it depends on self.contourf_plot_available
def _update_contourf(self, | |
#!/usr/bin/python
# Copyright (c) 2018 Cohesity Inc
# Apache License Version 2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import shutil
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.urls import open_url, urllib_error
from tempfile import mkstemp, mkdtemp
try:
# => When unit testing, we need to look in the correct location however, when run via ansible,
# => the expectation is that the modules will live under ansible.
from module_utils.storage.cohesity.cohesity_utilities import cohesity_common_argument_spec, \
raise__cohesity_exception__handler, REQUEST_TIMEOUT
from module_utils.storage.cohesity.cohesity_auth import get__cohesity_auth__token
except Exception as e:
from ansible.module_utils.storage.cohesity.cohesity_utilities import cohesity_common_argument_spec, \
raise__cohesity_exception__handler, REQUEST_TIMEOUT
from ansible.module_utils.storage.cohesity.cohesity_auth import get__cohesity_auth__token
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = '''
module: cohesity_agent
short_description: Management of Cohesity Physical Agent
description:
- Ansible Module used to deploy or remove the Cohesity Physical Agent from supported Linux Machines.
- When executed in a playbook, the Cohesity Agent installation will be validated and the appropriate
- state action will be applied. The most recent version of the Cohesity Agent will be automatically
- downloaded to the host.
version_added: '2.6.5'
author:
- <NAME> (github.com/exospheredata)
- Cohesity, Inc
options:
state:
description:
- Determines if the agent should be C(present) or C(absent) from the host
choices:
- present
- absent
default: 'present'
download_location:
description:
- Optional directory path to which the installer will be downloaded. If not selected, then a temporary
- directory will be created in the default System Temp Directory. When choosing an alternate directory,
- the directory and installer will not be deleted at the end of the execution.
service_user:
description:
- Username underwhich the Cohesity Agent will be installed and run.
- This user must exist unless I(create_user=True) is also configured.
- This user must be an existing user for native installation.
default: 'cohesityagent' for script based installation
service_group:
description:
- Group underwhich permissions will be configured for the Cohesity Agent configuration.
- This group must exist unless I(create_user=True) is also configured.
- This parameter doesn't apply for native installation
default: 'cohesityagent' for script based installation
create_user:
description:
- When enabled, will create a new user and group based on the values of I(service_user) and I(service_group)
- This parameter does not apply for native installations
type: bool
default: True
file_based:
description:
- When enabled, will install the agent in non-LVM mode and support only file based backups
type: bool
default: False
native_package:
description:
- When enabled, native installer packages are used based on the operating system
type: bool
default: False
download_uri:
description:
- The download uri from where the installer can be downloaded
default: ''
operating_system:
description:
- ansible_distribution from facts, this value is automatically populated. Not given by module user
extends_documentation_fragment:
- cohesity
requirements: []
'''
EXAMPLES = '''
# Install the current version of the agent on Linux
- cohesity_agent:
server: cohesity.lab
cohesity_admin: admin
cohesity_password: password
state: present
# Install the current version of the agent with custom User and Group
- cohesity_agent:
server: cohesity.lab
cohesity_admin: admin
cohesity_password: password
state: present
service_user: cagent
service_group: cagent
create_user: True
# Removes the current installed agent from the host
- cohesity_agent:
server: cohesity.lab
cohesity_admin: admin
cohesity_password: password
state: absent
# Download the agent installer to a custom location.
- cohesity_agent:
server: cohesity.lab
cohesity_admin: admin
cohesity_password: password
download_location: /software/installers
state: present
# Install the current version of the agent on Linux using native installers, the service user here should be an
# existing user
- cohesity_agent:
server: cohesity.lab
cohesity_admin: admin
cohesity_password: password
state: present
service_user: cagent
native_package: True
# Install the cohesity agent using native package downloaded from given URI. Here, the Cohesity cluster credentials are not required
- cohesity_agent:
state: present
service_user: cagent
native_package: True
download_uri: 'http://10.2.145.47/files/bin/installers/el-cohesity-agent-6.3-1.x86_64.rpm'
'''
RETURN = '''
'''
# Wait time in seconds used by agent install/uninstall flows -- presumably to
# let the service settle; confirm against callers outside this view.
SLEEP_TIME_SECONDS = 120
# Divisor for converting seconds to minutes.
SECONDS_MINUTES_CONVERSION = 60
class InstallError(Exception):
    """Raised by download_agent when the installer cannot be written to disk."""
    pass
def verify_dependencies():
    """Placeholder for package dependency validation (currently a no-op)."""
    # => TODO: Need to add package dependency checks for:
    # => wget, rsync, lsof, nfs-utils, lvm2
    pass
def check_agent(module, results):
    """Determine whether the Cohesity Agent is installed on this host.

    Sets ``results['version']`` to:
      * the agent version string, when an agent init script reports one;
      * ``"unknown"`` when an agent is present but no version is parseable;
      * ``False`` when no agent is installed.
    When no init script exists, also kills any orphaned ``linux_agent``
    processes found via ``ps``, failing the module if a kill fails.

    :param module: AnsibleModule instance (used to run shell commands).
    :param results: dict accumulating module results; mutated and returned.
    :return: the (mutated) results dict.
    """
    aix_agent_path = "/usr/local/cohesity/agent/aix_agent.sh"
    def_agent_path = "/etc/init.d/cohesity-agent"
    # Look for default ansible agent path and aix agent path.
    if os.path.exists(def_agent_path):
        agent_path = def_agent_path
    elif os.path.exists(aix_agent_path):
        agent_path = aix_agent_path
    else:
        agent_path = None
    if agent_path:
        rc, out, err = module.run_command("%s version" % agent_path)
        version = ""
        for line in out.split("\n"):
            if line.startswith('Version'):
                version = line.split(" ")[-1]
                break
        if version:
            # => When the agent is installed, we should be able to return
            # => the version information
            results['version'] = version
        else:
            # => If this didn't return a Version, then we have bigger problems
            # => and probably should try to re-install or force the uninstall.
            results['version'] = "unknown"
            results['check_agent'] = dict(
                stdout=out,
                stderr=err
            )
        return results
    elif os.path.exists("/etc/cohesity-agent"):
        # => Config directory exists without an init script: state unknown.
        results['version'] = "unknown"
        return results
    else:
        # => No agent installed; find (and kill) any orphaned agent processes.
        cmd = "ps -aux | grep crux/bin/linux_agent | grep -v python | awk '{print $2}'"
        rc, out, err = module.run_command(
            cmd, check_rc=True, use_unsafe_shell=True)
        if out:
            # NOTE: the original wrapped the format call below in a pointless
            # try/except BaseException; str.format cannot fail here, so the
            # fallback was dead code and has been removed.
            for process in out.split("\n"):
                if not process:
                    continue
                rc, out, err = module.run_command("kill -9 {0}".format(process))
                # 'No such process' simply means it already exited; any other
                # kill error is fatal for the module.
                if err and "No such process" not in err:
                    results['changed'] = False
                    results['Failed'] = True
                    results['check_agent'] = dict(
                        stdout=out,
                        stderr=err
                    )
                    results['process_id'] = process
                    module.fail_json(
                        msg="Failed to remove an orphaned Cohesity Agent service which is still running",
                        **results)
        results['version'] = False
        return results
def download_agent(module, path):
    """Download the Cohesity Agent installer into directory *path*.

    Source selection, based on module params:
      * AIX hosts pull the Java agent from the cluster API (authenticated);
      * otherwise, when ``download_uri`` is unset, the Linux installer
        (script, RPM, SuSE RPM, or DEB per OS/native_package) is pulled
        from the cluster API (authenticated);
      * when ``download_uri`` is set, that URI is fetched directly, no auth.

    :param module: AnsibleModule instance providing params and error reporting.
    :param path: directory in which to store the installer.
    :return: full path of the downloaded installer file.
    :raises InstallError: if the installer cannot be written to disk
        (reported via the module's exception handler).
    """
    try:
        if module.params.get('operating_system') == "AIX":
            server = module.params.get('cluster')
            token = get__cohesity_auth__token(module)
            uri = "https://" + server + \
                "/irisservices/api/v1/public/physicalAgents/download?hostType=kAix&agentType=kJava"
            headers = {
                "Accept": "application/octet-stream",
                "Authorization": "Bearer " + token,
                "user-agent": "cohesity-ansible/v2.3.0"}
        elif not module.params.get('download_uri'):
            os_type = "Linux"
            server = module.params.get('cluster')
            token = get__cohesity_auth__token(module)
            # Map the distro to the cluster API's package-type identifier.
            package_type = 'kScript'
            if module.params.get('native_package'):
                if module.params.get('operating_system') in ('CentOS', 'RedHat'):
                    package_type = 'kRPM'
                elif module.params.get('operating_system') == 'SLES':
                    package_type = 'kSuseRPM'
                elif module.params.get('operating_system') == 'Ubuntu':
                    package_type = 'kDEB'
            uri = "https://" + server + \
                "/irisservices/api/v1/public/physicalAgents/download?hostType=k" + os_type + '&pkgType=' + package_type
            headers = {
                "Accept": "application/octet-stream",
                "Authorization": "Bearer " + token,
                "user-agent": "cohesity-ansible/v2.3.0"}
        else:
            uri = module.params.get('download_uri')
            headers = {
                "Accept": "application/octet-stream",
                "user-agent": "cohesity-ansible/v2.3.0"}
        agent = open_url(url=uri, headers=headers,
                         validate_certs=False, timeout=REQUEST_TIMEOUT)
        resp_headers = agent.headers
        # Prefer the server-supplied filename, when provided.
        if 'content-disposition' in resp_headers.keys():
            filename = resp_headers['content-disposition'].split("=")[1]
        else:
            filename = 'cohesity-agent-installer'
        filename = path + "/" + filename
        # BUG FIX: the original used try/finally with f.close(); if open()
        # itself raised, the finally clause raised NameError on `f`, masking
        # the real error.  The `with` statement closes the file safely.
        try:
            with open(filename, "wb") as f:
                f.write(agent.read())
            os.chmod(filename, 0o755)
        except Exception as e:
            raise InstallError(e)
    except urllib_error.HTTPError as e:
        error_msg = json.loads(e.read())
        if 'message' in error_msg:
            module.fail_json(
                msg="Failed to download the Cohesity Agent",
                reason=error_msg['message'])
        else:
            raise__cohesity_exception__handler(e, module)
    except urllib_error.URLError as e:
        # => Capture and report any error messages.
        raise__cohesity_exception__handler(e.read(), module)
    except Exception as error:
        raise__cohesity_exception__handler(error, module)
    return filename
def installation_failures(module, stdout, rc, message):
    """Fail the Ansible module, extracting error/warning lines from stdout.

    The installer does not write failures to stderr; it *partially* completes
    even when dependencies are missing, so the only evidence of a problem is
    in stdout. We therefore scan stdout for lines containing ``Error:`` and
    ``WARNING:`` and report them via ``module.fail_json``, which halts the run.

    :param module: AnsibleModule handle used to raise the failure.
    :param stdout: full stdout captured from the installer.
    :param rc: installer exit code, reported as ``exitcode``.
    :param message: human-readable summary for the failure.
    """
    lines = stdout.split("\n")
    # Collect every line that mentions an error or a warning.
    errors = "\n".join(line for line in lines if 'Error:' in line)
    warnings = "\n".join(line for line in lines if 'WARNING:' in line)
    module.fail_json(
        changed=False,
        msg=message,
        error=errors,
        output=stdout,
        warn=warnings,
        exitcode=rc)
def install_agent(module, installer, native):
# => This command will run the self-extracting installer for the agent | |
<gh_stars>10-100
from numbers import Number
from pathlib import Path
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Union
import attr
import numpy as np
from attr.validators import in_
from attr.validators import instance_of
from attr.validators import optional
from barril.curve.curve import Curve
from barril.units import Array
from barril.units import Scalar
from .case_description_attributes import attrib_array
from .case_description_attributes import attrib_curve
from .case_description_attributes import attrib_dict_of
from .case_description_attributes import attrib_enum
from .case_description_attributes import attrib_instance
from .case_description_attributes import attrib_instance_list
from .case_description_attributes import attrib_scalar
from .case_description_attributes import collapse_array_repr
from .case_description_attributes import dict_of
from .case_description_attributes import dict_of_array
from .case_description_attributes import dict_with_scalar
from .case_description_attributes import InvalidReferenceError
from .case_description_attributes import list_of_strings
from .case_description_attributes import Numpy1DArray
from .case_description_attributes import numpy_array_validator
from .case_description_attributes import PhaseName
from alfasim_sdk._internal import constants
# [[[cog
# # This cog has no output, it just declares and imports symbols used by cogs in this module.
#
# from alfasim_sdk._internal import constants
# from alfasim_sdk._internal.alfacase.case_description_attributes import generate_multi_input
# from alfasim_sdk._internal.alfacase.case_description_attributes import generate_multi_input_dict
#
# def cog_out_multi_input(prop_name, category, default_value, unit):
# code = generate_multi_input(prop_name, category, default_value, unit)
# cog.out(code)
#
# def cog_out_multi_input_dict(prop_name, category):
# code = generate_multi_input_dict(prop_name, category)
# cog.out(code)
#
# ]]]
# [[[end]]] (checksum: d41d8cd98f00b204e9800998ecf8427e)
@attr.s(frozen=True, slots=True)
class PluginDescription:
    """Immutable description of a plugin: name, GUI model data and extra variables."""
    # Optional plugin name (validated as str when given).
    name: Optional[str] = attr.ib(default=None, validator=optional(instance_of(str)))
    # Arbitrary GUI model data; unvalidated mapping.
    gui_models = attr.ib(default=attr.Factory(dict))
    # Extra variables exposed by the plugin; unvalidated, defaults to None.
    additional_variables = attr.ib(default=None)
@attr.s(frozen=True, slots=True)
class PluginTracerReference:
    """Immutable reference to a tracer, identified by id (unvalidated)."""
    tracer_id = attr.ib(default=None)
@attr.s(frozen=True, slots=True)
class PluginInternalReference:
    """Immutable reference to another plugin item, identified by id (unvalidated)."""
    plugin_item_id = attr.ib(default=None)
@attr.s(frozen=True, slots=True)
class PluginMultipleReference:
    """Immutable reference to several items under a single container key."""
    container_key = attr.ib(default=None)
    # Ids of the referenced items; defaults to an empty list.
    item_id_list = attr.ib(default=attr.Factory(list))
@attr.s(frozen=True, slots=True)
class PluginTableContainer:
    """Immutable container of table data, keyed by column (unvalidated mapping)."""
    columns = attr.ib(default=attr.Factory(dict))
@attr.s(kw_only=True)
class _BaseTrendOutputDescription:
    """Shared base for trend output descriptions: optional name + curves to output."""
    name: Optional[str] = attr.ib(default=None, validator=optional(instance_of(str)))
    # Names of the curves to generate for this trend (list of str, required).
    curve_names: List[str] = attr.ib(validator=list_of_strings)
@attr.s()
class PositionalPipeTrendDescription(_BaseTrendOutputDescription):
    """
    .. include:: /alfacase_definitions/PositionalPipeTrendDescription.txt
    .. include:: /alfacase_definitions/list_of_unit_for_length.txt
    """
    # Attachment location for the output (OutputAttachmentLocation enum).
    location = attrib_enum(type_=constants.OutputAttachmentLocation)
    # Position along the element, a Scalar in the "length" category.
    position = attrib_scalar(category="length")
    # Name of the pipe element this trend refers to (required).
    element_name: str = attr.ib(validator=instance_of(str))
@attr.s()
class GlobalTrendDescription(_BaseTrendOutputDescription):
    """
    .. include:: /alfacase_definitions/GlobalTrendDescription.txt
    """
    # No attributes beyond the base class (name + curve_names).
@attr.s()
class OverallPipeTrendDescription(_BaseTrendOutputDescription):
    """
    .. include:: /alfacase_definitions/OverallPipeTrendDescription.txt
    """
    # Attachment location for the output (OutputAttachmentLocation enum).
    location = attrib_enum(type_=constants.OutputAttachmentLocation)
    # Name of the pipe element this trend refers to (required).
    element_name: str = attr.ib(validator=instance_of(str))
@attr.s()
class EquipmentTrendDescription(_BaseTrendOutputDescription):
    """
    .. include:: /alfacase_definitions/EquipmentTrendDescription.txt
    """
    # Name of the equipment this trend refers to (required).
    element_name: str = attr.ib(validator=instance_of(str))
@attr.s()
class SeparatorTrendDescription(_BaseTrendOutputDescription):
    """
    .. include:: /alfacase_definitions/SeparatorTrendDescription.txt
    """
    # Name of the separator this trend refers to (required).
    element_name: str = attr.ib(validator=instance_of(str))
@attr.s()
class ProfileOutputDescription:
    """
    .. include:: /alfacase_definitions/ProfileOutputDescription.txt
    """
    # Names of the curves to generate for this profile (list of str, required).
    curve_names: List[str] = attr.ib(validator=list_of_strings)
    # Attachment location for the output (OutputAttachmentLocation enum).
    location = attrib_enum(type_=constants.OutputAttachmentLocation)
    # Name of the element the profile refers to; annotation says str but the
    # validator accepts None as well.
    element_name: str = attr.ib(validator=optional(instance_of(str)))
@attr.s()
class TrendsOutputDescription:
    """
    .. include:: /alfacase_definitions/TrendsOutputDescription.txt
    """
    # One list per trend flavour; each defaults to an empty list.
    positional_pipe_trends = attrib_instance_list(PositionalPipeTrendDescription)
    overall_pipe_trends = attrib_instance_list(OverallPipeTrendDescription)
    global_trends = attrib_instance_list(GlobalTrendDescription)
    equipment_trends = attrib_instance_list(EquipmentTrendDescription)
    separator_trends = attrib_instance_list(SeparatorTrendDescription)
@attr.s(frozen=True, slots=True)
class CaseOutputDescription:
    """
    .. include:: /alfacase_definitions/CaseOutputDescription.txt
    .. include:: /alfacase_definitions/list_of_unit_for_time.txt
    """
    # Whether the trend output frequency is chosen automatically (default True);
    # trend_frequency below holds the fixed value (0.1 s default).
    automatic_trend_frequency: bool = attr.ib(default=True, validator=instance_of(bool))
    trends = attrib_instance(TrendsOutputDescription)
    trend_frequency = attrib_scalar(default=Scalar(0.1, "s"))
    # Same pair of settings for profiles.
    automatic_profile_frequency: bool = attr.ib(
        default=True, validator=instance_of(bool)
    )
    profiles = attrib_instance_list(ProfileOutputDescription)
    profile_frequency = attrib_scalar(default=Scalar(0.1, "s"))
@attr.s(kw_only=True)
class _MassSourceCommon:
    """
    Attributes shared by mass-source style descriptions.

    :ivar total_mass_flow_rate:
        Used when source_type == constants.MassSourceType.TotalMassFlowRatePvtSplit
    :ivar mass_flow_rates:
        Used when source_type == constants.MassSourceType.MassFlowRates
    :ivar gas_oil_ratio:
        Used when source_type is one of:
        - constants.MassSourceType.FlowRateGasGorWc
        - constants.MassSourceType.FlowRateOilGorWc
        - constants.MassSourceType.FlowRateWaterGorWc
    :ivar volumetric_flow_rates_std:
        Used when source_type is one of:
        - AllVolumetricFlowRates (All phases are filled)
        - constants.MassSourceType.FlowRateGasGorWc (Only the Gas phase if filled)
        - constants.MassSourceType.FlowRateOilGorWc (Only the Oil phase if filled)
        - constants.MassSourceType.FlowRateWaterGorWc (Only the Water phase if filled)
    :ivar water_cut:
        Used when the Hydrodynamic model has Water phase and source_type is one of:
        - constants.MassSourceType.FlowRateGasGorWc
        - constants.MassSourceType.FlowRateOilGorWc
        - constants.MassSourceType.FlowRateWaterGorWc
    .. include:: /alfacase_definitions/list_of_unit_for_temperature.txt
    .. include:: /alfacase_definitions/list_of_unit_for_volume_flow_rate.txt
    .. include:: /alfacase_definitions/list_of_unit_for_mass_flow_rate.txt
    .. include:: /alfacase_definitions/list_of_unit_for_volume_fraction.txt
    .. include:: /alfacase_definitions/list_of_unit_for_standard_volume_per_standard_volume.txt
    """
    # NOTE: regions delimited by "[[[cog ... ]]]" / "[[[end]]]" are generated by
    # the cog tool; the checksum on each [[[end]]] line guards against manual
    # edits, so never change those regions by hand.
    # Optional name of the fluid associated with this source.
    fluid: Optional[str] = attr.ib(default=None, validator=optional(instance_of(str)))
    # Tracer mass fractions; empty by default.
    # NOTE(review): _PressureSourceCommon passes an explicit "mass fraction"
    # category to Array here — confirm whether the "-" default matters.
    tracer_mass_fraction: Array = attr.ib(
        default=Array([], "-"),
        validator=instance_of(Array),
        metadata={"type": "array", "category": "mass fraction"},
    )
    # [[[cog
    # cog_out_multi_input("temperature", "temperature", constants.DEFAULT_TEMPERATURE_IN_K, "K")
    # ]]]
    # fmt: off
    temperature_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    temperature = attrib_scalar(
        default=Scalar('temperature', 288.6, 'K')
    )
    temperature_curve = attrib_curve(
        default=Curve(Array('temperature', [], 'K'), Array('time', [], 's'))
    )
    # fmt: on
    # [[[end]]] (checksum: cfa3eacaa542b1544f9501cfc1bbc800)
    # Selects which of the attributes below drive the source (see docstring).
    source_type = attrib_enum(default=constants.MassSourceType.MassFlowRates)
    # [[[cog
    # cog_out_multi_input_dict("volumetric_flow_rates_std", "standard volume per time")
    # ]]]
    # fmt: off
    volumetric_flow_rates_std_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    volumetric_flow_rates_std: Dict[str, Scalar] = attr.ib(
        default=attr.Factory(dict), validator=dict_of(Scalar),
        metadata={"type": "scalar_dict", "category": 'standard volume per time'},
    )
    volumetric_flow_rates_std_curve: Dict[str, Curve] = attr.ib(
        default=attr.Factory(dict), validator=dict_of(Curve),
        metadata={"type": "curve_dict", "category": 'standard volume per time'},
    )
    # fmt: on
    # [[[end]]] (checksum: 90ffdd6b31ca61d3a254a2a4163470b5)
    # [[[cog
    # cog_out_multi_input_dict("mass_flow_rates", "mass flow rate")
    # ]]]
    # fmt: off
    mass_flow_rates_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    mass_flow_rates: Dict[str, Scalar] = attr.ib(
        default=attr.Factory(dict), validator=dict_of(Scalar),
        metadata={"type": "scalar_dict", "category": 'mass flow rate'},
    )
    mass_flow_rates_curve: Dict[str, Curve] = attr.ib(
        default=attr.Factory(dict), validator=dict_of(Curve),
        metadata={"type": "curve_dict", "category": 'mass flow rate'},
    )
    # fmt: on
    # [[[end]]] (checksum: 14466fad7202e819caa161ebf875697c)
    # [[[cog
    # cog_out_multi_input("total_mass_flow_rate", "mass flow rate", 1.0, "kg/s")
    # ]]]
    # fmt: off
    total_mass_flow_rate_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    total_mass_flow_rate = attrib_scalar(
        default=Scalar('mass flow rate', 1.0, 'kg/s')
    )
    total_mass_flow_rate_curve = attrib_curve(
        default=Curve(Array('mass flow rate', [], 'kg/s'), Array('time', [], 's'))
    )
    # fmt: on
    # [[[end]]] (checksum: 311c423906e498a67edbf00f8ef5779d)
    # [[[cog
    # cog_out_multi_input("water_cut", "volume fraction", 0.0, "-")
    # ]]]
    # fmt: off
    water_cut_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    water_cut = attrib_scalar(
        default=Scalar('volume fraction', 0.0, '-')
    )
    water_cut_curve = attrib_curve(
        default=Curve(Array('volume fraction', [], '-'), Array('time', [], 's'))
    )
    # fmt: on
    # [[[end]]] (checksum: fd5599325fa31c6221db6f11e2fc1123)
    # [[[cog
    # cog_out_multi_input("gas_oil_ratio", "standard volume per standard volume", 0.0, "sm3/sm3")
    # ]]]
    # fmt: off
    gas_oil_ratio_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    gas_oil_ratio = attrib_scalar(
        default=Scalar('standard volume per standard volume', 0.0, 'sm3/sm3')
    )
    gas_oil_ratio_curve = attrib_curve(
        default=Curve(Array('standard volume per standard volume', [], 'sm3/sm3'), Array('time', [], 's'))
    )
    # fmt: on
    # [[[end]]] (checksum: 0cc220bf4175710b45e45e6f4cc58ddd)
@attr.s(kw_only=True)
class _PressureSourceCommon:
    """
    Attributes shared by pressure-source style descriptions.

    .. include:: /alfacase_definitions/list_of_unit_for_pressure.txt
    .. include:: /alfacase_definitions/list_of_unit_for_temperature.txt
    .. include:: /alfacase_definitions/list_of_unit_for_mass_fraction.txt
    .. include:: /alfacase_definitions/list_of_unit_for_volume_fraction.txt
    .. include:: /alfacase_definitions/list_of_unit_for_standard_volume_per_standard_volume.txt
    """
    # NOTE: regions delimited by "[[[cog ... ]]]" / "[[[end]]]" are generated by
    # the cog tool; the checksum on each [[[end]]] line guards against manual
    # edits, so never change those regions by hand.
    # [[[cog
    # cog_out_multi_input("pressure", "pressure", 1.0e5, "Pa")
    # ]]]
    # fmt: off
    pressure_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    pressure = attrib_scalar(
        default=Scalar('pressure', 100000.0, 'Pa')
    )
    pressure_curve = attrib_curve(
        default=Curve(Array('pressure', [], 'Pa'), Array('time', [], 's'))
    )
    # fmt: on
    # [[[end]]] (checksum: 31e9f12ceb2313cace8f856fc15581a5)
    # [[[cog
    # cog_out_multi_input("temperature", "temperature", constants.DEFAULT_TEMPERATURE_IN_K, "K")
    # ]]]
    # fmt: off
    temperature_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    temperature = attrib_scalar(
        default=Scalar('temperature', 288.6, 'K')
    )
    temperature_curve = attrib_curve(
        default=Curve(Array('temperature', [], 'K'), Array('time', [], 's'))
    )
    # fmt: on
    # [[[end]]] (checksum: cfa3eacaa542b1544f9501cfc1bbc800)
    # Optional name of the fluid associated with this source.
    fluid: Optional[str] = attr.ib(default=None, validator=optional(instance_of(str)))
    # Tracer mass fractions; empty by default.
    tracer_mass_fraction: Array = attr.ib(
        default=Array([], "-", "mass fraction"),
        validator=instance_of(Array),
        metadata={"type": "array", "category": "mass fraction"},
    )
    # How the inflow is split between phases (MassInflowSplitType enum).
    split_type = attrib_enum(
        default=constants.MassInflowSplitType.ConstantVolumeFraction
    )
    # [[[cog
    # cog_out_multi_input_dict("mass_fractions", "mass fraction")
    # ]]]
    # fmt: off
    mass_fractions_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    mass_fractions: Dict[str, Scalar] = attr.ib(
        default=attr.Factory(dict), validator=dict_of(Scalar),
        metadata={"type": "scalar_dict", "category": 'mass fraction'},
    )
    mass_fractions_curve: Dict[str, Curve] = attr.ib(
        default=attr.Factory(dict), validator=dict_of(Curve),
        metadata={"type": "curve_dict", "category": 'mass fraction'},
    )
    # fmt: on
    # [[[end]]] (checksum: cc96caed7be4897551ce0afd2c3af9f8)
    # [[[cog
    # cog_out_multi_input_dict("volume_fractions", "volume fraction")
    # ]]]
    # fmt: off
    volume_fractions_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    volume_fractions: Dict[str, Scalar] = attr.ib(
        default=attr.Factory(dict), validator=dict_of(Scalar),
        metadata={"type": "scalar_dict", "category": 'volume fraction'},
    )
    volume_fractions_curve: Dict[str, Curve] = attr.ib(
        default=attr.Factory(dict), validator=dict_of(Curve),
        metadata={"type": "curve_dict", "category": 'volume fraction'},
    )
    # fmt: on
    # [[[end]]] (checksum: 73f1389ef2912c079dc3fad3cec8334b)
    # [[[cog
    # cog_out_multi_input("gas_liquid_ratio", "standard volume per standard volume", 0.0, "sm3/sm3")
    # ]]]
    # fmt: off
    gas_liquid_ratio_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    gas_liquid_ratio = attrib_scalar(
        default=Scalar('standard volume per standard volume', 0.0, 'sm3/sm3')
    )
    gas_liquid_ratio_curve = attrib_curve(
        default=Curve(Array('standard volume per standard volume', [], 'sm3/sm3'), Array('time', [], 's'))
    )
    # fmt: on
    # [[[end]]] (checksum: 8799b62448023477ae46a4351289a493)
    # [[[cog
    # cog_out_multi_input("gas_oil_ratio", "standard volume per standard volume", 0.0, "sm3/sm3")
    # ]]]
    # fmt: off
    gas_oil_ratio_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    gas_oil_ratio = attrib_scalar(
        default=Scalar('standard volume per standard volume', 0.0, 'sm3/sm3')
    )
    gas_oil_ratio_curve = attrib_curve(
        default=Curve(Array('standard volume per standard volume', [], 'sm3/sm3'), Array('time', [], 's'))
    )
    # fmt: on
    # [[[end]]] (checksum: 0cc220bf4175710b45e45e6f4cc58ddd)
    # [[[cog
    # cog_out_multi_input("water_cut", "volume fraction", 0.0, "-")
    # ]]]
    # fmt: off
    water_cut_input_type = attrib_enum(default=constants.MultiInputType.Constant)
    water_cut = attrib_scalar(
        default=Scalar('volume fraction', 0.0, '-')
    )
    water_cut_curve = attrib_curve(
        default=Curve(Array('volume fraction', [], '-'), Array('time', [], 's'))
    )
    # fmt: on
    # [[[end]]] (checksum: fd5599325fa31c6221db6f11e2fc1123)
@attr.s(frozen=True, slots=True)
class CompositionDescription:
    """
    :ivar component:
        Name of the component available created on:
        PvtModelCompositionalDescription.light_components
        PvtModelCompositionalDescription.heavy_components
    .. note:: CompositionDescription can only refer to components created from the same PvtModelCompositionalDescription
    .. include:: /alfacase_definitions/CompositionDescription.txt
    .. include:: /alfacase_definitions/list_of_unit_for_mole_per_mole.txt
    .. include:: /alfacase_definitions/list_of_unit_for_molar_thermodynamic_energy.txt
    """
    # Component name (required).
    component: str = attr.ib(validator=instance_of(str))
    # Defaults: 0 mol/mol and 0 J/mol.
    molar_fraction = attrib_scalar(default=Scalar(0, "mol/mol"))
    reference_enthalpy = attrib_scalar(default=Scalar(0, "J/mol"))
@attr.s(frozen=True, slots=True)
class BipDescription:
    """
    Binary interaction parameter between two components.

    .. include:: /alfacase_definitions/BipDescription.txt
    """
    component_1: str = attr.ib(validator=instance_of(str))
    component_2: str = attr.ib(validator=instance_of(str))
    # Coerced to float on construction.
    value: float = attr.ib(validator=instance_of(float), converter=float)
@attr.s(frozen=True, slots=True)
class FluidDescription:
    """
    .. include:: /alfacase_definitions/FluidDescription.txt
    """
    # Component compositions and their binary interaction parameters.
    composition = attrib_instance_list(CompositionDescription)
    fraction_pairs = attrib_instance_list(BipDescription)
@attr.s(slots=True, kw_only=True)
class MassSourceEquipmentDescription(_MassSourceCommon):
    """
    .. include:: /alfacase_definitions/MassSourceEquipmentDescription.txt
    .. include:: /alfacase_definitions/list_of_unit_for_length.txt
    """
    # Position of the source, a Scalar in the "length" category (required).
    position = attrib_scalar(category="length")
@attr.s(frozen=True, slots=True, kw_only=True)
class SpeedCurveDescription:
    """
    .. include:: /alfacase_definitions/SpeedCurveDescription.txt
    .. include:: /alfacase_definitions/list_of_unit_for_length.txt
    .. include:: /alfacase_definitions/list_of_unit_for_angle_per_time.txt
    """
    # Time samples and the corresponding speeds; defaults describe a constant 500 rpm.
    time: Array = attr.ib(default=Array([0], "s"), validator=instance_of(Array))
    speed: Array = attr.ib(default=Array([500], "rpm"), validator=instance_of(Array))
# fmt: off
@attr.s(frozen=True, slots=True, kw_only=True)
class TablePumpDescription:
"""
.. include:: /alfacase_definitions/TablePumpDescription.txt
.. include:: /alfacase_definitions/list_of_unit_for_angle_per_time.txt
.. include:: /alfacase_definitions/list_of_unit_for_volume_flow_rate.txt
.. include:: /alfacase_definitions/list_of_unit_for_volume_fraction.txt
.. include:: /alfacase_definitions/list_of_unit_for_pressure.txt
"""
speeds: Array = attr.ib(
default=Array([0.0] * 12 + [400.0] * 12 + [600.0] * 12, 'rpm'), validator=instance_of(Array)
)
void_fractions: Array = attr.ib(
default=Array(([0.0] * 6 + [0.1] * 6) * 3, '-'), validator=instance_of(Array)
)
flow_rates: Array = attr.ib(
| |
# TODO merge common tests and Natural Language ones
# default libraries
import os
# user interface
import tkinter as tk
from datetime import datetime
from tkinter import filedialog
from msvcrt import getch
import random
import numpy as np
from scipy.io.wavfile import read, write
from .ABC_weighting import a_weight
# custom libraries
from .dsp import get_rms, add_gain, SaturationError
from .configure import load_list
from .play import play_data
from .recorder import Recorder
from .cli_tools import print_square, clear_console, show_image
from . import metadata
from threading import Thread
# Hidden Tk root so file dialogs can be shown without a visible main window.
root = tk.Tk()
root.wm_attributes("-topmost", 1)  # keep dialogs above other windows
root.withdraw()
# Program start-up directory.
cPath = os.getcwd()
# Three-letter language codes mapped to human-readable names.
langDict = {"ARW": "Arabic",
            "CHC": "Chinese",
            "DUN": "Dutch",
            "ENG": "English (UK)",
            "ENA": "English (Australia)",
            "ENI": "English (India)",
            "ENU": "English (USA)",
            "FRF": "French (France)",
            "FRC": "French (Canada)",
            "GED": "German",
            "GED_NLU": "German (Natural language)",
            "ITA": "Italian",
            "ITA_NLU": "Italian (Natural language)",
            "JPJ": "Japanese",
            "KRK": "Korean",
            "PLP": "Polish",
            "PTP": "Portuguese (Portugal)",
            "PTB": "Portuguese (Brazil)",
            "RUR": "Russian",
            "SPE": "Spanish (Spain)",
            "SPM": "Spanish (Mexico)",
            "TRT": "Turkish"
            }
# Playful status messages shown to the tester while waiting.
nonsense = ["Collecting shells...", "Parkouring...", "Harvesting potatoes...", "Eating sugar", "Holding beers...",
            "Destroying the Death Star", "Learning Kung Fu...", "Fixing the time machine", "Unboxing cats...",
            "Parking Millennium Falcon..."]
class CorruptedTestError(Exception):
    """Error raised for a corrupted test."""
    pass
class TestExistsError(Exception):
    """Raised when creating a test whose directory already exists."""
    pass
def sort_dict(dictionary):
    """Return a new dict with the same keys, ordered by their integer value.

    Keys must be strings parseable as int (e.g. test indices). The returned
    dict's insertion order is ascending numeric order, so iteration yields
    "2" before "10" (plain string sorting would not).
    """
    # sorted() + dict comprehension replaces the manual key-collection loops.
    return {str(key): dictionary[str(key)]
            for key in sorted(int(k) for k in dictionary)}
def _abs_to_rel(path):
"""
Convert path from absolute to relative
"""
cwd = os.getcwd().replace("\\", "/")
return path.split(cwd)[-1]
def splash():
    """Clear the console and print the VoRTEx logo and version banner."""
    clear_console()
    show_image("./utilities/logo.txt")
    banner = (
        f"VoRTEx {metadata['version']} - Voice Recognition Test Execution\n"
        f"{metadata['description_short']}\n"
        "\n"
        f"Os: {metadata['os']}\n"
        f"{metadata['copyright']}\n"
        f"email: {metadata['email']}\n"
        f"{metadata['url']}"
    )
    print_square(banner, margin=[20, 20, 1, 1], centering="center")
def clr_tmp():
    """Delete the leftover temp.wav file; a missing file is not an error."""
    try:
        os.remove("temp.wav")
    except FileNotFoundError:
        # Nothing to clean up.
        pass
def now():
    """Return the current local date and time as 'YYYY/MM/DD_hh:mm:ss'."""
    return datetime.now().strftime('%Y/%m/%d_%H:%M:%S')
def log(event, log_name="test_status.log", log_time=None):
    """
    Log every test event with a timestamp.

    Appends "<timestamp>\t<event>\n" to *log_name* (UTF-16 encoded).
    When *log_time* is None the current time is used.
    """
    print("Logging into %s" % log_name)
    if log_time is None:
        log_time = now()
    with open(log_name, "a", encoding="utf-16") as handle:
        handle.write("%s\t%s\n" % (log_time, event))
def show_dirs(path):
    """Return the names of the immediate subdirectories of *path*.

    Order follows os.listdir (platform-dependent); files are excluded.
    """
    # Comprehension replaces the manual append loop.
    return [name for name in os.listdir(path)
            if os.path.isdir(os.path.join(path, name))]
# noinspection SpellCheckingInspection
def lombard(noise):
    """
    The noise is expressed in dBA

    Returns the Lombard-effect gain, rounded to 3 decimals: 0 below 50 dBA,
    a linear ramp of 8/27 dB per dBA between 50 and 77 dBA, capped at 8 above.
    """
    if noise > 77:
        gain = 8
    elif noise >= 50:
        gain = (8 / 27) * (noise - 50)
    else:
        gain = 0
    return np.round(gain, 3)
# noinspection SpellCheckingInspection
class Test:
    """A voice-recognition test session: configuration, status and audio I/O."""
    def __init__(self):
        """Create a new, unconfigured test session.

        NOTE(review): construction has side effects — it prints to the console
        and opens the audio device via Recorder().
        """
        # declare the attributes of the test
        self.testName = ""  # The name of the test
        self.wPath = "."  # The current working path of the selected test
        self.settingsDir = "settings/"  # the directory for the settings of the program
        self.settingsFile = "settings/settings.vcfg"  # the settings file
        self.databaseDir = "database/"  # the directory of the testlist databases
        self.logname = ""  # path of the log file
        self.report_file = ""  # path of the csv
        self.testDir = "vr_tests/"  # Where all the tests are contained
        self.phrasesPath = "phrases/"  # The path of the audio files
        self.testfile = ""  # per-test config.cfg path (set by new()/resume())
        self.listfile = ""  # .vrtl database path (set by load_database())
        # status of the test (default values)
        self.lang = "ITA"  # The language used for the test (to be defined during the test configuration)
        self.isMultigenderEnabled = False
        self.gender = None
        self.isNluEnabled = True  # Is Natural Language enabled?
        self.mic_mode = 2  # how the infotainment microphone is activated: ptt(1), wakeword(2), can message(3)
        self.issued_ww = 0  # How many times has the wakeword been pronounced
        self.recognized_ww = 0  # How many times has the wakeword been recognized
        self.passes = 0  # How many passes are there?
        self.failed = []  # List of failed tests
        self.status = 0  # Is the test running? (0: waiting; 1: running; 2: completed)
        self.current_test = 0  # The test number we should start from. If the test is new, then the status is 0.
        self.results = {}  # A list containing the test results
        self.isMouthCalibrated = False  # Is the mouth calibrated?
        self.gain = 0  # The gain value for the mouth to reach 94dBSPL
        self.isLombardEnabled = True
        self.noise = 0  # RMS value of the background noise
        self.noise_radio = 0  # RMS value of the background noise plus the radio on
        self.testlist = []  # indices of the tests to run
        self.redo = []  # indices of tests to repeat
        # self.testlist = [0, 1, 9, 32, 33, 37, 38, 39, 41, 42, 43, 49, 50, 54, 55, 58, 86, 87,
        #                  91, 92, 94, 103, 104, 128, 129, 131, 134, 136, 138, 139, 146, 152]
        self.database = {}
        self.isFirstStart = False
        self.isSaved = True
        # open the sound recorder for calibration and translation
        print("------------------------------------------------------------------")
        print("Opening sound recorder\n")
        # Audio device settings
        self.recorder = Recorder()
        print("\nChannels: %d\n" % self.recorder.channels)
        self.recorder.channels = 2  # set 2 channels
        # channel assignment
        # output
        self.mouthChannel = 0
        self.noiseChannel = 1
        # input
        self.micChannel = 0
        self.earChannel = 1
        print("------------------------------------------------------------------")
    def load_database(self, database_file=None):
        """Select and load a .vrtl test-list database.

        When *database_file* is None a file-open dialog is shown; cancelling
        the dialog (or a PermissionError while loading) leaves the test
        untouched.
        """
        # select the proper list file with the command lists
        if database_file is None:
            database_file = filedialog.askopenfilename(title="Choose the list file for the test",
                                                       filetypes=[("Voice Recognition Test List files", "*.vrtl"),
                                                                  ("All files", "*")],
                                                       initialdir=self.databaseDir)
        if not database_file:
            return
        try:
            self.listfile = _abs_to_rel(database_file)
            self._configure_list()  # get the command database (languages, lists) from the list file
        except PermissionError:
            print("No file chosen!")
        return
    def _configure_list(self):
        """
        Detects the available language and the number of tests for language
        Opens the database file and converts it into a dictionary form suitable for the test.
        test = {"LANG1" = [[], [], [], []],
                "LANG2" = [[], [], [], []],
                ecc...
        }

        NOTE(review): self.langs is created here (not in __init__), so it only
        exists after a database has been loaded.
        """
        self.database = load_list(
            os.getcwd().replace("\\", "/") + self.listfile)  # create the test sequence dictionary from the vrtl file
        self.langs = []  # list of the currently supported languages
        # Every top-level key except the bookkeeping entries is a language code.
        for k in self.database.keys():
            if k != "preconditions" and k != "expected" and k != "AUDIOPATH":
                self.langs.append(k)
        self.langs.sort()
        return
    def new(self, testname=None, l_index=None, gender=0, testlist=None):
        """Create a new test: working directory, config file and status reset.

        :param testname: name of the test; becomes the working directory name.
        :param l_index: index into self.langs for the test language (optional).
        :param gender: 0 selects the female voice, 1 the male one (only when
            both are available for the language).
        :param testlist: indices of the tests to run; defaults to all of them.
        :raises TestExistsError: if the test directory already exists.
        """
        # decide the name of the test
        self.testName = testname
        # create a new folder based on the test
        self.wPath = "%s%s" % (self.testDir, self.testName)  # this will be your new working directory
        try:
            os.mkdir(self.wPath)  # create a new directory for the test
        except FileExistsError:
            raise TestExistsError()
        # create the configuration file
        self.logname = "%s/testlog.log" % self.wPath
        self.testfile = "%s/config.cfg" % self.wPath
        # decide the language
        if l_index is not None:
            self.lang = self.langs[l_index]
        try:  # if available, imports the array for the preconditions and expected behaviour
            self.expected = self.database["expected"]
        except KeyError:
            pass
        try:
            self.preconditions = self.database["preconditions"]
        except KeyError:
            pass
        self.sequence = self.database[self.lang]
        # detects whether male and female voices are available
        langpath = self.lang
        g = 0
        for i in os.listdir(self.database["AUDIOPATH"]):
            if self.lang in i:
                g += 1
        if g == 2:
            if gender == 1:
                langpath = self.lang + "_M"
            elif gender == 0:
                langpath = self.lang + "_F"
        # NOTE(review): 157 is presumably the size of the base (non-NLU) list —
        # confirm against the .vrtl database format.
        if len(self.database[self.lang]) > 157:  # detects if natural language is available
            self.isNluEnabled = True
        else:
            self.isNluEnabled = False
        self.phrasesPath = self.database["AUDIOPATH"] + langpath  # build the path for the speech files
        self.save()  # save the configuration into the cfg file
        # reset status values
        # NOTE(review): current_test is set to 1 here but overwritten with 0 a
        # few lines below — confirm which value is intended.
        self.current_test = 1
        self.issued_ww = 0  # How many times has the wakeword been pronounced
        self.recognized_ww = 0  # How many times has the wakeword been recognized
        self.passes = 0  # How many passes are there?
        self.failed = []  # List of failed tests
        self.status = 0  # Is the test running?
        self.current_test = 0  # The test number we should start from. If the test is new, then the status is 0.
        # NOTE(review): self.testlist is printed here but only (re)assigned at
        # the end of this method, so the previous value is shown.
        print_square("Creating test (%s)\n\n"
                     ""
                     "Language: %s\n"
                     "Testlist: %s\n"
                     "Status: %s" % (self.wPath, self.lang, self.testlist, self.status))
        self.results = {}  # A list containing the test results
        self.isSaved = True
        if testlist is None:
            self.testlist = range(len(self.database[self.lang]))
        else:
            self.testlist = testlist
        return
    def resume(self, path=None):
        """Resume an existing test from its directory (or the current wPath)."""
        if path is not None:
            self.wPath = path
        self.testfile = "%s/config.cfg" % self.wPath  # the configuration file's path
        self.load_conf()  # retrieve the paths and test status from the configuration file
        self._configure_list()  # get the test configuration (languages, lists) from the listfile
        self.save()
        return
def detectgenders(self, lang):
"""
For the selected language, detects if both male and female voice are available,
based on the folders on the "phrases" directory.
| |
<gh_stars>0
# Copyright 2014 Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import errno
import socket
import subprocess
# Sometimes it's really hard to get Python extensions to compile,
# so fall back to a pure Python implementation.
try:
import bser
except ImportError, e:
import pybser as bser
import capabilities
# 2 bytes marker, 1 byte int size, 8 bytes int64 value
sniff_len = 13
class WatchmanError(Exception):
    """Base class for all errors raised by this watchman client."""
    pass
class SocketTimeout(WatchmanError):
    """A specialized exception raised for socket timeouts during communication to/from watchman.

    This makes it easier to implement non-blocking loops as callers can easily distinguish
    between a routine timeout and an actual error condition.

    Note that catching WatchmanError will also catch this as it is a super-class, so backwards
    compatibility in exception handling is preserved.
    """
class CommandError(WatchmanError):
    """error returned by watchman

    self.msg is the message returned by watchman.
    """
    def __init__(self, msg):
        # Keep the raw message available; the formatted text goes to Exception.
        self.msg = msg
        super(CommandError, self).__init__('watchman command error: %s' % msg)
class Transport(object):
    """ communication transport to the watchman server """
    # Line buffer used by readLine(); lazily initialised on first use.
    buf = None

    def close(self):
        """ tear it down """
        raise NotImplementedError()

    def readBytes(self, size):
        """ read size bytes """
        raise NotImplementedError()

    def write(self, buf):
        """ write some data """
        raise NotImplementedError()

    def readLine(self):
        """ read a line
        Maintains its own buffer, callers of the transport should not mix
        calls to readBytes and readLine.
        """
        if self.buf is None:
            self.buf = []
        # Buffer may already have a line if we've received unilateral
        # response(s) from the server
        if len(self.buf) == 1 and "\n" in self.buf[0]:
            (line, b) = self.buf[0].split("\n", 1)
            self.buf = [b]
            return line
        # Accumulate chunks until a newline arrives; anything after the
        # newline is kept buffered for the next call.
        while True:
            b = self.readBytes(4096)
            if "\n" in b:
                result = ''.join(self.buf)
                (line, b) = b.split("\n", 1)
                self.buf = [b]
                return result + line
            self.buf.append(b)
class Codec(object):
    """ communication encoding for the watchman server """
    # Underlying Transport used to move the encoded bytes.
    transport = None

    def __init__(self, transport):
        self.transport = transport

    def receive(self):
        """ decode and return the next response from the transport """
        raise NotImplementedError()

    def send(self, *args):
        """ encode and write a request to the transport """
        raise NotImplementedError()
class UnixSocketTransport(Transport):
    """ local unix domain socket transport """
    sock = None

    def __init__(self, sockpath, timeout):
        """Connect to the unix socket at *sockpath*, applying *timeout* (seconds)."""
        self.sockpath = sockpath
        self.timeout = timeout
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.settimeout(self.timeout)
            sock.connect(self.sockpath)
            self.sock = sock
        except socket.error as e:
            raise WatchmanError('unable to connect to %s: %s' %
                                (self.sockpath, e))

    def close(self):
        self.sock.close()
        self.sock = None

    def readBytes(self, size):
        """Read up to *size* bytes; raises WatchmanError on EOF, SocketTimeout on timeout."""
        try:
            buf = [self.sock.recv(size)]
            if not buf[0]:
                raise WatchmanError('empty watchman response')
            return buf[0]
        except socket.timeout:
            raise SocketTimeout('timed out waiting for response')

    def write(self, data):
        try:
            self.sock.sendall(data)
        except socket.timeout:
            raise SocketTimeout('timed out sending query command')
class WindowsNamedPipeTransport(Transport):
    """Transport that connects to a Windows named pipe."""

    def __init__(self, sockpath, timeout):
        self.sockpath = sockpath
        self.timeout = timeout
        # O_BINARY keeps the pipe handle free of newline translation.
        flags = os.O_RDWR | os.O_BINARY
        self.pipe = os.open(sockpath, flags)

    def close(self):
        os.close(self.pipe)
        self.pipe = None

    def readBytes(self, size):
        return os.read(self.pipe, size)

    def write(self, data):
        return os.write(self.pipe, data)
class CLIProcessTransport(Transport):
    """Talk to the watchman service by spawning the CLI per command.

    This is intended to be used only in the test harness!
    The CLI is an oddball because we only support JSON input
    and cannot send multiple commands through the same instance,
    so we spawn a new process for each command.
    We disable server spawning for this implementation, again, because
    it is intended to be used only in our test harness. You really
    should not need to use the CLI transport for anything real.
    While the CLI can output in BSER, our Transport interface doesn't
    support telling this instance that it should do so. That effectively
    limits this implementation to JSON input and output only at this time.
    It is the responsibility of the caller to set the send and
    receive codecs appropriately.
    """
    proc = None
    closed = True

    def __init__(self, sockpath, timeout):
        self.sockpath = sockpath
        self.timeout = timeout

    def close(self):
        if self.proc:
            self.proc.kill()
            # BUG FIX: reap the killed child so it does not linger as a
            # zombie process until interpreter exit.
            self.proc.wait()
            self.proc = None

    def _connect(self):
        """Spawn the one-shot watchman CLI process if not already running."""
        if self.proc:
            return self.proc
        args = [
            'watchman',
            '--sockname={}'.format(self.sockpath),
            '--logfile=/BOGUS',
            '--statefile=/BOGUS',
            '--no-spawn',
            '--no-local',
            '--no-pretty',
            '-j',
        ]
        self.proc = subprocess.Popen(args,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)
        return self.proc

    def readBytes(self, size):
        self._connect()
        res = self.proc.stdout.read(size)
        if res == '':
            raise WatchmanError('EOF on CLI process transport')
        return res

    def write(self, data):
        if self.closed:
            # The previous command consumed this process; start fresh.
            self.closed = False
            self.proc = None
        self._connect()
        res = self.proc.stdin.write(data)
        # One command per process: closing stdin makes the CLI execute it.
        self.proc.stdin.close()
        self.closed = True
        return res
class BserCodec(Codec):
    """Use the BSER encoding.  This is the default, preferred codec."""

    def receive(self):
        # Read enough bytes to sniff the PDU header and learn the full length.
        initial = self.transport.readBytes(sniff_len)
        if not initial:
            raise WatchmanError('empty watchman response')
        chunks = [initial]
        expected = bser.pdu_len(initial)
        received = len(initial)
        # Keep reading until the whole PDU has arrived.
        while received < expected:
            more = self.transport.readBytes(expected - received)
            chunks.append(more)
            received += len(more)
        payload = ''.join(chunks)
        try:
            return bser.loads(payload)
        except ValueError as e:
            raise WatchmanError('watchman response decode error: %s' % e)

    def send(self, *args):
        encoded = bser.dumps(*args)
        self.transport.write(encoded)
class JsonCodec(Codec):
    """Use the JSON codec.  This is here primarily for testing purposes."""

    json = None

    def __init__(self, transport):
        super(JsonCodec, self).__init__(transport)
        # Optional dependency on json, imported only when JsonCodec is used.
        import json
        self.json = json

    def receive(self):
        line = self.transport.readLine()
        try:
            decoded = self.json.loads(line)
        except Exception as e:
            # Surface the offending line alongside the parse error, then
            # let the exception propagate to the caller.
            print(e, line)
            raise
        return decoded

    def send(self, *args):
        payload = self.json.dumps(*args)
        self.transport.write(payload + "\n")
class client(object):
""" Handles the communication with the watchman service """
sockpath = None
transport = None
sendCodec = None
recvCodec = None
sendConn = None
recvConn = None
subs = {} # Keyed by subscription name
logs = [] # When log level is raised
unilateral = ['log', 'subscription']
tport = None
def __init__(self, sockpath=None, timeout=1.0, transport=None,
             sendEncoding=None, recvEncoding=None):
    """Configure (but do not yet open) a connection to watchman.

    sockpath: path to the watchman socket; resolved lazily when None.
    timeout: socket timeout in seconds.
    transport: 'local', 'cli', or falsy to consult $WATCHMAN_TRANSPORT
        and finally default to 'local'.
    sendEncoding / recvEncoding: 'bser' or 'json'; when None, falls back
        to $WATCHMAN_ENCODING and then 'bser'.
    """
    self.sockpath = sockpath
    self.timeout = timeout
    transport = transport or os.getenv('WATCHMAN_TRANSPORT') or 'local'
    if transport == 'local' and os.name == 'nt':
        self.transport = WindowsNamedPipeTransport
    elif transport == 'local':
        self.transport = UnixSocketTransport
    elif transport == 'cli':
        self.transport = CLIProcessTransport
        # The CLI transport only speaks JSON (see CLIProcessTransport),
        # so force the encodings unless the caller chose explicitly.
        if sendEncoding is None:
            sendEncoding = 'json'
        if recvEncoding is None:
            recvEncoding = sendEncoding
    else:
        raise WatchmanError('invalid transport %s' % transport)
    sendEncoding = sendEncoding or os.getenv('WATCHMAN_ENCODING') or 'bser'
    recvEncoding = recvEncoding or os.getenv('WATCHMAN_ENCODING') or 'bser'
    self.recvCodec = self._parseEncoding(recvEncoding)
    self.sendCodec = self._parseEncoding(sendEncoding)
def _parseEncoding(self, enc):
    """Map an encoding name to its codec class; raise on unknown names."""
    if enc == 'bser':
        return BserCodec
    if enc == 'json':
        return JsonCodec
    raise WatchmanError('invalid encoding %s' % enc)
def _resolvesockname(self):
    """Determine the watchman socket path.

    If invoked via a trigger, watchman sets $WATCHMAN_SOCK; use it unless
    explicitly set otherwise.  Otherwise run `watchman get-sockname` and
    parse the BSER-encoded response.

    Raises WatchmanError if the binary is missing, exits nonzero, or
    reports an error in its response.
    """
    path = os.getenv('WATCHMAN_SOCK')
    if path:
        return path
    cmd = ['watchman', '--output-encoding=bser', 'get-sockname']
    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             close_fds=os.name != 'nt')
    except OSError as e:
        # BUG FIX: the original passed `e` as a second positional argument
        # to WatchmanError instead of %-formatting it into the message,
        # so the '(%s)' placeholder was never filled in.
        raise WatchmanError('"watchman" executable not in PATH (%s)' % e)
    stdout, stderr = p.communicate()
    exitcode = p.poll()
    if exitcode:
        raise WatchmanError("watchman exited with code %d" % exitcode)
    result = bser.loads(stdout)
    if 'error' in result:
        raise WatchmanError('get-sockname error: %s' % result['error'])
    return result['sockname']
def _connect(self):
    """ establish transport connection """
    # Idempotent: an existing receive connection means we are connected.
    if self.recvConn:
        return
    if self.sockpath is None:
        self.sockpath = self._resolvesockname()
    # self.transport / self.sendCodec / self.recvCodec are the classes
    # chosen in __init__; instantiate them now.
    self.tport = self.transport(self.sockpath, self.timeout)
    self.sendConn = self.sendCodec(self.tport)
    self.recvConn = self.recvCodec(self.tport)
def __del__(self):
    # Best-effort cleanup of the transport when garbage collected.
    self.close()
def close(self):
if self.tport:
self.tport.close()
self.tport = None
self.recvConn = None
self.sendConn = | |
rpushx(self, tr, key:NativeType, value:NativeType) -> int:
""" Append a value to a list, only if the list exists """
return self._query(tr, b'rpushx', self.encode_from_native(key), self.encode_from_native(value))
# List commands.  Each method encodes its arguments to bytes and delegates
# to self._query.  NOTE(review): @_query_command appears to wrap these as
# Redis query coroutines using the signature annotations — defined elsewhere
# in this file; confirm before restructuring signatures.

@_query_command
def llen(self, tr, key:NativeType) -> int:
    """ Returns the length of the list stored at key. """
    return self._query(tr, b'llen', self.encode_from_native(key))

@_query_command
def lrem(self, tr, key:NativeType, count:int=0, value='') -> int:
    """ Remove elements from a list """
    return self._query(tr, b'lrem', self.encode_from_native(key), self._encode_int(count), self.encode_from_native(value))

@_query_command
def lrange(self, tr, key, start:int=0, stop:int=-1) -> ListReply:
    """ Get a range of elements from a list. """
    return self._query(tr, b'lrange', self.encode_from_native(key), self._encode_int(start), self._encode_int(stop))

@_query_command
def ltrim(self, tr, key:NativeType, start:int=0, stop:int=-1) -> StatusReply:
    """ Trim a list to the specified range """
    return self._query(tr, b'ltrim', self.encode_from_native(key), self._encode_int(start), self._encode_int(stop))

@_query_command
def lpop(self, tr, key:NativeType) -> (NativeType, NoneType):
    """ Remove and get the first element in a list """
    return self._query(tr, b'lpop', self.encode_from_native(key))

@_query_command
def rpop(self, tr, key:NativeType) -> (NativeType, NoneType):
    """ Remove and get the last element in a list """
    return self._query(tr, b'rpop', self.encode_from_native(key))

@_query_command
def rpoplpush(self, tr, source:NativeType, destination:NativeType) -> (NativeType, NoneType):
    """ Remove the last element in a list, append it to another list and return it """
    return self._query(tr, b'rpoplpush', self.encode_from_native(source), self.encode_from_native(destination))

@_query_command
def lindex(self, tr, key:NativeType, index:int) -> (NativeType, NoneType):
    """ Get an element from a list by its index """
    return self._query(tr, b'lindex', self.encode_from_native(key), self._encode_int(index))
@_query_command
def blpop(self, tr, keys:ListOf(NativeType), timeout:int=0) -> BlockingPopReply:
    """ Remove and get the first element in a list, or block until one is available.
    This will raise :class:`~asyncio_redis.exceptions.TimeoutError` when
    the timeout was exceeded and Redis returns `None`. """
    return self._blocking_pop(tr, b'blpop', keys, timeout=timeout)

@_query_command
def brpop(self, tr, keys:ListOf(NativeType), timeout:int=0) -> BlockingPopReply:
    """ Remove and get the last element in a list, or block until one is available.
    This will raise :class:`~asyncio_redis.exceptions.TimeoutError` when
    the timeout was exceeded and Redis returns `None`. """
    return self._blocking_pop(tr, b'brpop', keys, timeout=timeout)

def _blocking_pop(self, tr, command, keys, timeout:int=0):
    """ Shared implementation for blpop/brpop: all keys followed by the
    timeout, issued with set_blocking so the connection is reserved. """
    return self._query(tr, command, *([ self.encode_from_native(k) for k in keys ] + [self._encode_int(timeout)]), set_blocking=True)

@_command
@asyncio.coroutine
def brpoplpush(self, tr, source:NativeType, destination:NativeType, timeout:int=0) -> NativeType:
    """ Pop a value from a list, push it to another list and return it; or block until one is available """
    result = yield from self._query(tr, b'brpoplpush', self.encode_from_native(source), self.encode_from_native(destination),
                                    self._encode_int(timeout), set_blocking=True)
    # Redis returns a nil reply (None) when the timeout expired.
    if result is None:
        raise TimeoutError('Timeout in brpoplpush')
    else:
        assert isinstance(result, bytes)
        return self.decode_to_native(result)
@_query_command
def lset(self, tr, key:NativeType, index:int, value:NativeType) -> StatusReply:
    """ Set the value of an element in a list by its index. """
    return self._query(tr, b'lset', self.encode_from_native(key), self._encode_int(index), self.encode_from_native(value))

@_query_command
def linsert(self, tr, key:NativeType, pivot:NativeType, value:NativeType, before=False) -> int:
    """ Insert an element before or after another element (the pivot) in a list """
    return self._query(tr, b'linsert', self.encode_from_native(key), (b'BEFORE' if before else b'AFTER'),
                       self.encode_from_native(pivot), self.encode_from_native(value))
# Sorted Sets

@_query_command
def zadd(self, tr, key:NativeType, values:dict, only_if_not_exists=False, only_if_exists=False, return_num_changed=False) -> int:
    """
    Add one or more members to a sorted set, or update its score if it already exists

    ::

        yield protocol.zadd('myzset', { 'key': 4, 'key2': 5 })

    `values` maps member -> numeric score.  `only_if_not_exists` /
    `only_if_exists` map to the NX / XX options and are mutually exclusive;
    `return_num_changed` maps to CH.
    """
    options = [ ]
    # NX and XX are mutually exclusive Redis options.
    assert not (only_if_not_exists and only_if_exists)
    if only_if_not_exists:
        options.append(b'NX')
    elif only_if_exists:
        options.append(b'XX')
    if return_num_changed:
        options.append(b'CH')
    data = [ ]
    for k,score in values.items():
        assert isinstance(k, self.native_type)
        assert isinstance(score, (int, float))
        # Wire order is: score first, then member.
        data.append(self._encode_float(score))
        data.append(self.encode_from_native(k))
    return self._query(tr, b'zadd', self.encode_from_native(key), *(options + data))

@_query_command
def zpopmin(self, tr, key:NativeType, count:int=1) -> ZRangeReply:
    """
    Return the specified numbers of first elements from sorted set with a minimum score

    You can do the following to receive the slice of the sorted set as a
    python dict (mapping the keys to their scores):

    ::

        result = yield protocol.zpopmin('myzset', count=10)
        my_dict = yield result.asdict()
    """
    return self._query(tr, b'zpopmin', self.encode_from_native(key), self._encode_int(count))
@_query_command
def zrange(self, tr, key:NativeType, start:int=0, stop:int=-1) -> ZRangeReply:
    """
    Return a range of members in a sorted set, by index.

    You can do the following to receive the slice of the sorted set as a
    python dict (mapping the keys to their scores):

    ::

        result = yield protocol.zrange('myzset', start=10, stop=20)
        my_dict = yield result.asdict()

    or the following to retrieve it as a list of keys:

    ::

        result = yield protocol.zrange('myzset', start=10, stop=20)
        my_dict = yield result.aslist()
    """
    return self._query(tr, b'zrange', self.encode_from_native(key),
                       self._encode_int(start), self._encode_int(stop), b'withscores')

@_query_command
def zrangebylex(self, tr, key:NativeType, start:str, stop:str) -> SetReply:
    """
    Return a range of members in a sorted set, by lexicographical range
    (`start`/`stop` use Redis lex-range syntax, e.g. '-', '[c').

    You can do the following to receive the slice of the sorted set as a
    python dict (mapping the keys to their scores):

    ::

        result = yield protocol.zrangebylex('myzset', start='-', stop='[c')
        my_dict = yield result.asdict()

    or the following to retrieve it as a list of keys:

    ::

        result = yield protocol.zrangebylex('myzset', start='-', stop='[c')
        my_dict = yield result.aslist()
    """
    return self._query(tr, b'zrangebylex', self.encode_from_native(key),
                       self.encode_from_native(start), self.encode_from_native(stop))

@_query_command
def zrevrange(self, tr, key:NativeType, start:int=0, stop:int=-1) -> ZRangeReply:
    """
    Return a range of members in a reversed sorted set, by index.

    You can do the following to receive the slice of the sorted set as a
    python dict (mapping the keys to their scores):

    ::

        my_dict = yield protocol.zrevrange_asdict('myzset', start=10, stop=20)

    or the following to retrieve it as a list of keys:

    ::

        zrange_reply = yield protocol.zrevrange('myzset', start=10, stop=20)
        my_dict = yield zrange_reply.aslist()
    """
    return self._query(tr, b'zrevrange', self.encode_from_native(key),
                       self._encode_int(start), self._encode_int(stop), b'withscores')
@_query_command
def zrangebyscore(self, tr, key:NativeType,
                  min:ZScoreBoundary=ZScoreBoundary.MIN_VALUE,
                  max:ZScoreBoundary=ZScoreBoundary.MAX_VALUE,
                  offset:int=0, limit:int=-1) -> ZRangeReply:
    """ Return a range of members in a sorted set, by score """
    return self._query(tr, b'zrangebyscore', self.encode_from_native(key),
                       self._encode_zscore_boundary(min), self._encode_zscore_boundary(max),
                       b'limit', self._encode_int(offset), self._encode_int(limit),
                       b'withscores')

@_query_command
def zrevrangebyscore(self, tr, key:NativeType,
                     max:ZScoreBoundary=ZScoreBoundary.MAX_VALUE,
                     min:ZScoreBoundary=ZScoreBoundary.MIN_VALUE,
                     offset:int=0, limit:int=-1) -> ZRangeReply:
    """ Return a range of members in a sorted set, by score, with scores ordered from high to low """
    # Note the max/min argument order is reversed relative to zrangebyscore.
    return self._query(tr, b'zrevrangebyscore', self.encode_from_native(key),
                       self._encode_zscore_boundary(max), self._encode_zscore_boundary(min),
                       b'limit', self._encode_int(offset), self._encode_int(limit),
                       b'withscores')

@_query_command
def zremrangebyscore(self, tr, key:NativeType,
                     min:ZScoreBoundary=ZScoreBoundary.MIN_VALUE,
                     max:ZScoreBoundary=ZScoreBoundary.MAX_VALUE) -> int:
    """ Remove all members in a sorted set within the given scores """
    return self._query(tr, b'zremrangebyscore', self.encode_from_native(key),
                       self._encode_zscore_boundary(min), self._encode_zscore_boundary(max))

@_query_command
def zremrangebyrank(self, tr, key:NativeType, min:int=0, max:int=-1) -> int:
    """ Remove all members in a sorted set within the given indexes """
    return self._query(tr, b'zremrangebyrank', self.encode_from_native(key),
                       self._encode_int(min), self._encode_int(max))

@_query_command
def zcount(self, tr, key:NativeType, min:ZScoreBoundary, max:ZScoreBoundary) -> int:
    """ Count the members in a sorted set with scores within the given values """
    return self._query(tr, b'zcount', self.encode_from_native(key),
                       self._encode_zscore_boundary(min), self._encode_zscore_boundary(max))
@_query_command
def zscore(self, tr, key:NativeType, member:NativeType) -> (float, NoneType):
    """ Get the score associated with the given member in a sorted set """
    return self._query(tr, b'zscore', self.encode_from_native(key), self.encode_from_native(member))

@_query_command
def zunionstore(self, tr, destination:NativeType, keys:ListOf(NativeType), weights:(NoneType,ListOf(float))=None,
                aggregate=ZAggregate.SUM) -> int:
    """ Add multiple sorted sets and store the resulting sorted set in a new key """
    return self._zstore(tr, b'zunionstore', destination, keys, weights, aggregate)

@_query_command
def zinterstore(self, tr, destination:NativeType, keys:ListOf(NativeType), weights:(NoneType,ListOf(float))=None,
                aggregate=ZAggregate.SUM) -> int:
    """ Intersect multiple sorted sets and store the resulting sorted set in a new key """
    return self._zstore(tr, b'zinterstore', destination, keys, weights, aggregate)

def _zstore(self, tr, command, destination, keys, weights, aggregate):
    """ Common part for zunionstore and zinterstore.

    Builds: <command> <destination> <numkeys> <keys...> WEIGHTS <weights...>
    AGGREGATE <SUM|MIN|MAX>.  Missing weights default to 1 per key. """
    numkeys = len(keys)
    if weights is None:
        weights = [1] * numkeys
    return self._query(tr, *
        [ command, self.encode_from_native(destination), self._encode_int(numkeys) ] +
        list(map(self.encode_from_native, keys)) +
        [ b'weights' ] +
        list(map(self._encode_float, weights)) +
        [ b'aggregate' ] +
        [ {
            ZAggregate.SUM: b'SUM',
            ZAggregate.MIN: b'MIN',
            ZAggregate.MAX: b'MAX' }[aggregate]
        ] )
@_query_command
def zcard(self, tr, key:NativeType) -> int:
    """ Get the number of members in a sorted set """
    return self._query(tr, b'zcard', self.encode_from_native(key))

@_query_command
def zrank(self, tr, key:NativeType, member:NativeType) -> (int, NoneType):
    """ Determine the index of a member in a sorted set """
    return self._query(tr, b'zrank', self.encode_from_native(key), self.encode_from_native(member))

@_query_command
def zrevrank(self, tr, key:NativeType, member:NativeType) -> (int, NoneType):
    """ Determine the index of a member in a sorted set, with scores ordered from high to low """
    return self._query(tr, b'zrevrank', self.encode_from_native(key), self.encode_from_native(member))

@_query_command
def zincrby(self, tr, key:NativeType, increment:float, member:NativeType, only_if_exists=False) -> (float, NoneType):
    """ Increment the score of a member in a sorted set """
    if only_if_exists:
        # ZINCRBY has no XX option, so emulate it with ZADD XX INCR.
        return self._query(tr, b'zadd', self.encode_from_native(key), b'xx', b'incr',
                           self._encode_float(increment), self.encode_from_native(member))
    else:
        return self._query(tr, b'zincrby', self.encode_from_native(key),
                           self._encode_float(increment), self.encode_from_native(member))
@_query_command
def | |
if depth5 else tmp4371)
tmp4369 = (tmp4099 if depth4 else tmp4370)
tmp4368 = (tmp4099 if depth3 else tmp4369)
tmp4367 = (tmp4099 if depth2 else tmp4368)
tmp4360 = (tmp4361 if depth1 else tmp4367)
tmp4378 = (tmp4073 if depth7 else tmp4104)
tmp4377 = (tmp4378 if depth6 else tmp4119)
tmp4376 = (tmp4377 if depth5 else tmp4119)
tmp4375 = (tmp4376 if depth4 else tmp4119)
tmp4374 = (tmp4375 if depth3 else tmp4119)
tmp4373 = (tmp4374 if depth2 else tmp4119)
tmp4384 = (tmp4073 if depth7 else tmp4128)
tmp4383 = (tmp4384 if depth6 else tmp4119)
tmp4382 = (tmp4383 if depth5 else tmp4119)
tmp4381 = (tmp4382 if depth4 else tmp4119)
tmp4380 = (tmp4381 if depth3 else tmp4119)
tmp4387 = (tmp4119 if depth5 else tmp4145)
tmp4386 = (tmp4119 if depth4 else tmp4387)
tmp4385 = (tmp4119 if depth3 else tmp4386)
tmp4379 = (tmp4380 if depth2 else tmp4385)
tmp4372 = (tmp4373 if depth1 else tmp4379)
tmp4359 = (tmp4360 if s0 else tmp4372)
tmp4346 = (tmp4347 if s1 else tmp4359)
tmp4341 = (tmp4342 if s2 else tmp4346)
tmp4280 = (tmp4281 if s3 else tmp4341)
tmp4135 = (1 - (tmp4136 if s4 else tmp4280))
tmp4006 = (tmp4007 if s5 else tmp4135)
tmp4400 = (tmp4073 if depth7 else tmp4134)
tmp4399 = (tmp4400 if depth6 else tmp4119)
tmp4398 = (tmp4399 if depth5 else tmp4119)
tmp4397 = (tmp4398 if depth4 else tmp4119)
tmp4396 = (tmp4397 if depth3 else tmp4119)
tmp4395 = (tmp4396 if depth2 else tmp4380)
tmp4394 = (tmp4395 if depth1 else tmp4373)
tmp4406 = (tmp4073 if depth6 else tmp4119)
tmp4405 = (tmp4406 if depth5 else tmp4119)
tmp4404 = (tmp4405 if depth4 else tmp4119)
tmp4403 = (tmp4404 if depth3 else tmp4119)
tmp4402 = (tmp4403 if depth2 else tmp4374)
tmp4401 = (tmp4402 if depth1 else tmp4395)
tmp4393 = (tmp4394 if s0 else tmp4401)
tmp4410 = (tmp4404 if depth3 else tmp4381)
tmp4409 = (tmp4410 if depth2 else tmp4396)
tmp4408 = (tmp4409 if depth1 else tmp4402)
tmp4413 = (tmp4404 if depth3 else tmp4375)
tmp4412 = (tmp4413 if depth2 else tmp4403)
tmp4411 = (tmp4412 if depth1 else tmp4409)
tmp4407 = (tmp4408 if s0 else tmp4411)
tmp4392 = (tmp4393 if s1 else tmp4407)
tmp4418 = (tmp4404 if depth3 else tmp4397)
tmp4417 = (tmp4418 if depth2 else tmp4410)
tmp4416 = (tmp4417 if depth1 else tmp4412)
tmp4420 = (tmp4404 if depth2 else tmp4413)
tmp4419 = (tmp4420 if depth1 else tmp4417)
tmp4415 = (tmp4416 if s0 else tmp4419)
tmp4426 = (tmp4171 if depth6 else tmp4170)
tmp4425 = (tmp4426 if depth5 else tmp4170)
tmp4424 = (tmp4425 if depth4 else tmp4170)
tmp4431 = (tmp4171 if depth7 else tmp4182)
tmp4430 = (tmp4431 if depth6 else tmp4170)
tmp4429 = (tmp4430 if depth5 else tmp4170)
tmp4428 = (tmp4429 if depth4 else tmp4170)
tmp4427 = (tmp4424 if depth3 else tmp4428)
tmp4423 = (tmp4424 if depth2 else tmp4427)
tmp4437 = (tmp4171 if depth7 else tmp4189)
tmp4436 = (tmp4437 if depth6 else tmp4170)
tmp4435 = (tmp4436 if depth5 else tmp4170)
tmp4434 = (tmp4435 if depth4 else tmp4170)
tmp4433 = (tmp4424 if depth3 else tmp4434)
tmp4432 = (tmp4424 if depth2 else tmp4433)
tmp4422 = (tmp4423 if depth1 else tmp4432)
tmp4441 = (tmp4172 if depth6 else tmp4193)
tmp4440 = (tmp4441 if depth5 else tmp4193)
tmp4439 = (tmp4440 if depth4 else tmp4193)
tmp4447 = (tmp4172 if depth7 else tmp4201)
tmp4446 = (tmp4447 if depth6 else tmp4193)
tmp4445 = (tmp4446 if depth5 else tmp4193)
tmp4444 = (tmp4445 if depth4 else tmp4193)
tmp4443 = (tmp4439 if depth3 else tmp4444)
tmp4442 = (tmp4439 if depth2 else tmp4443)
tmp4438 = (tmp4439 if depth1 else tmp4442)
tmp4421 = (tmp4422 if s0 else tmp4438)
tmp4414 = (tmp4415 if s1 else tmp4421)
tmp4391 = (tmp4392 if s2 else tmp4414)
tmp4453 = (tmp4208 if depth6 else tmp4207)
tmp4452 = (tmp4453 if depth5 else tmp4207)
tmp4451 = (tmp4452 if depth4 else tmp4207)
tmp4450 = (tmp4451 if s0 else tmp4173)
tmp4449 = (tmp4450 if s1 else tmp4173)
tmp4458 = (tmp4216 if depth6 else tmp4215)
tmp4457 = (tmp4458 if depth5 else tmp4215)
tmp4461 = (tmp4457 if depth4 else tmp4215)
tmp4460 = (tmp4457 if depth3 else tmp4461)
tmp4459 = (tmp4457 if depth2 else tmp4460)
tmp4456 = (tmp4457 if depth1 else tmp4459)
tmp4464 = (tmp4217 if depth6 else tmp4226)
tmp4463 = (tmp4464 if depth5 else tmp4226)
tmp4470 = (tmp4217 if depth7 else tmp4234)
tmp4469 = (tmp4470 if depth6 else tmp4226)
tmp4468 = (tmp4469 if depth5 else tmp4226)
tmp4467 = (tmp4463 if depth4 else tmp4468)
tmp4466 = (tmp4463 if depth3 else tmp4467)
tmp4465 = (tmp4463 if depth2 else tmp4466)
tmp4462 = (tmp4463 if depth1 else tmp4465)
tmp4455 = (tmp4456 if s0 else tmp4462)
tmp4477 = (tmp4242 if depth6 else tmp4241)
tmp4479 = (tmp4242 if depth7 else tmp4246)
tmp4478 = (tmp4479 if depth6 else tmp4241)
tmp4476 = (tmp4477 if depth5 else tmp4478)
tmp4480 = (tmp4477 if depth5 else tmp4241)
tmp4475 = (tmp4476 if depth4 else tmp4480)
tmp4474 = (tmp4475 if depth3 else tmp4480)
tmp4473 = (tmp4474 if depth2 else tmp4480)
tmp4486 = (tmp4242 if depth7 else tmp4256)
tmp4485 = (tmp4486 if depth6 else tmp4241)
tmp4484 = (tmp4485 if depth5 else tmp4241)
tmp4483 = (tmp4480 if depth4 else tmp4484)
tmp4482 = (tmp4480 if depth3 else tmp4483)
tmp4481 = (tmp4480 if depth2 else tmp4482)
tmp4472 = (tmp4473 if depth1 else tmp4481)
tmp4492 = (tmp4062 if depth6 else tmp4262)
tmp4494 = (tmp4062 if depth7 else tmp4247)
tmp4493 = (tmp4494 if depth6 else tmp4262)
tmp4491 = (tmp4492 if depth5 else tmp4493)
tmp4495 = (tmp4492 if depth5 else tmp4262)
tmp4490 = (tmp4491 if depth4 else tmp4495)
tmp4489 = (tmp4490 if depth3 else tmp4495)
tmp4488 = (tmp4489 if depth2 else tmp4495)
tmp4501 = (tmp4062 if depth7 else tmp4273)
tmp4500 = (tmp4501 if depth6 else tmp4262)
tmp4499 = (tmp4492 if depth5 else tmp4500)
tmp4498 = (tmp4499 if depth4 else tmp4495)
tmp4497 = (tmp4498 if depth3 else tmp4495)
tmp4506 = (tmp4062 if depth7 else tmp4279)
tmp4505 = (tmp4506 if depth6 else tmp4262)
tmp4504 = (tmp4505 if depth5 else tmp4262)
tmp4503 = (tmp4495 if depth4 else tmp4504)
tmp4502 = (tmp4495 if depth3 else tmp4503)
tmp4496 = (tmp4497 if depth2 else tmp4502)
tmp4487 = (tmp4488 if depth1 else tmp4496)
tmp4471 = (tmp4472 if s0 else tmp4487)
tmp4454 = (tmp4455 if s1 else tmp4471)
tmp4448 = (tmp4449 if s2 else tmp4454)
tmp4390 = (tmp4391 if s3 else tmp4448)
tmp4515 = (tmp4492 if depth5 else tmp4505)
tmp4514 = (tmp4515 if depth4 else tmp4495)
tmp4513 = (tmp4514 if depth3 else tmp4495)
tmp4512 = (tmp4513 if depth2 else tmp4497)
tmp4511 = (tmp4512 if depth1 else tmp4488)
tmp4519 = (tmp4492 if depth4 else tmp4495)
tmp4518 = (tmp4519 if depth3 else tmp4495)
tmp4517 = (tmp4518 if depth2 else tmp4489)
tmp4516 = (tmp4517 if depth1 else tmp4512)
tmp4510 = (tmp4511 if s0 else tmp4516)
tmp4523 = (tmp4519 if depth3 else tmp4498)
tmp4522 = (tmp4523 if depth2 else tmp4513)
tmp4521 = (tmp4522 if depth1 else tmp4517)
tmp4526 = (tmp4519 if depth3 else tmp4490)
tmp4525 = (tmp4526 if depth2 else tmp4518)
tmp4524 = (tmp4525 if depth1 else tmp4522)
tmp4520 = (tmp4521 if s0 else tmp4524)
tmp4509 = (tmp4510 if s1 else tmp4520)
tmp4531 = (tmp4519 if depth3 else tmp4514)
tmp4530 = (tmp4531 if depth2 else tmp4523)
tmp4529 = (tmp4530 if depth1 else tmp4525)
tmp4533 = (tmp4519 if depth2 else tmp4526)
tmp4532 = (tmp4533 if depth1 else tmp4530)
tmp4528 = (tmp4529 if s0 else tmp4532)
tmp4538 = (tmp4312 if depth6 else tmp4311)
tmp4539 = (tmp4538 if depth5 else tmp4311)
tmp4537 = (tmp4538 if depth4 else tmp4539)
tmp4544 = (tmp4312 if depth7 else tmp4321)
tmp4543 = (tmp4544 if depth6 else tmp4311)
tmp4542 = (tmp4538 if depth5 else tmp4543)
tmp4541 = (tmp4542 if depth4 else tmp4539)
tmp4540 = (tmp4537 if depth3 else tmp4541)
tmp4536 = (tmp4537 if depth2 else tmp4540)
tmp4550 = (tmp4312 if depth7 else tmp4328)
tmp4549 = (tmp4550 if depth6 else tmp4311)
tmp4548 = (tmp4538 if depth5 else tmp4549)
tmp4547 = (tmp4548 if depth4 else tmp4539)
tmp4546 = (tmp4537 if depth3 else tmp4547)
tmp4545 = (tmp4537 if depth2 else tmp4546)
| |
import os
import platform
import subprocess
import warnings
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from urllib.parse import quote
import astropy, astropy.wcs, astropy.visualization
import astropy.io.fits as fits
import astropy.coordinates as coords
import astropy.units as u
import pysiaf
import jwst_gtvt.find_tgt_info
import jwst_gtvt.ephemeris_old2x as EPH
# Visit plotting tools
# A note about interpretation of GSPA. This is subtle, and the implementation varies depending on PPS version.
# OSS passes through the GSPA parameter to spacecraft ACS for slews. ACS does not utilize the V frame in its pointing control.
# - The GSPA parameter is interpreted as the PA of the FGS1 Ideal coordinate system ("FGS ics"), and transformed to the
# spacecraft J frame via the FGS_to_STA k-constant matrix. In so doing, the GSPA is interpreted as the PA of th FGS Yics
# angle *at the position of the guide star*
# - Note that the FGS1 Yics differs from the V3 axis by ~ -1.25 degrees
#
# In PPS versions 14.14.1 and below, this was neglected and OPGS provides the computed V3PA at the guide star as the GSPA parameter.
# (incorrectly / inconsistently, resulting in unexpected attitudes)
# In later versions, PPS applies the compensation and provides the FGS Y ideal PA, which spacecraft ACS can then transform to the J frame
#
# APT program IDs for which segment guiding may be used (the plotted guide
# star may then be a segment PSF offset from the true star position).
PROGRAMS_WITH_SEGMENT_GUIDING = [1410, 1141, 1143, 1148, 1150, 1151, 1153, 1158,  # flight programs, not yet a complete list
                                 710, 741, 743,  # rehearsal programs
                                 646]  # WF Guiding rehearsal program
def get_image_cache_dir():
    """Return (creating it if needed) the directory used to cache downloaded images.

    The cache is an ``image_cache`` subdirectory next to this source file.
    """
    cache_dir = os.path.join(os.path.dirname(__file__), "image_cache")
    # makedirs(exist_ok=True) avoids the isdir()-then-mkdir() race when two
    # processes create the cache concurrently, and creates missing parents.
    os.makedirs(cache_dir, exist_ok=True)
    return cache_dir
def load_all_siafs():
    """Load the SIAF aperture definitions for every JWST instrument.

    Returns a dict keyed by short instrument code ('NRC', 'FGS', 'NIS',
    'NRS', 'MIRI') mapping to the corresponding pysiaf.Siaf object.
    """
    instrument_names = {
        'NRC': "NIRCam",
        'FGS': 'FGS',
        'NIS': 'NIRISS',
        'NRS': "NIRSpec",
        'MIRI': 'MIRI',
    }
    return {code: pysiaf.Siaf(name) for code, name in instrument_names.items()}
# Load the SIAFs once at import time and reuse via this module-level global,
# for efficiency (repeated pysiaf.Siaf construction is avoided).
SIAFS = load_all_siafs()
##--- Functions for plotting the visit field of VIEW
def retrieve_2mass_image(visit, ra=None, dec=None, verbose=True, redownload=False, filter='K', fov=0.35):
    """Obtain from Aladin a 2MASS image for the pointing location of a JWST visit

    Uses HIPS2FITS service; see http://alasky.u-strasbg.fr/hips-image-services/hips2fits

    FITS files for the retrieved images are cached for re-use, in a subdirectory
    `image_cache` next to where this code is.

    Parameters
    ----------
    visit : VisitFileContents object
        Representation of some JWST visit file
    ra, dec : float or None
        Center coordinates for the query, in degrees (ICRS). If BOTH are
        None, the visit's guide star coordinates are used instead.
    filter : string
        Which bandpass filter in 2MASS to get?
    fov : float
        Field of view of the retrieved image, in degrees. Non-default
        values are cached under a distinct filename.
    verbose : bool
        more output text
    redownload : bool
        Even if image is already downloaded and cached, ignore that and download from Vizier again.

    Returns
    -------
    Opened FITS HDU list for the (possibly cached) image; the caller is
    responsible for closing it.
    """
    # BUG FIX: use identity comparison with None (PEP 8) rather than ==.
    if ra is None and dec is None:
        # Default to this visit's guide star position.
        ra = visit.slew.GSRA
        dec = visit.slew.GSDEC

    hips_catalog = f'CDS/P/2MASS/{filter}'  # also try 2MASS/color
    width = 1024
    height = 1024

    visitname = os.path.splitext(os.path.basename(visit.filename))[0]
    if fov != 0.35:
        # if a non-default FOV is used, save that specially
        img_fn = os.path.join(get_image_cache_dir(), f'img_2mass_{filter}_{visitname}_fov{fov}.fits')
    else:
        img_fn = os.path.join(get_image_cache_dir(), f'img_2mass_{filter}_{visitname}.fits')

    if not os.path.exists(img_fn) or redownload:
        # optional / TBD - add PA into this query?
        # rotation_angle=90.0
        url = f'http://alasky.u-strasbg.fr/hips-image-services/hips2fits?hips={quote(hips_catalog)}&width={width}&height={height}&fov={fov}&projection=TAN&coordsys=icrs&ra={ra}&dec={dec}'
        if verbose:
            print(f"Retrieving 2MASS image from Aladin near ra={ra} & dec={dec}...")
        # Download to the cache file, then re-open from the cache below so
        # the returned HDU is always backed by the local file.
        with fits.open(url) as hdu:
            hdu.writeto(img_fn, overwrite=True)
        if verbose:
            print(f"Retrieved 2MASS image from Aladin and saved to {img_fn}")

    hdu = fits.open(img_fn)
    return hdu
def plot_visit_fov(visit, verbose=False, subplotspec=None, use_dss=False, ):
    """Make a nice annotated plot of a visit FOV

    Displays a 2MASS (or optionally DSS) sky image with the JWST apertures
    overlaid at this visit's attitude(s), annotating the guide star, V1 axis,
    J axis, and pointing metadata.

    Parameters
    ----------
    visit : VisitFileContents object
        Representation of some JWST visit file
    verbose : bool
        more output text
    subplotspec : matplotlib SubplotSpec, optional
        where to draw; a new figure is created when not given
    use_dss : bool
        retrieve a DSS image instead of 2MASS

    Returns
    -------
    The matplotlib axes instance used for the plot.
    """
    # let's center the plot on the master chief ray (MCF; between NIRCams) at the science attitude.
    # This works better than centering on the guide star.
    # Use SIAF transforms to infer the MCF pointing in RA, Dec.
    fgs_aperture_name = visit.get_guider_aperture(return_name=True)
    fgs_aperture = SIAFS['FGS'].apertures[fgs_aperture_name]
    attmat = visit.get_attitude_matrix(step='sci')
    fgs_aperture.set_attitude_matrix(attmat)
    mcf_ra, mcf_dec = fgs_aperture.tel_to_sky(0,-468) # RA, Dec of master chief ray location (between NIRCam A+B)

    # Compute RA, Dec, PA of the V1 axis, for comparison to values in SciOps OP delivery report
    v1_ra, v1_dec = fgs_aperture.tel_to_sky(0,0) # RA, Dec of V1 axis reference location
    v1pv3_ra, v1pv3_dec = fgs_aperture.tel_to_sky(0,1) # Will use to compute V3PA at V1 axis reference location
    v1c = coords.SkyCoord(v1_ra, v1_dec, unit='deg', frame='icrs')
    v1pv3c = coords.SkyCoord(v1pv3_ra, v1pv3_dec, unit='deg', frame='icrs')
    v3pa_at_v1 = v1c.position_angle(v1pv3c).deg

    # Compute RA, Dec of the J frame axis
    jframe_aperture = SIAFS['FGS'].apertures['J-FRAME']
    jframe_aperture.set_attitude_matrix(attmat)
    j_ra, j_dec = jframe_aperture.idl_to_sky(0,0) # RA, Dec of V1 axis reference location

    # Retrieve the background sky image centered on the MCF
    if use_dss:
        img_hdu = retrieve_dss_image(visit, ra = mcf_ra, dec=mcf_dec, verbose=verbose)
    else:
        img_hdu = retrieve_2mass_image(visit, ra = mcf_ra, dec=mcf_dec, verbose=verbose)
    wcs = astropy.wcs.WCS(img_hdu[0].header)

    #-- Setup plot and axes
    if subplotspec is None:
        plt.figure(figsize=(16,9))
        ax = plt.subplot(projection=wcs)
    else:
        ax = plt.subplot(subplotspec,projection=wcs)

    #-- Display the 2MASS image
    norm = astropy.visualization.ImageNormalize(img_hdu[0].data,
                                                interval=astropy.visualization.PercentileInterval(99.99),
                                                stretch=astropy.visualization.AsinhStretch(a=0.001))
    plt.imshow(img_hdu[0].data, cmap='magma', norm=norm, origin='lower',
               zorder=-50) # negative zorder to be below pysiaf aperture fill zorder

    #-- Annotate coordinate grid
    overlay = ax.get_coords_overlay('icrs')
    overlay.grid(color='white', ls='dotted', alpha=0.5)

    #-- Mark guide star
    gscolor='yellow'
    v1color='white'
    Jcolor = 'chartreuse'
    slew = visit.slew
    # Compute expected V3PA.
    # NOTE(review): when _no_gspa_yoffset is set (PPS <= 14.14.1 convention,
    # per the file header comments), GSPA is already the V3PA at the guide
    # star; otherwise add the FGS Y-ideal rotation. Confirm against PPS docs.
    v3pa_at_gs = visit.slew.GSPA + (0 if visit._no_gspa_yoffset else fgs_aperture.V3IdlYAngle)
    guidemode = slew.GUIDEMODE
    if guidemode=='COARSE':
        gslabel = "\n'pseudo guide star'\n(slew coordinates\n for Coarse pointing)"
    else:
        gslabel = "\nguide star"
    plt.scatter(visit.slew.GSRA, visit.slew.GSDEC, s=200, edgecolor=gscolor, facecolor='none',
                transform=ax.get_transform('icrs'))
    plt.text(visit.slew.GSRA, visit.slew.GSDEC, gslabel,
             transform=ax.get_transform('icrs'),
             horizontalalignment='left', verticalalignment='top', color=gscolor)

    # Place the V1 and J labels on opposite sides so they don't collide.
    if v1_ra > j_ra:
        v1_ha, j_ha = 'right', 'left'
    else:
        v1_ha, j_ha = 'left', 'right'
    plt.scatter([v1_ra], [v1_dec], marker='+', s=50, color=v1color,
                transform=ax.get_transform('icrs'))
    plt.text(v1_ra, v1_dec, "\nV1 axis",
             transform=ax.get_transform('icrs'), fontsize=10,
             horizontalalignment=v1_ha, verticalalignment='top', color=v1color)
    plt.scatter([j_ra], [j_dec], marker='*', s=50, color=Jcolor,
                transform=ax.get_transform('icrs'))
    plt.text(j_ra, j_dec, "\nJ axis",
             transform=ax.get_transform('icrs'), fontsize=10,
             horizontalalignment=j_ha, verticalalignment='top', color=Jcolor)

    # subsequent annotations can mess up the axes limits, so save here and restore later
    # this is a hacky workaround
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()

    #-- Plot JWST apertures at that visit's orientation(s)
    if guidemode=='COARSE':
        # we only have a science attitude
        attmatsci = visit.get_attitude_matrix(step='slew')
        guide_det_info = ""
    else:
        # there are possibly distinct science and ID attitudes
        attmatsci = visit.get_attitude_matrix(step='sci')
        attmatid = visit.get_attitude_matrix(step='id')
        fgs_detector = 1 if visit.slew.DETECTOR=='GUIDER1' else 2
        guide_det_info = f", on FGS{fgs_detector}"
        # Plot FGS aperture at ID attitude
        fgs_aperture.set_attitude_matrix(attmatid)
        fgs_aperture.plot(frame='sky', transform=ax.get_transform('icrs'), color=gscolor, fill=False)
        plt.text(0.02, 0.02, f"Yellow = ID attitude",
                 color=gscolor, transform=ax.transAxes, horizontalalignment='left')

    # Plot all apertures, faintly
    pysiaf.siaf.plot_main_apertures(frame='sky', darkbg=True,
                                    attitude_matrix=attmatsci, transform=ax.get_transform('icrs'), alpha=0.4, fill=False)

    # Plot the active apertures, more visibly
    for apername in visit.apertures_used():
        # look up aperture from that aperture name
        aper_key = apername[0:4] if apername.startswith("M") else apername[0:3] # NRC, NRS, NIS, FGS, or MIRI
        aperture = SIAFS[aper_key][apername]
        # plot at the correct attitude
        aperture.set_attitude_matrix(attmatsci)
        aperture.plot(frame='sky', transform=ax.get_transform('icrs'), color='cyan', fill=True, fill_alpha=0.2)

    # Highlight main WFSC aperture, NIRCam A3
    # TODO make this smart about module A vs B? And about other templates
    if 'WFSC' in visit.template:
        nrca3_aperture = pysiaf.Siaf("NIRCam").apertures['NRCA3_FULL']
        nrca3_aperture.set_attitude_matrix(attmatsci)
        nrca3_aperture.plot(frame='sky', transform=ax.get_transform('icrs'), color='white', fill=False)

    #-- Textual annotations around the plot edges
    plt.text(0.02, 0.98, f"Pointing for {os.path.basename(visit.filename)}", color='white', fontweight='bold',
             transform=ax.transAxes, fontsize=12, verticalalignment='top')
    template_ypos = 0.94 if len(visit.template) > 35 else 0.98 # avoid text overlap for very long names
    plt.text(0.98, template_ypos, f"Template = {visit.template}", color='white',
             transform=ax.transAxes, horizontalalignment='right',
             verticalalignment='top')
    plt.text(0.02, 0.05, f"Guide star at {visit.slew.GSRA:.7f}, {visit.slew.GSDEC:.7f}, GSPA={visit.slew.GSPA}\nGuide mode = {guidemode}{guide_det_info}",
             color=gscolor, transform=ax.transAxes)
    plt.text(0.98, 0.05, f"Shaded apertures are used\n in this observation",
             color='cyan', transform=ax.transAxes, horizontalalignment='right')
    plt.text(0.98, 0.02, f"Cyan = Science attitude",
             color='cyan', transform=ax.transAxes, horizontalalignment='right')
    plt.text(0.36, 0.02, f"V1 axis at {v1_ra:.3f}, {v1_dec:.3f}\nV3PA={v3pa_at_gs:.3f} at GS, {v3pa_at_v1:.3f} at V1 axis",
             color='white', transform=ax.transAxes)

    # The visit filename encodes the APT program ID in characters 1-5.
    apt_program_id = int(os.path.basename(visit.filename)[1:6])
    if verbose:
        print(f"APT program: {apt_program_id}")
    if apt_program_id in PROGRAMS_WITH_SEGMENT_GUIDING:
        plt.text(0.02, 0.125, f"Segment guiding may be used in this program\nThe guide star indicated may be a segment PSF offset from the star location",
                 color=gscolor, transform=ax.transAxes)
    if visit._no_gspa_yoffset:
        plt.text(0.00, -0.035,
                 f"Interpreting GSPA parameter like PPS$\leq$14.14.1:\nGSPA does not include FGS Yics rotation.",
                 color='orangered', transform=ax.transAxes, verticalalignment='top')

    # re-establish the axes limits
    ax.set_xlim(*xlim)
    ax.set_ylim(*ylim)
    return ax
def plot_mosaic_pointings_fov(visitlist, center_on_visit=0, subplotspec=None, verbose=False, use_dss=False, title=None,
crop_for_no12=False):
"""Plot multiple visits together in one FOV as a mosaic
CAUTION: Only tested so far on an example mock OTE-01 visit set from Normal Ops 12,
which is a special case for NIRCam mosaic in coarse point.
This is not (yet..?) a generalized mosaic viewer! YMMV.
"""
# Use one visit, by default the first visit in the list, to set up the plot FOV and image
visit = visitlist[center_on_visit]
fgs_aperture_name = visit.get_guider_aperture(return_name=True)
fgs_aperture = SIAFS['FGS'].apertures[fgs_aperture_name]
attmat = visit.get_attitude_matrix(step='sci')
fgs_aperture.set_attitude_matrix(attmat)
mcf_ra, mcf_dec = fgs_aperture.tel_to_sky(0, -468) # RA, Dec of master chief ray location (between NIRCam A+B)
# Retrieve image
if use_dss:
img_hdu = retrieve_dss_image(visit, ra=mcf_ra, dec=mcf_dec, verbose=verbose, fov=0.5)
else:
img_hdu = retrieve_2mass_image(visit, ra=mcf_ra, dec=mcf_dec, verbose=verbose, fov=0.5)
wcs = | |
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import ctypes, enum
from qiling.os.macos.structs import POINTER64
base_event_normal = 0
class AutoNumberNormalEvent(enum.Enum):
    """Enum base whose members auto-number sequentially from ``base_event_normal``.

    Members are declared with the functional ``NAME = ()`` form; each new member
    receives the next integer (count of already-defined members plus the base).
    """
    def __new__(cls):
        # Derive the value from how many members have been defined so far.
        member = object.__new__(cls)
        member._value_ = base_event_normal + len(cls.__members__)
        return member
class MacOSEventType(AutoNumberNormalEvent):
    """Event identifiers raised by the macOS kernel-emulation hooks.

    Values are auto-assigned sequentially by AutoNumberNormalEvent starting at
    ``base_event_normal``, so the declaration order below is significant — do
    not reorder or insert members mid-list.
    """
    EV_SYSCTL = ()
    EV_PROCESS = ()
    EV_NETWORK = ()
    # kernel control (kern_ctl) callbacks
    EV_CTL_CONNECT = ()
    EV_CTL_DISCONNECT = ()
    EV_CTL_SEND = ()
    EV_CTL_SETOPT = ()
    EV_CTL_GETOPT = ()
    EV_CTL_RCVD_FUNC = ()
    EV_CTL_SEND_LIST_FUNC = ()
    EV_CTL_BIND_FUNC = ()
    EV_THREAD = ()
    # socket filter (sflt) callbacks
    EV_SFLT_UNREGISTERED = ()
    EV_SFLT_ATTACH = ()
    EV_SFLT_DETACH = ()
    EV_SFLT_NOTIFY_CONNECTING = ()
    EV_SFLT_NOTIFY_CONNECTED = ()
    EV_SFLT_NOTIFY_DISCONNECTING = ()
    EV_SFLT_NOTIFY_DISCONNECTED = ()
    EV_SFLT_NOTIFY_FLUSH_READ = ()
    EV_SFLT_NOTIFY_SHUTDOWN = ()
    EV_SFLT_NOTIFY_CANTRECVMORE = ()
    EV_SFLT_NOTIFY_CANTSENDMORE = ()
    EV_SFLT_NOTIFY_CLOSING = ()
    EV_SFLT_NOTIFY_BOUND = ()
    EV_SFLT_GETPEERNAME = ()
    EV_SFLT_GETSOCKNAME = ()
    EV_SFLT_DATA_IN = ()
    EV_SFLT_DATA_OUT = ()
    EV_SFLT_CONNECT_IN = ()
    EV_SFLT_CONNECT_OUT = ()
    EV_SFLT_BIND = ()
    EV_SFLT_SETOPTION = ()
    EV_SFLT_GETOPTION = ()
    EV_SFLT_LISTEN = ()
    EV_SFLT_IOCTL = ()
    # kauth authorization scopes
    EV_KAUTH_GENERIC = ()
    EV_KAUTH_PROCESS = ()
    EV_KAUTH_VNODE = ()
    EV_KAUTH_FILEOP = ()
    # IP filter (ipf) callbacks
    EV_IPF_INPUT = ()
    EV_IPF_OUTPUT = ()
    EV_IPF_DETACH = ()
# enum {
# sock_evt_connecting = 1,
# sock_evt_connected = 2,
# sock_evt_disconnecting = 3,
# sock_evt_disconnected = 4,
# sock_evt_flush_read = 5,
# sock_evt_shutdown = 6, /* param points to an integer specifying how (read, write, or both) see man 2 shutdown */
# sock_evt_cantrecvmore = 7,
# sock_evt_cantsendmore = 8,
# sock_evt_closing = 9,
# sock_evt_bound = 10
# };
base_event_socket = 0x1000
class SocketEvent(enum.Enum):
    """Socket event codes, offset from ``base_event_socket``.

    The offsets mirror XNU's ``sock_evt_*`` enum (sock_evt_connecting == 1 ...
    sock_evt_bound == 10), so each member is base + kernel constant.
    """
    CONNECTING = base_event_socket + 1
    CONNECTED = base_event_socket + 2
    DISCONNECTING = base_event_socket + 3
    DISCONNECTED = base_event_socket + 4
    FLUSH_READ = base_event_socket + 5
    SHUTDOWN = base_event_socket + 6
    CANTRECVMORE = base_event_socket + 7
    CANTSENDMORE = base_event_socket + 8
    CLOSING = base_event_socket + 9
    BOUND = base_event_socket + 10
class NetworkProtocol(enum.Enum):
    """IP protocol numbers (``IPPROTO_*``) as declared in macOS ``<netinet/in.h>``.

    Values are the IANA-assigned protocol numbers; the list is intentionally
    sparse (e.g. 55-57 and 101-102 are absent, matching the header).
    """
    IPPROTO_IP = 0
    IPPROTO_ICMP = 1
    IPPROTO_IGMP = 2
    IPPROTO_GGP = 3
    IPPROTO_IPV4 = 4
    IPPROTO_TCP = 6
    IPPROTO_ST = 7
    IPPROTO_EGP = 8
    IPPROTO_PIGP = 9
    IPPROTO_RCCMON = 10
    IPPROTO_NVPII = 11
    IPPROTO_PUP = 12
    IPPROTO_ARGUS = 13
    IPPROTO_EMCON = 14
    IPPROTO_XNET = 15
    IPPROTO_CHAOS = 16
    IPPROTO_UDP = 17
    IPPROTO_MUX = 18
    IPPROTO_MEAS = 19
    IPPROTO_HMP = 20
    IPPROTO_PRM = 21
    IPPROTO_IDP = 22
    IPPROTO_TRUNK1 = 23
    IPPROTO_TRUNK2 = 24
    IPPROTO_LEAF1 = 25
    IPPROTO_LEAF2 = 26
    IPPROTO_RDP = 27
    IPPROTO_IRTP = 28
    IPPROTO_TP = 29
    IPPROTO_BLT = 30
    IPPROTO_NSP = 31
    IPPROTO_INP = 32
    IPPROTO_SEP = 33
    IPPROTO_3PC = 34
    IPPROTO_IDPR = 35
    IPPROTO_XTP = 36
    IPPROTO_DDP = 37
    IPPROTO_CMTP = 38
    IPPROTO_TPXX = 39
    IPPROTO_IL = 40
    IPPROTO_IPV6 = 41
    IPPROTO_SDRP = 42
    IPPROTO_ROUTING = 43
    IPPROTO_FRAGMENT = 44
    IPPROTO_IDRP = 45
    IPPROTO_RSVP = 46
    IPPROTO_GRE = 47
    IPPROTO_MHRP = 48
    IPPROTO_BHA = 49
    IPPROTO_ESP = 50
    IPPROTO_AH = 51
    IPPROTO_INLSP = 52
    IPPROTO_SWIPE = 53
    IPPROTO_NHRP = 54
    IPPROTO_ICMPV6 = 58
    IPPROTO_NONE = 59
    IPPROTO_DSTOPTS = 60
    IPPROTO_AHIP = 61
    IPPROTO_CFTP = 62
    IPPROTO_HELLO = 63
    IPPROTO_SATEXPAK = 64
    IPPROTO_KRYPTOLAN = 65
    IPPROTO_RVD = 66
    IPPROTO_IPPC = 67
    IPPROTO_ADFS = 68
    IPPROTO_SATMON = 69
    IPPROTO_VISA = 70
    IPPROTO_IPCV = 71
    IPPROTO_CPNX = 72
    IPPROTO_CPHB = 73
    IPPROTO_WSN = 74
    IPPROTO_PVP = 75
    IPPROTO_BRSATMON = 76
    IPPROTO_ND = 77
    IPPROTO_WBMON = 78
    IPPROTO_WBEXPAK = 79
    IPPROTO_EON = 80
    IPPROTO_VMTP = 81
    IPPROTO_SVMTP = 82
    IPPROTO_VINES = 83
    IPPROTO_TTP = 84
    IPPROTO_IGP = 85
    IPPROTO_DGP = 86
    IPPROTO_TCF = 87
    IPPROTO_IGRP = 88
    IPPROTO_OSPFIGP = 89
    IPPROTO_SRPC = 90
    IPPROTO_LARP = 91
    IPPROTO_MTP = 92
    IPPROTO_AX25 = 93
    IPPROTO_IPEIP = 94
    IPPROTO_MICP = 95
    IPPROTO_SCCSP = 96
    IPPROTO_ETHERIP = 97
    IPPROTO_ENCAP = 98
    IPPROTO_APES = 99
    IPPROTO_GMTP = 100
    IPPROTO_PIM = 103
    IPPROTO_IPCOMP = 108
    IPPROTO_PGM = 113
    IPPROTO_SCTP = 132
    # pseudo / implementation-internal values below
    IPPROTO_DIVERT = 254
    IPPROTO_RAW = 255
    IPPROTO_MAX = 256
    IPPROTO_DONE = 257
# KAUTH_FILEOP_OPEN 1
# KAUTH_FILEOP_CLOSE 2
# KAUTH_FILEOP_RENAME 3
# KAUTH_FILEOP_EXCHANGE 4
# KAUTH_FILEOP_LINK 5
# KAUTH_FILEOP_EXEC 6
# KAUTH_FILEOP_DELETE 7
# KAUTH_FILEOP_WILL_RENAME 8
class Kauth(enum.Enum):
    """``KAUTH_FILEOP_*`` action codes of the kauth fileop scope (``<sys/kauth.h>``)."""
    KAUTH_FILEOP_OPEN = 1
    KAUTH_FILEOP_CLOSE = 2
    KAUTH_FILEOP_RENAME = 3
    KAUTH_FILEOP_EXCHANGE = 4
    KAUTH_FILEOP_LINK = 5
    KAUTH_FILEOP_EXEC = 6
    KAUTH_FILEOP_DELETE = 7
    KAUTH_FILEOP_WILL_RENAME = 8
# struct sysctl_oid {
# struct sysctl_oid_list *oid_parent;
# SLIST_ENTRY(sysctl_oid) oid_link;
# int oid_number;
# int oid_kind;
# void *oid_arg1;
# int oid_arg2;
# const char *oid_name;
# int (*oid_handler) SYSCTL_HANDLER_ARGS;
# const char *oid_fmt;
# const char *oid_descr; /* offsetof() field / long description */
# int oid_version;
# int oid_refcnt;
# };
class sysctl_oid_t(ctypes.Structure):
    """In-memory image of XNU's ``struct sysctl_oid`` (64-bit layout)."""
    class slist_entry(ctypes.Structure):
        """Singly-linked list link (SLIST_ENTRY(sysctl_oid))."""
        _fields_ = (
            ("sle_next", POINTER64),
        )
    _fields_ = (
        ("oid_parent", POINTER64),
        ("oid_link", slist_entry),
        ("oid_number", ctypes.c_int32),
        ("oid_kind", ctypes.c_int32),
        ("oid_arg1", POINTER64),
        ("oid_arg2", ctypes.c_int32),
        ("oid_name", POINTER64),
        ("oid_handler", POINTER64),
        ("oid_fmt", POINTER64),
        ("oid_descr", POINTER64),
        ("oid_version", ctypes.c_int32),
        ("oid_refcnt", ctypes.c_int32),
    )
    def __init__(self, ql, base):
        # Keep a handle to the emulator and the struct's guest address.
        self.ql = ql
        self.base = base
    def updateToMem(self):
        """Write this structure's bytes back to guest memory at ``self.base``."""
        self.ql.mem.write(self.base, bytes(self))
    def loadFromMem(self):
        """Return a fresh instance populated from guest memory at ``self.base``."""
        raw = self.ql.mem.read(self.base, ctypes.sizeof(self))
        fresh = type(self).from_buffer(raw)
        fresh.ql = self.ql
        fresh.base = self.base
        return fresh
    def dump(self):
        """Print every field: pointers in hex, the list link via its next pointer."""
        for name, _ctype in self._fields_:
            value = getattr(self, name)
            if isinstance(value, POINTER64):
                print("%s: 0x%x" % (name, value.value))
            elif isinstance(value, sysctl_oid_t.slist_entry):
                print("%s: Struct( 0x%x )" % (name, value.sle_next.value))
            else:
                print("%s: 0x%x" % (name, value))
class sysctl_args_t(ctypes.Structure):
    """Argument block of the sysctl syscall (two-level integer name variant)."""
    _fields_ = (
        ("name", ctypes.c_int32 * 2),
        ("namelen", ctypes.c_uint32),
        ("old", POINTER64),
        ("oldlenp", POINTER64),
        ("new", POINTER64),
        ("newlen", ctypes.c_uint64),
    )
    def __init__(self, ql, base):
        # Remember the owning emulator and the struct's guest address.
        self.ql = ql
        self.base = base
    def updateToMem(self):
        """Flush this structure's bytes back to guest memory."""
        self.ql.mem.write(self.base, bytes(self))
    def loadFromMem(self):
        """Return a fresh instance populated from guest memory at ``self.base``."""
        raw = self.ql.mem.read(self.base, ctypes.sizeof(self))
        fresh = type(self).from_buffer(raw)
        fresh.ql = self.ql
        fresh.base = self.base
        return fresh
# struct sysctlbyname_args {
# const char * name
# size_t namelen
# void * old
# size_t * oldlenp
# void * new
# size_t newlen
# }
class sysctlbyname_args_t(ctypes.Structure):
    """Argument block of the sysctlbyname syscall (string-named variant)."""
    _fields_ = (
        ("name", POINTER64),
        ("namelen", ctypes.c_size_t),
        ("old", POINTER64),
        ("oldlenp", POINTER64),
        ("new", POINTER64),
        ("newlen", ctypes.c_size_t),
    )
    def __init__(self, ql, base):
        # Remember the owning emulator and the struct's guest address.
        self.ql = ql
        self.base = base
    def updateToMem(self):
        """Flush this structure's bytes back to guest memory."""
        self.ql.mem.write(self.base, bytes(self))
    def loadFromMem(self):
        """Return a fresh instance populated from guest memory at ``self.base``."""
        raw = self.ql.mem.read(self.base, ctypes.sizeof(self))
        fresh = type(self).from_buffer(raw)
        fresh.ql = self.ql
        fresh.base = self.base
        return fresh
# struct sysctl_req {
# struct proc *p;
# int lock;
# user_addr_t oldptr; /* pointer to user supplied buffer */
# size_t oldlen; /* user buffer length (also returned) */
# size_t oldidx; /* total data iteratively copied out */
# int (*oldfunc)(struct sysctl_req *, const void *, size_t);
# user_addr_t newptr; /* buffer containing new value */
# size_t newlen; /* length of new value */
# size_t newidx; /* total data iteratively copied in */
# int (*newfunc)(struct sysctl_req *, void *, size_t);
# };
class sysctl_req_t(ctypes.Structure):
    """In-memory image of XNU's ``struct sysctl_req`` (64-bit layout)."""
    _fields_ = (
        ("p", POINTER64),             # struct proc *
        ("lock", ctypes.c_int32),
        ("oldptr", POINTER64),        # user buffer receiving the current value
        ("oldlen", ctypes.c_size_t),
        ("oldidx", ctypes.c_size_t),
        ("oldfunc", POINTER64),       # int (*oldfunc)(struct sysctl_req *, const void *, size_t)
        ("newptr", POINTER64),        # user buffer holding the new value
        ("newlen", ctypes.c_size_t),
        ("newidx", ctypes.c_size_t),
        # BUG FIX: newfunc is a function pointer (int (*newfunc)(...)), exactly
        # like oldfunc above — see the C declaration in the comment block.  It
        # was declared ctypes.c_int32, which truncated the field to 4 bytes and
        # made sizeof(sysctl_req_t) disagree with the kernel's struct layout.
        ("newfunc", POINTER64),
    )
    def __init__(self, ql, base):
        # Keep a handle to the emulator and the struct's guest address.
        self.ql = ql
        self.base = base
    def updateToMem(self):
        """Write this structure's bytes back to guest memory at ``self.base``."""
        self.ql.mem.write(self.base, bytes(self))
    def loadFromMem(self):
        """Return a fresh instance populated from guest memory at ``self.base``."""
        data = self.ql.mem.read(self.base, ctypes.sizeof(self))
        newObj = type(self).from_buffer(data)
        newObj.ql = self.ql
        newObj.base = self.base
        return newObj
# struct kern_ctl_reg
# {
# /* control information */
# char ctl_name[MAX_KCTL_NAME];
# u_int32_t ctl_id;
# u_int32_t ctl_unit;
#
# /* control settings */
# u_int32_t ctl_flags;
# u_int32_t ctl_sendsize;
# u_int32_t ctl_recvsize;
#
# /* Dispatch functions */
# ctl_connect_func ctl_connect;
# ctl_disconnect_func ctl_disconnect;
# ctl_send_func ctl_send;
# ctl_setopt_func ctl_setopt;
# ctl_getopt_func ctl_getopt;
# #ifdef KERNEL_PRIVATE
# ctl_rcvd_func ctl_rcvd; /* Only valid if CTL_FLAG_REG_EXTENDED is set */
# ctl_send_list_func ctl_send_list; /* Only valid if CTL_FLAG_REG_EXTENDED is set */
# ctl_bind_func ctl_bind;
# #endif /* KERNEL_PRIVATE */
# };
class kern_ctl_reg_t(ctypes.Structure):
    """In-memory image of XNU's ``struct kern_ctl_reg`` (kernel-control registration)."""
    _fields_ = (
        ("ctl_name", ctypes.c_char * 96),   # MAX_KCTL_NAME
        ("ctl_id", ctypes.c_uint32),
        ("ctl_unit", ctypes.c_uint32),
        ("ctl_flags", ctypes.c_uint32),
        ("ctl_sendsize", ctypes.c_uint32),
        ("ctl_recvsize", ctypes.c_uint32),
        # dispatch callbacks
        ("ctl_connect", POINTER64),
        ("ctl_disconnect", POINTER64),
        ("ctl_send", POINTER64),
        ("ctl_setopt", POINTER64),
        ("ctl_getopt", POINTER64),
        # KERNEL_PRIVATE extensions (valid with CTL_FLAG_REG_EXTENDED)
        ("ctl_rcvd", POINTER64),
        ("ctl_send_list", POINTER64),
        ("ctl_bind", POINTER64),
    )
    def __init__(self, ql, base):
        # Keep a handle to the emulator and the struct's guest address.
        self.ql = ql
        self.base = base
    def updateToMem(self):
        """Write this structure's bytes back to guest memory at ``self.base``."""
        self.ql.mem.write(self.base, bytes(self))
    def loadFromMem(self):
        """Return a fresh instance populated from guest memory at ``self.base``."""
        raw = self.ql.mem.read(self.base, ctypes.sizeof(self))
        fresh = type(self).from_buffer(raw)
        fresh.ql = self.ql
        fresh.base = self.base
        return fresh
    def dump(self):
        """Print each field: pointers in hex, counters in decimal, the name decoded."""
        for name, _ctype in self._fields_:
            value = getattr(self, name)
            if isinstance(value, POINTER64):
                print("%s: 0x%x" % (name, value.value))
            elif isinstance(value, int):
                print("%s: %d" % (name, value))
            elif isinstance(value, bytes):
                print("%s: %s" % (name, value.decode()))
# struct sockaddr_ctl {
# u_char sc_len; /* depends on size of bundle ID string */
# u_char sc_family; /* AF_SYSTEM */
# u_int16_t ss_sysaddr; /* AF_SYS_KERNCONTROL */
# u_int32_t sc_id; /* Controller unique identifier */
# u_int32_t sc_unit; /* Developer private unit number */
# u_int32_t sc_reserved[5];
# };
class sockaddr_ctl_t(ctypes.Structure):
    """In-memory image of ``struct sockaddr_ctl`` (AF_SYSTEM control address)."""
    _fields_ = (
        ("sc_len", ctypes.c_ubyte),          # total length
        ("sc_family", ctypes.c_ubyte),       # AF_SYSTEM
        ("ss_sysaddr", ctypes.c_uint16),     # AF_SYS_KERNCONTROL
        ("sc_id", ctypes.c_uint32),          # controller unique identifier
        ("sc_unit", ctypes.c_uint32),        # developer private unit number
        ("sc_reserved", ctypes.c_uint32 * 5),
    )
    def __init__(self, ql, base):
        # Keep a handle to the emulator and the struct's guest address.
        self.ql = ql
        self.base = base
    def updateToMem(self):
        """Write this structure's bytes back to guest memory at ``self.base``."""
        self.ql.mem.write(self.base, bytes(self))
    def loadFromMem(self):
        """Return a fresh instance populated from guest memory at ``self.base``."""
        raw = self.ql.mem.read(self.base, ctypes.sizeof(self))
        fresh = type(self).from_buffer(raw)
        fresh.ql = self.ql
        fresh.base = self.base
        return fresh
# struct m_hdr {
# struct mbuf *mh_next; /* next buffer in chain */
# struct mbuf *mh_nextpkt; /* next chain in queue/record */
# caddr_t mh_data; /* location of data */
# int32_t mh_len; /* amount of data in this mbuf */
# u_int16_t mh_type; /* type of data in this mbuf */
# u_int16_t mh_flags; /* flags; see below */
# }
class m_hdr_t(ctypes.Structure):
    """mbuf header (``struct m_hdr``): chain links plus data location/length/type/flags."""
    _fields_ = (
        ("mh_next", POINTER64),         # next buffer in chain
        ("mh_nextpkt", POINTER64),      # next chain in queue/record
        ("mh_data", POINTER64),         # location of data
        ("mh_len", ctypes.c_int32),     # amount of data in this mbuf
        ("mh_type", ctypes.c_uint16),   # type of data in this mbuf
        ("mh_flags", ctypes.c_uint16),  # flags
    )
class tag_t(ctypes.Structure):
    """Holder for an mbuf's packet-tag list: a single guest pointer."""
    _fields_ = (
        ("packet_tags", POINTER64),
    )
# struct tcp_pktinfo | |
<filename>notebooks/__code/normalization/normalization_with_simplify_selection.py
import os
import collections
import numpy as np
from ipywidgets import widgets
from IPython.core.display import display, HTML
from enum import Enum
from NeuNorm.normalization import Normalization
from __code import file_handler
from __code import metadata_handler
from __code.ipywe import myfileselector
JSON_DEBUGGING = False
class MetadataName(Enum):
    """TIFF tag IDs of the instrument metadata used to match sample/OB/DF images."""
    EXPOSURE_TIME = 65027
    DETECTOR_MANUFACTURER = 65026
    APERTURE_HR = 65068
    APERTURE_HL = 65070
    APERTURE_VT = 65066
    APERTURE_VB = 65064
    def __str__(self):
        # BUG FIX: __str__ must return a str; returning the raw int value
        # (self.value) made str(member) raise TypeError.
        return str(self.value)
# METADATA_KEYS = [EXPOSURE_TIME, APERTURE_HR, APERTURE_HL, APERTURE_VT, APERTURE_VB]
# METADATA_KEYS = {'ob': [EXPOSURE_TIME, DETECTOR_MANUFACTURER, APERTURE_HR, APERTURE_HL, APERTURE_VT,
# APERTURE_VB],
# 'df': [EXPOSURE_TIME, DETECTOR_MANUFACTURER],
# 'all': [DETECTOR_MANUFACTURER, EXPOSURE_TIME, APERTURE_HR, APERTURE_HL, APERTURE_VT,
# APERTURE_VB]}
# Which TIFF tags must agree when associating open-beam ('ob'), dark-field
# ('df') and sample ('all') images with each other.  Dark fields only need the
# exposure time and detector to match; OBs must also match all four apertures.
METADATA_KEYS = {'ob' : [MetadataName.EXPOSURE_TIME,
                         MetadataName.DETECTOR_MANUFACTURER,
                         MetadataName.APERTURE_HR,
                         MetadataName.APERTURE_HL,
                         MetadataName.APERTURE_VT,
                         MetadataName.APERTURE_VB],
                 'df' : [MetadataName.EXPOSURE_TIME,
                         MetadataName.DETECTOR_MANUFACTURER],
                 'all': [MetadataName.EXPOSURE_TIME,
                         MetadataName.DETECTOR_MANUFACTURER,
                         MetadataName.APERTURE_HR,
                         MetadataName.APERTURE_HL,
                         MetadataName.APERTURE_VT,
                         MetadataName.APERTURE_VB]}
# NOTE(review): this plain class re-declares MetadataName and SHADOWS the Enum
# of the same name defined above.  From here on, MetadataName.EXPOSURE_TIME etc.
# are bare ints, while METADATA_KEYS (built earlier) still holds Enum members —
# presumably intentional so metadata dicts can be keyed by plain tag ints, but
# TODO confirm; the duplication is fragile.
class MetadataName:
    EXPOSURE_TIME = 65027
    DETECTOR_MANUFACTURER = 65026
    APERTURE_HR = 65068
    APERTURE_HL = 65070
    APERTURE_VT = 65066
    APERTURE_VB = 65064
MAX_DF_COUNTS_ALLOWED = 900  # presumably an upper bound on acceptable dark-field counts — confirm
METADATA_ERROR_ALLOWED = 1  # tolerance presumably used when comparing metadata values — confirm
LIST_METADATA_NOT_INSTRUMENT_RELATED = ['filename', 'time_stamp', 'time_stamp_user_format']
class NormalizationWithSimplifySelection(object):
working_dir = ''
    def __init__(self, working_dir=''):
        """Store the IPTS working directory and initialize all bookkeeping containers.

        working_dir: root folder used to locate the sample images and the
        'raw/ob' and 'raw/df' sub-folders.
        """
        self.working_dir = working_dir
        self.list_of_images = []
        self.input_data_folder = []
        # per-file metadata, keyed by file index:
        # {0: {65027: 55.0,
        #      65028: 59.2,
        #      65029: 1.0,
        #      'filename': 'full_filename',
        #      'time_stamp': 1454544.34545,
        #      'time_stamp_user_format': '2019-11-19 02:48:47'},
        #  ...,
        # }
        self.sample_metadata_dict = {}
        self.ob_metadata_dict = {}
        self.df_metadata_dict = {}
        # key of dictionary being the acquisition time
        # {50: {'config0': {'list_sample': [self.sample_metadata_dict[0],
        #                                   self.sample_metadata_dict[1],..],
        #                   'list_ob': [self.ob_metadata_dict[0],
        #                               self.ob_metadata_dict[1],
        #                               ...],
        #                   'list_df': [file1, file2, file3],
        #                   'metadata_infos': {},
        #                   'first_images': {'sample': {},
        #                                    'ob': {},
        #                                    'df': {}},
        #                   'last_images': {'sample': {},
        #                                   'ob': {},
        #                                   'df': {}},
        #                   'time_range_s_selected': {'before': np.NaN,
        #                                             'after': np.NaN},
        #                   'time_range_s': {'before': np.NaN,
        #                                    'after': np.NaN},
        #                  },
        #       'config1': {...},
        #      },
        #  30: {...},
        # }
        self.final_full_master_dict = {}
        # same as the final_full_master_dict but in this one, the OB outside the time range
        # defined as excluded
        self.final_with_time_range_master_dict = {}
def select_sample_images_and_create_configuration(self):
# self.select_sample_images()
self.select_sample_folder()
def select_sample_images(self):
list_of_images_widget = myfileselector.MyFileSelectorPanel(instruction='select images'
'to normalize',
start_dir=self.working_dir,
next=self.retrieve_sample_metadata,
multiple=True)
list_of_images_widget.show()
def select_sample_folder(self):
folder_sample_widget = myfileselector.MyFileSelectorPanel(instruction='select folder of images to normalize',
start_dir=self.working_dir,
next=self.retrieve_sample_metadata_from_sample_folder,
type='directory',
multiple=False)
folder_sample_widget.show()
def retrieve_sample_metadata_from_sample_folder(self, sample_folder):
[list_of_images, _] = file_handler.retrieve_list_of_most_dominant_extension_from_folder(folder=sample_folder)
can_we_continue = self.images_files_found_in_list(list_of_images)
if can_we_continue:
self.retrieve_sample_metadata(list_of_images)
else:
display(HTML('<span style="font-size: 20px; color:Red">No images found in the folder selected!</span>'))
def images_files_found_in_list(self, list_of_images):
for _file in list_of_images:
if (".tiff" in _file) or (".tif" in _file) or (".fits" in _file):
return True
return False
    def retrieve_sample_metadata(self, list_of_images):
        """Run the full matching pipeline once the sample images are known.

        Order matters: sample metadata -> OB/DF auto-discovery -> matching ->
        first/last OB bookkeeping -> time ranges -> selection widgets.
        """
        self.list_of_images = list_of_images
        self.sample_metadata_dict = NormalizationWithSimplifySelection.retrieve_metadata(list_of_files=list_of_images,
                                                                                         display_infos=False,
                                                                                         label='sample')
        self.auto_retrieve_ob_metadata()
        self.auto_retrieve_df_metadata()
        self.match_files()
        self.calculate_first_and_last_ob()
        self.calculate_time_range()
        self.display_time_range_selection_widgets()
def select_ob_folder(self):
self.select_folder(message='open beam',
next_function=self.retrieve_ob_metadata())
def retrieve_ob_metadata(self, selected_folder):
list_of_ob_files = NormalizationWithSimplifySelection.get_list_of_tiff_files(folder=selected_folder)
self.ob_metadata_dict = NormalizationWithSimplifySelection.retrieve_metadata(list_of_files=list_of_ob_files)
def auto_retrieve_ob_metadata(self):
folder = os.path.join(self.working_dir, 'raw', 'ob')
list_of_ob_files = file_handler.get_list_of_all_files_in_subfolders(folder=folder,
extensions=['tiff', 'tif'])
self.ob_metadata_dict = NormalizationWithSimplifySelection.retrieve_metadata(list_of_files=list_of_ob_files,
label='ob')
def select_folder(self, message="", next_function=None):
folder_widget = myfileselector.MyFileSelectorPanel(instruction='select {} folder'.format(message),
start_dir=self.working_dir,
next=next_function,
type='directory',
multiple=False)
folder_widget.show()
def select_df_folder(self):
self.select_folder(message='dark field',
next_function=self.retrieve_df_metadata())
def retrieve_df_metadata(self, selected_folder):
list_of_df_files = NormalizationWithSimplifySelection.get_list_of_tiff_files(folder=selected_folder)
self.df_metadata_dict = NormalizationWithSimplifySelection.retrieve_metadata(list_of_files=list_of_df_files)
def auto_retrieve_df_metadata(self):
folder = os.path.join(self.working_dir, 'raw', 'df')
list_of_df_files = file_handler.get_list_of_all_files_in_subfolders(folder=folder,
extensions=['tiff', 'tif'])
self.df_metadata_dict = NormalizationWithSimplifySelection.retrieve_metadata(list_of_files=list_of_df_files,
label='df')
    def match_files(self):
        """This is where the files will be associated with their respective OB, DF by using the metadata"""
        # NOTE(review): when JSON_DEBUGGING is True the master dict is NOT rebuilt
        # here before matching — presumably it was loaded beforehand; confirm.
        if not JSON_DEBUGGING:
            self.create_master_sample_dict()
        self.match_ob()
        self.match_df()
        if JSON_DEBUGGING:
            # for debugging only, exporting the json
            import json
            with open('/Users/j35/Desktop/which_ob_and_df_to_use.json', 'w') as outfile:
                json.dump(self.final_full_master_dict, outfile)
    def match_ob(self):
        """we will go through all the ob and associate them with the right sample based on
        - acquisition time
        - detector type
        - aperture
        """
        list_ob_dict = self.ob_metadata_dict
        final_full_master_dict = self.final_full_master_dict
        list_of_sample_acquisition = final_full_master_dict.keys()
        for _index_ob in list_ob_dict.keys():
            _all_ob_instrument_metadata = self.get_instrument_metadata_only(list_ob_dict[_index_ob])
            _ob_instrument_metadata = NormalizationWithSimplifySelection._isolate_instrument_metadata(
                _all_ob_instrument_metadata)
            _acquisition_time = _all_ob_instrument_metadata[MetadataName.EXPOSURE_TIME]['value']
            # only consider OBs whose exposure time matches a sample acquisition time
            if _acquisition_time in list_of_sample_acquisition:
                for _config_id in final_full_master_dict[_acquisition_time].keys():
                    _sample_metadata_infos = final_full_master_dict[_acquisition_time][_config_id]['metadata_infos']
                    # an OB may be attached to several configs if their metadata all match
                    if NormalizationWithSimplifySelection.all_metadata_match(_sample_metadata_infos,
                                                                             _ob_instrument_metadata):
                        final_full_master_dict[_acquisition_time][_config_id]['list_ob'].append(list_ob_dict[_index_ob])
        self.final_full_master_dict = final_full_master_dict
    def match_df(self):
        """
        we will go through all the df of the IPTS and will associate the df with the right samples
        based on:
        - detector type used
        - acquisition time
        """
        list_df_dict = self.df_metadata_dict
        final_full_master_dict = self.final_full_master_dict
        list_of_sample_acquisition = final_full_master_dict.keys()
        for _index_df in list_df_dict.keys():
            _all_df_instrument_metadata = self.get_instrument_metadata_only(list_df_dict[_index_df])
            _df_instrument_metadata = NormalizationWithSimplifySelection._isolate_instrument_metadata(
                _all_df_instrument_metadata)
            _acquisition_time = _all_df_instrument_metadata[MetadataName.EXPOSURE_TIME]['value']
            if _acquisition_time in list_of_sample_acquisition:
                for _config_id in final_full_master_dict[_acquisition_time].keys():
                    _sample_metadata_infos = final_full_master_dict[_acquisition_time][_config_id]['metadata_infos']
                    # unlike OBs, dark fields only need the detector manufacturer
                    # (METADATA_KEYS['df'][1]) to match, besides the exposure time
                    if NormalizationWithSimplifySelection.all_metadata_match(_sample_metadata_infos,
                                                                             _df_instrument_metadata,
                                                                             list_key_to_check=[METADATA_KEYS['df'][
                                                                                                    1].value]):
                        final_full_master_dict[_acquisition_time][_config_id]['list_df'].append(list_df_dict[_index_df])
        self.final_full_master_dict = final_full_master_dict
    def create_master_sample_dict(self):
        """Group the sample images by acquisition (exposure) time and configuration.

        Builds ``self.final_full_master_dict`` keyed by acquisition time; each
        entry holds one or more ``configN`` sub-dicts grouping images whose
        remaining instrument metadata all match.  Also tracks the
        chronologically first and last sample image.
        """
        final_full_master_dict = collections.OrderedDict()
        sample_metadata_dict = self.sample_metadata_dict
        # we need to keep record of which image was the first one taken and which image was the last one taken
        first_sample_image = sample_metadata_dict[0]
        last_sample_image = sample_metadata_dict[0]
        for _file_index in sample_metadata_dict.keys():
            _dict_file_index = sample_metadata_dict[_file_index]
            _sample_file = _dict_file_index['filename']
            _acquisition_time = _dict_file_index[MetadataName.EXPOSURE_TIME]['value']
            _instrument_metadata = NormalizationWithSimplifySelection._isolate_instrument_metadata(_dict_file_index)
            _sample_time_stamp = _dict_file_index['time_stamp']
            # find which image was first and which image was last
            if _sample_time_stamp < first_sample_image['time_stamp']:
                first_sample_image = _dict_file_index
            elif _sample_time_stamp > last_sample_image['time_stamp']:
                last_sample_image = _dict_file_index
            # first entry or first time seeing that acquisition time
            if (len(final_full_master_dict) == 0) or not (_acquisition_time in final_full_master_dict.keys()):
                _first_images_dict = {'sample': first_sample_image,
                                      'ob'    : {},
                                      'df'    : {}}
                _last_images_dict = {'sample': last_sample_image,
                                     'ob'    : {},
                                     'df'    : {}}
                _temp_dict = {'list_sample'          : [_dict_file_index],
                              'first_images'         : _first_images_dict,
                              'last_images'          : _last_images_dict,
                              'list_ob'              : [],
                              'list_df'              : [],
                              'time_range_s_selected': {'before': np.NaN,
                                                        'after' : np.NaN},
                              'time_range_s'         : {'before': np.NaN,
                                                        'after' : np.NaN},
                              'metadata_infos'       : NormalizationWithSimplifySelection.get_instrument_metadata_only(
                                  _instrument_metadata)}
                final_full_master_dict[_acquisition_time] = {}
                final_full_master_dict[_acquisition_time]['config0'] = _temp_dict
            else:
                # check that all the metadata_infos match for the first group of that acquisition time,
                # otherwise check the next one or create a group
                if _acquisition_time in final_full_master_dict.keys():
                    _dict_for_this_acquisition_time = final_full_master_dict[_acquisition_time]
                    _found_a_match = False
                    for _config_key in _dict_for_this_acquisition_time.keys():
                        _config = _dict_for_this_acquisition_time[_config_key]
                        if (NormalizationWithSimplifySelection.all_metadata_match(metadata_1=_config['metadata_infos'],
                                                                                  metadata_2=_instrument_metadata)):
                            _config['list_sample'].append(_dict_file_index)
                            # NOTE(review): first/last_images are refreshed with the running
                            # GLOBAL first/last sample, not per-config values — confirm intended.
                            _first_images_dict = {'sample': first_sample_image,
                                                  'ob'    : {},
                                                  'df'    : {}}
                            _last_images_dict = {'sample': last_sample_image,
                                                 'ob'    : {},
                                                 'df'    : {}}
                            _config['first_images'] = _first_images_dict
                            _config['last_images'] = _last_images_dict
                            _found_a_match = True
                    if not _found_a_match:
                        # no existing config matched: open a new configN group
                        _first_images_dict = {'sample': first_sample_image,
                                              'ob'    : {},
                                              'df'    : {}}
                        _last_images_dict = {'sample': last_sample_image,
                                             'ob'    : {},
                                             'df'    : {}}
                        _temp_dict = {'list_sample'          : [_dict_file_index],
                                      'first_images'         : _first_images_dict,
                                      'last_images'          : _last_images_dict,
                                      'list_ob'              : [],
                                      'list_df'              : [],
                                      'time_range_s_selected': {'before': np.NaN,
                                                                'after' : np.NaN},
                                      'time_range_s'         : {'before': np.NaN,
                                                                'after' : np.NaN},
                                      'metadata_infos'       : NormalizationWithSimplifySelection.get_instrument_metadata_only(
                                          _instrument_metadata)}
                        nbr_config = len(_dict_for_this_acquisition_time.keys())
                        _dict_for_this_acquisition_time['config{}'.format(nbr_config)] = _temp_dict
                    else:
                        # NOTE(review): this branch is unreachable — the outer else already
                        # guarantees _acquisition_time IS a key of final_full_master_dict.
                        _first_images_dict = {'sample': first_sample_image,
                                              'ob'    : {},
                                              'df'    : {}}
                        _last_images_dict = {'sample': last_sample_image,
                                             'ob'    : {},
                                             'df'    : {}}
                        _temp_dict = {'list_sample'          : [_dict_file_index],
                                      'first_images'         : _first_images_dict,
                                      'last_images'          : _last_images_dict,
                                      'list_ob'              : [],
                                      'list_df'              : [],
                                      'time_range_s_selected': {'before': np.NAN,  # NOTE(review): np.NAN alias is inconsistent with np.NaN used elsewhere (and removed in NumPy 2.0)
                                                                'after' : np.NaN},
                                      'time_range_s'         : {'before': np.NaN,
                                                                'after' : np.NaN},
                                      'metadata_infos'       : NormalizationWithSimplifySelection.get_instrument_metadata_only(
                                          _instrument_metadata)}
                        final_full_master_dict[_acquisition_time] = {}
                        final_full_master_dict[_acquisition_time]['config0'] = _temp_dict
        self.final_full_master_dict = final_full_master_dict
    def calculate_first_and_last_ob(self):
        """this will loop through all the acquisition time keys, and config keys, to figure out
        what is the first ob and last ob in this dictionary"""
        _final_full_master_dict = self.final_full_master_dict
        for _acquisition in _final_full_master_dict.keys():
            current_acquisition_dict = _final_full_master_dict[_acquisition]
            # running first/last OB trackers for this acquisition time
            _first_ob_time = np.NaN
            _first_ob = {}
            _last_ob_time = np.NaN
            _last_ob = {}
            for _config in current_acquisition_dict.keys():
                current_acquisition_config_dict = current_acquisition_dict[_config]
                for _ob in current_acquisition_config_dict['list_ob']:
                    _current_ob_time = _ob['time_stamp']
                    if np.isnan(_first_ob_time):
                        # very first OB seen: it is both the first and the last so far
                        _first_ob_time = _current_ob_time
                        _last_ob_time = _current_ob_time
                        _first_ob = _last_ob = _ob
                    elif _current_ob_time < _first_ob_time:
                        _first_ob_time = _current_ob_time
                        _first_ob = _ob
                    elif _current_ob_time > _last_ob_time:
                        _last_ob_time = _current_ob_time
                        _last_ob = _ob
                # NOTE(review): the trackers are not reset between configs, so a later
                # config inherits first/last OBs accumulated from earlier configs of
                # the same acquisition time — confirm this is intended.
                current_acquisition_config_dict['first_images']['ob'] = _first_ob
                current_acquisition_config_dict['last_images']['ob'] = _last_ob
def calculate_time_range(self):
"""this method will calculate the max time range of OB taken before or after and will use that
for the slider selection time range
Provide option to use all (that means, do not used any time range)
"""
_final_full_master_dict = self.final_full_master_dict
for _acquisition in _final_full_master_dict.keys():
current_acquisition_dict = _final_full_master_dict[_acquisition]
for _config in current_acquisition_dict.keys():
current_acquisition_config_dict = current_acquisition_dict[_config]
first_sample_image = current_acquisition_config_dict['first_images']['sample']
first_ob_image = current_acquisition_config_dict['first_images']['ob']
delta_time_before = first_sample_image.get('time_stamp', 0) - first_ob_image.get('time_stamp', 0)
_time_range_s_before = delta_time_before if delta_time_before > 0 else 0
last_sample_image = current_acquisition_config_dict['last_images']['sample']
last_ob_image = current_acquisition_config_dict['last_images']['ob']
delta_time_after = last_ob_image.get('time_stamp', 0) - last_sample_image.get('time_stamp', 0)
_time_range_s_after = delta_time_after if delta_time_after > 0 else 0
_final_full_master_dict[_acquisition][_config]['time_range_s']['before'] = _time_range_s_before
_final_full_master_dict[_acquisition][_config]['time_range_s']['after'] = _time_range_s_after
def display_time_range_selection_widgets(self):
_final_full_master_dict = self.final_full_master_dict
_config_tab_dict = {} # will keep record of each config tab for each acquisition
_acquisition_tabs = widgets.Tab()
for _acquisition_index, _acquisition in enumerate(_final_full_master_dict.keys()):
_dict_of_this_acquisition = _final_full_master_dict[_acquisition]
_config_tab = widgets.Tab()
_current_acquisition_tab_widgets_id = {'config_tab_id': _config_tab}
for _index, _config in enumerate(_dict_of_this_acquisition.keys()):
_dict_config = _dict_of_this_acquisition[_config]
_dict = self.get_full_layout_for_this_config(_dict_config)
_layout = _dict['verti_layout']
_config_widgets_id_dict = _dict['config_widgets_id_dict']
_config_tab.children += (_layout,)
_config_tab.set_title(_index, _config)
_current_acquisition_tab_widgets_id[_index] = _config_widgets_id_dict
_config_tab_dict[_acquisition_index] = _current_acquisition_tab_widgets_id
_acquisition_tabs.children += (_config_tab,) # add all the config tab to top | |
def check_storage_configuration(self, cfgspec=None):
rsp = self.wc.grab_json_response(
'/api/function/raid_conf?params=raidlink_GetStatus')
if rsp['items'][0]['status'] != 2:
raise pygexc.TemporaryError('Storage configuration unavailable in '
'current state (try boot to setup or '
'an OS)')
if not cfgspec:
return True
for pool in cfgspec.arrays:
self._parse_storage_cfgspec(pool)
self.weblogout()
return True
    def get_diagnostic_data(self, savefile, progress=None):
        """Trigger FFDC (service data) generation on the BMC and download the archive.

        savefile: local path where the archive is written.
        progress: optional callable receiving {'phase': ..., 'progress': ...} updates.
        """
        # Kick off FFDC generation.
        self.wc.grab_json_response('/api/providers/ffdc',
                                   {'Generate_FFDC': 1})
        percent = 0
        while percent != 100:
            # Poll every 3 seconds until generation reports complete.
            ipmisession.Session.pause(3)
            result = self.wc.grab_json_response('/api/providers/ffdc',
                                                {'Generate_FFDC_status': 1})
            self._refresh_token()
            if progress:
                progress({'phase': 'initializing', 'progress': float(percent)})
            percent = result['progress']
        while 'FileName' not in result:
            # Generation is done but the file name can lag; keep polling for it.
            result = self.wc.grab_json_response('/api/providers/ffdc',
                                                {'Generate_FFDC_status': 1})
        url = '/ffdc/{0}'.format(result['FileName'])
        fd = webclient.FileDownloader(self.wc, url, savefile)
        fd.start()
        while fd.isAlive():
            # Report download progress and keep the session token alive meanwhile.
            fd.join(1)
            if progress and self.wc.get_download_progress():
                progress({'phase': 'download',
                          'progress': 100 * self.wc.get_download_progress()})
            self._refresh_token()
        if progress:
            progress({'phase': 'complete'})
def disk_inventory(self, mode=0):
# mode 0 is firmware, 1 is hardware
storagedata = self.get_cached_data('lenovo_cached_storage')
if not storagedata:
if self.wc:
storagedata = self.wc.grab_json_response(
'/api/function/raid_alldevices?params=storage_GetAllDisks')
if storagedata:
self.datacache['lenovo_cached_storage'] = (
storagedata, util._monotonic_time())
if storagedata and 'items' in storagedata:
for adp in storagedata['items']:
for diskent in adp.get('disks', ()):
if mode==0:
yield self.get_disk_firmware(diskent)
elif mode==1:
yield self.get_disk_hardware(diskent)
for diskent in adp.get('aimDisks', ()):
if mode==0:
yield self.get_disk_firmware(diskent)
elif mode==1:
yield self.get_disk_hardware(diskent)
if mode == 1:
bdata = {'Description': 'Unmanaged Disk'}
if adp.get('m2Type', -1) == 2:
yield ('M.2 Disk', bdata)
for umd in adp.get('unmanagedDisks', []):
yield ('Disk {0}'.format(umd['slotNo']), bdata)
def get_disk_hardware(self, diskent, prefix=''):
bdata = {}
if not prefix and diskent.get('location', '').startswith('M.2'):
prefix = 'M.2-'
diskname = 'Disk {1}{0}'.format(diskent['slotNo'], prefix)
bdata['Model'] = diskent['productName'].rstrip()
bdata['Serial Number'] = diskent['serialNo'].rstrip()
bdata['FRU Number'] = diskent['fruPartNo'].rstrip()
bdata['Description'] = diskent['type'].rstrip()
return (diskname, bdata)
def get_disk_firmware(self, diskent, prefix=''):
bdata = {}
if not prefix and diskent.get('location', '').startswith('M.2'):
prefix = 'M.2-'
diskname = 'Disk {1}{0}'.format(diskent['slotNo'], prefix)
bdata['model'] = diskent[
'productName'].rstrip()
bdata['version'] = diskent['fwVersion']
return (diskname, bdata)
    def _parse_array_spec(self, arrayspec):
        """Validate *arrayspec* against the controller and summarize it.

        Returns a dict with the controller, RAID level, span layout, and
        the '|'-separated drive strings that the volume-creation API
        expects, or None (falls through) when the spec has no disks —
        adding a volume to an existing array is not yet implemented.

        Raises pygexc.UnsupportedFunctionality when disks span multiple
        controllers, and pygexc.InvalidParameterValue for unavailable
        RAID levels or configurations the firmware rejects.
        """
        controller = None
        if arrayspec.disks:
            # All member disks and hot spares must share one controller
            for disk in list(arrayspec.disks) + list(arrayspec.hotspares):
                if controller is None:
                    controller = disk.id[0]
                if controller != disk.id[0]:
                    raise pygexc.UnsupportedFunctionality(
                        'Cannot span arrays across controllers')
            raidmap = self._raid_number_map(controller)
            if not raidmap:
                raise pygexc.InvalidParameterValue(
                    'There are no available drives for a new array')
            requestedlevel = str(arrayspec.raid).lower()
            if requestedlevel not in raidmap:
                raise pygexc.InvalidParameterValue(
                    'Requested RAID "{0}" not available on this '
                    'system with currently available drives'.format(
                        requestedlevel))
            rdinfo = raidmap[str(arrayspec.raid).lower()]
            rdlvl = str(rdinfo[0])
            # Single-span RAID levels default to 1 span, otherwise 2
            defspan = 1 if rdinfo[1] == 1 else 2
            spancount = defspan if arrayspec.spans is None else arrayspec.spans
            drivesperspan = str(len(arrayspec.disks) // int(spancount))
            hotspares = arrayspec.hotspares
            drives = arrayspec.disks
            # Firmware wants '|'-separated drive id lists terminated by '|'
            if hotspares:
                hstr = '|'.join([str(x.id[1]) for x in hotspares]) + '|'
            else:
                hstr = ''
            drvstr = '|'.join([str(x.id[1]) for x in drives]) + '|'
            pth = '/api/function/raid_conf?params=raidlink_CheckConfisValid'
            args = [pth, controller, rdlvl, spancount, drivesperspan, drvstr,
                    hstr]
            url = ','.join([str(x) for x in args])
            rsp = self.wc.grab_json_response(url)
            # errcode 16 specifically signals an invalid drive count
            if rsp['items'][0]['errcode'] == 16:
                raise pygexc.InvalidParameterValue('Incorrect number of disks')
            elif rsp['items'][0]['errcode'] != 0:
                raise pygexc.InvalidParameterValue(
                    'Invalid configuration: {0}'.format(
                        rsp['items'][0]['errcode']))
            return {
                'capacity': rsp['items'][0]['freeCapacity'],
                'controller': controller,
                'drives': drvstr,
                'hotspares': hstr,
                'raidlevel': rdlvl,
                'spans': spancount,
                'perspan': drivesperspan,
            }
        else:
            pass  # TODO: adding new volume to existing array would be here
def _make_jbod(self, disk, realcfg):
currstatus = self._get_status(disk, realcfg)
if currstatus.lower() == 'jbod':
return
self._make_available(disk, realcfg)
self._set_drive_state(disk, 16)
def _make_global_hotspare(self, disk, realcfg):
currstatus = self._get_status(disk, realcfg)
if currstatus.lower() == 'global hot spare':
return
self._make_available(disk, realcfg)
self._set_drive_state(disk, 1)
def _make_available(self, disk, realcfg):
# 8 if jbod, 4 if hotspare.., leave alone if already...
currstatus = self._get_status(disk, realcfg)
newstate = None
if currstatus == 'Unconfigured Good':
return
elif currstatus.lower() == 'global hot spare':
newstate = 4
elif currstatus.lower() == 'jbod':
newstate = 8
self._set_drive_state(disk, newstate)
def _get_status(self, disk, realcfg):
for cfgdisk in realcfg.disks:
if disk.id == cfgdisk.id:
currstatus = cfgdisk.status
break
else:
raise pygexc.InvalidParameterValue('Requested disk not found')
return currstatus
def _set_drive_state(self, disk, state):
rsp = self.wc.grab_json_response(
'/api/function',
{'raidlink_DiskStateAction': '{0},{1}'.format(disk.id[1], state)})
if rsp.get('return', -1) != 0:
raise Exception(
'Unexpected return to set disk state: {0}'.format(
rsp.get('return', -1)))
def clear_storage_arrays(self):
rsp = self.wc.grab_json_response(
'/api/function', {'raidlink_ClearRaidConf': '1'})
self.weblogout()
if rsp['return'] != 0:
raise Exception('Unexpected return to clear config: ' + repr(rsp))
def remove_storage_configuration(self, cfgspec):
realcfg = self.get_storage_configuration(False)
for pool in cfgspec.arrays:
for volume in pool.volumes:
vid = '{0},{1}'.format(volume.id[1], volume.id[0])
rsp = self.wc.grab_json_response(
'/api/function', {'raidlink_RemoveVolumeAsync': vid})
if rsp.get('return', -1) != 0:
raise Exception(
'Unexpected return to volume deletion: ' + repr(rsp))
self._wait_storage_async()
for disk in cfgspec.disks:
self._make_available(disk, realcfg)
self.weblogout()
def apply_storage_configuration(self, cfgspec):
realcfg = self.get_storage_configuration(False)
for disk in cfgspec.disks:
if disk.status.lower() == 'jbod':
self._make_jbod(disk, realcfg)
elif disk.status.lower() == 'hotspare':
self._make_global_hotspare(disk, realcfg)
elif disk.status.lower() in ('unconfigured', 'available', 'ugood',
'unconfigured good'):
self._make_available(disk, realcfg)
for pool in cfgspec.arrays:
if pool.disks:
self._create_array(pool)
self.weblogout()
def _create_array(self, pool):
params = self._parse_array_spec(pool)
url = '/api/function/raid_conf?params=raidlink_GetDefaultVolProp'
args = (url, params['controller'], 0, params['drives'])
props = self.wc.grab_json_response(','.join([str(x) for x in args]))
props = props['items'][0]
volumes = pool.volumes
remainingcap = params['capacity']
nameappend = 1
vols = []
currvolnames = None
currcfg = None
for vol in volumes:
if vol.name is None:
# need to iterate while there exists a volume of that name
if currvolnames is None:
currcfg = self.get_storage_configuration(False)
currvolnames = set([])
for pool in currcfg.arrays:
for volume in pool.volumes:
currvolnames.add(volume.name)
name = props['name'] + '_{0}'.format(nameappend)
nameappend += 1
while name in currvolnames:
name = props['name'] + '_{0}'.format(nameappend)
nameappend += 1
else:
name = vol.name
stripesize = props['stripsize'] if vol.stripesize is None \
else vol.stripesize
strsize = 'remainder' if vol.size is None else str(vol.size)
if strsize in ('all', '100%'):
volsize = params['capacity']
elif strsize in ('remainder', 'rest'):
volsize = remainingcap
elif strsize.endswith('%'):
volsize = int(params['capacity'] *
float(strsize.replace('%', '')) / 100.0)
else:
try:
volsize = int(strsize)
except ValueError:
raise pygexc.InvalidParameterValue(
'Unrecognized size ' + strsize)
remainingcap -= volsize
if remainingcap < 0:
raise pygexc.InvalidParameterValue(
'Requested sizes exceed available capacity')
vols.append('{0};{1};{2};{3};{4};{5};{6};{7};{8};|'.format(
name, volsize, stripesize, props['cpwb'], props['cpra'],
props['cpio'], props['ap'], props['dcp'], props['initstate']))
url = '/api/function'
arglist = '{0},{1},{2},{3},{4},{5},'.format(
params['controller'], params['raidlevel'], params['spans'],
params['perspan'], params['drives'], params['hotspares'])
arglist += ''.join(vols)
parms = {'raidlink_AddNewVolWithNaAsync': arglist}
rsp = self.wc.grab_json_response(url, parms)
if rsp['return'] != 0:
raise Exception(
'Unexpected response to add volume command: ' + repr(rsp))
self._wait_storage_async()
def _wait_storage_async(self):
rsp = {'items': [{'status': 0}]}
while rsp['items'][0]['status'] == 0:
ipmisession.Session.pause(1)
rsp = self.wc.grab_json_response(
'/api/function/raid_conf?params=raidlink_QueryAsyncStatus')
def extract_drivelist(self, cfgspec, controller, drives):
for drive in cfgspec['drives']:
ctl, drive = self._extract_drive_desc(drive)
if controller is None:
controller = ctl
if ctl != controller:
raise pygexc.UnsupportedFunctionality(
'Cannot span arrays across controllers')
drives.append(drive)
return controller
def get_storage_configuration(self, logout=True):
rsp = self.wc.grab_json_response(
'/api/function/raid_alldevices?params=storage_GetAllDevices')
standalonedisks = []
pools = []
for item in rsp.get('items', []):
for cinfo in item['controllerInfo']:
cid = cinfo['id']
for pool in cinfo['pools']:
volumes = []
disks = []
spares = []
for volume in pool['volumes']:
volumes.append(
storage.Volume(name=volume['name'],
size=volume['capacity'],
status=volume['statusStr'],
id=(cid, volume['id'])))
for disk in pool['disks']:
diskinfo = storage.Disk(
name=disk['name'], description=disk['type'],
id=(cid, disk['id']), status=disk['RAIDState'],
serial=disk['serialNo'], fru=disk['fruPartNo'])
if disk['RAIDState'] == 'Dedicated Hot Spare':
spares.append(diskinfo)
else:
disks.append(diskinfo)
totalsize = pool['totalCapacityStr'].replace('GB', '')
totalsize = int(float(totalsize) * 1024)
freesize = pool['freeCapacityStr'].replace('GB', '')
freesize = int(float(freesize) * 1024)
pools.append(storage.Array(
disks=disks, raid=pool['rdlvlstr'], volumes=volumes,
id=(cid, pool['id']), hotspares=spares,
capacity=totalsize, available_capacity=freesize))
for disk in cinfo.get('unconfiguredDisks', ()):
# can be unused, global hot spare, or JBOD
standalonedisks.append(
storage.Disk(
name=disk['name'], description=disk['type'],
id=(cid, disk['id']), status=disk['RAIDState'],
serial=disk['serialNo'], fru=disk['fruPartNo']))
if logout:
self.weblogout()
return storage.ConfigSpec(disks=standalonedisks, arrays=pools)
    def attach_remote_media(self, url, user, password):
        """Mount the remote image at *url* as virtual media on the XCC.

        Supported schemes are nfs, smb/cifs, http, and https; *user* and
        *password* are only used for cifs shares.  Raises
        pygexc.UnsupportedFunctionality for unknown schemes and
        pygexc.InvalidParameterValue when the target is unreachable or
        the XCC lacks the required license.
        """
        proto, host, path = util.urlsplit(url)
        # The XCC API calls SMB shares 'cifs'
        if proto == 'smb':
            proto = 'cifs'
        rq = {'Option': '', 'Domain': '', 'Write': 0}
        # Protocol codes: nfs == 1, cifs == 0, http/https == 7
        if proto == 'nfs':
            rq['Protocol'] = 1
            rq['Url'] = '{0}:{1}'.format(host, path)
        elif proto == 'cifs':
            rq['Protocol'] = 0
            rq['Credential'] = '{0}:{1}'.format(user, password)
            rq['Url'] = '//{0}{1}'.format(host, path)
        elif proto in ('http', 'https'):
            rq['Protocol'] = 7
            rq['Url'] = url
        else:
            raise pygexc.UnsupportedFunctionality(
                '"{0}" scheme is not supported on this system or '
                'invalid url format'.format(proto))
        rt = self.wc.grab_json_response('/api/providers/rp_vm_remote_connect',
                                        json.dumps(rq))
        # NOTE(review): if 'return' is missing, the lookups below raise
        # KeyError instead of the intended errors — confirm upstream intent
        if 'return' not in rt or rt['return'] != 0:
            # 656/657/659 indicate the XCC could not reach the location
            if rt['return'] in (657, 659, 656):
                raise pygexc.InvalidParameterValue(
                    'Given location was unreachable by the XCC')
            if rt['return'] == 32:
                raise pygexc.InvalidParameterValue(
                    'XCC does not have required license for operation')
            raise Exception('Unhandled return: ' + repr(rt))
        # Connection established; now request the actual mount
        rt = self.wc.grab_json_response('/api/providers/rp_vm_remote_mountall',
                                        '{}')
        if 'return' not in rt or rt['return'] != 0:
            if rt['return'] in (657, 659, 656):
                raise pygexc.InvalidParameterValue(
                    'Given location was unreachable by the XCC')
| |
<reponame>JackKelly/neuralnilm_prototype
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from neuralnilm.net import BidirectionalRecurrentLayer
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer, RecurrentLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
from math import sqrt
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
"""
WARNING: COSTS PROBABLY CANNOT BE SAFELY COMPARED BECAUSE
STANDARDISATION WAS NOT APPLIED VERY CONSISTENTLY
Good things:
T.max
a single 3x pool
Bad things:
2x2x pool
Conv
10 filters in conv. Much better to have 50
To do again:
conv AND pool with 50 filters
New experiments
T.max and single 3x pool. And try this with conv and pool with 50 filters.
BLSTM with above
2 RNNs, then 3x pool, then BLSTM (try 1 and 2 layers)
"""
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
    """Squared error averaged separately over the above/below-threshold regions.

    The per-element squared error is partitioned by whether the target
    exceeds THRESHOLD; each partition's mean (0 when the partition is
    empty, guarded via isnan) receives equal weight.
    """
    sq_error = (x - t) ** 2

    def masked_mean(mask):
        # Mean squared error where mask holds; NaN (empty mask) becomes 0
        selected = sq_error[mask.nonzero()]
        m = selected.mean()
        return ifelse(T.isnan(m), 0.0, m)

    return (masked_mean(t > THRESHOLD) + masked_mean(t <= THRESHOLD)) / 2.0
# Configuration for RealApplianceSource: UK-DALE appliance data from
# building 1, 1500-sample sequences, standardised inputs and targets,
# and 5x target subsampling (overridden per-experiment where noted).
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    appliances=[
        ['fridge freezer', 'fridge', 'freezer'], 
        'hair straighteners', 
        'television',
        'dish washer',
        ['washer dryer', 'washing machine']
    ],
    max_appliance_powers=[300, 500, 200, 2500, 2400],
    on_power_thresholds=[5] * 5,
    max_input_power=5900,
    min_on_durations=[60, 60, 60, 1800, 1800],
    min_off_durations=[12, 12, 12, 1800, 600],
    window=("2013-06-01", "2014-07-01"),
    seq_length=1500,
    output_one_appliance=False,
    boolean_targets=False,
    train_buildings=[1],
    validation_buildings=[1], 
    skip_probability=0.7,
    n_seq_per_batch=10,
    subsample_target=5,
    include_diff=False,
    clip_appliance_power=True,
    target_is_prediction=False,
    standardise_input=True,
    standardise_targets=True,
    input_padding=0,
    lag=0
)
def change_learning_rate(net, epoch):
    """Epoch callback: lower the learning rate to 0.001 and recompile.

    Installed via net_dict's epoch_callbacks for epoch 501.
    """
    net.updates = partial(nesterov_momentum, learning_rate=0.001)
    net.compile()
def change_subsample(net, epoch):
    """Epoch callback: switch target subsampling to 5x and rebuild shapes.

    NOTE(review): not referenced by net_dict's epoch_callbacks in this
    file — presumably wired in elsewhere; verify before removing.
    """
    net.source.subsample_target = 5
    net.generate_validation_data_and_set_shapes()
# Shared Net options: scaled_cost loss, Nesterov momentum at lr 0.01,
# and a callback that drops the learning rate to 0.001 at epoch 501.
net_dict = dict(        
    save_plot_interval=SAVE_PLOT_INTERVAL,
    loss_function=scaled_cost,
    updates=partial(nesterov_momentum, learning_rate=0.01),
    do_save_activations=True,
    epoch_callbacks={501: change_learning_rate}
)
def exp_a(name):
    """Experiment a: 3 recurrent layers around a single 5x mean-pool.

    avg valid cost = 1.4374585152
    """
    src = RealApplianceSource(**deepcopy(source_dict))
    cfg = deepcopy(net_dict)
    cfg.update(dict(experiment_name=name, source=src))
    N = 50

    def rnn(std):
        # Bidirectional tanh RNN layer; std scales the input weights
        return {
            'type': BidirectionalRecurrentLayer,
            'num_units': N,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=std),
            'nonlinearity': tanh
        }

    cfg['layers_config'] = [
        rnn(1.),
        rnn(1 / sqrt(N)),
        {
            'type': FeaturePoolLayer,
            'ds': 5,  # number of feature maps to be pooled together
            'axis': 1,  # pool over the time axis
            'pool_function': T.mean
        },
        rnn(1 / sqrt(N)),
        {
            'type': DenseLayer,
            'num_units': src.n_outputs,
            'nonlinearity': None,
            'W': Normal(std=(1 / sqrt(N)))
        }
    ]
    return Net(**cfg)
def exp_b(name):
    """Experiment b: 4 recurrent layers around a single 5x mean-pool.

    avg valid cost = 1.6067613363
    """
    src = RealApplianceSource(**deepcopy(source_dict))
    cfg = deepcopy(net_dict)
    cfg.update(dict(experiment_name=name, source=src))
    N = 50

    def rnn(std):
        # Bidirectional tanh RNN layer; std scales the input weights
        return {
            'type': BidirectionalRecurrentLayer,
            'num_units': N,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=std),
            'nonlinearity': tanh
        }

    cfg['layers_config'] = [
        rnn(1.),
        rnn(1 / sqrt(N)),
        {
            'type': FeaturePoolLayer,
            'ds': 5,  # number of feature maps to be pooled together
            'axis': 1,  # pool over the time axis
            'pool_function': T.mean
        },
        rnn(1 / sqrt(N)),
        rnn(1 / sqrt(N)),
        {
            'type': DenseLayer,
            'num_units': src.n_outputs,
            'nonlinearity': None,
            'W': Normal(std=(1 / sqrt(N)))
        }
    ]
    return Net(**cfg)
def exp_c(name):
    """Experiment c: 3 recurrent layers with a single 3x mean-pool.

    avg valid cost = 1.1548461914
    """
    src_cfg = deepcopy(source_dict)
    src_cfg['subsample_target'] = 3
    src = RealApplianceSource(**src_cfg)
    cfg = deepcopy(net_dict)
    cfg.update(dict(experiment_name=name, source=src))
    N = 50

    def rnn(std):
        # Bidirectional tanh RNN layer; std scales the input weights
        return {
            'type': BidirectionalRecurrentLayer,
            'num_units': N,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=std),
            'nonlinearity': tanh
        }

    cfg['layers_config'] = [
        rnn(1.),
        rnn(1 / sqrt(N)),
        {
            'type': FeaturePoolLayer,
            'ds': 3,  # number of feature maps to be pooled together
            'axis': 1,  # pool over the time axis
            'pool_function': T.mean
        },
        rnn(1 / sqrt(N)),
        {
            'type': DenseLayer,
            'num_units': src.n_outputs,
            'nonlinearity': None,
            'W': Normal(std=(1 / sqrt(N)))
        }
    ]
    return Net(**cfg)
def exp_d(name):
    """Experiment d: 3 recurrent layers with two 2x mean-pools (2x2x).

    avg valid cost = 1.8310753107
    """
    src_cfg = deepcopy(source_dict)
    src_cfg['subsample_target'] = 4
    src = RealApplianceSource(**src_cfg)
    cfg = deepcopy(net_dict)
    cfg.update(dict(experiment_name=name, source=src))
    N = 50

    def rnn(std):
        # Bidirectional tanh RNN layer; std scales the input weights
        return {
            'type': BidirectionalRecurrentLayer,
            'num_units': N,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=std),
            'nonlinearity': tanh
        }

    def halfpool():
        # Fresh dict per layer so the two pools don't share state
        return {
            'type': FeaturePoolLayer,
            'ds': 2,  # number of feature maps to be pooled together
            'axis': 1,  # pool over the time axis
            'pool_function': T.mean
        }

    cfg['layers_config'] = [
        rnn(1.),
        halfpool(),
        rnn(1 / sqrt(N)),
        halfpool(),
        rnn(1 / sqrt(N)),
        {
            'type': DenseLayer,
            'num_units': src.n_outputs,
            'nonlinearity': None,
            'W': Normal(std=(1 / sqrt(N)))
        }
    ]
    return Net(**cfg)
def exp_e(name):
    """Experiment e: like exp_a but pooling with T.max instead of T.mean.

    best yet? avg top 25 valid cost = 1.0413346291
    """
    src_cfg = deepcopy(source_dict)
    src_cfg['subsample_target'] = 5
    src = RealApplianceSource(**src_cfg)
    cfg = deepcopy(net_dict)
    cfg.update(dict(experiment_name=name, source=src))
    N = 50

    def rnn(std):
        # Bidirectional tanh RNN layer; std scales the input weights
        return {
            'type': BidirectionalRecurrentLayer,
            'num_units': N,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=std),
            'nonlinearity': tanh
        }

    cfg['layers_config'] = [
        rnn(1.),
        rnn(1 / sqrt(N)),
        {
            'type': FeaturePoolLayer,
            'ds': 5,  # number of feature maps to be pooled together
            'axis': 1,  # pool over the time axis
            'pool_function': T.max
        },
        rnn(1 / sqrt(N)),
        {
            'type': DenseLayer,
            'num_units': src.n_outputs,
            'nonlinearity': None,
            'W': Normal(std=(1 / sqrt(N)))
        }
    ]
    return Net(**cfg)
def exp_f(name):
    """Experiment f: strided Conv1D (10 filters) in place of pooling.

    avg valid cost = 1.6656855345
    """
    src_cfg = deepcopy(source_dict)
    src_cfg['subsample_target'] = 5
    src = RealApplianceSource(**src_cfg)
    cfg = deepcopy(net_dict)
    cfg.update(dict(experiment_name=name, source=src))
    N = 50

    def rnn(std):
        # Bidirectional tanh RNN layer; std scales the input weights
        return {
            'type': BidirectionalRecurrentLayer,
            'num_units': N,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=std),
            'nonlinearity': tanh
        }

    def shuffle():
        # Swap time/feature axes around the convolution (fresh dict each use)
        return {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # toggles (batch, time, features) layout
        }

    cfg['layers_config'] = [
        rnn(1.),
        rnn(1 / sqrt(N)),
        shuffle(),  # to (batch, features, time)
        {
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': 10,
            'filter_length': 5,
            'stride': 5,
            'nonlinearity': tanh,
            'W': Normal(std=1 / sqrt(N))
        },
        shuffle(),  # back to (batch, time, features)
        rnn(1 / sqrt(N)),
        {
            'type': DenseLayer,
            'num_units': src.n_outputs,
            'nonlinearity': None,
            'W': Normal(std=(1 / sqrt(N)))
        }
    ]
    return Net(**cfg)
def exp_g(name):
    """Experiment g: strided Conv1D with 50 filters in place of pooling.

    avg valid cost = 1.1587812901
    """
    src_cfg = deepcopy(source_dict)
    src_cfg['subsample_target'] = 5
    src = RealApplianceSource(**src_cfg)
    cfg = deepcopy(net_dict)
    cfg.update(dict(experiment_name=name, source=src))
    N = 50

    def rnn(std):
        # Bidirectional tanh RNN layer; std scales the input weights
        return {
            'type': BidirectionalRecurrentLayer,
            'num_units': N,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=std),
            'nonlinearity': tanh
        }

    def shuffle():
        # Swap time/feature axes around the convolution (fresh dict each use)
        return {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # toggles (batch, time, features) layout
        }

    cfg['layers_config'] = [
        rnn(1.),
        rnn(1 / sqrt(N)),
        shuffle(),  # to (batch, features, time)
        {
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': 50,
            'filter_length': 5,
            'stride': 5,
            'nonlinearity': tanh,
            'W': Normal(std=1 / sqrt(N))
        },
        shuffle(),  # back to (batch, time, features)
        rnn(1 / sqrt(N)),
        {
            'type': DenseLayer,
            'num_units': src.n_outputs,
            'nonlinearity': None,
            'W': Normal(std=(1 / sqrt(N)))
        }
    ]
    return Net(**cfg)
def exp_h(name):
    """Experiment h: Conv1D (10 filters, stride 1) followed by a 5x max-pool.

    avg valid cost = 1.3773527145
    """
    src_cfg = deepcopy(source_dict)
    # Padding compensates for the stride-1 convolution's length reduction
    src_cfg.update(dict(subsample_target=5, input_padding=4))
    src = RealApplianceSource(**src_cfg)
    cfg = deepcopy(net_dict)
    cfg.update(dict(experiment_name=name, source=src))
    N = 50

    def rnn(std):
        # Bidirectional tanh RNN layer; std scales the input weights
        return {
            'type': BidirectionalRecurrentLayer,
            'num_units': N,
            'gradient_steps': GRADIENT_STEPS,
            'W_in_to_hid': Normal(std=std),
            'nonlinearity': tanh
        }

    def shuffle():
        # Swap time/feature axes around the convolution (fresh dict each use)
        return {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # toggles (batch, time, features) layout
        }

    cfg['layers_config'] = [
        rnn(1.),
        rnn(1 / sqrt(N)),
        shuffle(),  # to (batch, features, time)
        {
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': 10,
            'filter_length': 5,
            'stride': 1,
            'nonlinearity': tanh,
            'W': Normal(std=1 / sqrt(N))
        },
        shuffle(),  # back to (batch, time, features)
        {
            'type': FeaturePoolLayer,
            'ds': 5,  # number of feature maps to be pooled together
            'axis': 1,  # pool over the time axis
            'pool_function': T.max
        },
        rnn(1 / sqrt(N)),
        {
            'type': DenseLayer,
            'num_units': src.n_outputs,
            'nonlinearity': None,
            'W': Normal(std=(1 / sqrt(N)))
        }
    ]
    return Net(**cfg)
def exp_i(name):
# | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 23:47:57 2021
@author: guo.1648
"""
# final version.
# referenced from NN_getDist_testCode_forBiggan.py and
# NN_getRepThreshPairImg_testCode_forBiggan.py.
# This code does the following:
# (1) generate a 32x32 sample sheet from images in dir .../chenqi_random_samples/
# (2) do NN query as in NN_getDist_testCode_forBiggan.py
# (3) threshold the matched pairs as in NN_getRepThreshPairImg_testCode_forBiggan.py
# Note: for the MNIST (grayscale) dataset, we still use 3 channels (each channel identical)
# to compute the L2 norm, so that we don't need to change the thresholds.
import cv2
import os
import re
import numpy as np
from shutil import copyfile
from sklearn.neighbors import NearestNeighbors
# L2-distance cutoffs for declaring a generated/original image pair a near
# match, with one output folder per threshold (kept index-aligned).
NNmatchDist_threshold_values = [10000, 9000, 8000, 7000]
dstFolder_thresh_list = ['NNmatchResult_threshold10000/','NNmatchResult_threshold9000/','NNmatchResult_threshold8000/','NNmatchResult_threshold7000/']
"""
#### for FLOWER_128_sub1000: 1000 images dataset
srcDir_sampleSheetImgs = '/scratch/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_sub1000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs16_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/scratch/BigGAN-PyTorch/FLOWER_128_sub1000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/FLOWER_128_sub1000/Itr38950/'
# parameters:
im_size = 128
batch_size = 16 # i.e., each sample sheet is of 4x4 !!!!:
num_row = 4
num_col = 4
"""
"""
#### for FLOWER_128_sub2000: 2000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_sub2000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs32_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/FLOWER_128_sub2000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/FLOWER_128_sub2000/Itr29700/'
# parameters:
im_size = 128
batch_size = 32 # i.e., the sample sheet is of 5x6+2 !!!!:
num_row = 7
num_col = 5
"""
"""
#### for FLOWER_128_sub4000: 4000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_sub4000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs56_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/FLOWER_128_sub4000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/FLOWER_128_sub4000/Itr10700/'
# parameters:
im_size = 128
batch_size = 56 # i.e., each sample sheet is of 8x7 !!!!:
num_row = 8
num_col = 7
"""
"""
#### for FLOWER_128_sub6000: 6000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_sub6000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs24_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/FLOWER_128_sub6000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/FLOWER_128_sub6000/Itr17300/'
# parameters:
im_size = 128
batch_size = 24 # i.e., each sample sheet is of 6x4 !!!!:
num_row = 6
num_col = 4
"""
"""
#### for FLOWER_128: 8189 images dataset (the original FLOWER dataset)
srcDir_sampleSheetImgs = '/scratch/BigGAN-PyTorch/samples/BigGAN_FLOWER_128_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/scratch/BigGAN-PyTorch/FLOWER_128_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/FLOWER_128/Itr21500/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
"""
"""
#### for CelebA_128_sub200: 200 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_CelebA_128_sub200_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs32_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/CelebA_128_sub200_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/CelebA_128_sub200/Itr17850/'
# parameters:
im_size = 128
batch_size = 32 # i.e., the sample sheet is of 5x6+2 !!!!:
num_row = 7
num_col = 5
"""
"""
#### for CelebA_128_sub600: 600 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_CelebA_128_sub600_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs32_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/CelebA_128_sub600_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/CelebA_128_sub600/Itr20450/'
# parameters:
im_size = 128
batch_size = 32 # i.e., the sample sheet is of 5x6+2 !!!!:
num_row = 7
num_col = 5
"""
"""
#### for CelebA_128_sub1000: 1000 images dataset
srcDir_sampleSheetImgs = '/scratch/BigGAN-PyTorch/samples/BigGAN_CelebA_128_sub1000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs16_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/scratch/BigGAN-PyTorch/CelebA_128_sub1000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/CelebA_128_sub1000/Itr37400/'
# parameters:
im_size = 128
batch_size = 16 # i.e., each sample sheet is of 4x4 !!!!:
num_row = 4
num_col = 4
"""
"""
#### for CelebA_128_sub4000: 4000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_CelebA_128_sub4000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs32_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/CelebA_128_sub4000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/CelebA_128_sub4000/Itr19600/'
# parameters:
im_size = 128
batch_size = 32 # i.e., the sample sheet is of 5x6+2 !!!!:
num_row = 7
num_col = 5
"""
"""
#### for CelebA_128_sub8000: 8000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_CelebA_128_sub8000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs32_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/CelebA_128_sub8000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/CelebA_128_sub8000/Itr23550/'
# parameters:
im_size = 128
batch_size = 32 # i.e., the sample sheet is of 5x6+2 !!!!:
num_row = 7
num_col = 5
"""
"""
#### for MNIST_128_sub10000: 10000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_MNIST_128_sub10000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs16_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/MNIST_128_sub10000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/MNIST_128_sub10000/Itr35600/'
# parameters:
im_size = 128
batch_size = 16 # i.e., each sample sheet is of 4x4 !!!!:
num_row = 4
num_col = 4
"""
"""
#### for MNIST_128_sub30000: 30000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_MNIST_128_sub30000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs16_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/MNIST_128_sub30000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/MNIST_128_sub30000/Itr37300/'
# parameters:
im_size = 128
batch_size = 16 # i.e., each sample sheet is of 4x4 !!!!:
num_row = 4
num_col = 4
"""
"""
#### for MNIST_128_train: 60000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_MNIST_128_train_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs16_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/MNIST_128_train_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/MNIST_128_train/Itr35850/'
# parameters:
im_size = 128
batch_size = 16 # i.e., each sample sheet is of 4x4 !!!!:
num_row = 4
num_col = 4
"""
"""
#### for LSUN_128_sub200: 200 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_LSUN_128_sub200_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/LSUN_128_sub200_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/LSUN_128_sub200/Itr12000/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
"""
"""
#### for LSUN_128_sub1000: 1000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_LSUN_128_sub1000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/LSUN_128_sub1000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/LSUN_128_sub1000/Itr13450/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
"""
"""
#### for LSUN_128_sub5000: 5000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_LSUN_128_sub5000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/LSUN_128_sub5000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/LSUN_128_sub5000/Itr9650/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
"""
"""
#### for LSUN_128_sub10000: 10000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_LSUN_128_sub10000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/LSUN_128_sub10000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/LSUN_128_sub10000/Itr12000/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
"""
#"""
#### for LSUN_128_sub30000: 30000 images dataset
srcDir_sampleSheetImgs = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/samples/BigGAN_LSUN_128_sub30000_BigGANdeep_seed0_Gch128_Dch128_Gd2_Dd2_bs48_nDa64_nGa64_Glr1.0e-04_Dlr4.0e-04_Gnlinplace_relu_Dnlinplace_relu_Ginitortho_Dinitortho_Gattn64_Dattn64_Gshared_hier_ema/chenqi_random_samples/'
#srcRootDir_originDataImg = '' # NOT USED
srcRootDir_imgNpz = '/eecf/cbcsl/data100b/Chenqi/BigGAN-PyTorch/LSUN_128_sub30000_imgs.npz'
dstRootDir = '/eecf/cbcsl/data100b/Chenqi/gan_results_for_presentation/biggan/NN_query/LSUN_128_sub30000/Itr10400/'
# parameters:
im_size = 128
batch_size = 48 # i.e., each sample sheet is of 8x6 !!!!:
num_row = 8
num_col = 6
#"""
# for (1) and (2):
dstRootDir_viewSampleSheetImgs = dstRootDir + 'view_sampleSheetImgs/'
dstRootDir_NNmatchResult = dstRootDir + 'NNmatchResult/'
dstImgName_sampleSheetAll = dstRootDir + 'fakes.png'
dstImgName_NNmatchSheet = dstRootDir + 'NNmatchResultSheet.png'
dstTxtName_matchDist = dstRootDir + 'NNmatchDist.txt'
# for (3):
dstTxtName_matchDistThresh = dstRootDir + 'NNmatchDist_smallerThanThresh.txt'
def dealWith_sampleSheets():
    """Collect individual generated images from every sample-sheet .jpg.

    Walks srcDir_sampleSheetImgs, slices each sheet into a num_row x num_col
    grid of equal tiles, and keeps the first batch_size tiles of each sheet.
    The returned list is capped at 1024 images.
    """
    collected = []
    for dirpath, dirnames, filenames in os.walk(srcDir_sampleSheetImgs):
        for fname in filenames:
            if ".jpg" not in fname:
                continue
            print("------------------deal with---------------------")
            print(fname)
            sheet = cv2.imread(srcDir_sampleSheetImgs + fname)
            (sheet_h, sheet_w, _ch) = sheet.shape
            tile_h = sheet_h // num_row  # per-tile height (e.g. 130)
            tile_w = sheet_w // num_col  # per-tile width (e.g. 130)
            # split the sheet into its grid of tiles; only the first
            # batch_size cells hold real samples, the rest is padding
            kept = 0
            for r in range(num_row):
                for c in range(num_col):
                    tile = sheet[r * tile_h:(r + 1) * tile_h,
                                 c * tile_w:(c + 1) * tile_w, :]
                    if kept < batch_size:
                        collected.append(tile)
                    kept += 1
    if len(collected) > 1024:
        collected = collected[:1024]  # keep at most 1024 images
    return collected
def generateSave_sampleSheetAll(sample_img_list):
    """Assemble up to 1024 sample images into one 32x32 sheet and save it.

    :param sample_img_list: list of equally sized HxWxC uint8 images; at most
        1024 are used, missing cells stay black.
    """
    (single_img_height, single_img_width, ch) = sample_img_list[0].shape
    sample_sheet_all = np.zeros((single_img_height*32, single_img_width*32, ch), dtype=np.uint8)
    for i in range(32):
        for j in range(32):
            start_row_pos = i*single_img_height
            end_row_pos = (i+1)*single_img_height
            start_col_pos = j*single_img_width
            end_col_pos = (j+1)*single_img_width
            # BUG FIX: the flat index must use the sheet width (32), not
            # num_col; the old `i*num_col + j` repeated the first images on
            # every row and never reached most of the 1024 samples.
            match_img_idx = i*32 + j
            # guard against lists shorter than a full 32x32 sheet
            if match_img_idx < len(sample_img_list):
                sample_sheet_all[start_row_pos:end_row_pos, start_col_pos:end_col_pos, :] = sample_img_list[match_img_idx]
    # save this sheet
    cv2.imwrite(dstImgName_sampleSheetAll, sample_sheet_all)
    return
def image_to_feature_vector(image):
    """Flatten an image (already resized to a fixed size) into a 1-D
    vector of raw pixel intensities."""
    flat_pixels = image.flatten()
    return flat_pixels
def my_center_crop(origin_img, crop_size):
    """Cut a centered crop_size x crop_size window out of an HxWxC image."""
    rows, cols, _channels = origin_img.shape
    top = rows // 2 - crop_size // 2
    left = cols // 2 - crop_size // 2
    return origin_img[top:top + crop_size, left:left + crop_size]
def generateTrainSet(len_featVec, dim):
    """Build the nearest-neighbour feature space from the original images.

    Loads the image file names stored in the npz at srcRootDir_imgNpz,
    center-crops each image to its largest square, resizes it to `dim`
    (linear interpolation), and flattens it into a raw-pixel feature vector.

    :param len_featVec: expected length of every feature vector.
    :param dim: (width, height) tuple passed to cv2.resize.
    :return: (N x len_featVec numpy array, list of image file names).
    """
    feature_rows = []  # this is our feature space
    image_names = []
    # newly modified: image paths come from the npz archive
    # (different from the folder-based FLOWER_128 variants)
    archive = np.load(srcRootDir_imgNpz)
    name_list = list(archive['imgs'][:, 0])
    for name in name_list:
        img = cv2.imread(name)
        square = my_center_crop(img, min(img.shape[0], img.shape[1]))
        # resize using linear interpolation:
        resized = cv2.resize(square, dim)
        feat = image_to_feature_vector(resized)
        assert(len(feat) == len_featVec)
        feature_rows.append(feat)
        image_names.append(name)
    return (np.array(feature_rows), image_names)
def combine_matchingResult(match_img_list):
    """Tile the matched training images into a 32x32 sheet and save it.

    The layout mirrors the generated-sample sheet so the two images can be
    compared cell by cell.

    :param match_img_list: list of equally sized HxWxC uint8 images.
    """
    (single_img_height, single_img_width, ch) = match_img_list[0].shape
    match_img_sheet = np.zeros((single_img_height*32, single_img_width*32, ch), dtype=np.uint8)
    for i in range(32):
        for j in range(32):
            start_row_pos = i*single_img_height
            end_row_pos = (i+1)*single_img_height
            start_col_pos = j*single_img_width
            end_col_pos = (j+1)*single_img_width
            # BUG FIX: the flat index must use the sheet width (32), not
            # num_col; the old `i*num_col + j` repeated early matches and
            # never showed most of them.
            match_img_idx = i*32 + j
            # guard against lists shorter than a full 32x32 sheet
            if match_img_idx < len(match_img_list):
                match_img_sheet[start_row_pos:end_row_pos, start_col_pos:end_col_pos, :] = match_img_list[match_img_idx]
    # save this sheet
    cv2.imwrite(dstImgName_NNmatchSheet, match_img_sheet)
    return
def query_NN_wrapper(sample_img_list):
    """For each generated sample, find its nearest training image.

    Builds a 1-NN index over raw-pixel features of the original dataset,
    queries it with every generated sample, writes each (sample, match) pair
    side by side into dstRootDir_NNmatchResult, writes the combined match
    sheet, and records every match distance in dstTxtName_matchDist.

    :param sample_img_list: list of equally sized HxWx3 sample images.
    """
    # first, get the training set from original images:
    len_featVec = len(image_to_feature_vector(sample_img_list[0]))
    dim = (sample_img_list[0].shape[1], sample_img_list[0].shape[0])  # (w, h)
    trainSet_feats, all_origin_img_names = generateTrainSet(len_featVec, dim)
    neigh = NearestNeighbors(n_neighbors=1)  # radius=0.4
    neigh.fit(trainSet_feats)
    # then, query:
    match_img_list = []
    match_distance_strs = ''
    for i in range(len(sample_img_list)):
        single_sample_img = sample_img_list[i]
        # get the query vector:
        single_sample_img_vec = image_to_feature_vector(single_sample_img)
        # NN to search:
        match_distance, match_idx = neigh.kneighbors([single_sample_img_vec], 1, return_distance=True)
        match_distance = match_distance[0][0]
        match_idx = match_idx[0][0]
        match_imgName = all_origin_img_names[match_idx].split('/')[-1]
        # recover the matched image from its stored (flattened) feature row
        match_img = trainSet_feats[match_idx, :].reshape((dim[1], dim[0], 3))
        match_img_list.append(match_img)
        # save the matching result (sample and match side by side):
        im_h = cv2.hconcat([single_sample_img, match_img])
        cv2.imwrite(dstRootDir_NNmatchResult + str(i+1) + '_' + match_imgName, im_h)
        # also record the corresponding match_distance for the txt report:
        match_distance_strs += str(i+1) + '_' + match_imgName + ': match_distance = ' + str(match_distance) + '\n'
    # also combine the match_img together into a corresponding sheet!
    combine_matchingResult(match_img_list)
    # BUG FIX: use a context manager so the handle is closed even if the
    # write fails (the original open/write/close leaked on error).
    with open(dstTxtName_matchDist, 'w') as f:
        f.write(match_distance_strs)
    return
def threshNNpairs():
match_distance_thresh_strs = ''
for i in range(3):
NNmatchDist_threshold_value = NNmatchDist_threshold_values[i]
dstFolder_thresh = dstRootDir + dstFolder_thresh_list[i]
| |
"""
Gameplay aids for 'Attack Vector: Tactical'.
Functions:
avt.movement(vector_string)
Get 'pretty-formatted' string for movement grid.
avt.shellstar(
Vector_to_target,
crossing_Vector,
muzzle_velocity,
segment=0)
Generate seeker shellstar info.
Classes:
Vector
"""
from math import floor, pi, sin, sqrt
from decimal import Decimal
import movement_grid_remainders as remainders
_CARDINAL_H = ['A', 'B', 'C', 'D', 'E', 'F']
_CARDINAL_V = ['+', '-']
_CARDINALS = _CARDINAL_H + _CARDINAL_V
_TILE_RADIUS = 8
def SET_TILE_RADIUS(r) :
    """Set radius used in tile computation.

    :param r: new tile radius; must be a positive even integer.
    Raises ValueError otherwise.  (The original checks were commented-out
    asserts; asserts are stripped under ``-O``, so explicit raises make the
    validation unconditional.)
    """
    if not isinstance(r, int) :
        raise ValueError("invalid radius type")
    if r % 2 != 0 :
        raise ValueError("radius must be even")
    if r <= 0 :
        raise ValueError("radius must be greater than zero")
    global _TILE_RADIUS
    _TILE_RADIUS = r
_SIN60 = Decimal(sin(pi/3))
_HALF_DECIMAL = Decimal('0.5')
def _uvp_from_vectors(vectors) :
    """Convert a cardinal-direction vector dictionary to a U,V,+ dictionary.

    U grows along B and C and shrinks along E and F; V grows along E and D
    and shrinks along B and A; '+' is the net vertical component.
    """
    u_total = Decimal(0) + vectors['B'] - vectors['E'] + vectors['C'] - vectors['F']
    v_total = Decimal(0) - vectors['B'] + vectors['E'] + vectors['D'] - vectors['A']
    return {
        'U' : u_total,
        'V' : v_total,
        '+' : Decimal(vectors['+'] - vectors['-']),
    }
#end _uvp_from_vectors
def __place_in_vector(d, magnitude, pos_dir, neg_dir) :
    """Store `magnitude` in vector dict `d` under the direction matching its sign.

    Positive values go to `d[pos_dir]`; negative values are negated and go to
    `d[neg_dir]`; zero leaves `d` untouched.
    """
    if magnitude != 0 :
        key = pos_dir if magnitude > 0 else neg_dir
        d[key] = abs(magnitude)
#end __place_in_vector
def __consolidate_120(d, dccw, dir, d_cw) :
    """Fold two components 120 degrees apart onto their shared middle direction.

    When both neighbours `d[dccw]` and `d[d_cw]` are nonzero, the smaller of
    the two is moved onto the middle direction `d[dir]` and subtracted from
    the larger neighbour (on a tie the clockwise side counts as smaller).
    """
    cw_val = d[d_cw]
    ccw_val = d[dccw]
    if cw_val > 0 and ccw_val >= cw_val :
        small_dir, big_dir = d_cw, dccw
    elif ccw_val > 0 and cw_val >= ccw_val :
        small_dir, big_dir = dccw, d_cw
    else :
        return  # fewer than two nonzero neighbours: nothing to consolidate
    small = d[small_dir]
    d[dir] = small
    d[small_dir] = Decimal(0)
    d[big_dir] = d[big_dir] - small
#end __consolidate_120
def _vectors_from_uvp(uvp) :
    """Convert a "UV+" dictionary back into a cardinal vector dictionary."""
    vectors = {card : Decimal(0) for card in _CARDINALS}
    __place_in_vector(vectors, uvp['+'], '+', '-')
    __place_in_vector(vectors, uvp['U'], 'C', 'F')
    __place_in_vector(vectors, uvp['V'], 'D', 'A')
    # fold adjacent 120-degree pairs onto their shared middle direction
    __consolidate_120(vectors, 'A', 'B', 'C')
    __consolidate_120(vectors, 'D', 'E', 'F')
    return vectors
#end _vectors_from_uvp
class Vector :
"""Representation of velocity or position in hexagonal coordinates.
Relies on AV:T's 'AVID' directions, (A, B, C, D, E, F, +, -) for
user interaction, but converts to an intenal format for easier math.
Created by (and interacts with user) through 'vector strings', each
being either the empty string (""), indicating a zeroed vector,
or a string containing a number of <number><direction> sequences
separated by spaces, e.g. "5F", "4A 2C 6+", "1A 5d 2+ 3b 7- 6E".
Note that case is unimportant in user input, but all output will be
capitalized.
Example instantiation: `vector_instance = avt.Vector("4A 2C 6+")`
Basic arithmetic is supported for Vector instances, with addition and
subtracting creating new Vector instances from the result.
Python's built-in To-string method `str(<Vector>)` is supported,
providing an instance's vector string.
Vector instances have a number of useful routines:
bearing()
Get the (rounded) distance and AVID window of a Vector.
Assumes relative to the zero vector.
cartesian()
Get a dictionary with 'X', 'Y', and 'Z' terms from Vector..
Values are Decimal objects.
movement_grid()
Look up and format the movement grid for the Vector.
"""
def __init__(self, vecstr) :
"""Create Vector instance from a vector string.
Order and count of components is not important,
e.g., "5F", "4A 2C 6+", "1A 5D 2+ 3B 7- 6E",
as vectors are automatically normalized into at most two
horizontal components adjacent to one another, and at most one
vertical component.
Raises ValueError if:
+ a number is negative
+ a duplicate direction is encountered
+ direction is not recognized
Raises an error of some sort if `vecstr` is not a string.
The empy string ("") returns the zero vector.
"""
vectors = dict(zip(_CARDINALS,
(Decimal(0) for _ in range(len(_CARDINALS)))))
seen = []
for elem in vecstr.strip().split() :
num = int(elem[:-1])
if num < 0 :
raise ValueError("negative value {}".format(elem))
dir = elem[-1].upper()
if dir in seen :
raise ValueError("duplicate direction {}".format(dir))
if dir not in _CARDINALS :
raise ValueError("invalid direction {}".format(dir))
vectors[dir] = num
seen.append(dir)
self._uvp_vectors = _uvp_from_vectors(vectors)
def __repr__(self) :
"""format in U,V,+ details"""
return "UV+ ({},{},{})".format(
self._uvp_vectors['U'],
self._uvp_vectors['V'],
self._uvp_vectors['+'])
def _major_minor_vertical(self) :
"""return tuple of (magnitude, dirction) tuples
if there are no suitable components, tuple has value of 0 and an
undefined direction
"""
vectors = _vectors_from_uvp(self._uvp_vectors)
major = (Decimal(0), 'A')
minor = (Decimal(0), 'A')
vertical = (Decimal(0), '+')
for dir in _CARDINAL_H :
magnitude = vectors[dir]
if magnitude > major[0] :
if major[0] > minor[0] :
minor = major
major = (magnitude, dir)
elif magnitude > minor[0] :
minor = (magnitude, dir)
for dir in ['-', '+'] :
magnitude = vectors[dir]
if magnitude > vertical[0] :
vertical = (magnitude, dir)
return (major, minor, vertical)
def __str__(self) :
"""Return formatted vector string with cardinal directions."""
(major, minor, vertical) = self._major_minor_vertical()
if major[0] == 0 :
if vertical[0] == 0 :
return "STILL"
else :
return "{}{}".format(vertical[0], vertical[1])
else :
arr = ["{}{}".format(major[0], major[1])]
if minor[0] > 0 :
arr.append("{}{}".format(minor[0], minor[1]))
if vertical[0] > 0 :
arr.append("{}{}".format(vertical[0], vertical[1]))
return ' '.join(arr)
def __add__(self, other) :
uvp = dict()
for k in ['U', 'V', '+'] :
uvp[k] = self._uvp_vectors[k] + other._uvp_vectors[k]
v = Vector("")
v._uvp_vectors = uvp
return v
def __sub__(self, other) :
uvp = dict()
for k in ['U', 'V', '+'] :
uvp[k] = self._uvp_vectors[k] - other._uvp_vectors[k]
v = Vector("")
v._uvp_vectors = uvp
return v
def _cartesian_magnitude(self) :
global _HALF_DECIMAL
global _SIN60
(major, minor, vertical) = self._major_minor_vertical()
h_dist = (major[0] + minor[0]*_HALF_DECIMAL)**2
h_dist += (minor[0] * _SIN60)**2
return (h_dist + vertical[0]**2).sqrt()
def bearing(self, count=False) :
"""Get distance and AVID window of vector, relative to zero.
Distance and window are separated by a space; distance is the
integer (rounded, if relevant) from zero to this vector instance.
The window is the bearing through which the vector is seen from
the zero vector.
By default, uses mathematical tricks to get exact distance.
If `count` is True, this correction is not done and horizontal
distance is the sum of horizontal components.
Green-ring windows (B/C++, e.g.) may occur -- interpret per rules.
"""
(major, minor, vertical) = self._major_minor_vertical()
dir = major[1]
if count :
h_dist = major[0] + minor[0]
else :
h_dist = ( major[0] + minor[0] * Decimal('0.5') )**2
h_dist += ( minor[0] * Decimal(sin(pi/3)) )**2
h_dist = h_dist.sqrt()
if major[0] == 0 :
if vertical[0] == 0 :
return "NONE"
else :
return "{} {}".format(
vertical[0],
"+++" if vertical[1] is '+' else "---")
if (minor[0] * 3) >= major[0] :
# through hex edge
# format clockwise (A/B, not B/A)
dir0, dir1 = major[1], minor[1]
if dir1 < dir0 :
dir0, dir1 = dir1, dir0
dir = "{}/{}".format(dir0, dir1)
v_dist = vertical[0]
height = 0 # deviations from horizontal; +/++ categorization
if abs(4*v_dist) > abs(h_dist) :
height += 1
if abs(v_dist) > abs(h_dist) :
height += 1
if abs(v_dist) >= abs(4*h_dist) :
height += 1
if height == 3 :
dir = ''
dist_pow = h_dist**2 + v_dist**2
dist = floor(dist_pow.sqrt() + Decimal('0.5'))
return "{} {}{}".format(dist, dir, vertical[1]*height)
def movement_grid(self) :
"""Pretty-format a movement grid for this vector.
If the zero vector, returns "STILL".
Else, returns columns for each major horizontal, minor
horizontal, and vertical movement component. First row is
direction, second row is movement 'each' segment, remaining rows
are the remainder grid, indexed by segment number.
When a column is empty, there is no movement that applies. This
may occur when there is no 'minor' or vertical component.
"""
(major, minor, vertical) = self._major_minor_vertical()
if major[0] == 0 and vertical[0] == 0 :
return "STILL"
majordir, minordir, vert_dir = ' ', ' ', ' '
majoreach, minoreach, vert_each = ' ', ' | |
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
import tempfile
from pathlib import Path
from typing import List, Optional, Sequence, Tuple, Union
from azureml.core import Dataset, Datastore, Workspace
from azureml.data import FileDataset, OutputFileDatasetConfig
from azureml.data.dataset_consumption_config import DatasetConsumptionConfig
from azureml.dataprep.fuse.daemon import MountContext
from health_azure.utils import PathOrString, get_workspace
def get_datastore(workspace: Workspace, datastore_name: str) -> Datastore:
    """
    Retrieves a datastore of a given name from an AzureML workspace. The datastore_name argument can be omitted if
    the workspace only contains a single datastore. Raises a ValueError if there is no datastore of the given name.

    :param workspace: The AzureML workspace to read from.
    :param datastore_name: The name of the datastore to retrieve.
    :return: An AzureML datastore.
    """
    all_stores = workspace.datastores
    store_names = list(all_stores.keys())
    if datastore_name:
        if datastore_name in all_stores:
            return all_stores[datastore_name]
        raise ValueError(f"Datastore \"{datastore_name}\" was not found in the \"{workspace.name}\" workspace. "
                         f"Existing datastores: {store_names}")
    # An empty name is only unambiguous when there is exactly one datastore.
    if len(store_names) != 1:
        raise ValueError("No datastore name provided. This is only possible if the workspace has a single datastore. "
                         f"However, the workspace has {len(store_names)} datastores: {store_names}")
    return all_stores[store_names[0]]
def get_or_create_dataset(workspace: Workspace, datastore_name: str, dataset_name: str) -> FileDataset:
    """
    Looks in the AzureML datastore for a dataset of the given name. If there is no such dataset, a dataset is
    created and registered, assuming that the files are in a folder that has the same name as the dataset.
    For example, if dataset_name is 'foo', then the 'foo' dataset should be pointing to the folder
    <container_root>/datasets/dataset_name/

    :param workspace: The AzureML workspace to read from.
    :param datastore_name: The name of the datastore that holds the dataset folder.
    :param dataset_name: The name of the dataset to retrieve or create.
    :return: The existing or newly registered FileDataset.
    """
    if not dataset_name:
        raise ValueError("No dataset name provided.")
    try:
        logging.info(f"Trying to retrieve AzureML Dataset '{dataset_name}'")
        existing = Dataset.get_by_name(workspace, name=dataset_name)
        logging.info("Dataset found.")
        return existing
    except Exception:
        # Any failure to look up the dataset is treated as "does not exist yet".
        logging.info(f"Retrieving datastore '{datastore_name}' from AzureML workspace")
        datastore = get_datastore(workspace, datastore_name)
        logging.info(f"Creating a new dataset from data in folder '{dataset_name}' in the datastore")
        # Ensure that there is a / at the end of the file path, otherwise folder that share a prefix could create
        # trouble (for example, folders foo and foo_bar exist, and I'm trying to create a dataset from "foo")
        created = Dataset.File.from_files(path=(datastore, dataset_name + "/"))
        logging.info("Registering the dataset for future use.")
        created.register(workspace, name=dataset_name)
        return created
def _input_dataset_key(index: int) -> str:
    """Name under which input dataset number `index` is consumed (INPUT_<index>)."""
    return "INPUT_" + str(index)
def _output_dataset_key(index: int) -> str:
    """Name under which output dataset number `index` is written (OUTPUT_<index>)."""
    return "OUTPUT_" + str(index)
class DatasetConfig:
    """
    Contains information to use AzureML datasets as inputs or outputs.
    """
    def __init__(self,
                 name: str,
                 datastore: str = "",
                 version: Optional[int] = None,
                 use_mounting: Optional[bool] = None,
                 target_folder: Optional[PathOrString] = None,
                 local_folder: Optional[PathOrString] = None):
        """
        :param name: The name of the dataset, as it was registered in the AzureML workspace. For output datasets,
        this will be the name given to the newly created dataset.
        :param datastore: The name of the AzureML datastore that holds the dataset. This can be empty if the AzureML
        workspace has only a single datastore, or if the default datastore should be used.
        :param version: The version of the dataset that should be used. This is only used for input datasets.
        If the version is not specified, the latest version will be used.
        :param use_mounting: If True, the dataset will be "mounted", that is, individual files will be read
        or written on-demand over the network. If False, the dataset will be fully downloaded before the job starts,
        respectively fully uploaded at job end for output datasets.
        Defaults: False (downloading) for datasets that are script inputs, True (mounting) for datasets that are
        script outputs.
        :param target_folder: The folder into which the dataset should be downloaded or mounted. If left empty, a
        random folder on /tmp will be chosen. Do NOT use "." as the target_folder.
        :param local_folder: The folder on the local machine at which the dataset is available. This
        is used only for runs outside of AzureML. If this is empty then the target_folder will be used to
        mount or download the dataset.
        """
        # This class would be a good candidate for a dataclass, but having an explicit constructor makes
        # documentation tools in the editor work nicer.
        name = name.strip()
        if not name:
            raise ValueError("The name of the dataset must be a non-empty string.")
        self.name = name
        self.datastore = datastore
        self.version = version
        # None means "use the per-direction default": download for inputs, mount for outputs
        # (see to_input_dataset / to_output_dataset).
        self.use_mounting = use_mounting
        # If target_folder is "" then convert to None
        self.target_folder = Path(target_folder) if target_folder else None
        if str(self.target_folder) == ".":
            raise ValueError("Can't mount or download a dataset to the current working directory.")
        self.local_folder = Path(local_folder) if local_folder else None
    def to_input_dataset_local(self, workspace: Optional[Workspace]) -> Tuple[Optional[Path], Optional[MountContext]]:
        """
        Return a local path to the dataset when outside of an AzureML run.
        If local_folder is supplied, then this is assumed to be a local dataset, and this is returned.
        Otherwise the dataset is mounted or downloaded to either the target folder or a temporary folder and that is
        returned.

        :param workspace: The AzureML workspace to read from.
        :return: Pair of optional path to dataset and optional mountcontext.
        """
        status = f"Dataset {self.name} will be "
        if self.local_folder is not None:
            status += f"obtained from local folder {str(self.local_folder)}"
            print(status)
            return self.local_folder, None
        if workspace is None:
            # Without a workspace there is no way to locate the dataset at all.
            status += "'None' - neither local_folder nor workspace available"
            print(status)
            return None, None
        azureml_dataset = get_or_create_dataset(workspace=workspace,
                                                dataset_name=self.name,
                                                datastore_name=self.datastore)
        # Fall back to a fresh temporary directory when no target folder was given.
        target_path = self.target_folder or Path(tempfile.mkdtemp())
        # For local (non-AzureML) runs, downloading is the default.
        use_mounting = self.use_mounting if self.use_mounting is not None else False
        if use_mounting:
            status += f"mounted at {target_path}"
            print(status)
            mount_context = azureml_dataset.mount(mount_point=str(target_path))
            result = target_path, mount_context
        else:
            status += f"downloaded to {target_path}"
            print(status)
            azureml_dataset.download(target_path=str(target_path), overwrite=False)
            result = target_path, None
        return result
    def to_input_dataset(self,
                         workspace: Workspace,
                         dataset_index: int) -> DatasetConsumptionConfig:
        """
        Creates a configuration for using an AzureML dataset inside of an AzureML run. This will make the AzureML
        dataset with given name available as a named input, using INPUT_0 as the key for dataset index 0.

        :param workspace: The AzureML workspace to read from.
        :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked INPUT_{index}
        :return: A DatasetConsumptionConfig that mounts or downloads the dataset inside the run.
        """
        status = f"In AzureML, dataset {self.name} (index {dataset_index}) will be "
        azureml_dataset = get_or_create_dataset(workspace=workspace,
                                                dataset_name=self.name,
                                                datastore_name=self.datastore)
        named_input = azureml_dataset.as_named_input(_input_dataset_key(index=dataset_index))
        # If running on windows then self.target_folder may be a WindowsPath, make sure it is
        # in posix format for Azure.
        path_on_compute = self.target_folder.as_posix() if self.target_folder is not None else None
        # For input datasets, downloading is the default.
        use_mounting = False if self.use_mounting is None else self.use_mounting
        if use_mounting:
            status += "mounted at "
            result = named_input.as_mount(path_on_compute)
        else:
            status += "downloaded to "
            result = named_input.as_download(path_on_compute)
        if path_on_compute:
            status += f"{path_on_compute}."
        else:
            status += "a randomly chosen folder."
        print(status)
        return result
    def to_output_dataset(self,
                          workspace: Workspace,
                          dataset_index: int) -> OutputFileDatasetConfig:
        """
        Creates a configuration to write a script output to an AzureML dataset. The name and datastore of this new
        dataset will be taken from the present object.

        :param workspace: The AzureML workspace to read from.
        :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked OUTPUT_{index}
        :return: An OutputFileDatasetConfig (as mount or upload) that registers the dataset when the run completes.
        """
        status = f"Output dataset {self.name} (index {dataset_index}) will be "
        datastore = get_datastore(workspace, self.datastore)
        dataset = OutputFileDatasetConfig(name=_output_dataset_key(index=dataset_index),
                                          destination=(datastore, self.name + "/"))
        # TODO: Can we get tags into here too?
        dataset = dataset.register_on_complete(name=self.name)
        if self.target_folder:
            raise ValueError("Output datasets can't have a target_folder set.")
        # For output datasets, mounting is the default.
        use_mounting = True if self.use_mounting is None else self.use_mounting
        if use_mounting:
            status += "uploaded while the job runs."
            result = dataset.as_mount()
        else:
            status += "uploaded when the job completes."
            result = dataset.as_upload()
        logging.info(status)
        return result
StrOrDatasetConfig = Union[str, DatasetConfig]
def _replace_string_datasets(datasets: List[StrOrDatasetConfig],
default_datastore_name: str) -> List[DatasetConfig]:
"""
Processes a list of input or output datasets. All entries in the list that are strings are turned into
DatasetConfig objects, using the string as the dataset name, and pointing to the default datastore.
:param datasets: A list of datasets, each given either as a string or a DatasetConfig object.
:param default_datastore_name: The datastore to use for all datasets that are only specified via their name.
:return: A list of DatasetConfig objects, in the same order as the input list.
"""
return [DatasetConfig(name=d, datastore=default_datastore_name) if isinstance(d, str) | |
'Medford, OR'},
'1541988':{'en': 'Springfield, OR'},
'1541994':{'en': 'Lincoln City, OR'},
'1541995':{'en': 'Harrisburg, OR'},
'1541996':{'en': 'Lincoln City, OR'},
'1541997':{'en': 'Florence, OR'},
'1541998':{'en': 'Junction City, OR'},
'1548':{'en': 'Ontario'},
'1551':{'en': 'New Jersey'},
'1559':{'en': 'California'},
'155922':{'en': 'Fresno, CA'},
'1559230':{'en': 'Fresno, CA'},
'1559233':{'en': 'Fresno, CA'},
'1559237':{'en': 'Fresno, CA'},
'1559243':{'en': 'Fresno, CA'},
'1559244':{'en': 'Fresno, CA'},
'1559248':{'en': 'Fresno, CA'},
'1559251':{'en': 'Fresno, CA'},
'1559252':{'en': 'Fresno, CA'},
'1559253':{'en': 'Fresno, CA'},
'1559255':{'en': 'Fresno, CA'},
'1559256':{'en': 'Fresno, CA'},
'1559261':{'en': 'Fresno, CA'},
'1559264':{'en': 'Fresno, CA'},
'1559266':{'en': 'Fresno, CA'},
'1559268':{'en': 'Fresno, CA'},
'1559271':{'en': 'Fresno, CA'},
'1559274':{'en': 'Fresno, CA'},
'1559275':{'en': 'Fresno, CA'},
'1559276':{'en': 'Fresno, CA'},
'1559277':{'en': 'Fresno, CA'},
'1559294':{'en': 'Fresno, CA'},
'1559297':{'en': 'Clovis, CA'},
'1559298':{'en': 'Clovis, CA'},
'1559299':{'en': 'Clovis, CA'},
'1559307':{'en': 'Fresno, CA'},
'1559320':{'en': 'Fresno, CA'},
'1559322':{'en': 'Clovis, CA'},
'1559323':{'en': 'Clovis, CA'},
'1559324':{'en': 'Clovis, CA'},
'1559325':{'en': 'Clovis, CA'},
'1559326':{'en': 'Clovis, CA'},
'1559353':{'en': 'Madera, CA'},
'1559386':{'en': 'Avenal, CA'},
'1559412':{'en': 'Fresno, CA'},
'155943':{'en': 'Fresno, CA'},
'155944':{'en': 'Fresno, CA'},
'155945':{'en': 'Fresno, CA'},
'1559478':{'en': 'Fresno, CA'},
'1559485':{'en': 'Fresno, CA'},
'1559486':{'en': 'Fresno, CA'},
'155949':{'en': 'Fresno, CA'},
'1559528':{'en': 'Orosi, CA'},
'1559535':{'en': 'Terra Bella, CA'},
'1559539':{'en': 'Springville, CA'},
'1559561':{'en': 'Three Rivers, CA'},
'1559562':{'en': 'Lindsay, CA'},
'1559564':{'en': 'Woodlake, CA'},
'155958':{'en': 'Hanford, CA'},
'1559591':{'en': 'Dinuba, CA'},
'1559592':{'en': 'Exeter, CA'},
'1559594':{'en': 'Exeter, CA'},
'1559595':{'en': 'Dinuba, CA'},
'1559622':{'en': 'Visalia, CA'},
'1559623':{'en': 'Visalia, CA'},
'1559624':{'en': 'Visalia, CA'},
'1559625':{'en': 'Visalia, CA'},
'1559626':{'en': 'Orange Cove, CA'},
'1559627':{'en': 'Visalia, CA'},
'1559635':{'en': 'Visalia, CA'},
'1559636':{'en': 'Visalia, CA'},
'1559637':{'en': 'Reedley, CA'},
'1559638':{'en': 'Reedley, CA'},
'1559641':{'en': 'Oakhurst, CA'},
'1559645':{'en': 'Madera, CA'},
'1559646':{'en': 'Parlier, CA'},
'1559651':{'en': 'Visalia, CA'},
'1559655':{'en': 'Mendota, CA'},
'1559658':{'en': 'Oakhurst, CA'},
'1559659':{'en': 'Firebaugh, CA'},
'1559661':{'en': 'Madera, CA'},
'1559662':{'en': 'Madera, CA'},
'1559664':{'en': 'Madera, CA'},
'1559665':{'en': 'Chowchilla, CA'},
'1559673':{'en': 'Madera, CA'},
'1559674':{'en': 'Madera, CA'},
'1559675':{'en': 'Madera, CA'},
'1559683':{'en': 'Oakhurst, CA'},
'1559684':{'en': 'Tulare, CA'},
'1559685':{'en': 'Tulare, CA'},
'1559686':{'en': 'Tulare, CA'},
'1559687':{'en': 'Tulare, CA'},
'1559688':{'en': 'Tulare, CA'},
'1559713':{'en': 'Visalia, CA'},
'1559732':{'en': 'Visalia, CA'},
'1559733':{'en': 'Visalia, CA'},
'1559734':{'en': 'Visalia, CA'},
'1559738':{'en': 'Visalia, CA'},
'1559739':{'en': 'Visalia, CA'},
'1559741':{'en': 'Visalia, CA'},
'1559747':{'en': 'Farmersville, CA'},
'1559757':{'en': 'Pixley, CA'},
'1559781':{'en': 'Porterville, CA'},
'1559782':{'en': 'Porterville, CA'},
'1559783':{'en': 'Porterville, CA'},
'1559784':{'en': 'Porterville, CA'},
'1559788':{'en': 'Porterville, CA'},
'1559791':{'en': 'Porterville, CA'},
'1559798':{'en': 'Ivanhoe, CA'},
'1559834':{'en': 'Fowler, CA'},
'1559840':{'en': 'Fresno, CA'},
'1559841':{'en': 'Shaver Lake, CA'},
'1559846':{'en': 'Kerman, CA'},
'1559855':{'en': 'Auberry, CA'},
'1559864':{'en': 'Caruthers, CA'},
'1559867':{'en': 'Riverdale, CA'},
'1559875':{'en': 'Sanger, CA'},
'1559877':{'en': 'North Fork, CA'},
'1559891':{'en': 'Selma, CA'},
'1559896':{'en': 'Selma, CA'},
'1559897':{'en': 'Kingsburg, CA'},
'1559924':{'en': 'Lemoore, CA'},
'1559925':{'en': 'Lemoore, CA'},
'1559935':{'en': 'Coalinga, CA'},
'1559945':{'en': 'Huron, CA'},
'1559992':{'en': 'Corcoran, CA'},
'1559998':{'en': 'Lemoore, CA'},
'1561':{'en': 'Florida'},
'1561200':{'en': 'Boynton Beach, FL'},
'1561208':{'en': 'Boca Raton, FL'},
'1561210':{'en': 'Boca Raton, FL'},
'1561218':{'en': 'Boca Raton, FL'},
'1561228':{'en': 'West Palm Beach, FL'},
'1561241':{'en': 'Boca Raton, FL'},
'1561242':{'en': 'West Palm Beach, FL'},
'1561243':{'en': 'Delray Beach, FL'},
'1561244':{'en': 'Boynton Beach, FL'},
'1561245':{'en': 'Boca Raton, FL'},
'1561265':{'en': 'Delray Beach, FL'},
'1561266':{'en': 'Delray Beach, FL'},
'1561272':{'en': 'Delray Beach, FL'},
'1561274':{'en': 'Delray Beach, FL'},
'1561276':{'en': 'Delray Beach, FL'},
'1561278':{'en': 'Delray Beach, FL'},
'1561279':{'en': 'Delray Beach, FL'},
'1561289':{'en': 'Boca Raton, FL'},
'1561330':{'en': 'Delray Beach, FL'},
'1561338':{'en': 'Boca Raton, FL'},
'1561347':{'en': 'Boca Raton, FL'},
'1561353':{'en': 'Boca Raton, FL'},
'1561355':{'en': 'West Palm Beach, FL'},
'1561361':{'en': 'Boca Raton, FL'},
'1561362':{'en': 'Boca Raton, FL'},
'1561364':{'en': 'Boynton Beach, FL'},
'1561367':{'en': 'Boca Raton, FL'},
'1561368':{'en': 'Boca Raton, FL'},
'1561369':{'en': 'Boynton Beach, FL'},
'1561372':{'en': 'Boca Raton, FL'},
'1561374':{'en': 'Boynton Beach, FL'},
'1561391':{'en': 'Boca Raton, FL'},
'1561392':{'en': 'Boca Raton, FL'},
'1561393':{'en': 'Boca Raton, FL'},
'1561394':{'en': 'Boca Raton, FL'},
'1561395':{'en': 'Boca Raton, FL'},
'1561404':{'en': 'Delray Beach, FL'},
'1561416':{'en': 'Boca Raton, FL'},
'1561417':{'en': 'Boca Raton, FL'},
'1561422':{'en': 'Riviera Beach, FL'},
'1561427':{'en': 'Jupiter, FL'},
'1561447':{'en': 'Boca Raton, FL'},
'1561450':{'en': 'Delray Beach, FL'},
'1561451':{'en': 'Boca Raton, FL'},
'1561455':{'en': 'Delray Beach, FL'},
'1561470':{'en': 'Boca Raton, FL'},
'1561471':{'en': 'West Palm Beach, FL'},
'1561477':{'en': 'Boca Raton, FL'},
'1561478':{'en': 'West Palm Beach, FL'},
'1561479':{'en': 'Boca Raton, FL'},
'1561482':{'en': 'Boca Raton, FL'},
'1561483':{'en': 'Boca Raton, FL'},
'1561487':{'en': 'Boca Raton, FL'},
'1561488':{'en': 'Boca Raton, FL'},
'1561495':{'en': 'Delray Beach, FL'},
'1561496':{'en': 'Delray Beach, FL'},
'1561498':{'en': 'Delray Beach, FL'},
'1561499':{'en': 'Delray Beach, FL'},
'1561509':{'en': 'Boynton Beach, FL'},
'1561514':{'en': 'West Palm Beach, FL'},
'1561544':{'en': 'Boca Raton, FL'},
'1561558':{'en': 'Boca Raton, FL'},
'1561572':{'en': 'Boynton Beach, FL'},
'1561575':{'en': 'Jupiter, FL'},
'1561615':{'en': 'West Palm Beach, FL'},
'1561616':{'en': 'West Palm Beach, FL'},
'1561620':{'en': 'Boca Raton, FL'},
'1561637':{'en': 'Delray Beach, FL'},
'1561638':{'en': 'Delray Beach, FL'},
'1561640':{'en': 'West Palm Beach, FL'},
'1561650':{'en': 'West Palm Beach, FL'},
'1561653':{'en': 'West Palm Beach, FL'},
'1561655':{'en': 'West Palm Beach, FL'},
'1561659':{'en': 'West Palm Beach, FL'},
'1561672':{'en': 'Boca Raton, FL'},
'156168':{'en': 'West Palm Beach, FL'},
'1561697':{'en': 'West Palm Beach, FL'},
'1561712':{'en': 'West Palm Beach, FL'},
'156173':{'en': 'Boynton Beach, FL'},
'156174':{'en': 'Jupiter, FL'},
'1561740':{'en': 'Boynton Beach, FL'},
'1561742':{'en': 'Boynton Beach, FL'},
'1561750':{'en': 'Boca Raton, FL'},
'1561752':{'en': 'Boynton Beach, FL'},
'1561756':{'en': 'Boca Raton, FL'},
'1561768':{'en': 'Jupiter, FL'},
'1561802':{'en': 'West Palm Beach, FL'},
'1561807':{'en': 'Boca Raton, FL'},
'1561819':{'en': 'Delray Beach, FL'},
'1561820':{'en': 'West Palm Beach, FL'},
'1561822':{'en': 'West Palm Beach, FL'},
'1561826':{'en': 'Boca Raton, FL'},
'1561832':{'en': 'West Palm Beach, FL'},
'1561833':{'en': 'West Palm Beach, FL'},
'1561835':{'en': 'West Palm Beach, FL'},
'1561852':{'en': 'Boca Raton, FL'},
'1561865':{'en': 'Delray Beach, FL'},
'1561883':{'en': 'Boca Raton, FL'},
'1561910':{'en': 'Boca Raton, FL'},
'1561912':{'en': 'Boca Raton, FL'},
'1561921':{'en': 'Delray Beach, FL'},
'1561924':{'en': 'Pahokee, FL'},
'1561929':{'en': 'Boca Raton, FL'},
'1561939':{'en': 'Boca Raton, FL'},
'1561948':{'en': 'Boca Raton, FL'},
'1561955':{'en': 'Boca Raton, FL'},
'1561962':{'en': 'Boca Raton, FL'},
'1561972':{'en': 'Jupiter, FL'},
'1561981':{'en': 'Boca Raton, FL'},
'1561988':{'en': 'Boca Raton, FL'},
'1561989':{'en': 'Boca Raton, FL'},
'1561992':{'en': 'Belle Glade, FL'},
'1561994':{'en': 'Boca Raton, FL'},
'1561995':{'en': 'Boca Raton, FL'},
'1561996':{'en': 'Belle Glade, FL'},
'1561997':{'en': 'Boca Raton, FL'},
'1561998':{'en': 'Boca Raton, FL'},
'1561999':{'en': 'Boca Raton, FL'},
'1562':{'en': 'California'},
'1562216':{'en': 'Long Beach, CA'},
'1562218':{'en': 'Long Beach, CA'},
'1562272':{'en': 'Paramount, CA'},
'1562343':{'en': 'Long Beach, CA'},
'1562401':{'en': 'Downey, CA'},
'1562408':{'en': 'Paramount, CA'},
'156242':{'en': 'Long Beach, CA'},
'156243':{'en': 'Long Beach, CA'},
'1562461':{'en': 'Bellflower, CA'},
'1562464':{'en': 'Whittier, CA'},
'156249':{'en': 'Long Beach, CA'},
'1562529':{'en': 'Paramount, CA'},
'1562531':{'en': 'Paramount, CA'},
'1562570':{'en': 'Long Beach, CA'},
'1562590':{'en': 'Long Beach, CA'},
'1562591':{'en': 'Long Beach, CA'},
'1562595':{'en': 'Long Beach, CA'},
'1562597':{'en': 'Long Beach, CA'},
'1562599':{'en': 'Long Beach, CA'},
'1562602':{'en': 'Paramount, CA'},
'1562612':{'en': 'Long Beach, CA'},
'1562622':{'en': 'Downey, CA'},
'1562624':{'en': 'Long Beach, CA'},
'1562633':{'en': 'Paramount, CA'},
'1562657':{'en': 'Downey, CA'},
'1562690':{'en': 'La Habra, CA'},
'1562691':{'en': 'La Habra, CA'},
'1562692':{'en': 'Whittier, CA'},
'1562693':{'en': 'Whittier, CA'},
'1562694':{'en': 'La Habra, CA'},
'1562695':{'en': 'Whittier, CA'},
'1562696':{'en': 'Whittier, CA'},
'1562697':{'en': 'La Habra, CA'},
'1562698':{'en': 'Whittier, CA'},
'1562728':{'en': 'Long Beach, CA'},
'1562777':{'en': 'Santa Fe Springs, CA'},
'1562789':{'en': 'Whittier, CA'},
'1562795':{'en': 'Los Alamitos, CA'},
'1562801':{'en': 'Pico Rivera, CA'},
'1562803':{'en': 'Downey, CA'},
'1562804':{'en': 'Bellflower, CA'},
'1562826':{'en': 'Long Beach, CA'},
'1562856':{'en': 'Long Beach, CA'},
'1562861':{'en': 'Downey, CA'},
'1562862':{'en': 'Downey, CA'},
'1562863':{'en': 'Norwalk, CA'},
'1562864':{'en': 'Norwalk, CA'},
'1562866':{'en': 'Bellflower, CA'},
'1562867':{'en': 'Bellflower, CA'},
'1562868':{'en': 'Norwalk, CA'},
'1562869':{'en': 'Downey, CA'},
'1562901':{'en': 'Long Beach, CA'},
'1562903':{'en': 'Santa Fe Springs, CA'},
'1562904':{'en': 'Downey, CA'},
'1562906':{'en': 'Santa Fe Springs, CA'},
'1562907':{'en': 'Whittier, CA'},
'1562912':{'en': 'Long Beach, CA'},
'1562920':{'en': 'Bellflower, CA'},
'1562923':{'en': 'Downey, CA'},
'1562925':{'en': 'Bellflower, CA'},
'1562929':{'en': 'Norwalk, CA'},
'1562933':{'en': 'Long Beach, CA'},
'1562938':{'en': 'Long Beach, CA'},
'1562942':{'en': 'Pico Rivera, CA'},
'1562945':{'en': 'Whittier, CA'},
'1562946':{'en': 'Santa Fe Springs, CA'},
'1562947':{'en': 'Whittier, CA'},
'1562948':{'en': 'Pico Rivera, CA'},
'1562949':{'en': 'Pico Rivera, CA'},
'1562951':{'en': 'Long Beach, CA'},
'1562961':{'en': 'Long Beach, CA'},
'156298':{'en': 'Long Beach, CA'},
'1562997':{'en': 'Long Beach, CA'},
'1563':{'en': 'Iowa'},
'1563242':{'en': 'Clinton, IA'},
'1563243':{'en': 'Clinton, IA'},
'1563244':{'en': 'Clinton, IA'},
'1563245':{'en': 'Elkader, IA'},
'1563252':{'en': 'Guttenberg, IA'},
'1563259':{'en': 'Camanche, IA'},
'1563262':{'en': 'Muscatine, IA'},
'1563263':{'en': 'Muscatine, IA'},
'1563264':{'en': 'Muscatine, IA'},
'1563284':{'en': 'Walcott, IA'},
'1563285':{'en': 'Eldridge, IA'},
'1563289':{'en': '<NAME>, IA'},
'1563322':{'en': 'Davenport, IA'},
'1563323':{'en': 'Davenport, IA'},
'1563324':{'en': 'Davenport, IA'},
'1563326':{'en': 'Davenport, IA'},
'1563332':{'en': 'Bettendorf, IA'},
'1563355':{'en': 'Bettendorf, IA'},
'1563359':{'en': 'Davenport, IA'},
'1563382':{'en': 'Decorah, IA'},
'1563383':{'en': 'Davenport, IA'},
'1563386':{'en': 'Davenport, IA'},
'1563388':{'en': 'Davenport, IA'},
'1563391':{'en': 'Davenport, IA'},
'1563421':{'en': 'Davenport, IA'},
'1563422':{'en': 'West Union, IA'},
'1563441':{'en': 'Davenport, IA'},
'1563445':{'en': 'Davenport, IA'},
'1563532':{'en': 'Ossian, IA'},
'1563538':{'en': 'Lansing, IA'},
'1563539':{'en': 'Monona, IA'},
'1563547':{'en': 'Cresco, IA'},
'1563556':{'en': 'Dubuque, IA'},
'1563557':{'en': 'Dubuque, IA'},
'1563568':{'en': 'Waukon, IA'},
'1563578':{'en': 'Sumner, IA'},
'156358':{'en': 'Dubuque, IA'},
'1563652':{'en': 'Maquoketa, IA'},
'1563659':{'en': 'DeWitt, IA'},
'1563689':{'en': 'Preston, IA'},
'1563690':{'en': 'Dubuque, IA'},
'1563732':{'en': 'Wilton, IA'},
'1563742':{'en': 'Bettendorf, IA'},
'1563744':{'en': 'Farley, IA'},
'1563785':{'en': 'Durant, IA'},
'1563823':{'en': 'Davenport, IA'},
| |
import datetime
import pathlib
import pickle
from io import BytesIO
from unittest.mock import MagicMock, patch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import yaml
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.pipeline import Pipeline
from ml_tooling import Model
from ml_tooling.data import Dataset
from ml_tooling.logging import Log
from ml_tooling.metrics import Metrics, Metric
from ml_tooling.result import Result
from ml_tooling.search import Real
from ml_tooling.storage import FileStorage
from ml_tooling.transformers import DFStandardScaler, DFFeatureUnion
from ml_tooling.utils import MLToolingError, DatasetError
plt.switch_backend("agg")
class TestBaseClass:
    """Tests for the Model base class: type properties, construction,
    default-metric handling, save/load round-trips, logging, and
    (de)serialization to dicts/YAML."""

    def test_is_properties_works(
        self, classifier: Model, regression: Model, pipeline_linear: Pipeline
    ):
        assert classifier.is_regressor is False
        assert classifier.is_classifier is True
        assert regression.is_regressor is True
        assert regression.is_classifier is False
        assert classifier.is_pipeline is False
        assert regression.is_pipeline is False
        pipeline = Model(pipeline_linear)
        assert pipeline.is_pipeline is True

    def test_instantiate_model_with_non_estimator_pipeline_fails(self):
        example_pipe = Pipeline([("scale", DFStandardScaler)])
        with pytest.raises(
            MLToolingError,
            match="You passed a Pipeline without an estimator as the last step",
        ):
            Model(example_pipe)

    def test_instantiate_model_with_feature_pipeline_sets_estimator_correctly(self):
        example_pipe = Pipeline([("scale", DFStandardScaler)])
        clf = LinearRegression()
        model = Model(clf, feature_pipeline=example_pipe)
        expected = Pipeline([("features", example_pipe), ("estimator", clf)])
        assert model.estimator.steps == expected.steps

    def test_instantiate_model_with_other_object_fails(self):
        with pytest.raises(
            MLToolingError,
            match="Expected a Pipeline or Estimator - got <class 'dict'>",
        ):
            Model({})

    def test_default_metric_getter_works_as_expected_classifier(self):
        rf = Model(RandomForestClassifier(n_estimators=10))
        assert rf.config.CLASSIFIER_METRIC == "accuracy"
        assert rf.config.REGRESSION_METRIC == "r2"
        assert rf.default_metric == "accuracy"
        # Setting default_metric must update only the classifier metric here.
        rf.default_metric = "fowlkes_mallows_score"
        assert rf.config.CLASSIFIER_METRIC == "fowlkes_mallows_score"
        assert rf.config.REGRESSION_METRIC == "r2"
        assert rf.default_metric == "fowlkes_mallows_score"
        rf.config.reset_config()

    def test_default_metric_getter_works_as_expected_regressor(self):
        linreg = Model(LinearRegression())
        assert linreg.config.CLASSIFIER_METRIC == "accuracy"
        assert linreg.config.REGRESSION_METRIC == "r2"
        assert linreg.default_metric == "r2"
        # Setting default_metric must update only the regression metric here.
        linreg.default_metric = "neg_mean_squared_error"
        assert linreg.config.CLASSIFIER_METRIC == "accuracy"
        assert linreg.config.REGRESSION_METRIC == "neg_mean_squared_error"
        assert linreg.default_metric == "neg_mean_squared_error"
        linreg.config.reset_config()

    def test_default_metric_works_as_expected_without_pipeline(self):
        rf = Model(RandomForestClassifier(n_estimators=10))
        linreg = Model(LinearRegression())
        assert "accuracy" == rf.default_metric
        assert "r2" == linreg.default_metric
        rf.config.CLASSIFIER_METRIC = "fowlkes_mallows_score"
        linreg.config.REGRESSION_METRIC = "neg_mean_squared_error"
        assert "fowlkes_mallows_score" == rf.default_metric
        assert "neg_mean_squared_error" == linreg.default_metric
        rf.config.reset_config()
        linreg.config.reset_config()

    def test_default_metric_works_as_expected_with_pipeline(
        self, pipeline_logistic: Pipeline, pipeline_linear: Pipeline
    ):
        logreg = Model(pipeline_logistic)
        linreg = Model(pipeline_linear)
        assert "accuracy" == logreg.default_metric
        assert "r2" == linreg.default_metric
        logreg.config.CLASSIFIER_METRIC = "fowlkes_mallows_score"
        linreg.config.REGRESSION_METRIC = "neg_mean_squared_error"
        assert "fowlkes_mallows_score" == logreg.default_metric
        assert "neg_mean_squared_error" == linreg.default_metric
        logreg.config.reset_config()
        linreg.config.reset_config()

    def test_regression_model_can_be_saved(
        self, classifier: Model, tmp_path: pathlib.Path, train_iris_dataset
    ):
        # NOTE(review): despite the name, this exercises the classifier fixture.
        classifier.score_estimator(train_iris_dataset)
        load_storage = FileStorage(tmp_path)
        storage = FileStorage(tmp_path)
        saved_model_path = classifier.save_estimator(storage)
        assert saved_model_path.exists()
        loaded_model = classifier.load_estimator(saved_model_path, storage=load_storage)
        assert loaded_model.estimator.get_params() == classifier.estimator.get_params()

    def test_regression_model_filename_is_generated_correctly(
        self, classifier: Model, tmp_path: pathlib.Path, train_iris_dataset
    ):
        storage = FileStorage(tmp_path)
        saved_model_path = classifier.save_estimator(storage)
        assert saved_model_path.exists()
        # The filename stem encodes the estimator name plus a timestamp;
        # strptime raises ValueError if the pattern does not match.
        assert datetime.datetime.strptime(
            saved_model_path.stem, f"{classifier.estimator_name}_%Y_%m_%d_%H_%M_%S_%f"
        )

    def test_save_model_saves_pipeline_correctly(
        self, pipeline_logistic: Pipeline, tmp_path: pathlib.Path, train_iris_dataset
    ):
        model = Model(pipeline_logistic)
        model.train_estimator(train_iris_dataset)
        saved_model_path = model.save_estimator(FileStorage(tmp_path))
        assert saved_model_path.exists()

    @patch("ml_tooling.logging.log_estimator.get_git_hash")
    def test_save_estimator_saves_logging_dir_correctly(
        self, mock_hash: MagicMock, classifier: Model, tmp_path: pathlib.Path
    ):
        mock_hash.return_value = "1234"
        with classifier.log(str(tmp_path)):
            expected_file = classifier.save_estimator(FileStorage(tmp_path))
        assert expected_file.exists()
        # Saving inside a log context must emit a YAML log naming the estimator.
        assert (
            "LogisticRegression" in [str(file) for file in tmp_path.rglob("*.yaml")][0]
        )
        mock_hash.assert_called_once()

    def test_save_estimator_with_prod_flag_saves_correctly(self, classifier: Model):
        mock_storage = MagicMock()
        classifier.save_estimator(mock_storage, prod=True)
        mock_storage.save.assert_called_once_with(
            classifier.estimator, "production_model.pkl", prod=True
        )

    def test_save_estimator_uses_default_storage_if_no_storage_is_passed(
        self, tmp_path: pathlib.Path, classifier: Model
    ):
        classifier.config.ESTIMATOR_DIR = tmp_path
        classifier.save_estimator()
        models = classifier.config.default_storage.get_list()
        assert len(models) == 1
        new_classifier = Model.load_estimator(models[0])
        assert (
            classifier.estimator.get_params() == new_classifier.estimator.get_params()
        )

    @patch("ml_tooling.baseclass.import_path")
    def test_can_load_production_estimator(
        self, mock_path: MagicMock, classifier: Model
    ):
        # Simulate the packaged production model with an in-memory pickle.
        buffer = BytesIO()
        pickle.dump(classifier.estimator, buffer)
        buffer.seek(0)
        mock_path.return_value.__enter__.return_value = buffer
        model = Model.load_production_estimator("test")
        assert isinstance(model, Model)
        assert isinstance(model.estimator, BaseEstimator)

    def test_log_context_manager_works_as_expected(self, regression: Model):
        assert regression.config.LOG is False
        assert "runs" == regression.config.RUN_DIR.name
        with regression.log("test"):
            assert regression.config.LOG is True
            assert "test" == regression.config.RUN_DIR.name
            assert "runs" == regression.config.RUN_DIR.parent.name
        # Exiting the context must restore the original logging config.
        assert regression.config.LOG is False
        assert "runs" == regression.config.RUN_DIR.name
        assert "test" not in regression.config.RUN_DIR.parts

    def test_log_context_manager_logs_when_scoring_model(
        self, tmp_path: pathlib.Path, train_iris_dataset
    ):
        model = Model(LinearRegression())
        runs = tmp_path / "runs"
        with model.log(str(runs)):
            result = model.score_estimator(train_iris_dataset)
        for file in runs.rglob("LinearRegression_*"):
            with file.open() as f:
                log_result = yaml.safe_load(f)
            assert result.metrics.score == log_result["metrics"]["r2"]
            assert result.model.estimator_name == log_result["estimator_name"]

    def test_test_models_logs_when_given_dir(
        self, tmp_path: pathlib.Path, train_iris_dataset
    ):
        test_models_log = tmp_path / "test_estimators"
        Model.test_estimators(
            train_iris_dataset,
            [
                RandomForestClassifier(n_estimators=10),
                DummyClassifier(strategy="prior"),
            ],
            log_dir=str(test_models_log),
            metrics="accuracy",
        )
        for file in test_models_log.rglob("*.yaml"):
            with file.open() as f:
                result = yaml.safe_load(f)
            model_name = result["model_name"]
            assert model_name in {
                "IrisData_RandomForestClassifier",
                "IrisData_DummyClassifier",
            }

    def test_dump_serializes_correctly_without_pipeline(self, regression: Model):
        serialized_model = regression.to_dict()
        expected = [
            {
                "module": "sklearn.linear_model._base",
                "classname": "LinearRegression",
                "params": {
                    "copy_X": True,
                    "fit_intercept": True,
                    "n_jobs": None,
                    "normalize": False,
                },
            }
        ]
        assert serialized_model == expected

    def test_dump_serializes_correctly_with_pipeline(self, pipeline_linear: Pipeline):
        serialized_model = Model(pipeline_linear).to_dict()
        expected = [
            {
                "name": "scale",
                "module": "sklearn.preprocessing._data",
                "classname": "StandardScaler",
                "params": {"copy": True, "with_mean": True, "with_std": True},
            },
            {
                "name": "estimator",
                "module": "sklearn.linear_model._base",
                "classname": "LinearRegression",
                "params": {
                    "copy_X": True,
                    "fit_intercept": True,
                    "n_jobs": None,
                    "normalize": False,
                },
            },
        ]
        assert serialized_model == expected

    def test_to_dict_serializes_correctly_with_feature_union(
        self, feature_union_classifier: DFFeatureUnion
    ):
        model = Model(feature_union_classifier)
        result = model.to_dict()
        assert len(result) == 2
        union = result[0]
        assert union["name"] == "features"
        assert len(union["params"]) == 2
        pipe1 = union["params"][0]
        pipe2 = union["params"][1]
        assert pipe1["name"] == "pipe1"
        select1 = pipe1["params"][0]
        scale1 = pipe1["params"][1]
        assert select1["name"] == "select"
        assert select1["classname"] == "Select"
        assert select1["params"] == {
            "columns": ["sepal length (cm)", "sepal width (cm)"]
        }
        assert scale1["name"] == "scale"
        assert scale1["classname"] == "DFStandardScaler"
        assert scale1["params"] == {"copy": True, "with_mean": True, "with_std": True}
        assert pipe2["name"] == "pipe2"
        select2 = pipe2["params"][0]
        scale2 = pipe2["params"][1]
        assert select2["name"] == "select"
        assert select2["classname"] == "Select"
        assert select2["params"] == {
            "columns": ["petal length (cm)", "petal width (cm)"]
        }
        assert scale2["name"] == "scale"
        assert scale2["classname"] == "DFStandardScaler"
        assert scale2["params"] == {"copy": True, "with_mean": True, "with_std": True}

    def test_from_yaml_serializes_correctly_with_feature_union(
        self, feature_union_classifier: DFFeatureUnion, tmp_path: pathlib.Path
    ):
        model = Model(feature_union_classifier)
        result = model.to_dict()
        log = Log(
            name="test", metrics=Metrics.from_list(["accuracy"]), estimator=result
        )
        log.save_log(tmp_path)
        new_model = Model.from_yaml(log.output_path)
        assert len(new_model.estimator.steps[0][1].transformer_list) == 2
        new_steps = new_model.estimator.steps
        old_steps = model.estimator.steps
        assert new_steps[0][0] == old_steps[0][0]
        assert isinstance(new_steps[0][1], type(old_steps[0][1]))
        new_union = new_steps[0][1].transformer_list
        old_union = old_steps[0][1].transformer_list
        assert len(new_union) == len(old_union)
        for new_transform, old_transform in zip(new_union, old_union):
            assert new_transform[1].steps[0][0] == old_transform[1].steps[0][0]
            assert (
                new_transform[1].steps[0][1].get_params()
                == old_transform[1].steps[0][1].get_params()
            )

    def test_can_load_serialized_model_from_pipeline(
        self, pipeline_linear: Pipeline, tmp_path: pathlib.Path
    ):
        model = Model(pipeline_linear)
        log = Log(
            name="test",
            estimator=model.to_dict(),
            metrics=Metrics([Metric("accuracy", score=1.0)]),
        )
        log.save_log(tmp_path)
        model2 = Model.from_yaml(log.output_path)
        # Fixed: the loop targets previously shadowed `model2`, which only
        # worked because zip() evaluates its arguments before the first
        # assignment; use distinct names for the per-step pairs.
        for step1, step2 in zip(model.estimator.steps, model2.estimator.steps):
            assert step1[0] == step2[0]
            assert step1[1].get_params() == step2[1].get_params()

    def test_can_load_serialized_model_from_estimator(
        self, classifier: Model, tmp_path: pathlib.Path
    ):
        log = Log(
            name="test",
            estimator=classifier.to_dict(),
            metrics=Metrics([Metric("accuracy", score=1.0)]),
        )
        log.save_log(tmp_path)
        model2 = Model.from_yaml(log.output_path)
        assert model2.estimator.get_params() == classifier.estimator.get_params()
class TestTrainEstimator:
    """Tests for Model.train_estimator and its interaction with scoring/saving."""

    def test_train_model_sets_result_to_none(
        self, regression: Model, train_iris_dataset
    ):
        # Training invalidates any previously computed result.
        assert regression.result is not None
        regression.train_estimator(train_iris_dataset)
        assert regression.result is None

    def test_train_model_followed_by_score_model_returns_correctly(
        self, pipeline_logistic: Pipeline, train_iris_dataset
    ):
        trained = Model(pipeline_logistic)
        trained.train_estimator(train_iris_dataset)
        trained.score_estimator(train_iris_dataset)
        assert isinstance(trained.result, Result)

    def test_train_model_errors_correctly_when_not_scored(
        self, pipeline_logistic: Pipeline, tmp_path: pathlib.Path, train_iris_dataset
    ):
        unscored = Model(pipeline_logistic)
        # Saving while a log context is active requires a scored estimator.
        with pytest.raises(MLToolingError, match="You haven't scored the estimator"):
            with unscored.log(str(tmp_path)):
                unscored.train_estimator(train_iris_dataset)
                unscored.save_estimator(FileStorage(tmp_path))

    def test_can_score_estimator_with_no_y_value(self):
        class MeanOnlyEstimator(BaseEstimator, RegressorMixin):
            # Minimal estimator: memorises the column means of x and predicts them.
            def __init__(self):
                self.average = None

            def fit(self, x, y=None):
                self.average = np.mean(x, axis=0)
                return self

            def predict(self, x):
                return self.average

        class TargetlessData(Dataset):
            # Dataset whose training data deliberately has no y value.
            def load_training_data(self):
                return pd.DataFrame({"col1": [1, 2, 3, 4], "col2": [4, 5, 6, 7]}), None

            def load_prediction_data(self, *args, **kwargs):
                return pd.DataFrame({"col1": [1, 2, 3, 4], "col2": [4, 5, 6, 7]})

        fitted = Model(MeanOnlyEstimator())
        data = TargetlessData()
        fitted.train_estimator(data)
        assert np.allclose(fitted.estimator.average, np.array([2.5, 5.5]))
        with pytest.raises(DatasetError, match="The dataset does not define a y value"):
            data.create_train_test()
class TestScoreEstimator:
    """Tests for Model.score_estimator: train/test splitting and metric selection."""

    @staticmethod
    def _assert_split_matches(actual, expected):
        # Both datasets must hold exactly the same train/test partition.
        pd.testing.assert_frame_equal(actual.test_x, expected.test_x)
        assert np.array_equal(actual.test_y, expected.test_y)
        pd.testing.assert_frame_equal(actual.train_x, expected.train_x)
        assert np.array_equal(actual.train_y, expected.train_y)

    def test_score_estimator_creates_train_test_data(
        self, boston_dataset, train_boston_dataset
    ):
        reg = Model(LinearRegression())
        data = boston_dataset()
        reg.score_estimator(data)
        self._assert_split_matches(data, train_boston_dataset)

    def test_score_estimator_creates_train_test_data_classification(
        self, iris_dataset, train_iris_dataset
    ):
        clf = Model(LogisticRegression())
        data = iris_dataset()
        clf.score_estimator(data)
        self._assert_split_matches(data, train_iris_dataset)

    def test_score_estimator_creates_train_test_data_with_changed_config(
        self, boston_dataset
    ):
        reg = Model(LinearRegression())
        reg.config.RANDOM_STATE = 1
        reg.config.TEST_SIZE = 0.5
        reg.config.TRAIN_TEST_SHUFFLE = False
        data = boston_dataset()
        reg.score_estimator(data)
        # Reproduce the same split manually to verify the config was honoured.
        reference = boston_dataset()
        reference.create_train_test(stratify=False, shuffle=False, seed=1, test_size=0.5)
        self._assert_split_matches(data, reference)
        reg.config.reset_config()

    def test_score_estimator_creates_train_test_data_with_changed_config_and_classification_data(
        self, iris_dataset
    ):
        clf = Model(LogisticRegression())
        clf.config.RANDOM_STATE = 1
        clf.config.TEST_SIZE = 0.50
        data = iris_dataset()
        clf.score_estimator(data)
        # Classification data is stratified by default.
        reference = iris_dataset()
        reference.create_train_test(stratify=True, seed=1, test_size=0.50)
        self._assert_split_matches(data, reference)
        clf.config.reset_config()

    def test_can_score_estimator_with_specified_metric(self, train_iris_dataset):
        clf = Model(LogisticRegression(solver="liblinear"))
        outcome = clf.score_estimator(train_iris_dataset, metrics="roc_auc")
        assert outcome.metrics.name == "roc_auc"

    def test_can_score_estimator_with_default_metric(self, train_iris_dataset):
        clf = Model(LogisticRegression(solver="liblinear"))
        outcome = clf.score_estimator(train_iris_dataset)
        assert outcome.metrics.name == "accuracy"

    def test_can_score_estimator_with_multiple_metrics(self, train_iris_dataset):
        clf = Model(LogisticRegression(solver="liblinear"))
        outcome = clf.score_estimator(
            train_iris_dataset, metrics=["accuracy", "roc_auc"]
        )
        assert len(outcome.metrics) == 2
        assert "accuracy" in outcome.metrics
        assert "roc_auc" in outcome.metrics
class TestModelSelection:
def test_model_selection_works_as_expected(self, train_iris_dataset):
models = [
LogisticRegression(solver="liblinear"),
RandomForestClassifier(n_estimators=10),
]
best_model, results = Model.test_estimators(
train_iris_dataset, models, metrics="accuracy"
)
assert models[1] is best_model.estimator
assert 2 == len(results)
assert results[0].metrics[0].score >= results[1].metrics[0].score
for result |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.