input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
zip
# Derive the 5-digit zip from the full (possibly 9-digit) cleaned zip code.
p14['zip5'] = p14['zip'].map(lambda x: x[:5])
"""#Changing the NAN strings back to np.NaN (this happened while we were cleaning)
p14.replace('NAN', np.NaN,inplace=True)"""
#I dropped this rn
#Group by the ID (plus the name/address fields) so we can shrink our df to
#one row per distinct (id, name, address, ...) combination.
# BUG FIX: reset_index(inplace=True) returns None, which left payids as None
# and crashed the .drop() below -- chain a plain reset_index() and assign
# its result instead.
payids = p14.groupby(['id','fn','ln','mn','zip','specialty','address1']).count()['amount'].to_frame().reset_index()
#Dropping amount column because it's not necessary rn
payids.drop('amount',axis=1,inplace=True)
#Finding all the misspelled names/addresses/etc.: any id that still appears
#on more than one row has more than one distinct spelling.
mis = payids[payids['id'].isin(payids['id'][payids['id'].duplicated()])].sort_values("id")
#Saving as a CSV for future use
payids.to_csv('/Volumes/Seagate/Galvanize/nj_14_payment_ids.csv')
mis.to_csv('/Volumes/Seagate/Galvanize/nj_14_multi_name_payment_ids.csv')
"""Cleaning the Prescriptions Data for joining & grabbing only the NJ data"""
#First I load only a small subset of the data to get an idea of the columns I'll need to use for joining
prescrip14 = pd.read_csv('/Volumes/Seagate/Galvanize/Prescriptions 2014.csv', nrows=100)
#This is so I can view all the columns since otherwise they get cut off
prescrip14[:1].to_dict('index')
#Then I pick only the columns I want & load the entire csv
scripts14 = pd.read_csv('/Volumes/Seagate/Galvanize/Prescriptions 2014.csv',usecols=list(pres14.columns[:6]))
#Grabbing only the NJ practioners
scripts14nj = scripts14[scripts14['nppes_provider_state'].isin(['NJ'])]
#Renaming the columns so they're easier to call
l = scripts14nj.columns
scripts14nj.rename(columns={l[0]:'npi',l[1]:'ln',l[2]:'fn',l[3]:'city',l[4]:'state',l[5]:'specialty'},inplace=True)
#Upper casing the first name, last name, and city
scripts14nj['ln'] = [x.upper() for x in scripts14nj['ln']]
scripts14nj['fn'] = [str(x).upper() for x in scripts14nj['fn']]
scripts14nj['city'] = [x.upper() for x in scripts14nj['city']]
"""Saving as a csv for future use"""
scripts14nj.to_csv('/Volumes/Seagate/Galvanize/2014_scriptsnj.csv')
#Grouping by every identifying field so each distinct
#(npi, ln, fn, city, specialty) combination collapses to one row; the
#counted 'state' column is a throwaway aggregate.
s14 = scripts14nj.groupby(['npi','ln','fn','city','specialty']).count()['state'].to_frame()
#Resetting the index because all my groupby keys are now the index, which won't work for a merge
s14.reset_index(inplace=True)
#Checking if any of the people's names are misspelled: an npi appearing on
#more than one row has more than one distinct name/city/specialty spelling.
dup14 = s14[s14['npi'].isin(s14['npi'][s14['npi'].duplicated()])].sort_values("npi")
#There were none
"""Combining with NPI df so I can remove all the organizations"""
#Joining the Scripts & NPI
"""Reasons for doing this:
1. To get the Middle name of the individual
2. To remove all organizations from the prescriptions data
3. To get the address & zip of the individual
"""
#npi_idv is built earlier in the script (individual-provider NPI records);
#the default inner merge keeps only prescribers present in that table.
script_npi = s14.merge(npi_idv, left_on='npi',right_on='NPI')
#Checking to make sure the ones not in the joined df are actually only organizations
notjoined = npi_nj[npi_nj['NPI'].isin(list(s14[~s14.npi.isin(script_npi.NPI)]['npi']))]
#Entity Type Code 1.0 = individuals; this bare expression is evaluated
#interactively -- a result of 0 confirms all dropped rows were organizations.
len(notjoined[notjoined['Entity Type Code'].isin([1.0])])
#1 = Indivduals, There were none
#Dropping the unnecessary columns & the NPI column because it's repeated
#(same value as 'npi' after the merge).
script_npi.drop(['Entity Type Code','NPI','state_x','state_y'],axis=1,inplace=True)
#Cleaning the joined dataframe, removing essentially all punctuation & spaces
#from the first or last name. Suffix _x = prescriptions side of the merge,
#_y = NPI-registry side.
script_npi['ln_x'] = script_npi['ln_x'].map(lambda x: x.replace(' ',''))
script_npi['ln_y'] = script_npi['ln_y'].map(lambda x: x.replace(' ',''))
script_npi['fn_x'] = script_npi['fn_x'].map(lambda x: x.replace(' ',''))
script_npi['fn_y'] = script_npi['fn_y'].map(lambda x: x.replace(' ',''))
#mn goes through str() because middle names can be missing (NaN floats).
script_npi['mn'] = script_npi['mn'].map(lambda x: str(x).replace(' ',''))
#Strip everything that is not a word character or whitespace, upper-cased.
script_npi['ln_x'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['ln_x']]
script_npi['ln_y'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['ln_y']]
script_npi['fn_x'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['fn_x']]
script_npi['fn_y'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['fn_y']]
script_npi['mn'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['mn']]
script_npi['address1'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['address1']]
#Cleaning the zip codes, NJ has a leading 0 in the zip so it might have been dropped
script_npi['zip'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['zip']]
#A 9-digit zip that lost its leading 0 has len 8; a 5-digit one has len 4.
script_npi['zip'] = [('0'+str(x)) if len(str(x))==8 or len(str(x))==4 else str(x) for x in script_npi['zip']]
"""#Changing the NAN strings back to np.NaN (this happened while we were cleaning)
script_npi.replace('NAN', np.NaN,inplace=True)"""
#I dropped this rn
#Making a column for the 5 digit zip
script_npi['zip5'] = script_npi['zip'].map(lambda x: x[:5])
"""Dealing with mismatched/mispelled names"""
#Interactive inspection only (value unused): rows where BOTH first & last
#names differ between the two sources.
script_npi.loc[(script_npi['ln_x']!=script_npi['ln_y']) & (script_npi['fn_x']!=script_npi['fn_y'])]
#Making a new table of rows where EITHER the first or the last name differs.
mis_name = script_npi.loc[(script_npi['ln_x']!=script_npi['ln_y']) | (script_npi['fn_x']!=script_npi['fn_y'])]
#Saving these as CSVs for future use
script_npi.to_csv('/Volumes/Seagate/Galvanize/nj_14_scrip_npi.csv')
mis_name.to_csv('/Volumes/Seagate/Galvanize/nj_14_multi_name_npi.csv')
"""Making 1 dictionary for each NPI & 1 dictionary for each Payment_ID
Keys = NPI or Payment ID
Values = Dictionary of:
Keys: first name(fn), last name(ln), 5 digit zip(zip5), address(address1), Physician Specialty (specialty)
Values: All the values that are listed in the NPI/Prescriptions or Payments data for that Key
This is due to misspellings, extra info (Ex: Road instead of Rd), etc.
"""
#Clearing the the Payment ID dictionary
d = {}
#Applying the function to our payment_ids
payids.apply(payment_id,axis=1)
#Pickling it so I can utilize it with ease later
pickle.dump(d, open('14paydict.pkl', 'wb'))
#Clearing the NPI dictioanry
n = {}
#Applying the function to our payment_ids
script_npi.apply(script_npi_dict,axis=1)
#Pickling it so I can utilize it with ease later
pickle.dump(n, open('14npidict.pkl', 'wb'))
#Just renaming the 2 dictionaries from above
pay_id_14_dict = d
npi_14_dict = n
"""Combining the NPI & Payment_ID dictionaries to match them up"""
npi_pay_dict = {}
#Combining them
combine_npi_payid(pay_id_14_dict, npi_14_dict)
#Pickling it so I can utilize it with ease later
pickle.dump(npi_pay_dict, open('14_linked.pkl', 'wb'))
"""2015 DATA"""
"""Cleaning the Payments Data for joining & grabbing only the NJ data"""
#Loading csv
pay15 = pd.read_csv('/Volumes/Seagate/Galvanize/2015 Open Payments/OP_DTL_GNRL_PGYR2015_P01172018.csv', \
usecols=['Physician_Profile_ID','Physician_First_Name','Physician_Middle_Name', \
'Physician_Last_Name','Recipient_State','Total_Amount_of_Payment_USDollars', \
'Recipient_Primary_Business_Street_Address_Line1', 'Physician_Specialty', 'Recipient_Zip_Code'], \
dtype={'Recipient_Zip_Code':object})
#Grabbing only the NJ practioners
p15 = pay15[pay15['Recipient_State'].isin(['NJ'])]
"""Saving as a CSV for future use"""
p15.to_csv('/Volumes/Seagate/Galvanize/nj_payments_2015.csv')
#Renaming the columns so they're easier to call
l = p15.columns
p15.rename(columns={l[0]:'id',l[1]:'fn',l[2]:'mn',l[3]:'ln',l[4]:'address1',l[5]:'state',l[6]:'zip',l[-2]:'specialty',l[-1]:'amount'},inplace=True)
#Cleaning the first & last name (removing space & symbols) and the address (removing any symbols)
p15['ln'] = p15['ln'].map(lambda x: str(x).replace(' ',''))
p15['fn'] = p15['fn'].map(lambda x: str(x).replace(' ',''))
p15['mn'] = p15['mn'].map(lambda x: str(x).replace(' ',''))
p15['ln'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in p15['ln']]
p15['fn'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in p15['fn']]
p15['mn'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in p15['mn']]
p15['address1'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in p15['address1']]
#Cleaning the zip codes (removing dashes & making sure they are not missing the leading 0 that most NJ zip codes have)
p15['zip'] = [x.replace('-','') for x in p15['zip']]
p15['zip'] = [('0'+str(x)) if len(str(x))==8 or len(str(x))==4 else str(x) for x in p15['zip']]
#Making a column for the 5 digit zip
p15['zip5'] = p15['zip'].map(lambda x: x[:5])
"""#Changing the NAN strings back to np.NaN (this happened while we were cleaning)
p15.replace('NAN', np.NaN,inplace=True)"""
#I dropped this rn
#Groupby the ID so we can shrink our df
payids = p15.groupby(['id','fn','ln','mn','zip','specialty','address1']).count()['amount'].to_frame().reset_index(inplace=True)
#Dropping amount column because it's not necessary rn
payids.drop('amount',axis=1,inplace=True)
#Finding all the mispelled names/addresses/etc.
mis = payids[payids['id'].isin(payids['id'][payids['id'].duplicated()])].sort_values("id")
#Saving as a CSV for future use
payids.to_csv('/Volumes/Seagate/Galvanize/nj_15_payment_ids.csv')
mis.to_csv('/Volumes/Seagate/Galvanize/nj_15_multi_name_payment_ids.csv')
"""Cleaning the Prescriptions Data for joining & grabbing only the NJ data"""
#First I load only a small subset of the data to get an idea of the columns I'll need to use for joining
prescrip15 = pd.read_csv('/Volumes/Seagate/Galvanize/Prescriptions 2015.csv', nrows=100)
#This is so I can view all the columns since otherwise they get cut off
prescrip15[:1].to_dict('index')
#Then I pick only the columns I want & load the entire csv
scripts15 = pd.read_csv('/Volumes/Seagate/Galvanize/Prescriptions 2015.csv',usecols=list(pres15.columns[:6]))
#Grabbing only the NJ practioners
scripts15nj = scripts15[scripts15['nppes_provider_state'].isin(['NJ'])]
#Renaming the columns so they're easier to call
l = scripts15nj.columns
scripts15nj.rename(columns={l[0]:'npi',l[1]:'ln',l[2]:'fn',l[3]:'city',l[4]:'state',l[5]:'specialty'},inplace=True)
#Upper casing the first name, last name, and city
scripts15nj['ln'] = [x.upper() for x in scripts15nj['ln']]
scripts15nj['fn'] = [str(x).upper() for x in scripts15nj['fn']]
scripts15nj['city'] = [x.upper() for x in scripts15nj['city']]
"""Saving as a csv for future use"""
scripts15nj.to_csv('/Volumes/Seagate/Galvanize/2015_scriptsnj.csv')
#Grouping by every identifying field so each distinct
#(npi, ln, fn, city, specialty) combination collapses to one row; the
#counted 'state' column is a throwaway aggregate.
s15 = scripts15nj.groupby(['npi','ln','fn','city','specialty']).count()['state'].to_frame()
#Resetting the index because all my groupby keys are now the index, which won't work for a merge
s15.reset_index(inplace=True)
#Checking if any of the people's names are misspelled: an npi appearing on
#more than one row has more than one distinct name/city/specialty spelling.
dup15 = s15[s15['npi'].isin(s15['npi'][s15['npi'].duplicated()])].sort_values("npi")
#There were none
"""Combining with NPI df so I can remove all the organizations"""
#Joining the Scripts & NPI
"""Reasons for doing this:
1. To get the Middle name of the individual
2. To remove all organizations from the prescriptions data
3. To get the address & zip of the individual
"""
#npi_idv is built earlier in the script (individual-provider NPI records);
#the default inner merge keeps only prescribers present in that table.
script_npi = s15.merge(npi_idv, left_on='npi',right_on='NPI')
#Checking to make sure the ones not in the joined df are actually only organizations
notjoined = npi_nj[npi_nj['NPI'].isin(list(s15[~s15.npi.isin(script_npi.NPI)]['npi']))]
#Entity Type Code 1.0 = individuals; this bare expression is evaluated
#interactively -- a result of 0 confirms all dropped rows were organizations.
len(notjoined[notjoined['Entity Type Code'].isin([1.0])])
#1 = Indivduals, There were none
#Dropping the unnecessary columns & the NPI column because it's repeated
#(same value as 'npi' after the merge).
script_npi.drop(['Entity Type Code','NPI','state_x','state_y'],axis=1,inplace=True)
#Cleaning the joined dataframe, removing essentially all punctuation & spaces
#from the first or last name. Suffix _x = prescriptions side of the merge,
#_y = NPI-registry side.
script_npi['ln_x'] = script_npi['ln_x'].map(lambda x: x.replace(' ',''))
script_npi['ln_y'] = script_npi['ln_y'].map(lambda x: x.replace(' ',''))
script_npi['fn_x'] = script_npi['fn_x'].map(lambda x: x.replace(' ',''))
script_npi['fn_y'] = script_npi['fn_y'].map(lambda x: x.replace(' ',''))
#mn goes through str() because middle names can be missing (NaN floats).
script_npi['mn'] = script_npi['mn'].map(lambda x: str(x).replace(' ',''))
#Strip everything that is not a word character or whitespace, upper-cased.
script_npi['ln_x'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['ln_x']]
script_npi['ln_y'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['ln_y']]
script_npi['fn_x'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['fn_x']]
script_npi['fn_y'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['fn_y']]
script_npi['mn'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['mn']]
script_npi['address1'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['address1']]
#Cleaning the zip codes, NJ has a leading 0 in the zip so it might have been dropped
script_npi['zip'] = [re.sub(r'[^\w\s]','',str(x).upper()) for x in script_npi['zip']]
#A 9-digit zip that lost its leading 0 has len 8; a 5-digit one has len 4.
script_npi['zip'] = [('0'+str(x)) if len(str(x))==8 or len(str(x))==4 else str(x) for x in script_npi['zip']]
"""#Changing the NAN strings back to np.NaN (this happened while we were cleaning)
script_npi.replace('NAN', np.NaN,inplace=True)"""
#I dropped this rn
#Making a column for the 5 digit zip
script_npi['zip5'] = script_npi['zip'].map(lambda x: x[:5])
"""Dealing with mismatched/mispelled names"""
#Interactive inspection only (value unused): rows where BOTH first & last
#names differ between the two sources.
script_npi.loc[(script_npi['ln_x']!=script_npi['ln_y']) & (script_npi['fn_x']!=script_npi['fn_y'])]
#Making a new table of rows where EITHER the first or the last name differs.
mis_name = script_npi.loc[(script_npi['ln_x']!=script_npi['ln_y']) | (script_npi['fn_x']!=script_npi['fn_y'])]
#Saving these as CSVs for future use
script_npi.to_csv('/Volumes/Seagate/Galvanize/nj_15_scrip_npi.csv')
mis_name.to_csv('/Volumes/Seagate/Galvanize/nj_15_multi_name_npi.csv')
"""Making 1 dictionary for each NPI & 1 dictionary for each Payment_ID
Keys = NPI or Payment ID
Values = Dictionary of:
Keys: first name(fn), last name(ln), 5 digit zip(zip5), address(address1), Physician Specialty (specialty)
Values: All the values that are listed in the NPI/Prescriptions or Payments data for that Key
This is due to misspellings, extra info (Ex: Road instead of Rd), etc.
"""
#Clearing the | |
'subordinates': 'subordinates', 'workload_status': 'workload-status', 'workload_version': 'workload-version'}
_toPy = {'address': 'address', 'agent-status': 'agent_status', 'charm': 'charm', 'leader': 'leader', 'machine': 'machine', 'opened-ports': 'opened_ports', 'provider-id': 'provider_id', 'public-address': 'public_address', 'subordinates': 'subordinates', 'workload-status': 'workload_status', 'workload-version': 'workload_version'}
def __init__(self, address=None, agent_status=None, charm=None, leader=None, machine=None, opened_ports=None, provider_id=None, public_address=None, subordinates=None, workload_status=None, workload_version=None, **unknown_fields):
    '''Build a unit-status record from Juju API JSON fields.

    address : str
    agent_status : DetailedStatus
    charm : str
    leader : bool
    machine : str
    opened_ports : typing.Sequence[str]
    provider_id : str
    public_address : str
    subordinates : typing.Mapping[str, ~UnitStatus]
    workload_status : DetailedStatus
    workload_version : str
    '''
    # Copy plain fields as-is; nested Juju API types are deserialized from
    # their JSON (dict) form when provided, otherwise left as None.
    address_ = address
    agent_status_ = DetailedStatus.from_json(agent_status) if agent_status else None
    charm_ = charm
    leader_ = leader
    machine_ = machine
    opened_ports_ = opened_ports
    provider_id_ = provider_id
    public_address_ = public_address
    subordinates_ = subordinates
    workload_status_ = DetailedStatus.from_json(workload_status) if workload_status else None
    workload_version_ = workload_version

    # Validate arguments against known Juju API types.
    if address_ is not None and not isinstance(address_, (bytes, str)):
        raise Exception("Expected address_ to be a str, received: {}".format(type(address_)))

    if agent_status_ is not None and not isinstance(agent_status_, (dict, DetailedStatus)):
        raise Exception("Expected agent_status_ to be a DetailedStatus, received: {}".format(type(agent_status_)))

    if charm_ is not None and not isinstance(charm_, (bytes, str)):
        raise Exception("Expected charm_ to be a str, received: {}".format(type(charm_)))

    if leader_ is not None and not isinstance(leader_, bool):
        raise Exception("Expected leader_ to be a bool, received: {}".format(type(leader_)))

    if machine_ is not None and not isinstance(machine_, (bytes, str)):
        raise Exception("Expected machine_ to be a str, received: {}".format(type(machine_)))

    if opened_ports_ is not None and not isinstance(opened_ports_, (bytes, str, list)):
        raise Exception("Expected opened_ports_ to be a Sequence, received: {}".format(type(opened_ports_)))

    if provider_id_ is not None and not isinstance(provider_id_, (bytes, str)):
        raise Exception("Expected provider_id_ to be a str, received: {}".format(type(provider_id_)))

    if public_address_ is not None and not isinstance(public_address_, (bytes, str)):
        raise Exception("Expected public_address_ to be a str, received: {}".format(type(public_address_)))

    if subordinates_ is not None and not isinstance(subordinates_, dict):
        raise Exception("Expected subordinates_ to be a Mapping, received: {}".format(type(subordinates_)))

    if workload_status_ is not None and not isinstance(workload_status_, (dict, DetailedStatus)):
        raise Exception("Expected workload_status_ to be a DetailedStatus, received: {}".format(type(workload_status_)))

    if workload_version_ is not None and not isinstance(workload_version_, (bytes, str)):
        raise Exception("Expected workload_version_ to be a str, received: {}".format(type(workload_version_)))

    self.address = address_
    self.agent_status = agent_status_
    self.charm = charm_
    self.leader = leader_
    self.machine = machine_
    self.opened_ports = opened_ports_
    self.provider_id = provider_id_
    self.public_address = public_address_
    self.subordinates = subordinates_
    self.workload_status = workload_status_
    self.workload_version = workload_version_
    self.unknown_fields = unknown_fields
class UnitsNetworkConfig(Type):
    # Attribute-name <-> wire-name maps used by the Type base class.
    _toSchema = {'args': 'args'}
    _toPy = {'args': 'args'}

    def __init__(self, args=None, **unknown_fields):
        '''
        args : typing.Sequence[~UnitNetworkConfig]
        '''
        # Deserialize every element from its JSON (dict) form.
        parsed = [UnitNetworkConfig.from_json(item) for item in (args or [])]
        # Validate arguments against known Juju API types.
        if not (parsed is None or isinstance(parsed, (bytes, str, list))):
            raise Exception("Expected args_ to be a Sequence, received: {}".format(type(parsed)))
        self.args = parsed
        self.unknown_fields = unknown_fields
class UnitsResolved(Type):
    # Attribute-name <-> wire-name maps ('all' is a Python keyword-ish name,
    # hence the trailing underscore on the Python side).
    _toSchema = {'all_': 'all', 'retry': 'retry', 'tags': 'tags'}
    _toPy = {'all': 'all_', 'retry': 'retry', 'tags': 'tags'}

    def __init__(self, all_=None, retry=None, tags=None, **unknown_fields):
        '''
        all_ : bool
        retry : bool
        tags : Entities
        '''
        resolved_all = all_
        resolved_retry = retry
        resolved_tags = Entities.from_json(tags) if tags else None
        # Validate arguments against known Juju API types.
        if not (resolved_all is None or isinstance(resolved_all, bool)):
            raise Exception("Expected all__ to be a bool, received: {}".format(type(resolved_all)))
        if not (resolved_retry is None or isinstance(resolved_retry, bool)):
            raise Exception("Expected retry_ to be a bool, received: {}".format(type(resolved_retry)))
        if not (resolved_tags is None or isinstance(resolved_tags, (dict, Entities))):
            raise Exception("Expected tags_ to be a Entities, received: {}".format(type(resolved_tags)))
        self.all_ = resolved_all
        self.retry = resolved_retry
        self.tags = resolved_tags
        self.unknown_fields = unknown_fields
class UnsetModelDefaults(Type):
    # Attribute-name <-> wire-name maps used by the Type base class.
    _toSchema = {'keys': 'keys'}
    _toPy = {'keys': 'keys'}

    def __init__(self, keys=None, **unknown_fields):
        '''
        keys : typing.Sequence[~ModelUnsetKeys]
        '''
        # Deserialize every element from its JSON (dict) form.
        parsed = [ModelUnsetKeys.from_json(item) for item in (keys or [])]
        # Validate arguments against known Juju API types.
        if not (parsed is None or isinstance(parsed, (bytes, str, list))):
            raise Exception("Expected keys_ to be a Sequence, received: {}".format(type(parsed)))
        self.keys = parsed
        self.unknown_fields = unknown_fields
class UpdateApplicationServiceArg(Type):
    # Attribute-name <-> wire-name maps used by the Type base class
    # (hyphenated names on the wire, underscored in Python).
    _toSchema = {'addresses': 'addresses', 'application_tag': 'application-tag', 'generation': 'generation', 'provider_id': 'provider-id', 'scale': 'scale'}
    _toPy = {'addresses': 'addresses', 'application-tag': 'application_tag', 'generation': 'generation', 'provider-id': 'provider_id', 'scale': 'scale'}

    def __init__(self, addresses=None, application_tag=None, generation=None, provider_id=None, scale=None, **unknown_fields):
        '''
        addresses : typing.Sequence[~Address]
        application_tag : str
        generation : int
        provider_id : str
        scale : int
        '''
        # Nested Address elements are deserialized from JSON dicts;
        # plain fields are copied as-is.
        addresses_ = [Address.from_json(o) for o in addresses or []]
        application_tag_ = application_tag
        generation_ = generation
        provider_id_ = provider_id
        scale_ = scale

        # Validate arguments against known Juju API types.
        if addresses_ is not None and not isinstance(addresses_, (bytes, str, list)):
            raise Exception("Expected addresses_ to be a Sequence, received: {}".format(type(addresses_)))

        if application_tag_ is not None and not isinstance(application_tag_, (bytes, str)):
            raise Exception("Expected application_tag_ to be a str, received: {}".format(type(application_tag_)))

        if generation_ is not None and not isinstance(generation_, int):
            raise Exception("Expected generation_ to be a int, received: {}".format(type(generation_)))

        if provider_id_ is not None and not isinstance(provider_id_, (bytes, str)):
            raise Exception("Expected provider_id_ to be a str, received: {}".format(type(provider_id_)))

        if scale_ is not None and not isinstance(scale_, int):
            raise Exception("Expected scale_ to be a int, received: {}".format(type(scale_)))

        self.addresses = addresses_
        self.application_tag = application_tag_
        self.generation = generation_
        self.provider_id = provider_id_
        self.scale = scale_
        self.unknown_fields = unknown_fields
class UpdateApplicationServiceArgs(Type):
    # Attribute-name <-> wire-name maps used by the Type base class.
    _toSchema = {'args': 'args'}
    _toPy = {'args': 'args'}

    def __init__(self, args=None, **unknown_fields):
        '''
        args : typing.Sequence[~UpdateApplicationServiceArg]
        '''
        # Deserialize every element from its JSON (dict) form.
        parsed = [UpdateApplicationServiceArg.from_json(item) for item in (args or [])]
        # Validate arguments against known Juju API types.
        if not (parsed is None or isinstance(parsed, (bytes, str, list))):
            raise Exception("Expected args_ to be a Sequence, received: {}".format(type(parsed)))
        self.args = parsed
        self.unknown_fields = unknown_fields
class UpdateApplicationUnitArgs(Type):
    # Attribute-name <-> wire-name maps used by the Type base class.
    _toSchema = {'args': 'args'}
    _toPy = {'args': 'args'}

    def __init__(self, args=None, **unknown_fields):
        '''
        args : typing.Sequence[~UpdateApplicationUnits]
        '''
        # Deserialize every element from its JSON (dict) form.
        parsed = [UpdateApplicationUnits.from_json(item) for item in (args or [])]
        # Validate arguments against known Juju API types.
        if not (parsed is None or isinstance(parsed, (bytes, str, list))):
            raise Exception("Expected args_ to be a Sequence, received: {}".format(type(parsed)))
        self.args = parsed
        self.unknown_fields = unknown_fields
class UpdateApplicationUnits(Type):
    # Attribute-name <-> wire-name maps used by the Type base class.
    _toSchema = {'application_tag': 'application-tag', 'generation': 'generation', 'scale': 'scale', 'status': 'status', 'units': 'units'}
    _toPy = {'application-tag': 'application_tag', 'generation': 'generation', 'scale': 'scale', 'status': 'status', 'units': 'units'}

    def __init__(self, application_tag=None, generation=None, scale=None, status=None, units=None, **unknown_fields):
        '''
        application_tag : str
        generation : int
        scale : int
        status : EntityStatus
        units : typing.Sequence[~ApplicationUnitParams]
        '''
        # Plain fields are copied as-is; nested types are deserialized from
        # their JSON (dict) form.
        application_tag_ = application_tag
        generation_ = generation
        scale_ = scale
        status_ = EntityStatus.from_json(status) if status else None
        units_ = [ApplicationUnitParams.from_json(o) for o in units or []]

        # Validate arguments against known Juju API types.
        if application_tag_ is not None and not isinstance(application_tag_, (bytes, str)):
            raise Exception("Expected application_tag_ to be a str, received: {}".format(type(application_tag_)))

        if generation_ is not None and not isinstance(generation_, int):
            raise Exception("Expected generation_ to be a int, received: {}".format(type(generation_)))

        if scale_ is not None and not isinstance(scale_, int):
            raise Exception("Expected scale_ to be a int, received: {}".format(type(scale_)))

        if status_ is not None and not isinstance(status_, (dict, EntityStatus)):
            raise Exception("Expected status_ to be a EntityStatus, received: {}".format(type(status_)))

        if units_ is not None and not isinstance(units_, (bytes, str, list)):
            raise Exception("Expected units_ to be a Sequence, received: {}".format(type(units_)))

        self.application_tag = application_tag_
        self.generation = generation_
        self.scale = scale_
        self.status = status_
        self.units = units_
        self.unknown_fields = unknown_fields
class UpdateBehavior(Type):
    # Attribute-name <-> wire-name maps used by the Type base class.
    _toSchema = {'enable_os_refresh_update': 'enable-os-refresh-update', 'enable_os_upgrade': 'enable-os-upgrade'}
    _toPy = {'enable-os-refresh-update': 'enable_os_refresh_update', 'enable-os-upgrade': 'enable_os_upgrade'}

    def __init__(self, enable_os_refresh_update=None, enable_os_upgrade=None, **unknown_fields):
        '''
        enable_os_refresh_update : bool
        enable_os_upgrade : bool
        '''
        refresh_flag = enable_os_refresh_update
        upgrade_flag = enable_os_upgrade
        # Validate arguments against known Juju API types.
        if not (refresh_flag is None or isinstance(refresh_flag, bool)):
            raise Exception("Expected enable_os_refresh_update_ to be a bool, received: {}".format(type(refresh_flag)))
        if not (upgrade_flag is None or isinstance(upgrade_flag, bool)):
            raise Exception("Expected enable_os_upgrade_ to be a bool, received: {}".format(type(upgrade_flag)))
        self.enable_os_refresh_update = refresh_flag
        self.enable_os_upgrade = upgrade_flag
        self.unknown_fields = unknown_fields
class UpdateCloudArgs(Type):
    # Attribute-name <-> wire-name maps used by the Type base class.
    _toSchema = {'clouds': 'clouds'}
    _toPy = {'clouds': 'clouds'}

    def __init__(self, clouds=None, **unknown_fields):
        '''
        clouds : typing.Sequence[~AddCloudArgs]
        '''
        # Deserialize every element from its JSON (dict) form.
        parsed = [AddCloudArgs.from_json(item) for item in (clouds or [])]
        # Validate arguments against known Juju API types.
        if not (parsed is None or isinstance(parsed, (bytes, str, list))):
            raise Exception("Expected clouds_ to be a Sequence, received: {}".format(type(parsed)))
        self.clouds = parsed
        self.unknown_fields = unknown_fields
class UpdateCloudCredential(Type):
_toSchema = {'credential': 'credential', 'tag': 'tag'}
_toPy = | |
this category
(bytes)
'download_size_pretty': Size of unique files in this category
(pretty format)
}
], ...
], ...
]
"""
api_code = enter_api_call('api_reset_session', request)
if not request or request.GET is None:
ret = Http404(HTTP404_NO_REQUEST('/__cart/reset.json'))
exit_api_call(api_code, ret)
raise ret
session_id = get_session_id(request)
reqno = get_reqno(request)
if reqno is None or throw_random_http404_error():
log.error('api_reset_session: Missing or badly formatted reqno')
ret = Http404(HTTP404_BAD_OR_MISSING_REQNO(request))
exit_api_call(api_code, ret)
raise ret
recycle_bin = request.GET.get('recyclebin', 0)
try:
recycle_bin = int(recycle_bin)
if throw_random_http404_error(): # pragma: no cover
raise ValueError
except:
log.error('api_reset_session: Bad value for recyclebin %s: %s',
recycle_bin, request.GET)
ret = Http404(HTTP404_BAD_RECYCLEBIN(recycle_bin, request))
exit_api_call(api_code, ret)
raise ret
sql = 'DELETE FROM '+connection.ops.quote_name('cart')
sql += ' WHERE session_id=%s'
values = [session_id]
if recycle_bin:
sql += ' AND recycled=1'
log.debug('api_reset_session SQL: %s %s', sql, values)
cursor = connection.cursor()
cursor.execute(sql, values)
download = request.GET.get('download', 0)
try:
download = int(download)
except:
pass
if download:
product_types_str = request.GET.get('types', 'all')
product_types = product_types_str.split(',')
info = _get_download_info(product_types, session_id)
else:
info = {}
count, recycled_count = get_cart_count(session_id, recycled=True)
info['count'] = count
info['recycled_count'] = recycled_count
info['reqno'] = reqno
ret = json_response(info)
exit_api_call(api_code, ret)
return ret
@never_cache
def api_create_download(request, opus_id=None):
    """Creates a zip file of all items in the cart or the given OPUS ID.

    This is a PRIVATE API.

    Format: __cart/download.json
        or: [__]api/download/(?P<opus_id>[-\w]+).zip
    Arguments: types=<PRODUCT_TYPES>
               urlonly=1 (optional) means to not zip the actual data products

    Returns either the zip archive itself (single-opus_id form) or a JSON
    object with the URL of the created archive (cart form). Size limits are
    enforced per archive and cumulatively per session.
    """
    api_code = enter_api_call('api_create_download', request)

    if not request or request.GET is None:
        if opus_id:
            ret = Http404(HTTP404_NO_REQUEST(f'/api/download/{opus_id}.zip'))
        else:
            ret = Http404(HTTP404_NO_REQUEST('/__cart/download.json'))
        exit_api_call(api_code, ret)
        raise ret

    # urlonly=1 produces an archive of URL/manifest files only (no data).
    url_file_only = request.GET.get('urlonly', 0)
    session_id = get_session_id(request)

    product_types = request.GET.get('types', 'all')
    if product_types is None or product_types == '':
        product_types = []
    else:
        product_types = product_types.split(',')

    if opus_id:
        # Single-observation form: stream the zip straight back.
        opus_ids = [opus_id]
        return_directly = True
    else:
        # Cart form: enforce the per-kind selection-count limit first.
        num_selections = (Cart.objects
                          .filter(session_id__exact=session_id)
                          .filter(recycled=0)
                          .count())
        if url_file_only:
            max_selections = settings.MAX_SELECTIONS_FOR_URL_DOWNLOAD
            if num_selections > max_selections:
                ret = json_response({'error':
                    f'You are attempting to download more than the maximum '
                    +f'permitted number ({max_selections}) of observations in '
                    +f'a URL archive. Please reduce the number of '
                    +f'observations you are trying to download.'})
                exit_api_call(api_code, ret)
                return ret
        else:
            max_selections = settings.MAX_SELECTIONS_FOR_DATA_DOWNLOAD
            if num_selections > max_selections:
                ret = json_response({'error':
                    f'You are attempting to download more than the maximum '
                    +f'permitted number ({max_selections}) of observations in '
                    +f'a data archive. Please either reduce the number of '
                    +f'observations you are trying to download or download a '
                    +f'URL archive instead and then retrieve the data products '
                    +f'using "wget".'})
                exit_api_call(api_code, ret)
                return ret
        res = (Cart.objects
               .filter(session_id__exact=session_id)
               .filter(recycled=0)
               .values_list('opus_id'))
        opus_ids = [x[0] for x in res]
        return_directly = False

    if not opus_ids:
        if return_directly:
            raise Http404(HTTP404_MISSING_OPUS_ID(request))
        else:
            ret = json_response({'error': 'No observations selected'})
            exit_api_call(api_code, ret)
            return ret

    # Fetch the full file info of the files we'll be zipping up
    # We want the raw objects so we can get the checksum as well as the
    # abspath
    files = get_pds_products(opus_ids, loc_type='raw',
                             product_types=product_types)

    # Build the output file names from a session/time-derived base name.
    zip_base_file_name = _zip_filename(opus_id, url_file_only)
    zip_root = zip_base_file_name.split('.')[0]
    zip_file_name = settings.TAR_FILE_PATH + zip_base_file_name
    chksum_file_name = settings.TAR_FILE_PATH + f'checksum_{zip_root}.txt'
    manifest_file_name = settings.TAR_FILE_PATH + f'manifest_{zip_root}.txt'
    csv_file_name = settings.TAR_FILE_PATH + f'csv_{zip_root}.txt'
    url_file_name = settings.TAR_FILE_PATH + f'url_{zip_root}.txt'

    # NOTE(review): this passes the single opus_id (None for cart downloads),
    # not the opus_ids list -- confirm _create_csv_file derives the full list
    # itself in the cart case.
    _create_csv_file(request, csv_file_name, opus_id, api_code=api_code)

    # Don't create download if the resultant zip file would be too big
    if not url_file_only:
        info = _get_download_info(product_types, session_id)
        download_size = info['total_download_size']
        if download_size > settings.MAX_DOWNLOAD_SIZE:
            ret = json_response({'error':
                'Sorry, this download would require '
                +'{:,}'.format(download_size)
                +' bytes but the maximum allowed is '
                +'{:,}'.format(settings.MAX_DOWNLOAD_SIZE)
                +' bytes. Please either reduce the number of '
                +f'observations you are trying to download, reduce the number '
                +f'of data products for each observation, or download a URL '
                +f'archive instead and then retrieve the data products using '
                +f'"wget".'})
            exit_api_call(api_code, ret)
            return ret

        # Don't keep creating downloads after user has reached their size limit
        # for this session
        cum_download_size = request.session.get('cum_download_size', 0)
        cum_download_size += download_size
        if cum_download_size > settings.MAX_CUM_DOWNLOAD_SIZE:
            ret = json_response({'error':
                'Sorry, maximum cumulative download size ('
                +'{:,}'.format(settings.MAX_CUM_DOWNLOAD_SIZE)
                +' bytes) reached for this session'})
            exit_api_call(api_code, ret)
            return ret
        request.session['cum_download_size'] = int(cum_download_size)

    # Add each file to the new zip file and create a manifest too.
    # In the direct case the ZipFile writes straight into the HTTP response;
    # otherwise it goes to disk and we return its URL.
    if return_directly:
        response = HttpResponse(content_type='application/zip')
        zip_file = zipfile.ZipFile(response, mode='w')
    else:
        zip_file = zipfile.ZipFile(zip_file_name, mode='w')
    # NOTE(review): these handles are closed manually below; an exception in
    # the loop would leak them -- consider context managers in a future pass.
    chksum_fp = open(chksum_file_name, 'w')
    manifest_fp = open(manifest_file_name, 'w')
    url_fp = open(url_file_name, 'w')
    errors = []
    added = []
    for f_opus_id in files:
        # Only the 'Current' version set of each observation is archived.
        if 'Current' not in files[f_opus_id]:
            continue
        files_version = files[f_opus_id]['Current']
        for product_type in files_version:
            for file_data in files_version[product_type]:
                path = file_data['path']
                url = file_data['url']
                checksum = file_data['checksum']
                pretty_name = path.split('/')[-1]
                digest = f'{pretty_name}:{checksum}'
                mdigest = f'{f_opus_id}:{pretty_name}'
                # Deduplicate by basename: the same product can belong to
                # multiple observations but is archived only once.
                if pretty_name not in added:
                    chksum_fp.write(digest+'\n')
                    manifest_fp.write(mdigest+'\n')
                    url_fp.write(url+'\n')
                    filename = os.path.basename(path)
                    if not url_file_only:
                        try:
                            zip_file.write(path, arcname=filename)
                        except Exception as e:
                            log.error(
                'api_create_download threw exception for opus_id %s, product_type %s, '
                +'file %s, pretty_name %s: %s',
                f_opus_id, product_type, path, pretty_name, str(e))
                            errors.append('Error adding: ' + pretty_name)
                    added.append(pretty_name)

    # Write errors to manifest file
    if errors:
        manifest_fp.write('Errors:\n')
        for e in errors:
            manifest_fp.write(e+'\n')

    # Add manifests and checksum files to tarball and close everything up
    manifest_fp.close()
    chksum_fp.close()
    url_fp.close()
    zip_file.write(chksum_file_name, arcname='checksum.txt')
    zip_file.write(manifest_file_name, arcname='manifest.txt')
    zip_file.write(csv_file_name, arcname='data.csv')
    zip_file.write(url_file_name, arcname='urls.txt')
    zip_file.close()
    # The side files are now inside the archive; remove the loose copies.
    os.remove(chksum_file_name)
    os.remove(manifest_file_name)
    os.remove(csv_file_name)
    os.remove(url_file_name)

    if return_directly:
        response['Content-Disposition'] = f'attachment; filename={zip_base_file_name}'
        ret = response
    else:
        zip_url = settings.TAR_FILE_URL_PATH + zip_base_file_name
        ret = json_response({'filename': zip_url})
    exit_api_call(api_code, '<Encoded zip file>')
    return ret
################################################################################
#
# Support routines - get information
#
################################################################################
def _get_download_info(product_types, session_id):
"""Return information about the current cart useful for download.
The resulting totals are limited to the given product_types.
['all'] means return all product_types.
Product types for items in the recycle bin are returned with values of 0.
Returns dict containing:
'total_download_count': Total number of unique files
'total_download_size': Total size of unique files (bytes)
'total_download_size_pretty': Total size of unique files (pretty format)
'product_cat_list': List of categories and info:
[
[<Product Type Category>,
[{'slug_name': Like "browse-thumb"
'product_type': Like "Browse Image (thumbnail)"
'tooltip': User-friendly tooltip, if any
'product_count': Number of opus_ids in this category
'download_count': Number of unique files in this category
'download_size': Size of unique files in this category
(bytes)
'download_size_pretty': Size of unique files in this category
(pretty format)
}
], ...
], ...
]
"""
cursor = connection.cursor()
q = connection.ops.quote_name
values = []
sql = 'SELECT DISTINCT '
# Retrieve the distinct list of product types for all observations, including the ones in the
# recycle bin. This is used to allow the items on the cart to be added/removed from the recycle bin
# and update the download data panel without redrawing the cart page on every edit.
sql += q('obs_files')+'.'+q('category')+' AS '+q('cat')+', '
sql += q('obs_files')+'.'+q('sort_order')+' AS '+q('sort')+', '
sql += q('obs_files')+'.'+q('short_name')+' AS '+q('short')+', '
sql += q('obs_files')+'.'+q('full_name')+' AS '+q('full')
sql += 'FROM '+q('obs_files')+' '
sql += 'INNER JOIN '+q('cart')+' ON '
sql += q('cart')+'.'+q('obs_general_id')+'='
sql += q('obs_files')+'.'+q('obs_general_id')+' '
sql += 'WHERE '+q('cart')+'.'+q('session_id')+'=%s '
values.append(session_id)
sql += 'ORDER BY '+q('sort')
log.debug('_get_download_info SQL DISTINCT product_type list: %s %s', sql, values)
cursor.execute(sql, values)
results = cursor.fetchall()
product_cats = []
product_cat_list = []
product_dict_by_short_name = {}
for res in results:
(category, sort_order, short_name, full_name) = res
pretty_name = category
if category == 'standard':
pretty_name = 'Standard Data Products'
elif category == 'metadata':
pretty_name = 'Metadata Products'
elif category == 'browse':
pretty_name = 'Browse Products'
elif category == 'diagram':
pretty_name = 'Diagram Products'
else:
pretty_name = category + '-Specific Products'
key = (category, pretty_name)
if key not in product_cats:
product_cats.append(key)
cur_product_list = []
product_cat_list.append((pretty_name, cur_product_list))
try:
entry = Definitions.objects.get(context__name='OPUS_PRODUCT_TYPE',
term=short_name)
tooltip = entry.definition
except Definitions.DoesNotExist:
log.error('No tooltip definition for OPUS_PRODUCT_TYPE "%s"',
short_name)
tooltip = None
product_dict_entry = {
'slug_name': short_name,
'tooltip': tooltip,
'product_type': full_name,
'product_count': 0,
'download_count': 0,
'download_size': 0,
'download_size_pretty': 0
}
cur_product_list.append(product_dict_entry)
product_dict_by_short_name[short_name] = product_dict_entry
# SELECT obs_files.short_name,
# count(distinct obs_files.opus_id) as product_count,
# count(distinct obs_files.logical_path) as download_count,
# t2.download_size as downloadsize
# FROM obs_files,
#
# (SELECT t1.short_name, sum(t1.size) as download_size
# FROM (SELECT DISTINCT obs_files.short_name, obs_files.logical_path, obs_files.size
# FROM obs_files
# WHERE opus_id IN ('co-iss-n1460960653', 'co-iss-n1460960868')
# ) as t1
# GROUP BY t1.short_name
# ) as t2
# WHERE obs_files.short_name=t2.short_name
# AND obs_files.opus_id in ('co-iss-n1460960653', 'co-iss-n1460960868')
# GROUP BY obs_files.category, obs_files.sort_order, obs_files.short_name, t2.download_size
# ORDER BY sort_order;
values | |
<filename>evaluate_populations.py
"""
evaluate_populations.py
Evaluate all the populations across their generations and compare each of them against each other.
Note: Evaluation is only done on backed-up populations.
"""
import argparse
from collections import Counter
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm
from main import get_folder
from population.population import Population
from population.utils.population_util.evaluate import evaluate_generations as eval_gen
from utils.dictionary import *
from utils.myutils import get_subfolder, load_dict, update_dict
HOPS = 10
def evaluate_generations(experiment_id: int,
                         pop_folder: str,
                         folder: str = None,
                         hops: int = HOPS,
                         max_v: int = 50,
                         unused_cpu: int = 2):
    """Evaluate all the populations' generations in a given folder of a given experiment.

    :param experiment_id: ID of the experiment the populations belong to
    :param pop_folder: Sub-folder containing the population versions (v1..v<max_v>)
    :param folder: Root folder of the populations; forwarded to eval_gen (may be None)
    :param hops: Number of generations between two successive evaluations
    :param max_v: Highest population version to evaluate (versions 1..max_v)
    :param unused_cpu: Number of CPU cores to leave unused during evaluation
    """
    if pop_folder[-1] != '/': pop_folder += '/'
    # Evaluate every version up to and including max_v.  (A previous revision
    # hard-coded the list [51, 52] for a one-off run, which silently ignored
    # the max_v parameter.)
    for v in range(1, max_v + 1):
        print(f"\n===> EVALUATING POPULATION {pop_folder}v{v} <===")
        eval_gen(
                name=f"{pop_folder}v{v}",
                experiment_id=experiment_id,
                folder=folder,
                hops=hops,
                unused_cpu=unused_cpu,
        )
def evaluate_populations(folder: str, pop_folder: str, max_v: int = 50):
    """
    Evaluate the various populations against each other. Note that it is assumed that 'evaluate_generations' has ran
    first.

    :param folder: Root experiment folder under 'population_backup/storage/'
    :param pop_folder: Sub-folder holding the population versions (v1..v<max_v>)
    :param max_v: Number of population versions whose results are combined
    """
    # Normalize folder arguments so they can be concatenated into paths
    if folder[-1] != '/': folder += '/'
    if pop_folder[-1] != '/': pop_folder += '/'
    # Load in dummy population
    # NOTE(review): v1 is only used to read the generation count; this assumes
    # every version is trained for the same number of generations -- TODO confirm
    print(f"\n===> COMBINING POPULATION RESULTS OF FOLDER {folder}{pop_folder} <===")
    pop = Population(
            name=f'{pop_folder}v1',
            folder_name=folder,
            log_print=False,
            use_backup=True,
    )
    max_gen = pop.generation
    # Parse the results: each dict maps generation -> one value per version
    fitness_dict = dict()
    finished_dict = dict()
    score_dict = dict()
    distance_dict = dict()
    time_dict = dict()
    for g in range(0, max_gen + 1, HOPS):
        fitness_dict[g] = []
        finished_dict[g] = []
        score_dict[g] = []
        distance_dict[g] = []
        time_dict[g] = []
        for v in range(1, max_v + 1):
            # Results were written by 'evaluate_generations' per version/generation
            results: dict = load_dict(f"population_backup/storage/{folder}{pop_folder}v{v}/evaluation/{g:05d}/results")
            # Best-case statistics across the candidates of a version:
            # max for fitness/finished/score, min for distance/time
            fitness_dict[g].append(max([results[k][D_FITNESS] for k in results.keys()]))
            finished_dict[g].append(max([results[k][D_FINISHED] / 100 for k in results.keys()]))
            score_dict[g].append(max([results[k][D_SCORE_AVG] for k in results.keys()]))
            distance_dict[g].append(min([results[k][D_DISTANCE_AVG] for k in results.keys()]))
            time_dict[g].append(min([results[k][D_TIME_AVG] for k in results.keys()]))
    # Save received data in evaluation subfolder of the population folder
    path = get_subfolder(f'population_backup/storage/{folder}{pop_folder}', 'evaluation')
    update_dict(f'{path}fitness', fitness_dict, overwrite=True)
    update_dict(f'{path}finished', finished_dict, overwrite=True)
    update_dict(f'{path}score', score_dict, overwrite=True)
    update_dict(f'{path}distance', distance_dict, overwrite=True)
    update_dict(f'{path}time', time_dict, overwrite=True)
    # Visualize the data: one boxplot figure per measure
    path_images = get_subfolder(path, 'images')
    plot_result(d=fitness_dict,
                ylabel="fitness",
                title="Average fitness",
                save_path=f'{path_images}fitness')
    plot_result(d=finished_dict,
                ylabel="finished ratio",
                title="Averaged finished ratio",
                save_path=f'{path_images}finished')
    plot_result(d=score_dict,
                ylabel="score",
                title="Average score",
                save_path=f'{path_images}score')
    plot_result(d=distance_dict,
                ylabel="distance (m)",
                title="Average final distance to target",
                save_path=f'{path_images}distance')
    plot_result(d=time_dict,
                ylabel="time (s)",
                title="Average simulation time",
                save_path=f'{path_images}time')
def combine_all_populations(folder: str,
                            max_v: int = None,
                            neat: bool = False,
                            neat_gru: bool = False,
                            neat_lstm: bool = False,
                            neat_sru: bool = False,
                            neat_sru_s: bool = False,
                            ):
    """Combine the scores for all of the populations in a given folder.

    Draws, for each evaluation measure, the median with an inter-quartile band
    of every selected population type on a single figure.

    :param folder: Root experiment folder under 'population_backup/storage/'
    :param max_v: When given, assert that each loaded measure has this many versions
    :param neat..neat_sru_s: Flags selecting which population types to include
    """
    # Collect all the populations (D_* constants come from utils.dictionary)
    populations = []
    if neat: populations.append(D_NEAT)
    if neat_gru: populations.append(D_NEAT_GRU)
    if neat_lstm: populations.append(D_NEAT_LSTM)
    if neat_sru: populations.append(D_NEAT_SRU)
    if neat_sru_s: populations.append(D_NEAT_SRU_S)
    if len(populations) == 0: return
    # Collect all the measure options
    OPTIONS = ['distance', 'finished', 'fitness', 'score', 'time', 'training']
    # OPTIONS = ['fitness']
    # Go over all possibilities
    print(f"\n===> COMBINING POPULATIONS OF FOLDER {folder} <===")
    path = f"population_backup/storage/{folder}/"
    path_images = get_subfolder(path, 'images')
    for option in OPTIONS:
        plt.figure(figsize=(8, 2.5))
        max_data = 0
        max_gen = 0
        for pop in populations:
            # Load the dictionary (generation -> list of per-version values)
            d = load_dict(f"{path}{pop}/evaluation/{option}")
            size = len(list(d.values())[0])
            if max_v: assert size == max_v
            # Prepare the data containers
            q1 = []
            q2 = []  # Median
            q3 = []
            # Quartile indices into the sorted per-generation value lists
            idx_q1 = int(round(1 / 4 * size))
            idx_q2 = int(round(2 / 4 * size))
            idx_q3 = int(round(3 / 4 * size))
            # Loop over each iteration
            # NOTE(review): keys are stringified generation numbers after
            # load_dict, hence the int()/str() round-trips below
            x = sorted([int(k) for k in d.keys()])
            for g in x:
                if g > max_gen: max_gen = g
                lst = sorted(d[str(g)])  # Sort values from low to high
                q1.append(lst[idx_q1])
                q2.append(lst[idx_q2])
                q3.append(lst[idx_q3])
            # Plot the results: dotted quartiles, solid median, shaded IQR band
            plt.plot(x, q1, color=COLORS[pop], linestyle=":", linewidth=.5)
            plt.plot(x, q3, color=COLORS[pop], linestyle=":", linewidth=.5)
            plt.plot(x, q2, color=COLORS[pop], linestyle="-", linewidth=2, label=pop)
            plt.fill_between(x, q1, q3, color=COLORS[pop], alpha=0.2)
            # Update the max-counter (used for the shared y-limit)
            if max(q3) > max_data: max_data = max(q3)
        # Finalize the figure
        leg = plt.legend(loc='upper center',
                         bbox_to_anchor=(0.5, 1.25),
                         fancybox=True,
                         fontsize=10,
                         ncol=len(populations))
        for line in leg.get_lines():
            line.set_linewidth(4.0)
        # plt.xticks([i * 100 for i in range(11)])  # TODO
        plt.xlabel("generation")
        plt.xlim(0, max_gen)
        # plt.yticks([i for i in range(7)])  # TODO
        plt.ylabel(option)
        plt.ylim(0, max(max_data * 1.05, 1.05))
        # plt.ylim(0, 6)  # TODO
        plt.grid()
        plt.tight_layout()
        plt.savefig(f"{path_images}comb_{option}.png", bbox_inches='tight', pad_inches=0.02, dpi=500)
        # plt.savefig(f"{path_images}comb_{option}.eps", format="eps", bbox_inches='tight', pad_inches=0.02)
        # plt.show()
        plt.close()
def evaluate_training(experiment_id: int, pop_folder: str, folder: str = None, max_v: int = 50):
    """Evaluate the fitness of a population's elite each training generation.

    :param experiment_id: ID of the experiment, used to derive folder when not given
    :param pop_folder: Sub-folder holding the population versions (v1..v<max_v>)
    :param folder: Root experiment folder; derived via get_folder(experiment_id) when None
    :param max_v: Number of population versions to pull training fitness from
    """
    # Normalize folder arguments so they can be concatenated into paths
    if pop_folder[-1] != '/': pop_folder += '/'
    folder = folder if folder else get_folder(experiment_id)
    if folder[-1] != '/': folder += '/'
    # Get dummy population (only used to read the generation count)
    pop = Population(
            name=f"{pop_folder}v1",
            folder_name=folder,
            log_print=False,
            use_backup=True,
    )
    max_gen = pop.generation
    # Initialize data container: generation -> best fitness of each version
    training_fitness = dict()
    for g in range(0, max_gen + 1, HOPS):
        training_fitness[g] = []
    # Pull the training scores
    print(f"\n===> PULLING TRAINING FITNESS OF THE {pop_folder} POPULATIONS <===")
    pbar = tqdm(range(int(max_v * (max_gen / HOPS + 1))))
    for v in range(1, max_v + 1):
        name = f"{pop_folder}v{v}"
        pop = Population(
                name=name,
                folder_name=folder,
                log_print=False,
                use_backup=True,
        )
        # Perform the evaluations, loading the stored state of each sampled generation
        max_gen = pop.generation
        for gen in range(0, max_gen + 1, HOPS):
            if not pop.load(gen=gen):
                raise Exception(f"Population {name} is not trained for generation {gen}")
            # Fall back to 0 when no elite genome was recorded for this generation
            training_fitness[gen].append(pop.best_genome.fitness if pop.best_genome else 0)
            pbar.update()
    pbar.close()
    # Plot the result and persist the collected fitness values
    path = get_subfolder(f'population_backup/storage/{folder}{pop_folder}', 'evaluation')
    update_dict(f'{path}training', training_fitness, overwrite=True)
    path_images = get_subfolder(path, 'images')
    plot_result(d=training_fitness,
                ylabel="fitness",
                title="Average training fitness",
                save_path=f'{path_images}training')
def plot_result(d: dict, ylabel: str, title: str, save_path: str):
    """Create a boxplot of the given dictionary.

    :param d: Maps generation number to a list of measurements, one entry per
        evaluated population version (callers append one value per version)
    :param ylabel: Label put on the y-axis
    :param title: Title of the plot; currently unused in the figure itself
    :param save_path: Path without extension; .png and .eps figures are written
    """
    # Parse the values into a (versions x generations) matrix
    keys = sorted(d.keys())
    data = np.zeros((len(d[keys[0]]), len(keys)))
    for i, k in enumerate(keys):
        data[:, i] = d[k]
    # Create the plot; whis=[0, 100] stretches the whiskers to min/max,
    # and only every 20th generation gets an x-axis label
    plt.figure(figsize=(12, 3.5))  # TODO: Better comparison
    plt.boxplot(data, labels=[str(k) if k % 20 == 0 else '' for k in keys], whis=[0, 100])
    plt.xticks(rotation=90)
    plt.xlabel("generations")
    plt.yticks([i for i in range(11)])
    plt.ylabel(ylabel)
    # plt.ylim(0, max(np.max(data) * 1.05, 1.05))
    plt.ylim(0, 10)  # TODO: Fixed to have fair comparison
    plt.grid()
    plt.tight_layout()
    plt.savefig(save_path + ".png", bbox_inches='tight', pad_inches=0.02, dpi=500)
    plt.savefig(save_path + ".eps", format='eps', bbox_inches='tight', pad_inches=0.02)
    # plt.show()
    plt.close()
def plot_distribution(folder: str,
                      neat: bool = False,
                      neat_gru: bool = False,
                      neat_lstm: bool = False,
                      neat_sru: bool = False,
                      neat_sru_s: bool = False,
                      gen: int = 500,
                      ):
    """
    Plot the one-dimensional distribution of all of the populations on each of the evaluation measures for the requested
    generation. It is assumed that the evaluation-data has already been collected.

    :param folder: Root experiment folder under 'population_backup/storage/'
    :param neat..neat_sru_s: Flags selecting which population types to include
    :param gen: Generation whose per-version values are plotted
    """
    # Collect all the populations (D_* constants come from utils.dictionary)
    populations = []
    if neat: populations.append(D_NEAT)
    if neat_gru: populations.append(D_NEAT_GRU)
    if neat_lstm: populations.append(D_NEAT_LSTM)
    if neat_sru: populations.append(D_NEAT_SRU)
    if neat_sru_s: populations.append(D_NEAT_SRU_S)
    if len(populations) == 0: return
    # Collect all the measure options
    OPTIONS = ['distance', 'finished', 'fitness', 'score', 'time', 'training']
    # Go over all possibilities, one figure per measure
    print(f"\n===> CREATING POPULATION DISTRIBUTIONS <===")
    path = f"population_backup/storage/{folder}/"
    path_images = get_subfolder(path, 'images')
    for option in OPTIONS:
        plt.figure(figsize=(10, 2.5))
        min_val = float("inf")
        max_val = -float("inf")
        for pop in populations:
            d = load_dict(f"{path}{pop}/evaluation/{option}")
            dist = d[str(gen)]
            # Track global value range (before outlier removal) for the x-limits
            if min(dist) < min_val: min_val = min(dist)
            if max(dist) > max_val: max_val = max(dist)
            # Remove outliers first, using the standard 1.5*IQR fence
            dist = sorted(dist)
            q1 = min(dist[int(round(1 / 4 * len(dist)))], dist[int(round(3 / 4 * len(dist)))])
            q3 = max(dist[int(round(1 / 4 * len(dist)))], dist[int(round(3 / 4 * len(dist)))])
            iqr = q3 - q1
            # Iterate backwards so deletions do not shift pending indices
            for i in range(len(dist) - 1, -1, -1):
                if (dist[i] < (q1 - 1.5 * iqr)) or (dist[i] > (q3 + 1.5 * iqr)): del dist[i]
            sns.distplot(dist,
                         hist=False,
                         kde=True,
                         norm_hist=True,
                         bins=100,
                         color=COLORS[pop],
                         kde_kws={'linewidth': 2},
                         label=pop,
                         )
        plt.xlim(min_val, max_val)
        # plt.title(f"Probability density across populations for '{option}' at generation {gen}")
        plt.xlabel(option)
        # plt.yticks([])
        plt.ylabel('probability density')
        leg = plt.legend(loc='upper center',
                         bbox_to_anchor=(0.5, 1.2),
                         fancybox=True,
                         fontsize=8,
                         ncol=len(populations))
        for line in leg.get_lines():
            line.set_linewidth(4.0)
        plt.tight_layout()
        plt.savefig(f"{path_images}dist_{option}.png", bbox_inches='tight', pad_inches=0.02)
        plt.savefig(f"{path_images}dist_{option}.eps", format='eps', bbox_inches='tight', pad_inches=0.02)
        # plt.show()
        plt.close()
def compute_complexity(folder: str,
neat: bool = False,
neat_gru: bool = False,
neat_lstm: bool = False,
neat_sru: bool = False,
neat_sru_s: bool = False,
gen: int = 500,
max_v: int = 50,
):
"""Compute the complexity of the populations' elites."""
# Collect all the populations
populations = []
if neat: populations.append(D_NEAT)
if | |
[("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(), 1)],
sparse_threshold=0.1,
)
col_trans.fit(X_array)
X_trans = col_trans.transform(X_array)
assert not sparse.issparse(X_trans)
assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0]))
def test_column_transformer_mixed_cols_sparse():
    # Sparse output from mixed object-dtype columns: succeeds when every
    # passthrough column is numeric-coercible, raises otherwise.
    df = np.array([["a", 1, True], ["b", 2, False]], dtype="O")
    ct = make_column_transformer(
        (OneHotEncoder(), [0]), ("passthrough", [1, 2]), sparse_threshold=1.0
    )
    # this shouldn't fail, since boolean can be coerced into a numeric
    # See: https://github.com/scikit-learn/scikit-learn/issues/11912
    X_trans = ct.fit_transform(df)
    assert X_trans.getformat() == "csr"
    assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1], [0, 1, 2, 0]]))
    ct = make_column_transformer(
        (OneHotEncoder(), [0]), ("passthrough", [0]), sparse_threshold=1.0
    )
    with pytest.raises(ValueError, match="For a sparse output, all columns should"):
        # this fails since strings `a` and `b` cannot be
        # coerced into a numeric.
        ct.fit_transform(df)
def test_column_transformer_sparse_threshold():
    # sparse_threshold controls densification: output stays sparse only when
    # the density of the stacked result is below the threshold.
    X_array = np.array([["a", "b"], ["A", "B"]], dtype=object).T
    # above data has sparsity of 4 / 8 = 0.5
    # apply threshold even if all sparse
    col_trans = ColumnTransformer(
        [("trans1", OneHotEncoder(), [0]), ("trans2", OneHotEncoder(), [1])],
        sparse_threshold=0.2,
    )
    res = col_trans.fit_transform(X_array)
    assert not sparse.issparse(res)
    assert not col_trans.sparse_output_
    # mixed -> sparsity of (4 + 2) / 8 = 0.75
    # just above the mixed density -> stays sparse
    for thres in [0.75001, 1]:
        col_trans = ColumnTransformer(
            [
                ("trans1", OneHotEncoder(sparse=True), [0]),
                ("trans2", OneHotEncoder(sparse=False), [1]),
            ],
            sparse_threshold=thres,
        )
        res = col_trans.fit_transform(X_array)
        assert sparse.issparse(res)
        assert col_trans.sparse_output_
    # at or below the mixed density -> densified
    for thres in [0.75, 0]:
        col_trans = ColumnTransformer(
            [
                ("trans1", OneHotEncoder(sparse=True), [0]),
                ("trans2", OneHotEncoder(sparse=False), [1]),
            ],
            sparse_threshold=thres,
        )
        res = col_trans.fit_transform(X_array)
        assert not sparse.issparse(res)
        assert not col_trans.sparse_output_
    # if nothing is sparse -> no sparse
    for thres in [0.33, 0, 1]:
        col_trans = ColumnTransformer(
            [
                ("trans1", OneHotEncoder(sparse=False), [0]),
                ("trans2", OneHotEncoder(sparse=False), [1]),
            ],
            sparse_threshold=thres,
        )
        res = col_trans.fit_transform(X_array)
        assert not sparse.issparse(res)
        assert not col_trans.sparse_output_
def test_column_transformer_error_msg_1D():
    # Transformers that cannot handle 1D input must surface a clear error
    # (Trans/TransRaise are helper estimators defined earlier in this module).
    X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    col_trans = ColumnTransformer([("trans", StandardScaler(), 0)])
    msg = "1D data passed to a transformer"
    with pytest.raises(ValueError, match=msg):
        col_trans.fit(X_array)
    with pytest.raises(ValueError, match=msg):
        col_trans.fit_transform(X_array)
    # The transformer's own error must propagate unchanged
    col_trans = ColumnTransformer([("trans", TransRaise(), 0)])
    for func in [col_trans.fit, col_trans.fit_transform]:
        with pytest.raises(ValueError, match="specific message"):
            func(X_array)
def test_2D_transformer_output():
    # A transformer returning 1D output must be rejected with a message that
    # names the offending transformer (TransNo2D is a module-level helper).
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    # if one transformer is dropped, test that name is still correct
    ct = ColumnTransformer([("trans1", "drop", 0), ("trans2", TransNo2D(), 1)])
    msg = "the 'trans2' transformer should be 2D"
    with pytest.raises(ValueError, match=msg):
        ct.fit_transform(X_array)
    # because fit is also doing transform, this raises already on fit
    with pytest.raises(ValueError, match=msg):
        ct.fit(X_array)
def test_2D_transformer_output_pandas():
    # Same 2D-output requirement as above, but with DataFrame input and a
    # string column specifier.
    pd = pytest.importorskip("pandas")
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_df = pd.DataFrame(X_array, columns=["col1", "col2"])
    # if one transformer is dropped, test that name is still correct
    ct = ColumnTransformer([("trans1", TransNo2D(), "col1")])
    msg = "the 'trans1' transformer should be 2D"
    with pytest.raises(ValueError, match=msg):
        ct.fit_transform(X_df)
    # because fit is also doing transform, this raises already on fit
    with pytest.raises(ValueError, match=msg):
        ct.fit(X_df)
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
def test_column_transformer_invalid_columns(remainder):
    # Invalid column specifiers and train/test feature-count mismatches must
    # raise ValueError, independent of the remainder strategy.
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    # general invalid
    for col in [1.5, ["string", 1], slice(1, "s"), np.array([1.0])]:
        ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
        with pytest.raises(ValueError, match="No valid specification"):
            ct.fit(X_array)
    # invalid for arrays (string specifiers require a DataFrame)
    for col in ["string", ["string", "other"], slice("a", "b")]:
        ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
        with pytest.raises(ValueError, match="Specifying the columns"):
            ct.fit(X_array)
    # transformed n_features does not match fitted n_features
    col = [0, 1]
    ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
    ct.fit(X_array)
    X_array_more = np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T
    msg = "X has 3 features, but ColumnTransformer is expecting 2 features as input."
    with pytest.raises(ValueError, match=msg):
        ct.transform(X_array_more)
    X_array_fewer = np.array(
        [
            [0, 1, 2],
        ]
    ).T
    err_msg = (
        "X has 1 features, but ColumnTransformer is expecting 2 features as input."
    )
    with pytest.raises(ValueError, match=err_msg):
        ct.transform(X_array_fewer)
def test_column_transformer_invalid_transformer():
    # An estimator that implements fit/predict but not transform must be
    # rejected when used inside a ColumnTransformer.
    class FitOnlyEstimator(BaseEstimator):
        def fit(self, X, y=None):
            return self

        def predict(self, X):
            return X

    data = np.array([[0, 1, 2], [2, 4, 6]]).T
    transformer = ColumnTransformer([("trans", FitOnlyEstimator(), [0])])
    expected_msg = "All estimators should implement fit and transform"
    with pytest.raises(TypeError, match=expected_msg):
        transformer.fit(data)
def test_make_column_transformer():
    # make_column_transformer auto-generates lowercase names from the
    # estimator classes and keeps (estimator, columns) pairs in order.
    std = StandardScaler()
    nrm = Normalizer()
    ct = make_column_transformer((std, "first"), (nrm, ["second"]))
    extracted = list(zip(*ct.transformers))
    assert tuple(extracted[0]) == ("standardscaler", "normalizer")
    assert tuple(extracted[1]) == (std, nrm)
    assert tuple(extracted[2]) == ("first", ["second"])
def test_make_column_transformer_pandas():
    # A pandas Index passed as the column spec must behave exactly like the
    # explicit ColumnTransformer construction.
    pd = pytest.importorskip("pandas")
    data = np.array([[0, 1, 2], [2, 4, 6]]).T
    frame = pd.DataFrame(data, columns=["first", "second"])
    nrm = Normalizer()
    explicit = ColumnTransformer([("norm", Normalizer(), frame.columns)])
    shorthand = make_column_transformer((nrm, frame.columns))
    assert_almost_equal(explicit.fit_transform(frame), shorthand.fit_transform(frame))
def test_make_column_transformer_kwargs():
    # Supported keyword arguments are forwarded to the ColumnTransformer;
    # unsupported ones (transformer_weights) raise TypeError.
    scaler = StandardScaler()
    norm = Normalizer()
    ct = make_column_transformer(
        (scaler, "first"),
        (norm, ["second"]),
        n_jobs=3,
        remainder="drop",
        sparse_threshold=0.5,
    )
    assert (
        ct.transformers
        == make_column_transformer((scaler, "first"), (norm, ["second"])).transformers
    )
    assert ct.n_jobs == 3
    assert ct.remainder == "drop"
    assert ct.sparse_threshold == 0.5
    # invalid keyword parameters should raise an error message
    msg = re.escape(
        "make_column_transformer() got an unexpected "
        "keyword argument 'transformer_weights'"
    )
    with pytest.raises(TypeError, match=msg):
        make_column_transformer(
            (scaler, "first"),
            (norm, ["second"]),
            transformer_weights={"pca": 10, "Transf": 1},
        )
def test_make_column_transformer_remainder_transformer():
    # A remainder given as an estimator (rather than 'drop'/'passthrough')
    # must be stored on the resulting ColumnTransformer untouched.
    remainder_est = StandardScaler()
    ct = make_column_transformer(
        (StandardScaler(), "first"),
        (Normalizer(), ["second"]),
        remainder=remainder_est,
    )
    assert ct.remainder == remainder_est
def test_column_transformer_get_set_params():
    # get_params exposes both the composite's own params and the nested
    # estimator params (trans1__*); set_params can replace a whole estimator.
    ct = ColumnTransformer(
        [("trans1", StandardScaler(), [0]), ("trans2", StandardScaler(), [1])]
    )
    exp = {
        "n_jobs": None,
        "remainder": "drop",
        "sparse_threshold": 0.3,
        "trans1": ct.transformers[0][1],
        "trans1__copy": True,
        "trans1__with_mean": True,
        "trans1__with_std": True,
        "trans2": ct.transformers[1][1],
        "trans2__copy": True,
        "trans2__with_mean": True,
        "trans2__with_std": True,
        "transformers": ct.transformers,
        "transformer_weights": None,
        "verbose": False,
    }
    assert ct.get_params() == exp
    ct.set_params(trans1__with_mean=False)
    assert not ct.get_params()["trans1__with_mean"]
    # Replacing trans1 by 'passthrough' removes its nested params
    ct.set_params(trans1="passthrough")
    exp = {
        "n_jobs": None,
        "remainder": "drop",
        "sparse_threshold": 0.3,
        "trans1": "passthrough",
        "trans2": ct.transformers[1][1],
        "trans2__copy": True,
        "trans2__with_mean": True,
        "trans2__with_std": True,
        "transformers": ct.transformers,
        "transformer_weights": None,
        "verbose": False,
    }
    assert ct.get_params() == exp
def test_column_transformer_named_estimators():
    # named_transformers_ appears only after fit and supports both item and
    # attribute access to the *fitted* (cloned) sub-estimators.
    X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    ct = ColumnTransformer(
        [
            ("trans1", StandardScaler(), [0]),
            ("trans2", StandardScaler(with_std=False), [1]),
        ]
    )
    assert not hasattr(ct, "transformers_")
    ct.fit(X_array)
    assert hasattr(ct, "transformers_")
    assert isinstance(ct.named_transformers_["trans1"], StandardScaler)
    assert isinstance(ct.named_transformers_.trans1, StandardScaler)
    assert isinstance(ct.named_transformers_["trans2"], StandardScaler)
    assert isinstance(ct.named_transformers_.trans2, StandardScaler)
    assert not ct.named_transformers_.trans2.with_std
    # check it are fitted transformers
    assert ct.named_transformers_.trans1.mean_ == 1.0
def test_column_transformer_cloning():
    # Fitting must clone the supplied estimators: the originals in
    # ct.transformers stay unfitted while ct.transformers_ hold fitted clones.
    X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    ct = ColumnTransformer([("trans", StandardScaler(), [0])])
    ct.fit(X_array)
    assert not hasattr(ct.transformers[0][1], "mean_")
    assert hasattr(ct.transformers_[0][1], "mean_")
    # Same invariant when using fit_transform instead of fit
    ct = ColumnTransformer([("trans", StandardScaler(), [0])])
    ct.fit_transform(X_array)
    assert not hasattr(ct.transformers[0][1], "mean_")
    assert hasattr(ct.transformers_[0][1], "mean_")
def test_column_transformer_get_feature_names_raises():
    # get_feature_names raises NotFittedError before fit, and AttributeError
    # when a sub-transformer does not implement get_feature_names.
    X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    ct = ColumnTransformer([("trans", Trans(), [0, 1])])
    # raise correct error when not fitted
    with pytest.raises(NotFittedError):
        ct.get_feature_names()
    # raise correct error when no feature names are available
    ct.fit(X_array)
    msg = r"Transformer trans \(type Trans\) does not provide " r"get_feature_names"
    with pytest.raises(AttributeError, match=msg):
        ct.get_feature_names()
@pytest.mark.parametrize(
    "X, keys",
    [
        (
            np.array(
                [[{"a": 1, "b": 2}, {"a": 3, "b": 4}], [{"c": 5}, {"c": 6}]],
                dtype=object,
            ).T,
            ("a", "b", "c"),
        ),
        (
            # DictVectorizer stringifies integer keys, hence the string keys
            np.array([[{1: 1, 2: 2}, {1: 3, 2: 4}], [{3: 5}, {3: 6}]], dtype=object).T,
            ("1", "2", "3"),
        ),
    ],
)
def test_column_transformer_get_feature_names(X, keys):
    # Feature names are prefixed with the transformer name; dropped columns
    # vanish and passthrough columns get generic x<i> names on arrays.
    ct = ColumnTransformer([("col" + str(i), DictVectorizer(), i) for i in range(2)])
    ct.fit(X)
    assert ct.get_feature_names() == [f"col0__{key}" for key in keys[:2]] + [
        f"col1__{keys[2]}"
    ]
    # drop transformer
    ct = ColumnTransformer([("col0", DictVectorizer(), 0), ("col1", "drop", 1)])
    ct.fit(X)
    assert ct.get_feature_names() == [f"col0__{key}" for key in keys[:2]]
    # passthrough transformer
    ct = ColumnTransformer([("trans", "passthrough", [0, 1])])
    ct.fit(X)
    assert ct.get_feature_names() == ["x0", "x1"]
    ct = ColumnTransformer([("trans", DictVectorizer(), 0)], remainder="passthrough")
    ct.fit(X)
    assert ct.get_feature_names() == [f"trans__{key}" for key in keys[:2]] + ["x1"]
    # explicit passthrough columns come before the remainder's
    ct = ColumnTransformer([("trans", "passthrough", [1])], remainder="passthrough")
    ct.fit(X)
    assert ct.get_feature_names() == ["x1", "x0"]
    # the same selection expressed as callable, boolean mask, and slice
    ct = ColumnTransformer(
        [("trans", "passthrough", lambda x: [1])], remainder="passthrough"
    )
    ct.fit(X)
    assert ct.get_feature_names() == ["x1", "x0"]
    ct = ColumnTransformer(
        [("trans", "passthrough", np.array([False, True]))], remainder="passthrough"
    )
    ct.fit(X)
    assert ct.get_feature_names() == ["x1", "x0"]
    ct = ColumnTransformer(
        [("trans", "passthrough", slice(1, 2))], remainder="passthrough"
    )
    ct.fit(X)
    assert ct.get_feature_names() == ["x1", "x0"]
def test_column_transformer_get_feature_names_dataframe():
# passthough transformer with a dataframe
pd = pytest.importorskip("pandas")
X = np.array(
[[{"a": 1, "b": 2}, {"a": 3, "b": 4}], [{"c": 5}, {"c": 6}]], dtype=object
).T
X_df = pd.DataFrame(X, columns=["col0", "col1"])
ct = ColumnTransformer([("trans", "passthrough", ["col0", "col1"])])
ct.fit(X_df)
assert ct.get_feature_names() == ["col0", "col1"]
ct = ColumnTransformer([("trans", "passthrough", [0, 1])])
ct.fit(X_df)
assert ct.get_feature_names() == ["col0", "col1"]
ct = ColumnTransformer([("col0", DictVectorizer(), 0)], remainder="passthrough")
ct.fit(X_df)
assert ct.get_feature_names() == ["col0__a", "col0__b", "col1"]
ct = ColumnTransformer(
[("trans", "passthrough", ["col1"])], remainder="passthrough"
)
ct.fit(X_df)
assert ct.get_feature_names() == ["col1", "col0"]
ct = ColumnTransformer(
[("trans", "passthrough", lambda x: x[["col1"]].columns)],
remainder="passthrough",
)
ct.fit(X_df)
assert ct.get_feature_names() == ["col1", "col0"]
ct = ColumnTransformer(
[("trans", "passthrough", np.array([False, True]))], remainder="passthrough"
)
ct.fit(X_df)
assert ct.get_feature_names() == ["col1", "col0"]
ct = ColumnTransformer(
[("trans", "passthrough", slice(1, 2))], remainder="passthrough"
)
ct.fit(X_df)
| |
<filename>lib/gs.py
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Library to make common google storage operations more reliable.
"""
import logging
import os
from chromite.buildbot import constants
from chromite.lib import cache
from chromite.lib import cros_build_lib
from chromite.lib import osutils
# Default pathway; stored here rather than usual buildbot.constants since
# we don't want to import buildbot code from here.
# Note that this value is reset after GSContext via the GetDefaultGSUtilBin
# method; we set it initially here just for the sake of making clear it
# exists.
GSUTIL_BIN = None
PUBLIC_BASE_HTTPS_URL = 'http://storage.core-os.com/'
PRIVATE_BASE_HTTPS_URL = 'https://sandbox.google.com/storage/'
BASE_GS_URL = 'gs://'
def CanonicalizeURL(url, strict=False):
  """Convert provided URL to gs:// URL, if it follows a known format.

  Arguments:
    url: URL to canonicalize.
    strict: Raises ValueError if the URL cannot be canonicalized.

  Returns:
    The gs:// form of a recognized http(s) URL, else the URL unchanged.
  """
  for prefix in (PUBLIC_BASE_HTTPS_URL, PRIVATE_BASE_HTTPS_URL):
    if url.startswith(prefix):
      # Rewrite only the leading prefix.  The previous str.replace() call
      # would also have rewritten any later occurrence of the prefix text
      # embedded in the URL's path.
      return BASE_GS_URL + url[len(prefix):]
  if not url.startswith(BASE_GS_URL) and strict:
    raise ValueError('Url %r cannot be canonicalized.' % url)
  return url
def GetGsURL(bucket, for_gsutil=False, public=True, suburl=''):
  """Construct a Google Storage URL

  Args:
    bucket: The Google Storage bucket to use
    for_gsutil: Do you want a URL for passing to `gsutil`?
    public: Do we want the public or private url
    suburl: A url fragment to tack onto the end

  Returns:
    The fully constructed URL
  """
  # gsutil consumes gs:// URLs; browsers need the http(s) frontends.
  https_base = PUBLIC_BASE_HTTPS_URL if public else PRIVATE_BASE_HTTPS_URL
  urlbase = BASE_GS_URL if for_gsutil else https_base
  return '%s%s/%s' % (urlbase, bucket, suburl)
# Base error type for this module; callers can catch it to handle any
# google storage failure raised below.
class GSContextException(Exception):
  """Thrown when expected google storage preconditions are not met."""
# Raised for gsutil failures reporting code=PreconditionFailed.
class GSContextPreconditionFailed(GSContextException):
  """Thrown when google storage returns code=PreconditionFailed."""
# Raised for gsutil failures reporting code=NoSuchKey (missing object).
class GSNoSuchKey(GSContextException):
  """Thrown when google storage returns code=NoSuchKey."""
class GSContext(object):
  """A class to wrap common google storage operations."""

  # Fallback .boto credentials file, used when neither the boto_file
  # constructor argument nor $BOTO_CONFIG is supplied.
  DEFAULT_BOTO_FILE = os.path.expanduser('~/.boto')
  # This is set for ease of testing.
  DEFAULT_GSUTIL_BIN = None
  # Well-known gsutil location on builder machines; preferred over $PATH.
  DEFAULT_GSUTIL_BUILDER_BIN = '/b/build/third_party/gsutil/gsutil'
  # How many times to retry uploads.
  DEFAULT_RETRIES = 10
  # Multiplier for how long to sleep (in seconds) between retries; will delay
  # (1*sleep) the first time, then (2*sleep), continuing via attempt * sleep.
  DEFAULT_SLEEP_TIME = 60
  # Tarball name and download URL used by Cached() to fetch gsutil on demand.
  GSUTIL_TAR = 'gsutil-3.10.tar.gz'
  GSUTIL_URL = PUBLIC_BASE_HTTPS_URL + 'chromeos-public/%s' % GSUTIL_TAR
@classmethod
def GetDefaultGSUtilBin(cls):
if cls.DEFAULT_GSUTIL_BIN is None:
gsutil_bin = cls.DEFAULT_GSUTIL_BUILDER_BIN
if not os.path.exists(gsutil_bin):
gsutil_bin = osutils.Which('gsutil')
cls.DEFAULT_GSUTIL_BIN = gsutil_bin
return cls.DEFAULT_GSUTIL_BIN
  @classmethod
  def Cached(cls, cache_dir, *args, **kwargs):
    """Reuses previously fetched GSUtil, performing the fetch if necessary.

    Arguments:
      cache_dir: The toplevel cache dir.
      *args, **kwargs: Arguments that are passed through to the GSContext()
        constructor.

    Returns:
      An initialized GSContext() object.
    """
    common_path = os.path.join(cache_dir, constants.COMMON_CACHE)
    tar_cache = cache.TarballCache(common_path)
    key = (cls.GSUTIL_TAR,)
    # The common cache will not be LRU, removing the need to hold a read
    # lock on the cached gsutil.
    ref = tar_cache.Lookup(key)
    if ref.Exists():
      logging.debug('Reusing cached gsutil.')
    else:
      # Download the gsutil tarball into a temp staging dir, then hand it to
      # the cache which extracts/installs it under ref.path.
      logging.debug('Fetching gsutil.')
      with osutils.TempDirContextManager(
          base_dir=tar_cache.staging_dir) as tempdir:
        gsutil_tar = os.path.join(tempdir, cls.GSUTIL_TAR)
        cros_build_lib.RunCurl([cls.GSUTIL_URL, '-o', gsutil_tar],
                               debug_level=logging.DEBUG)
        ref.SetDefault(gsutil_tar)
    gsutil_bin = os.path.join(ref.path, 'gsutil', 'gsutil')
    return cls(*args, gsutil_bin=gsutil_bin, **kwargs)
def __init__(self, boto_file=None, acl_file=None, dry_run=False,
             gsutil_bin=None, init_boto=False, retries=None, sleep=None):
    """Constructor.

    Args:
      boto_file: Fully qualified path to user's .boto credential file.
      acl_file: A permission file capable of setting different permissions
        for different sets of users.
      dry_run: Testing mode that prints commands that would be run.
      gsutil_bin: If given, the absolute path to the gsutil binary.  Else
        the default fallback will be used.
      init_boto: If set to True, GSContext will check during __init__ if a
        valid boto config is configured, and if not, will attempt to ask the
        user to interactively set up the boto config.
      retries: Number of times to retry a command before failing.
      sleep: Amount of time to sleep between failures.
    """
    # Resolve and validate the gsutil binary to use.
    if gsutil_bin is None:
        gsutil_bin = self.GetDefaultGSUtilBin()
    self._CheckFile('gsutil not found', gsutil_bin)
    self.gsutil_bin = gsutil_bin

    # Prefer boto_file if specified, else prefer the env then the default.
    self.boto_file = (boto_file if boto_file is not None
                      else os.environ.get('BOTO_CONFIG', self.DEFAULT_BOTO_FILE))

    if acl_file is not None:
        self._CheckFile('Not a valid permissions file', acl_file)
    self.acl_file = acl_file

    self.dry_run = dry_run
    self._retries = int(retries) if retries is not None else self.DEFAULT_RETRIES
    self._sleep_time = int(sleep) if sleep is not None else self.DEFAULT_SLEEP_TIME

    # Optionally walk the user through boto setup, then require that the
    # credentials file actually exists.
    if init_boto:
        self._InitBoto()
    self._CheckFile('Boto credentials not found', self.boto_file)
def _CheckFile(self, errmsg, afile):
    """Pre-flight check for valid inputs.

    Args:
      errmsg: Error message to display.
      afile: Fully qualified path to test file existence.

    Raises:
      GSContextException: if afile does not name an existing regular file.
    """
    if os.path.isfile(afile):
        return
    raise GSContextException('%s, %s is not a file' % (errmsg, afile))
def _TestGSLs(self):
    """Quick test of gsutil functionality.

    Returns False only when `gsutil ls` fails specifically because no
    credentials are configured; any other outcome counts as working.
    """
    result = self._DoCommand(['ls'], retries=0, debug_level=logging.DEBUG,
                             redirect_stderr=True, error_code_ok=True)
    credentials_missing = (result.returncode == 1 and
                           'no configured credentials' in result.error)
    return not credentials_missing
def _ConfigureBotoConfig(self):
    """Make sure we can access protected bits in GS.

    Runs `gsutil config` interactively.  If that leaves an empty boto file
    behind (e.g. an aborted run), the file is removed and setup is treated
    as failed.

    Raises:
      GSContextException: if the boto config could not be set up.
    """
    print('Configuring gsutil. **Please use your @google.com account.**')
    try:
        self._DoCommand(['config'], retries=0, debug_level=logging.CRITICAL,
                        print_cmd=False)
    finally:
        # An empty credentials file is worse than none: remove it and fail.
        if (os.path.exists(self.boto_file) and not
                os.path.getsize(self.boto_file)):
            os.remove(self.boto_file)
            raise GSContextException('GS config could not be set up.')
def _InitBoto(self):
    """Interactively set up the boto config if gsutil has no credentials."""
    if self._TestGSLs():
        return
    self._ConfigureBotoConfig()
def Cat(self, path):
    """Run `gsutil cat` on a GS object, capturing its output.

    Returns the command result object with stdout redirected (the object's
    contents are on the result, not returned directly as a string).
    """
    return self._DoCommand(['cat', path], redirect_stdout=True)
def CopyInto(self, local_path, remote_dir, filename=None, acl=None,
             version=None):
    """Upload a local file into a directory in google storage.

    Args:
      local_path: Local file path to copy.
      remote_dir: Full gs:// url of the directory to transfer the file into.
      filename: If given, the filename to place the content at; if not given,
        it's discerned from basename(local_path).
      acl: If given, a canned ACL.
      version: If given, the generation; essentially the timestamp of the
        last update.  Note this is not the same as sequence-number; it's
        monotonically increasing bucket wide rather than reset per file.
        The usage of this is if we intend to replace/update only if the
        version is what we expect.  This is useful for distributed reasons -
        for example, to ensure you don't overwrite someone else's creation,
        a version of 0 states "only update if no version exists".
    """
    # Basename it even if an explicit filename was given; we don't want
    # people using filename as a multi-directory path fragment.
    target_name = os.path.basename(
        local_path if filename is None else filename)
    destination = '%s/%s' % (remote_dir, target_name)
    return self.Copy(local_path, destination, acl=acl, version=version)
def _RunCommand(self, cmd, **kwargs):
    """Run |cmd|, translating known GS error strings into typed exceptions.

    Raises:
      GSContextPreconditionFailed: GSResponseError with code=PreconditionFailed.
      GSNoSuchKey: GSResponseError with code=NoSuchKey.
      cros_build_lib.RunCommandError: any other failure (re-raised as-is).
    """
    try:
        return cros_build_lib.RunCommand(cmd, **kwargs)
    # gsutil uses the same exit code for any failure, so we are left to
    # parse the output as needed.
    except cros_build_lib.RunCommandError as e:
        error = e.result.error
        if error and 'GSResponseError' in error:
            if 'code=PreconditionFailed' in error:
                raise GSContextPreconditionFailed(e)
            if 'code=NoSuchKey' in error:
                raise GSNoSuchKey(e)
        raise
def _DoCommand(self, gsutil_cmd, headers=(), retries=None, **kwargs):
    """Run a gsutil command, suppressing output, and setting retry/sleep.

    Args:
      gsutil_cmd: The gsutil (sub)command and its arguments, e.g.
        ['cp', src, dst].
      headers: Extra 'name:value' headers, each passed via `gsutil -h`.
      retries: Retry count override; defaults to self._retries.
      **kwargs: Passed through to the command runner.  May include an
        'extra_env' dict; BOTO_CONFIG is filled in if not already set.

    Returns:
      A RunCommandResult object, or None in dry_run mode (nothing is run).
    """
    cmd = [self.gsutil_bin]
    for header in headers:
        cmd += ['-h', header]
    cmd.extend(gsutil_cmd)
    if retries is None:
        retries = self._retries
    # Copy the caller's extra_env before adding defaults; the original code
    # called setdefault() directly on the caller's dict, mutating it.
    extra_env = dict(kwargs.pop('extra_env', {}))
    extra_env.setdefault('BOTO_CONFIG', self.boto_file)
    if self.dry_run:
        logging.debug("%s: would've ran %r", self.__class__.__name__, cmd)
    else:
        return cros_build_lib.RetryCommand(
            self._RunCommand, retries, cmd, sleep=self._sleep_time,
            extra_env=extra_env, **kwargs)
def Copy(self, src_path, dest_path, acl=None, version=None, **kwargs):
    """Copy to/from GS bucket.

    Canned ACL permissions can be specified on the gsutil cp command line.
    More info:
    https://developers.google.com/storage/docs/accesscontrol#applyacls

    Args:
      src_path: Fully qualified local path or full gs:// path of the src file.
      dest_path: Fully qualified local path or full gs:// path of the dest
        file.
      acl: One of the google storage canned_acls to apply.
      version: If given, the generation; essentially the timestamp of the
        last update.  Note this is not the same as sequence-number; it's
        monotonically increasing bucket wide rather than reset per file.
        The usage of this is if we intend to replace/update only if the
        version is what we expect.  This is useful for distributed reasons -
        for example, to ensure you don't overwrite someone else's creation,
        a version of 0 states "only update if no version exists".

    Raises:
      RunCommandError if the command failed despite retries.

    Returns:
      Return the CommandResult from the run.
    """
    headers = []
    if version is not None:
        # Conditional write: only succeed if the object generation matches.
        headers = ['x-goog-if-generation-match:%d' % version]
    effective_acl = acl if acl is not None else self.acl_file
    cmd = ['cp']
    if effective_acl is not None:
        cmd += ['-a', effective_acl]
    cmd += ['--', src_path, dest_path]
    # For ease of testing, only pass headers if we got some.
    if headers:
        kwargs['headers'] = headers
    return self._DoCommand(cmd, redirect_stderr=True, **kwargs)
def LS(self, path):
"""Does a directory listing of the given gs path."""
return | |
return cas_models.QueryDatabaseResponse().from_map(
self.do_request('1.0', 'antcloud.cas.database.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_database_ex_async(
    self,
    request: cas_models.QueryDatabaseRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryDatabaseResponse:
    """
    Description: Query database (async gateway call)
    Summary: Query database
    """
    UtilClient.validate_model(request)
    # Generated gateway call: antcloud.cas.database.query
    return cas_models.QueryDatabaseResponse().from_map(
        await self.do_request_async('1.0', 'antcloud.cas.database.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )
# Generated API family for antcloud.cas.database.spec.query: sync/async
# convenience wrappers plus the _ex variants taking explicit headers/runtime.
def query_database_spec(
    self,
    request: cas_models.QueryDatabaseSpecRequest,
) -> cas_models.QueryDatabaseSpecResponse:
    """
    Description: Query database specifications
    Summary: Query database specifications
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return self.query_database_spec_ex(request, headers, runtime)

async def query_database_spec_async(
    self,
    request: cas_models.QueryDatabaseSpecRequest,
) -> cas_models.QueryDatabaseSpecResponse:
    """
    Description: Query database specifications
    Summary: Query database specifications
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return await self.query_database_spec_ex_async(request, headers, runtime)

def query_database_spec_ex(
    self,
    request: cas_models.QueryDatabaseSpecRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryDatabaseSpecResponse:
    """
    Description: Query database specifications
    Summary: Query database specifications
    """
    UtilClient.validate_model(request)
    return cas_models.QueryDatabaseSpecResponse().from_map(
        self.do_request('1.0', 'antcloud.cas.database.spec.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )

async def query_database_spec_ex_async(
    self,
    request: cas_models.QueryDatabaseSpecRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryDatabaseSpecResponse:
    """
    Description: Query database specifications
    Summary: Query database specifications
    """
    UtilClient.validate_model(request)
    return cas_models.QueryDatabaseSpecResponse().from_map(
        await self.do_request_async('1.0', 'antcloud.cas.database.spec.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )
# Generated API family for antcloud.cas.database.engine.all.
def all_database_engine(
    self,
    request: cas_models.AllDatabaseEngineRequest,
) -> cas_models.AllDatabaseEngineResponse:
    """
    Description: Query database engines
    Summary: Query database engines
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return self.all_database_engine_ex(request, headers, runtime)

async def all_database_engine_async(
    self,
    request: cas_models.AllDatabaseEngineRequest,
) -> cas_models.AllDatabaseEngineResponse:
    """
    Description: Query database engines
    Summary: Query database engines
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return await self.all_database_engine_ex_async(request, headers, runtime)

def all_database_engine_ex(
    self,
    request: cas_models.AllDatabaseEngineRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllDatabaseEngineResponse:
    """
    Description: Query database engines
    Summary: Query database engines
    """
    UtilClient.validate_model(request)
    return cas_models.AllDatabaseEngineResponse().from_map(
        self.do_request('1.0', 'antcloud.cas.database.engine.all', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )

async def all_database_engine_ex_async(
    self,
    request: cas_models.AllDatabaseEngineRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllDatabaseEngineResponse:
    """
    Description: Query database engines
    Summary: Query database engines
    """
    UtilClient.validate_model(request)
    return cas_models.AllDatabaseEngineResponse().from_map(
        await self.do_request_async('1.0', 'antcloud.cas.database.engine.all', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )
# Generated API family for antcloud.cas.database.quota.all.
def all_database_quota(
    self,
    request: cas_models.AllDatabaseQuotaRequest,
) -> cas_models.AllDatabaseQuotaResponse:
    """
    Description: Query database quotas
    Summary: Query database quotas
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return self.all_database_quota_ex(request, headers, runtime)

async def all_database_quota_async(
    self,
    request: cas_models.AllDatabaseQuotaRequest,
) -> cas_models.AllDatabaseQuotaResponse:
    """
    Description: Query database quotas
    Summary: Query database quotas
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return await self.all_database_quota_ex_async(request, headers, runtime)

def all_database_quota_ex(
    self,
    request: cas_models.AllDatabaseQuotaRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllDatabaseQuotaResponse:
    """
    Description: Query database quotas
    Summary: Query database quotas
    """
    UtilClient.validate_model(request)
    return cas_models.AllDatabaseQuotaResponse().from_map(
        self.do_request('1.0', 'antcloud.cas.database.quota.all', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )

async def all_database_quota_ex_async(
    self,
    request: cas_models.AllDatabaseQuotaRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllDatabaseQuotaResponse:
    """
    Description: Query database quotas
    Summary: Query database quotas
    """
    UtilClient.validate_model(request)
    return cas_models.AllDatabaseQuotaResponse().from_map(
        await self.do_request_async('1.0', 'antcloud.cas.database.quota.all', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )
# Generated API family for antcloud.cas.database.securityip.all.
def all_database_securityip(
    self,
    request: cas_models.AllDatabaseSecurityipRequest,
) -> cas_models.AllDatabaseSecurityipResponse:
    """
    Description: Query all database security IPs
    Summary: Query all database security IPs
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return self.all_database_securityip_ex(request, headers, runtime)

async def all_database_securityip_async(
    self,
    request: cas_models.AllDatabaseSecurityipRequest,
) -> cas_models.AllDatabaseSecurityipResponse:
    """
    Description: Query all database security IPs
    Summary: Query all database security IPs
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return await self.all_database_securityip_ex_async(request, headers, runtime)

def all_database_securityip_ex(
    self,
    request: cas_models.AllDatabaseSecurityipRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllDatabaseSecurityipResponse:
    """
    Description: Query all database security IPs
    Summary: Query all database security IPs
    """
    UtilClient.validate_model(request)
    return cas_models.AllDatabaseSecurityipResponse().from_map(
        self.do_request('1.0', 'antcloud.cas.database.securityip.all', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )

async def all_database_securityip_ex_async(
    self,
    request: cas_models.AllDatabaseSecurityipRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllDatabaseSecurityipResponse:
    """
    Description: Query all database security IPs
    Summary: Query all database security IPs
    """
    UtilClient.validate_model(request)
    return cas_models.AllDatabaseSecurityipResponse().from_map(
        await self.do_request_async('1.0', 'antcloud.cas.database.securityip.all', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )
# Generated API family for antcloud.cas.database.scheme.query.
# The upstream docstrings mark this endpoint as a typo ("scheme") that must
# not be used; it is kept only for backward compatibility.
def query_database_scheme(
    self,
    request: cas_models.QueryDatabaseSchemeRequest,
) -> cas_models.QueryDatabaseSchemeResponse:
    """
    Description: Do NOT use - endpoint name is a typo (kept for compatibility)
    Summary: Do NOT use - typo
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return self.query_database_scheme_ex(request, headers, runtime)

async def query_database_scheme_async(
    self,
    request: cas_models.QueryDatabaseSchemeRequest,
) -> cas_models.QueryDatabaseSchemeResponse:
    """
    Description: Do NOT use - endpoint name is a typo (kept for compatibility)
    Summary: Do NOT use - typo
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return await self.query_database_scheme_ex_async(request, headers, runtime)

def query_database_scheme_ex(
    self,
    request: cas_models.QueryDatabaseSchemeRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryDatabaseSchemeResponse:
    """
    Description: Do NOT use - endpoint name is a typo (kept for compatibility)
    Summary: Do NOT use - typo
    """
    UtilClient.validate_model(request)
    return cas_models.QueryDatabaseSchemeResponse().from_map(
        self.do_request('1.0', 'antcloud.cas.database.scheme.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )

async def query_database_scheme_ex_async(
    self,
    request: cas_models.QueryDatabaseSchemeRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryDatabaseSchemeResponse:
    """
    Description: Do NOT use - endpoint name is a typo (kept for compatibility)
    Summary: Do NOT use - typo
    """
    UtilClient.validate_model(request)
    return cas_models.QueryDatabaseSchemeResponse().from_map(
        await self.do_request_async('1.0', 'antcloud.cas.database.scheme.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )
# Generated API family for antcloud.cas.database.import.all.
def all_database_import(
    self,
    request: cas_models.AllDatabaseImportRequest,
) -> cas_models.AllDatabaseImportResponse:
    """
    Description: Get database import records
    Summary: Get database import records
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return self.all_database_import_ex(request, headers, runtime)

async def all_database_import_async(
    self,
    request: cas_models.AllDatabaseImportRequest,
) -> cas_models.AllDatabaseImportResponse:
    """
    Description: Get database import records
    Summary: Get database import records
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return await self.all_database_import_ex_async(request, headers, runtime)

def all_database_import_ex(
    self,
    request: cas_models.AllDatabaseImportRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllDatabaseImportResponse:
    """
    Description: Get database import records
    Summary: Get database import records
    """
    UtilClient.validate_model(request)
    return cas_models.AllDatabaseImportResponse().from_map(
        self.do_request('1.0', 'antcloud.cas.database.import.all', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )

async def all_database_import_ex_async(
    self,
    request: cas_models.AllDatabaseImportRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.AllDatabaseImportResponse:
    """
    Description: Get database import records
    Summary: Get database import records
    """
    UtilClient.validate_model(request)
    return cas_models.AllDatabaseImportResponse().from_map(
        await self.do_request_async('1.0', 'antcloud.cas.database.import.all', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )
# Generated API family for antcloud.cas.computer.agentstatus.query.
def query_computer_agentstatus(
    self,
    request: cas_models.QueryComputerAgentstatusRequest,
) -> cas_models.QueryComputerAgentstatusResponse:
    """
    Description: Query computer agent status
    Summary: Query computer agent status
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return self.query_computer_agentstatus_ex(request, headers, runtime)

async def query_computer_agentstatus_async(
    self,
    request: cas_models.QueryComputerAgentstatusRequest,
) -> cas_models.QueryComputerAgentstatusResponse:
    """
    Description: Query computer agent status
    Summary: Query computer agent status
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return await self.query_computer_agentstatus_ex_async(request, headers, runtime)

def query_computer_agentstatus_ex(
    self,
    request: cas_models.QueryComputerAgentstatusRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryComputerAgentstatusResponse:
    """
    Description: Query computer agent status
    Summary: Query computer agent status
    """
    UtilClient.validate_model(request)
    return cas_models.QueryComputerAgentstatusResponse().from_map(
        self.do_request('1.0', 'antcloud.cas.computer.agentstatus.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )

async def query_computer_agentstatus_ex_async(
    self,
    request: cas_models.QueryComputerAgentstatusRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryComputerAgentstatusResponse:
    """
    Description: Query computer agent status
    Summary: Query computer agent status
    """
    UtilClient.validate_model(request)
    return cas_models.QueryComputerAgentstatusResponse().from_map(
        await self.do_request_async('1.0', 'antcloud.cas.computer.agentstatus.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )
# Generated API family for antcloud.cas.computer.agentsetupcommand.query.
def query_computer_agentsetupcommand(
    self,
    request: cas_models.QueryComputerAgentsetupcommandRequest,
) -> cas_models.QueryComputerAgentsetupcommandResponse:
    """
    Description: Query the computer staragent setup command
    Summary: Query the staragent setup command
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return self.query_computer_agentsetupcommand_ex(request, headers, runtime)

async def query_computer_agentsetupcommand_async(
    self,
    request: cas_models.QueryComputerAgentsetupcommandRequest,
) -> cas_models.QueryComputerAgentsetupcommandResponse:
    """
    Description: Query the computer staragent setup command
    Summary: Query the staragent setup command
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return await self.query_computer_agentsetupcommand_ex_async(request, headers, runtime)

def query_computer_agentsetupcommand_ex(
    self,
    request: cas_models.QueryComputerAgentsetupcommandRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryComputerAgentsetupcommandResponse:
    """
    Description: Query the computer staragent setup command
    Summary: Query the staragent setup command
    """
    UtilClient.validate_model(request)
    return cas_models.QueryComputerAgentsetupcommandResponse().from_map(
        self.do_request('1.0', 'antcloud.cas.computer.agentsetupcommand.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )

async def query_computer_agentsetupcommand_ex_async(
    self,
    request: cas_models.QueryComputerAgentsetupcommandRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryComputerAgentsetupcommandResponse:
    """
    Description: Query the computer staragent setup command
    Summary: Query the staragent setup command
    """
    UtilClient.validate_model(request)
    return cas_models.QueryComputerAgentsetupcommandResponse().from_map(
        await self.do_request_async('1.0', 'antcloud.cas.computer.agentsetupcommand.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )
# Generated API family for antcloud.cas.computer.import.query.
def query_computer_import(
    self,
    request: cas_models.QueryComputerImportRequest,
) -> cas_models.QueryComputerImportResponse:
    """
    Description: Query imported computer information
    Summary: Query imported computer information
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return self.query_computer_import_ex(request, headers, runtime)

async def query_computer_import_async(
    self,
    request: cas_models.QueryComputerImportRequest,
) -> cas_models.QueryComputerImportResponse:
    """
    Description: Query imported computer information
    Summary: Query imported computer information
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return await self.query_computer_import_ex_async(request, headers, runtime)

def query_computer_import_ex(
    self,
    request: cas_models.QueryComputerImportRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryComputerImportResponse:
    """
    Description: Query imported computer information
    Summary: Query imported computer information
    """
    UtilClient.validate_model(request)
    return cas_models.QueryComputerImportResponse().from_map(
        self.do_request('1.0', 'antcloud.cas.computer.import.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )

async def query_computer_import_ex_async(
    self,
    request: cas_models.QueryComputerImportRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.QueryComputerImportResponse:
    """
    Description: Query imported computer information
    Summary: Query imported computer information
    """
    UtilClient.validate_model(request)
    return cas_models.QueryComputerImportResponse().from_map(
        await self.do_request_async('1.0', 'antcloud.cas.computer.import.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )
# Generated API family for antcloud.cas.computer.list.
def list_computer(
    self,
    request: cas_models.ListComputerRequest,
) -> cas_models.ListComputerResponse:
    """
    Description: List computers
    Summary: List computers
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return self.list_computer_ex(request, headers, runtime)

async def list_computer_async(
    self,
    request: cas_models.ListComputerRequest,
) -> cas_models.ListComputerResponse:
    """
    Description: List computers
    Summary: List computers
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return await self.list_computer_ex_async(request, headers, runtime)

def list_computer_ex(
    self,
    request: cas_models.ListComputerRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.ListComputerResponse:
    """
    Description: List computers
    Summary: List computers
    """
    UtilClient.validate_model(request)
    return cas_models.ListComputerResponse().from_map(
        self.do_request('1.0', 'antcloud.cas.computer.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )

async def list_computer_ex_async(
    self,
    request: cas_models.ListComputerRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> cas_models.ListComputerResponse:
    """
    Description: List computers
    Summary: List computers
    """
    UtilClient.validate_model(request)
    return cas_models.ListComputerResponse().from_map(
        await self.do_request_async('1.0', 'antcloud.cas.computer.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    )
# Generated API family for antcloud.cas.computer.spec.query (wrappers only;
# the _ex variants continue below).
def query_computer_spec(
    self,
    request: cas_models.QueryComputerSpecRequest,
) -> cas_models.QueryComputerSpecResponse:
    """
    Description: Query computer specs
    Summary: Query computer specs
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return self.query_computer_spec_ex(request, headers, runtime)

async def query_computer_spec_async(
    self,
    request: cas_models.QueryComputerSpecRequest,
) -> cas_models.QueryComputerSpecResponse:
    """
    Description: Query computer specs
    Summary: Query computer specs
    """
    runtime = util_models.RuntimeOptions()
    headers = {}
    return await self.query_computer_spec_ex_async(request, headers, runtime)
def query_computer_spec_ex(
self,
request: cas_models.QueryComputerSpecRequest,
headers: | |
import re
from django.contrib.auth.decorators import login_required
from django.forms import model_to_dict
import json
import logging
from copy import deepcopy
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.template import Context
from django.template.loader import get_template
from django.template.response import TemplateResponse
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from django.contrib.auth.models import User
from django.utils import timezone
from datetime import timedelta
from time import time
from rest_framework.decorators import api_view
from rest_framework.response import Response
from atlas.celerybackend.celery import app, ProdSysTask
from django.core.cache import cache
from celery.result import AsyncResult
from atlas.prodtask.mcevgen import sync_request_jos
from atlas.prodtask.models import HashTagToRequest, HashTag, WaitingStep, StepAction, ActionStaging, \
ActionDefault, SliceError
from atlas.prodtask.spdstodb import fill_template
from ..prodtask.helper import form_request_log, form_json_request_dict
from ..prodtask.ddm_api import find_dataset_events, DDM
from rest_framework.authtoken.models import Token
import atlas.datatables as datatables
from .models import StepTemplate, StepExecution, InputRequestList, TRequest, MCPattern, ProductionTask, \
get_priority_object, ProductionDataset, RequestStatus, get_default_project_mode_dict, get_default_nEventsPerJob_dict, \
OpenEndedRequest, TrainProduction, ParentToChildRequest, TProject
from django.db.models import Q
# Module loggers: one for the web UI, one emitting records for the ELK stack.
_logger = logging.getLogger('prodtaskwebui')
_jsonLogger = logging.getLogger('prodtask_ELK')
def create_predefinition_action(step):
    """Create the pre-defined action / waiting-step for a step, if needed.

    Driven by the step's 'PDA' task-config value, mapped through
    ActionDefault.ACTION_NAME_TYPE.  Nothing is done once a ProductionTask
    already exists for the step.  Raises KeyError if the PDA value is not in
    the mapping — presumably guaranteed by the caller; TODO confirm.
    """
    if not ProductionTask.objects.filter(step=step).exists():
        action = ActionDefault.ACTION_NAME_TYPE[step.get_task_config('PDA')]
        if action in [5, 8]:
            # Staging-style actions: schedule a StepAction (deduplicated
            # against already active/executing ones) and force the staging
            # project-mode flags onto the step.
            if not StepAction.objects.filter(step=int(step.id), action=action,
                                             status__in=['active', 'executing']).exists():
                sa = StepAction()
                sa.action = action
                sa.status = 'active'
                sa.request = step.request
                sa.step = step.id
                sa.attempt = 0
                sa.create_time = timezone.now()
                # First execution is deferred by 10 minutes.
                sa.execution_time = timezone.now() + timedelta(minutes=10)
                sa.save()
            if ('toStaging=yes') not in step.get_task_config('project_mode'):
                step.update_project_mode('toStaging', 'yes')
                step.save()
            if ('inputPreStaging') not in step.get_task_config('project_mode') and (action not in [8]):
                step.update_project_mode('inputPreStaging', 'yes')
                step.save()
            if (action in [8]) and ('useZipToPin=yes') not in step.get_task_config('project_mode'):
                step.update_project_mode('useZipToPin', 'yes')
                step.save()
        elif action in [11]:
            # Action 11 intentionally requires no scheduling here.
            pass
        else:
            # Any other action: park the step as Waiting and schedule a
            # WaitingStep (deduplicated against active/executing ones)
            # for immediate execution.
            step.status = 'Waiting'
            step.save()
            if not WaitingStep.objects.filter(step=int(step.id), action=action,
                                              status__in=['active', 'executing']).exists():
                waiting_step = WaitingStep()
                waiting_step.step = step.id
                waiting_step.request = step.request
                waiting_step.create_time = timezone.now()
                waiting_step.execution_time = timezone.now()
                waiting_step.attempt = 0
                waiting_step.action = action
                waiting_step.status = 'active'
                waiting_step.save()
def step_approve(request, stepexid=None, reqid=None, sliceid=None):
    """Approve a step execution (GET only).

    For an Evgen step, every step of the slice is approved together;
    otherwise only the chosen step is approved.  Always redirects back to
    the step execution table, even when approval fails.
    """
    if request.method == 'GET':
        try:
            choosen_step = StepExecution.objects.get(id=stepexid)
            if choosen_step.step_template.step != 'Evgen':
                steps_for_approve = [choosen_step]
            else:
                # Evgen: approve the whole slice of the request at once.
                cur_slice = InputRequestList.objects.get(id=sliceid)
                cur_request = TRequest.objects.get(reqid=reqid)
                steps_for_approve = StepExecution.objects.all().filter(request=cur_request, slice=cur_slice)
            for st in steps_for_approve:
                st.status = 'Approved'
                st.save()
        except Exception as e:
            # Previously this swallowed the error silently (a commented-out
            # print); log it so failed approvals are diagnosable, but keep
            # redirecting back to the table as before.
            _logger.exception('step_approve failed for step %s (request %s, slice %s): %s',
                              stepexid, reqid, sliceid, e)
            return HttpResponseRedirect(reverse('prodtask:step_execution_table'))
    return HttpResponseRedirect(reverse('prodtask:step_execution_table'))
def find_missing_tags(tags):
    """Return the subset of AMI tags considered missing.

    A tag counts as missing when its numeric part (everything after the
    leading letter) equals 9999.  ObjectDoesNotExist is tolerated per tag;
    any other error propagates.
    """
    missing = []
    for tag in tags:
        try:
            if int(tag[1:]) == 9999:
                missing.append(tag)
        except ObjectDoesNotExist:
            # Tag lookup failure is treated as "not missing".
            pass
        except Exception as e:
            raise e
    return missing
def step_status_definition(is_skipped, is_approve=True, is_waiting=False):
    """Map (skipped, approved, waiting) flags onto a step status string.

    Waiting wins over everything; otherwise approval decides between the
    checked ('Skipped'/'Approved') and unchecked ('NotChecked*') variants.
    """
    if is_waiting:
        return 'Waiting'
    if is_approve:
        return 'Skipped' if is_skipped else 'Approved'
    return 'NotCheckedSkipped' if is_skipped else 'NotChecked'
def form_existed_step_list(step_list):
    """Order existing steps into a parent->child chain.

    Returns (ordered_steps, another_chain_step) where another_chain_step is
    the parent living in a different chain (None when the chain is rooted
    locally by a self-parented step).

    Raises:
        ValueError: if the steps do not form a single linked chain.
    """
    chain = []
    pending = []
    another_chain_step = None
    # A step that is its own parent is the root of a locally rooted chain.
    for candidate in step_list:
        if candidate.step_parent == candidate:
            if chain:
                raise ValueError('Not linked chain')
            chain.append(candidate)
        else:
            pending.append(candidate)
    if not chain:
        # No local root: the chain hangs off a step from another chain.
        for position, candidate in enumerate(pending):
            if candidate.step_parent not in pending:
                another_chain_step = candidate.step_parent
                chain.append(candidate)
                pending.pop(position)
                break
    # Repeatedly pick the pending step whose parent is the chain's tail.
    for _ in range(len(pending)):
        cursor = 0
        while pending[cursor].step_parent != chain[-1]:
            cursor += 1
            if cursor >= len(pending):
                raise ValueError('Not linked chain')
        chain.append(pending[cursor])
    return (chain, another_chain_step)
def similar_status(status, is_skipped):
    """True when a step status is compatible with the given skipped flag."""
    if is_skipped:
        return status in ('Skipped', 'NotCheckedSkipped')
    return status in ('Approved', 'NotChecked')
def step_is_equal(step_value, existed_step):
    """Compare a step description dict against an existing step execution.

    Matches on ctag, on output formats (only when the description specifies
    any), and on skipped/approved status compatibility.
    """
    template = existed_step.step_template
    same_tag = template.ctag == step_value['value']
    compatible = similar_status(existed_step.status, step_value['is_skipped'])
    if step_value['formats']:
        return (template.output_formats == step_value['formats']
                and same_tag and compatible)
    return same_tag and compatible
def approve_existed_step(step, new_status):
    """Move a step to new_status unless it is already Approved/Skipped.

    Saves (with current time) only when the status actually changes.
    """
    if step.status in ('Approved', 'Skipped'):
        return
    if step.status != new_status:
        step.status = new_status
        step.save_with_current_time()
#TODO: Change it to real dataset workflow
def fill_dataset(ds):
    """Pass a dataset name through; empty/absent values become None.

    Placeholder for a real ProductionDataset lookup/creation workflow.
    """
    return ds or None
def form_step_in_page(ordered_existed_steps, STEPS, is_foreign):
    """Align an ordered chain of existing steps with the STEPS column layout.

    Returns a list of len(STEPS) entries where each position holds the
    matching existing step or None.

    Raises:
        ValueError: if some existing step cannot be placed in a column.
    """
    if not STEPS[0]:
        # Free-format layout: steps simply fill the leading columns,
        # shifted right by one when the chain starts in a foreign slice.
        padding = [None] * (len(STEPS) - len(ordered_existed_steps))
        if is_foreign:
            return [None] + ordered_existed_steps + padding[:-1]
        return ordered_existed_steps + padding
    if not ordered_existed_steps:
        return [None] * len(STEPS)
    placed = []
    cursor = 0
    for column_name in STEPS:
        current = (ordered_existed_steps[cursor]
                   if cursor < len(ordered_existed_steps) else None)
        if current is not None and column_name == current.step_template.step:
            placed.append(current)
            cursor += 1
        else:
            placed.append(None)
    # Every existing step must have found its column.
    if cursor != len(ordered_existed_steps):
        raise ValueError('Not consistent chain')
    return placed
#TODO: FIX it. Make one commit
def create_steps(prodsys_async_task, slice_steps, reqid, STEPS=StepExecution.STEPS, approve_level=99, waiting_level=99):
"""
Creating/saving steps
:param slice_steps: dict of slices this element {Slice number:[step tag,is_skipped]}
:param reqid: request id
:param is_approve: approve if true, save if false
"""
def set_action(step_to_check):
try:
create_predefinition_action(step_to_check)
except Exception as e:
_logger.error("Problem with pre defintion action %s" % str(e))
def events_per_input_file(index, STEPS, task_config, parent_step):
if index == 0:
task_config.update({'nEventsPerInputFile':get_default_nEventsPerJob_dict().get(STEPS[index],'')})
else:
if parent_step:
if 'nEventsPerJob' in json.loads(parent_step.task_config):
task_config.update({'nEventsPerInputFile': json.loads(parent_step.task_config)['nEventsPerJob']})
else:
task_config.update({'nEventsPerInputFile': get_default_nEventsPerJob_dict().get(parent_step.step_template.step,'')})
else:
task_config.update({'nEventsPerInputFile':get_default_nEventsPerJob_dict().get(STEPS[index-1],'')})
try:
APPROVED_STATUS = ['Skipped','Approved']
SKIPPED_STATUS = ['NotCheckedSkipped','Skipped']
waiting_level = 99
error_slices = []
no_action_slices = []
cur_request = TRequest.objects.get(reqid=reqid)
processed = 0
for slice, steps_status in list(slice_steps.items()):
if prodsys_async_task:
processed += 1
prodsys_async_task.progress_message_update(processed+1,len(slice_steps.keys())+2)
input_list = InputRequestList.objects.filter(request=cur_request, slice=int(slice))[0]
existed_steps = StepExecution.objects.filter(request=cur_request, slice=input_list)
if input_list.priority is None:
priority_obj = get_priority_object(850)
else:
priority_obj = get_priority_object(input_list.priority)
# Check steps which already exist in slice, and change them if needed
try:
ordered_existed_steps, existed_foreign_step = form_existed_step_list(existed_steps)
except ValueError as e:
ordered_existed_steps, existed_foreign_step = [],None
parent_step = None
no_action = True
status_changed = False
foreign_step = 0
if int(steps_status[-1]['foreign_id']) !=0:
foreign_step = int(steps_status[-1]['foreign_id'])
parent_step = StepExecution.objects.get(id=foreign_step)
steps_status.pop()
step_as_in_page = form_step_in_page(ordered_existed_steps,STEPS,existed_foreign_step)
# if foreign_step !=0 :
# step_as_in_page = [None] + step_as_in_page
first_not_approved_index = 0
total_events = -1
if not parent_step:
total_events = input_list.input_events
still_skipped = True
new_step = False
for index,step in enumerate(step_as_in_page):
if step:
if step.status in APPROVED_STATUS:
first_not_approved_index = index + 1
parent_step = step
if step.status not in SKIPPED_STATUS:
total_events = -1
still_skipped = False
if (index>=waiting_level):
waiting_level = 99
else:
total_events = step.input_events
try:
to_delete = []
for index,step_value in enumerate(steps_status[first_not_approved_index:],first_not_approved_index):
step_in_db = step_as_in_page[index]
if not step_value['value'] and not step_in_db:
continue
if not step_value['value'] and step_in_db:
to_delete.append(step_in_db)
continue
if step_value['value'] and (not step_in_db) and existed_foreign_step and (index == 0):
raise ValueError("Part of child chain before linked step can't be overridden")
no_action = False
if step_value['changes']:
for key in list(step_value['changes'].keys()):
if type(step_value['changes'][key]) != bool:
if (type(step_value['changes'][key]) != dict):
step_value['changes'][key].strip()
else:
for key_second_level in list(step_value['changes'][key].keys()):
step_value['changes'][key][key_second_level].strip()
if step_in_db:
if (len(to_delete)==0)and(step_in_db.step_template.ctag == step_value['value']) and \
(not step_value['changes']) and (total_events==step_in_db.input_events) and \
similar_status(step_in_db.status,step_value['is_skipped']) and (not new_step):
approve_existed_step(step_in_db,step_status_definition(step_value['is_skipped'],
index<=approve_level,
index>=waiting_level))
if (step_in_db.status in ['Approved','Waiting']) and (step_in_db.get_task_config('PDA')):
set_action(step_in_db)
if step_in_db.status not in SKIPPED_STATUS:
total_events = -1
still_skipped = False
parent_step = step_in_db
else:
if step_in_db.task_config:
task_config = json.loads(step_in_db.task_config)
else:
task_config = {}
for x in ['input_format','nEventsPerJob','token','merging_tag','nEventsPerMergeJob',
'nFilesPerMergeJob','nGBPerMergeJob','nMaxFilesPerMergeJob','project_mode',
'nFilesPerJob','nGBPerJob','maxAttempt','maxFailure','evntFilterEff',
'PDA','PDAParams','container_name', 'onlyTagsForFC']:
if x in step_value['changes']:
if step_value['changes'][x] and x in StepExecution.INT_TASK_CONFIG_PARAMS:
task_config[x] = int(step_value['changes'][x])
else:
task_config[x] = step_value['changes'][x]
for x in ['nEventsPerInputFile']:
if x in step_value['changes']:
if step_value['changes'][x]:
task_config[x] = int(step_value['changes'][x])
else:
task_config[x] = ''
elif new_step:
task_config[x] = ''
if 'maxFailure' in task_config:
if task_config['maxFailure']:
task_config['maxAttempt'] = int(task_config['maxFailure']) + 10
change_template = False
ctag = step_value['value']
if ctag != step_in_db.step_template.ctag:
change_template = True
output_formats = step_in_db.step_template.output_formats
if 'output_formats' in step_value['changes']:
output_formats = step_value['changes']['output_formats']
change_template = True
memory = step_in_db.step_template.memory
if 'memory' in step_value['changes']:
change_template = True
memory = step_value['changes']['memory']
if change_template:
step_in_db.step_template = fill_template(step_in_db.step_template.step, ctag, step_in_db.step_template.priority, output_formats, memory)
if 'priority' in step_value['changes']:
step_in_db.priority = step_value['changes']['priority']
if parent_step:
if still_skipped and (step_in_db.step_parent != parent_step) and ('nEventsPerInputFile' not in step_value['changes']):
if parent_step.get_task_config('nEventsPerJob'):
task_config['nEventsPerInputFile'] = parent_step.get_task_config('nEventsPerJob')
step_in_db.step_parent = parent_step
else:
step_in_db.step_parent = step_in_db
if not similar_status(step_in_db.status,step_value['is_skipped']):
status_changed = True
step_in_db.status = step_status_definition(step_value['is_skipped'], index<=approve_level,
index>=waiting_level)
if 'input_events' in step_value['changes']:
step_in_db.input_events = step_value['changes']['input_events']
total_events = step_in_db.input_events
else:
if (step_in_db.input_events == -1):
if status_changed:
step_in_db.input_events = total_events
else:
if not still_skipped:
step_in_db.input_events = -1
else:
total_events = step_in_db.input_events
if ('nEventsPerInputFile' not in step_value['changes']) and (not task_config.get('nEventsPerInputFile','')) and still_skipped:
events_per_input_file(index,STEPS,task_config,parent_step)
if step_in_db.status not in SKIPPED_STATUS:
total_events = -1
still_skipped = False
step_in_db.set_task_config(task_config)
step_in_db.remove_task_config('spreadsheet_original')
step_in_db.step_def_time = None
step_in_db.save_with_current_time()
if (step_in_db.status == 'Approved') and (step_in_db.get_task_config('PDA')):
set_action(step_in_db)
# waiting_level = index
parent_step = step_in_db
else:
status_changed = True
task_config = {'maxFailure':10,'maxAttempt':30}
if not input_list.project_mode:
task_config.update({'project_mode':get_default_project_mode_dict().get(STEPS[index],'')})
task_config.update({'nEventsPerJob':get_default_nEventsPerJob_dict().get(STEPS[index],'')})
if still_skipped:
events_per_input_file(index,STEPS,task_config,parent_step)
else:
task_config.update({'project_mode':input_list.project_mode})
for x in ['input_format','nEventsPerJob','token','merging_tag','nEventsPerMergeJob',
'nFilesPerMergeJob','nGBPerMergeJob','nMaxFilesPerMergeJob','project_mode','nFilesPerJob',
'nGBPerJob','maxAttempt','maxFailure','evntFilterEff',
'PDA','PDAParams','container_name', 'onlyTagsForFC']:
if x in step_value['changes']:
if step_value['changes'][x] and x in StepExecution.INT_TASK_CONFIG_PARAMS:
task_config[x] = int(step_value['changes'][x])
else:
task_config[x] = step_value['changes'][x]
for x in ['nEventsPerInputFile']:
if x in step_value['changes']:
if step_value['changes'][x]:
task_config[x] = int(step_value['changes'][x])
else:
task_config[x] = ''
ctag = step_value['value']
output_formats = step_value['formats']
if 'output_formats' in step_value['changes']:
output_formats = step_value['changes']['output_formats']
memory = None
if 'memory' in step_value['changes']:
memory = step_value['changes']['memory']
if(STEPS[index]):
| |
2D tensors, then being converted into a
list of 2D slices
Returns:
Tensor: LSTM output for each model time step
"""
self.init_buffers(inputs)
if self.reset_cells:
self.h[-1][:] = 0
self.c[-1][:] = 0
params = (self.h, self.h_prev, self.xs, self.ifog, self.ifo,
self.i, self.f, self.o, self.g, self.c, self.c_prev, self.c_act)
for (h, h_prev, xs, ifog, ifo, i, f, o, g, c, c_prev, c_act) in zip(*params):
self.be.compound_dot(self.W_recur, h_prev, ifog)
self.be.compound_dot(self.W_input, xs, ifog, beta=1.0)
ifog[:] = ifog + self.b
ifo[:] = self.gate_activation(ifo)
g[:] = self.activation(g)
c[:] = f * c_prev + i * g
c_act[:] = self.activation(c)
h[:] = o * c_act
return self.outputs
def bprop(self, deltas, alpha=1.0, beta=0.0):
"""
Backpropagation of errors, output delta for previous layer, and
calculate the update on model parmas
Arguments:
deltas (list[Tensor]): error tensors for each time step
of unrolling
do_acts (bool, optional): Carry out activations. Defaults to True
Attributes:
dW_input (Tensor): input weight gradients
dW_recur (Tensor): revursive weight gradients
db (Tensor): bias gradients
Returns:
Tensor: Backpropagated errors for each time step
of model unrolling
"""
self.c_delta_buffer[:] = 0
self.dW[:] = 0
if self.in_deltas is None:
self.in_deltas = get_steps(deltas, self.out_shape)
self.prev_in_deltas = self.in_deltas[-1:] + self.in_deltas[:-1]
self.ifog_delta_last_steps = self.ifog_delta_buffer[:, self.be.bsz:]
self.h_first_steps = self.outputs[:, :-self.be.bsz]
params = (self.h_delta, self.in_deltas, self.prev_in_deltas,
self.i, self.f, self.o, self.g, self.ifog_delta,
self.i_delta, self.f_delta, self.o_delta, self.g_delta,
self.c_delta, self.c_delta_prev, self.c_prev_bprop, self.c_act)
for (h_delta, in_deltas, prev_in_deltas,
i, f, o, g, ifog_delta, i_delta, f_delta, o_delta, g_delta,
c_delta, c_delta_prev, c_prev, c_act) in reversed(zip(*params)):
# current cell delta
c_delta[:] = c_delta + self.activation.bprop(c_act) * (o * in_deltas)
i_delta[:] = self.gate_activation.bprop(i) * c_delta * g
f_delta[:] = self.gate_activation.bprop(f) * c_delta * c_prev
o_delta[:] = self.gate_activation.bprop(o) * in_deltas * c_act
g_delta[:] = self.activation.bprop(g) * c_delta * i
# out deltas
self.be.compound_dot(self.W_recur.T, ifog_delta, h_delta)
if c_delta_prev is not None:
c_delta_prev[:] = c_delta * f
prev_in_deltas[:] = prev_in_deltas + h_delta
# Weight deltas and accumulate
self.be.compound_dot(self.ifog_delta_last_steps, self.h_first_steps.T, self.dW_recur)
self.be.compound_dot(self.ifog_delta_buffer, self.x.T, self.dW_input)
# Bias delta and accumulate
self.db[:] = self.be.sum(self.ifog_delta_buffer, axis=1)
# out deltas
if self.out_deltas_buffer: # save a bit of computation
self.be.compound_dot(self.W_input.T, self.ifog_delta_buffer, self.out_deltas_buffer,
alpha=alpha, beta=beta)
return self.out_deltas_buffer
class GRU(Recurrent):
"""
Implementation of the Gated Recurrent Unit based on [Cho2014]
- It uses two gates: reset gate (r) and update gate (z)
- The update gate (z) decides how much the activation is updated
- The reset gate (r) decides how much to reset (when r = 0) from the previous activation
- Activation (h_t) is a linear interpolation (by z) between the previous
activation (h_t-1) and the new candidate activation ( h_can )
- r and z are compuated the same way, using different weights
- gate activation function and unit activation function are usually different
- gate activation is usually logistic
- unit activation is usually tanh
- consider there are 3 gates: r, z, h_can
Arguments:
output_size (int): Number of hidden/output units
init (Initializer): Function for initializing the model's input to hidden weights. By
default, this initializer will also be used for recurrent parameters
unless init_inner is also specified. Biases will always be
initialized to zero.
init_inner (Initializer, optional): Function for initializing the model's recurrent
parameters. If absent, will default to using same
initializer provided to init.
activation (Transform): Activiation function for the input modulation
gate_activation (Transform): Activation function for the gates
Attributes:
x (Tensor): Input data tensor (seq len, inp size, batch size)
W_input (Tensor): Weights on the input units
(out size * 3, input size)
W_recur (Tensor): Weights on the recursive inputs
(out size * 3, out size)
b (Tensor): Biases (out size * 3 , 1)
References:
* Learning phrase representations using rnn encoder-decoder for
statistical machine translation `[Cho2014]`_
* Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling
`[Chung2014]`_
.. _[Cho2014]: http://arxiv.org/abs/1406.1078
.. _[Chung2014]: http://arxiv.org/pdf/1412.3555v1.pdf
"""
def __init__(self, output_size, init, init_inner=None, activation=None,
gate_activation=None, reset_cells=False, name="GruLayer"):
super(GRU, self).__init__(output_size, init, init_inner,
activation, reset_cells, name)
self.gate_activation = gate_activation
self.ngates = 3 # r, z, hcandidate
    def allocate(self, shared_outputs=None):
        """
        Allocate output and gate buffers for the GRU.

        All of the ``r``/``z``/``hcan`` lists below are per-time-step slice
        views into the fused ``rzhcan*`` buffers -- writing through one view
        mutates the shared buffer, which fprop/bprop rely on.
        """
        super(GRU, self).allocate(shared_outputs)
        # 0 sentinel marks the first step, which has no previous hidden state.
        self.h_prev_bprop = [0] + self.h[:-1]
        # indices for slicing gate buffers: r and z are adjacent, so [rz1:rz2]
        # covers both; hcan occupies the final third.
        (rz1, rz2) = (0, self.nout * 2)
        (r1, r2) = (0, self.nout)
        (z1, z2) = (self.nout, self.nout * 2)
        (c1, c2) = (self.nout * 2, self.nout * 3)
        # buffers for:
        # rh_prev_buffer: previous hidden multiply with r;
        # wrc_T_dc: wc_recur.T dot with hcan_delta
        self.rh_prev_buffer = self.be.iobuf(self.out_shape)
        self.rh_prev = get_steps(self.rh_prev_buffer, self.out_shape)
        self.wrc_T_dc = self.be.iobuf(self.nout)
        # Gates: reset: r; update: z; candidate h: hcan
        self.rzhcan_buffer = self.be.iobuf(self.gate_shape)
        self.rzhcan = get_steps(self.rzhcan_buffer, self.gate_shape)
        self.rz = [gate[rz1:rz2] for gate in self.rzhcan]
        self.r = [gate[r1:r2] for gate in self.rzhcan]
        self.z = [gate[z1:z2] for gate in self.rzhcan]
        self.hcan = [gate[c1:c2] for gate in self.rzhcan]
        # the buffer only deals with recurrent inputs to the gates
        self.rzhcan_rec_buffer = self.be.iobuf(self.gate_shape)
        self.rzhcan_rec = get_steps(self.rzhcan_rec_buffer, self.gate_shape)
        self.rz_rec = [gate[rz1:rz2] for gate in self.rzhcan_rec]
        self.hcan_rec = [gate[c1:c2] for gate in self.rzhcan_rec]
        # Pre activation gate deltas
        self.rzhcan_delta_buffer = self.be.iobuf(self.gate_shape)
        self.rzhcan_delta = get_steps(self.rzhcan_delta_buffer, self.gate_shape)
        self.rz_delta = [gate[rz1:rz2] for gate in self.rzhcan_delta]
        self.r_delta = [gate[r1:r2] for gate in self.rzhcan_delta]
        self.z_delta = [gate[z1:z2] for gate in self.rzhcan_delta]
        self.hcan_delta = [gate[c1:c2] for gate in self.rzhcan_delta]
    def init_params(self, shape):
        """
        Initialize params for GRU including weights and biases.
        The weight matrix and bias matrix are concatenated from the weights
        for inputs and weights for recurrent inputs and bias.
        The shape of the weights are (number of inputs + number of outputs +1 )
        by (number of outputs * 3)

        Arguments:
            shape (Tuple): contains number of outputs and number of inputs
        """
        super(GRU, self).init_params(shape)
        (nout, nin) = shape
        # indices for slicing gate buffers
        (rz1, rz2) = (0, nout * 2)
        (c1, c2) = (nout * 2, nout * 3)
        # The named views below are slices of the fused parameter/gradient
        # matrices created by the superclass -- they alias, not copy, so
        # optimizer updates through W_recur/dW_recur are seen here too.
        self.Wrz_recur = self.W_recur[rz1:rz2]
        self.Whcan_recur = self.W_recur[c1:c2]
        self.b_rz = self.b[rz1:rz2]
        self.b_hcan = self.b[c1:c2]
        self.dWrz_recur = self.dW_recur[rz1:rz2]
        self.dWhcan_recur = self.dW_recur[c1:c2]
    def fprop(self, inputs, inference=False):
        """
        Apply the forward pass transformation to the input data. The input data is a list of
        inputs with an element for each time step of model unrolling.

        Computes, per step:
            r, z = gate_activation(W_input.x + Wrz_recur.h_prev + b_rz)
            hcan = activation(W_input.x + Whcan_recur.(r * h_prev) + b_hcan)
            h    = (1 - z) * h_prev + z * hcan

        Arguments:
            inputs (Tensor): input data as 3D tensors, then converted into a list of 2D tensors
            inference (bool, optional): unused here; kept for interface
                compatibility with other layers' fprop

        Returns:
            Tensor: GRU output for each model time step
        """
        self.init_buffers(inputs)
        if self.reset_cells:
            # [-1] is the state carried over from the previous minibatch.
            self.h[-1][:] = 0
            self.rz[-1][:] = 0
            self.hcan[-1][:] = 0
        for (h, h_prev, rh_prev, xs, rz, r, z, hcan, rz_rec, hcan_rec, rzhcan) in zip(
                self.h, self.h_prev, self.rh_prev, self.xs, self.rz, self.r,
                self.z, self.hcan, self.rz_rec, self.hcan_rec, self.rzhcan):
            # computes r, z, hcan from inputs (writes the fused rzhcan buffer)
            self.be.compound_dot(self.W_input, xs, rzhcan)
            # computes r, z, hcan from recurrents
            self.be.compound_dot(self.Wrz_recur, h_prev, rz_rec)
            # rz, r and z are views of the same buffer, so activating rz
            # in place updates r and z as well -- order matters: r must be
            # final before rh_prev is formed below.
            rz[:] = self.gate_activation(rz + rz_rec + self.b_rz)
            rh_prev[:] = r * h_prev
            self.be.compound_dot(self.Whcan_recur, rh_prev, hcan_rec)
            hcan[:] = self.activation(hcan_rec + hcan + self.b_hcan)
            h[:] = (1 - z) * h_prev + z * hcan
        return self.outputs
def bprop(self, deltas, alpha=1.0, beta=0.0):
"""
Backpropagation of errors, output delta for previous layer, and calculate the update on
model parmas
Arguments:
deltas (Tensor): error tensors for each time step of unrolling
do_acts (bool, optional): Carry out activations. Defaults to True
Attributes:
dW_input (Tensor): input weight gradients
dW_recur (Tensor): recurrent weight gradients
db (Tensor): bias gradients
Returns:
Tensor: Backpropagated errors for each time step of model unrolling
"""
self.dW[:] = 0
if self.in_deltas is None:
self.in_deltas = get_steps(deltas, self.out_shape)
self.prev_in_deltas = self.in_deltas[-1:] + self.in_deltas[:-1]
params = (self.r, self.z, self.hcan, self.rh_prev, self.h_prev_bprop,
self.r_delta, self.z_delta, self.hcan_delta, self.rz_delta, self.rzhcan_delta,
self.h_delta, self.in_deltas, self.prev_in_deltas)
for (r, z, hcan, rh_prev, h_prev, r_delta, z_delta, hcan_delta, rz_delta,
rzhcan_delta, h_delta, in_deltas, prev_in_deltas) in reversed(zip(*params)):
# hcan_delta
hcan_delta[:] = self.activation.bprop(hcan) * in_deltas * z
z_delta[:] = self.gate_activation.bprop(z) * in_deltas * (hcan - h_prev)
# r_delta
self.be.compound_dot(self.Whcan_recur.T, hcan_delta, r_delta)
r_delta[:] = self.gate_activation.bprop(r) * r_delta * h_prev
# out hidden delta
h_delta[:] = in_deltas * (1 - z)
self.be.compound_dot(self.Wrz_recur.T, rz_delta, h_delta, beta=1.0)
self.be.compound_dot(self.Whcan_recur.T, hcan_delta, self.wrc_T_dc)
h_delta[:] = h_delta + r * self.wrc_T_dc
if h_prev != 0:
self.be.compound_dot(rz_delta, h_prev.T, self.dWrz_recur, beta=1.0)
self.be.compound_dot(hcan_delta, rh_prev.T, self.dWhcan_recur, beta=1.0)
prev_in_deltas[:] = prev_in_deltas + h_delta
# Weight deltas and accumulate
self.be.compound_dot(self.rzhcan_delta_buffer, self.x.T, self.dW_input) # batch
self.db[:] = self.be.sum(self.rzhcan_delta_buffer, axis=1)
# out deltas
| |
# <gh_stars>1-10  (scraper metadata, not Python -- kept as a comment)
from zope.interface import implements
from twisted.trial import unittest
from twisted.application import service
from twisted.mail import smtp
from twisted.internet.defer import gatherResults
from twisted.internet import error
from twisted.cred import portal, checkers
from twisted.test.proto_helpers import StringTransport
from twisted.internet.address import IPv4Address
from twisted.python import failure
from axiom import store, userbase
from axiom.item import Item
from axiom.attributes import text, reference
from axiom.test.util import getPristineStore
from axiom.dependency import installOn
from xquotient import mail, exmess
from xquotient.compose import Composer
from xquotient.inbox import Inbox
from xquotient.iquotient import IMessageSender
def createStore(testCase):
    """
    Create a database suitable for use by the L{MailTests} suite.

    @type testCase: L{MailTests}
    @rtype: L{axiom.store.Store}
    """
    siteStore = store.Store(testCase.mktemp())

    def initializeStore():
        """
        Install site requirements for the MTA tests and create several users
        which will be used as the origin and destination of various test
        messages.
        """
        login = userbase.LoginSystem(store=siteStore)
        installOn(login, siteStore)
        accounts = [('testuser', 'localhost', True),
                    ('testuser', 'example.com', False),
                    ('administrator', 'localhost', True)]
        for localpart, domain, internal in accounts:
            account = login.addAccount(localpart, domain, None, internal=internal)
            subStore = account.avatars.open()

            def endow():
                # Give each avatar a working inbox and composer.
                installOn(Inbox(store=subStore), subStore)
                installOn(Composer(store=subStore), subStore)
            subStore.transact(endow)

    siteStore.transact(initializeStore)
    return siteStore
class _DeliveryRecord(Item):
    """
    Record an attempt to deliver a message to a particular address.  Used to
    test that the correct attempts are made to send messages out of the system.
    """
    # Axiom schema attributes (text/reference imported from axiom.attributes).
    toAddress = text(doc="""
    RFC2822-format string indicating to whom this delivery would have been
    attempted. This represents one element of the C{toAddresses} list passed
    to L{iquotient.IMessageSender.sendMessage}.
    """, default=None)

    message = reference(doc="""
    The message being delivered.
    """)

    stub = reference(doc="""
    The L{StubSender} which created this record.
    """)
class StubSender(Item):
    """
    Testable L{IMessageSender} implementation.

    Instead of performing real delivery, each requested send is recorded as a
    L{_DeliveryRecord} in the same store so tests can inspect what would have
    been sent.
    """
    implements(IMessageSender)

    # Placeholder schema attribute -- presumably present so the Item has at
    # least one attribute; TODO confirm against axiom's Item requirements.
    attr = reference(doc="placeholder attribute")
    powerupInterfaces = (IMessageSender,)

    def sendMessage(self, toAddresses, message):
        """
        Record one L{_DeliveryRecord} per target address instead of sending.
        """
        for addr in toAddresses:
            _DeliveryRecord(store=self.store,
                            stub=self,
                            toAddress=addr,
                            message=message)

    def getSends(self):
        """
        Retrieve information about all attempts to send messages which have
        been made.

        @return: An iterable of two-tuples of an address and a message.
            (Note: this is a lazy generator over a store query, not a
            C{list} as previously documented.)
        """
        return (
            (r.toAddress, r.message)
            for r
            in self.store.query(_DeliveryRecord,
                                _DeliveryRecord.stub == self))
class MailTests(unittest.TestCase):
def setUp(self):
self.store = getPristineStore(self, createStore)
self.login = self.store.findUnique(userbase.LoginSystem)
svc = service.IService(self.store)
svc.privilegedStartService()
svc.startService()
def tearDown(self):
svc = service.IService(self.store)
return svc.stopService()
    def test_messageTransferAgentDeliveryFactory(self):
        """
        Test that L{mail.MailTransferAgent} properly powers up the Item it is
        installed on for L{smtp.IMessageDeliveryFactory} and that the
        L{smtp.IMessageDelivery} provider it makes available is at least
        minimally functional.
        """
        mta = mail.MailTransferAgent(store=self.store)
        installOn(mta, self.store)
        # Installing the MTA should make the site store adaptable to
        # IMessageDeliveryFactory.
        factory = smtp.IMessageDeliveryFactory(self.store)
        delivery = factory.getMessageDelivery()
        self.failUnless(smtp.IMessageDelivery.providedBy(delivery))
    def test_messageDeliveryAgentDeliveryFactory(self):
        """
        Similar to L{test_messageTransferAgentDeliveryFactory}, but test
        L{mail.MailDeliveryAgent} instead.
        """
        # Unlike the MTA (site-wide), the delivery agent adapts a single
        # user account.
        account = self.login.accountByAddress(u'testuser', u'example.com')
        factory = smtp.IMessageDeliveryFactory(account)
        delivery = factory.getMessageDelivery()
        self.failUnless(smtp.IMessageDelivery.providedBy(delivery))
    def test_validateFromUnauthenticatedLocal(self):
        """
        Test that using a local address as the sender address without
        authenticating as that user raises an exception to prevent the
        delivery.
        """
        factory = mail.MailTransferAgent(store=self.store)
        installOn(factory, self.store)
        delivery = factory.getMessageDelivery()
        # Claiming a locally-hosted sender address without authenticating
        # must be refused (sender-address forgery prevention).
        d = delivery.validateFrom(
            ('home.example.net', '192.168.1.1'),
            smtp.Address('testuser@localhost'))
        return self.assertFailure(d, smtp.SMTPBadSender)
def test_validateFromUnauthenticatedNonLocal(self):
"""
Test that using a non-local address as the sender address without
authenticating first is accepted.
"""
factory = mail.MailTransferAgent(store=self.store)
installOn(factory, self.store)
delivery = factory.getMessageDelivery()
addr = smtp.Address('<EMAIL>')
d = delivery.validateFrom(('home.example.net', '192.168.1.1'), addr)
return d.addCallback(self.assertEquals, addr)
    def test_validateFromAuthenticatedLocal(self):
        """
        Test that using a local address as the sender address after
        authenticating as the user who owns that address is accepted.
        """
        avatar = self.login.accountByAddress(u'testuser', u'localhost')
        delivery = smtp.IMessageDeliveryFactory(avatar).getMessageDelivery()
        # Authenticated owner may use their own local address as sender.
        addr = smtp.Address('testuser@localhost')
        d = delivery.validateFrom(('home.example.net', '192.168.1.1'), addr)
        # validateFrom echoes back the accepted address.
        return d.addCallback(self.assertEquals, addr)
def test_validateFromAuthenticatedDisallowedLocal(self):
"""
Test that using a local address as the sender address after
authenticating as a user who does /not/ own that address is rejected.
"""
avatar = self.login.accountByAddress(u'testuser', u'localhost')
delivery = smtp.IMessageDeliveryFactory(avatar).getMessageDelivery()
addr = smtp.Address('admistrator@localhost')
d = delivery.validateFrom(('home.example.net', '192.168.1.1'), addr)
return self.assertFailure(d, smtp.SMTPBadSender)
def test_validateFromAuthenticatedNonLocal(self):
"""
Test that using a non-local address as the sender address after
authenticating as a user is rejected.
"""
avatar = self.login.accountByAddress(u'testuser', u'localhost')
delivery = smtp.IMessageDeliveryFactory(avatar).getMessageDelivery()
addr = smtp.Address('<EMAIL>')
d = delivery.validateFrom(('home.example.net', '192.168.1.1'), addr)
return self.assertFailure(d, smtp.SMTPBadSender)
def test_validateToUnauthenticatedLocal(self):
"""
Test that using a local address as the recipient address without
authenticating is accepted.
"""
factory = mail.MailTransferAgent(store=self.store)
installOn(factory, self.store)
delivery = factory.getMessageDelivery()
addr = smtp.Address('<EMAIL>')
d = delivery.validateFrom(('home.example.net', '192.168.1.1'), addr)
def validatedFrom(ign):
d = delivery.validateTo(
smtp.User(
smtp.Address('testuser', 'localhost'),
None, None, None))
return d
d.addCallback(validatedFrom)
return d
def test_validateToUnauthenticatedNonLocal(self):
"""
Test that using a non-local address as the recipient address without
authenticating is rejected.
"""
factory = mail.MailTransferAgent(store=self.store)
installOn(factory, self.store)
delivery = factory.getMessageDelivery()
addr = smtp.Address('<EMAIL>')
d = delivery.validateFrom(('home.example.net', '192.168.1.1'), addr)
def validatedFrom(ign):
d = delivery.validateTo(
smtp.User(
smtp.Address('<EMAIL>'),
None, None, None))
return self.assertFailure(d, smtp.SMTPBadRcpt)
d.addCallback(validatedFrom)
return d
def test_validateToUnauthenticatedNonExistentLocal(self):
"""
Test that using as the recipient address a non-existent address which
would exist locally if it existed at all is rejected.
"""
factory = mail.MailTransferAgent(store=self.store)
installOn(factory, self.store)
delivery = factory.getMessageDelivery()
addr = smtp.Address('<EMAIL>')
d = delivery.validateFrom(('home.example.net', '192.168.1.1'), addr)
def validatedFrom(ign):
d = delivery.validateTo(
smtp.User(
smtp.Address('nonexistent', 'localhost'),
None, None, None))
return self.assertFailure(d, smtp.SMTPBadRcpt)
d.addCallback(validatedFrom)
return d
    def test_validateToAuthenticatedLocal(self):
        """
        Test that using a local address as the recipient address after
        authenticating as anyone is accepted.
        """
        avatar = self.login.accountByAddress(u'testuser', u'localhost')
        delivery = smtp.IMessageDeliveryFactory(avatar).getMessageDelivery()
        addr = smtp.Address('testuser@localhost')
        d = delivery.validateFrom(('home.example.net', '192.168.1.1'), addr)
        def validatedFrom(ign):
            # Local recipient owned by a *different* user -- still accepted.
            d = delivery.validateTo(
                smtp.User(
                    smtp.Address('administrator', 'localhost'),
                    None, None, None))
            return d
        d.addCallback(validatedFrom)
        return d
    def test_validateToAuthenticatedNonLocal(self):
        """
        Test that using a non-local address as the recipient address after
        authenticating as anyone is accepted.
        """
        avatar = self.login.accountByAddress(u'testuser', u'localhost')
        delivery = smtp.IMessageDeliveryFactory(avatar).getMessageDelivery()
        addr = smtp.Address('testuser@localhost')
        d = delivery.validateFrom(('home.example.net', '192.168.1.1'), addr)
        def validatedFrom(ign):
            # Authenticated users may relay to non-local recipients.
            d = delivery.validateTo(
                smtp.User(
                    smtp.Address('administrator', 'example.com'),
                    None, None, None))
            return d
        d.addCallback(validatedFrom)
        return d
    def test_validateToAuthenticatedNonExistentLocal(self):
        """
        Test that using as the recipient address a non-existent address which
        would exist locally if it existed at all is rejected.
        """
        avatar = self.login.accountByAddress(u'testuser', u'localhost')
        delivery = smtp.IMessageDeliveryFactory(avatar).getMessageDelivery()
        addr = smtp.Address('testuser@localhost')
        d = delivery.validateFrom(('home.example.net', '192.168.1.1'), addr)
        def validatedFrom(ign):
            # Even authenticated, an unknown local mailbox must be refused.
            d = delivery.validateTo(
                smtp.User(
                    smtp.Address('nonexistent', 'localhost'),
                    None, None, None))
            return self.assertFailure(d, smtp.SMTPBadRcpt)
        d.addCallback(validatedFrom)
        return d
    def deliverMessageAndVerify(self, messageFactory, recipientLocal, recipientDomain):
        """
        L{deliver} then L{verify}.

        @param messageFactory: factory (or list of factories) accepted by
            L{deliver}
        @param recipientLocal: localpart of the recipient account to check
        @param recipientDomain: domain of the recipient account to check
        """
        self.deliver(messageFactory)
        self.verify(recipientLocal, recipientDomain)
def deliver(self, messageFactory):
"""
Create a message using the given factory and deliver a message to it.
"""
if isinstance(messageFactory, list):
map(self.deliver, messageFactory)
else:
msg = messageFactory()
msg.lineReceived('Header: value')
msg.lineReceived('')
msg.lineReceived('Goodbye.')
msg.eomReceived()
    def verify(self, recipientLocal, recipientDomain):
        """
        Assert that the message has made it into the database of the given user
        as an L{exmess.Message} instance.
        """
        account = self.login.accountByAddress(recipientLocal, recipientDomain)
        avatar = account.avatars.open()
        # Query the recipient's mailbox for freshly received messages.
        sq = exmess.MailboxSelector(avatar)
        sq.refineByStatus(exmess.INCOMING_STATUS)
        messages = list(sq)
        self.assertEquals(len(messages), 1)
        # 'Goodbye.' is the body line written by L{deliver}.
        self.assertIn(
            'Goodbye.',
            messages[0].impl.source.open().read())
    def test_unauthenticatedMailDelivery(self):
        """
        Test that an unauthenticated user sending mail to a local user actually
        gets his message delivered.
        """
        # Reuse the validateTo test, which yields the accepted message
        # factory as its Deferred's result.
        d = self.test_validateToUnauthenticatedLocal()
        d.addCallback(self.deliverMessageAndVerify, u'testuser', u'localhost')
        return d
def verifyOutgoing(self, sender, localpart, domain):
"""
Assert that there is a message scheduled to be delivered to the given
address.
"""
address = u'@'.join((localpart, domain))
for (toAddr, msg) in sender.getSends():
if toAddr == address:
return
self.fail("No message addressed to %r" % (address,))
    def test_authenticatedMailDelivery(self):
        """
        Test that an authenticated user sending mail to a local user actually
        gets his message delivered and gets a record of that message in the
        form of a sent message.
        """
        # Swap in a StubSender so no real outbound delivery is attempted.
        sender = self.installStubSender(u'testuser', u'localhost')
        d = self.test_validateToAuthenticatedLocal()
        d.addCallback(self.deliver)
        def verify(ign):
            # This works even though outgoing *should* be True on this message
            # because apparently it is Composer's responsibility to set that
            # flag, and Composer isn't being allowed to run here.
            self.verify(u'testuser', u'localhost')
            return self.verifyOutgoing(
                sender, u'administrator', u'localhost')
        d.addCallback(verify)
        return d
    def installStubSender(self, localpart, domain):
        """
        Replace the IMessageSender powerup on the named avatar with a testable
        implementation.

        @rtype: L{StubSender}
        """
        # Remove the real IMessageSender so it doesn't try to actually deliver
        # any mail.
        account = self.login.accountByAddress(localpart, domain)
        avatar = account.avatars.open()
        sender = IMessageSender(account)
        avatar.powerDown(sender, IMessageSender)
        # Put in a new stub IMessageSender which we can use to assert things
        # about the sending behavior in this case.
        newSender = StubSender(store=avatar)
        installOn(newSender, avatar)
        return newSender
def test_authenticatedMailTransfer(self):
"""
Test that an authenticated user sending mail to a remote user actually
gets his message delivered and gets a record of the transmission in the
form of a sent message object.
"""
sender = self.installStubSender(u'testuser', u'localhost')
d = self.test_validateToAuthenticatedNonLocal()
d.addCallback(self.deliver)
def verify(ign):
# This works even though outgoing *should* be True on this | |
plugin.socket_connect_timeout == URLBase.socket_connect_timeout
assert plugin.socket_read_timeout == URLBase.socket_read_timeout
# Reset our object
a.clear()
assert len(a) == 0
# Instantiate a bad object
plugin = a.instantiate(object, tag="bad_object")
assert plugin is None
# Instantiate a good object
plugin = a.instantiate('good://localhost', tag="good")
assert isinstance(plugin, NotifyBase)
# Test simple tagging inside of the object
assert "good" in plugin
assert "bad" not in plugin
# the in (__contains__ override) is based on or'ed content; so although
# 'bad' isn't tagged as being in the plugin, 'good' is, so the return
# value of this is True
assert ["bad", "good"] in plugin
assert set(["bad", "good"]) in plugin
assert ("bad", "good") in plugin
# We an add already substatiated instances into our Apprise object
a.add(plugin)
assert len(a) == 1
# We can add entries as a list too (to add more then one)
a.add([plugin, plugin, plugin])
assert len(a) == 4
# Reset our object again
a.clear()
with pytest.raises(TypeError):
a.instantiate('throw://localhost', suppress_exceptions=False)
assert len(a) == 0
assert a.instantiate(
'throw://localhost', suppress_exceptions=True) is None
assert len(a) == 0
#
# We rince and repeat the same tests as above, however we do them
# using the dict version
#
# Reset our object
a.clear()
assert len(a) == 0
# Instantiate a good object
plugin = a.instantiate({
'schema': 'good',
'host': 'localhost'}, tag="good")
assert isinstance(plugin, NotifyBase)
# Test simple tagging inside of the object
assert "good" in plugin
assert "bad" not in plugin
# the in (__contains__ override) is based on or'ed content; so although
# 'bad' isn't tagged as being in the plugin, 'good' is, so the return
# value of this is True
assert ["bad", "good"] in plugin
assert set(["bad", "good"]) in plugin
assert ("bad", "good") in plugin
# We an add already substatiated instances into our Apprise object
a.add(plugin)
assert len(a) == 1
# We can add entries as a list too (to add more then one)
a.add([plugin, plugin, plugin])
assert len(a) == 4
# Reset our object again
a.clear()
with pytest.raises(TypeError):
a.instantiate({
'schema': 'throw',
'host': 'localhost'}, suppress_exceptions=False)
assert len(a) == 0
assert a.instantiate({
'schema': 'throw',
'host': 'localhost'}, suppress_exceptions=True) is None
assert len(a) == 0
def test_apprise_pretty_print(tmpdir):
    """
    API: Apprise() Pretty Print tests
    """
    # A spread of values pprint() must cope with: non-strings, the empty
    # string, and strings of increasing length.
    samples = (None, 42, object, "", "a", "ab", "abcdefghijk")

    # PrivacyMode.Secret always returns the same mask regardless of input
    # so nothing can be guessed from the output.
    for value in samples:
        assert URLBase.pprint(
            value, privacy=True, mode=PrivacyMode.Secret) == '****'

    # PrivacyMode.Outer reveals only the first and last character; anything
    # that is not a usable string collapses to the empty string.
    outer_expected = ('', '', '', '', 'a...a', 'a...b', 'a...k')
    for value, expected in zip(samples, outer_expected):
        assert URLBase.pprint(
            value, privacy=True, mode=PrivacyMode.Outer) == expected

    # PrivacyMode.Tail reveals only the trailing characters.
    tail_expected = ('', '', '', '', '...a', '...ab', '...hijk')
    for value, expected in zip(samples, tail_expected):
        assert URLBase.pprint(
            value, privacy=True, mode=PrivacyMode.Tail) == expected

    # Quoting settings: URL-quote by default, pass through when disabled.
    assert URLBase.pprint(" ", privacy=False, safe='') == '%20'
    assert URLBase.pprint(" ", privacy=False, quote=False, safe='') == ' '
@mock.patch('requests.get')
@mock.patch('requests.post')
def test_apprise_tagging(mock_post, mock_get):
    """
    API: Apprise() object tagging functionality
    """
    # Synchronous path: drive the shared tagging scenario through the
    # plain Apprise.notify() entry point.
    apprise_tagging_test(
        mock_post, mock_get,
        lambda server, *args, **kwargs: server.notify(*args, **kwargs))
@mock.patch('requests.get')
@mock.patch('requests.post')
@pytest.mark.skipif(sys.version_info.major <= 2, reason="Requires Python 3.x+")
def test_apprise_tagging_async(mock_post, mock_get):
    """
    API: Apprise() object tagging functionality asynchronous methods
    """
    def _notify_via_async(server, *args, **kwargs):
        # Drive the asynchronous API and block until the result arrives so
        # the shared scenario can treat both paths identically.
        return py3aio.tosync(server.async_notify(*args, **kwargs))

    apprise_tagging_test(mock_post, mock_get, _notify_via_async)
def apprise_tagging_test(mock_post, mock_get, do_notify):
    """
    Shared body for the synchronous and asynchronous tagging tests.

    ``do_notify`` abstracts over Apprise.notify() vs. async_notify() so the
    same tag expressions can be exercised through either entry point.
    """
    # Build a canned HTTP response for the patched requests.get/post.
    robj = mock.Mock()
    setattr(robj, 'raw', mock.Mock())
    # Allow raw.read() calls
    robj.raw.read.return_value = ''
    robj.text = ''
    robj.content = ''
    mock_get.return_value = robj
    mock_post.return_value = robj
    # Simulate a successful notification
    mock_get.return_value.status_code = requests.codes.ok
    mock_post.return_value.status_code = requests.codes.ok
    # Create our object
    a = Apprise()
    # An invalid addition can't add the tag either (both URL string and
    # dict forms are rejected the same way).
    assert a.add('averyinvalidschema://localhost', tag='uhoh') is False
    assert a.add({
        'schema': 'averyinvalidschema',
        'host': 'localhost'}, tag='uhoh') is False
    # Add entry and assign it to a tag called 'awesome'
    assert a.add('json://localhost/path1/', tag='awesome') is True
    assert a.add({
        'schema': 'json',
        'host': 'localhost',
        'fullpath': '/path1/'}, tag='awesome') is True
    # Add another notification and assign it to a tag called 'awesome'
    # and another tag called 'mmost'
    assert a.add('json://localhost/path2/', tag=['mmost', 'awesome']) is True
    # notify the awesome tag; this would notify both services behind the
    # scenes
    assert do_notify(
        a, title="my title", body="my body", tag='awesome') is True
    # notify all of the tags
    assert do_notify(
        a, title="my title", body="my body", tag=['awesome', 'mmost']) is True
    # When we query against our loaded notifications for a tag that simply
    # isn't assigned to anything, we return None. None (different than False)
    # tells us that we literally had nothing to query. We didn't fail...
    # but we also didn't do anything...
    assert do_notify(
        a, title="my title", body="my body", tag='missing') is None
    # Now test the ability to AND and OR tags together.
    a = Apprise()
    # Add a tag by tuple
    assert a.add('json://localhost/tagA/', tag=("TagA", )) is True
    # Add 2 tags by string
    assert a.add('json://localhost/tagAB/', tag="TagA, TagB") is True
    # Add a tag using a set
    assert a.add('json://localhost/tagB/', tag=set(["TagB"])) is True
    # Add a tag by string (again)
    assert a.add('json://localhost/tagC/', tag="TagC") is True
    # Add 2 tags using a list
    assert a.add('json://localhost/tagCD/', tag=["TagC", "TagD"]) is True
    # Add a tag by string (again)
    assert a.add('json://localhost/tagD/', tag="TagD") is True
    # add a tag set by set (again)
    assert a.add('json://localhost/tagCDE/',
                 tag=set(["TagC", "TagD", "TagE"])) is True
    # Expression: TagC and TagD
    # Matches the following only:
    #   - json://localhost/tagCD/
    #   - json://localhost/tagCDE/
    assert do_notify(
        a, title="my title", body="my body", tag=[('TagC', 'TagD')]) is True
    # Expression: (TagY and TagZ) or TagX
    # Matches nothing, None is returned in this case
    assert do_notify(
        a, title="my title", body="my body",
        tag=[('TagY', 'TagZ'), 'TagX']) is None
    # Expression: (TagY and TagZ) or TagA
    # Matches the following only:
    #   - json://localhost/tagAB/
    assert do_notify(
        a, title="my title", body="my body",
        tag=[('TagY', 'TagZ'), 'TagA']) is True
    # Expression: (TagE and TagD) or TagB
    # Matches the following only:
    #   - json://localhost/tagCDE/
    #   - json://localhost/tagAB/
    #   - json://localhost/tagB/
    assert do_notify(
        a, title="my title", body="my body",
        tag=[('TagE', 'TagD'), 'TagB']) is True
    # Garbage Entries in tag field just get stripped out. the below
    # is the same as notifying no tags at all. Since we have not added
    # any entries that do not have tags (that we can match against)
    # we fail. None is returned as a way of letting us know that we
    # had Notifications to notify, but since none of them matched our tag
    # none were notified.
    assert do_notify(
        a, title="my title", body="my body",
        tag=[(object, ), ]) is None
@pytest.mark.skipif(sys.version_info.major <= 2, reason="Requires Python 3.x+")
def test_apprise_schemas(tmpdir):
"""
API: Apprise().schema() tests
"""
# Caling load matix a second time which is an internal function causes it
# to skip over content already loaded into our matrix and thefore accesses
# other if/else parts of the code that aren't otherwise called
__load_matrix()
a = Apprise()
# no items
assert len(a) == 0
class TextNotification(NotifyBase):
# set our default notification format
notify_format = NotifyFormat.TEXT
# Garbage Protocol Entries
protocol = None
secure_protocol = (None, object)
class HtmlNotification(NotifyBase):
protocol = ('html', 'htm')
secure_protocol = ('htmls', 'htms')
class MarkDownNotification(NotifyBase):
protocol = 'markdown'
secure_protocol = 'markdowns'
# Store our notifications into our schema map
SCHEMA_MAP['text'] = TextNotification
SCHEMA_MAP['html'] = HtmlNotification
SCHEMA_MAP['markdown'] = MarkDownNotification
schemas = URLBase.schemas(TextNotification)
assert isinstance(schemas, set) is True
# We didn't define a protocol or secure protocol
assert len(schemas) == 0
schemas = URLBase.schemas(HtmlNotification)
assert isinstance(schemas, set) is True
assert len(schemas) == 4
assert 'html' in schemas
assert 'htm' in schemas
assert 'htmls' in schemas
assert 'htms' in schemas
# Invalid entries do not disrupt schema calls
for garbage in (object(), None, 42):
schemas = | |
<reponame>cashaddy/NeuroKit.py<filename>neurokit/bio/bio_ecg_preprocessing.py
# -*- coding: utf-8 -*-
"""
Subsubmodule for ecg processing.
"""
import numpy as np
import pandas as pd
import biosppy
import scipy
from .bio_rsp import *
from ..signal import *
from ..materials import Path
from ..statistics import *
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def ecg_preprocess(ecg, sampling_rate=1000, filter_type="FIR", filter_band="bandpass", filter_frequency=(3, 45), filter_order=0.3, segmenter="hamilton"):
    """
    ECG signal preprocessing.

    Parameters
    ----------
    ecg : list or ndarray
        ECG signal array.
    sampling_rate : int
        Sampling rate (samples/second).
    filter_type : str or None
        Can be Finite Impulse Response filter ("FIR"), Butterworth filter ("butter"), Chebyshev filters ("cheby1" and "cheby2"), Elliptic filter ("ellip") or Bessel filter ("bessel"). Any other value skips filtering.
    filter_band : str
        Band type, can be Low-pass filter ("lowpass"), High-pass filter ("highpass"), Band-pass filter ("bandpass"), Band-stop filter ("bandstop").
    filter_frequency : int or list
        Cutoff frequencies, format depends on type of band: "lowpass" or "highpass": single frequency (int), "bandpass" or "bandstop": pair of frequencies (list).
    filter_order : float
        Filter order, expressed as a fraction of the sampling rate.
    segmenter : str
        The cardiac phase segmenter. Can be "hamilton", "gamboa", "engzee", "christov", "ssf" or "pekkanen".

    Returns
    ----------
    ecg_preprocessed : dict
        Preprocessed ECG, with a "df" dataframe (raw/filtered signal, R-peak
        markers, heart rate) and an "ECG" dict (R-peaks, cardiac cycles,
        wave locations).

    Example
    ----------
    >>> import neurokit as nk
    >>> ecg_preprocessed = nk.ecg_preprocess(signal)

    Notes
    ----------
    *Details*

    - **segmenter**: Different methods of segmentation are implemented: **hamilton** (`Hamilton, 2002 <http://www.eplimited.com/osea13.pdf/>`_) , **gamboa** (`gamboa, 2008 <http://www.lx.it.pt/~afred/pub/thesisHugoGamboa.pdf/>`_), **engzee** (Engelse and Zeelenberg, 1979; Lourenco et al., 2012), **christov** (Christov, 2004) or **ssf** (Slope Sum Function), **pekkanen** (`Kathirvel, 2001) <http://link.springer.com/article/10.1007/s13239-011-0065-3/fulltext.html>`_.

    *Authors*

    - the bioSSPy dev team (https://github.com/PIA-Group/BioSPPy)
    - `<NAME> <https://dominiquemakowski.github.io/>`_

    *Dependencies*

    - biosppy
    - numpy

    *See Also*

    - BioSPPY: https://github.com/PIA-Group/BioSPPy
    """
    # Signal Processing
    # =======================
    # Transform to array
    ecg = np.array(ecg)

    # Filter signal (unrecognized filter_type values leave it untouched)
    if filter_type in ["FIR", "butter", "cheby1", "cheby2", "ellip", "bessel"]:
        # biosppy expects the order in samples, so scale the fractional
        # filter_order by the sampling rate.
        order = int(filter_order * sampling_rate)
        filtered, _, _ = biosppy.tools.filter_signal(signal=ecg,
                                                     ftype=filter_type,
                                                     band=filter_band,
                                                     order=order,
                                                     frequency=filter_frequency,
                                                     sampling_rate=sampling_rate)
    else:
        filtered = ecg  # filtered is not-filtered

    # Segment: locate candidate R-peaks with the requested algorithm
    if segmenter == "hamilton":
        rpeaks, = biosppy.ecg.hamilton_segmenter(signal=filtered, sampling_rate=sampling_rate)
    elif segmenter == "gamboa":
        rpeaks, = biosppy.ecg.gamboa_segmenter(signal=filtered, sampling_rate=sampling_rate, tol=0.002)
    elif segmenter == "engzee":
        rpeaks, = biosppy.ecg.engzee_segmenter(signal=filtered, sampling_rate=sampling_rate, threshold=0.48)
    elif segmenter == "christov":
        rpeaks, = biosppy.ecg.christov_segmenter(signal=filtered, sampling_rate=sampling_rate)
    elif segmenter == "ssf":
        rpeaks, = biosppy.ecg.ssf_segmenter(signal=filtered, sampling_rate=sampling_rate, threshold=20, before=0.03, after=0.01)
    elif segmenter == "pekkanen":
        rpeaks = segmenter_pekkanen(ecg=filtered, sampling_rate=sampling_rate, window_size=5.0, lfreq=5.0, hfreq=15.0)
    else:
        raise ValueError("Unknown segmenter: %s." % segmenter)

    # Correct R-peak locations to local maxima (50 ms tolerance)
    rpeaks, = biosppy.ecg.correct_rpeaks(signal=filtered,
                                         rpeaks=rpeaks,
                                         sampling_rate=sampling_rate,
                                         tol=0.05)

    # Extract cardiac cycles (0.2 s before / 0.4 s after each R-peak)
    cardiac_cycles, rpeaks = biosppy.ecg.extract_heartbeats(signal=filtered,
                                                            rpeaks=rpeaks,
                                                            sampling_rate=sampling_rate,
                                                            before=0.2,
                                                            after=0.4)

    # Compute heart rate
    heart_rate_idx, heart_rate = biosppy.tools.get_heart_rate(beats=rpeaks,
                                                              sampling_rate=sampling_rate,
                                                              smooth=True,
                                                              size=3)

    # Get time indices
    length = len(ecg)
    T = (length - 1) / float(sampling_rate)
    ts = np.linspace(0, T, length, endpoint=False)
    heart_rate_times = ts[heart_rate_idx]
    heart_rate_times = np.round(heart_rate_times*sampling_rate).astype(int)  # Convert heart rate times to timepoints

    # Prepare Output Dataframe
    # ==========================
    ecg_df = pd.DataFrame({"ECG_Raw": np.array(ecg)})  # Create a dataframe
    ecg_df["ECG_Filtered"] = filtered  # Add filtered signal

    # Add R peaks as a sparse marker column (NaN everywhere except peaks)
    rpeaks_signal = np.array([np.nan]*len(ecg))
    rpeaks_signal[rpeaks] = 1
    ecg_df["ECG_R_Peaks"] = rpeaks_signal

    # Heart Rate
    try:
        heart_rate = interpolate(heart_rate, heart_rate_times, sampling_rate)  # Interpolation using 3rd order spline
        ecg_df["Heart_Rate"] = heart_rate
    except TypeError:
        # interpolate() needs several beats; too-short sequences fall back
        # to an all-NaN column rather than aborting the preprocessing.
        print("NeuroKit Warning: ecg_process(): Sequence too short to compute heart rate.")
        ecg_df["Heart_Rate"] = np.nan

    # Store Additional Feature
    # ========================
    processed_ecg = {"df": ecg_df,
                     "ECG": {
                        "R_Peaks": rpeaks
                        }
                     }

    # Heartbeats: one column per cycle, indexed by a synthetic timestamp
    # grid at the sampling period. NOTE: pd.datetime was removed in
    # pandas 2.0; pd.Timestamp.today() is the supported equivalent.
    heartbeats = pd.DataFrame(cardiac_cycles).T
    heartbeats.index = pd.date_range(pd.Timestamp.today(), periods=len(heartbeats), freq=str(int(1000000/sampling_rate)) + "us")
    processed_ecg["ECG"]["Cardiac_Cycles"] = heartbeats

    # Waves
    waves = ecg_wave_detector(ecg_df["ECG_Filtered"], rpeaks)
    processed_ecg["ECG"].update(waves)

    # Systole
    processed_ecg["df"]["ECG_Systole"] = ecg_systole(ecg_df["ECG_Filtered"], rpeaks, waves["T_Waves_Ends"])

    return processed_ecg
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def ecg_find_peaks(signal, sampling_rate=1000):
    """
    Find R peaks indices on the ECG channel.

    Parameters
    ----------
    signal : list or ndarray
        ECG signal (preferably filtered).
    sampling_rate : int
        Sampling rate (samples/second).

    Returns
    ----------
    rpeaks : list
        List of R-peaks location indices.

    Example
    ----------
    >>> import neurokit as nk
    >>> Rpeaks = nk.ecg_find_peaks(signal)

    Notes
    ----------
    *Authors*

    - the bioSSPy dev team (https://github.com/PIA-Group/BioSPPy)

    *Dependencies*

    - biosppy

    *See Also*

    - BioSPPY: https://github.com/PIA-Group/BioSPPy
    """
    sig = np.array(signal)
    # Hamilton's segmenter produces candidate R-peak locations...
    rpeaks, = biosppy.ecg.hamilton_segmenter(sig, sampling_rate=sampling_rate)
    # ...which are then snapped to the nearby local maxima (50 ms tolerance).
    rpeaks, = biosppy.ecg.correct_rpeaks(signal=sig,
                                         rpeaks=rpeaks,
                                         sampling_rate=sampling_rate,
                                         tol=0.05)
    return rpeaks
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def ecg_wave_detector(ecg, rpeaks):
"""
Returns the localization of the P, Q, T waves. This function needs massive help!
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
Returns
----------
ecg_waves : dict
Contains wave peaks location indices.
Example
----------
>>> import neurokit as nk
>>> ecg = nk.ecg_simulate(duration=5, sampling_rate=1000)
>>> ecg = nk.ecg_preprocess(ecg=ecg, sampling_rate=1000)
>>> rpeaks = ecg["ECG"]["R_Peaks"]
>>> ecg = ecg["df"]["ECG_Filtered"]
>>> ecg_waves = nk.ecg_wave_detector(ecg=ecg, rpeaks=rpeaks)
>>> nk.plot_events_in_signal(ecg, [ecg_waves["P_Waves"], ecg_waves["Q_Waves_Onsets"], ecg_waves["Q_Waves"], list(rpeaks), ecg_waves["S_Waves"], ecg_waves["T_Waves_Onsets"], ecg_waves["T_Waves"], ecg_waves["T_Waves_Ends"]], color=["green", "yellow", "orange", "red", "black", "brown", "blue", "purple"])
Notes
----------
*Details*
- **Cardiac Cycle**: A typical ECG showing a heartbeat consists of a P wave, a QRS complex and a T wave.The P wave represents the wave of depolarization that spreads from the SA-node throughout the atria. The QRS complex reflects the rapid depolarization of the right and left ventricles. Since the ventricles are the largest part of the heart, in terms of mass, the QRS complex usually has a much larger amplitude than the P-wave. The T wave represents the ventricular repolarization of the ventricles. On rare occasions, a U wave can be seen following the T wave. The U wave is believed to be related to the last remnants of ventricular repolarization.
*Authors*
- `<NAME> <https://dominiquemakowski.github.io/>`_
"""
q_waves = []
p_waves = []
q_waves_starts = []
s_waves = []
t_waves = []
t_waves_starts = []
t_waves_ends = []
cycle_peaks = []
for index, rpeak in enumerate(rpeaks[:-3]):
try:
epoch_before = np.array(ecg)[int(rpeaks[index-1]):int(rpeak)]
epoch_before = epoch_before[int(len(epoch_before)/2):len(epoch_before)]
epoch_before = list(reversed(epoch_before))
q_wave_index = np.min(find_peaks(epoch_before))
q_wave = rpeak - q_wave_index
p_wave_index = q_wave_index + np.argmax(epoch_before[q_wave_index:])
p_wave = rpeak - p_wave_index
inter_pq = epoch_before[q_wave_index:p_wave_index]
inter_pq_derivative = np.gradient(inter_pq, 2)
q_start_index = find_closest_in_list(len(inter_pq_derivative)/2, find_peaks(inter_pq_derivative))
q_start = q_wave - q_start_index
q_waves.append(q_wave)
p_waves.append(p_wave)
q_waves_starts.append(q_start)
except ValueError:
pass
except IndexError:
pass
try:
epoch_after = np.array(ecg)[int(rpeak):int(rpeaks[index+1])]
epoch_after = epoch_after[0:int(len(epoch_after)/2)]
s_wave_index = np.min(find_peaks(epoch_after))
s_wave = rpeak + s_wave_index
t_wave_index = s_wave_index + np.argmax(epoch_after[s_wave_index:])
t_wave = rpeak + t_wave_index
inter_st = epoch_after[s_wave_index:t_wave_index]
inter_st_derivative = np.gradient(inter_st, 2)
t_start_index = find_closest_in_list(len(inter_st_derivative)/2, find_peaks(inter_st_derivative))
t_start = s_wave + t_start_index
t_end = np.min(find_peaks(epoch_after[t_wave_index:]))
t_end = t_wave + t_end
s_waves.append(s_wave)
t_waves.append(t_wave)
t_waves_starts.append(t_start)
t_waves_ends.append(t_end)
except ValueError:
pass
except IndexError:
pass
try:
cycle_peaks.append([p_wave, q_wave, rpeak, s_wave, t_wave])
except NameError:
pass
df_cycle_peaks = pd.DataFrame(cycle_peaks, columns=['p_peak', 'q_peak', 'r_peak', 's_peak', 't_peak'])
# pd.Series(epoch_before).plot()
# t_waves = []
# for index, rpeak in enumerate(rpeaks[0:-1]):
#
# epoch = np.array(ecg)[int(rpeak):int(rpeaks[index+1])]
# pd.Series(epoch).plot()
#
# # T wave
# middle = (rpeaks[index+1] - rpeak) / 2
# quarter = middle/2
#
# epoch = np.array(ecg)[int(rpeak+quarter):int(rpeak+middle)]
#
# try:
# t_wave = int(rpeak+quarter) + np.argmax(epoch)
# t_waves.append(t_wave)
# except ValueError:
# pass
#
# p_waves = []
# for index, rpeak in enumerate(rpeaks[1:]):
# index += 1
# # Q wave
# middle = (rpeak - rpeaks[index-1]) / 2
# quarter = middle/2
#
# epoch = np.array(ecg)[int(rpeak-middle):int(rpeak-quarter)]
#
# try:
# p_wave = int(rpeak-quarter) + np.argmax(epoch)
# p_waves.append(p_wave)
# except ValueError:
# pass
#
# q_waves = []
# for index, p_wave in enumerate(p_waves):
# epoch = np.array(ecg)[int(p_wave):int(rpeaks[rpeaks>p_wave][0])]
#
# try:
# q_wave = p_wave + np.argmin(epoch)
# q_waves.append(q_wave)
# except ValueError:
# pass
#
# # TODO: manage to find the begininng of the Q and the end of the T wave so we can extract the QT interval
ecg_waves = {"T_Waves": t_waves,
"P_Waves": p_waves,
"Q_Waves": q_waves,
"S_Waves": s_waves,
"Q_Waves_Onsets": q_waves_starts,
"T_Waves_Onsets": t_waves_starts,
"T_Waves_Ends": | |
['c3d', 'amc', 'avi'],
'fps': 120},
11: {'desc': 'Run Dive Over Roll Run',
'files': ['c3d', 'amc', 'avi'],
'fps': 120}}},
131: {'desc': '<NAME> Styled Motions',
'motions': {1: {'desc': 'Start Walk Stop',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': 'Start Walk Stop',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': 'Start Hop Stop',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': 'Start Hop Stop',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
5: {'desc': 'Start Duck Underneath Stop',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
6: {'desc': 'Start Duck Underneath Stop',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
7: {'desc': 'Jump Stop',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
8: {'desc': 'Jump Stop',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
9: {'desc': 'Start Walk Left',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
10: {'desc': 'Start Walk Left',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
11: {'desc': 'Start Walk Left',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
12: {'desc': 'Start Walk Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
13: {'desc': 'Start Walk Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
14: {'desc': 'Start Walk Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120}}},
132: {'desc': 'Varying Weird Walks',
'motions': {1: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
5: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
6: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
7: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
8: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
9: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
10: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
11: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
12: {'desc': 'Walk With Arms Out, balancing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
13: {'desc': 'Walk Backwards',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
14: {'desc': 'Walk Duck Footed',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
15: {'desc': 'Walk With Knees Bent',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
16: {'desc': 'Walk Crossover',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
17: {'desc': 'Walk Fast',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
18: {'desc': 'Walk Fast',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
19: {'desc': 'Walk Fast',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
20: {'desc': 'Walk Fast',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
21: {'desc': 'Walk Fast',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
22: {'desc': 'Walk Fast',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
23: {'desc': 'Hop on left foot',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
24: {'desc': 'Hop on left foot',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
25: {'desc': 'Hop on left foot',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
26: {'desc': 'Hop on left foot',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
27: {'desc': 'Hop on left foot',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
28: {'desc': 'Hop on left foot',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
29: {'desc': 'Bouncy Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
30: {'desc': 'Bouncy Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
31: {'desc': 'Bouncy Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
32: {'desc': 'Bouncy Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
33: {'desc': 'Bouncy Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
34: {'desc': 'Bouncy Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
35: {'desc': 'Walk Leaning To The Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
36: {'desc': 'Walk Leaning To The Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
37: {'desc': 'Marching',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
38: {'desc': 'Motorcycle',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
39: {'desc': 'Pigeon Toed Walking',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
40: {'desc': 'Walk With Stiff Arms',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
41: {'desc': 'Walk With Stiff Arms',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
42: {'desc': 'Walk Swinging Shoulders',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
43: {'desc': 'Walk Swinging Shoulders',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
44: {'desc': 'Walk Swinging Shoulders',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
45: {'desc': 'Walk Slow',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
46: {'desc': 'Walk Slow',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
47: {'desc': 'Walk Slow',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
48: {'desc': 'Walk Slow',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
49: {'desc': 'Walk Slow',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
50: {'desc': 'Walk Slow',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
51: {'desc': 'Tpose',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
52: {'desc': 'Range of Motion',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
53: {'desc': 'Walk With Legs Apart',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
54: {'desc': 'Walk With Wild Arms',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
55: {'desc': 'Walk With Wild Legs',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
56: {'desc': 'Walk With Wild Legs',
'files': ['c3d', 'amc', 'avi'],
'fps': 120}}},
133: {'desc': 'Baby Styled Walk',
'motions': {1: {'desc': 'Walk Crawl',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': 'Walk Crawl',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': 'Walk Jump',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': 'Walk Jump',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
5: {'desc': 'Walk Jump',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
6: {'desc': 'Walk Jump',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
7: {'desc': 'Walk Jump',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
8: {'desc': 'Stretch Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
9: {'desc': 'Stretch Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
10: {'desc': 'Stretch Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
11: {'desc': 'Walk Stop Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
12: {'desc': 'Walk Stop Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
13: {'desc': 'Walk Stop Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
14: {'desc': 'Walk Left',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
15: {'desc': 'Walk Left',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
16: {'desc': 'Walk Left',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
17: {'desc': 'Walk Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
18: {'desc': 'Walk Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
19: {'desc': 'Walk Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
20: {'desc': 'Walk Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
21: {'desc': 'Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
22: {'desc': 'Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
23: {'desc': 'Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
24: {'desc': 'Walk ZigZag',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
25: {'desc': 'Motorcycle',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
26: {'desc': 'Range of Motion',
'files': ['c3d', 'amc', 'avi'],
'fps': 120}}},
134: {'desc': 'Skateboard Motions',
'motions': {1: {'desc': 'Duck Under',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': 'Go Forward',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': 'Lean Turn Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': 'Motorcycle',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
5: {'desc': 'Pump Jump',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
6: {'desc': 'Push Turn Left',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
7: {'desc': 'Push Turn Left',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
8: {'desc': 'Push Turn Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
9: {'desc': 'Push Turn Right',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
10: {'desc': 'Start',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
11: {'desc': 'Start',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
12: {'desc': 'Stop and Go',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
13: {'desc': 'Stop and Go',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
14: {'desc': 'Stop ang Go',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
15: {'desc': 'Lean Turn Left',
'files': ['c3d', 'amc', 'avi'],
'fps': 120}}},
135: {'desc': 'Martial Arts Walks',
'motions': {1: {'desc': 'Bassai',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': 'Empi',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': 'Empi',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': 'Front Kick',
'files': | |
<reponame>lhartung/paradrop-test
import ipaddress
from .base import ConfigObject, ConfigOption
from .command import Command
IPTABLES_WAIT = "5"
class ConfigDefaults(ConfigObject):
    """
    The UCI firewall "defaults" section.

    Sets the default policies for the builtin filter chains and creates the
    delegate_*/X_rule chain scaffolding that zone and rule sections hook into.
    """
    typename = "defaults"

    options = [
        ConfigOption(name="input", default="ACCEPT"),
        ConfigOption(name="output", default="ACCEPT"),
        ConfigOption(name="forward", default="ACCEPT"),
        ConfigOption(name="disable_ipv6", type=bool)
    ]

    def getName(self):
        # There should only be one defaults section. If we return a constant
        # string for all instances, we can match them across file versions.
        return "SINGLETON"

    def get_iptables(self):
        """
        Get the list of iptables commands to use (iptables / ip6tables).
        """
        if self.disable_ipv6:
            return ["iptables"]
        else:
            return ["iptables", "ip6tables"]

    def _setup_chain_cmds(self, iptables, table, path):
        """
        Build the commands that create the delegate_<path> and <path>_rule
        chains in `table` and append jumps to them from the builtin chain.

        Returns a list of argument lists in the order they must execute.
        """
        return [
            # Create the delegate_X chain.
            [iptables, "--wait", IPTABLES_WAIT, "--table", table,
             "--new", "delegate_"+path],
            # Jump to delegate_X chain.
            [iptables, "--wait", IPTABLES_WAIT, "--table", table,
             "--append", path.upper(), "--jump", "delegate_"+path],
            # Create the X_rule chain.
            [iptables, "--wait", IPTABLES_WAIT, "--table", table,
             "--new", path+"_rule"],
            # Jump to X_rule chain.
            [iptables, "--wait", IPTABLES_WAIT, "--table", table,
             "--append", path.upper(), "--jump", path+"_rule"]
        ]

    def _teardown_chain_cmds(self, iptables, table, path):
        """
        Inverse of _setup_chain_cmds: remove the jumps, then flush and delete
        both chains (a chain must be empty before --delete-chain succeeds).
        """
        return [
            # Remove jump to X_rule chain.
            [iptables, "--wait", IPTABLES_WAIT, "--table", table,
             "--delete", path.upper(), "--jump", path+"_rule"],
            # Flush the X_rule chain.
            [iptables, "--wait", IPTABLES_WAIT, "--table", table,
             "--flush", path+"_rule"],
            # Delete the X_rule chain.
            [iptables, "--wait", IPTABLES_WAIT, "--table", table,
             "--delete-chain", path+"_rule"],
            # Remove jump to delegate_X chain.
            [iptables, "--wait", IPTABLES_WAIT, "--table", table,
             "--delete", path.upper(), "--jump", "delegate_"+path],
            # Flush the delegate_X chain.
            [iptables, "--wait", IPTABLES_WAIT, "--table", table,
             "--flush", "delegate_"+path],
            # Delete the delegate_X chain.
            [iptables, "--wait", IPTABLES_WAIT, "--table", table,
             "--delete-chain", "delegate_"+path]
        ]

    def apply(self, allConfigs):
        """
        Return prioritized commands that build the chain scaffolding and
        install the default policy rules.
        """
        commands = list()
        for iptables in self.get_iptables():
            for path in ["input", "output", "forward"]:
                for cmd in self._setup_chain_cmds(iptables, "filter", path):
                    commands.append((self.PRIO_IPTABLES_TOP, Command(cmd, self)))
                # Add a rule at the end with the default policy.
                cmd = [iptables, "--wait", IPTABLES_WAIT, "--table",
                       "filter", "--append", path.upper(), "--jump",
                       getattr(self, path)]
                commands.append((self.PRIO_IPTABLES_TOP, Command(cmd, self)))
            for path in ["prerouting", "postrouting"]:
                for cmd in self._setup_chain_cmds(iptables, "nat", path):
                    commands.append((self.PRIO_IPTABLES_TOP, Command(cmd, self)))
        return commands

    def revert(self, allConfigs):
        """
        Return prioritized commands that undo apply(): nat chains first,
        then filter chains, finally the default policy rules.
        """
        commands = list()
        for iptables in self.get_iptables():
            for path in ["prerouting", "postrouting"]:
                for cmd in self._teardown_chain_cmds(iptables, "nat", path):
                    commands.append((-self.PRIO_IPTABLES_TOP, Command(cmd, self)))
            for path in ["input", "output", "forward"]:
                for cmd in self._teardown_chain_cmds(iptables, "filter", path):
                    commands.append((-self.PRIO_IPTABLES_TOP, Command(cmd, self)))
                # Delete the default policy rule.
                cmd = [iptables, "--wait", IPTABLES_WAIT, "--table",
                       "filter", "--delete", path.upper(), "--jump",
                       getattr(self, path)]
                commands.append((-self.PRIO_IPTABLES_TOP, Command(cmd, self)))
        return commands

    def updateApply(self, new, allConfigs):
        """
        Append the new default policy rules for any path whose policy changed.
        Removal of the old rules is handled by updateRevert.
        """
        commands = list()
        for iptables in new.get_iptables():
            for path in ["input", "output", "forward"]:
                if getattr(self, path) == getattr(new, path):
                    # Skip if no change.
                    continue
                # Add the new default rule.
                cmd = [iptables, "--wait", IPTABLES_WAIT, "--table",
                       "filter", "--append", path.upper(), "--jump",
                       getattr(new, path)]
                commands.append((new.PRIO_IPTABLES_TOP, Command(cmd, new)))
        return commands

    def updateRevert(self, new, allConfigs):
        """
        Delete the old default policy rules for any path whose policy changed.
        """
        commands = list()
        for iptables in self.get_iptables():
            for path in ["input", "output", "forward"]:
                if getattr(self, path) == getattr(new, path):
                    # Skip if no change.
                    continue
                # Delete the old default rule.
                cmd = [iptables, "--wait", IPTABLES_WAIT, "--table",
                       "filter", "--delete", path.upper(), "--jump",
                       getattr(self, path)]
                commands.append((-self.PRIO_IPTABLES_TOP, Command(cmd, self)))
        return commands
class ConfigZone(ConfigObject):
typename = "zone"
options = [
ConfigOption(name="name", required=True),
ConfigOption(name="network", type=list),
ConfigOption(name="masq", type=bool, default=False),
ConfigOption(name="masq_src", type=list, default=["0.0.0.0/0"]),
ConfigOption(name="masq_dest", type=list, default=["0.0.0.0/0"]),
ConfigOption(name="conntrack", type=bool, default=False),
ConfigOption(name="input", default="RETURN"),
ConfigOption(name="forward", default="RETURN"),
ConfigOption(name="output", default="RETURN"),
ConfigOption(name="family", default="any")
]
def get_iptables(self):
"""
Get the list of iptables commands to use (iptables / ip6tables).
"""
if self.family == "ipv4":
return ["iptables"]
elif self.family == "ipv6":
return ["ip6tables"]
else:
return ["iptables", "ip6tables"]
def setup(self):
self._interfaces = list()
    def __commands_iptables(self, allConfigs, action, prio):
        """
        Build the per-interface iptables commands for this zone.

        action: either "--append" (used by apply) or "--delete" (used by
        revert), so the same generator serves both directions.
        prio: priority attached to every generated Command.

        Returns a list of (priority, Command) tuples.
        """
        commands = list()
        for interface in self._interfaces:
            for iptables in self.get_iptables():
                # Jump to zone input chain.
                chain = "zone_{}_input".format(self.name)
                cmd = [iptables, "--wait", IPTABLES_WAIT,
                       "--table", "filter",
                       action, "delegate_input",
                       "--in-interface", interface.config_ifname,
                       "--jump", chain]
                commands.append((prio, Command(cmd, self)))
                # If conntrack is enabled, allow incoming traffic that is
                # associated with allowed outgoing traffic.
                if self.conntrack:
                    comment = "zone {} conntrack".format(self.name)
                    cmd = [iptables, "--wait", IPTABLES_WAIT,
                           "--table", "filter",
                           action, "input_rule",
                           "--in-interface", interface.config_ifname,
                           "--match", "state", "--state", "ESTABLISHED,RELATED",
                           "--match", "comment", "--comment", comment,
                           "--jump", "ACCEPT"]
                    commands.append((prio, Command(cmd, self)))
                # Implement default policy for zone (input direction).
                # NOTE(review): this "zone X default" comment string is reused
                # by the output and forward default rules below — presumably
                # intentional, since they are all the zone's default policy.
                comment = "zone {} default".format(self.name)
                cmd = [iptables, "--wait", IPTABLES_WAIT,
                       "--table", "filter",
                       action, "input_rule",
                       "--in-interface", interface.config_ifname,
                       "--match", "comment", "--comment", comment,
                       "--jump", self.input]
                commands.append((prio, Command(cmd, self)))
                # Jump to zone output chain.
                chain = "zone_{}_output".format(self.name)
                cmd = [iptables, "--wait", IPTABLES_WAIT,
                       "--table", "filter",
                       action, "delegate_output",
                       "--out-interface", interface.config_ifname,
                       "--jump", chain]
                commands.append((prio, Command(cmd, self)))
                # Implement default policy for zone (output direction).
                cmd = [iptables, "--wait", IPTABLES_WAIT,
                       "--table", "filter",
                       action, "output_rule",
                       "--out-interface", interface.config_ifname,
                       "--match", "comment", "--comment", comment,
                       "--jump", self.output]
                commands.append((prio, Command(cmd, self)))
                # Jump to zone forward chain.
                chain = "zone_{}_forward".format(self.name)
                cmd = [iptables, "--wait", IPTABLES_WAIT,
                       "--table", "filter",
                       action, "delegate_forward",
                       "--in-interface", interface.config_ifname,
                       "--jump", chain]
                commands.append((prio, Command(cmd, self)))
                # Implement default policy for zone (forward direction).
                cmd = [iptables, "--wait", IPTABLES_WAIT,
                       "--table", "filter",
                       action, "forward_rule",
                       "--in-interface", interface.config_ifname,
                       "--match", "comment", "--comment", comment,
                       "--jump", self.forward]
                commands.append((prio, Command(cmd, self)))
                # Jump to zone prerouting chain (nat table).
                chain = "zone_{}_prerouting".format(self.name)
                cmd = [iptables, "--wait", IPTABLES_WAIT,
                       "--table", "nat",
                       action, "delegate_prerouting",
                       "--in-interface", interface.config_ifname,
                       "--jump", chain]
                commands.append((prio, Command(cmd, self)))
                # Jump to zone postrouting chain (nat table).
                chain = "zone_{}_postrouting".format(self.name)
                cmd = [iptables, "--wait", IPTABLES_WAIT,
                       "--table", "nat",
                       action, "delegate_postrouting",
                       "--out-interface", interface.config_ifname,
                       "--jump", chain]
                commands.append((prio, Command(cmd, self)))
            # Masquerade rules are for IPv4 only, so add them outside the
            # iptables loop above (hard-coded "iptables", never ip6tables).
            if self.masq:
                for src in self.masq_src:
                    for dest in self.masq_dest:
                        comment = "zone {} masq".format(self.name)
                        cmd = ["iptables", "--wait", IPTABLES_WAIT,
                               "--table", "nat",
                               action, "POSTROUTING",
                               "--out-interface", interface.config_ifname,
                               "--source", src,
                               "--destination", dest,
                               "--match", "comment", "--comment", comment,
                               "--jump", "MASQUERADE"]
                        commands.append((prio, Command(cmd, self)))
        return commands
    def apply(self, allConfigs):
        """
        Generate commands that create this zone's chains, load required
        kernel modules, and enable forwarding when masquerading is on.

        Returns a list of (priority, Command) tuples.
        """
        # Initialize the list of network:interface sections.
        self._interfaces = list()
        if self.network is not None:
            for networkName in self.network:
                # Look up the interface - may fail.
                interface = self.lookup(allConfigs, "network", "interface", networkName)
                self._interfaces.append(interface)
        commands = list()
        for iptables in self.get_iptables():
            for path in ["input", "output", "forward"]:
                # Create the zone_NAME_X chain.
                chain = "zone_{}_{}".format(self.name, path)
                cmd = [iptables, "--wait", IPTABLES_WAIT,
                       "--table", "filter",
                       "--new", chain]
                commands.append((self.PRIO_IPTABLES_ZONE, Command(cmd, self)))
            for path in ["prerouting", "postrouting"]:
                # Create the zone_NAME_X chain.
                chain = "zone_{}_{}".format(self.name, path)
                cmd = [iptables, "--wait", IPTABLES_WAIT,
                       "--table", "nat",
                       "--new", chain]
                commands.append((self.PRIO_IPTABLES_ZONE, Command(cmd, self)))
        # Make sure the kernel module is loaded before we add any rules that
        # rely on it.  conntrackLoaded is shared manager state so modprobe
        # only runs once across all zones.
        if self.conntrack and not self.manager.conntrackLoaded:
            cmd = ["modprobe", "xt_conntrack"]
            commands.append((self.PRIO_IPTABLES_ZONE, Command(cmd, self)))
            self.manager.conntrackLoaded = True
        commands.extend(self.__commands_iptables(allConfigs, "--append",
                        self.PRIO_IPTABLES_ZONE))
        if self.masq:
            # Reference-count forwarding so sysctl is only issued by the
            # first masquerading zone (revert decrements symmetrically).
            self.manager.forwardingCount += 1
            if self.manager.forwardingCount == 1:
                cmd = ["sysctl", "--write",
                       "net.ipv4.conf.all.forwarding=1"]
                commands.append((self.PRIO_IPTABLES_ZONE, Command(cmd, self)))
                cmd = ["sysctl", "--write",
                       "net.ipv6.conf.all.forwarding=1"]
                commands.append((self.PRIO_IPTABLES_ZONE, Command(cmd, self)))
        return commands
def revert(self, allConfigs):
commands = self.__commands_iptables(allConfigs, "--delete",
-self.PRIO_IPTABLES_ZONE)
if self.masq:
self.manager.forwardingCount -= 1
if self.manager.forwardingCount == 0:
cmd = ["sysctl", "--write",
"net.ipv4.conf.all.forwarding=0"]
commands.append((-self.PRIO_IPTABLES_ZONE, Command(cmd, self)))
cmd = ["sysctl", "--write",
"net.ipv6.conf.all.forwarding=0"]
commands.append((-self.PRIO_IPTABLES_ZONE, Command(cmd, self)))
for iptables in self.get_iptables():
pairs = [
("filter", "input"),
("filter", "output"),
("filter", "forward"),
("nat", "prerouting"),
("nat", "postrouting")
]
for table, path in pairs:
chain = "zone_{}_{}".format(self.name, path)
# Flush the zone_NAME_X chain, so that we do not get an error
# with the delete command.
cmd = [iptables, "--wait", IPTABLES_WAIT,
"--table", table,
"--flush", chain]
commands.append((-self.PRIO_IPTABLES_ZONE, Command(cmd, self)))
# Delete the | |
<filename>cli/tests/pcluster/validators/test_cluster_validators.py
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from assertpy import assert_that
from pcluster.aws.aws_resources import InstanceTypeInfo
from pcluster.config.cluster_config import Tag
from pcluster.constants import PCLUSTER_NAME_MAX_LENGTH
from pcluster.validators.cluster_validators import (
FSX_MESSAGES,
FSX_SUPPORTED_ARCHITECTURES_OSES,
ArchitectureOsValidator,
ClusterNameValidator,
ComputeResourceSizeValidator,
DcvValidator,
DisableSimultaneousMultithreadingArchitectureValidator,
DuplicateMountDirValidator,
EfaOsArchitectureValidator,
EfaPlacementGroupValidator,
EfaSecurityGroupValidator,
EfaValidator,
FsxArchitectureOsValidator,
FsxNetworkingValidator,
HeadNodeImdsValidator,
HostedZoneValidator,
InstanceArchitectureCompatibilityValidator,
IntelHpcArchitectureValidator,
IntelHpcOsValidator,
MaxCountValidator,
NameValidator,
NumberOfStorageValidator,
OverlappingMountDirValidator,
RegionValidator,
SchedulerOsValidator,
SharedStorageNameValidator,
_LaunchTemplateValidator,
)
from tests.pcluster.aws.dummy_aws_api import mock_aws_api
from tests.pcluster.validators.utils import assert_failure_messages
from tests.utils import MockedBoto3Request
@pytest.fixture()
def boto3_stubber_path():
    # Tells the shared boto3_stubber fixture which module's boto3 reference
    # to patch for these tests.
    return "pcluster.aws.common.boto3"
@pytest.mark.parametrize(
    "cluster_name, should_trigger_error",
    [
        ("ThisClusterNameShouldBeRightSize-ContainAHyphen-AndANumber12", False),
        ("ThisClusterNameShouldBeJustOneCharacterTooLongAndShouldntBeOk", True),
        ("2AClusterCanNotBeginByANumber", True),
        ("ClusterCanNotContainUnderscores_LikeThis", True),
        ("ClusterCanNotContainSpaces LikeThis", True),
    ],
)
def test_cluster_name_validator(cluster_name, should_trigger_error):
    """Names must be alphanumeric/hyphen, start with a letter, and respect the length cap."""
    if should_trigger_error:
        expected_message = (
            "Error: The cluster name can contain only alphanumeric characters (case-sensitive) and hyphens. "
            "It must start with an alphabetic character and can't be longer "
            f"than {PCLUSTER_NAME_MAX_LENGTH} characters."
        )
    else:
        expected_message = None
    assert_failure_messages(ClusterNameValidator().execute(cluster_name), expected_message)
@pytest.mark.parametrize(
    "region, expected_message",
    [
        ("invalid-region", "Region 'invalid-region' is not yet officially supported "),
        ("us-east-1", None),
    ],
)
def test_region_validator(region, expected_message):
    """Unsupported regions must be reported; supported regions pass cleanly."""
    failures = RegionValidator().execute(region)
    assert_failure_messages(failures, expected_message)
@pytest.mark.parametrize(
    "os, scheduler, expected_message",
    [
        ("centos7", "slurm", None),
        ("ubuntu1804", "slurm", None),
        ("ubuntu2004", "slurm", None),
        ("alinux2", "slurm", None),
        ("centos7", "awsbatch", "scheduler supports the following operating systems"),
        ("ubuntu1804", "awsbatch", "scheduler supports the following operating systems"),
        ("ubuntu2004", "awsbatch", "scheduler supports the following operating systems"),
        ("alinux2", "awsbatch", None),
    ],
)
def test_scheduler_os_validator(os, scheduler, expected_message):
    """slurm supports every OS; awsbatch supports alinux2 only."""
    assert_failure_messages(SchedulerOsValidator().execute(os, scheduler), expected_message)
@pytest.mark.parametrize(
    "min_count, max_count, expected_message",
    [
        (1, 2, None),
        (1, 1, None),
        (2, 1, "Max count must be greater than or equal to min count"),
    ],
)
def test_compute_resource_size_validator(min_count, max_count, expected_message):
    """max_count below min_count is invalid; equal or larger is fine."""
    failures = ComputeResourceSizeValidator().execute(min_count, max_count)
    assert_failure_messages(failures, expected_message)
@pytest.mark.parametrize(
    "resource_name, resources_length, max_length, expected_message",
    [
        ("SlurmQueues", 5, 10, None),
        ("SchedulerQueues", 10, 10, None),
        ("ComputeResources", 4, 5, None),
        (
            "SlurmQueues",
            11,
            10,
            "Invalid number of SlurmQueues (11) specified. Currently only supports up to 10 SlurmQueues.",
        ),
        (
            "SchedulerQueues",
            12,
            10,
            "Invalid number of SchedulerQueues (12) specified. Currently only supports up to 10 SchedulerQueues.",
        ),
        (
            "ComputeResources",
            6,
            5,
            "Invalid number of ComputeResources (6) specified. Currently only supports up to 5 ComputeResources.",
        ),
    ],
)
def test_max_count_validator(resource_name, resources_length, max_length, expected_message):
    """Counts at or below the cap pass; anything above fails with a descriptive message."""
    failures = MaxCountValidator().execute(
        resource_name=resource_name, resources_length=resources_length, max_length=max_length
    )
    assert_failure_messages(failures, expected_message)
# ---------------- EFA validators ---------------- #
@pytest.mark.parametrize(
    "instance_type, efa_enabled, gdr_support, efa_supported, expected_message",
    [
        # EFAGDR without EFA
        ("c5n.18xlarge", False, True, True, "GDR Support can be used only if EFA is enabled"),
        # EFAGDR with EFA
        ("c5n.18xlarge", True, True, True, None),
        # EFA without EFAGDR
        ("c5n.18xlarge", True, False, True, None),
        # Unsupported instance type
        ("t2.large", True, False, False, "does not support EFA"),
        ("t2.large", False, False, False, None),
        # EFA not enabled for instance type that supports it
        ("c5n.18xlarge", False, False, True, "supports EFA, but it is not enabled"),
    ],
)
def test_efa_validator(mocker, boto3_stubber, instance_type, efa_enabled, gdr_support, efa_supported, expected_message):
    """EFA/GDR settings must be consistent with the instance type's capabilities."""
    mock_aws_api(mocker)
    instance_info = InstanceTypeInfo(
        {
            "InstanceType": instance_type,
            "VCpuInfo": {"DefaultVCpus": 4, "DefaultCores": 2},
            "NetworkInfo": {"EfaSupported": instance_type == "c5n.18xlarge"},
        }
    )
    instance_type_info_mock = mocker.patch(
        "pcluster.aws.ec2.Ec2Client.get_instance_type_info",
        return_value=instance_info,
    )
    assert_failure_messages(
        EfaValidator().execute(instance_type, efa_enabled, gdr_support), expected_message
    )
    # The instance type is only looked up when EFA is requested.
    if efa_enabled:
        instance_type_info_mock.assert_called_with(instance_type)
@pytest.mark.parametrize(
    "efa_enabled, placement_group_enabled, placement_group_config_implicit, expected_message",
    [
        # Efa disabled
        (False, False, False, None),
        (False, True, False, None),
        (False, False, True, None),
        (False, True, True, None),
        # Efa enabled
        (True, False, False, "may see better performance using a placement group"),
        (True, False, True, "placement group for EFA-enabled compute resources must be explicit"),
        (True, True, True, "placement group for EFA-enabled compute resources must be explicit"),
        (True, True, False, None),
    ],
)
def test_efa_placement_group_validator(
    efa_enabled, placement_group_enabled, placement_group_config_implicit, expected_message
):
    """EFA-enabled compute resources should use an explicitly configured placement group."""
    failures = EfaPlacementGroupValidator().execute(
        efa_enabled, placement_group_enabled, placement_group_config_implicit
    )
    assert_failure_messages(failures, expected_message)
@pytest.mark.parametrize(
    "efa_enabled, security_groups, additional_security_groups, ip_permissions, ip_permissions_egress, expected_message",
    [
        # Efa disabled, no checks on security groups
        (False, [], [], [], [], None),
        # Efa enabled, if not specified SG will be created by the cluster
        (True, [], [], [], [], None),
        (True, [], ["sg-12345678"], [{"IpProtocol": "-1", "UserIdGroupPairs": []}], [], None),
        # Inbound rules only
        (
            True,
            ["sg-12345678"],
            [],
            [{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
            [],
            "security group that allows all inbound and outbound",
        ),
        # right sg
        (
            True,
            ["sg-12345678"],
            [],
            [{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
            [{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
            None,
        ),
        # right sg. Test when UserIdGroupPairs contains more entries
        (
            True,
            ["sg-12345678"],
            [],
            [
                {
                    "IpProtocol": "-1",
                    "UserIdGroupPairs": [
                        {"UserId": "123456789012", "GroupId": "sg-23456789"},
                        {"UserId": "123456789012", "GroupId": "sg-12345678"},
                    ],
                }
            ],
            [
                {
                    "IpProtocol": "-1",
                    "UserIdGroupPairs": [
                        {"UserId": "123456789012", "GroupId": "sg-23456789"},
                        {"UserId": "123456789012", "GroupId": "sg-12345678"},
                    ],
                }
            ],
            None,
        ),
        # Multiple sec groups, one right
        (
            True,
            ["sg-23456789", "sg-12345678"],
            [],
            [{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
            [{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
            None,
        ),
        # Multiple sec groups, no one right
        (True, ["sg-23456789", "sg-34567890"], [], [], [], "security group that allows all inbound and outbound"),
        # Wrong rules
        (
            True,
            ["sg-12345678"],
            [],
            [
                {
                    "PrefixListIds": [],
                    "FromPort": 22,
                    "IpRanges": [{"CidrIp": "203.0.113.0/24"}],
                    "ToPort": 22,
                    "IpProtocol": "tcp",
                    "UserIdGroupPairs": [],
                }
            ],
            [],
            "security group that allows all inbound and outbound",
        ),
        # Right SG specified as additional sg
        (
            True,
            ["sg-23456789"],
            ["sg-12345678"],
            [{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
            [{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
            None,
        ),
    ],
)
def test_efa_security_group_validator(
    boto3_stubber,
    efa_enabled,
    security_groups,
    additional_security_groups,
    ip_permissions,
    ip_permissions_egress,
    expected_message,
):
    """EFA requires at least one security group allowing all inbound and outbound traffic."""

    def _mocked_describe_sg_request(ip_perm, ip_perm_egress, sec_group):
        # BUGFIX: expected_params previously referenced the enclosing loop
        # variable `security_group` instead of the `sec_group` parameter; it
        # only worked because the closure happened to be evaluated in time.
        describe_security_groups_response = {
            "SecurityGroups": [
                {
                    "IpPermissionsEgress": ip_perm_egress,
                    "Description": "My security group",
                    "IpPermissions": ip_perm,
                    "GroupName": "MySecurityGroup",
                    "OwnerId": "123456789012",
                    "GroupId": sec_group,
                }
            ]
        }
        return MockedBoto3Request(
            method="describe_security_groups",
            response=describe_security_groups_response,
            expected_params={"GroupIds": [sec_group]},
        )

    def _perms_for(sec_group):
        # Only sg-12345678 is ever the fully valid (allow-all) security
        # group; every other group id reports no permissions.
        # BUGFIX: the original condition was `if "sg-12345678"` — a non-empty
        # string literal, which is always truthy, so every group got the full
        # permissions regardless of its id.
        if sec_group == "sg-12345678":
            return ip_permissions, ip_permissions_egress
        return [], []

    if efa_enabled:
        mocked_requests = []
        for security_group in security_groups or []:
            perm, perm_egress = _perms_for(security_group)
            mocked_requests.append(_mocked_describe_sg_request(perm, perm_egress, security_group))
        # We don't need to check additional sg only if security_group is not a custom one.
        for security_group in additional_security_groups or []:
            perm, perm_egress = _perms_for(security_group)
            mocked_requests.append(_mocked_describe_sg_request(perm, perm_egress, security_group))
        boto3_stubber("ec2", mocked_requests)
    actual_failures = EfaSecurityGroupValidator().execute(efa_enabled, security_groups, additional_security_groups)
    assert_failure_messages(actual_failures, expected_message)
# ---------------- Architecture Validators ---------------- #
@pytest.mark.parametrize(
    "disable_simultaneous_multithreading, architecture, expected_message",
    [
        (True, "x86_64", None),
        (False, "x86_64", None),
        (
            True,
            "arm64",
            "Disabling simultaneous multithreading is only supported"
            " on instance types that support these architectures",
        ),
        (False, "arm64", None),
    ],
)
def test_disable_simultaneous_multithreading_architecture_validator(
    disable_simultaneous_multithreading, architecture, expected_message
):
    """Disabling SMT is only valid on architectures that support it (x86_64)."""
    failures = DisableSimultaneousMultithreadingArchitectureValidator().execute(
        disable_simultaneous_multithreading, architecture
    )
    assert_failure_messages(failures, expected_message)
@pytest.mark.parametrize(
    "efa_enabled, os, architecture, expected_message",
    [
        (True, "alinux2", "x86_64", None),
        (True, "alinux2", "arm64", None),
        (True, "ubuntu1804", "x86_64", None),
        (True, "ubuntu1804", "arm64", None),
        (True, "ubuntu2004", "x86_64", None),
        (True, "ubuntu2004", "arm64", None),
    ],
)
def test_efa_os_architecture_validator(efa_enabled, os, architecture, expected_message):
    """All listed OS/architecture combinations support EFA."""
    assert_failure_messages(
        EfaOsArchitectureValidator().execute(efa_enabled, os, architecture), expected_message
    )
@pytest.mark.parametrize(
    "os, architecture, custom_ami, ami_search_filters, expected_message",
    [
        # All OSes supported for x86_64
        ("alinux2", "x86_64", None, None, None),
        ("alinux2", "x86_64", "custom-ami", None, None),
        ("centos7", "x86_64", None, None, None),
        ("centos7", "x86_64", "custom-ami", None, None),
        ("ubuntu1804", "x86_64", None, None, None),
        ("ubuntu2004", "x86_64", None, None, None),
        # arm64: centos7 only warns without a custom AMI or search filters
        ("alinux2", "arm64", None, None, None),
        ("alinux2", "arm64", "custom-ami", None, None),
        (
            "centos7",
            "arm64",
            None,
            None,
            "The aarch64 CentOS 7 OS is not validated for the 6th generation aarch64 instances",
        ),
        ("centos7", "arm64", None, {"ami_search_filters"}, None),
        ("centos7", "arm64", "custom-ami", None, None),
        ("ubuntu1804", "arm64", None, None, None),
        ("ubuntu2004", "arm64", None, None, None),
    ],
)
def test_architecture_os_validator(os, architecture, custom_ami, ami_search_filters, expected_message):
    """Verify that the correct set of OSes is supported for each supported architecture."""
    failures = ArchitectureOsValidator().execute(os, architecture, custom_ami, ami_search_filters)
    assert_failure_messages(failures, expected_message)
@pytest.mark.parametrize(
    "head_node_architecture, compute_architecture, compute_instance_type, expected_message",
    [
        ("x86_64", "x86_64", "c5.xlarge", None),
        (
            "x86_64",
            "arm64",
            "m6g.xlarge",
            "none of which are compatible with the architecture supported by the head node instance type",
        ),
        (
            "arm64",
            "x86_64",
            "c5.xlarge",
            "none of which are compatible with the architecture supported by the head node instance type",
        ),
        ("arm64", "arm64", "m6g.xlarge", None),
    ],
)
def test_instance_architecture_compatibility_validator(
    mocker, head_node_architecture, compute_architecture, compute_instance_type, expected_message
):
    """Compute instance architecture must match the head node architecture."""
    mock_aws_api(mocker)
    mocker.patch("pcluster.aws.ec2.Ec2Client.get_supported_architectures", return_value=[compute_architecture])
    failures = InstanceArchitectureCompatibilityValidator().execute(
        compute_instance_type, head_node_architecture
    )
    assert_failure_messages(failures, expected_message)
@pytest.mark.parametrize(
"name, expected_message",
[
("default", "forbidden"),
("1queue", "must begin with a letter"),
("queue_1", "only contain lowercase letters, digits and hyphens"),
("aQUEUEa", | |
import django
import unittest
from django.core.validators import validate_email, validate_slug, URLValidator
from django.utils.timezone import utc
from django.core.files.images import ImageFile
from django.core.management import call_command
import string
import datetime
import os
import six
from sampledatahelper.helper import SampleDataHelper
from sampledatahelper.model_helper import ModelDataHelper
from sampledata.mixins import image_mixin
from sampledata.exceptions import ParameterError, NotChoicesFound
# Initialize Django and create the test database tables before the test
# models are imported.  Django >= 1.7 requires an explicit setup() call and
# renamed syncdb to `migrate --run-syncdb`.
if django.VERSION >= (1, 7):
    django.setup()
    call_command('migrate', run_syncdb=True, interactive=False)
else:
    call_command('syncdb', interactive=False)
from . import models
class TestNumberHelpers(unittest.TestCase):
    """Numeric generators of SampleDataHelper: ranges, types, and error cases."""

    @classmethod
    def setUpClass(cls):
        # Seeding with the current time is fine here: assertions below only
        # check ranges/types, never exact values.
        cls.sd = SampleDataHelper(datetime.datetime.now())

    def test_int(self):
        # A degenerate range collapses to that single value.
        self.assertEqual(self.sd.int(min_value=5, max_value=5), 5)
        self.assertGreaterEqual(self.sd.int(min_value=1000000000), 1000000000)
        self.assertLessEqual(self.sd.int(max_value=3), 3)
        self.assertIsInstance(self.sd.int(), int)
        val = self.sd.int(5, 10)
        self.assertGreaterEqual(val, 5)
        self.assertLessEqual(val, 10)
        # min greater than max is rejected.
        with self.assertRaises(ParameterError):
            self.sd.int(10, 5)

    def test_number(self):
        # number(n) yields at most n digits.
        self.assertLessEqual(len(str(self.sd.number(5))), 5)
        with self.assertRaises(ParameterError):
            self.sd.number(0)
        with self.assertRaises(ParameterError):
            self.sd.number(-1)

    def test_digits(self):
        # digits(n) yields exactly n digits.
        self.assertEqual(len(str(self.sd.digits(5))), 5)
        with self.assertRaises(ParameterError):
            self.sd.digits(0)
        with self.assertRaises(ParameterError):
            self.sd.digits(-1)

    def test_float(self):
        value = self.sd.float(1, 5)
        self.assertIsInstance(value, float)
        self.assertGreaterEqual(value, 1)
        self.assertLessEqual(value, 5)
        # Degenerate ranges (including negative ones) collapse to that value.
        self.assertEqual(self.sd.float(0, 0), 0)
        self.assertEqual(self.sd.float(5, 5), 5)
        self.assertEqual(self.sd.float(-5, -5), -5)
        with self.assertRaises(ParameterError):
            self.sd.float(10, 5)

    def test_number_string(self):
        value = self.sd.number_string(5)
        self.assertIsInstance(value, six.string_types)
        self.assertEqual(len(value), 5)
        # Zero length is allowed and yields the empty string...
        self.assertEqual(self.sd.number_string(0), '')
        # ...but negative lengths are rejected.
        with self.assertRaises(ParameterError):
            self.sd.number_string(-1)
class TestTextHelpers(unittest.TestCase):
    """Text generators of SampleDataHelper: shapes, lengths, and error cases."""

    @classmethod
    def setUpClass(cls):
        cls.sd = SampleDataHelper()

    def test_char(self):
        value = self.sd.char()
        self.assertIsInstance(value, six.string_types)
        self.assertIn(value, string.ascii_letters)

    def test_chars(self):
        value = self.sd.chars()
        self.assertIsInstance(value, six.string_types)
        # Default bounds are 1..5 characters.
        self.assertGreaterEqual(len(value), 1)
        self.assertLessEqual(len(value), 5)
        self.assertEqual(len(self.sd.chars(5, 5)), 5)
        self.assertEqual(self.sd.chars(0, 0), '')
        with self.assertRaises(ParameterError):
            self.sd.chars(10, 5)

    def test_word(self):
        self.assertIsInstance(self.sd.word(), six.string_types)

    def test_words(self):
        value = self.sd.words()
        self.assertIsInstance(value, six.string_types)
        # Default bounds are 1..5 space-separated words.
        self.assertGreaterEqual(len(value.split(' ')), 1)
        self.assertLessEqual(len(value.split(' ')), 5)
        self.assertEqual(len(self.sd.words(5, 5).split(' ')), 5)
        self.assertEqual(self.sd.words(0, 0), '')
        with self.assertRaises(ParameterError):
            self.sd.words(10, 5)

    def test_email(self):
        # validate_email raises ValidationError on a malformed address.
        validate_email(self.sd.email())

    def test_url(self):
        URLValidator()(self.sd.url())

    def test_sentence(self):
        # Repeat a few times since the output is random.
        for _ in range(1, 10):
            value = self.sd.sentence()
            self.assertIsInstance(value, six.string_types)
            self.assertLessEqual(len(value), 255)

    def test_short_sentence(self):
        for _ in range(1, 10):
            value = self.sd.short_sentence()
            self.assertIsInstance(value, six.string_types)
            self.assertLessEqual(len(value), 100)

    def test_long_sentence(self):
        for _ in range(1, 10):
            value = self.sd.long_sentence()
            self.assertIsInstance(value, six.string_types)
            self.assertGreaterEqual(len(value), 150)

    def test_paragraph(self):
        self.assertIsInstance(self.sd.paragraph(), six.string_types)

    def test_paragraphs(self):
        for _ in range(1, 10):
            value = self.sd.paragraphs()
            self.assertIsInstance(value, six.string_types)
            # Default bounds are 1..5 blank-line-separated paragraphs.
            self.assertGreaterEqual(len(value.split('\n\n')), 1)
            self.assertLessEqual(len(value.split('\n\n')), 5)
        with self.assertRaises(ParameterError):
            self.sd.paragraphs(5, 1)

    def test_slug(self):
        validate_slug(self.sd.slug())
        value = self.sd.slug(5, 5)
        # Slugs never contain spaces, regardless of the word count.
        self.assertEqual(len(value.split(' ')), 1)
        validate_slug(value)
        with self.assertRaises(ParameterError):
            self.sd.slug(10, 5)

    def test_tags(self):
        self.assertIsInstance(self.sd.tags(), six.string_types)
        self.assertEqual(len(self.sd.tags(5, 5).split(',')), 5)
        # With a choice list, tags are drawn from it.
        value = self.sd.tags(5, 5, ['a', 'b', 'c'])
        self.assertIn(value.split(',')[0], ['a', 'b', 'c'])
        with self.assertRaises(ParameterError):
            self.sd.tags(10, 5)
class TestTimeHelpers(unittest.TestCase):
    """Tests for the date/time sample-data helpers.

    Uses the rich unittest assertions (assertIsInstance, assertGreaterEqual,
    ...) instead of assertTrue(expr) so failures report the actual values.
    """

    @classmethod
    def setUpClass(cls):
        cls.sd = SampleDataHelper()

    def test_date_between(self):
        value = self.sd.date_between(
            datetime.date(year=2000, month=1, day=1),
            datetime.date(year=2001, month=1, day=1),
        )
        self.assertIsInstance(value, datetime.date)
        self.assertGreater(value, datetime.date(year=2000, month=1, day=1))
        self.assertLess(value, datetime.date(year=2001, month=1, day=1))
        # Reversed bounds are a caller error.
        with self.assertRaises(ParameterError):
            self.sd.date_between(
                datetime.date(year=2001, month=1, day=1),
                datetime.date(year=2000, month=1, day=1),
            )

    def test_future_date(self):
        value = self.sd.future_date()
        self.assertIsInstance(value, datetime.date)
        self.assertGreaterEqual(value, datetime.date.today())
        self.assertLessEqual(value, datetime.date.today() + datetime.timedelta(days=365))
        value = self.sd.future_date(0, 10)
        self.assertGreaterEqual(value, datetime.date.today())
        self.assertLessEqual(value, datetime.date.today() + datetime.timedelta(days=10))
        with self.assertRaises(ParameterError):
            self.sd.future_date(100, 0)
        with self.assertRaises(ParameterError):
            self.sd.future_date(-10, 10)

    def test_past_date(self):
        value = self.sd.past_date()
        self.assertIsInstance(value, datetime.date)
        self.assertLessEqual(value, datetime.date.today())
        self.assertGreaterEqual(value, datetime.date.today() - datetime.timedelta(days=365))
        value = self.sd.past_date(0, 10)
        self.assertLessEqual(value, datetime.date.today())
        self.assertGreaterEqual(value, datetime.date.today() - datetime.timedelta(days=10))
        with self.assertRaises(ParameterError):
            self.sd.past_date(100, 0)
        with self.assertRaises(ParameterError):
            self.sd.past_date(-10, 10)

    def test_datetime_between(self):
        value = self.sd.datetime_between(
            datetime.datetime(year=2000, month=1, day=1),
            datetime.datetime(year=2001, month=1, day=1),
        )
        self.assertIsInstance(value, datetime.datetime)
        self.assertGreater(value, datetime.datetime(year=2000, month=1, day=1))
        self.assertLess(value, datetime.datetime(year=2001, month=1, day=1))
        with self.assertRaises(ParameterError):
            self.sd.datetime_between(
                datetime.datetime(year=2001, month=1, day=1),
                datetime.datetime(year=2000, month=1, day=1),
            )

    def test_future_datetime(self):
        # Capture "now" before calling so the lower-bound check is not racy.
        now = datetime.datetime.utcnow().replace(tzinfo=utc)
        value = self.sd.future_datetime()
        self.assertIsInstance(value, datetime.datetime)
        self.assertGreaterEqual(value, now)
        self.assertLessEqual(value, now + datetime.timedelta(minutes=1440))
        now = datetime.datetime.utcnow().replace(tzinfo=utc)
        value = self.sd.future_datetime(1, 10)
        self.assertGreaterEqual(value, now)
        self.assertLessEqual(value, now + datetime.timedelta(minutes=10))
        with self.assertRaises(ParameterError):
            self.sd.future_datetime(100, 0)
        with self.assertRaises(ParameterError):
            self.sd.future_datetime(-10, 10)

    def test_past_datetime(self):
        value = self.sd.past_datetime()
        self.assertIsInstance(value, datetime.datetime)
        self.assertLessEqual(value, datetime.datetime.utcnow().replace(tzinfo=utc))
        self.assertGreaterEqual(
            value,
            datetime.datetime.utcnow().replace(tzinfo=utc) - datetime.timedelta(minutes=1440))
        value = self.sd.past_datetime(0, 10)
        self.assertLessEqual(value, datetime.datetime.utcnow().replace(tzinfo=utc))
        self.assertGreaterEqual(
            value,
            datetime.datetime.utcnow().replace(tzinfo=utc) - datetime.timedelta(minutes=10))
        with self.assertRaises(ParameterError):
            self.sd.past_datetime(100, 0)
        with self.assertRaises(ParameterError):
            self.sd.past_datetime(-10, 10)

    def test_date(self):
        value = self.sd.date()
        self.assertIsInstance(value, datetime.date)
        self.assertGreaterEqual(value, datetime.date.today() - datetime.timedelta(days=365))
        self.assertLessEqual(value, datetime.date.today() + datetime.timedelta(days=365))
        value = self.sd.date(-10, 10)
        self.assertGreaterEqual(value, datetime.date.today() - datetime.timedelta(days=10))
        self.assertLessEqual(value, datetime.date.today() + datetime.timedelta(days=10))
        with self.assertRaises(ParameterError):
            self.sd.date(100, 0)

    def test_datetime(self):
        value = self.sd.datetime()
        self.assertIsInstance(value, datetime.datetime)
        self.assertGreaterEqual(
            value,
            datetime.datetime.utcnow().replace(tzinfo=utc) - datetime.timedelta(minutes=1440))
        self.assertLessEqual(
            value,
            datetime.datetime.utcnow().replace(tzinfo=utc) + datetime.timedelta(minutes=1440))
        value = self.sd.datetime(-10, 10)
        self.assertGreaterEqual(
            value,
            datetime.datetime.utcnow().replace(tzinfo=utc) - datetime.timedelta(minutes=10))
        self.assertLessEqual(
            value,
            datetime.datetime.utcnow().replace(tzinfo=utc) + datetime.timedelta(minutes=10))
        with self.assertRaises(ParameterError):
            self.sd.datetime(100, 0)

    def test_time(self):
        value = self.sd.time()
        self.assertIsInstance(value, datetime.time)
class TestLocalizedHelpers(unittest.TestCase):
    """Tests for locale-dependent helpers (state codes, names, phones, ids).

    Uses assertIn/assertIsInstance instead of assertTrue(expr) so failures
    show the value that broke the expectation.
    """

    @classmethod
    def setUpClass(cls):
        cls.sd = SampleDataHelper()

    def test_state_code(self):
        value = self.sd.state_code('es')
        self.assertIn(value, ['01', '02', '03', '04', '05', '06', '07',
                              '08', '09', '10', '11', '12', '13', '14',
                              '15', '16', '17', '18', '19', '20', '21',
                              '22', '23', '24', '25', '26', '27', '28',
                              '29', '30', '31', '32', '33', '34', '35',
                              '36', '37', '38', '39', '40', '41', '42',
                              '43', '44', '45', '46', '47', '48', '49',
                              '50', '51', '52', 'AD', ])
        value = self.sd.state_code('us')
        self.assertIn(value, ['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT',
                              'DE', 'FL', 'GA', 'HI', 'ID', 'IL', 'IN',
                              'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA',
                              'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV',
                              'NH', 'NJ', 'NM', 'NY', 'NC', 'ND', 'OH',
                              'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN',
                              'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI',
                              'WY', 'AS', 'DC', 'FM', 'GU', 'MH', 'MP',
                              'PW', 'PR', 'VI', ])
        with self.assertRaises(ParameterError):
            self.sd.state_code('invalid-code')

    def test_name(self):
        value = self.sd.name()
        self.assertIsInstance(value, six.string_types)
        value = self.sd.name(as_list=True)
        self.assertIsInstance(value, list)
        self.assertEqual(len(value), 1)
        value = self.sd.name(number=3, as_list=True)
        self.assertIsInstance(value, list)
        self.assertEqual(len(value), 3)
        # Every supported locale yields a single given name by default.
        for locale in ('es', 'cat', 'fr', 'us'):
            value = self.sd.name(locale=locale, as_list=True)
            self.assertIsInstance(value, list)
            self.assertEqual(len(value), 1)
        with self.assertRaises(ParameterError):
            self.sd.name(number=0)
        with self.assertRaises(ParameterError):
            self.sd.name(number=-1)
        with self.assertRaises(ParameterError):
            self.sd.name(locale="not-valid-locale")

    def test_surname(self):
        value = self.sd.surname()
        self.assertIsInstance(value, six.string_types)
        value = self.sd.surname(as_list=True)
        self.assertIsInstance(value, list)
        self.assertEqual(len(value), 1)
        value = self.sd.surname(number=3, as_list=True)
        self.assertIsInstance(value, list)
        self.assertEqual(len(value), 3)
        # Spanish/Catalan conventions use two surnames; French/US use one.
        for locale, expected in (('es', 2), ('cat', 2), ('fr', 1), ('us', 1)):
            value = self.sd.surname(locale=locale, as_list=True)
            self.assertIsInstance(value, list)
            self.assertEqual(len(value), expected)
        with self.assertRaises(ParameterError):
            self.sd.surname(number=0)
        with self.assertRaises(ParameterError):
            self.sd.surname(number=-1)
        with self.assertRaises(ParameterError):
            self.sd.surname(locale="not-valid-locale")

    def test_fullname(self):
        value = self.sd.fullname()
        self.assertIsInstance(value, six.string_types)
        value = self.sd.fullname(as_list=True)
        self.assertIsInstance(value, list)
        self.assertEqual(len(value), 2)
        # Full name = given name + the locale's surname count.
        for locale, expected in (('es', 3), ('cat', 3), ('fr', 2), ('us', 2)):
            value = self.sd.fullname(locale=locale, as_list=True)
            self.assertIsInstance(value, list)
            self.assertEqual(len(value), expected)
        with self.assertRaises(ParameterError):
            self.sd.fullname(locale="not-valid-locale")

    def test_phone(self):
        value = self.sd.phone(locale='es')
        self.assertIsInstance(value, six.string_types)
        self.assertEqual(len(value), 9)
        # Spanish mobile/landline numbers start with 6 or 9.
        self.assertIn(value[0], ['6', '9'])
        value = self.sd.phone(locale='es', country_code=True)
        self.assertIsInstance(value, six.string_types)
        self.assertEqual(len(value), 13)
        self.assertIn(value[0:5], ['+34 6', '+34 9'])
        with self.assertRaises(ParameterError):
            self.sd.phone(locale="not-valid-locale")

    def test_zip_code(self):
        value = self.sd.zip_code(locale='es')
        self.assertIsInstance(value, six.string_types)
        self.assertEqual(len(value), 5)
        with self.assertRaises(ParameterError):
            self.sd.zip_code(locale="not-valid-locale")

    def test_id_card(self):
        value = self.sd.id_card(locale='es')
        self.assertIsInstance(value, six.string_types)
        self.assertEqual(len(value), 9)
        # Spanish DNI: 8 digits plus a control letter from this alphabet.
        self.assertIn(value[8], "TRWAGMYFPDXBNJZSQVHLCKET")
        with self.assertRaises(ParameterError):
            self.sd.id_card(locale="not-valid-locale")
class TestImageHelpers(unittest.TestCase):
    """Tests for the image-generation helpers."""

    @classmethod
    def setUpClass(cls):
        cls.sd = SampleDataHelper()

    def test_image_from_directory(self):
        value = self.sd.image_from_directory(os.path.dirname(__file__))
        self.assertIsInstance(value, ImageFile)
        with self.assertRaises(ParameterError):
            self.sd.image_from_directory('not-existing-directory')
        with self.assertRaises(NotChoicesFound):
            self.sd.image_from_directory(os.path.dirname(__file__), ['.not-valid-extension'])

    def test_image(self):
        value = self.sd.image(100, 100)
        self.assertIsInstance(value, ImageFile)
        # Every supported generator type must return an ImageFile.
        for typ in ("simple", "plasma", "mandelbrot", "ifs", "random"):
            value = self.sd.image(100, 100, typ=typ)
            self.assertIsInstance(value, ImageFile)
        # Simulate a missing PIL installation. BUG FIX: restore the flag in a
        # finally block; previously a failed assertRaises left PIL_INSTALLED
        # False and poisoned every later image test.
        image_mixin.PIL_INSTALLED = False
        try:
            with self.assertRaises(ImportError):
                self.sd.image(100, 100, typ="random")
        finally:
            image_mixin.PIL_INSTALLED = True
        with self.assertRaises(ParameterError):
            self.sd.image(100, 100, typ="not-valid-type")
        # Zero-sized dimensions are rejected.
        with self.assertRaises(ParameterError):
            self.sd.image(0, 100)
        with self.assertRaises(ParameterError):
            self.sd.image(100, 0)
class TestOtherHelpers(unittest.TestCase):
@classmethod
def setUpClass(cls):
    # One shared helper instance for the whole class; these tests only read it.
    cls.sd = SampleDataHelper()
def test_boolean(self):
    """boolean() returns a real bool, not a truthy int."""
    self.assertIsInstance(self.sd.boolean(), bool)
def test_ipv4(self):
    """ipv4() produces a dotted quad (four '.'-separated fields)."""
    value = self.sd.ipv4()
    self.assertIsInstance(value, six.string_types)
    self.assertEqual(len(value.split('.')), 4)
def test_ipv6(self):
    """ipv6() produces eight ':'-separated groups."""
    value = self.sd.ipv6()
    self.assertIsInstance(value, six.string_types)
    self.assertEqual(len(value.split(':')), 8)
def test_mac_address(self):
    """mac_address() produces six ':'-separated octets."""
    value = self.sd.mac_address()
    self.assertIsInstance(value, six.string_types)
    self.assertEqual(len(value.split(':')), 6)
def test_path(self):
    """path() honours the absolute/min_levels/max_levels/extension options."""
    value = self.sd.path()
    self.assertIsInstance(value, six.string_types)
    # Relative path with exactly five levels -> five '/'-separated parts.
    value = self.sd.path(absolute=False, min_levels=5, max_levels=5)
    self.assertEqual(len(value.split('/')), 5)
    # Absolute path: the leading '/' adds an empty first component.
    value = self.sd.path(absolute=True, min_levels=5, max_levels=5)
    self.assertEqual(len(value.split('/')), 6)
    value = self.sd.path(extension=".jpg")
    self.assertEqual(value[-4:], ".jpg")
    # min_levels > max_levels must be rejected.
    with self.assertRaises(ParameterError):
        self.sd.path(absolute=True, min_levels=10, max_levels=5)
def test_hex_chars(self):
    """hex_chars() returns 1-5 characters by default; bounds are honoured."""
    value = self.sd.hex_chars()
    self.assertIsInstance(value, six.string_types)
    self.assertGreaterEqual(len(value), 1)
    self.assertLessEqual(len(value), 5)
    value = self.sd.hex_chars(5, 5)
    self.assertEqual(len(value), 5)
    self.assertEqual(self.sd.hex_chars(0, 0), '')
    # min > max must be rejected.
    with self.assertRaises(ParameterError):
        self.sd.hex_chars(10, 5)
def test_choice(self):
    """choice() picks from a list; empty or non-list input raises."""
    only_element = self.sd.choice([10])
    self.assertEqual(only_element, 10)
    for bad_input in ([], 7):
        with self.assertRaises(ParameterError):
            self.sd.choice(bad_input)
def test_choices_key(self):
    """choices_key() returns the key of a (key, label) pair; malformed input raises."""
    pairs = [(1, 'test')]
    self.assertEqual(self.sd.choices_key(pairs), 1)
    # Empty list, non-list, and a list of non-pairs are all rejected.
    for bad_input in ([], 7, [10]):
        with self.assertRaises(ParameterError):
            self.sd.choices_key(bad_input)
def test_db_object(self):
mdh = | |
from ballot import ballot2form, form2ballot, blank_ballot, sign, uuid, regex_email, rsakeys
from ranking_algorithms import iro, borda, schulze
import re
def index():
    """Landing page; everything is rendered by the view."""
    return {}
@auth.requires_login()
def elections():
    """List the user's own elections and any open ballots awaiting their vote."""
    response.subtitle = T('My Elections')
    # Elections created by the logged-in user, newest first.
    elections = db(db.election.created_by==auth.user.id).select(
        orderby=~db.election.created_on)
    # Pending invitations: voter rows for this email that have not voted yet,
    # joined to elections that are still open (no deadline or deadline in the
    # future).
    ballots = db(db.voter.email == auth.user.email)(
        db.voter.voted==False)(db.voter.election_id==db.election.id)(
        (db.election.deadline==None)|(db.election.deadline>request.now)).select()
    return dict(elections=elections,ballots=ballots)
@auth.requires(auth.user and auth.user.is_manager)
def edit():
    """Create a new election or edit one; only the creator may edit."""
    response.subtitle = T('Edit Ballot')
    # args(0) is the election id; default 0 means "create a new election".
    election = db.election(request.args(0,cast=int,default=0))
    if election and not election.created_by==auth.user_id:
        redirect(URL('not_authorized'))
    if not election:
        # New election: generate its RSA key pair and seed field defaults
        # before building the form (SQLFORM reads the defaults below).
        (pubkey, privkey) = rsakeys()
        db.election.voters.default = auth.user.email
        db.election.managers.default = auth.user.email
        db.election.public_key.default = pubkey
        db.election.private_key.default = privkey
    form = SQLFORM(db.election,election,deletable=True,
                   submit_button="Save and Preview").process()
    if form.accepted: redirect(URL('start',args=form.vars.id))
    return dict(form=form)
@auth.requires(auth.user and auth.user.is_manager)
def start():
    """Preview the ballot and offer to start (email out) the election."""
    election = db.election(request.args(0,cast=int)) or redirect(URL('index'))
    check_closed(election)
    response.subtitle = election.title+T(' / Start')
    # Render a demo form from the stored ballot model for preview.
    demo = ballot2form(election.ballot_model)
    return dict(demo=demo,election=election)
@auth.requires(auth.user and auth.user.is_manager)
def start_callback():
    """Create voters and signed blank ballots, then email each voter a link.

    On submit: for each address found in election.voters, ensure a voter row
    (and, for new voters, a blank ballot) exists, then send each voter a
    personalized voting link. Failed sends are collected in `failures` and
    reported back to the view.
    """
    election = db.election(request.args(0,cast=int)) or redirect(URL('index'))
    check_closed(election)
    form = SQLFORM.factory(
        submit_button=T('Email Voters and Start Election Now!'))
    form.element(_type='submit').add_class('btn')
    failures = []
    emails = []
    owner_email = election.created_by.email
    if form.process().accepted:
        ballot_counter = db(db.ballot.election_id==election.id).count()
        for email in regex_email.findall(election.voters):
            email = email.lower()
            voter = db(db.voter.election_id==election.id)\
                (db.voter.email==email).select().first()
            if voter:
                voter_uuid = voter.voter_uuid
            else:
                # create a voter
                voter_uuid = 'voter-'+uuid()
                voter = db.voter.insert(
                    election_id=election.id,
                    voter_uuid=voter_uuid,
                    email=email,invited_on=None)
                # create a ballot; its uuid encodes election id + sequence.
                # NOTE(review): grouped with new-voter creation so the ballot
                # pool grows by one blank ballot per new voter -- the original
                # indentation was lost in extraction; confirm against upstream.
                ballot_counter+=1
                ballot_uuid = 'ballot-%i-%.6i' % (election.id,ballot_counter)
                blank_ballot_content = blank_ballot(ballot_uuid)
                # Sign the blank ballot so voters can verify authenticity later.
                signature = 'signature-'+sign(blank_ballot_content,
                                              election.private_key)
                db.ballot.insert(
                    election_id=election.id,
                    ballot_content = blank_ballot_content,
                    ballot_uuid=ballot_uuid,
                    signature = signature)
            link_vote = URL('vote',args=(election.id,voter_uuid),scheme=SCHEME)
            link_ballots = URL('ballots',args=election.id,scheme=SCHEME)
            link_results = URL('results',args=election.id,scheme=SCHEME)
            body = message_replace(election.vote_email,
                                   election_id = election.id,
                                   owner_email = owner_email,
                                   title=election.title,
                                   link=link_vote,
                                   link_ballots=link_ballots,
                                   link_results=link_results)
            subject = '%s [%s]' % (election.title, election.id)
            emails.append((voter,email,subject,body))
        # Commit before sending so voter/ballot rows survive a mail failure.
        db.commit()
        sender = election.email_sender or mail.settings.sender
        for voter, to, subject, body in emails:
            if mail.send(to=to, subject=subject, message=body, sender=sender):
                db(db.voter.id==voter).update(invited_on=request.now)
            else:
                failures.append(to)
        if not failures:
            session.flash = T('Emails sent successfully')
            redirect(URL('elections'),client_side=True)
    return dict(form=form,failures=failures,election=election)
@auth.requires(False) # for now this is disabled
def self_service():
    """Resend a voting link to a voter who knows the election id and email.

    Currently unreachable (auth.requires(False)). BUG FIXES over the original:
    it read form.vars.id instead of form.vars.election_id, kept running after
    recording form errors, and referenced the undefined names election_id,
    voter_uuid and owner_email -- it would have crashed if ever enabled.
    """
    form = SQLFORM.factory(
        Field('election_id','integer',requires=IS_NOT_EMPTY()),
        Field('email',requires=IS_EMAIL()))
    if form.process().accepted:
        election = db.election(form.vars.election_id)
        if not election:
            form.errors['election_id'] = 'Invalid'
            return dict(form=form)
        voter = db.voter(election_id=election.id, email=form.vars.email)
        if not voter:
            form.errors['email'] = 'Invalid'
            return dict(form=form)
        if voter.voted:
            response.flash = T('User has voted already')
        else:
            link_vote = URL('vote',args=(election.id,voter.voter_uuid),scheme=SCHEME)
            link_ballots = URL('ballots',args=election.id,scheme=SCHEME)
            link_results = URL('results',args=election.id,scheme=SCHEME)
            body = message_replace(election.vote_email,
                                   election_id=election.id,
                                   owner_email=election.created_by.email,
                                   title=election.title,
                                   link=link_vote,
                                   link_ballots=link_ballots,
                                   link_results=link_results)
            sender = election.email_sender or mail.settings.sender
            if mail.send(to=voter.email, subject=election.title, message=body, sender=sender):
                response.flash = T('Email sent')
            else:
                response.flash = T('Unable to send email')
    return dict(form=form)
@auth.requires(auth.user and auth.user.is_manager)
def reminders():
    """Show the reminder page for one election."""
    current = db.election(request.args(0, cast=int))
    if not current:
        redirect(URL('index'))
    response.subtitle = current.title + T(' / Reminders')
    return dict(election=current)
@auth.requires(auth.user and auth.user.is_manager)
def reminders_callback():
    """Build a checkbox form of voters and email reminders to the checked ones.

    Checkboxes default to checked for voters who have not voted yet. Failed
    sends are collected in `failures` and returned to the view.
    """
    election = db.election(request.args(0,cast=int)) or redirect(URL('index'))
    owner_email = election.created_by.email
    failures = []
    emails = []
    fields = []
    for email in regex_email.findall(election.voters):
        voter = db(db.voter.election_id==election.id)\
            (db.voter.email==email).select().first()
        voter_uuid = voter.voter_uuid
        key = 'voter_%s' % voter.id
        # One checkbox per voter, pre-checked when the voter has not voted.
        fields.append(Field(key,'boolean',default=not voter.voted,
                            label = voter.email))
        if key in request.post_vars:
            link = URL('vote',args=(election.id,voter_uuid),scheme=SCHEME)
            link_ballots = URL('ballots',args=election.id,scheme=SCHEME)
            link_results = URL('results',args=election.id,scheme=SCHEME)
            body = message_replace(election.vote_email,
                                   election_id = election.id,
                                   owner_email = owner_email,
                                   title=election.title,
                                   link=link,
                                   link_ballots=link_ballots,
                                   link_results=link_results)
            subject = '%s [%s]' % (election.title, election.id)
            emails.append((email,subject,body))
    form = SQLFORM.factory(*fields).process()
    if form.accepted:
        sender = election.email_sender or mail.settings.sender
        for to, subject, body in emails:
            if not mail.send(to=to, subject=subject, message=body, sender=sender):
                # BUG FIX: the original appended the stale loop variable
                # `email` (always the last voter scanned above) instead of
                # the address that actually failed.
                failures.append(to)
        if not failures:
            session.flash = T('Emails sent successfully')
            redirect(URL('elections'),client_side=True)
    return dict(form=form,failures=failures,election=election)
@auth.requires(auth.user and auth.user.is_manager)
def recompute_results():
    """Force a re-tally of an election, then jump to its results page."""
    election = db.election(request.args(0,cast=int)) or redirect(URL('index'))
    compute_results(election)
    redirect(URL('results',args=election.id))
def compute_results(election):
    """Tally the voted ballots of *election* and store counters on the record.

    Only the 'simple-majority' scheme is actually implemented: counters maps
    'name/simple-majority/value' -> number of ballots that picked that value.
    The 'ranking' branch raises NotImplementedError, so everything below the
    raise in that branch is dead code kept for future work.
    """
    query = db.ballot.election_id==election.id
    voted_ballots = db(query)(db.ballot.voted==True).select()
    counters = {}
    rankers = {}
    for k,ballot in enumerate(voted_ballots):
        for name in ballot.results:
            # name is the name of a group as in {{name:ranking}}
            # scheme is "ranking" or "checkbox" (default)
            # value is the <input value="value"> assigned to this checkbox or input
            # IMPORTANT: ONLY SIMPLE MAJORITY IS SUPPORTED
            key = name +'/simple-majority/' + ballot.results[name]
            (name,scheme,value) = key.split('/',3)
            if scheme == 'simple-majority':
                # counters[key] counts how many times this checkbox was checked
                counters[key] = counters.get(key,0) + 1
            elif scheme == 'ranking':
                # Dead branch: keys are always built with 'simple-majority'
                # above, and this raise stops the code below from running.
                raise NotImplementedError
                # rankers[name] = [[2,1,3],[3,1,2],[1,2,3],...]
                # The sublists in rankers mean:
                # [[my first-preferred candidate is
                # the candidate whose identifying
                # number on the original ballot was
                # <the number given in this first
                # position here>],
                # [my second-preferred candidate is
                # the candidate whose identifying
                # number on the original ballot was
                # <the number given in this second
                # position here>],
                # [my third-preferred candidate is
                # the candidate whose identifying
                # number on the original ballot was
                # <the number given in this third
                # position here>], ...]
                #
                # len(rankers[name]) = len(voted_ballots)
                # rankers[name][i][0] = ...?
                if not name in rankers:
                    rankers[name] = []
                if len(rankers[name])<k+1:
                    rankers[name].append([])
                vote = rankers[name][-1]
                # NOTE(review): `results` is undefined in this scope (probably
                # meant ballot.results) -- unreachable today, but would raise
                # NameError if the branch were ever enabled.
                print "ballot id:",ballot.id, "key:",key, "results[key]:",results[key], "vote:",vote
                ranking = int(results[key])
                d = ranking-len(vote)
                if d>0:
                    print "vote before:", vote
                    vote+=[0]*d
                    print "vote after: ", vote
                vote[ranking-1] = value
            else:
                raise RuntimeError("Invalid Voting Scheme")
    # rankers is always empty while only simple-majority is supported, so this
    # aggregation loop never runs today.
    for name in rankers:
        votes = rankers[name]
        cmajority = borda(votes,mode='exponential')
        ciro = iro(votes)
        cschulze = schulze(votes)
        # NOTE(review): `k` here is the leftover index from the enumerate loop
        # above (immediately shadowed by the tuples below) -- looks buggy, but
        # the loop is unreachable.
        key = name+'/simple-majority/'+k
        for (r,k) in cmajority:
            counters[key] = 'M:%s' % r
        for (r,k) in ciro:
            counters[key] += ' I:%s' % r
        for (r,k) in cschulze:
            counters[key] += ' S:%s' % r
    # Python 2 print statement: this controller targets web2py on Python 2.
    print counters
    election.update_record(counters=counters)
#@cache(request.env.path_info,time_expire=300,cache_model=cache.ram)
def results():
    """Show election results; non-owners only after the deadline has passed."""
    id = request.args(0,cast=int) or redirect(URL('index'))
    election = db.election(id) or redirect(URL('index'))
    if auth.user_id!=election.created_by and \
            not(election.deadline and request.now>election.deadline):
        session.flash = T('Results not yet available')
        redirect(URL('index'))
    response.subtitle = election.title + T(' / Results')
    # Recompute live results while voting is still open (or in DEBUG_MODE);
    # once the deadline has passed the stored counters are reused.
    if (DEBUG_MODE or not election.counters or
            not election.deadline or request.now<=election.deadline):
        compute_results(election)
    form = ballot2form(election.ballot_model, counters=election.counters)
    return dict(form=form,election=election)
def hash_ballot(text):
    """Normalize a rendered ballot and return its hash.

    Strips voter-specific markup (checked/disabled flags, input values and
    'ballot-...' identifiers) so that untampered ballots from the same
    election all hash to the same value; used by ballots() to flag tampering.
    Uses the module-level ``import re``; the original shadowed it with a
    redundant local import and used non-raw regex strings.
    """
    text = text.replace('checked="checked" ', '')
    text = text.replace('disabled="disabled" ', '')
    text = re.sub(r'value="\d+"', '', text)
    text = re.sub(r'ballot\S+', '', text)
    return hash(text)
def ballots():
    """List all ballots of an election and flag template tampering.

    Every voted ballot should normalize (hash_ballot) to the same value; more
    than one distinct hash means some ballot's markup was altered.
    """
    election = db.election(request.args(0,cast=int)) or \
        redirect(URL('invalid_link'))
    response.subtitle = election.title + T(' / Ballots')
    ballots = db(db.ballot.election_id==election.id).select(
        orderby=db.ballot.ballot_uuid)
    tampered = len(set(hash_ballot(b.ballot_content)
                       for b in ballots if b.voted))>1
    return dict(ballots=ballots,election=election, tampered=tampered)
# @auth.requires(auth.user and auth.user.is_manager)
def email_voter_and_managers(election,voter,ballot,body):
    """Email *body* with the ballot HTML attached to the voter and the managers.

    Returns the send result for the voter; the managers' copy is best-effort
    (its result is ignored).
    """
    import cStringIO  # Python 2 (web2py); io.BytesIO is the py3 analogue
    attachment = mail.Attachment(
        filename=ballot.ballot_uuid+'.html',
        payload=cStringIO.StringIO(ballot.ballot_content))
    sender = election.email_sender or mail.settings.sender
    ret = mail.send(to=voter.email,
                    subject='Receipt for %s' % election.title,
                    message=body,attachments=[attachment],
                    sender=sender)
    mail.send(to=regex_email.findall(election.managers),
              subject='Copy of Receipt for %s' % election.title,
              message=body,
              attachments=[attachment],
              sender=sender)
    return ret
def check_closed(election):
    """Redirect away with a flash message when *election* is already closed."""
    if not election.closed:
        return
    session.flash = T('Election already closed')
    redirect(URL('elections'))
@auth.requires(auth.user and auth.user.is_manager)
def close_election():
    """Close an election: assign leftover ballots, tally, archive, and lock.

    After the manager confirms:
    1) the deadline is set to now;
    2) each non-voter is paired with an unused ballot, which is emailed to
       them and marked assigned (so every ballot is accounted for);
    3) results are computed and a zip of all ballots plus a CSV index is
       written under static/zips/<election_id>.zip;
    4) the election is marked closed.
    """
    import zipfile, os
    election = db.election(request.args(0,cast=int)) or \
        redirect(URL('invalid_link'))
    #check_closed(election)
    response.subtitle = election.title
    dialog = FORM.confirm(T('Close'),
                          {T('Cancel'):URL('elections')})
    if dialog.accepted:
        election.update_record(deadline=request.now)
        voters = db(db.voter.election_id==election.id)\
            (db.voter.voted==False).select()
        ballots = db(db.ballot.election_id==election.id)\
            (db.ballot.voted==False)(db.ballot.assigned==False).select()
        # Sanity check: leftover ballots must match leftover voters 1:1.
        if ballots and len(voters)!=len(ballots):
            session.flash = T('Voted corrupted ballots/voter mismatch')
            redirect(URL('elections'))
        owner_email = election.created_by.email
        for i in range(len(voters)):
            voter, ballot = voters[i], ballots[i]
            link = URL('ballot',args=ballot.ballot_uuid,scheme='http')
            body = message_replace(election.not_voted_email,
                                   election_id=election.id,
                                   owner_email = owner_email,
                                   title=election.title,
                                   signature=ballot.signature,link=link)
            email_voter_and_managers(election,voter,ballot,body)
            ballot.update_record(assigned=True)
        compute_results(election)
        zippath = os.path.join(request.folder,'static','zips')
        if not os.path.exists(zippath):
            os.mkdir(zippath)
        archive = zipfile.ZipFile(
            os.path.join(zippath,'%s.zip' % election.id),'w')
        dbset = db(db.ballot.election_id==election.id)
        ballots = dbset.select()
        # One archive entry per ballot, named by its uuid.
        for ballot in ballots:
            archive.writestr(ballot.ballot_uuid,ballot.ballot_content)
        # Index of all ballots (metadata only) in CSV form.
        ballots = dbset.select(
            db.ballot.election_id,
            db.ballot.ballot_uuid,
            db.ballot.assigned,
            db.ballot.voted,
            db.ballot.voted_on,
            db.ballot.signature,
            orderby=db.ballot.ballot_uuid)
        archive.writestr('ballots.csv',str(ballots))
        archive.close()
        election.update_record(closed=True)
        session.flash = 'Election Closed!'
        redirect(URL('results',args=election.id))
    return dict(dialog=dialog,election=election)
def ballot():
    """Public ballot viewer; URL args are (ballot_uuid[, signature]).

    Before the deadline a ballot is only visible to someone who already holds
    its signature (i.e. the receipt emailed to the voter).
    """
    ballot_uuid = request.args(0) or redirect(URL('index'))
    signature = request.args(1)
    # The election id is embedded in the uuid: 'ballot-<election>-<counter>'.
    election_id = int(ballot_uuid.split('-')[1])
    election = db.election(election_id) or redirect(URL('index'))
    ballot = db.ballot(election_id=election.id,ballot_uuid=ballot_uuid) \
        or redirect(URL('invalid_link'))
    if (not election.deadline or election.deadline>request.now) \
            and ballot.signature!=signature:
        session.flash = "your ballot is not visible until election is closed"
        redirect(URL('index'))
    response.subtitle = election.title + T(' / Ballot')
    return dict(ballot=ballot,election=election)
def recorded():
    """Vote-recorded confirmation page; the view does all the rendering."""
    return {}
def ballot_verifier():
    """Same as ballot() but served as plain text, for external verification tools."""
    response.headers['Content-Type'] = 'text/plain'
    return ballot()
def vote():
    """Voting page: validate the voter link and record the vote on a random ballot.

    The voter is identified by (election_id, voter_uuid) from the URL. On a
    valid submission a random unused ballot is locked, filled in, signed, and
    the voter is emailed a receipt. The ballot is never linked to the voter in
    the database -- only the emailed receipt ties them together.
    """
    import hashlib  # NOTE(review): appears unused in this function
    response.menu = []
    election_id = request.args(0,cast=int)
    voter_uuid = request.args(1)
    election = db.election(election_id) or redirect(URL('invalid_link'))
    voter = db(db.voter.election_id==election_id)\
        (db.voter.voter_uuid==voter_uuid).select().first() or \
        redirect(URL('invalid_link'))
    if not DEBUG_MODE and voter.voted:
        redirect(URL('voted_already'))
    if election.deadline and request.now>election.deadline:
        session.flash = T('Election is closed')
        if voter.voted:
            session.flash += T('Your vote was recorded')
        else:
            session.flash += T('Your vote was NOT recorded')
        redirect(URL('results',args=election.id))
    response.subtitle = election.title + ' / Vote'
    form = ballot2form(election.ballot_model, readonly=False)
    form.process()
    if form.accepted:
        results = form.vars
        for_update = not db._uri.startswith('sqlite') # not supported by sqlite
        #if not for_update: db.executesql('begin immediate transaction;')
        # Pick one unused ballot at random, row-locked (where supported) so
        # two concurrent voters cannot claim the same ballot.
        ballot = db(db.ballot.election_id==election_id)\
            (db.ballot.voted==False).select(
            orderby='<random>',limitby=(0,1),for_update=for_update).first() \
            or redirect(URL('no_more_ballots'))
        ballot_content = form2ballot(election.ballot_model,
                                     token=ballot.ballot_uuid,
                                     vars=request.post_vars,results=results)
        # Sign the filled-in ballot so the receipt can be verified later.
        signature = 'signature-'+sign(ballot_content,election.private_key)
        ballot.update_record(results=results,
                             ballot_content=ballot_content,
                             signature=signature,
                             voted=True,assigned=True,voted_on=request.now)
        voter.update_record(voted=True)
        link = URL('ballot',args=(ballot.ballot_uuid,ballot.signature), scheme='http')
        body = message_replace(election.voted_email,link=link,
                               election_id=election.id,
                               owner_email = election.created_by.email,
                               title=election.title,signature=signature)
        emailed = email_voter_and_managers(election,voter,ballot,body)
        session.flash = \
            T('Your vote was recorded and we sent you an email') \
            if emailed else \
            T('Your vote was recorded but we failed to email you')
        redirect(URL('recorded',vars=dict(link=link)))
    return dict(form=form)
def user():
    """Expose web2py's auth controller (login/logout/register/...)."""
    return {'form': auth()}
def invalid_link():
    """Error page for a malformed or expired link."""
    return {'message': T('Invalid Link')}
def voted_already():
    """Error page shown when a voter follows their link a second time."""
    return {'message': T('You already voted')}
def not_authorized():
    """Error page for users trying to edit an election they do not own."""
    return {'message': T('Not Authorized')}
def no_more_ballots():
    """Error page shown when the blank-ballot pool is exhausted."""
    return {'message': T('Run out of ballots. Your vote was not recorded')}
@auth.requires(auth.user and auth.user.is_manager)
def | |
50], yet not a valid multiple of 15
#invalid multiple passed, yet in the range
with pytest.raises(ArgumentError):
api.check_args(args={"q": 43}, op=api.ops["getAllStatisticsbyUserID"])
#invalid multiple passed, but out of the range
with pytest.raises(ArgumentError):
api.check_args(args={"q": 63}, op=api.ops["getAllStatisticsbyUserID"])
#valid multiple integer passed and in the range [10, 50]
try:
api.check_args(args={"q": 30}, op=api.ops["getAllStatisticsbyUserID"])
except ArgumentError:
pytest.fail("Unexpected argument error on max-min multiple for integer args test. ")
#check if a too high integer passed
with pytest.raises(ArgumentError):
api.check_args(args={"q": 1000}, op=api.ops["getAllStatisticsbyUserID"])
#check if a too low integer passed
with pytest.raises(ArgumentError):
api.check_args(args={"q": 1}, op=api.ops["getAllStatisticsbyUserID"])
def test_header_yaml_max_min_int_multiple():
    """multipleOf combined with min/max must be enforced on integer headers."""
    here = os.path.dirname(__file__)
    spec = os.path.join(here, './', 'static', 'api_multiple_int_min_max_test_header.yaml')
    api = SwaggerBlueprint('API', __name__, swagger_spec=spec)
    op = api.ops["getAllStatisticsbyUserID"]
    # In range [10, 50] but not a multiple of 15 -> rejected.
    with pytest.raises(ArgumentError):
        api.check_header(headers={("X-Request-ID", 43)}, op=op)
    # Not a multiple and also out of range -> rejected.
    with pytest.raises(ArgumentError):
        api.check_header(headers={("X-Request-ID", 63)}, op=op)
    # A valid multiple inside [10, 50] must be accepted.
    try:
        api.check_header(headers={("X-Request-ID", 30)}, op=op)
    except ArgumentError:
        pytest.fail("Unexpected argument error on max-min multiple for integer header test. ")
    # Out of range on either side -> rejected.
    with pytest.raises(ArgumentError):
        api.check_header(headers={("X-Request-ID", 1000)}, op=op)
    with pytest.raises(ArgumentError):
        api.check_header(headers={("X-Request-ID", 1)}, op=op)
#Test if checks on multipleOf do indeed work for number (float)
def test_path_yaml_max_min_num_multiple():
    """multipleOf combined with min/max must be enforced on float path params."""
    here = os.path.dirname(__file__)
    spec = os.path.join(here, './', 'static', 'api_multiple_num_min_max_test.yaml')
    api = SwaggerBlueprint('API', __name__, swagger_spec=spec)
    op = api.ops["getAllStatisticsbyUserID"]
    # In range [10, 50] but not a multiple of 15.5 -> rejected.
    with pytest.raises(ArgumentError):
        api.check_path(path="/users/44.12/statistics", op=op)
    # Not a multiple and also out of range -> rejected.
    with pytest.raises(ArgumentError):
        api.check_path(path="/users/63.12/statistics", op=op)
    # A valid multiple of 15.5 must be accepted.
    try:
        api.check_path(path="/users/31/statistics", op=op)
    except ArgumentError:
        pytest.fail("Unexpected argument error on max-min multiple for number path test")
    # Out of range on either side -> rejected.
    with pytest.raises(ArgumentError):
        api.check_path(path="/users/11020.12/statistics", op=op)
    with pytest.raises(ArgumentError):
        api.check_path(path="/users/0.34/statistics", op=op)
def test_args_yaml_max_min_num_multiple():
    """multipleOf combined with min/max must be enforced on float query args."""
    here = os.path.dirname(__file__)
    spec = os.path.join(here, './', 'static', 'api_multiple_num_min_max_test.yaml')
    api = SwaggerBlueprint('API', __name__, swagger_spec=spec)
    op = api.ops["getAllStatisticsbyUserID"]
    # In range [10, 50] but not a multiple of 15.5 -> rejected.
    with pytest.raises(ArgumentError):
        api.check_args(args={"q": 44.12}, op=op)
    # Not a multiple and also out of range -> rejected.
    with pytest.raises(ArgumentError):
        api.check_args(args={"q": 63.12}, op=op)
    # A valid multiple of 15.5 must be accepted.
    try:
        api.check_args(args={"q": 31}, op=op)
    except ArgumentError:
        pytest.fail("Unexpected argument error on max-min multiple for number args test")
    # Out of range on either side -> rejected.
    with pytest.raises(ArgumentError):
        api.check_args(args={"q": 11020.12}, op=op)
    with pytest.raises(ArgumentError):
        api.check_args(args={"q": 0.34}, op=op)
# Verify multipleOf together with minimum/maximum for number (float) headers.
def test_header_yaml_max_min_num_multiple():
    test_dir = os.path.dirname(__file__)
    spec = os.path.join(test_dir, './', 'static', 'api_multiple_num_min_max_test_header.yaml')
    api_bp = SwaggerBlueprint('API', __name__, swagger_spec=spec)
    op = api_bp.ops["getAllStatisticsbyUserID"]
    # 44.12 is inside [10, 50] but not a multiple of 15.5 -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_header(headers={("X-Request-ID", 44.12)}, op=op)
    # 63.12 is neither a valid multiple nor inside the range -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_header(headers={("X-Request-ID", 63.12)}, op=op)
    # 31 is both in range and a multiple of 15.5 -> accepted
    try:
        api_bp.check_header(headers={("X-Request-ID", 31)}, op=op)
    except ArgumentError:
        pytest.fail("Unexpected argument error on max-min multiple for number header test")
    # value above the maximum -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_header(headers={("X-Request-ID", 11020.12)}, op=op)
    # value below the minimum -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_header(headers={("X-Request-ID", 0.34)}, op=op)
# Verify minLength/maxLength validation for string path parameters.
def test_path_yaml_min_max_string_length():
    test_dir = os.path.dirname(__file__)
    spec = os.path.join(test_dir, './', 'static', 'api_min_max_string.yaml')
    api_bp = SwaggerBlueprint('API', __name__, swagger_spec=spec)
    op = api_bp.ops["getAllStatisticsbyUserID"]
    # spec bounds the string length to [5, 12]
    # shorter than 5 characters -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/abc/statistics", op=op)
    # longer than 12 characters -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/supercalifragilistico/statistics", op=op)
    # length within [5, 12] -> accepted
    try:
        api_bp.check_path(path="/users/daniele/statistics", op=op)
    except ArgumentError:
        pytest.fail("Unexpected argument error on the min max string length path test. ")
# Verify minLength/maxLength validation for string query args.
def test_args_yaml_min_max_string_length():
    test_dir = os.path.dirname(__file__)
    spec = os.path.join(test_dir, './', 'static', 'api_min_max_string.yaml')
    api_bp = SwaggerBlueprint('API', __name__, swagger_spec=spec)
    op = api_bp.ops["getAllStatisticsbyUserID"]
    # spec bounds the string length to [5, 12]
    # shorter than 5 characters -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_args(args={"q": "abc"}, op=op)
    # longer than 12 characters -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_args(args={"q": "supercalifragilistico"}, op=op)
    # length within [5, 12] -> accepted
    try:
        api_bp.check_args(args={"q": "daniele"}, op=op)
    except ArgumentError:
        pytest.fail("Unexpected argument error on the min max string length args test. ")
# Verify minLength/maxLength validation for string headers.
def test_header_yaml_min_max_string_length():
    test_dir = os.path.dirname(__file__)
    spec = os.path.join(test_dir, './', 'static', 'api_min_max_string_header.yaml')
    api_bp = SwaggerBlueprint('API', __name__, swagger_spec=spec)
    op = api_bp.ops["getAllStatisticsbyUserID"]
    # spec bounds the string length to [5, 12]
    # shorter than 5 characters -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_header(headers={("X-Request-ID", "abc")}, op=op)
    # longer than 12 characters -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_header(headers={("X-Request-ID", "supercalifragilistico")}, op=op)
    # length within [5, 12] -> accepted
    try:
        api_bp.check_header(headers={("X-Request-ID", "daniele")}, op=op)
    except ArgumentError:
        pytest.fail("Unexpected argument error on the min max string length header test. ")
# Verify 'date' format validation (%Y-%m-%d) for path parameters.
def test_path_yaml_valid_date_format():
    test_dir = os.path.dirname(__file__)
    spec = os.path.join(test_dir, './', 'static', 'api_date_test.yaml')
    api_bp = SwaggerBlueprint('API', __name__, swagger_spec=spec)
    op = api_bp.ops["getAllStatisticsbyUserID"]
    # arbitrary text is not a date -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/fakeDate/statistics", op=op)
    # wrong separator (underscores instead of dashes) -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/2018_10_10/statistics", op=op)
    # a full date-time is not a plain date -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/2018-10-10T21:20:10Z/statistics", op=op)
    # month 13 does not exist -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/2018-13-10/statistics", op=op)
    # day 32 does not exist -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/2018-07-32/statistics", op=op)
    # a well-formed calendar date -> accepted
    try:
        api_bp.check_path(path="/users/2018-10-10/statistics", op=op)
    except ArgumentError:
        pytest.fail("Unexpected argument error in the valid date path test. ")
# Verify 'date' format validation (%Y-%m-%d) for query args.
def test_args_yaml_valid_date_format():
    test_dir = os.path.dirname(__file__)
    spec = os.path.join(test_dir, './', 'static', 'api_date_test.yaml')
    api_bp = SwaggerBlueprint('API', __name__, swagger_spec=spec)
    op = api_bp.ops["getAllStatisticsbyUserID"]
    # arbitrary text is not a date -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_args(args={"q": "fakeDate"}, op=op)
    # wrong separator (underscores instead of dashes) -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_args(args={"q": "2018_10_10"}, op=op)
    # a full date-time is not a plain date -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_args(args={"q": "2018-10-10T21:20:10Z"}, op=op)
    # month 13 does not exist -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_args(args={"q": "2018-13-10"}, op=op)
    # day 32 does not exist -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_args(args={"q": "2018-07-32"}, op=op)
    # a well-formed calendar date -> accepted
    try:
        api_bp.check_args(args={"q": "2018-10-10"}, op=op)
    except ArgumentError:
        pytest.fail("Unexpected argument error in the valid date args test. ")
def test_header_yaml_valid_date_format():
    """Verify 'date' format validation (%Y-%m-%d) for header parameters.

    Mirrors test_args_yaml_valid_date_format but exercises check_header
    against the header-parameter spec.
    """
    HERE = os.path.dirname(__file__)
    YML = os.path.join(HERE, './', 'static', 'api_date_test_header.yaml')
    api = SwaggerBlueprint('API', __name__, swagger_spec=YML)
    #expected date: Y-%m-%d
    #passing a normal string as date should raise an error
    with pytest.raises(ArgumentError):
        api.check_header(headers={("X-Request-ID", "fakeDate")}, op=api.ops["getAllStatisticsbyUserID"])
    #passing a date without the proper "-" ticks would raise an error
    with pytest.raises(ArgumentError):
        api.check_header(headers={("X-Request-ID", "2018_10_10")}, op=api.ops["getAllStatisticsbyUserID"])
    #a valid date and time shouldn't be accepted as "date"
    with pytest.raises(ArgumentError):
        api.check_header(headers={("X-Request-ID", "2018-10-10T21:20:10Z")}, op=api.ops["getAllStatisticsbyUserID"])
    #a date with an invalid month shouldn't be accepted as date
    with pytest.raises(ArgumentError):
        api.check_header(headers={("X-Request-ID", "2018-13-10")}, op=api.ops["getAllStatisticsbyUserID"])
    #a date with an invalid day shouldn't be accepted as date either
    with pytest.raises(ArgumentError):
        api.check_header(headers={("X-Request-ID", "2018-07-32")}, op=api.ops["getAllStatisticsbyUserID"])
    # a valid date should finally be accepted as date
    try:
        api.check_header(headers={("X-Request-ID", "2018-10-10")}, op=api.ops["getAllStatisticsbyUserID"])
    except ArgumentError:
        # Fixed copy-paste defect: this failure message previously said
        # "args test" even though this is the header test.
        pytest.fail("Unexpected argument error in the valid date header test. ")
# Verify 'date-time' (RFC 3339 timestamp) format validation for path params.
def test_path_yaml_valid_date_time_format():
    test_dir = os.path.dirname(__file__)
    spec = os.path.join(test_dir, './', 'static', 'api_date_time_test.yaml')
    api_bp = SwaggerBlueprint('API', __name__, swagger_spec=spec)
    op = api_bp.ops["getAllStatisticsbyUserID"]
    # a well-formed date-time -> accepted
    try:
        api_bp.check_path(path="/users/2017-07-21T17:32:28Z/statistics", op=op)
    except ArgumentError:
        pytest.fail("Unexpected argument error on date time test. ")
    # arbitrary text is not a date-time -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/fakeDate/statistics", op=op)
    # missing the 'T' separator and 'Z' suffix -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/2017-07-21 17:32:28/statistics", op=op)
    # hour 25 is out of range -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/2017-07-21T25:32:28Z/statistics", op=op)
    # minute 78 is out of range -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/2017-07-21T12:78:28Z/statistics", op=op)
    # second 99 is out of range -> rejected
    with pytest.raises(ArgumentError):
        api_bp.check_path(path="/users/2017-07-21T12:32:99Z/statistics", op=op)
    # the date portion itself is already covered by the date-format tests
def test_args_yaml_valid_date_time_format():
HERE = os.path.dirname(__file__)
YML = os.path.join(HERE, './', 'static', 'api_date_time_test.yaml')
api = SwaggerBlueprint('API', __name__, swagger_spec=YML)
#let's check if a valid date-time is | |
np.expand_dims(points, 1)
v1 = points - self.front_left_vertices
v2 = points - self.front_right_vertices
v3 = points - self.back_right_vertices
v4 = points - self.back_left_vertices
# x_hat = np.zeros([n_points, n_vortices, 3])
# x_hat[:, :, 0] = 1
# Do some useful arithmetic
v1_cross_v2 = np.cross(v1, v2, axis = 2)
v2_cross_v3 = np.cross(v2, v3, axis = 2)
v3_cross_v4 = np.cross(v3, v4, axis = 2)
v4_cross_v1 = np.cross(v4, v1, axis = 2)
v1_dot_v2 = np.einsum('ijk,ijk->ij', v1, v2)
v2_dot_v3 = np.einsum('ijk,ijk->ij', v2, v3)
v3_dot_v4 = np.einsum('ijk,ijk->ij', v3, v4)
v4_dot_v1 = np.einsum('ijk,ijk->ij', v4, v1)
norm_v1 = np.linalg.norm(v1, axis = 2)
norm_v2 = np.linalg.norm(v2, axis = 2)
norm_v3 = np.linalg.norm(v3, axis = 2)
norm_v4 = np.linalg.norm(v4, axis = 2)
norm_v1_inv = 1 / norm_v1
norm_v2_inv = 1 / norm_v2
norm_v3_inv = 1 / norm_v3
norm_v4_inv = 1 / norm_v4
# Check for the special case where the collocation point is along the bound vortex leg
# Find where cross product is near zero, and set the dot product to infinity so that the init_val of the bound term is zero.
v1_v2_singularity_indices = (
np.einsum('ijk,ijk->ij', v1_cross_v2, v1_cross_v2) # norm(cross_product)^2
< 3.0e-16)
v1_dot_v2 = v1_dot_v2 + v1_v2_singularity_indices
v2_v3_singularity_indices = (
np.einsum('ijk,ijk->ij', v2_cross_v3, v2_cross_v3) # norm(cross_product)^2
< 3.0e-16)
v2_dot_v3 = v2_dot_v3 + v2_v3_singularity_indices
v3_v4_singularity_indices = (
np.einsum('ijk,ijk->ij', v3_cross_v4, v3_cross_v4) # norm(cross_product)^2
< 3.0e-16)
v3_dot_v4 = v3_dot_v4 + v3_v4_singularity_indices
v4_v1_singularity_indices = (
np.einsum('ijk,ijk->ij', v4_cross_v1, v4_cross_v1) # norm(cross_product)^2
< 3.0e-16)
v4_dot_v1 = v4_dot_v1 + v4_v1_singularity_indices
# Calculate Vij
term1 = (norm_v1_inv + norm_v2_inv) / (norm_v1 * norm_v2 + v1_dot_v2)
term1 = np.expand_dims(term1, 2)
term2 = (norm_v2_inv + norm_v3_inv) / (norm_v2 * norm_v3 + v2_dot_v3)
term2 = np.expand_dims(term2, 2)
term3 = (norm_v3_inv + norm_v4_inv) / (norm_v3 * norm_v4 + v3_dot_v4)
term3 = np.expand_dims(term3, 2)
term4 = (norm_v4_inv + norm_v1_inv) / (norm_v4 * norm_v1 + v4_dot_v1)
term4 = np.expand_dims(term4, 2)
Vij_doublets = 1 / (4 * np.pi) * (
v1_cross_v2 * term1 +
v2_cross_v3 * term2 +
v3_cross_v4 * term3 +
v4_cross_v1 * term4
)
return Vij_doublets
def calculate_Vij_horseshoes(self,points):
    """Calculate Vij, the velocity influence matrix, for horseshoe vortices.

    First index of Vij is the collocation point number, second index is the
    vortex number, third index is xyz.

    points: an Nx3 array of points at which to evaluate the influence.
    Returns an (n_points, n_vortices, 3) array.
    """
    # Left and right vertices of each horseshoe vortex.
    left_vortex_vertices = self.left_horseshoe_vortex_vertices
    right_vortex_vertices = self.right_horseshoe_vortex_vertices
    points = np.reshape(points, (-1, 3))
    n_points = len(points)
    n_vortices = len(left_vortex_vertices)
    # Make a and b vectors.
    # a: vector from every collocation point to every left vertex, (N, M, 3).
    #    a[i, j, :] = points[i, :] - lv[j, :]
    # b: vector from every collocation point to every right vertex, (N, M, 3).
    #    b[i, j, :] = points[i, :] - rv[j, :]
    # (First index is collocation point #, second is vortex #, third is xyz.)
    points = np.expand_dims(points, 1)
    a = points - left_vortex_vertices
    b = points - right_vortex_vertices
    # Do some useful arithmetic.
    a_cross_b = np.cross(a, b, axis=2)
    a_dot_b = np.einsum('ijk,ijk->ij', a, b)
    # Cross products with the unit x-vector, written out component-wise
    # (x_hat = [1, 0, 0], so a x x_hat = [0, a_z, -a_y]).
    a_cross_x = np.stack((
        np.zeros((n_points, n_vortices)),
        a[:, :, 2],
        -a[:, :, 1]
    ), axis=2)
    a_dot_x = a[:, :, 0]
    b_cross_x = np.stack((
        np.zeros((n_points, n_vortices)),
        b[:, :, 2],
        -b[:, :, 1]
    ), axis=2)
    b_dot_x = b[:, :, 0]  # equivalent to np.sum(b * x_hat, axis=2)
    norm_a = np.linalg.norm(a, axis=2)
    norm_b = np.linalg.norm(b, axis=2)
    norm_a_inv = 1 / norm_a
    norm_b_inv = 1 / norm_b
    # Check for the special case where the collocation point is along a vortex leg.
    # Where the cross product is near zero, bump the dot product (adds the boolean
    # mask, i.e. +1 at singular entries) so the corresponding term vanishes.
    bound_vortex_singularity_indices = (
        np.einsum('ijk,ijk->ij', a_cross_b, a_cross_b)  # norm(a_cross_b) ** 2
        < 3.0e-16)
    a_dot_b = a_dot_b + bound_vortex_singularity_indices
    left_vortex_singularity_indices = (
        np.einsum('ijk,ijk->ij', a_cross_x, a_cross_x)
        < 3.0e-16
    )
    a_dot_x = a_dot_x + left_vortex_singularity_indices
    right_vortex_singularity_indices = (
        np.einsum('ijk,ijk->ij', b_cross_x, b_cross_x)
        < 3.0e-16
    )
    b_dot_x = b_dot_x + right_vortex_singularity_indices
    # Calculate Vij: bound-leg term plus the two semi-infinite trailing-leg terms.
    term1 = (norm_a_inv + norm_b_inv) / (norm_a * norm_b + a_dot_b)
    term2 = (norm_a_inv) / (norm_a - a_dot_x)
    term3 = (norm_b_inv) / (norm_b - b_dot_x)
    term1 = np.expand_dims(term1, 2)
    term2 = np.expand_dims(term2, 2)
    term3 = np.expand_dims(term3, 2)
    Vij_horseshoes = 1 / (4 * np.pi) * (
        a_cross_b * term1 +
        a_cross_x * term2 -
        b_cross_x * term3
    )
    return Vij_horseshoes
def calculate_delta_cp(self):
    """Compute panel areas, normal forces, and pressure coefficient deltas.

    Sets self.areas, self.Fi_normal, self.pressure_normal, self.delta_cp.
    """
    # Panel area: half the magnitude of the cross product of the two diagonals.
    first_diagonal = self.front_left_vertices - self.back_right_vertices
    second_diagonal = self.front_right_vertices - self.back_left_vertices
    diagonal_cross = np.cross(first_diagonal, second_diagonal, axis=1)
    self.areas = np.linalg.norm(diagonal_cross, axis=1) / 2
    # Force component along each panel normal, then pressure, then delta Cp.
    self.Fi_normal = np.einsum('ij,ij->i', self.Fi_geometry, self.normal_directions)
    self.pressure_normal = self.Fi_normal / self.areas
    self.delta_cp = self.pressure_normal / self.op_point.dynamic_pressure()
def get_induced_velocity_at_point(self, point):
    """Return the induced velocities (Nx3) at the given points (Nx3)."""
    queries = np.reshape(point, (-1, 3))
    influence = self.calculate_Vij(queries)
    strengths = np.expand_dims(self.solution, 1)
    # Matrix-multiply each velocity component's influence slice by the
    # vortex strengths, then assemble the (N, 3) result.
    components = [influence[:, :, axis] @ strengths for axis in range(3)]
    return np.hstack(components)
def get_velocity_at_point(self, point):
    """Return the total velocities (induced + freestream) at the given points (Nx3)."""
    queries = np.reshape(point, (-1, 3))
    induced = self.get_induced_velocity_at_point(queries)
    freestream = self.op_point.compute_freestream_velocity_geometry_axes()
    return induced + freestream
def get_streamlines(self,
                    seed_points,  # Nx3 ndarray of streamline starting points
                    n_steps=300,  # number of points along each streamline
                    length=0.7,  # approximate streamline length, meters
                    ):
    """Trace streamlines emanating from the given seed points.

    Returns an (M, n_steps, 3) array: M streamlines, n_steps points each,
    with the last axis being xyz.
    """
    step_length = length / n_steps
    n_streamlines = len(seed_points)
    trace = np.zeros((n_streamlines, n_steps, 3))
    trace[:, 0, :] = seed_points
    # Forward-Euler integration of the velocity field, normalizing each step
    # to a constant arc length. TODO consider higher order methods like RK4.
    for step in range(1, n_steps):
        velocity = self.get_velocity_at_point(trace[:, step - 1, :])
        speed = np.expand_dims(np.linalg.norm(velocity, axis=1), axis=1)
        trace[:, step, :] = trace[:, step - 1, :] + velocity * step_length / speed
    return trace
def draw(self,
shading_type="solid", # Can be None, "solid", "doublet_strengths", "delta_cp", or "trailing_edges"
streamlines_type="trailing", # Can be None, "trailing", "line", or attrib_name Nx3 numpy array with custom points.
points_type = None, # You can supply the name of attrib_name property of this class to plot attrib_name point cloud. Useful for debugging, mostly.
):
# Note: NOT autograd-compatible!
print("Drawing...")
plotter = pv.Plotter() # Initialize Plotter
# Shading
if shading_type is not None:
# Make airplane geometry
vertices = np.vstack((
self.front_left_vertices,
self.front_right_vertices,
self.back_right_vertices,
self.back_left_vertices
))
faces = np.transpose(np.vstack((
4 * np.ones(self.n_panels),
np.arange(self.n_panels),
np.arange(self.n_panels) + self.n_panels,
np.arange(self.n_panels) + 2 * self.n_panels,
np.arange(self.n_panels) + 3 * self.n_panels,
)))
faces = np.reshape(faces, (-1), order='C')
wing_surfaces = pv.PolyData(vertices, faces)
if shading_type == "solid":
plotter.add_mesh(wing_surfaces, color='tan', show_edges=True,
smooth_shading=True)
elif shading_type == "doublet_strengths":
if not hasattr(self, 'doublet_strengths'):
print("Doublet strengths not found, running again.")
self.run()
# min = -1.5
# max = 1.5
scalars = self.doublet_strengths #np.minimum(np.maximum(self.delta_cp, delta_cp_min), delta_cp_max)
cmap = plt.cm.get_cmap('viridis')
plotter.add_mesh(wing_surfaces, scalars=scalars, cmap=cmap, color='tan', show_edges=True,
smooth_shading=True)
plotter.add_scalar_bar(title="Doublet Strengths", n_labels=5, shadow=True,
font_family='arial')
elif shading_type == "delta_cp":
if not hasattr(self, 'delta_cp'):
self.calculate_delta_cp()
delta_cp_min = -1.5
delta_cp_max = 1.5
scalars = np.minimum(np.maximum(self.delta_cp, delta_cp_min), delta_cp_max)
cmap = plt.cm.get_cmap('viridis')
plotter.add_mesh(wing_surfaces, scalars=scalars, cmap=cmap, color='tan', show_edges=True,
smooth_shading=True)
plotter.add_scalar_bar(title="Pressure Coefficient Differential", n_labels=5, shadow=True,
font_family='arial')
elif shading_type == "all_trailing_edges":
scalars = np.logical_or(self.is_trailing_edge_upper, self.is_trailing_edge_lower) # type: np.ndarray
scalars = scalars.astype(int)
plotter.add_mesh(wing_surfaces, scalars=scalars, color='tan', show_edges=True,
smooth_shading=True)
elif shading_type == "upper_trailing_edges":
scalars = self.is_trailing_edge_upper # type: np.ndarray
scalars = | |
"""Adapted from Nematode: https://github.com/demelin/nematode """
import tensorflow as tf
import exception
import rnn_inference
import sampler_inputs
from transformer import INT_DTYPE, FLOAT_DTYPE
import transformer_inference
class BeamSearchSampler:
    """Implements beam search with one or more models.

    If there are multiple models, then at each timestep the top k tokens are
    selected according to the sum of the models' log probabilities (where k is
    the beam size).

    The algorithm continues searching until either the maximum translation
    length is reached or until none of the partial translations could go on
    to finish with a better score than the worst finished translation.

    Prior to running the sampler, the placeholders in self.inputs must be
    fed appropriate values (see the SamplerInputs class). Model inputs are fed
    to the model placeholders, as in training.

    The resulting sample can be accessed via the outputs() property, which
    returns a pair of tensors, (sequences, scores). sequences contains target
    vocabulary IDs and has shape (batch_size_x, beam_size, seq_len), where
    seq_len <= max_translation_len is the length of the longest translation.
    scores contains floats representing the length-normalized log
    probabilities. It has the shape (batch_size_x, beam_size).

    TODO Make beam_size a placeholder?

    See also: RandomSampler.
    """

    def __init__(self, models, configs, beam_size):
        """Sets some things up then calls _beam_search() to do the real work.

        Args:
            models: a sequence of RNN or Transformer objects.
            configs: a sequence of model configs (argparse.Namespace objects).
            beam_size: an integer specifying the beam width.
        """
        self._models = models
        self._configs = configs
        self._beam_size = beam_size
        with tf.name_scope('beam_search'):
            # Define placeholders.
            self.inputs = sampler_inputs.SamplerInputs()
            # Create model adapters to get a consistent interface to
            # Transformer and RNN models.
            model_adapters = []
            for i, (model, config) in enumerate(zip(models, configs)):
                with tf.name_scope('model_adapter_{}'.format(i)) as scope:
                    if config.model_type == 'transformer':
                        adapter = transformer_inference.ModelAdapter(
                            model, config, scope)
                    else:
                        assert config.model_type == 'rnn'
                        adapter = rnn_inference.ModelAdapter(
                            model, config, scope)
                    model_adapters.append(adapter)
            # Check that individual models are compatible with each other:
            # ensembling requires identical target vocabularies.
            vocab_sizes = [a.target_vocab_size for a in model_adapters]
            if len(set(vocab_sizes)) > 1:
                raise exception.Error('Cannot ensemble models with different '
                                      'target vocabulary sizes')
            target_vocab_size = vocab_sizes[0]
            # Build the graph to do the actual work.
            sequences, scores = _beam_search(
                model_adapters=model_adapters,
                beam_size=beam_size,
                batch_size_x=self.inputs.batch_size_x,
                max_translation_len=self.inputs.max_translation_len,
                normalization_alpha=self.inputs.normalization_alpha,
                vocab_size=target_vocab_size,
                eos_id=0)
            self._outputs = sequences, scores

    @property
    def outputs(self):
        # The (sequences, scores) tensor pair produced by the search graph.
        return self._outputs

    @property
    def models(self):
        # The models passed to the constructor.
        return self._models

    @property
    def configs(self):
        # The configs passed to the constructor.
        return self._configs

    @property
    def beam_size(self):
        # The beam width passed to the constructor.
        return self._beam_size
def _beam_search(model_adapters, beam_size, batch_size_x, max_translation_len,
                 normalization_alpha, vocab_size, eos_id):
    """See description of BeamSearchSampler above.

    Args:
        model_adapters: sequence of ModelAdapter objects.
        beam_size: integer specifying beam width.
        batch_size_x: tf.int32 scalar specifying number of input sentences.
        max_translation_len: tf.int32 scalar specifying max translation length.
        normalization_alpha: tf.float32 scalar specifying alpha parameter for
            length normalization.
        vocab_size: integer specifying the target vocabulary size.
        eos_id: integer specifying the vocabulary ID of the EOS symbol.

    Returns:
        A pair of tensors: (sequences, scores). sequences contains vocabulary
        IDs. It has shape (batch_size, len), where len <= max_translation_len
        is the length of the longest translation in the batch. scores contains
        sequence scores, length-normalized at the end of this function.
    """
    # Encode the input and generate a 1-step decoding function for each model.
    decoding_functions = []
    for adapter in model_adapters:
        encoder_output = adapter.encode()
        func = adapter.generate_decoding_function(encoder_output)
        decoding_functions.append(func)
    # Initialize the timestep counter.
    current_time_step = tf.constant(1)
    # Initialize sequences with <GO>.
    alive_sequences = tf.ones([batch_size_x, beam_size, 1], dtype=INT_DTYPE)
    finished_sequences = tf.ones([batch_size_x, beam_size, 1], dtype=INT_DTYPE)
    # Initialize alive sequence scores.
    alive_scores = tf.zeros([batch_size_x, beam_size])
    # Initialize finished sequence scores to a low value so that any real
    # finished translation beats the placeholder entries.
    finished_scores = tf.fill([batch_size_x, beam_size], -1. * 1e7)
    # Initialize flags indicating which finished_sequences are really finished.
    finished_eos_flags = tf.fill([batch_size_x, beam_size], False)
    # Initialize memories (i.e. states carried over from the last timestep).
    alive_memories = [ma.generate_initial_memories(batch_size_x, beam_size)
                      for ma in model_adapters]
    # Generate the conditional and body functions for the beam search loop.
    loop_cond = _generate_while_loop_cond_func(max_translation_len)
    loop_body = _generate_while_loop_body_func(model_adapters,
                                               decoding_functions,
                                               max_translation_len,
                                               batch_size_x, beam_size,
                                               vocab_size, eos_id,
                                               normalization_alpha)
    loop_vars = [current_time_step,
                 alive_sequences,
                 alive_scores,
                 finished_sequences,
                 finished_scores,
                 finished_eos_flags,
                 alive_memories]
    # Shape invariants: the sequence-length dimension grows on each iteration,
    # so it is left unconstrained (None) for the sequence tensors.
    shape_invariants=[
        tf.TensorShape([]),                  # timestep
        tf.TensorShape([None, None, None]),  # alive sequences
        alive_scores.get_shape(),            # alive scores
        tf.TensorShape([None, None, None]),  # finished sequence
        finished_scores.get_shape(),         # finished scores
        finished_eos_flags.get_shape(),      # finished EOS flags
        [adapter.get_memory_invariants(memories)  # alive memories
         for adapter, memories in zip(model_adapters, alive_memories)]]
    # Execute the auto-regressive decoding step via while loop.
    _, alive_sequences, alive_scores, finished_sequences, finished_scores, \
        finished_eos_flags, _ = \
        tf.while_loop(
            loop_cond,
            loop_body,
            loop_vars,
            shape_invariants,
            parallel_iterations=10,
            swap_memory=False,
            back_prop=False)
    alive_sequences.set_shape((None, beam_size, None))
    finished_sequences.set_shape((None, beam_size, None))
    # Account for the case in which no translations terminate in <EOS> for a
    # particular input sentence. In that case, copy the contents of the alive
    # beam for that sentence into the finished beam (sequence + score).
    finished_sequences = tf.where(tf.reduce_any(finished_eos_flags, 1),
                                  finished_sequences, alive_sequences)
    # Attention: alive_scores are not length normalized!
    finished_scores = tf.where(tf.reduce_any(finished_eos_flags, 1),
                               finished_scores, alive_scores)
    # Truncate finished sequences to remove initial <GO>.
    finished_sequences = finished_sequences[:, :, 1:]
    # Normalize scores. Note that we include the <EOS> token when calculating
    # sequence length: 'lengths' below is the index of the first EOS plus one
    # (or the full sequence length if no EOS occurs).
    seq_len = tf.shape(finished_sequences)[2]
    indices = tf.range(seq_len, dtype=tf.int32)
    indices = tf.reshape(indices, [1, 1, seq_len])
    indices = tf.tile(indices, [batch_size_x, beam_size, 1])
    seq_lens = tf.reshape(seq_len, [1, 1, 1])
    seq_lens = tf.tile(seq_lens, [batch_size_x, beam_size, seq_len])
    eos_indices = tf.where(tf.equal(finished_sequences, eos_id),
                           indices, seq_lens)
    lengths = tf.reduce_min(eos_indices+1, axis=2)
    float_lengths = tf.cast(lengths, dtype=tf.float32)
    # Length penalty of the form ((5 + len) ** alpha) / ((5 + 1) ** alpha),
    # as used in Wu et al. (2016).
    length_penalties = ((5 + float_lengths) ** normalization_alpha) / ((5 + 1) ** normalization_alpha)
    finished_scores = finished_scores / length_penalties
    return finished_sequences, finished_scores
def _compute_batch_indices(batch_size_x, beam_size):
    """Generates a matrix of batch indices for the 'merged' beam tensor.

    Each index denotes the batch from which the sequence occupying the same
    relative position as the index within the 'merged' tensor belongs.
    Result shape: (batch_size_x, beam_size); row i is filled with i.
    """
    per_sentence = tf.expand_dims(tf.range(batch_size_x), axis=1)
    return tf.tile(per_sentence, [1, beam_size])
def _gather_top_sequences(model_adapters, all_sequences, all_scores,
                          all_scores_to_gather, all_eos_flags, all_memories,
                          beam_size, batch_size_x, prefix):
    """Selects the beam_size best sequences (plus their scores, EOS flags,
    and memories) out of a candidate set, ranked by all_scores."""
    # Indices of the top-k scores within each sentence's candidate set.
    _, best_indices = tf.nn.top_k(all_scores, k=beam_size)
    # Pair each top index with its batch index for gather_nd lookups.
    batch_indices = _compute_batch_indices(batch_size_x, beam_size)
    coordinates = tf.stack([batch_indices, best_indices], axis=2)
    # Pull out the winning entries from each input tensor.
    top_sequences = tf.gather_nd(all_sequences, coordinates,
                                 name='{:s}_sequences'.format(prefix))
    top_scores = tf.gather_nd(all_scores_to_gather, coordinates,
                              name='{:s}_scores'.format(prefix))
    top_eos_flags = tf.gather_nd(all_eos_flags, coordinates,
                                 name='{:s}_eos_flags'.format(prefix))
    top_memories = None
    if all_memories is not None:
        top_memories = [adapter.gather_memories(memories, coordinates)
                        for adapter, memories in zip(model_adapters, all_memories)]
    return top_sequences, top_scores, top_eos_flags, top_memories
def _generate_while_loop_cond_func(max_translation_len):
    """Builds the termination predicate for the beam-search while loop."""
    def continue_decoding(curr_time_step, alive_sequences, alive_scores,
                          finished_sequences, finished_scores,
                          finished_eos_flags, alive_memories):
        """Determines whether decoding should continue or terminate."""
        # Criterion 1: the maximum prediction length has not been reached.
        below_max_length = tf.less(curr_time_step, max_translation_len)
        # Criterion 2: some alive hypothesis could still beat the worst
        # finished one. The best possible score of an alive hypothesis is
        # the score of the top beam entry.
        best_alive_score = alive_scores[:, 0]
        # Score of the least likely genuinely-finished sequence; entries that
        # never emitted <EOS> are zeroed out by the flag mask.
        worst_finished_score = tf.reduce_min(
            finished_scores * tf.cast(finished_eos_flags, FLOAT_DTYPE), axis=1)
        # If nothing in 'finished' has terminated yet, push the finished score
        # far down so the termination condition cannot trigger.
        any_finished = tf.reduce_any(finished_eos_flags, 1)
        unfinished_penalty = (1. - tf.to_float(any_finished)) * (-1. * 1e7)
        worst_finished_score += unfinished_penalty
        # Continue unless, for every sentence, the worst finished hypothesis
        # already beats the best alive one.
        still_improving = tf.logical_not(
            tf.reduce_all(
                tf.greater(worst_finished_score, best_alive_score)))
        return tf.logical_and(below_max_length, still_improving)
    return continue_decoding
def _generate_while_loop_body_func(model_adapters, decoding_functions,
max_translation_len, batch_size_x, beam_size,
vocab_size, eos_id, normalization_alpha):
# Construct an alternate set of 'log probabilities' to use when extending
# sequences beyond EOS. The value is set very low to ensure that these
# overgrown sequences are never chosen over incomplete or just-finished
# sequences.
tmp = tf.constant(tf.float32.min, shape=[1, 1], dtype=tf.float32)
eos_log_probs = tf.tile(tmp,
multiples=[batch_size_x*beam_size, vocab_size])
def extend_hypotheses(current_time_step, alive_sequences, alive_scores,
alive_memories):
"""Generates top-k extensions of the alive beam candidates."""
# Get the vocab IDs for this timestep in the order of the model inputs.
next_ids = alive_sequences[:, :, -1] # [batch_size_x, beam_size]
next_ids = tf.transpose(next_ids, [1, 0]) # [beam_size, batch_size_x]
next_ids = tf.reshape(next_ids, [-1]) # [beam_size * batch_size_x]
# | |
options_dict = self.options_master
# Delete any user defined option fields
for option in self.options_delete:
if option in options_dict:
options_dict.pop(option)
# Update any user defined option fields
for option in self.options_overwrite.keys():
if option in options_dict:
options_dict[option] = self.options_overwrite[option]
# Add any user defined class options
for option in self.options_add.keys():
if option not in options_dict:
options_dict[option] = self.options_add[option]
self.options_add.pop(option)
# Add any user defined option fields
for option in options_dict.keys():
if option in self.options_add:
value_list = self.options_add[option]
if type(value_list) is not list:
value_list = [value_list]
for value in value_list:
options_dict[option].append(value)
# Build out options
option_prev = 0
for option_num in self.build_option_list(options_dict):
for option_value in options_dict[option_num]:
self.tx_dict['Options'].append((option_num, option_value))
option_len = len(option_value)
# Build out the 3 delta values
option_delta = option_num - option_prev
option_delta1 = -1
option_delta2 = -1
if option_delta < 13:
option_byte1 = option_delta * 16
elif option_delta < 269:
option_byte1 = 208
option_delta1 = option_delta - 13
else:
option_byte1 = 224
option_delta2 = option_delta - 269
# Build out the 3 length values
option_len1 = -1
option_len2 = -1
if option_len < 13:
option_byte1 += option_len
elif option_len < 269:
option_byte1 += 13
option_len1 = option_len - 13
else:
option_byte1 += 14
option_len2 = option_len - 269
# Build out the option
option_string += chr(option_byte1)
if option_delta1 != -1:
option_string += chr(option_delta1)
if option_delta2 != -1:
option_string += chr(option_delta2 / 256)
option_string += chr(option_delta2 % 256)
if option_len1 != -1:
option_string += chr(option_len1)
if option_len2 != -1:
option_string += chr(option_len2 / 256)
option_string += chr(option_len2 % 256)
option_string += option_value
# Set the previous option value
option_prev = option_num
# Any adjustment to the string
option_string = adjust_string(option_string, adjustment)
# Append any additional characters to the string
option_string += add_string
# Return options string
return option_string
#-----------------------------------------------------------------------
# Method:
# build_msg
# Description:
# Build a CoAP message
# Input:
# options = Options value as string
# payload = Payload value as string
# truncate = Strip characters from the end of the string
# Output:
# <string> = CoAP byte string
#-----------------------------------------------------------------------
def build_msg(self, options='', payload=MISSING, truncate=0):
# Initialize the dictionary to store current values
self.tx_dict = {}
# Build out CoAP header fields
version = self.update_version(self.VERSION)
transaction_type = self.update_transaction_type(self.TRANSACTION_TYPE)
token = self.update_token('')
token_length = self.get_token_length()
method_code = self.update_method_code(self.POST)
message_id = self.get_message_id()
# Build out 1st byte (Version, Transaction Type, Token Length)
self.tx_dict['Version'] = version
self.tx_dict['Transaction Type'] = transaction_type
self.tx_dict['Token Length'] = token_length
byte = (version * 64) + (transaction_type * 16) + token_length
coap_str = chr(byte)
# Build out 2nd byte (Method Code)
self.tx_dict['Method Code'] = method_code
coap_str += chr(method_code)
# Build out 3rd & 4th bytes (Message ID)
self.tx_dict['Message ID'] = message_id
coap_str += chr(message_id / 256) + chr(message_id & 255)
# Build out token
self.tx_dict['Token'] = token
coap_str += token
# Build out options
self.tx_dict['Options'] = options
coap_str += options
# Add payload
if payload == MISSING:
self.tx_dict['Payload'] = ''
elif payload == EMPTY:
self.tx_dict['Payload'] = ''
coap_str += chr(0xFF)
else:
self.tx_dict['Payload'] = payload
coap_str += chr(0xFF) + payload
# Truncate a portion of the string
if truncate:
coap_str = coap_str[:truncate]
# Return the resulting string
self.parse_msg(coap_str, 'tx')
return coap_str
#-----------------------------------------------------------------------
# Method:
# build_payload
# Description:
# Build payload to fit in next message to be sent
# Input:
# payload = User defined payload for this block
# Output:
# <string> - Payload to be sent on next message
#-----------------------------------------------------------------------
def build_payload(self, payload=DEFAULT):
# No more BLOCK1 options when done sending message
if self.get_block1_mbit() == 0:
return MISSING
# If user defined payload, then just return it
if payload != DEFAULT:
return payload
# Get the BLOCK1 size (in bytes)
block_size = self.block_num2bytes(self.get_block1_size())
# Update payload strings
payload = self.tx_pending[:block_size]
self.tx_sending = payload
self.tx_pending = self.tx_pending[block_size:]
self.tx_sent += self.tx_sending
# Pull out the portion of the payload and return it
if payload == '':
payload = MISSING
return payload
#-----------------------------------------------------------------------
# Method:
# calc_block1_mbit
# Description:
# Determine the mbit value for the next BLOCK1 to be sent
# Input:
# mbit = User defined mbit value for this block
# Output:
# 0/1/None - Mbit
#-----------------------------------------------------------------------
def calc_block1_mbit(self, mbit=DEFAULT_NUM):
# User defined value, just return it
if mbit != DEFAULT_NUM:
return mbit
# M-bit state
mbit = 0
if len(self.tx_pending) > 0:
mbit = 1
elif self.tx_sending == self.tx_sent:
mbit = None
# Record value for later verification
self.set_block1_mbit(mbit)
# Return value
return mbit
#-----------------------------------------------------------------------
# Method:
# build_block1_rsp
# Description:
# Build a block1 option to the message to be sent
# Input:
# options = Pass in any dictionary options already built out
# (Optional, default is no dictionary)
# Output:
# <dict> - Updated options
#-----------------------------------------------------------------------
def build_block1_rsp(self, options=None):
# If no options provided, then initialize return dictionary
if options is None:
options = {}
# M-bit
mbit = 0
if len(self.tx_pending) > 0:
mbit = 1
elif self.tx_sending == self.tx_sent:
return options
# Block number
size = self.block_num2bytes(self.get_block1_size())
number = (len(self.tx_sent) - 1) / size
self.set_block1_number(number)
# Block length
length = self.calc_block_length(number)
# Return a BLOCK1 for this message
block_list = (length, number, mbit, size,)
options = self.setup_block_option(options=options,
block=self.BLOCK1,
block_values=block_list,)
return options
#-----------------------------------------------------------------------
# Method:
# build_block2_rsp
# Description:
# Build a block2 option in response to message just received
# Input:
# options = Pass in any dictionary options already built out
# (Optional, default is no dictionary)
# Output:
# <dict> - Updated options
#-----------------------------------------------------------------------
def build_block2_rsp(self, options=None):
# If no message has been received, then just return
if len(self.rx_dict) == 0:
return options
# If no options provided, then initialize return dictionary
if options is None:
options = {}
# Get current option parameters
value = self.get_option_value(self.BLOCK2)
# No BLOCK2 option, don't add BLOCK2
if value is None:
return options
# Parse out BLOCK2 options
length, number, dont_care, dont_care = value
mbit = 0
block_size = self.get_block2_size()
# Update the BLOCK2 number parameter
if number:
number += 1
else:
number = len(self.rx_body) / self.block_num2bytes(block_size)
# Update the BLOCK2 length parameter
length = self.calc_block_length(number)
# Return a BLOCK2 for the next message
value = (length, number, mbit, block_size)
options = self.setup_block_option(options=options,
block=self.BLOCK2,
block_values=value,)
return options
#-----------------------------------------------------------------------
# Method:
# build_size1_rsp
# Description:
# Build a size1 option to the message to be sent
# Input:
# options = Pass in any dictionary options already built out
# (Optional, default is no dictionary)
# payload = Outgoing payload string
# size = Block1 M-bit value
# Output:
# <dict> - Updated options
#-----------------------------------------------------------------------
def build_size1_rsp(self, options=None, payload='', size=DEFAULT_NUM):
# If no options provided, then initialize return dictionary
if options is None:
options = {}
# If no payload, then just return with options
if payload == MISSING:
return options
# Calculate SIZE1 option value
if size == DEFAULT_NUM:
size = len(payload)
if payload == EMPTY:
size = 0
# Add a SIZE1 option to the list of options
options = self.setup_option(self.SIZE1, size, options)
return options
#-----------------------------------------------------------------------
# Method:
# next_msg
# Description:
# Update/Store information needed for next portion of a response
# Input:
# None
# Output:
# None
#-----------------------------------------------------------------------
def next_msg(self):
# Increment number of messages transmitted
self.tx_count += 1
# Save off this portion of the payload
if 'Payload' in self.rx_dict:
self.rx_body += self.rx_dict['Payload']
# Empty ACK received
if (self.rx_dict['Method Code'] == self.EMPTY_MSG) and \
(self.rx_dict['Transaction Type'] == self.ACK):
self.message_start_side = SERVER
# Get current option parameters
current_number = self.get_option_value(self.BLOCK2, 'number')
if current_number is None:
return
# Next BLOCK2 number
next_number = 1
if current_number == 0:
next_number = \
len(self.rx_body) / self.block_num2bytes(self.get_block2_size())
# Update the block number for next message received
self.set_block2_number(current_number, increment=next_number)
#-----------------------------------------------------------------------
# Method:
# done
# Description:
# Keep track of the server responses to check if all data received
# Input:
# direction = (Optional) which direction is data | |
'jp';").fetchall()
lock_id = session.add_lock()
keyboard = [[InlineKeyboardButton(piece[0], callback_data="['c102', {}, {}]".format(piece[1], lock_id))] for piece in piece_list]
text = "Choose a piece to remove:"
reply_markup = InlineKeyboardMarkup(keyboard)
bot.send_message(chat_id = chat_id[0][0], text = text, reply_markup = reply_markup)
session.thread_lock(lock_id)
if function.can_recuit('jp', 40, db):
battlebuild.recuit(bot, 'jp', 40, 102, session)
def c102_cb(bot, query, query_list, session):
    # Callback for card 102: remove the piece the player chose from the
    # inline keyboard (query_list[1] is the selected piece id).
    # NOTE(review): `db` is opened but never used here -- connecting does
    # create/open the database file; confirm whether it can be dropped.
    db = sqlite3.connect(session.get_db_dir())
    # Dismiss the keyboard prompt before applying the removal.
    bot.delete_message(chat_id=query.message.chat_id, message_id=query.message.message_id)
    battlebuild.remove(bot, 'jp', query_list[1], 40, 102, session)
    # The last element of query_list is the lock id created when the
    # prompt was sent; releasing it unblocks the waiting thread.
    session.release_lock(query_list[-1])
#------------------c103----------------------
# Card 103: recruit a jp piece in space 37 when that space allows it.
def c103(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    if 37 in function.recuit_list('jp', db):
        battlebuild.recuit(bot, 'jp', 37, 103, session)
#------------------c104----------------------
# Card 104: build a jp piece in space 32 when that space allows it.
def c104(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    if 32 in function.build_list('jp', db):
        battlebuild.build(bot, 'jp', 32, 104, session)
#------------------c105----------------------
# Card 105: same effect as card 82.
def c105(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    c82(bot, session)
#------------------c106----------------------
# Card 106: remove the piece recorded on the triggering handler.
def c106(bot, handler_id, session):
    db = sqlite3.connect(session.get_db_dir())
    battlebuild.remove(bot, 'jp', session.handler_list[handler_id].piece_id, session.handler_list[handler_id].space_id, 106, session)
#------------------c107----------------------
# Card 107: restore the handler's piece and flag it as non-removable.
def c107(bot, handler_id, session):
    db = sqlite3.connect(session.get_db_dir())
    battlebuild.restore(bot, session.handler_list[handler_id].piece_id, session.handler_list[handler_id].space_id, session)
    db.execute("update piece set noremove = 1 where pieceid = :piece", {'piece':session.handler_list[handler_id].piece_id})
    db.commit()
#------------------c108----------------------
# Card 108: offer a land battle within 1 space of space 37, if any target
# exists; the lock blocks until the player answers the keyboard.
def c108(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    space_list1 = function.within('Axis', [37], 1, db)
    space_list2 = function.battle_list('jp', db, space_type = 'land')
    space_list = list(set(space_list1) & set(space_list2))
    if len(space_list) > 0:
        lock_id = session.add_lock()
        info = battlebuild.battle_info(bot, 'jp', space_list, 108, lock_id, session)
        bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
        session.thread_lock(lock_id)
#------------------c109----------------------
# Card 109: identical effect to card 107 (restore + mark non-removable).
def c109(bot, handler_id, session):
    db = sqlite3.connect(session.get_db_dir())
    battlebuild.restore(bot, session.handler_list[handler_id].piece_id, session.handler_list[handler_id].space_id, session)
    db.execute("update piece set noremove = 1 where pieceid = :piece", {'piece':session.handler_list[handler_id].piece_id})
    db.commit()
#------------------c110---------------------
# Card 110: up to two land battles within 1 space of the just-built space.
def c110(bot, handler_id, session):
    db = sqlite3.connect(session.get_db_dir())
    built_space = session.handler_list[handler_id].space_id
    for i in range(2):
        space_list1 = function.within('Axis', [built_space], 1, db)
        space_list2 = function.battle_list('jp', db, space_type = 'land')
        space_list = list(set(space_list1) & set(space_list2))
        if len(space_list) > 0:
            lock_id = session.add_lock()
            info = battlebuild.battle_info(bot, 'jp', space_list, 110, lock_id, session)
            bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
            session.thread_lock(lock_id)
#------------------c111----------------------
# Card 111: up to two land builds within 1 space of the just-built space.
def c111(bot, handler_id, session):
    db = sqlite3.connect(session.get_db_dir())
    built_space = session.handler_list[handler_id].space_id
    for i in range(2):
        space_list1 = function.within('Axis', [built_space], 1, db)
        space_list2 = function.build_list('jp', db, space_type = 'land')
        space_list = list(set(space_list1) & set(space_list2))
        if len(space_list) > 0:
            lock_id = session.add_lock()
            info = battlebuild.build_info(bot, 'jp', space_list, 111, lock_id, session)
            bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
            session.thread_lock(lock_id)
#------------------c112----------------------
# Card 112: one sea battle, then one land battle, when targets exist.
def c112(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    space_list = function.battle_list('jp', db, space_type = 'sea')
    if len(space_list) > 0:
        lock_id = session.add_lock()
        info = battlebuild.battle_info(bot, 'jp', space_list, 112, lock_id, session)
        bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
        session.thread_lock(lock_id)
    space_list = function.battle_list('jp', db, space_type = 'land')
    if len(space_list) > 0:
        lock_id = session.add_lock()
        info = battlebuild.battle_info(bot, 'jp', space_list, 112, lock_id, session)
        bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
        session.thread_lock(lock_id)
#------------------c113----------------------
# Card 113: recruit a jp piece in space 36 when that space allows it.
def c113(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    if 36 in function.recuit_list('jp', db):
        battlebuild.recuit(bot, 'jp', 36, 113, session)
#------------------c114----------------------
# Card 114: move the card to the 'turn' pile, then set the supply flag on
# all jp navies and on jp armies located in spaces adjacent to space 44.
def c114(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    db.execute("update card set location = 'turn' where cardid = 114;")
    db.execute("update piece set supply = 1 where control = 'jp' and type = 'navy';")
    db.execute("update piece set supply = 1 where control = 'jp' and type = 'army' and location in (select distinct spaceid from space where adjacency = 44);")
    db.commit()
#------------------c115~118----------------------
#------------------c119----------------------
# Cost for card 119: jp must discard 1 card.
def c119_cost(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    function.discardresponse(bot, 'jp', 1, session)
# Card 119: offer a sea battle within 1 space of the just-battled space.
def c119(bot, handler_id, session):
    db = sqlite3.connect(session.get_db_dir())
    battled_space = session.handler_list[handler_id].space_id
    space_list1 = function.within('Axis', [battled_space], 1, db)
    space_list2 = function.battle_list('jp', db, space_type = 'sea')
    space_list = list(set(space_list1) & set(space_list2))
    if len(space_list) > 0:
        lock_id = session.add_lock()
        info = battlebuild.battle_info(bot, 'jp', space_list, 119, lock_id, session)
        bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
        session.thread_lock(lock_id)
#------------------c120----------------------
# Cost for card 120: jp must discard 1 card.
def c120_cost(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    function.discardresponse(bot, 'jp', 1, session)
# Card 120: restore the handler's piece.
def c120(bot, handler_id, session):
    db = sqlite3.connect(session.get_db_dir())
    battlebuild.restore(bot, session.handler_list[handler_id].piece_id, session.handler_list[handler_id].space_id, session)
    db.commit()
#------------------c121----------------------
# Cost for card 121: jp must discard 1 card.
def c121_cost(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    function.discardresponse(bot, 'jp', 1, session)
# Card 121: build at any jp sea build space.
def c121(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    lock_id = session.add_lock()
    space_list = function.build_list('jp', db, space_type = 'sea')
    info = battlebuild.build_info(bot, 'jp', space_list, 121, lock_id, session)
    bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
    session.thread_lock(lock_id)
#------------------c122----------------------
# Cost for card 122: jp must discard 1 card.
def c122_cost(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    function.discardresponse(bot, 'jp', 1, session)
# Card 122: recruit, restricted to spaces 33, 39, 45 and 46.
def c122(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    lock_id = session.add_lock()
    space_list1 = function.recuit_list('jp', db)
    space_list2 = [33, 39, 45, 46]
    space_list = list(set(space_list1) & set(space_list2))
    info = battlebuild.recuit_info(bot, 'jp', space_list, 122, lock_id, session)
    bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
    session.thread_lock(lock_id)
#------------------c123----------------------
# Module-level flag recording that card 123 has been played.
c123_used = False
# Card 123: only sets the flag; its effect is applied elsewhere.
def c123(bot, session):
    global c123_used
    c123_used = True
#------------------c124----------------------
# Cost for card 124: jp must discard 1 card.
def c124_cost(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    function.discardresponse(bot, 'jp', 1, session)
# Card 124: battle within 1 space of the just-marshalled space.
def c124(bot, handler_id, session):
    db = sqlite3.connect(session.get_db_dir())
    marshalled_space = session.handler_list[handler_id].space_id
    space_list1 = function.within('Axis', [marshalled_space], 1, db)
    space_list2 = function.battle_list('jp', db)
    space_list = list(set(space_list1) & set(space_list2))
    lock_id = session.add_lock()
    info = battlebuild.battle_info(bot, 'jp', space_list, 124, lock_id, session)
    bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
    session.thread_lock(lock_id)
#------------------c125----------------------
# Card 125: award jp one VP per distinct 'pacific' space holding a jp piece.
def c125(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    vp_space_count = db.execute("select count(distinct space.spaceid) from space inner join piece on space.spaceid = piece.location where space.name like '%pacific%' and piece.control = 'jp';").fetchall()
    function.add_vp(bot, 'jp', vp_space_count[0][0], db)
#------------------c126----------------------
# Cost for card 126: jp must discard 1 card.
def c126_cost(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    function.discardresponse(bot, 'jp', 1, session)
# Card 126: remove an Allied air unit within 1 space of the
# just-marshalled space.
def c126(bot, handler_id, session):
    db = sqlite3.connect(session.get_db_dir())
    marshalled_space = session.handler_list[handler_id].space_id
    lock_id = session.add_lock()
    space_list1 = function.within('Axis', [marshalled_space], 1, db)
    space_list2 = function.control_side_air_space_list('Allied', db, space_type = 'all')
    space_list = list(set(space_list1) & set(space_list2))
    session.remove_list.append(battlebuild.remove_obj('jp', space_list, 126, lock_id, 'air', session))
    print("remove_id: " + str(len(session.remove_list)-1))
    remove_id = len(session.remove_list)-1
    info = session.remove_list[remove_id].remove_info(session)
    bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
    session.thread_lock(lock_id)
#------------------c127----------------------
# Cost for card 127: jp must discard 1 card.
def c127_cost(bot, session):
    function.discardresponse(bot, 'jp', 1, session)
# Card 127: deploy air to any jp sea deploy space.
def c127(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    lock_id = session.add_lock()
    space_list = function.deploy_list('jp', db, space_type = 'sea')
    info = air.deploy_info(bot, 'jp', space_list, 127, lock_id, session)
    bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
    session.thread_lock(lock_id)
#------------------c128~131----------------------
# Cards 128-131: Italian land build.
def c128(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    lock_id = session.add_lock()
    space_list = function.build_list('it', db, space_type = 'land')
    info = battlebuild.build_info(bot, 'it', space_list, 128, lock_id, session)
    bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
    session.thread_lock(lock_id)
#------------------c132~135----------------------
# Cards 132-135: Italian sea build.
def c132(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    lock_id = session.add_lock()
    space_list = function.build_list('it', db, space_type = 'sea')
    info = battlebuild.build_info(bot, 'it', space_list, 132, lock_id, session)
    bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
    session.thread_lock(lock_id)
#------------------c136~140----------------------
# Cards 136-140: Italian land battle.
def c136(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    lock_id = session.add_lock()
    space_list = function.battle_list('it', db, space_type = 'land')
    info = battlebuild.battle_info(bot, 'it', space_list, 136, lock_id, session)
    bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
    session.thread_lock(lock_id)
#------------------c141~142----------------------
# Cards 141-142: Italian sea battle.
def c141(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    lock_id = session.add_lock()
    space_list = function.battle_list('it', db, space_type = 'sea')
    info = battlebuild.battle_info(bot, 'it', space_list, 141, lock_id, session)
    bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
    session.thread_lock(lock_id)
#------------------c143----------------------
# Card 143: uk discards 1 card (via it), and it gains 1 VP.
def c143(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    function.ewdiscard(bot, 143, 'it', 'uk', 1, session)
    function.add_vp(bot, 'it', 1, db)
#------------------c144----------------------
# Card 144: if uk controls space 8, the uk player chooses between
# discarding the top 3 draw-deck cards or eliminating a uk Army in space 8;
# otherwise uk discards 3 cards outright.
def c144(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    if 8 in function.control_space_list('uk', db):
        lock_id = session.add_lock()
        chat_id = db.execute("select playerid from country where id = 'uk';").fetchall()
        keyboard = [[InlineKeyboardButton("Discard the top 3 cards from draw deck", callback_data="['c144', 'discard', {}]".format(lock_id))],
                    [InlineKeyboardButton("Eliminate an Army from the United Kingdom", callback_data="['c144', 'remove', {}]".format(lock_id))]]
        text = "Choose what to do:"
        reply_markup = InlineKeyboardMarkup(keyboard)
        bot.send_message(chat_id = chat_id[0][0], text = text, reply_markup = reply_markup)
        session.thread_lock(lock_id)
    else:
        function.ewdiscard(bot, 144, 'it', 'uk', 3, session)
# Callback for card 144: apply the uk player's choice, dismiss the
# keyboard, and release the lock taken when the prompt was sent.
def c144_cb(bot, query, query_list, session):
    db = sqlite3.connect(session.get_db_dir())
    if query_list[1] == 'discard':
        function.ewdiscard(bot, 144, 'it', 'uk', 3, session)
    elif query_list[1] == 'remove':
        piece = db.execute("select pieceid from piece where location = '8' and control = 'uk' and type = 'army';").fetchall()
        battlebuild.remove(bot, 'uk', piece[0][0], 8, 144, session)
    bot.delete_message(chat_id = query.message.chat_id, message_id = query.message.message_id)
    session.release_lock(query_list[-1])
#------------------c145----------------------
# Card 145: if space 24 is within 2 of uk-controlled land, uk discards 2
# cards and it gains 1 VP.
def c145(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    space_list = function.within('Allied', function.control_space_list('uk', db, space_type='land'), 2, db)
    if 24 in space_list:
        function.ewdiscard(bot, 145, 'it', 'uk', 2, session)
        function.add_vp(bot, 'it', 1, db)
#------------------c146----------------------
# Card 146: if it controls space 18, uk discards 1 card and it gains 2 VP.
def c146(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    space_list = function.control_space_list('it', db)
    if 18 in space_list:
        function.ewdiscard(bot, 146, 'it', 'uk', 1, session)
        function.add_vp(bot, 'it', 2, db)
#------------------c147----------------------
# Card 147: ask the it player which German recruitment to perform first.
def c147(bot, session):
    db = sqlite3.connect(session.get_db_dir())
    lock_id = session.add_lock()
    chat_id = db.execute("select playerid from country where id = 'it';").fetchall()
    keyboard = [[InlineKeyboardButton("Recruit a German Army in North Africa", callback_data="['c147', 'na', {}]".format(lock_id))],
                [InlineKeyboardButton("Recruit a German Navy in the Mediterranean", callback_data="['c147', 'm', {}]".format(lock_id))]]
    text = "Choose what to do first:"
    reply_markup = InlineKeyboardMarkup(keyboard)
    bot.send_message(chat_id = chat_id[0][0], text = text, reply_markup = reply_markup)
    session.thread_lock(lock_id)
def c147_cb(bot, query, query_list, session):
db = sqlite3.connect(session.get_db_dir())
bot.delete_message(chat_id=query.message.chat_id, message_id=query.message.message_id)
if query_list[1] == 'na':
if function.can_recuit('ge', 19, db):
battlebuild.recuit(bot, 'ge', 19, 147, session)
if function.can_recuit('ge', 18, db):
| |
"""
redpatch
A module for segmenting diseased leaf images to find healthy regions, lesion associated regions and lesion centres.
Workflow Overview
-----------------
1. Set scale sliders to find HSV values that segment whole leaves from background
2. Segment image into leaf sub-images
3. Set scale sliders to find HSV values that segment healthy regions within leaf sub-images
4. Segment the healthy region
5. Set scale sliders to find HSV values that segment lesion regions within leaf sub-images
6. Segment the lesion regions
7. Set scale sliders that segment lesion centres within lesion regions
8. Segment lesion centres
9. Quantify found objects
Basic Usage
-----------
1. Import module
.. highlight:: python
.. code-block:: python
import redpatch as rp
2. Create an IPython file browser to select images
.. highlight:: python
.. code-block:: python
f = rp.FileBrowser()
f.widget()
3. Load an image into HSV colour space
.. highlight:: python
.. code-block:: python
hsv_image = rp.load_as_hsv( f.path )
4. Create an image slider
.. highlight:: python
.. code-block:: python
rp.run_threshold_preview(hsv_image, width = 2)
5. Find objects using HSV values
.. highlight:: python
.. code-block:: python
# find raw objects
lesion_region_mask, lesion_region_volume = rp.griffin_lesion_regions(cleared_image, h =(0.0, 0.82),
s = (0.2, 1.0),
v = (0.4, 1.0))
# label regions
labelled_lesion_regions, lesion_region_count = rp.label_image(lesion_region_mask)
# visual preview of regions
rp.preview_object_labels(labelled_lesion_regions, color.hsv2rgb(cleared_image))
#filter regions
lesion_regions_properties_list = rp.get_object_properties(labelled_lesion_regions)
    lesion_regions_to_keep = rp.filter_region_property_list(lesion_regions_properties_list, rp.is_not_small)
6. Examine objects as scikit-image measure RegionProps objects
.. highlight:: python
.. code-block:: python
first_lesion_region = lesion_regions_to_keep[0]
first_lesion_region.area
"""
from skimage import io
from skimage import color
from skimage import measure
from skimage import feature
from skimage import transform
from scipy import ndimage as ndi
from scipy import misc
import matplotlib.pyplot as plt
import numpy as np
from typing import Callable, List, Tuple, Union
from ipywidgets import FloatRangeSlider, FloatProgress
from IPython.display import display
import ipywidgets as widgets
import math
from numba import njit
# Default HSV/RGB channel thresholds for the griffin_* segmentation
# functions. Each is a (lower, upper) pair; the HSV values are rescaled
# from the 0-255 integer range into the 0.0-1.0 range skimage uses.
#: Default values for griffin named functions
LEAF_AREA_HUE = tuple([i / 255 for i in (0, 255)])
#: Default values for griffin named functions
LEAF_AREA_SAT = tuple([i / 255 for i in (50, 255)])
#: Default values for griffin named functions
LEAF_AREA_VAL = tuple([i / 255 for i in (40, 255)])
#: Default values for griffin named functions
HEALTHY_HUE = tuple([i / 255 for i in (40, 255)])
#: Default values for griffin named functions
HEALTHY_SAT = tuple([i / 255 for i in (50, 255)])
#: Default values for griffin named functions
HEALTHY_VAL = tuple([i / 255 for i in (0, 255)])
#: Default values for griffin named functions (RGB, kept in 0-255)
HEALTHY_RED = (4, 155)
#: Default values for griffin named functions (RGB, kept in 0-255)
HEALTHY_GREEN = (120, 175)
#: Default values for griffin named functions (RGB, kept in 0-255)
HEALTHY_BLUE = (0, 255)
#: Default values for griffin named functions
LESION_HUE = tuple([i / 255 for i in (0, 41)])
#: Default values for griffin named functions
LESION_SAT = tuple([i / 255 for i in (38, 255)])
#: Default values for griffin named functions
LESION_VAL = tuple([i / 255 for i in (111, 255)])
# Presumed values (copied from the LESION_* defaults) -- TODO confirm and
# update against calibration data.
LESION_CENTRE_HUE = tuple([i / 255 for i in (0, 41)])
LESION_CENTRE_SAT = tuple([i / 255 for i in (38, 255)])
LESION_CENTRE_VAL = tuple([i / 255 for i in (111, 255)])
#: Default values for griffin named functions (already in 0.0-1.0 range)
SCALE_CARD_HUE = (0.61, 1.0)
#: Default values for griffin named functions (already in 0.0-1.0 range)
SCALE_CARD_SAT = (0.17, 1.0)
#: Default values for griffin named functions (already in 0.0-1.0 range)
SCALE_CARD_VAL = (0.25, 0.75)
def pixel_volume_to_circular_area(pixels: int, scale: float) -> float:
    """Estimate the area a circle of roughly *pixels* pixels would occupy.

    :param pixels: number of pixels in the object
    :param scale: pixels per cm in this image (see rp.griffin_scale_card())
    :return: float area, computed as pi * (steps / scale)**2

    NOTE(review): the recurrence ``total = 2 * total + 2`` grows
    geometrically, so ``steps`` grows roughly logarithmically in *pixels*
    rather than like a square-root -- confirm this is the intended
    pixel-count-to-radius conversion.
    """
    total = 1
    steps = 0
    # Count how many doubling steps it takes to reach half the pixel count.
    while total < pixels / 2.0:
        total += total + 2
        steps += 1
    radius = steps / scale
    return math.pi * radius ** 2
def threshold_hsv_img(im: np.ndarray,
                      h: Tuple[float, float] = HEALTHY_HUE,
                      s: Tuple[float, float] = HEALTHY_SAT,
                      v: Tuple[float, float] = HEALTHY_VAL) -> np.ndarray:
    """
    Select the pixels of an HSV image that pass all three channel thresholds.

    :param: im np.ndarray -- HSV image (float64, values in 0.0-1.0)
    :param: h Tuple -- (lower, upper) hue thresholds
    :param: s Tuple -- (lower, upper) saturation thresholds
    :param: v Tuple -- (lower, upper) value thresholds
    :return: np.ndarray -- boolean mask with the same 2-D shape as im's
        first two dimensions; True where every channel is within range
    """
    assert im.dtype.type is np.float64, "im must be np.ndarray of type float64. Looks like you're not using an HSV image."
    mask = _threshold_three_channels(im, c1_limits=h, c2_limits=s, c3_limits=v)
    return mask
def hsv_to_rgb255(img: np.ndarray) -> np.ndarray:
    """
    Convert an HSV image (channels in 0.0-1.0) to an integer RGB image in 0-255.

    Most viewers interpret arrays as RGB, so this is usually needed before
    display.

    :param: img np.ndarray -- HSV image
    :return: np.ndarray -- integer RGB array with shape == img
    """
    rgb_float = color.hsv2rgb(img)
    scaled = rgb_float * 255
    return scaled.astype('int')
@njit
def _threshold_three_channels(im: np.ndarray,
                              c1_limits: Tuple[Union[int, float], Union[int, float]] = (0, 1),
                              c2_limits: Tuple[Union[int, float], Union[int, float]] = (0, 1),
                              c3_limits: Tuple[Union[int, float], Union[int, float]] = (0, 1)
                              ) -> np.ndarray:
    """
    Per-pixel three-channel threshold. Internal method.

    Returns a boolean mask, shaped like one channel of im, that is True
    where every channel of the pixel lies within its (lower, upper) limits.
    Written as explicit loops so numba's @njit can compile it.

    :param: im np.ndarray -- a 3-channel image array
    :param: c1_limits Tuple -- (lower, upper) for channel 0
    :param: c2_limits Tuple -- (lower, upper) for channel 1
    :param: c3_limits Tuple -- (lower, upper) for channel 2
    :return: np.ndarray -- boolean mask (dtype bool_)
    """
    lo1, hi1 = c1_limits
    lo2, hi2 = c2_limits
    lo3, hi3 = c3_limits
    mask = np.zeros_like(im[:, :, 0])
    rows, cols, _ = im.shape
    for r in range(rows):
        for c in range(cols):
            p1 = im[r, c, 0]
            p2 = im[r, c, 1]
            p3 = im[r, c, 2]
            in_range = (p1 >= lo1 and p1 <= hi1) and \
                       (p2 >= lo2 and p2 <= hi2) and \
                       (p3 >= lo3 and p3 <= hi3)
            if in_range:
                mask[r, c] = 1
    return mask.astype(np.bool_)
def load_as_hsv(fname: str) -> np.ndarray:
    """
    Load a file into HSV colour space.

    Takes a file path and opens the image then converts to HSV colour space.
    Returns a numpy array of dtype float64. Strips the alpha (fourth) channel
    if it exists. Input must be a colour image; one-channel images are rejected.

    :param: fname str -- path to the image
    :return: np.ndarray -- numpy array containing the HSV image
    """
    img = io.imread(fname)
    # Reject greyscale/2-D images BEFORE any channel slicing. Previously the
    # alpha-strip ran first, so a 2-D image whose width happened to be 4 hit
    # img[:, :, :3] and raised IndexError instead of this clear assertion.
    assert img.ndim == 3, "Image at: {} does not appear to be a 3 channel colour image.".format(fname)
    if img.shape[-1] == 4:
        # Drop the alpha channel; rgb2hsv expects exactly three channels.
        img = img[:, :, :3]
    hsv_img = color.rgb2hsv(img)
    return hsv_img
def preview_mask(m: np.ndarray, width: int = 5, height: int = 5) -> None:
    """
    Render a binary mask to the screen.

    Creates a new figure of the requested size and immediately shows the mask
    using the 'binary_r' colormap. Intended for IPython and interactive
    sessions, where the plot renders as soon as it is drawn.

    :param: m np.ndarray -- the mask to draw
    :param: width int -- figure width in inches
    :param: height int -- figure height in inches
    :return: None
    """
    figure_size = (width, height)
    plt.figure(figsize=figure_size)
    plt.imshow(m, cmap="binary_r")
    plt.show()
def preview_hsv(img: np.ndarray, width: int = 5, height: int = 5) -> None:
    """
    Render an HSV image to the screen.

    Converts the HSV array to RGB for display, then draws it in a new figure
    of the requested size. Intended for IPython and interactive sessions,
    where the plot renders as soon as it is drawn.

    :param: img np.ndarray -- the HSV image to draw
    :param: width int -- figure width in inches
    :param: height int -- figure height in inches
    :return: None
    """
    figure_size = (width, height)
    plt.figure(figsize=figure_size)
    displayable = color.hsv2rgb(img)
    plt.imshow(displayable)
    plt.show()
def preview_object_labels(label_array: np.ndarray, binary_image: np.ndarray, width: int = 5, height: int = 5) -> None:
"""
Draw a preview image of objects in a mask, colouring by labels.
Given a labelled | |
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates the Chrome reference builds.
Usage:
$ /path/to/update_reference_build.py
$ git commit -a
$ git cl upload
"""
import argparse
import collections
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import urllib2
import zipfile
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'py_utils'))
from py_utils import cloud_storage
from dependency_manager import base_config
# Path to the dependency-manager JSON config recording which reference
# binaries (and versions) the perf bots should use.
_CHROME_BINARIES_CONFIG = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '..', '..', 'common',
    'py_utils', 'py_utils', 'chrome_binaries.json')

# Cloud-storage bucket holding official (unsigned) Chrome release builds.
_CHROME_GS_BUCKET = 'chrome-unsigned'

# Cloud-storage bucket holding continuous Chromium snapshot builds.
_CHROMIUM_GS_BUCKET = 'chromium-browser-snapshots'

# How many commit positions to search below and above omaha branch position to
# find closest chromium build snapshot. The value 10 is chosen because it looks
# more than sufficient from manual inspection of the bucket.
_CHROMIUM_SNAPSHOT_SEARCH_WINDOW = 10

# Remove a platform name from this list to disable updating it.
# Add one to enable updating it. (Must also update _PLATFORM_MAP.)
_PLATFORMS_TO_UPDATE = ['mac_x86_64', 'win_x86', 'win_AMD64', 'linux_x86_64',
                        'android_k_armeabi-v7a', 'android_l_arm64-v8a',
                        'android_l_armeabi-v7a', 'android_n_armeabi-v7a',
                        'android_n_arm64-v8a', 'android_n_bundle_armeabi-v7a',
                        'android_n_bundle_arm64-v8a']

# Add platforms here if you also want to update chromium binary for it.
# Must add chromium_info for it in _PLATFORM_MAP.
_CHROMIUM_PLATFORMS = ['mac_x86_64', 'win_x86', 'win_AMD64', 'linux_x86_64']

# Remove a channel name from this list to disable updating it.
# Add one to enable updating it.
_CHANNELS_TO_UPDATE = ['stable', 'canary', 'dev']

# Omaha is Chrome's autoupdate server. It reports the current versions used
# by each platform on each channel.
_OMAHA_PLATFORMS = { 'stable': ['mac', 'linux', 'win', 'android'],
                     'dev': ['linux'], 'canary': ['mac', 'win']}

# All of the information we need to update each platform.
#   omaha: name omaha uses for the platforms.
#   gs_folder: cloud-storage folder pattern the build lives under
#       (e.g. 'desktop-*', 'android-*').
#   gs_build: name of the Chrome build platform used in cloud storage.
#   chromium_info: information needed to update chromium (optional).
#   zip_name: name of the zip file to be retrieved from cloud storage.
UpdateInfo = collections.namedtuple('UpdateInfo',
    'omaha, gs_folder, gs_build, chromium_info, zip_name')

# Chromium-snapshot-specific download information.
#   build_dir: name of the build directory in _CHROMIUM_GS_BUCKET.
#   zip_name: name of the zip file to be retrieved from cloud storage.
ChromiumInfo = collections.namedtuple('ChromiumInfo', 'build_dir, zip_name')

# Maps each platform key in _PLATFORMS_TO_UPDATE to its UpdateInfo.
_PLATFORM_MAP = {'mac_x86_64': UpdateInfo(
                     omaha='mac',
                     gs_folder='desktop-*',
                     gs_build='mac64',
                     chromium_info=ChromiumInfo(
                         build_dir='Mac',
                         zip_name='chrome-mac.zip'),
                     zip_name='chrome-mac.zip'),
                 'win_x86': UpdateInfo(
                     omaha='win',
                     gs_folder='desktop-*',
                     gs_build='win-clang',
                     chromium_info=ChromiumInfo(
                         build_dir='Win',
                         zip_name='chrome-win.zip'),
                     zip_name='chrome-win-clang.zip'),
                 'win_AMD64': UpdateInfo(
                     omaha='win',
                     gs_folder='desktop-*',
                     gs_build='win64-clang',
                     chromium_info=ChromiumInfo(
                         build_dir='Win_x64',
                         zip_name='chrome-win.zip'),
                     zip_name='chrome-win64-clang.zip'),
                 'linux_x86_64': UpdateInfo(
                     omaha='linux',
                     gs_folder='desktop-*',
                     gs_build='linux64',
                     chromium_info=ChromiumInfo(
                         build_dir='Linux_x64',
                         zip_name='chrome-linux.zip'),
                     zip_name='chrome-linux64.zip'),
                 'android_k_armeabi-v7a': UpdateInfo(
                     omaha='android',
                     gs_folder='android-*',
                     gs_build='arm',
                     chromium_info=None,
                     zip_name='Chrome.apk'),
                 'android_l_arm64-v8a': UpdateInfo(
                     omaha='android',
                     gs_folder='android-*',
                     gs_build='arm_64',
                     chromium_info=None,
                     zip_name='ChromeModern.apk'),
                 'android_l_armeabi-v7a': UpdateInfo(
                     omaha='android',
                     gs_folder='android-*',
                     gs_build='arm',
                     chromium_info=None,
                     zip_name='Chrome.apk'),
                 'android_n_armeabi-v7a': UpdateInfo(
                     omaha='android',
                     gs_folder='android-*',
                     gs_build='arm',
                     chromium_info=None,
                     zip_name='Monochrome.apk'),
                 'android_n_arm64-v8a': UpdateInfo(
                     omaha='android',
                     gs_folder='android-*',
                     gs_build='arm_64',
                     chromium_info=None,
                     zip_name='Monochrome.apk'),
                 'android_n_bundle_armeabi-v7a': UpdateInfo(
                     omaha='android',
                     gs_folder='android-*',
                     gs_build='arm',
                     chromium_info=None,
                     zip_name='Monochrome.apks'),
                 'android_n_bundle_arm64-v8a': UpdateInfo(
                     omaha='android',
                     gs_folder='android-*',
                     gs_build='arm_64',
                     chromium_info=None,
                     zip_name='Monochrome.apks')
                 }

# Version reported by omaha for one platform/channel: the chrome version
# string and the chromium commit position its release branch was cut from.
VersionInfo = collections.namedtuple('VersionInfo',
                                     'version, branch_base_position')
def _ChannelVersionsMap(channel):
  """Returns {platform key: VersionInfo} for platforms omaha reports on."""
  report_rows = _OmahaReportVersionInfo(channel)
  omaha_versions = _OmahaVersionsMap(report_rows, channel)
  # Only platforms whose omaha name appears in the report get an entry.
  return dict(
      (platform, omaha_versions[_PLATFORM_MAP[platform].omaha])
      for platform in _PLATFORMS_TO_UPDATE
      if _PLATFORM_MAP[platform].omaha in omaha_versions)
def _OmahaReportVersionInfo(channel):
  """Fetches the omahaproxy CSV report for a channel as comma-split rows."""
  report_url = 'https://omahaproxy.appspot.com/all?channel=%s' % channel
  return [line.split(',') for line in urllib2.urlopen(report_url).readlines()]
def _OmahaVersionsMap(rows, channel):
  """Parses omahaproxy CSV rows into {omaha platform name: VersionInfo}.

  Args:
    rows: comma-split rows from _OmahaReportVersionInfo; rows[0] is the header.
    channel: channel name every row is expected to report.

  Raises:
    ValueError: if the report header is not in the expected form, a row
        reports a different channel, or a desired platform is missing.
  """
  platforms = _OMAHA_PLATFORMS.get(channel, [])
  # Guard against omahaproxy changing its column layout underneath us.
  if (len(rows) < 1 or
      rows[0][0:3] != ['os', 'channel', 'current_version'] or
      rows[0][7] != 'branch_base_position'):
    raise ValueError(
        'Omaha report is not in the expected form: %s.' % rows)
  versions_map = {}
  for row in rows[1:]:
    if row[1] != channel:
      # BUG FIX: this fires when the row's channel does NOT match the
      # requested one; the old message claimed the opposite.
      raise ValueError(
          'Omaha report contains a line with an unexpected channel %s' % row[1])
    if row[0] in platforms:
      versions_map[row[0]] = VersionInfo(version=row[2],
                                         branch_base_position=int(row[7]))
  # Lazy %-args let logging skip formatting when the level is disabled.
  logging.warn('versions map: %s', versions_map)
  if not all(platform in versions_map for platform in platforms):
    raise ValueError(
        'Omaha report did not contain all desired platforms '
        'for channel %s' % channel)
  return versions_map
# A file location in cloud storage: (bucket name, object path within bucket).
RemotePath = collections.namedtuple('RemotePath', 'bucket, path')
def _ResolveChromeRemotePath(platform_info, version_info):
  """Builds the cloud-storage RemotePath for an official Chrome build.

  Path example: desktop-*/30.0.1595.0/precise32/chrome-precise32.zip
  """
  remote_file = '/'.join([platform_info.gs_folder,
                          version_info.version,
                          platform_info.gs_build,
                          platform_info.zip_name])
  return RemotePath(bucket=_CHROME_GS_BUCKET, path=remote_file)
def _FindClosestChromiumSnapshot(base_position, build_dir):
  """Returns the closest chromium snapshot available in cloud storage.

  Chromium snapshots are pulled from build_dir in _CHROMIUM_GS_BUCKET.
  Continuous chromium snapshots do not always contain the exact release build.
  This function queries the storage bucket and returns the snapshot within
  +/- _CHROMIUM_SNAPSHOT_SEARCH_WINDOW closest to base_position.

  Args:
    base_position: int chromium commit position reported by omaha.
    build_dir: name of the platform build directory in the snapshot bucket.

  Raises:
    ValueError: if no snapshot exists within the search window.
  """
  min_position = base_position - _CHROMIUM_SNAPSHOT_SEARCH_WINDOW
  max_position = base_position + _CHROMIUM_SNAPSHOT_SEARCH_WINDOW
  # Getting the full list of objects in cloud storage bucket is prohibitively
  # slow. It's faster to list objects with a prefix. Assuming we're looking at
  # +/- 10 commit positions, for commit position 123456, we want to look at
  # positions between 123446 and 123466. We do this by getting all snapshots
  # with prefix 12344*, 12345*, and 12346*. This may get a few more snapshots
  # than we intended, but that's fine since we take the min distance anyways.
  # BUG FIX: use floor division so the prefixes stay ints under both Python 2
  # and Python 3 (the old '/ 10' relied on Python 2 integer division; under
  # Python 3 it yields floats and range() below would raise TypeError).
  min_position_prefix = min_position // 10
  max_position_prefix = max_position // 10
  available_positions = []
  for position_prefix in range(min_position_prefix, max_position_prefix + 1):
    query = '%s/%d*' % (build_dir, position_prefix)
    try:
      ls_results = cloud_storage.ListDirs(_CHROMIUM_GS_BUCKET, query)
    except cloud_storage.NotFoundError:
      # It's fine if there is no chromium snapshot available for one prefix.
      # We will look at the rest of the prefixes.
      continue
    for entry in ls_results:
      # entry looks like '/Linux_x64/${commit_position}/'.
      position = int(entry.split('/')[2])
      available_positions.append(position)
  if len(available_positions) == 0:
    raise ValueError('No chromium build found +/-%d commit positions of %d' %
                     (_CHROMIUM_SNAPSHOT_SEARCH_WINDOW, base_position))
  distance_function = lambda position: abs(position - base_position)
  min_distance_snapshot = min(available_positions, key=distance_function)
  return min_distance_snapshot
def _ResolveChromiumRemotePath(channel, platform, version_info):
  """Builds the cloud-storage RemotePath for the closest chromium snapshot.

  Args:
    channel: release channel name (used only for the informational message).
    platform: platform key from _PLATFORM_MAP; must have chromium_info set.
    version_info: VersionInfo carrying the omaha branch base position.
  """
  platform_info = _PLATFORM_MAP[platform]
  branch_base_position = version_info.branch_base_position
  build_dir = platform_info.chromium_info.build_dir
  # Look through chromium-browser-snapshots for closest match.
  # (Removed the unused 'omaha_version' local that was assigned here.)
  closest_snapshot = _FindClosestChromiumSnapshot(
      branch_base_position, build_dir)
  if closest_snapshot != branch_base_position:
    print ('Channel %s corresponds to commit position ' % channel +
           '%d on %s, ' % (branch_base_position, platform) +
           'but closest chromium snapshot available on ' +
           '%s is %d' % (_CHROMIUM_GS_BUCKET, closest_snapshot))
  return RemotePath(bucket=_CHROMIUM_GS_BUCKET,
                    path = ('%s/%s/%s' % (build_dir, closest_snapshot,
                                          platform_info.chromium_info.zip_name)))
def _QueuePlatformUpdate(binary, platform, version_info, config, channel):
  """Downloads one build and queues a dependency-manager update job for it.

  Args:
    binary: 'chrome' or 'chromium'.
    platform: the name of the platform for the browser to
        be downloaded & updated from cloud storage.
    version_info: VersionInfo for the channel being updated.
    config: dependency_manager base_config that records the update.
    channel: release channel name ('stable', 'canary' or 'dev').
  """
  platform_info = _PLATFORM_MAP[platform]
  if binary == 'chrome':
    remote_path = _ResolveChromeRemotePath(platform_info, version_info)
  elif binary == 'chromium':
    remote_path = _ResolveChromiumRemotePath(channel, platform, version_info)
  else:
    raise ValueError('binary must be \'chrome\' or \'chromium\'')
  # A missing upstream build skips this platform/channel rather than failing
  # the whole run.
  if not cloud_storage.Exists(remote_path.bucket, remote_path.path):
    cloud_storage_path = 'gs://%s/%s' % (remote_path.bucket, remote_path.path)
    logging.warn('Failed to find %s build for version %s at path %s.' % (
        platform, version_info.version, cloud_storage_path))
    logging.warn('Skipping this update for this platform/channel.')
    return
  reference_builds_folder = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), 'chrome_telemetry_build',
      'reference_builds', binary, channel)
  if not os.path.exists(reference_builds_folder):
    os.makedirs(reference_builds_folder)
  local_dest_path = os.path.join(reference_builds_folder,
                                 platform,
                                 platform_info.zip_name)
  cloud_storage.Get(remote_path.bucket, remote_path.path, local_dest_path)
  # Strip noise-generating utilities (e.g. Keystone on mac) before recording.
  _ModifyBuildIfNeeded(binary, local_dest_path, platform)
  # execute_job=False defers the actual upload until ExecuteUpdateJobs() runs
  # at the end of UpdateBuilds().
  config.AddCloudStorageDependencyUpdateJob('%s_%s' % (binary, channel),
      platform, local_dest_path, version=version_info.version,
      execute_job=False)
def _ModifyBuildIfNeeded(binary, location, platform):
"""Hook to modify the build before saving it for Telemetry to use.
This can be used to remove various utilities that cause noise in a
test environment. Right now, it is just used to remove Keystone,
which is a tool used to autoupdate Chrome.
"""
if binary != 'chrome':
return
if platform == 'mac_x86_64':
_RemoveKeystoneFromBuild(location)
return
if 'mac' in platform:
raise NotImplementedError(
'Platform <%s> sounds like it is an OSX version. If so, we may need to '
'remove Keystone from it per crbug.com/932615. Please edit this script'
' and teach it what needs to be done :).')
def _RemoveKeystoneFromBuild(location):
  """Removes the Keystone autoupdate binary from the chrome mac zipfile."""
  logging.info('Removing keystone from mac build at %s' % location)
  work_dir = tempfile.mkdtemp(prefix='RemoveKeystoneFromBuild')
  try:
    # Unpack the archive, delete the KeystoneRegistration framework, then
    # rebuild the zip in place (preserving symlinks).
    subprocess.check_call(['unzip', '-q', location, '-d', work_dir])
    shutil.rmtree(os.path.join(
        work_dir, 'chrome-mac', 'Google Chrome.app', 'Contents',
        'Frameworks', 'Google Chrome Framework.framework', 'Frameworks',
        'KeystoneRegistration.framework'))
    os.remove(location)
    subprocess.check_call(['zip', '--quiet', '--recurse-paths', '--symlinks',
                           location, 'chrome-mac'],
                          cwd=work_dir)
  finally:
    shutil.rmtree(work_dir)
def _NeedsUpdate(config, binary, channel, platform, version_info):
  """Returns True when the recorded version differs from the channel version.

  Args:
    config: dependency_manager base_config holding recorded versions.
    binary: 'chrome' or 'chromium'.
    channel: release channel name.
    platform: platform key from _PLATFORM_MAP.
    version_info: VersionInfo reported by omaha for this channel/platform.
  """
  channel_version = version_info.version
  # NOTE: Python 2 print statements -- this script predates a py3 migration.
  print 'Checking %s (%s channel) on %s' % (binary, channel, platform)
  current_version = config.GetVersion('%s_%s' % (binary, channel), platform)
  print 'current: %s, channel: %s' % (current_version, channel_version)
  if current_version and current_version == channel_version:
    print 'Already up to date.'
    return False
  return True
def UpdateBuilds(args):
  """Updates reference builds for every stale channel/platform combination.

  Args:
    args: parsed argparse namespace with boolean update_chrome and
        update_chromium attributes.
  """
  config = base_config.BaseConfig(_CHROME_BINARIES_CONFIG, writable=True)
  for channel in _CHANNELS_TO_UPDATE:
    channel_versions_map = _ChannelVersionsMap(channel)
    for platform in channel_versions_map:
      version_info = channel_versions_map.get(platform)
      if args.update_chrome:
        if _NeedsUpdate(config, 'chrome', channel, platform, version_info):
          _QueuePlatformUpdate('chrome', platform, version_info, config,
                               channel)
      # Chromium snapshots only exist for the desktop platforms listed in
      # _CHROMIUM_PLATFORMS.
      if args.update_chromium and platform in _CHROMIUM_PLATFORMS:
        if _NeedsUpdate(config, 'chromium', channel, platform, version_info):
          _QueuePlatformUpdate('chromium', platform, version_info,
                               config, channel)
  # NOTE: Python 2 print statement -- this script predates a py3 migration.
  print 'Updating builds with downloaded binaries'
  # All queued jobs (execute_job=False above) are flushed here in one pass.
  config.ExecuteUpdateJobs(force=True)
def main():
  """Parses command-line flags and runs the reference-build update."""
  logging.getLogger().setLevel(logging.DEBUG)
  parser = argparse.ArgumentParser(
      description='Update reference binaries used by perf bots.')
  # Both browsers are updated by default; each flag opts one of them out.
  for browser in ('chrome', 'chromium'):
    parser.add_argument('--no-update-%s' % browser, action='store_false',
                        dest='update_%s' % browser, default=True,
                        help='do not update %s binaries' % browser)
  UpdateBuilds(parser.parse_args())
if __name__ == | |
<reponame>skiehl/wwz
# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""A class for plotting results of the weighted wavelet z-transform analysis.
"""
import matplotlib.gridspec as gs
import matplotlib.pyplot as plt
from matplotlib.ticker import LogLocator
import numpy as np
import os
import sys
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__license__ = "GPL"
__version__ = "1.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
#==============================================================================
# CLASSES
#==============================================================================
class WWZPlotter:
"""A class for plotting WWZ results."""
#--------------------------------------------------------------------------
def __init__(self, wwz, tunit=None):
"""A class for plotting WWZ results.
Parameters
----------
wwz : wwz.WWZ
A WWZ instance that is used for plotting.
Returns
-------
None.
"""
self.wwz = wwz
self.okay = True
if wwz.wwz is None:
print('Note: There is no WWZ transform data stored in this WWZ ' \
'instance. There will be nothing to plot.')
self.okay = False
if wwz.freq is not None:
# check if frequencies are linearly scaled:
freq = self.wwz.freq
df = np.diff(freq)
self.linear_freq = np.all(np.isclose(df, df.mean()))
self.fmin = freq.min()
self.fmax = freq.max()
# get periods and check if linearly scaled:
period = 1. / freq
dp = np.diff(period)
self.linear_period = np.all(np.isclose(dp, dp.mean()))
self.pmin = period.min()
self.pmax = period.max()
self.n_ybins = freq.size
else:
self.okay = False
if wwz.tau is not None:
self.tmin = wwz.tau.min()
self.tmax = wwz.tau.max()
else:
self.okay = False
if self.okay:
if self.linear_freq:
self.ymin = self.fmax
self.ymax = self.fmin
self.ymin_alt = self.pmax
self.ymax_alt = self.pmin
self.ylabel = f'Frequency [1/{tunit}]' \
if isinstance(tunit, str) else 'Frequency'
self.ylabel_alt = f'Period [{tunit}]' \
if isinstance(tunit, str) else 'Period'
elif self.linear_period:
self.ymin = self.pmin
self.ymax = self.pmax
self.ymin_alt = self.fmin
self.ymax_alt = self.fmax
self.ylabel = f'Period [{tunit}]' if isinstance(tunit, str) \
else 'Period'
self.ylabel_alt = f'Frequency [1/{tunit}]' \
if isinstance(tunit, str) else 'Frequency'
else:
self.ymin = 0
self.ymax = 1
self.ymin_alt = 1
self.ymax_alt = 0
self.ylabel = 'Non-linear scale'
self.ylabel_alt = 'Non-linear scale'
#--------------------------------------------------------------------------
def _select_map(self, select):
"""Helper method to select a map from a WWZ instance.
Parameters
----------
select : str
Select either 'wwz' or 'wwa'.
Raises
------
ValueError
Raised if 'select' is not one of the allowed options.
Returns
-------
result : numpy.ndarray
The selected WWZ or WWA array.
"""
# check that selection is allowed:
if select.lower() not in ['wwz', 'wwa']:
raise ValueError(f"'{select}' is not a valid selection.")
select = select.lower()
result = eval(f'self.wwz.{select}')
# check if result map is available:
if result is None:
print(f'No {select.upper()} transform available.')
result = result.transpose()
return result
#--------------------------------------------------------------------------
def plot_map(
self, select, ax=None, xlabel=None, **kwargs):
"""Plot the resulting map from a WWZ instance.
Parameters
----------
select : str
Select either 'wwz' or 'wwa'.
ax : matplotlib.pyplot.axis, optional
The axis to plot to. If None is given a new axis is crated. The
default is None.
xlabel : str, optional
The x-axis label. If None is provided no label is placed. The
default is None.
kwargs : dict, optional
Keyword arguments forwarded to the matplotlib.pyplot.imshow()
function.
Returns
-------
matplotlib.pyplot.axis
The axis to which the map was plotted.
matplotlib.image.AxesImage
The image.
"""
if not self.okay:
return None, None
# select result:
result = self._select_map(select)
if result is None:
return None, None
# create figure if needed:
if ax is None:
__, ax = plt.subplots(1)
# plot:
extent = [self.tmin, self.tmax, self.ymin, self.ymax]
im = ax.imshow(
result, origin='upper', aspect='auto', extent=extent,
**kwargs)
# add labels:
if xlabel:
ax.set_xlabel(xlabel)
ax.set_ylabel(self.ylabel)
return ax, im
#--------------------------------------------------------------------------
def plot_map_avg(
self, select, statistic='mean', ax=None, ylabel=False, **kwargs):
"""Vertically plot an average along the time axis of the transform map.
Parameters
----------
select : str
Select either 'wwz' or 'wwa'.
statistic : str, optional
Choose either 'mean' or 'median'. The default is 'mean'.
ax : matplotlib.pyplot.axis, optional
The axis to plot to. If None is given a new axis is crated. The
default is None.
ylabel : bool, optional
If True a label is added to the y-axis. The default is False.
**kwargs : dict
Keyword arguments forwarded to the matplotlib.pyplot.plot()
function.
Raises
------
ValueError
Raised if 'statistic' is not one of the allowed options.
Returns
-------
matplotlib.pyplot.axis
The axis to which the data was plotted.
"""
if not self.okay:
return None
# select result:
result = self._select_map(select)
if result is None:
return None, None
# calculate statistic:
if statistic not in ['mean', 'median']:
raise ValueError(f"'{statistic}' is not a valid statistic.")
elif statistic == 'median':
result_avg = np.median(result, axis=1)
else:
result_avg = np.mean(result, axis=1)
# create figure if needed:
if ax is None:
__, ax = plt.subplots(1)
# plot:
y = np.linspace(self.ymin, self.ymax, result_avg.size)
ax.plot(result_avg[::-1], y, **kwargs)
# add labels:
if ylabel:
ax.set_ylabel(self.ylabel)
ax.set_xlabel(f'{statistic.capitalize()} {select.upper()}')
return ax
#--------------------------------------------------------------------------
def plot_data(
self, ax=None, errorbars=True, xlabel=None, ylabel=None, **kwargs):
"""Plot the data stored in a WWZ instance.
Parameters
----------
ax : matplotlib.pyplot.axis, optional
The axis to plot to. If None is given a new axis is crated. The
default is None.
errorbars : bool, optional
If True errorbars are shown, if uncertainties were stored in the
WWZ instance. The default is True.
xlabel : str, optional
The x-axis description. If None is provided no label is printed.
The default is None.
ylabel : str, optional
The y-axis description. If None is provided no label is printed.
The default is None.
**kwargs : dict
Keyword arguments forwarded to the matplotlib.pyplot.errorbar()
function.
Returns
-------
matplotlib.pyplot.axis
The axis to which the data was plotted.
"""
# check if data is available:
if self.wwz.t is None:
print('No data available.')
return None
# create figure if needed:
if ax is None:
__, ax = plt.subplots(1)
# plot:
if errorbars and self.wwz.s_x is not None:
ax.errorbar(self.wwz.t, self.wwz.x, self.wwz.s_x, **kwargs)
else:
ax.plot(self.wwz.t, self.wwz.x, **kwargs)
# add labels:
if isinstance(xlabel, str):
ax.set_xlabel(xlabel)
if isinstance(ylabel, str):
ax.set_ylabel(ylabel)
return ax
#--------------------------------------------------------------------------
    def add_right_labels(self, ax):
        """Add ticks and labels to the right side of a plot showing the
        alternative unit, i.e. frequency if period is used on the left side and
        vice versa.

        Parameters
        ----------
        ax : matplotlib.pyplot.axis
            The axis whose twin (right-hand) axis is created and labelled.

        Returns
        -------
        ax2 : matplotlib.pyplot.axis
            The new axis to which the labels were added.
        """
        ax2 = ax.twinx()
        plt.setp(ax2.get_xticklabels(), visible=False)
        ax2.yaxis.set_label_position("right")
        ax2.yaxis.tick_right()
        ax2.set_ylim(self.ymin_alt, self.ymax_alt)
        # HACK: temporarily swap stderr for /dev/null around set_yscale --
        # per the original inline note this suppresses a warning emitted by
        # the reciprocal scale transform (presumably a divide-by-zero
        # warning at x == 0 -- TODO confirm).
        sys.stderr = open(os.devnull, "w") # silence stderr to supress warning
        # 1/x is its own inverse, so the same callable serves as both the
        # forward and inverse transform of the period <-> frequency scale.
        conversion = lambda x: 1/x
        ax2.set_yscale('function', functions=(conversion, conversion))
        sys.stderr = sys.__stderr__ # unsilence stderr
        ax2.yaxis.set_major_locator(LogLocator(subs='all'))
        ax2.set_ylabel(self.ylabel_alt)
        return ax2
#--------------------------------------------------------------------------
def plot(self, select, statistic='mean', errorbars=True,
peaks_quantile=None, xlabel=None, ylabel=None, figsize=None,
height_ratios=(2, 1), width_ratios=(5, 1), kwargs_map={},
kwargs_map_avg={}, kwargs_data={}, kwargs_peaks={}):
"""Plot the WWZ map, average, and data.
Parameters
----------
select : str
Select either 'wwz' or 'wwa'.
statistic : str, optional
Choose either 'mean' or 'median'. The default is 'mean'.
errorbars : bool, optional
If True errorbars are shown, if uncertainties were stored in the
WWZ instance. The default is True.
peaks_quantile : float, optional
If not None, a ridge line along the peak position is shown.
peaks_quantile needs to be a float between 0 and 1. Only peaks in
the quantile above this threshold are shown. The default is None.
xlabel : str, optional
The x-axis description. If None is provided no label is printed.
The default is None.
ylabel : str, optional
The y-axis description. If None is provided no label is printed.
The default is None.
figsize : tuple, optional
Set the figure size. The default is None.
height_ratios : tuple, optional
Set the size ratio between the top and bottom panel with two values
in a tuple. The default is (2, 1).
width_ratios : tuple, optional
Set the size ratio between the left and right panel with two values
in a tuple. The default is (5, 1).
kwargs_map : dict, optional
Keyword arguments forwarded to plotting the map. The default is {}.
kwargs_map_avg : dict, optional
Keyword arguments forwarded to plotting the map average. The
default is {}.
kwargs_data : dict, optional
Keyword arguments forwarded to plotting the data. The default is
{}.
kwargs_peaks : dict, optional
Keyword arguments forwarded to plotting the peak ridge lines. The
| |
'Trimming reverse {at} reads for sample {name} at depth {depth} to length {length}'
.format(at=analysistype,
name=sample.name,
depth=depth,
length=sample[read_type][depth][read_pair].reverse_reads.length))
if sample[read_type][depth][read_pair].reverse_reads.length != '0':
# Use the reformat method in the OLCTools bbtools wrapper to trim the reads
out, \
err, \
sample[read_type][depth][read_pair].reverse_reads.sample_call = bbtools \
.reformat_reads(forward_in=sample[read_type][depth][read_pair].reverse_reads.fastq,
reverse_in=None,
forward_out=sample[read_type][depth][read_pair].reverse_reads[fastq_type],
returncmd=True,
**{'ziplevel': '9',
'forcetrimright':
sample[read_type][depth][read_pair].reverse_reads.length,
'tossbrokenreads': 't',
'tossjunk': 't',
'Xmx': self.mem
})
# # Remove the untrimmed reads
# try:
# os.remove(sample[read_type][depth][read_pair].reverse_reads.fastq)
# except FileNotFoundError:
# pass
# Update the JSON file
self.write_json(sample)
    def read_quality_trim(self):
        """
        Perform quality trim, and toss reads below appropriate thresholds

        For every sample, and for every requested depth / read-length
        combination, runs bbduk (via the OLCTools bbtools wrapper) to
        quality-trim the raw FASTQ pair and discard reads shorter than the
        target forward read length. All derived paths are stored on the
        sample's GenObject tree and persisted with self.write_json().

        NOTE(review): indentation of this method was reconstructed from
        syntax; nesting assumed as written below -- confirm against upstream.
        """
        logging.info('Quality trim')
        for sample in self.metadata:
            # Root GenObject for all sampling-related outputs of this sample.
            sample.sampled_reads = GenObject()
            sample.sampled_reads.outputdir = os.path.join(sample.outputdir, 'sampled')
            sample.sampled_reads.trimmed_dir = os.path.join(sample.sampled_reads.outputdir, 'qualitytrimmed_reads')
            make_path(sample.sampled_reads.trimmed_dir)
            for depth in self.read_depths:
                # Create the depth GenObject
                setattr(sample.sampled_reads, depth, GenObject())
                # Set the depth and output directory attributes for the depth GenObject
                sample.sampled_reads[depth].depth = depth
                sample.sampled_reads[depth].depth_dir = os.path.join(sample.sampled_reads.outputdir, depth)
                # Create the output directory
                make_path(sample.sampled_reads[depth].depth_dir)
                for read_pair in self.read_lengths:
                    # Create the read_pair GenObject within the depth GenObject
                    setattr(sample.sampled_reads[depth], read_pair, GenObject())
                    # Set and create the output directory
                    sample.sampled_reads[depth][read_pair].outputdir = \
                        os.path.join(sample.sampled_reads[depth].depth_dir, read_pair)
                    make_path(sample.sampled_reads[depth][read_pair].outputdir)
                    # Create both forward_reads and reverse_reads sub-GenObjects
                    sample.sampled_reads[depth][read_pair].forward_reads = GenObject()
                    sample.sampled_reads[depth][read_pair].reverse_reads = GenObject()
                    sample.sampled_reads[depth][read_pair].trimmed_dir = \
                        os.path.join(sample.sampled_reads.trimmed_dir,
                                     read_pair)
                    make_path(sample.sampled_reads[depth][read_pair].trimmed_dir)
                    # Extract the forward and reverse reads lengths from the read_pair variable
                    # (read_pair looks like '<forward>_<reverse>', e.g. '50_150')
                    sample.sampled_reads[depth][read_pair].forward_reads.length, \
                        sample.sampled_reads[depth][read_pair].reverse_reads.length = read_pair.split('_')
                    logging.info('Performing quality trimming on reads from sample {name} at depth {depth} '
                                 'for minimum read length {forward}'
                                 .format(name=sample.name,
                                         depth=depth,
                                         forward=sample.sampled_reads[depth][read_pair].forward_reads.length))
                    # Set the attributes for the trimmed forward and reverse reads to use for subsampling
                    sample.sampled_reads[depth][read_pair].trimmed_forwardfastq = \
                        os.path.join(sample.sampled_reads[depth][read_pair].trimmed_dir,
                                     '{name}_{length}_R1.fastq.gz'
                                     .format(name=sample.name,
                                             length=sample.sampled_reads[depth][read_pair].forward_reads.length))
                    # NOTE(review): the R2 file name also uses the FORWARD
                    # length -- appears deliberate (pairs are named by the
                    # minimum trim length) but worth confirming.
                    sample.sampled_reads[depth][read_pair].trimmed_reversefastq = \
                        os.path.join(sample.sampled_reads[depth][read_pair].trimmed_dir,
                                     '{name}_{length}_R2.fastq.gz'
                                     .format(name=sample.name,
                                             length=sample.sampled_reads[depth][read_pair].forward_reads.length))
                    # Create the trimmed output directory attribute
                    sample.sampled_reads[depth][read_pair].sampled_trimmed_outputdir \
                        = os.path.join(sample.sampled_reads[depth][read_pair].outputdir,
                                       'sampled_trimmed')
                    # Set the name of the forward trimmed reads - include the depth and read length information
                    # This is set now, as the untrimmed files will be removed, and a check is necessary
                    sample.sampled_reads[depth][read_pair].forward_reads.trimmed_sampled_fastq = \
                        os.path.join(sample.sampled_reads[depth][read_pair].sampled_trimmed_outputdir,
                                     '{name}_sampled_{depth}_{read_pair}_R1.fastq.gz'
                                     .format(name=sample.name,
                                             depth=depth,
                                             read_pair=read_pair))
                    # Reverse reads
                    sample.sampled_reads[depth][read_pair].reverse_reads.trimmed_sampled_fastq = \
                        os.path.join(sample.sampled_reads[depth][read_pair].sampled_trimmed_outputdir,
                                     '{name}_sampled_{depth}_{read_pair}_R2.fastq.gz'
                                     .format(name=sample.name,
                                             depth=depth,
                                             read_pair=read_pair))
                    # Sample if the forward output file does not already exist
                    if not os.path.isfile(sample.sampled_reads[depth][read_pair].trimmed_forwardfastq) and \
                            not os.path.isfile(
                                sample.sampled_reads[depth][read_pair].forward_reads.trimmed_sampled_fastq):
                        out, \
                            err, \
                            sample.sampled_reads[depth][read_pair].sample_cmd = \
                            bbtools.bbduk_trim(forward_in=sample.forward_fastq,
                                               forward_out=sample.sampled_reads[depth][read_pair]
                                               .trimmed_forwardfastq,
                                               reverse_in=sample.reverse_fastq,
                                               reverse_out=sample.sampled_reads[depth][read_pair]
                                               .trimmed_reversefastq,
                                               minlength=sample.sampled_reads[depth][read_pair]
                                               .forward_reads.length,
                                               forcetrimleft=0,
                                               returncmd=True,
                                               **{'ziplevel': '9',
                                                  'Xmx': self.mem})
            # Update the JSON file
            # NOTE(review): write_json assumed to run once per sample, after
            # all depths/read pairs are processed -- confirm placement.
            self.write_json(sample)
    def sample_reads(self):
        """
        For each PacBio assembly, sample reads from corresponding FASTQ files for appropriate forward and reverse
        lengths and sequencing depths using reformat.sh from the bbtools suite

        Relies on read_quality_trim() having produced the quality-trimmed
        FASTQ pair, and on sample.simulated_reads[depth][read_pair].num_reads
        being set elsewhere (presumably by the read-simulation step -- TODO
        confirm). Results are persisted with self.write_json().

        NOTE(review): indentation of this method was reconstructed from
        syntax; nesting assumed as written below -- confirm against upstream.
        """
        logging.info('Read sampling')
        for sample in self.metadata:
            # Iterate through all the desired depths of coverage
            for depth in self.read_depths:
                for read_pair in self.read_lengths:
                    # Set the name of the output directory
                    sample.sampled_reads[depth][read_pair].sampled_outputdir \
                        = os.path.join(sample.sampled_reads[depth][read_pair].outputdir, 'sampled')
                    # Set the name of the forward reads - include the depth and read length information
                    sample.sampled_reads[depth][read_pair].forward_reads.fastq = \
                        os.path.join(sample.sampled_reads[depth][read_pair].sampled_outputdir,
                                     '{name}_{depth}_{read_pair}_R1.fastq.gz'
                                     .format(name=sample.name,
                                             depth=depth,
                                             read_pair=read_pair))
                    # Reverse reads
                    sample.sampled_reads[depth][read_pair].reverse_reads.fastq = \
                        os.path.join(sample.sampled_reads[depth][read_pair].sampled_outputdir,
                                     '{name}_{depth}_{read_pair}_R2.fastq.gz'
                                     .format(name=sample.name,
                                             depth=depth,
                                             read_pair=read_pair))
                    logging.info(
                        'Sampling {num_reads} paired reads for sample {name} with the following parameters:\n'
                        'depth {dp}, forward reads {fl}bp, and reverse reads {rl}bp'
                        .format(num_reads=sample.simulated_reads[depth][read_pair].num_reads,
                                dp=depth,
                                name=sample.name,
                                fl=sample.sampled_reads[depth][read_pair].forward_reads.length,
                                rl=sample.sampled_reads[depth][read_pair].reverse_reads.length))
                    # Use the reformat method in the OLCTools bbtools wrapper
                    # Note that upsample=t is used to ensure that the target number of reads (samplereadstarget) is met
                    if not os.path.isfile(sample.sampled_reads[depth][read_pair].forward_reads.trimmed_sampled_fastq):
                        out, \
                            err, \
                            sample.sampled_reads[depth][read_pair].sample_call = bbtools \
                            .reformat_reads(forward_in=sample.sampled_reads[depth][read_pair].trimmed_forwardfastq,
                                            reverse_in=sample.sampled_reads[depth][read_pair].trimmed_reversefastq,
                                            forward_out=sample.sampled_reads[depth][read_pair].forward_reads.fastq,
                                            reverse_out=sample.sampled_reads[depth][read_pair].reverse_reads.fastq,
                                            returncmd=True,
                                            **{'samplereadstarget': sample.simulated_reads[depth][read_pair].num_reads,
                                               'upsample': 't',
                                               'minlength':
                                                   sample.sampled_reads[depth][read_pair].forward_reads.length,
                                               'ziplevel': '9',
                                               'tossbrokenreads': 't',
                                               'tossjunk': 't',
                                               'Xmx': self.mem
                                               }
                                            )
                    # # Remove the trimmed reads, as they are no longer necessary
                    # try:
                    #     os.remove(sample.sampled_reads[depth][read_pair].trimmed_forwardfastq)
                    #     os.remove(sample.sampled_reads[depth][read_pair].trimmed_reversefastq)
                    # except FileNotFoundError:
                    #     pass
            # Update the JSON file
            # NOTE(review): write_json assumed to run once per sample, after
            # all depths/read pairs are processed -- confirm placement.
            self.write_json(sample)
def link_reads(self, analysistype):
    """
    Create folders with relative symlinks to the desired simulated/sampled reads. These folders will contain all
    the reads created for each sample, and will be processed with GeneSippr and COWBAT pipelines
    :param analysistype: Current analysis type. Will either be 'simulated' or 'sampled'
    """
    logging.info('Linking {at} reads'.format(at=analysistype))
    for sample in self.metadata:
        # Create the output directories for both downstream pipelines
        genesippr_dir = os.path.join(self.path, 'genesippr', sample.name)
        sample.genesippr_dir = genesippr_dir
        make_path(genesippr_dir)
        cowbat_dir = os.path.join(self.path, 'cowbat', sample.name)
        sample.cowbat_dir = cowbat_dir
        make_path(cowbat_dir)
        # Iterate through all the desired depths of coverage
        for depth in self.read_depths:
            for read_pair in self.read_lengths:
                # Create variables using the analysis type. These will be used in setting GenObject attributes
                # e.g. sample.simulated_reads / sample.sampled_reads via dictionary-style access
                read_type = '{at}_reads'.format(at=analysistype)
                fastq_type = 'trimmed_{at}_fastq'.format(at=analysistype)
                # Link reads to both output directories
                for output_dir in [genesippr_dir, cowbat_dir]:
                    # If the original reads are shorter than the specified read length, the FASTQ files will exist,
                    # but will be empty. Do not create links for these files
                    # NOTE(review): 20 bytes presumably distinguishes an empty gzip stream - confirm threshold
                    size = os.path.getsize(sample[read_type][depth][read_pair].forward_reads[fastq_type])
                    if size > 20:
                        # Create relative symlinks to the FASTQ files - use the relative path from the desired
                        # output directory to the read storage path e.g.
                        # ../../2013-SEQ-0072/simulated/40/50_150/simulated_trimmed/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
                        # is the relative path to the output_dir. The link name is the base name of the reads
                        # joined to the desired output directory e.g.
                        # output_dir/2013-SEQ-0072/2013-SEQ-0072_simulated_40_50_150_R1.fastq.gz
                        relative_symlink(sample[read_type][depth][read_pair].forward_reads[fastq_type],
                                         output_dir)
                        # Original FASTQ files
                        # NOTE(review): these links are re-created for every depth/read_pair combination;
                        # presumably relative_symlink tolerates existing links - confirm
                        relative_symlink(sample.forward_fastq,
                                         output_dir)
                        relative_symlink(sample.reverse_fastq,
                                         output_dir)
                    # Reverse reads - may legitimately be absent, hence the FileNotFoundError guard
                    try:
                        size = os.path.getsize(sample[read_type][depth][read_pair].reverse_reads[fastq_type])
                        if size > 20:
                            relative_symlink(sample[read_type][depth][read_pair].reverse_reads[fastq_type],
                                             output_dir)
                    except FileNotFoundError:
                        pass
def run_genesippr(self):
    """
    Run GeneSippr on each of the samples by writing and executing a per-sample
    shell script that activates the 'sipprverse' conda environment first.
    """
    from pathlib import Path
    home = str(Path.home())
    logging.info('GeneSippr')
    # These unfortunate hard coded paths appear to be necessary
    miniconda_path = os.path.join(home, 'miniconda3')
    # Fall back to the legacy 'miniconda' directory name if 'miniconda3' is absent
    miniconda_path = miniconda_path if os.path.isdir(miniconda_path) else os.path.join(home, 'miniconda')
    logging.debug(miniconda_path)
    activate = 'source {mp}/bin/activate {mp}/envs/sipprverse'.format(mp=miniconda_path)
    sippr_path = '{mp}/envs/sipprverse/bin/sippr.py'.format(mp=miniconda_path)
    for sample in self.metadata:
        logging.info(sample.name)
        # Only run the pipeline if the genesippr.csv report (created last) doesn't already exist
        if not os.path.isfile(os.path.join(sample.genesippr_dir, 'reports', 'genesippr.csv')):
            cmd = 'python {py_path} -o {outpath} -s {seqpath} -r {refpath} -F'\
                .format(py_path=sippr_path,
                        outpath=sample.genesippr_dir,
                        seqpath=sample.genesippr_dir,
                        refpath=self.referencefilepath
                        )
            # NOTE(review): CRITICAL level for a routine command echo - consider DEBUG/INFO
            logging.critical(cmd)
            # Create another shell script to execute within the conda environment
            template = "#!/bin/bash\n{activate} && {cmd}".format(activate=activate,
                                                                 cmd=cmd)
            genesippr_script = os.path.join(sample.genesippr_dir, 'run_genesippr.sh')
            with open(genesippr_script, 'w+') as file:
                file.write(template)
            # Modify the permissions of the script to allow it to be run on the node
            self.make_executable(genesippr_script)
            # Run shell script
            # NOTE(review): the script path is interpolated unquoted - paths with spaces would break
            os.system('/bin/bash {}'.format(genesippr_script))
            # quit()
def parse_genesippr(self):
    """
    Parse every CSV report produced by GeneSippr for each sample into a nested
    dictionary keyed by row index, then extract the strain name components.

    NOTE(review): the method currently only prints the parsed strain fields;
    it looks unfinished - nothing is stored on the sample objects.
    """
    import pandas
    for sample in self.metadata:
        sample.genesippr_reports = sorted(glob(os.path.join(sample.genesippr_dir, 'reports', '*.csv')))
        for report in sample.genesippr_reports:
            # Extract the analysis type from the report name
            report_name = os.path.splitext(os.path.basename(report))[0]
            # A dictionary to store the parsed CSV file in a more readable format
            nesteddictionary = dict()
            # Use pandas to read in the CSV file, and subsequently convert the pandas data frame to a dictionary
            # (.to_dict()).
            dictionary = pandas.read_csv(report).to_dict()
            # Iterate through the dictionary - each header from the CSV file
            for header in dictionary:
                # primary_key is the primary key, and value is the value of the cell for that
                # primary key + header combination
                for primary_key, value in dictionary[header].items():
                    # Update the dictionary with the new data
                    try:
                        nesteddictionary[primary_key].update({header: value})
                    # Create the nested dictionary if it hasn't been created yet
                    except KeyError:
                        nesteddictionary[primary_key] = dict()
                        nesteddictionary[primary_key].update({header: value})
            # Carry the most recent non-empty strain name forward across rows
            strain = str()
            for name, value in nesteddictionary.items():
                # As strain name is not printed on every line, it is entered as 'nan' by pandas. This is a float.
                if type(value['Strain']) is not float:
                    strain = value['Strain']
                # Find the 'original' sample: simulated/sampled names are underscore-delimited
                if len(strain.split('_')) > 1:
                    strain, analysis_type, depth, forward_length, reverse_length = strain.split('_')
                    print(strain, analysis_type, depth, forward_length, reverse_length)
                else:
                    print(strain)
@staticmethod
def make_executable(path):
"""
Takes a shell script and makes it executable (chmod +x)
:param path: path to shell script
"""
mode = os.stat(path).st_mode
mode |= (mode & 0o444) >> 2
os.chmod(path, mode)
# def run_cowbat(self):
# """
# Run COWBAT on all the samples
# """
# logging.info('COWBAT')
# # Create a MetadataObject to spoof ArgumentParser supplied arguments
# args = MetadataObject()
# args.referencefilepath = self.referencefilepath
# args.numreads = 2
# args.preprocess = False
# args.startingtime = self.start
# args.customsamplesheet = False
# args.threads = multiprocessing.cpu_count() - 1
# args.commit = b''
# args.homepath = ''
# | |
if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(device, 'Device')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Device', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update_device.metadata = {'url': '/devices/{id}'}
def delete_device(
        self, id, if_match=None, custom_headers=None, raw=False, **operation_config):
    """Delete the identity of a device from the identity registry of an IoT
    hub.

    The delete is conditional: If-Match must carry the ETag of the device
    identity for optimistic concurrency, or the wildcard character (*) to
    force an unconditional delete.

    :param id: Device ID.
    :type id: str
    :param if_match: ETag of the device identity, or '*'.
    :type if_match: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Resolve the URL template for this operation
    url = self._client.format_url(
        self.delete_device.metadata['url'],
        id=self._serialize.url("id", id, 'str'))
    # Query string carries only the service API version
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    # Assemble request headers
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')
    # Issue the DELETE; the service answers 204 No Content on success
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    if response.status_code != 204:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error
    if raw:
        return ClientRawResponse(None, response)
delete_device.metadata = {'url': '/devices/{id}'}
def apply_configuration_on_device(
        self, id, content, custom_headers=None, raw=False, **operation_config):
    """Applies the provided configuration content to the specified device.

    Configuration content must have modules content.

    :param id: Device ID.
    :type id: str
    :param content: Configuration to apply.
    :type content: ~service.models.ConfigurationContent
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: object or ClientRawResponse if raw=true
    :rtype: object or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Resolve the URL template for this operation
    url = self._client.format_url(
        self.apply_configuration_on_device.metadata['url'],
        id=self._serialize.url("id", id, 'str'))
    # Query string carries only the service API version
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    # Assemble request headers
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')
    # Serialize the configuration payload
    body_content = self._serialize.body(content, 'ConfigurationContent')
    # POST the configuration; 200 returns a body, 204 does not
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    if response.status_code not in (200, 204):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error
    deserialized = self._deserialize('object', response) if response.status_code == 200 else None
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
apply_configuration_on_device.metadata = {'url': '/devices/{id}/applyConfigurationContent'}
def create_job(
        self, job_properties, custom_headers=None, raw=False, **operation_config):
    """Create a new job on an IoT hub.

    See
    https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities
    for more information.

    :param job_properties: Properties of the job to create.
    :type job_properties: ~service.models.JobProperties
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: JobProperties or ClientRawResponse if raw=true
    :rtype: ~service.models.JobProperties or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # No path parameters for this operation - use the template directly
    url = self.create_job.metadata['url']
    # Query string carries only the service API version
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    # Assemble request headers
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')
    # Serialize the job description
    body_content = self._serialize.body(job_properties, 'JobProperties')
    # POST the job; only 200 is a success
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error
    deserialized = self._deserialize('JobProperties', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
create_job.metadata = {'url': '/jobs/create'}
def get_jobs(
        self, custom_headers=None, raw=False, **operation_config):
    """Gets the status of all jobs in an iot hub.

    See
    https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities
    for more information.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~service.models.JobProperties] or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # No path parameters for this operation - use the template directly
    url = self.get_jobs.metadata['url']
    # Query string carries only the service API version
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    # Assemble request headers
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')
    # GET the job list; only 200 is a success
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error
    deserialized = self._deserialize('[JobProperties]', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get_jobs.metadata = {'url': '/jobs'}
def get_job(
        self, id, custom_headers=None, raw=False, **operation_config):
    """Gets the status of job in an iot hub.

    See
    https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities
    for more information.

    :param id: Job ID.
    :type id: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: JobProperties or ClientRawResponse if raw=true
    :rtype: ~service.models.JobProperties or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Resolve the URL template for this operation
    url = self._client.format_url(
        self.get_job.metadata['url'],
        id=self._serialize.url("id", id, 'str'))
    # Query string carries only the service API version
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    # Assemble request headers
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')
    # GET the job status; only 200 is a success
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error
    deserialized = self._deserialize('JobProperties', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
get_job.metadata = {'url': '/jobs/{id}'}
def cancel_job(
        self, id, custom_headers=None, raw=False, **operation_config):
    """Cancels job in an IoT hub.

    See
    https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities
    for more information.

    :param id: Job ID.
    :type id: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: object or ClientRawResponse if raw=true
    :rtype: object or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Resolve the URL template for this operation
    url = self._client.format_url(
        self.cancel_job.metadata['url'],
        id=self._serialize.url("id", id, 'str'))
    # Query string carries only the service API version
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    # Assemble request headers
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')
    # DELETE cancels the job; 200 returns a body, 204 does not
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    if response.status_code not in (200, 204):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error
    deserialized = self._deserialize('object', response) if response.status_code == 200 else None
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
cancel_job.metadata = | |
<filename>ospy/sensors.py
# -*- coding: utf-8 -*-
__author__ = u'<NAME>'
# System imports
from threading import Thread, Timer
import traceback
import logging
import traceback
import time
import datetime
import subprocess
import os
import json
# Local imports
from ospy.options import options, rain_blocks, program_level_adjustments
from ospy.helpers import now, password_hash, datetime_string, mkdir_p
from ospy.log import log, logEM
from ospy.programs import programs
from ospy.stations import stations
from ospy.scheduler import predicted_schedule, combined_schedule
### Sensors ###
class _Sensor(object):
    """A single sensor definition persisted through the options store.

    Every public attribute assigned in __init__ forms the sensor's stored
    schema: __setattr__ saves each public attribute change via options.save,
    and options.load at the end of __init__ restores any persisted values.
    """
    # Attributes that must never be written to the options store
    SAVE_EXCLUDE = ['SAVE_EXCLUDE', 'index', '_sensors']
    def __init__(self, sensors_instance, index):
        # Parent container (_Sensors); leading underscore keeps it out of the store
        self._sensors = sensors_instance
        self.name = ""   # sensor name
        self.encrypt = password_hash(str(now()), 'notarandomstring')[:16]  # sensor security encrypted code
        self.enabled = 0  # sensor enable or disable
        self.sens_type = 0  # selector sensor type: 0-5 'None', 'Dry Contact', 'Leak Detector', 'Moisture', 'Motion', 'Temperature', 'Multi'
        self.com_type = 0  # selector sensor communication type 0-1: 'Wi-Fi/LAN', 'Radio'
        self.multi_type = 0  # selector multi type 0-9: 'Temperature DS1, DS2, DS3, DS4', 'Dry Contact', 'Leak Detector', 'Moisture', 'Motion', 'Ultrasonic', 'Soil moisture'
        self.notes = ""  # notes for sensor
        self.log_samples = 0  # log samples
        self.last_log_samples = now()  # last log samples millis
        self.log_event = 0  # log event
        self.send_email = 0  # send e-mail
        self.sample_rate = 60  # sample rate
        self.last_read_value = [""]*10  # last read value (actual)
        self.prev_read_value = -127  # prev read value
        self.sensitivity = 0  # sensitivity
        self.stabilization_time = 5  # stabilization time
        self.liter_per_pulses = 0.5  # leak detector (xx liter/one pulses from detector)
        self.trigger_low_program = ["-1"]  # open program (-1 is default none program)
        self.trigger_high_program = ["-1"]  # close Program
        self.trigger_low_threshold = "10"  # low threshold
        self.trigger_high_threshold = "30"  # high threshold
        self.ip_address = [0,0,0,0]  # ip address for sensor
        self.mac_address = ""  # mac address for sensor
        self.last_battery = ""  # battery voltage
        self.rssi = ""  # rssi signal
        self.radio_id = 0  # radio id
        self.response = 0  # response 0 = offline, 1 = online
        self.fw = 0  # sensor firmware (ex: 100 is 1.00)
        self.last_response = 0  # last response (last now time when the sensor sent data)
        self.last_response_datetime = ""  # last response (datetime string)
        self.last_low_report = now()  # now in moisture, temperature
        self.last_good_report = now()  # now in moisture, temperature
        self.last_high_report = 0  # now in moisture, temperature
        self.show_in_footer = 1  # show sensor data in footer on home page
        self.cpu_core = 0  # 0 = ESP32, 1 = ESP8266, 2 = todo
        self.used_stations_one = ["-1"]  # Selected stations for the scheduler will stop in dry open contact
        self.used_stations_two = ["-1"]  # Selected stations for the scheduler will stop in dry close contact
        # used in ultrasonic sensor
        self.distance_top = 10  # The distance from the sensor to the maximum water level in the tank in cm
        self.distance_bottom = 95  # The distance from the sensor to the minimum water level in the tank in cm
        self.water_minimum = 10  # The water level from the bottom to the minimum water level in the tank
        self.diameter = 100  # Cylinder diameter for volume calculation in cm
        self.check_liters = 0  # Display as liters or m3
        self.use_stop = 0  # Stop stations if minimum water level
        self.use_water_stop = 0  # If the level sensor fails, the above selected stations in the scheduler will stop
        self.enable_reg = 0  # If checked regulation is enabled
        self.used_stations = ["-1"]  # Selected stations for the scheduler will stop in ultrasonic sensor
        self.reg_max = 100  # If the measured water level exceeds this set value, the output is activated
        self.reg_mm = 60  # Maximum run time in activate min
        self.reg_ss = 0  # Maximum run time in activate sec
        self.reg_min = 90  # If the measured water level falls below this set value, the output is deactivated
        self.reg_output = 0  # Select Output for regulation
        self.delay_duration = 0  # rain delay if water is not in tank (water minimum)
        self.aux_mini = 1  # Auxiliary value true and false for triggering events only when changed (water minimum)
        self.aux_reg_u = 1  # Auxiliary value true and false for triggering events only when changed (regulation >)
        self.aux_reg_d = 1  # Auxiliary value true and false for triggering events only when changed (regulation <)
        self.aux_reg_p = 1  # Auxiliary value true and false for triggering events only when changed (probe fault)
        # used in soil moisture sensor
        self.soil_last_read_value = [""]*17  # last soil read value (actual)
        self.soil_prev_read_value = [-127]*17  # prev soil read value
        self.soil_calibration_min = [3.00]*17  # calibration for soil probe (0 %)
        self.soil_calibration_max = [0.00]*17  # calibration for soil probe (100 %)
        self.soil_invert_probe_in = [1]*17  # 1= inverted probe type (ex: 100 % = 0V, 0% = 3,3V)
        self.soil_program = ["-1"]*17  # program for soil moisture
        self.soil_probe_label = [_(u'Probe')]*17  # label for soil moisture probe
        # for events log and email
        self.last_msg = [0]*10
        self.err_msg = [0]*10
        # custom event naming
        self.dry_open_msg = _(u'Open Contact')  # dry contact open
        self.dry_clos_msg = _(u'Closed Contact')  # dry contact closed
        self.motion_msg = _(u'Motion Detected')  # motion
        self.no_motion_msg = _(u'No Motion')  # no motion
        # Restore any previously persisted values over the defaults above
        options.load(self, index)
    @property
    def index(self):
        """Position of this sensor in the parent container, or -1 if it has
        not been added to the container yet."""
        try:
            return self._sensors.get().index(self)
        except ValueError:
            return -1
    def __setattr__(self, key, value):
        """Set the attribute and persist it, unless it is private or excluded.

        During construction the sensor is not yet in the container, so
        options.save may raise ValueError; that is logged and ignored.
        """
        try:
            super(_Sensor, self).__setattr__(key, value)
            if not key.startswith('_') and key not in self.SAVE_EXCLUDE:
                options.save(self, self.index)
        except ValueError:  # No index available yet
            logging.debug(traceback.format_exc())
            pass
class _Sensors(object):
    """Container for all configured _Sensor instances.

    Loads persisted sensors from the options store on construction and keeps
    the stored indices consistent as sensors are added and removed. All
    operations are best-effort: failures are logged, never raised.
    """
    def __init__(self):
        self._sensors = []
        self._loading = True
        options.load(self)
        self._loading = False
        try:
            logging.debug(_(u'Loading sensors...'))
            i = 0
            while options.available(_Sensor, i):
                self._sensors.append(_Sensor(self, i))
                i += 1
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt
        except Exception:
            logging.debug(traceback.format_exc())

    def add_sensors(self, sensor=None):
        """Append *sensor* (creating a fresh one if None) and persist it."""
        try:
            if sensor is None:
                sensor = _Sensor(self, len(self._sensors))
            self._sensors.append(sensor)
            options.save(sensor, sensor.index)
            logging.debug(_(u'Adding new sensor: {} with id: {}').format(sensor.name,sensor.index))
        except Exception:
            logging.debug(traceback.format_exc())

    def create_sensors(self):
        """Returns a new sensor, but doesn't add it to the list."""
        try:
            # Negative index marks the sensor as not-yet-added
            return _Sensor(self, -1-len(self._sensors))
        except Exception:
            logging.debug(traceback.format_exc())

    def remove_sensors(self, index):
        """Delete the sensor at *index* and re-number the remaining sensors."""
        try:
            if 0 <= index < len(self._sensors):
                del self._sensors[index]
                for i in range(index, len(self._sensors)):
                    options.save(self._sensors[i], i)  # Save sensor using new indices
                options.erase(_Sensor, len(self._sensors))  # Remove info in last index
                logging.debug(_(u'Removing sensor id: {}').format(index))
        except Exception:
            logging.debug(traceback.format_exc())

    def count(self):
        """Return the number of sensors."""
        return len(self._sensors)

    def get(self, index=None):
        """Return a copy of the sensor list, or the sensor at *index*.

        Returns None (implicitly) on an invalid index, matching historic
        behaviour.
        """
        try:
            if index is None:
                result = self._sensors[:]
            else:
                result = self._sensors[index]
            return result
        except Exception:
            logging.debug(traceback.format_exc())

    def __setattr__(self, key, value):
        # Persist public attribute changes once initial loading has completed
        super(_Sensors, self).__setattr__(key, value)
        if not key.startswith('_') and not self._loading:
            options.save(self)

    __getitem__ = get
# Module-level singleton holding all configured sensors; import this, not _Sensors.
sensors = _Sensors()
### Timing loop for sensors ###
class _Sensors_Timer(Thread):
def __init__(self):
    """Initialise the sensor timing thread with an empty status list."""
    super(_Sensors_Timer, self).__init__()
    # Daemon thread: never blocks interpreter shutdown
    self.daemon = True
    # Published (name, message, button) status tuples
    self.status = []
    # Countdown used by _sleep(); update() zeroes it to wake the loop early
    self._sleep_time = 0
def start_status(self, name, msg, btn):
    """Register (or refresh) the status entry identified by *name*.

    Any existing entries containing *name* are removed before the new
    (name, msg, btn) tuple is appended.

    Fixes two defects in the original indexed loop: it stopped at
    len-1 (so a match in the last slot was never removed) and it deleted
    items while iterating by index (skipping the element after each hit).
    """
    try:
        # Rebuild without stale entries for this name, then append the new one
        self.status = [entry for entry in self.status if name not in entry]
        self.status.append((name, msg, btn))
    except Exception:
        pass
def stop_status(self, name):
    """Remove every status entry containing *name*.

    Fixes the original delete-while-index-iterating loop, which skipped
    the element following each deletion and could raise IndexError
    (silently masked by the bare except).
    """
    try:
        self.status = [entry for entry in self.status if name not in entry]
    except Exception:
        pass
def read_status(self):
    """Return the list of current (name, msg, btn) status tuples."""
    return self.status
def len_status(self):
    """Return the number of status entries."""
    return int(len(self.status))
def update(self):
    """Wake the timer loop early by zeroing the sleep countdown."""
    self._sleep_time = 0
def _sleep(self, secs):
    """Sleep for *secs* seconds in 1 s steps; update() can cut it short."""
    self._sleep_time = secs
    while self._sleep_time > 0:
        time.sleep(1)
        self._sleep_time -= 1
def _try_send_mail(self, text, logtext, attachment=None, subject=None):
    """Best-effort e-mail via the optional email_notifications plug-in.

    The plug-in is imported lazily so OSPy still runs when it is absent;
    any failure is only logged, never raised.
    """
    try:
        from plugins.email_notifications import try_mail
        try_mail(text, logtext, attachment, subject)
    except:
        log.debug(u'sensors.py', _(u'E-mail not send! The Email Notifications plug-in is not found in OSPy or not correctly setuped.'))
        pass
def _read_log(self, dir_name):
try:
with open(dir_name) as logf:
return json.load(logf)
except IOError:
return []
def _write_log(self, dir_name, data):
try:
with open(dir_name, 'w') as outfile:
json.dump(data, outfile)
except Exception:
logging.debug(traceback.format_exc())
def _check_high_trigger(self, sensor):
    """Classify a high-threshold report for *sensor*.

    Returns (major_change, status_update):
    * major_change is False when an older low or good report precedes the
      last high report (high is already the latest known state).
    * status_update is False when the state is unchanged and the last high
      report is less than 3600 s old (report throttling).
    """
    major_change = True
    status_update = True
    # NOTE(review): this combines the comparisons with `or`, while
    # _check_low_trigger uses `and` for the mirrored test - confirm the
    # asymmetry is intentional.
    if int(sensor.last_low_report) < int(sensor.last_high_report) or int(sensor.last_good_report) < int(sensor.last_high_report):
        major_change = False
    # 3600 s: presumably a one-hour rate limit on repeated reports
    if not major_change and int(now()) - int(sensor.last_high_report) < 3600:
        status_update = False
    return (major_change, status_update)
def _check_low_trigger(self, sensor):
    """Classify a low-threshold report for *sensor*.

    Returns (major_change, status_update):
    * major_change is False when both the high and good reports precede the
      last low report (low is already the latest known state).
    * status_update is False when the state is unchanged and the last low
      report is less than 3600 s old (report throttling).
    """
    major_change = True
    status_update = True
    # NOTE(review): uses `and` here but _check_high_trigger uses `or` for
    # the mirrored comparison - confirm which is intended.
    if int(sensor.last_high_report) < int(sensor.last_low_report) and int(sensor.last_good_report) < int(sensor.last_low_report):
        major_change = False
    # 3600 s: presumably a one-hour rate limit on repeated reports
    if not major_change and int(now()) - int(sensor.last_low_report) < 3600:
        status_update = False
    return (major_change, status_update)
def _check_good_trigger(self, sensor):
major_change = True
status_update = True
if int(sensor.last_low_report) < int(sensor.last_good_report) and int(sensor.last_low_report) < int(sensor.last_good_report):
major_change = False
if not major_change and int(now()) - int(sensor.last_good_report) < 3600:
status_update = False
if int(sensor.last_low_report) == 0 and int(sensor.last_high_report) == 0:
major_change = False # if no | |
<gh_stars>1-10
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import sys
import ast
import configparser
from scipy import interpolate, integrate
from scipy.signal import argrelextrema
from scipy.stats import linregress
from scipy.optimize import curve_fit
from astropy import units, constants
# kepler_grids
from pyburst.burst_analyser import burst_tools
from pyburst.grids import grid_tools, grid_strings
from pyburst.kepler import kepler_tools
from pyburst.kepler import kepler_plot
from pyburst.physics import accretion
plt.rc('text', usetex=False)
plt.rc('font', family='serif')
# TODO:
# - Generalise to non-batch organised models
# - param description docstring
# - Add print statements to functions
class NoBursts(Exception):
    """Raised when burst analysis finds no bursts for a model run."""
    pass
class NoDumps(Exception):
    """Raised when no dump files are available for a model run."""
    pass
class BurstRun(object):
    def __init__(self, run, batch, source, verbose=True, basename='xrb',
                 reload=False, save_lum=True, analyse=True, plot=False,
                 exclude_outliers=True, exclude_short_wait=True, load_lum=True,
                 load_bursts=False, load_summary=False, try_mkdir_plots=False,
                 load_dumps=False, set_paramaters=None, auto_discard=False,
                 get_slopes=False, load_model_params=True, truncate_edd=True,
                 check_stable_burning=True, quick_discard=True,
                 check_lumfile_monotonic=True, remove_zero_lum=True,
                 subtract_background_lum=True, load_config=True,
                 get_tail_timescales=False, fit_tail_power_law=False):
        """Burst analysis for a single kepler model run.

        parameters
        ----------
        run : int or str
            model id within the batch (used to build file paths/strings)
        batch : int or str
            id of the batch the model belongs to
        source : str
            name of the model grid (selects paths and config file)
        basename : str
            filename prefix of kepler output files
        set_paramaters : dict or None
            overrides for entries of self.parameters.
            NOTE(review): the name is misspelled ('paramaters') but is part
            of the public signature -- renaming would break existing callers.

        The remaining boolean keywords toggle loading steps (load_lum,
        load_bursts, load_summary, load_dumps, load_config,
        load_model_params), analysis behaviour (analyse, auto_discard,
        quick_discard, get_slopes, truncate_edd, exclude_outliers,
        exclude_short_wait, check_stable_burning, check_lumfile_monotonic,
        remove_zero_lum, subtract_background_lum, get_tail_timescales,
        fit_tail_power_law), and output (verbose, plot, save_lum,
        try_mkdir_plots).
        """
        # status flags, toggled as loading/analysis progresses
        self.flags = {'lum_loaded': False,
                      'lum_does_not_exist': False,
                      'dumps_loaded': False,
                      'analysed': False,
                      'too_few_bursts': False,
                      'short_waits': False,
                      'outliers': False,
                      'regress_too_few_bursts': False,
                      'converged': False,
                      'spikes': False,
                      'zeros': False,
                      'calculated_slopes': False,
                      'super_eddington': False,
                      'stable_burning': False,
                      }
        # TODO: move these into default config file
        self.options = {'verbose': verbose,
                        'reload': reload,
                        'save_lum': save_lum,
                        'exclude_outliers': exclude_outliers,
                        'exclude_short_wait': exclude_short_wait,
                        'try_mkdir_plots': try_mkdir_plots,
                        'auto_discard': auto_discard,
                        'get_slopes': get_slopes,
                        'load_model_params': load_model_params,
                        'truncate_edd': truncate_edd,
                        'check_stable_burning': check_stable_burning,
                        'quick_discard': quick_discard,
                        'check_lumfile_monotonic': check_lumfile_monotonic,
                        'subtract_background_lum': subtract_background_lum,
                        'get_tail_timescales': get_tail_timescales,
                        'fit_tail_power_law': fit_tail_power_law,
                        }
        self.check_options()
        # tunable analysis parameters (may be overridden by config or
        # set_paramaters below)
        self.parameters = {'lum_cutoff': 1e37,  # luminosity cutoff for burst detection
                           'spike_frac': 2.0,  # lum factor that spikes exceed neighbours by
                           'zero_replacement': 1e35,  # zero lums set to this
                           'maxima_radius': 60,  # bursts are largest maxima within (sec)
                           'pre_time': 60,  # look for burst rise within sec before peak
                           'start_frac': 0.25,  # burst start as frac of peak lum above lum_pre
                           'peak_frac': 2,  # peak must be larger than pre_lum by this frac
                           'end_frac': 1e-5,  # burst end lum is this frac of peak lum
                           'min_length': 5,  # min time between burst peak and end (sec)
                           'short_wait_frac': 0.5,  # short_waits below frac of following dt
                           'min_discard': 2,  # min num of bursts to discard
                           'target_discard': 10,  # no. bursts to attempt to discard, but fall back on min_discard
                           'min_bursts': 10,  # min no. bursts (after discards) to calculate mean properties
                           'min_regress': 20,  # min num of bursts to do linear regression
                           'n_bimodal': 20,  # n_bursts to check for bimodality
                           'bimodal_sigma': 3,  # number of std's modes are separated by
                           'outlier_bprops': ('dt', 'fluence', 'peak'),  # bprops to check
                           'outlier_distance': 3.,  # fraction of IQR above Q3
                           'dump_time_offset': 0.0,  # time offset (s) from burst start
                           'dump_time_min': 1,  # min time (s) between t_start and dump time
                           'min_rise_steps': 5,  # min time steps between t_pre and t_peak
                           'stable_dt': 5,  # no. of dt's from last burst to end of model to flag stable burning
                           'short_wait_dt': 45,  # threshold for short-wait bursts (minutes)
                           'spike_radius_t': 0.1,  # radius in s around maxima to check for spike conditions
                           't_buffer': 300,  # time buffer (s) from start/end of model to ignore
                           'mdot_edd': 1.75e-8,  # reference Eddington accretion rate (Msun/yr)
                           'tail_fit_start': 2,  # time (s) after t_tail_start to begin fitting power law
                           'tail_fit_stop': 60,  # time (s) after tail_fit_start to fit power law for
                           }
        self.overwrite_parameters(set_paramaters)
        # plot colours keyed by feature type
        self.colours = {'bursts': 'C1',
                        'candidates': 'C3',
                        'outliers': 'C9',
                        'short_waits': 'C4',
                        'burst_stages': 'C2',
                        'spikes': 'C3',
                        'dumps': 'C3',
                        }
        # column names of the per-burst properties table (self.bursts)
        self.cols = ['n', 'dt', 'rate', 'fluence', 'peak', 'length', 't_peak', 't_peak_i',
                     't_pre', 't_pre_i', 'lum_pre', 't_start', 't_start_i',
                     'lum_start', 't_end', 't_end_i', 'lum_end',
                     't_tail_start', 't_tail_start_i', 'tail_50', 'tail_25', 'tail_10',
                     'slope_dt', 'slope_dt_err', 'slope_fluence', 'slope_fluence_err',
                     'slope_peak', 'slope_peak_err', 'short_wait', 'outlier',
                     'dump_start', 'acc_mass', 'qnuc',
                     'tail_index', 'tail_b', 'tail_c']
        # filesystem locations derived from (run, batch, source)
        self.paths = {'batch_models': grid_strings.get_batch_models_path(batch, source),
                      'source': grid_strings.get_source_path(source),
                      'analysis': grid_strings.batch_analysis_path(batch, source),
                      'plots': grid_strings.get_source_subdir(source, 'plots'),
                      'lightcurves': grid_strings.model_lightcurves_path(run, batch, source)
                      }
        self.config = None
        self.run = run
        self.batch = batch
        self.source = source
        self.basename = basename
        self.run_str = grid_strings.get_run_string(run, basename)
        self.batch_str = grid_strings.get_batch_string(batch, source)
        self.model_str = grid_strings.get_model_string(run, batch, source)
        # lightcurve data: raw lum array, interpolator, working copy, Edd limit
        self.lum = None
        self.lumf = None
        self.new_lum = None
        self.l_edd = None
        self.model_params = None
        self.load_bursts = load_bursts
        self.load_summary = load_summary
        self.load_dumps = load_dumps
        self.bursts = pd.DataFrame(columns=self.cols)
        self.n_bursts = None
        self.n_short_wait = None
        self.n_outliers = None
        self.n_outliers_unique = None
        self.summary = {}
        self.candidates = None
        # Burst properties which will be averaged and added to summary
        self.bprops = ['dt', 'fluence', 'peak', 'length', 'acc_mass', 'qnuc',
                       'tail_index']
        self.n_spikes = None
        self.spikes = []
        self.dumpfiles = None
        self.dump_table = None
        # ====== linregress things ======
        self.regress_bprops = ['dt', 'fluence', 'peak']
        self.discard = None
        # ====== Loading things ======
        if load_config:
            self.load_config()
            self.apply_config()
        if self.options['load_model_params']:
            self.load_model_params()
        if load_lum:
            self.load_lum_file()
            if remove_zero_lum:
                self.remove_zero_lum()
        if self.load_bursts:
            self.load_burst_table()
        if self.load_dumps:
            self.load_dumpfiles()
        if self.load_summary:
            if not self.load_bursts:
                self.print_warn('Loading summary but not bursts. The summary values are '
                                + 'not gauranteed to match the burst properties.'
                                + '\nTHIS IS NOT RECOMMENDED')
            self.load_summary_table()
        if truncate_edd:
            # assumes Eddington limit for pure helium
            # NOTE(review): uses the local `truncate_edd` arg rather than
            # self.options['truncate_edd'], and requires model_params to be
            # loaded (load_model_params=True) -- confirm intended coupling.
            self.l_edd = accretion.eddington_lum_newtonian(mass=self.model_params['mass'],
                                                           x=0.0)
        if analyse:
            self.analyse()
            if plot:
                self.plot()
# ===========================================================
# Loading/setup
# ===========================================================
def load_config(self):
"""Loads config parameters from file
"""
# TODO: Load defaults also
config_filepath = grid_strings.config_filepath(self.source,
module_dir='burst_analyser')
self.printv(f'Loading config: {config_filepath}')
if not os.path.exists(config_filepath):
raise FileNotFoundError(f'Config file not found: {config_filepath}'
'\nEither create one or set load_config=False')
ini = configparser.ConfigParser()
ini.read(config_filepath)
config = {}
for section in ini.sections():
config[section] = {}
for option in ini.options(section):
config[section][option] = ast.literal_eval(ini.get(section, option))
self.config = config
def apply_config(self):
"""Applies loaded config parameters
"""
self.printv('Overwriting default parameters with config')
for param, val in self.config['parameters'].items():
self.parameters[param] = val
def check_options(self):
"""Checks consistency of selected options
"""
if self.options['quick_discard'] and self.options['auto_discard']:
raise ValueError('Only one of (quick_discard, auto_discard) can be activated')
def check_parameters(self):
"""Check consistency of analysis parameters
"""
pass
def load_model_params(self):
"""Load model parameters from grid table
"""
try:
batch_table = grid_tools.load_model_table(self.batch, source=self.source,
verbose=self.options['verbose'])
except FileNotFoundError:
try:
grid_table = grid_tools.load_grid_table('params', source=self.source)
batch_table = grid_tools.reduce_table(grid_table,
params={'batch': self.batch})
except FileNotFoundError:
self.print_warn('Model parameter table not found. '
'Has the source grid been analysed yet?')
return
model_row = grid_tools.reduce_table(batch_table, params={'run': self.run})
params_dict = model_row.to_dict(orient='list')
for key, value in params_dict.items():
params_dict[key] = value[0]
self.model_params = params_dict
def check_lum_loaded(self):
"""Checks if luminosity file has been loaded
"""
if not self.flags['lum_loaded']:
if self.flags['lum_does_not_exist']:
return
else:
self.load_lum_file()
    def load_lum_file(self):
        """Load luminosity data from kepler simulation

        Populates self.lum via burst_tools.load_lum and builds the lightcurve
        interpolator. If load_lum returns None (presumably a missing lumfile
        -- confirm against burst_tools), the model is flagged as having no
        lightcurve and n_bursts is zeroed so analysis can bail out cleanly.
        """
        self.lum = burst_tools.load_lum(run=self.run, batch=self.batch,
                                        source=self.source, basename=self.basename,
                                        save=self.options['save_lum'],
                                        reload=self.options['reload'],
                                        check_monotonic=self.options['check_lumfile_monotonic'],
                                        verbose=self.options['verbose'])
        if self.lum is None:
            self.flags['lum_does_not_exist'] = True
            self.n_bursts = 0
            return
        self.setup_lum_interpolator()
        self.flags['lum_loaded'] = True
def setup_lum_interpolator(self):
"""Creates interpolator function of model lightcurve
"""
self.lumf = interpolate.interp1d(self.lum[:, 0], self.lum[:, 1])
def remove_zero_lum(self):
"""kepler outputs random zero luminosity (for some reason...)
"""
zeros = np.where(self.lum[:, 1] == 0.0)[0]
n_zeros = len(zeros)
if n_zeros > 0:
self.printv(f'{n_zeros} points with zero luminosity replaced')
self.flags['zeros'] = True
self.lum[zeros, 1] = self.parameters['zero_replacement']
def overwrite_parameters(self, set_parameters):
"""Overwrite default analysis parameters
"""
if set_parameters is not None:
if type(set_parameters) is dict:
for param, value in set_parameters.items():
self.printv(f'Overwriting default analysis parameter: {param}={value}')
if param in self.parameters:
self.parameters[param] = value
else:
raise ValueError(f"parameter '{param}' not in self.parameters")
else:
raise TypeError("'set_parameters' must be type dict")
    def load_burst_table(self):
        """Load pre-extracted burst properties from file

        Populates self.bursts and the derived counts (total bursts,
        short-wait and outlier tallies), then refreshes the analysis flags.
        """
        self.printv('Loading pre-extracted bursts from file')
        self.bursts = burst_tools.load_run_table(run=self.run, batch=self.batch,
                                                 source=self.source, table='bursts')
        self.n_bursts = len(self.bursts)
        self.n_short_wait = len(self.short_waits())
        self.n_outliers = len(self.outliers())
        # unique=True presumably collapses bursts flagged as outliers in
        # more than one bprop -- outliers() is defined elsewhere; confirm
        self.n_outliers_unique = len(self.outliers(unique=True))
        self.determine_flags_from_table()
def determine_flags_from_table(self):
"""Determine flags from a loaded burst table (without full analysis)
"""
if self.n_short_wait > 0:
self.flags['short_waits'] = True
if self.n_outliers_unique > 0:
self.flags['outliers'] = True
if self.n_bursts < 2:
self.flags['too_few_bursts'] = True
if False not in np.isnan(np.array(self.bursts['slope_dt'])):
self.flags['regress_too_few_bursts'] = True
def load_summary_table(self):
self.printv('Loading pre-extracted model summary from file')
summary_table = burst_tools.load_run_table(run=self.run, batch=self.batch,
source=self.source, table='summary')
self.summary = summary_table.to_dict('list')
for key, val in self.summary.items():
self.summary[key] = val[0] # don't store as arrays
    def load_dumpfiles(self):
        """Load available kepler dumpfiles

        Fills self.dumpfiles and self.dump_table via kepler_tools, then marks
        dumps as loaded.
        """
        # TODO: what happens if there are no dumpfiles?
        self.dumpfiles = kepler_tools.load_dumps(self.run, batch=self.batch,
                                                 source=self.source,
                                                 basename=self.basename)
        self.dump_table = kepler_tools.extract_dump_table(self.run, batch=self.batch,
                                                          source=self.source,
                                                          basename=self.basename,
                                                          dumps=self.dumpfiles)
        self.flags['dumps_loaded'] = True
def check_dumpfiles(self):
"""Checks if dumpfiles are loaded, and whether they need to be
"""
if not self.flags['dumps_loaded']:
if self.load_dumps:
self.load_dumpfiles()
else:
self.printv('Dumpfiles not loaded')
raise NoDumps
def setup_summary(self):
"""Collects remaining model properties into dictionary
"""
self.summary['batch'] = self.batch
self.summary['run'] = self.run
self.summary['num'] = self.n_bursts
self.summary['burn_in'] = self.discard
self.summary['converged'] = self.flags['converged']
self.summary['short_waits'] = self.flags['short_waits']
self.summary['outliers'] = self.flags['outliers']
self.summary['n_outliers'] = self.n_outliers_unique
self.summary['n_short_waits'] = self.n_short_wait
self.get_means()
self.test_bimodal()
# ===========================================================
# Saving/Loading/Accessing data
# ===========================================================
def printv(self, string):
| |
ET.tostring(emit._xml())
'<emit><CNPJ>08427847000169</CNPJ><IE>111222333444</IE><IM>123456789012345</IM><cRegTribISSQN>1</cRegTribISSQN><indRatISSQN>S</indRatISSQN></emit>'
"""
def __init__(self, **kwargs):
super(Emitente, self).__init__(schema={
'CNPJ': {
'type': 'cnpj',
'required': True},
'IE': {
'type': 'string',
'required': True,
'regex': r'^\d{2,12}$'},
'IM': {
'type': 'string',
'required': False,
'regex': r'^\d{1,15}$'},
'cRegTribISSQN': {
'type': 'string',
'required': False,
'allowed':
[v for v,s in constantes.C15_CREGTRIBISSQN_EMIT]},
'indRatISSQN': {
'type': 'string',
'required': True,
'allowed': [v for v,s in constantes.C16_INDRATISSQN_EMIT]},
}, **kwargs)
def _construir_elemento_xml(self, *args, **kwargs):
emit = ET.Element('emit')
ET.SubElement(emit, 'CNPJ').text = self.CNPJ
ET.SubElement(emit, 'IE').text = self.IE
if hasattr(self, 'IM'):
ET.SubElement(emit, 'IM').text = self.IM
if hasattr(self, 'cRegTribISSQN'):
ET.SubElement(emit, 'cRegTribISSQN').text = self.cRegTribISSQN
ET.SubElement(emit, 'indRatISSQN').text = self.indRatISSQN
return emit
class Destinatario(Entidade):
    """Identificação do destinatário do CF-e (``dest``, grupo ``E01``).

    :param str CNPJ: Número do CNPJ do destinatário, contendo apenas os
        digitos e incluindo os zeros não significativos. **Não deve ser
        informado se o ``CPF`` for informado.**

    :param str CPF: Número do CPF do destinatário, contendo apenas os digitos e
        incluindo os zeros não significativos. **Não deve ser informado se o
        ``CNPJ`` for informado.**

    :param str xNome: *Opcional*. Nome ou razão social do destinatário.

    Note que os parâmetros ``CNPJ`` e ``CPF`` são mutuamente exclusivos.

    .. sourcecode:: python

        >>> dest = Destinatario()
        >>> ET.tostring(dest._xml(), encoding='utf-8')
        '<dest />'

        >>> dest = Destinatario(CNPJ='08427847000169')
        >>> ET.tostring(dest._xml(), encoding='utf-8')
        '<dest><CNPJ>08427847000169</CNPJ></dest>'

        >>> dest = Destinatario(CPF='11122233396', xNome=u'<NAME>')
        >>> ET.tostring(dest._xml(), encoding='utf-8')
        '<dest><CPF>11122233396</CPF><xNome><NAME></xNome></dest>'

        >>> dest = Destinatario(CPF='11122233396', CNPJ='08427847000169')
        >>> dest._xml()
        Traceback (most recent call last):
         ...
        ValidationError: ...

        # testa criação do XML para cancelamento; o nome deverá ser ignorado
        >>> dest = Destinatario(CPF='11122233396', xNome=u'<NAME>')
        >>> ET.tostring(dest._xml(cancelamento=True), encoding='utf-8')
        '<dest><CPF>11122233396</CPF></dest>'

    """

    class _Validator(ExtendedValidator):
        # Custom cerberus field types: each enforces CNPJ/CPF mutual
        # exclusivity and delegates check-digit validation to the `br`
        # helpers (error messages are user-facing; keep them in Portuguese).

        def _validate_type_CNPJ_E02(self, field, value):
            if 'CPF' in self.document:
                self._error(field,
                        u'CNPJ (E02) e CPF (E03) são mutuamente exclusivos.')
            elif not br.is_cnpj(value):
                self._error(field, u'CNPJ (E02) não é válido: "%s"' % value)

        def _validate_type_CPF_E03(self, field, value):
            if 'CNPJ' in self.document:
                self._error(field,
                        u'CNPJ (E02) e CPF (E03) são mutuamente exclusivos.')
            elif not br.is_cpf(value):
                self._error(field, u'CPF (E03) não é válido: "%s"' % value)

    def __init__(self, **kwargs):
        super(Destinatario, self).__init__(schema={
                'CNPJ': {'type': 'CNPJ_E02'},  # E02
                'CPF': {'type': 'CPF_E03'},  # E03
                'xNome': {  # E04
                        'type': 'string',
                        'required': False,
                        'minlength': 2, 'maxlength': 60}
            }, validator_class=Destinatario._Validator, **kwargs)

    def _construir_elemento_xml(self, *args, **kwargs):
        # cancelamento=True builds the reduced element used in the
        # cancellation XML, which omits the optional xNome field
        is_cancelamento = kwargs.pop('cancelamento', False)
        dest = ET.Element('dest')
        if hasattr(self, 'CNPJ'):
            ET.SubElement(dest, 'CNPJ').text = self.CNPJ
        if hasattr(self, 'CPF'):
            ET.SubElement(dest, 'CPF').text = self.CPF
        if hasattr(self, 'xNome') and not is_cancelamento:
            ET.SubElement(dest, 'xNome').text = self.xNome
        return dest
class LocalEntrega(Entidade):
    """Identificação do Local de Entrega (``entrega``, grupo ``G01``).

    :param str xLgr:
    :param str nro:
    :param str xCpl: *Opcional*
    :param str xBairro:
    :param str xMun:
    :param str UF:

    .. sourcecode:: python

        >>> entrega = LocalEntrega()
        >>> ET.tostring(entrega._xml(), encoding='utf-8')
        Traceback (most recent call last):
         ...
        ValidationError: ...

        >>> entrega.xLgr = '<NAME>'
        >>> entrega.nro = '65'
        >>> entrega.xBairro = 'Parque Gloria III'
        >>> entrega.xMun = 'Catanduva'
        >>> entrega.UF = 'SP'
        >>> ET.tostring(entrega._xml(), encoding='utf-8')
        '<entrega><xLgr><NAME></xLgr><nro>65</nro><xBairro>Parque Gloria III</xBairro><xMun>Catanduva</xMun><UF>SP</UF></entrega>'

    """

    class _Validator(ExtendedValidator):
        # Custom cerberus type: UF must be a valid Brazilian federation unit
        def _validate_type_UF_G07(self, field, value):
            if not br.is_uf(value):
                self._error(field, u'UF (G07) do Local de Entrega, '
                        u'não é válido: "%s"' % value)

    def __init__(self, **kwargs):
        super(LocalEntrega, self).__init__(schema={
                'xLgr': {  # G02
                        'type': 'string',
                        'required': True,
                        'minlength': 2, 'maxlength': 60},
                'nro': {  # G03
                        'type': 'string',
                        'required': True,
                        'minlength': 1, 'maxlength': 60},
                'xCpl': {  # G04
                        'type': 'string',
                        'required': False,
                        'minlength': 1, 'maxlength': 60},
                'xBairro': {  # G05
                        'type': 'string',
                        'required': True,
                        'minlength': 1, 'maxlength': 60},
                'xMun': {  # G06
                        'type': 'string',
                        'required': True,
                        'minlength': 2, 'maxlength': 60},
                'UF': {  # G07
                        'type': 'UF_G07',
                        'required': True},
            }, validator_class=LocalEntrega._Validator, **kwargs)

    def _construir_elemento_xml(self, *args, **kwargs):
        # Required fields are always emitted; xCpl (complement) only when set
        entrega = ET.Element('entrega')
        ET.SubElement(entrega, 'xLgr').text = self.xLgr
        ET.SubElement(entrega, 'nro').text = self.nro
        if hasattr(self, 'xCpl'):
            ET.SubElement(entrega, 'xCpl').text = self.xCpl
        ET.SubElement(entrega, 'xBairro').text = self.xBairro
        ET.SubElement(entrega, 'xMun').text = self.xMun
        ET.SubElement(entrega, 'UF').text = self.UF
        return entrega
class Detalhamento(Entidade):
    """Detalhamento do produto ou serviço do CF-e (``det``, grupo ``H01``).

    :param ProdutoServico produto:
    :param Imposto imposto:
    :param str infAdProd: *Opcional*

    Note que o atributo XML ``nItem`` (``H02``) não é determinado aqui, mas
    atribuído automaticamente, conforme a sua posição na lista de
    :attr:`~CFeVenda.detalhamentos`.

    .. sourcecode:: python

        >>> det = Detalhamento(
        ...         produto=ProdutoServico(
        ...                 cProd='123456',
        ...                 xProd='BORRACHA STAEDTLER',
        ...                 CFOP='5102',
        ...                 uCom='UN',
        ...                 qCom=Decimal('1.0000'),
        ...                 vUnCom=Decimal('5.75'),
        ...                 indRegra='A'),
        ...         imposto=Imposto(
        ...                 pis=PISSN(CST='49'),
        ...                 cofins=COFINSSN(CST='49'),
        ...                 icms=ICMSSN102(Orig='2', CSOSN='500')),
        ...         infAdProd='Teste')
        >>> ET.tostring(det._xml(nItem=1))
        '<det nItem="1"><prod><cProd>123456</cProd><xProd>BORRACHA STAEDTLER</xProd><CFOP>5102</CFOP><uCom>UN</uCom><qCom>1.0000</qCom><vUnCom>5.75</vUnCom><indRegra>A</indRegra></prod><imposto><ICMS><ICMSSN102><Orig>2</Orig><CSOSN>500</CSOSN></ICMSSN102></ICMS><PIS><PISSN><CST>49</CST></PISSN></PIS><COFINS><COFINSSN><CST>49</CST></COFINSSN></COFINS></imposto><infAdProd>Teste</infAdProd></det>'

    """

    def __init__(self, produto=None, imposto=None, **kwargs):
        # produto/imposto are exposed read-only via the properties below;
        # only infAdProd goes through the schema validation
        self._produto = produto
        self._imposto = imposto
        super(Detalhamento, self).__init__(schema={
                'infAdProd': {
                        'type': 'string',
                        'required': False,
                        'minlength': 1, 'maxlength': 500},
            }, **kwargs)

    @property
    def produto(self):
        """O produto ou serviço como uma instância de :class:`ProdutoServico`
        ao qual o detalhamento se refere.
        """
        return self._produto

    @property
    def imposto(self):
        """O grupo de tributos incidentes no produto ou serviço ao qual o
        detalhamento se refere, como uma instância de :class:`Imposto`.
        """
        return self._imposto

    def _construir_elemento_xml(self, *args, **kwargs):
        det = ET.Element('det')
        # nItem (H02) is supplied by the caller, reflecting the item's
        # position in the CF-e's list of detalhamentos
        det.attrib['nItem'] = str(kwargs.pop('nItem'))
        det.append(self.produto._xml())
        det.append(self.imposto._xml())
        if hasattr(self, 'infAdProd'):
            ET.SubElement(det, 'infAdProd').text = self.infAdProd
        return det
class ProdutoServico(Entidade):
    """Produto ou serviço do CF-e (``prod``, grupo ``I01``).

    :param str cProd:
    :param str cEAN: *Opcional*
    :param str xProd:
    :param str NCM: *Opcional*
    :param str CFOP:
    :param str uCom:
    :param Decimal qCom:
    :param Decimal vUnCom:
    :param str indRegra:
    :param Decimal vDesc: *Opcional*
    :param Decimal vOutro: *Opcional*
    :param list observacoes_fisco: *Opcional*

    .. sourcecode:: python

        # apenas os atributos requeridos;
        # note que, diferente da NF-e/NFC-e a ER SAT indica que o
        # atributo NCM não é obrigatório
        >>> prod = ProdutoServico(
        ...         cProd='123456',
        ...         xProd='BORRACHA STAEDTLER',
        ...         CFOP='5102',
        ...         uCom='UN',
        ...         qCom=Decimal('1.0000'),
        ...         vUnCom=Decimal('5.75'),
        ...         indRegra='A')
        >>> ET.tostring(prod._xml())
        '<prod><cProd>123456</cProd><xProd>BORRACHA STAEDTLER</xProd><CFOP>5102</CFOP><uCom>UN</uCom><qCom>1.0000</qCom><vUnCom>5.75</vUnCom><indRegra>A</indRegra></prod>'

        # todos os atributos (se vDesc for informado, então não informa vOutro)
        >>> prod = ProdutoServico(
        ...         cProd='123456',
        ...         cEAN='4007817525074',
        ...         xProd='BORRACHA STAEDTLER',
        ...         NCM='40169200',
        ...         CFOP='5102',
        ...         uCom='UN',
        ...         qCom=Decimal('1.0000'),
        ...         vUnCom=Decimal('5.75'),
        ...         indRegra='A',
        ...         vDesc=Decimal('0.25'))
        >>> ET.tostring(prod._xml())
        '<prod><cProd>123456</cProd><cEAN>4007817525074</cEAN><xProd>BORRACHA STAEDTLER</xProd><NCM>40169200</NCM><CFOP>5102</CFOP><uCom>UN</uCom><qCom>1.0000</qCom><vUnCom>5.75</vUnCom><indRegra>A</indRegra><vDesc>0.25</vDesc></prod>'

        # todos os atributos (informando vOutro)
        >>> prod = ProdutoServico(
        ...         cProd='123456',
        ...         cEAN='4007817525074',
        ...         xProd='BORRACHA STAEDTLER',
        ...         NCM='40169200',
        ...         CFOP='5102',
        ...         uCom='UN',
        ...         qCom=Decimal('1.0000'),
        ...         vUnCom=Decimal('5.75'),
        ...         indRegra='A',
        ...         vOutro=Decimal('0.25'))
        >>> ET.tostring(prod._xml())
        '<prod><cProd>123456</cProd><cEAN>4007817525074</cEAN><xProd>BORRACHA STAEDTLER</xProd><NCM>40169200</NCM><CFOP>5102</CFOP><uCom>UN</uCom><qCom>1.0000</qCom><vUnCom>5.75</vUnCom><indRegra>A</indRegra><vOutro>0.25</vOutro></prod>'

        # informa vDesc e vOutro, não deve validar
        >>> prod = ProdutoServico(
        ...         cProd='123456',
        ...         xProd='BORRACHA STAEDTLER',
        ...         CFOP='5102',
        ...         uCom='UN',
        ...         qCom=Decimal('1.0000'),
        ...         vUnCom=Decimal('5.75'),
        ...         indRegra='A',
        ...         vDesc=Decimal('0.25'),
        ...         vOutro=Decimal('0.25'))
        >>> prod._xml()
        Traceback (most recent call last):
         ...
        ValidationError: 'ProdutoServico' (grupo H01 'prod') atributos 'vDesc' e 'vOutro' sao mutuamente exclusivos

    """

    def __init__(self, observacoes_fisco=None, **kwargs):
        # Bug fix: the default used to be the mutable literal `[]`, which is
        # created once and shared by every instance constructed without the
        # argument. Use None as sentinel and give each instance a fresh list.
        self._observacoes_fisco = (observacoes_fisco
                                   if observacoes_fisco is not None else [])
        super(ProdutoServico, self).__init__(schema={
                'cProd': {  # I02
                        'type': 'string',
                        'required': True,
                        'minlength': 1, 'maxlength': 60},
                'cEAN': {  # I03
                        'type': 'string',
                        'required': False,
                        'regex': r'^(\d{8}|\d{12}|\d{13}|\d{14})$'},
                'xProd': {  # I04
                        'type': 'string',
                        'required': True,
                        'minlength': 1, 'maxlength': 120},
                'NCM': {  # I05
                        'type': 'string',
                        'required': False,
                        'regex': r'^(\d{2}|\d{8})$'},
                'CFOP': {  # I06
                        'type': 'string',
                        'required': True,
                        'regex': r'^\d{4}$'},
                'uCom': {  # I07
                        'type': 'string',
                        'required': True,
                        'minlength': 1, 'maxlength': 6},
                'qCom': {  # I08
                        'type': 'decimal',
                        'required': True},
                'vUnCom': {  # I09
                        'type': 'decimal',
                        'required': True},
                'indRegra': {  # I11
                        'type': 'string',
                        'required': True,
                        'allowed': [v for v, s in constantes.I11_INDREGRA]},
                'vDesc': {  # I12
                        'type': 'decimal',
                        'required': False},
                'vOutro': {  # I13
                        'type': 'decimal',
                        'required': False},
            }, **kwargs)

    @property
    def observacoes_fisco(self):
        """Cada produto, pode opcionalmente, conter uma lista de campos de uso
        livre do fisco, cujos campos e valores são representados por instâncias
        da classe :class:`ObsFiscoDet`.
        """
        return self._observacoes_fisco

    def _construir_elemento_xml(self, *args, **kwargs):
        # vDesc (discount) and vOutro (other costs) are mutually exclusive
        # per the SAT layout; enforce that before serialising
        if hasattr(self, 'vDesc') and hasattr(self, 'vOutro'):
            raise cerberus.ValidationError("'%s' (grupo H01 'prod') atributos "
                    "'vDesc' e 'vOutro' sao mutuamente exclusivos" %
                    self.__class__.__name__)
        prod = ET.Element('prod')
        ET.SubElement(prod, 'cProd').text = self.cProd
        if hasattr(self, 'cEAN'):
            ET.SubElement(prod, 'cEAN').text = self.cEAN
        ET.SubElement(prod, 'xProd').text = self.xProd
        if hasattr(self, 'NCM'):
            ET.SubElement(prod, 'NCM').text = self.NCM
        ET.SubElement(prod, 'CFOP').text = self.CFOP
        ET.SubElement(prod, 'uCom').text = self.uCom
        # Decimal values are rendered via str() to preserve their scale
        ET.SubElement(prod, 'qCom').text = str(self.qCom)
        ET.SubElement(prod, 'vUnCom').text = str(self.vUnCom)
        ET.SubElement(prod, 'indRegra').text = self.indRegra
        if hasattr(self, 'vDesc'):
            ET.SubElement(prod, 'vDesc').text = str(self.vDesc)
        if hasattr(self, 'vOutro'):
            ET.SubElement(prod, 'vOutro').text = str(self.vOutro)
        if self.observacoes_fisco:
            for obs in self.observacoes_fisco:
                prod.append(obs._xml())
        return prod
class ObsFiscoDet(Entidade):
"""Grupo do campo de uso livre do Fisco (``obsFiscoDet``, grupo ``I17``).
:param str xCampoDet:
:param str xTextoDet:
.. sourcecode:: python
>>> obs = ObsFiscoDet(xCampoDet='Cod. Produto ANP', xTextoDet='320101001')
>>> ET.tostring(obs._xml())
'<obsFiscoDet xCampoDet="Cod. Produto ANP"><xTextoDet>320101001</xTextoDet></obsFiscoDet>'
"""
def __init__(self, **kwargs):
super(ObsFiscoDet, self).__init__(schema={
'xCampoDet': {
'type': 'string',
'required': True,
'minlength': 1, 'maxlength': 20},
'xTextoDet': {
| |
########################################################################################################
# data_sql.py - Data pull from json, clean it up and upload to SQL
# by <NAME>
#
# This is Python script Pulls the metadata (link) from following three json data:-
# 1. https://api.weather.gov/points/31.7276,-110.8754
# 2. https://api.weather.gov/points/32.395,-110.6911
# 3. https://api.weather.gov/points/32.4186,-110.7383
#
# The Link pulled (json data) from the above three json data are
# the grid data links that are used to pull all the weather-related data for the three campgrounds:-
# 1. https://api.weather.gov/gridpoints/TWC/91,26
# 2. https://api.weather.gov/gridpoints/TWC/101,54
# 3. https://api.weather.gov/gridpoints/TWC/100,56
#
# From the above grid data 4 dataframes are created. The challenge was pulling the data from the
# above json links and then converting the date-time columns to the format (date-time) that can be used
# to upload to SQL and creating the graphs. Also Temperatures need to be converted to degreeF and wind
# speeds to Miles per hour:-
# 1. Campgroud information dF with information like lat, lon, elevation,
# meta url, grid url, forest url, campsite url fire danger and map code.
# 2. One for each campground (bs_grid_df, rc_grid_df, sc_grid_df). These df
# have columns (temp in degreeF, temp time, wind speed, wind speed time, wind gust,
# wind gust time, prob precipitation, Prob precp time, qty precip, qty precip time).
#
# SQLalchemy was used to create 4 tables in postgres SQL and then the above 4 DataFrames were uploaded
# Postgres SQL. The table names in SQL are:
# 1. camp_wx
# 2. cg_bog_spring
# 3. cg_rose_canyon
# 4. cg_spencer_canyon
#
# This script was converted from data_sql.ipynb
##########################################################################################################
# %%
# ------------------------
# Dependencies and Setup
# ------------------------
import pandas as pd
import json
import requests
import numpy as np
import datetime
from datetime import timedelta
from splinter import Browser
from bs4 import BeautifulSoup
def update_db(uri):
# %%
# --------------------------------------------------------------------
# Bog Spring CAMPGROUND
# --------------------------------------------------------------------
# ---------------------------------------------
# Pull Grid Data URL From Metadata url for
# ---------------------------------------------
bs_url = "https://api.weather.gov/points/31.7276,-110.8754"
response_bs = requests.get(bs_url)
data_bs = response_bs.json()
data_bs
grid_data_bs = data_bs["properties"]["forecastGridData"]
grid_data_bs
# %%
# ------------------------------------------------------------------------
# Pull latitude, Longitude and Elevation data for BogSprings Campground
# ------------------------------------------------------------------------
bs_forcast_url = grid_data_bs
response_bs_forecast = requests.get(bs_forcast_url)
data_bs_forecast = response_bs_forecast.json()
data_bs_forecast
lat_bs = data_bs_forecast["geometry"]["coordinates"][0][0][1]
lat_bs
lng_bs = data_bs_forecast["geometry"]["coordinates"][0][0][0]
lng_bs
elevation_bs = data_bs_forecast["properties"]["elevation"]["value"]
elevation_bs
# ---------------------------------------------------------------------------------
# Create a Dataframe with Latitude, Longitude Elevation and all other related URL
# ---------------------------------------------------------------------------------
bs_df = pd.DataFrame({"id": 1,
"campground": "Bog Springs",
"lat": [lat_bs],
"lon": [lng_bs],
"elevation": [elevation_bs],
"nws_meta_url": [bs_url],
"nws_grid_url": [grid_data_bs],
"forest_url":"https://www.fs.usda.gov/recarea/coronado/recreation/camping-cabins/recarea/?recid=25732&actid=29",
"campsite_url": "https://www.fs.usda.gov/Internet/FSE_MEDIA/fseprd746637.jpg",
"fire_danger": "Very High",
"map_code": '<iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3393.5714340164473!2d-110.87758868361043!3d31.72759998130141!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x86d6970db0a5e44d%3A0x1b48084e4d6db970!2sBog%20Springs%20Campground!5e0!3m2!1sen!2sus!4v1626560932236!5m2!1sen!2sus" width="600" height="450" style="border:0;" allowfullscreen="" loading="lazy"></iframe>'
})
bs_df
# %%
# -------------------------------------------------------------------------------------------------
# Pull temperate, Wind Speed, Wind Gust, Probability of Precipitation, Quantity or Precipitation
# data along with the date and time for each.
# -------------------------------------------------------------------------------------------------
# =================== Temperature Data ======================
temp = []
for i in data_bs_forecast["properties"]["temperature"]["values"]:
temp.append(i)
temp_df = pd.DataFrame(temp)
temp_df
# Temperature conversion to Degree Fahrenheit
temp_df['degF'] = (temp_df['value'] * 9 / 5) + 32
temp_df
# validTime Column split to date and time for Temperature
date_temp = temp_df['validTime'].str.split('T', n=1, expand=True)
time_temp = date_temp[1].str.split('+', n=1, expand=True)
time_temp
temp_df['date_temp'] = date_temp[0]
temp_df['time_temp'] = time_temp[0]
# Combine date and time with a space in between the two
temp_df['date_time_temp'] = temp_df['date_temp'] + ' ' + temp_df['time_temp']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# temp_df['date_time_temp'] = pd.to_datetime(temp_df['date_time_temp'])
# Pull all the data for today + 3 days
time_delta_temp = datetime.datetime.strptime(temp_df['date_temp'][0],"%Y-%m-%d") + timedelta(days = 4)
temp_df['times_temp'] = time_delta_temp.strftime("%Y-%m-%d")
temp_df = temp_df.loc[temp_df['date_temp'] < temp_df['times_temp']]
temp_df
# temp_df.dtypes
# =================== Wind Speed Data ======================
wind_speed = []
for i in data_bs_forecast["properties"]["windSpeed"]["values"]:
wind_speed.append(i)
windSpeed_df = pd.DataFrame(wind_speed)
windSpeed_df
# Converting KM/hour to Miles/hour
windSpeed_df['miles/hour'] = windSpeed_df['value'] * 0.621371
windSpeed_df
# validTime Column split to date and time for Wind Speed
date_ws = windSpeed_df['validTime'].str.split('T', n=1, expand=True)
time_ws = date_ws[1].str.split('+', n=1, expand=True)
time_ws
windSpeed_df['date_ws'] = date_ws[0]
windSpeed_df['time_ws'] = time_ws[0]
# Combine date and time with a space in between the two
windSpeed_df['date_time_ws'] = windSpeed_df['date_ws'] + ' ' + windSpeed_df['time_ws']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# windSpeed_df['date_time_ws'] = pd.to_datetime(windSpeed_df['date_time_ws'])
# Pull all the data for today + 3 days
time_delta_ws = datetime.datetime.strptime(windSpeed_df['date_ws'][0],"%Y-%m-%d") + timedelta(days = 4)
windSpeed_df['times_ws'] = time_delta_ws.strftime("%Y-%m-%d")
windSpeed_df = windSpeed_df.loc[windSpeed_df['date_ws'] < windSpeed_df['times_ws']]
windSpeed_df
# windSpeed_df.dtypes
# =================== Wind Gust Data ======================
wind_gust = []
for i in data_bs_forecast["properties"]["windGust"]["values"]:
wind_gust.append(i)
wind_gust_df = pd.DataFrame(wind_gust)
wind_gust_df
# Converting KM/hour to Miles/hour
wind_gust_df['m/h'] = wind_gust_df['value'] * 0.621371
wind_gust_df
# # validTime Column split to date and time for Wind Gusts
date_wg = wind_gust_df['validTime'].str.split('T', n=1, expand=True)
time_wg = date_wg[1].str.split('+', n=1, expand=True)
time_wg
wind_gust_df['date_wg'] = date_wg[0]
wind_gust_df['time_wg'] = time_wg[0]
# Combine date and time with a space in between the two
wind_gust_df['date_time_wg'] = wind_gust_df['date_wg'] + ' ' + wind_gust_df['time_wg']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# wind_gust_df['date_time_wg'] = pd.to_datetime(wind_gust_df['date_time_wg'])
wind_gust_df
# Pull all the data for today + 3 days
time_delta_wg = datetime.datetime.strptime(wind_gust_df['date_wg'][0],"%Y-%m-%d") + timedelta(days = 4)
wind_gust_df['times_wg'] = time_delta_wg.strftime("%Y-%m-%d")
wind_gust_df = wind_gust_df.loc[wind_gust_df['date_wg'] < wind_gust_df['times_wg']]
wind_gust_df
# wind_gust_df.dtypes
# =================== Probability of Precipitation Data ======================
prob_precip = []
for i in data_bs_forecast["properties"]["probabilityOfPrecipitation"]["values"]:
prob_precip.append(i)
prob_precip_df = pd.DataFrame(prob_precip)
prob_precip_df
# # validTime Column split to date and time for Probability Precipitation
date_pp = prob_precip_df['validTime'].str.split('T', n=1, expand=True)
time_pp = date_pp[1].str.split('+', n=1, expand=True)
time_pp
prob_precip_df['date_pp'] = date_pp[0]
prob_precip_df['time_pp'] = time_pp[0]
# Combine date and time with a space in between the two
prob_precip_df['date_time_pp'] = prob_precip_df['date_pp'] + ' ' + prob_precip_df['time_pp']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# prob_precip_df['date_time_pp'] = pd.to_datetime(prob_precip_df['date_time_pp'])
prob_precip_df
# Pull all the data for today + 3 days
time_delta_pp = datetime.datetime.strptime(prob_precip_df['date_pp'][0],"%Y-%m-%d") + timedelta(days = 4)
prob_precip_df['times_pp'] = time_delta_pp.strftime("%Y-%m-%d")
prob_precip_df = prob_precip_df.loc[prob_precip_df['date_pp'] < prob_precip_df['times_pp']]
prob_precip_df
# prob_precip_df.dtypes
# =================== Quantity of Precipitation Data ======================
qty_precip = []
for i in data_bs_forecast["properties"]["quantitativePrecipitation"]["values"]:
qty_precip.append(i)
qty_precip_df = pd.DataFrame(qty_precip)
qty_precip_df
# # validTime Column split to date and time for quantity Precipitation
date_qp = qty_precip_df['validTime'].str.split('T', n=1, expand=True)
time_qp = date_qp[1].str.split('+', n=1, expand=True)
time_qp
qty_precip_df['date_qp'] = date_qp[0]
qty_precip_df['time_qp'] = time_qp[0]
# Combine date and time with a space in between the two
qty_precip_df['date_time_qp'] = qty_precip_df['date_qp'] + ' ' + qty_precip_df['time_qp']
# Convert the above to date time format so it can be recognized by the PostgresSQL and js
# qty_precip_df['date_time_qp'] = pd.to_datetime(qty_precip_df['date_time_qp'])
qty_precip_df
# Pull all the data for today + 3 days
time_delta_qp = datetime.datetime.strptime(qty_precip_df['date_qp'][0],"%Y-%m-%d") + timedelta(days = 4)
qty_precip_df['times_qp'] = time_delta_qp.strftime("%Y-%m-%d")
qty_precip_df = qty_precip_df.loc[qty_precip_df['date_qp'] < qty_precip_df['times_qp']]
qty_precip_df
# qty_precip_df.dtypes
# =================== Create DataFrame with all the above data for Bog Spring Campground ======================
bs_grid_df = pd.DataFrame({"id":1,
"campground": "Bog Springs",
"forecasted_temperature_degF": temp_df['degF'],
"forecastTime_temperature": temp_df['date_time_temp'],
"forecasted_windSpeed_miles_per_h": windSpeed_df['miles/hour'],
"forecastTime_windSpeed": windSpeed_df['date_time_ws'],
"forecasted_windGust_miles_per_h": wind_gust_df['m/h'],
"forecastTime_windGust": wind_gust_df['date_time_wg'],
"forecasted_probabilityOfPrecipitation": prob_precip_df['value'],
"forecastTime_probabilityOfPrecipitation": prob_precip_df['date_time_pp'],
"forecasted_quantityOfPrecipitation_mm": qty_precip_df['value'],
"forecastTime_quantityOfPrecipitation": qty_precip_df['date_time_qp'],
})
bs_grid_df
# bs_grid_df.dtypes
# %%
# --------------------------------------------------------------------
# ROSE CANYON CAMPGROUND
# --------------------------------------------------------------------
# -------------------------------------------
# Pull Grid Data URL From Metadata url
# -------------------------------------------
rc_url = "https://api.weather.gov/points/32.395,-110.6911"
response_rc = requests.get(rc_url)
data_rc = response_rc.json()
data_rc
grid_data_rc = data_rc["properties"]["forecastGridData"]
grid_data_rc
# %%
# ------------------------------------------------------------------------
# Pull latitude, Longitude and Elevation data for Rose Canyon Campground
# ------------------------------------------------------------------------
rc_forcast_url = grid_data_rc
response_rc_forecast = requests.get(rc_forcast_url)
data_rc_forecast = response_rc_forecast.json()
data_rc_forecast
lat_rc = data_rc_forecast["geometry"]["coordinates"][0][0][1]
lat_rc
lng_rc = data_rc_forecast["geometry"]["coordinates"][0][0][0]
lng_rc
elevation_rc = data_rc_forecast["properties"]["elevation"]["value"]
elevation_rc
# ---------------------------------------------------------------------------------
# Create a Dataframe with Latitude, Longitude Elevation and all other related URL
# ---------------------------------------------------------------------------------
rc_df = pd.DataFrame({"id": 2,
"campground": "Rose Canyon",
"lat": [lat_rc],
"lon": [lng_rc],
"elevation": [elevation_rc],
"nws_meta_url": [rc_url],
"nws_grid_url": [grid_data_rc],
"forest_url":"https://www.fs.usda.gov/recarea/coronado/recreation/camping-cabins/recarea/?recid=25698&actid=29",
"campsite_url": "https://cdn.recreation.gov/public/2019/06/20/00/19/232284_beeddff5-c966-49e2-93a8-c63c1cf21294_700.jpg",
# "nws_meta_json":[data_rc],
# "nws_grid_json": [data_rc_forecast],
"fire_danger": "Very High",
"map_code": '<iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3368.97130566869!2d-110.70672358360277!3d32.39313088108983!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x86d6400421614087%3A0xb6cfb84a4b05c95b!2sRose%20Canyon%20Campground!5e0!3m2!1sen!2sus!4v1626560965073!5m2!1sen!2sus" width="600" height="450" style="border:0;" allowfullscreen="" loading="lazy"></iframe>'
})
rc_df
# %%
# -------------------------------------------------------------------------------------------------
# Pull temperate, Wind Speed, Wind Gust, Probability of Precipitation, Quantity or Precipitation
# data along with the date and time for each.
# -------------------------------------------------------------------------------------------------
# =================== Temperature Data ======================
temp_rc = []
for i in data_rc_forecast["properties"]["temperature"]["values"]:
temp_rc.append(i)
temp_rc_df = pd.DataFrame(temp_rc)
temp_rc_df
# Temperature conversion to Degree | |
Path object with file path to the file
Raises
------
NotDumpableExtractorError
"""
ext = extensions[0]
file_path = Path(file_path)
file_path.parent.mkdir(parents=True, exist_ok=True)
folder_path = file_path.parent
if Path(file_path).suffix == '':
file_path = folder_path / (str(file_path) + ext)
assert file_path.suffix in extensions, \
"'file_path' should have one of the following extensions:" \
" %s" % (', '.join(extensions))
return file_path
def dump(self, file_path, relative_to=None, folder_metadata=None):
    """
    Dumps extractor to json or pickle

    Parameters
    ----------
    file_path: str or Path
        The output file (either .json or .pkl/.pickle)
    relative_to: str, Path, or None
        If not None, file_paths are serialized relative to this path
    folder_metadata: str, Path, or None
        Forwarded unchanged to dump_to_json() / dump_to_pickle()

    Raises
    ------
    ValueError
        If the file extension is neither json nor pickle.
    """
    # Dispatch on the file extension; accept str or Path.
    name = str(file_path)
    if name.endswith('.json'):
        self.dump_to_json(file_path, relative_to=relative_to, folder_metadata=folder_metadata)
    elif name.endswith(('.pkl', '.pickle')):
        self.dump_to_pickle(file_path, relative_to=relative_to, folder_metadata=folder_metadata)
    else:
        # fixed grammar of the original message ("file must .json or .pkl")
        raise ValueError('Dump: file must be .json or .pkl')
def dump_to_json(self, file_path=None, relative_to=None, folder_metadata=None):
    """
    Dump recording extractor to json file.
    The extractor can be re-loaded with load_extractor_from_json(json_file)

    Parameters
    ----------
    file_path: str
        Path of the json file
    relative_to: str, Path, or None
        If not None, file_paths are serialized relative to this path
    folder_metadata: str, Path, or None
        Forwarded to to_dict()
    """
    assert self.check_if_dumpable()
    # Serialize without properties/features; annotations are kept.
    d = self.to_dict(include_annotations=True,
                     include_properties=False,
                     include_features=False,
                     relative_to=relative_to,
                     folder_metadata=folder_metadata)
    target = self._get_file_path(file_path, ['.json'])
    payload = json.dumps(check_json(d), indent=4)
    target.write_text(payload, encoding='utf8')
def dump_to_pickle(self, file_path=None, include_properties=True, include_features=True,
                   relative_to=None, folder_metadata=None):
    """
    Dump recording extractor to a pickle file.
    The extractor can be re-loaded with load_extractor_from_json(json_file)

    Parameters
    ----------
    file_path: str
        Path of the pickle file
    include_properties: bool
        If True, all properties are dumped
    include_features: bool
        If True, all features are dumped
    relative_to: str, Path, or None
        If not None, file_paths are serialized relative to this path
    folder_metadata: str, Path, or None
        Forwarded to to_dict()
    """
    assert self.check_if_dumpable()
    # BUGFIX: the include_properties / include_features arguments were
    # previously ignored (hard-coded to False), contradicting both their
    # defaults and the docstring above.
    dump_dict = self.to_dict(include_annotations=True,
                             include_properties=include_properties,
                             include_features=include_features,
                             relative_to=relative_to,
                             folder_metadata=folder_metadata)
    file_path = self._get_file_path(file_path, ['.pkl', '.pickle'])
    file_path.write_bytes(pickle.dumps(dump_dict))
@staticmethod
def load(file_path, base_folder=None):
"""
Load extractor from file path (.json or .pkl)
Used both after:
* dump(...) json or pickle file
* save (...) a folder which contain data + json (or pickle) + metadata.
"""
file_path = Path(file_path)
if file_path.is_file():
# standard case based on a file (json or pickle)
if str(file_path).endswith('.json'):
with open(str(file_path), 'r') as f:
d = json.load(f)
elif str(file_path).endswith('.pkl') or str(file_path).endswith('.pickle'):
with open(str(file_path), 'rb') as f:
d = pickle.load(f)
else:
raise ValueError(f'Impossible to load {file_path}')
if 'warning' in d and 'not dumpable' in d['warning']:
print('The extractor was not dumpable')
return None
extractor = BaseExtractor.from_dict(d, base_folder=base_folder)
return extractor
elif file_path.is_dir():
# case from a folder after a calling extractor.save(...)
folder = file_path
file = None
for dump_ext in ('json', 'pkl', 'pickle'):
f = folder / f'cached.{dump_ext}'
if f.is_file():
file = f
if file is None:
raise ValueError(f'This folder is not a cached folder {file_path}')
extractor = BaseExtractor.load(file, base_folder=folder)
return extractor
else:
raise ValueError('spikeinterface.Base.load() file_path must be an existing folder or file')
@staticmethod
def load_from_folder(folder):
    """Load a saved extractor from a folder (thin wrapper around BaseExtractor.load())."""
    return BaseExtractor.load(folder)
def _save(self, folder, **save_kwargs):
    # Implemented in BaseRecording / BaseSorting subclasses; called internally
    # by save_to_folder(...) and save_to_memory(...).
    raise NotImplementedError
def _extra_metadata_from_folder(self, folder):
    # Hook for subclasses (BaseRecording uses it for the probe): load any
    # extra metadata stored in `folder`. Base class: nothing to do.
    pass
def _extra_metadata_to_folder(self, folder):
    # Hook for subclasses (BaseRecording uses it for the probe): write any
    # extra metadata into `folder`. Base class: nothing to do.
    pass
def save(self, **kwargs):
    """
    Save a SpikeInterface object

    Parameters
    ----------
    kwargs: Keyword arguments for saving.
        * format: "memory" or "binary" (for recording) / "memory" or "npz" for sorting.
          In case format is not memory, the recording is saved to a folder
        * folder: if provided, the folder path where the object is saved
        * name: if provided and folder is not given, the name of the folder in the global temporary
          folder (use set_global_tmp_folder() to change this folder) where the object is saved.
          If folder and name are not given, the object is saved in the global temporary folder with
          a random string
        * dump_ext: 'json' or 'pkl', default 'json' (if format is "folder")
        * verbose: if True output is verbose
        * **save_kwargs: additional kwargs format-dependent and job kwargs for recording

    {}

    Returns
    -------
    loaded_extractor: BaseRecording or BaseSorting
        The reference to the saved object after it is loaded back
    """
    # 'save_format' instead of 'format' so we do not shadow the builtin.
    save_format = kwargs.get('format', None)
    if save_format == 'memory':
        loaded_extractor = self.save_to_memory(**kwargs)
    else:
        loaded_extractor = self.save_to_folder(**kwargs)
    return loaded_extractor

save.__doc__ = save.__doc__.format(_shared_job_kwargs_doc)
def save_to_memory(self, **kwargs):
    """Save the extractor into an in-memory copy and return that copy.

    Delegates the actual copy to the subclass `_save()` and then applies
    `copy_metadata()` on the result.
    """
    # used only by recording at the moment
    cached = self._save(**kwargs)
    self.copy_metadata(cached)
    return cached
def save_to_folder(self, name=None, folder=None, dump_ext='json', verbose=True, **save_kwargs):
    """
    Save extractor to folder.

    The save consists of:
      * extracting traces by calling get_trace() method in chunks
      * saving data into file (memmap with BinaryRecordingExtractor)
      * dumping to json/pickle the original extractor for provenance
      * dumping to json/pickle the cached extractor (memmap with BinaryRecordingExtractor)

    This replaces the use of the old CacheRecordingExtractor and CacheSortingExtractor.

    There are 3 options for the 'folder' argument:
      * explicit folder: `extractor.save(folder="/path-for-saving/")`
      * explicit sub-folder, implicit base-folder: `extractor.save(name="extractor_name")`
      * generated: `extractor.save()`

    The second option saves to subfolder "extractor_name" in
    "get_global_tmp_folder()". You can set the global tmp folder with:
    "set_global_tmp_folder("path-to-global-folder")"

    The folder must not exist. If it exists, remove it before.

    Parameters
    ----------
    name: None, str or Path
        Name of the subfolder in get_global_tmp_folder().
        If 'name' is given, 'folder' must be None.
    folder: None, str or Path
        Name of the folder.
        If 'folder' is given, 'name' must be None.
    dump_ext: 'json' or 'pkl'
        Extension used for the provenance and cached dump files.
    verbose: bool
        If True, print which cache folder is used.

    Returns
    -------
    cached: saved copy of the extractor.
    """
    if folder is None:
        cache_folder = get_global_tmp_folder()
        if name is None:
            # neither folder nor name given: generate a random subfolder name
            name = ''.join(random.choices(string.ascii_uppercase + string.digits, k=8))
            folder = cache_folder / name
            if verbose:
                print(f'Use cache_folder={folder}')
        else:
            folder = cache_folder / name
            if not is_set_global_tmp_folder():
                if verbose:
                    print(f'Use cache_folder={folder}')
    else:
        folder = Path(folder)
    assert not folder.exists(), f'folder {folder} already exists, choose another name'
    folder.mkdir(parents=True, exist_ok=False)
    # dump provenance (the original extractor) so the save can be traced back
    provenance_file = folder / f'provenance.{dump_ext}'
    if self.check_if_dumpable():
        self.dump(provenance_file)
    else:
        provenance_file.write_text(
            json.dumps({'warning': 'the provenace is not dumpable!!!'}),
            encoding='utf8'
        )
    # save data (done by the subclass _save implementation)
    cached = self._save(folder=folder, verbose=verbose, **save_kwargs)
    self.save_metadata_to_folder(folder)
    # copy metadata onto the saved copy
    self.copy_metadata(cached)
    # dump the cached extractor itself, with paths relative to the folder
    cached.dump(folder / f'cached.{dump_ext}', relative_to=folder, folder_metadata=folder)
    return cached
def _make_paths_relative(d, relative):
dcopy = deepcopy(d)
if "kwargs" in dcopy.keys():
relative_kwargs = _make_paths_relative(dcopy["kwargs"], relative)
dcopy["kwargs"] = relative_kwargs
return dcopy
else:
for k in d.keys():
# in SI, all input paths have the "path" keyword
if "path" in k:
# paths can be str or list of str
if isinstance(d[k], str):
# we use os.path.relpath here to allow for relative paths with respect to shared root
d[k] = os.path.relpath(str(d[k]), start=str(relative.absolute()))
else:
assert isinstance(d[k], list), "Paths can be strings or lists in kwargs"
relative_paths = []
for path in d[k]:
# we use os.path.relpath here to allow for relative paths with respect to shared root
relative_paths.append(os.path.relpath(str(path), start=str(relative.absolute())))
d[k] = relative_paths
return d
def _make_paths_absolute(d, base):
base = Path(base)
dcopy = deepcopy(d)
if "kwargs" in dcopy.keys():
base_kwargs = _make_paths_absolute(dcopy["kwargs"], base)
dcopy["kwargs"] = base_kwargs
return dcopy
else:
for k in d.keys():
# in SI, all input paths have the "path" keyword
if "path" in k:
# paths can be str or list of str
if isinstance(d[k], str):
if not Path(d[k]).exists():
d[k] = str(base / d[k])
else:
assert isinstance(d[k], list), "Paths can be strings or lists in kwargs"
absolute_paths = []
for path in d[k]:
if not Path(path).exists():
absolute_paths.append(str(base / path))
d[k] = absolute_paths
return d
def _check_if_dumpable(d):
kwargs = d['kwargs']
if np.any([isinstance(v, dict) and 'dumpable' in v.keys() for (k, v) in kwargs.items()]):
# check nested
for k, v in kwargs.items():
if 'dumpable' in v.keys():
return _check_if_dumpable(v)
else:
return d['dumpable']
def is_dict_extractor(d):
    """
    Check if a dict describe an extractor.
    """
    if not isinstance(d, dict):
        return False
    required = ('module', 'class', 'version', 'annotations')
    return all(key in d for key in required)
def _load_extractor_from_dict(dic):
cls = None
class_name = None
if 'kwargs' not in dic:
raise Exception(f'This dict cannot be load into extractor {dic}')
kwargs = deepcopy(dic['kwargs'])
# handle nested
for k, v in kwargs.items():
if isinstance(v, dict) and is_dict_extractor(v):
kwargs[k] = _load_extractor_from_dict(v)
# handle list of extractors list
for k, v in kwargs.items():
if isinstance(v, list):
if all(is_dict_extractor(e) for e in v):
kwargs[k] = [_load_extractor_from_dict(e) for e in v]
class_name = dic['class']
cls = _get_class_from_string(class_name)
assert cls is not None and class_name is not None, "Could not load spikeinterface class"
if not _check_same_version(class_name, dic['version']):
print('Versions are | |
# Repository: hlubenow/yetanother_raycaster.py
#!/usr/bin/python
# coding: utf-8
""" Yet another ray caster 2.0 - (C) 2021, hlubenow
Python/Pygame version of the tutorial code by 3DSage
(https://github.com/3DSage/OpenGL-Raycaster_v1)
License: MIT
"""
import pygame
import math
import os, sys
from mazegenerator import *
RANDOMMAZE = True               # generate a random maze instead of the fixed one
RANDOMMAZE_SIZE = (20, 20)      # maze size in tiles when RANDOMMAZE is on
JOYSTICK = False                # set True to read a connected joystick
SHADING = False                 # distance-based wall shading in the 3D view
SHOWRAYS = True                 # draw the cast rays in the 2D map view
NUMRAYS = 60                    # rays cast per frame (1 = single center beam)
FPS = 50

# Use the full-precision constant from the stdlib instead of the original
# hard-coded 3.1416; the angle-wrapping math only gets more accurate.
PI = math.pi

WALLSYMBOL = "1"                # map character that marks a wall tile

COLORS = {"black"     : (0, 0, 0),
          "white"     : (255, 255, 255),
          "darkgrey"  : (76, 76, 76),
          "grey"      : (140, 140, 140),
          "lightgrey" : (220, 220, 220),
          "bluegreen" : (0, 255, 255),
          "skyblue"   : (0, 186, 255),
          "red"       : (204, 0, 0),
          "blue"      : (0, 0, 200)}
class Map:
    """The maze: a text tile map plus a pre-rendered 2D surface of it."""

    def __init__(self):
        # Either generate a random maze or fall back to the small fixed one.
        if RANDOMMAZE:
            self.m = MazeGenerator(RANDOMMAZE_SIZE)
            self.textmap = self.m.maze
        else:
            self.textmap = ( "11111111",
                             "10100001",
                             "10100001",
                             "10100001",
                             "10000001",
                             "10000101",
                             "10000001",
                             "11111111" )
        """
        self.textmap = ( "1111111111111111",
                         "1010000110100001",
                         "1010000000100001",
                         "1010000100100001",
                         "1000000000000001",
                         "1000010010000101",
                         "1000000000000001",
                         "1000000010000001",
                         "1000000010000001",
                         "1010000110100001",
                         "1010000000100001",
                         "1010000100100001",
                         "1000000000000001",
                         "1000010010000101",
                         "1000000010000001",
                         "1111111111111111")
        """
        self.mapX = len(self.textmap[0])
        self.mapY = len(self.textmap)
        self.tilesize = 64
        # The 2D window always shows an 8x8-tile part of the full map.
        self.mappart_width = 8 * self.tilesize
        self.mappart_height = 8 * self.tilesize
        self.mappart_halfwidth = self.mappart_width // 2
        self.mappart_halfheight = self.mappart_height // 2
        # Top-left of the last displayable map part (scroll limits).
        self.lastpart_x = self.mapX * self.tilesize - self.mappart_width
        self.lastpart_y = self.mapY * self.tilesize - self.mappart_height

    def createSurface(self):
        """Pre-render the whole maze once; later blits just copy from it."""
        fullsize = (self.mapX * self.tilesize, self.mapY * self.tilesize)
        self.surface = pygame.Surface(fullsize).convert()
        self.rect = pygame.Rect(0, 0, self.mappart_width, self.mappart_height)
        for row, line in enumerate(self.textmap):
            for col, cell in enumerate(line):
                color = COLORS["white"] if cell == WALLSYMBOL else COLORS["black"]
                tile = pygame.Rect(col * self.tilesize + 1,
                                   row * self.tilesize + 1,
                                   self.tilesize - 2,
                                   self.tilesize - 2)
                pygame.draw.rect(self.surface, color, tile)

    def move(self, player):
        # Scroll the visible window so the player stays at (drawx, drawy).
        self.rect.topleft = (player.x - player.drawx, player.y - player.drawy)
class TwoDimensionalWindow:
    """Left-hand 512x512 top-down view: map part, player marker, beam and rays."""

    def __init__(self, map, player):
        self.map = map
        self.player = player
        self.width = 512
        self.height = 512
        self.surface = pygame.Surface((self.width, self.height)).convert()
        self.rect = self.surface.get_rect()
        self.rect.topleft = (0, 0)

    def drawMapPart(self):
        # Copy the currently visible part of the pre-rendered map.
        self.surface.blit(self.map.surface, (0, 0), self.map.rect)

    def drawPlayer(self):
        marker = pygame.Rect(self.player.drawx, self.player.drawy, 8, 8)
        pygame.draw.rect(self.surface, COLORS["lightgrey"], marker)

    def drawBeam(self):
        # Short red line showing the player's facing direction.
        cx = self.player.drawx + 3
        cy = self.player.drawy + 3
        tip = (cx + self.player.dx * 20, cy + self.player.dy * 20)
        pygame.draw.line(self.surface, COLORS["red"], (cx, cy), tip, 4)

    def drawRay(self, color, rx, ry):
        if not SHOWRAYS:
            return
        cx = self.player.drawx + 3
        cy = self.player.drawy + 3
        hit = (self.player.drawx + rx - self.player.x,
               self.player.drawy + ry - self.player.y)
        pygame.draw.line(self.surface, color, (cx, cy), hit, 2)

    def draw(self, surface):
        surface.blit(self.surface, (0, 0), self.rect)
class ThreeDimensionalWindow:
    """Right-hand 480x320 first-person view built from vertical wall slices."""

    def __init__(self):
        self.width = 480
        self.height = 320
        self.wallwidth = 8
        self.surface = pygame.Surface((self.width, self.height)).convert()
        self.rect = self.surface.get_rect()
        self.rect.topleft = (530, 0)

    def drawBackground(self):
        half = self.height / 2
        # Sky (upper half):
        pygame.draw.rect(self.surface, COLORS["skyblue"],
                         pygame.Rect(0, 0, self.width, half))
        # Floor (lower half):
        pygame.draw.rect(self.surface, COLORS["blue"],
                         pygame.Rect(0, half, self.width, half))

    def drawWallRay(self, color, r, lineOff, lineH):
        # One vertical wall slice per ray, wallwidth pixels wide.
        x = r * self.wallwidth
        pygame.draw.line(self.surface, color,
                         (x, lineOff), (x, lineOff + lineH), self.wallwidth)

    def draw(self, surface):
        surface.blit(self.surface, self.rect)
class Main:
def __init__(self):
    # NOTE: __init__ also runs the main loop; construction does not return
    # until the game is quit.
    self.map = Map()
    # Pressed/released state for every key we care about.
    self.keystates = {}
    for i in (pygame.K_a, pygame.K_LEFT, pygame.K_d, pygame.K_RIGHT,
              pygame.K_w, pygame.K_UP, pygame.K_s, pygame.K_DOWN,
              pygame.K_q, pygame.K_ESCAPE):
        self.keystates[i] = False
    if JOYSTICK:
        self.joystickstates = {}
        for i in ("left", "right", "up", "down", "firing"):
            self.joystickstates[i] = False
    # Fixed window position on the desktop; must be set before set_mode().
    os.environ['SDL_VIDEO_WINDOW_POS'] = "112, 72"
    self.screen = pygame.display.set_mode((1024, 510))
    pygame.display.set_caption("Yet another raycaster")
    # Pre-configure the mixer (sample rate/format) before pygame.init().
    pygame.mixer.pre_init(44100, -16, 1, 512)
    pygame.init()
    self.map.createSurface()
    if JOYSTICK:
        self.joystick = pygame.joystick.Joystick(0)
        self.joystick.init()
    self.sounds = {}
    self.sounds["wallhit"] = pygame.mixer.Sound("sound/wallhit.wav")
    self.player = Player(self.map, self.sounds)
    self.d2window = TwoDimensionalWindow(self.map, self.player)
    self.d3window = ThreeDimensionalWindow()
    self.clock = pygame.time.Clock()
    # --- Main loop: input, then draw 2D map view and 3D scenery ---
    while True:
        self.clock.tick(FPS)
        self.screen.fill(COLORS["darkgrey"])
        if self.processEvents() == "quit":
            break
        self.d2window.drawMapPart()
        self.d2window.drawPlayer()
        self.drawRays2DandScenery3D()
        self.d2window.drawBeam()
        self.d2window.draw(self.screen)
        self.d3window.draw(self.screen)
        pygame.display.flip()
    pygame.quit()
def processEvents(self):
    """Poll pygame events, update input state, apply movement.

    Returns "quit" when an exit key is down, 0 otherwise.
    """
    for event in pygame.event.get():
        # Keyboard: track only the keys registered in self.keystates.
        if event.type == pygame.KEYDOWN and event.key in self.keystates:
            self.keystates[event.key] = True
        if event.type == pygame.KEYUP and event.key in self.keystates:
            self.keystates[event.key] = False
        if JOYSTICK:
            if event.type == pygame.JOYBUTTONDOWN:
                self.joystickstates["firing"] = True
            if event.type == pygame.JOYBUTTONUP:
                self.joystickstates["firing"] = False
            if event.type == pygame.JOYAXISMOTION:
                # -1/1 = pushed to either end, 0 = released.
                value = int(event.value)
                if event.axis == 0:
                    if value == -1:
                        self.joystickstates["left"] = True
                    if value == 1:
                        self.joystickstates["right"] = True
                    if value == 0:
                        self.joystickstates["left"] = False
                        self.joystickstates["right"] = False
                if event.axis == 1:
                    if value == -1:
                        self.joystickstates["up"] = True
                    if value == 1:
                        self.joystickstates["down"] = True
                    if value == 0:
                        self.joystickstates["up"] = False
                        self.joystickstates["down"] = False
    if self.keystates[pygame.K_q] or self.keystates[pygame.K_ESCAPE]:
        return "quit"
    if self.keystates[pygame.K_a]:
        self.player.move("strafe_left")
    if self.keystates[pygame.K_d]:
        self.player.move("strafe_right")
    if self.keystates[pygame.K_LEFT]:
        self.player.move("turn_left")
    if self.keystates[pygame.K_RIGHT]:
        self.player.move("turn_right")
    if self.keystates[pygame.K_UP] or self.keystates[pygame.K_w]:
        self.player.move("forward")
    if self.keystates[pygame.K_DOWN] or self.keystates[pygame.K_s]:
        self.player.move("backwards")
    if JOYSTICK:
        speed = "fast" if self.joystickstates["firing"] else "normal"
        self.player.setSpeed(speed)
        if self.joystickstates["left"]:
            self.player.move("turn_left")
        if self.joystickstates["right"]:
            self.player.move("turn_right")
        if self.joystickstates["up"]:
            self.player.move("forward")
        if self.joystickstates["down"]:
            self.player.move("backwards")
    return 0
def fixAngle(self, a):
    """Wrap angle `a` into the range [0, 2*PI].

    The original version subtracted/added 2*PI only once, so angles more
    than one full turn outside the range stayed out of range; the while
    loops generalize that while behaving identically for single-wrap input.
    """
    while a > 2 * PI:
        a -= 2 * PI
    while a < 0:
        a += 2 * PI
    return a
def distance(self, ax, ay, bx, by, ang):
    """Project the vector from (ax, ay) to (bx, by) onto the direction `ang`."""
    dx = bx - ax
    dy = by - ay
    return math.cos(ang) * dx - math.sin(ang) * dy
def drawRays2DandScenery3D(self):
self.d3window.drawBackground()
if NUMRAYS == 1:
ray_angle = self.player.angle
else:
# Ray set back 30 degrees:
ray_angle = self.fixAngle(self.player.angle + PI / 6)
for r in range(NUMRAYS):
# ---Vertical---
dof = 0
side = 0
disV = 100000
Tan = math.tan(ray_angle)
if math.cos(ray_angle) > 0.001:
# Looking left:
rx = ((int(self.player.x) // self.map.tilesize) * self.map.tilesize) + self.map.tilesize
ry = (self.player.x - rx) * Tan + self.player.y
xo = self.map.tilesize
yo = -xo * Tan
elif math.cos(ray_angle) < -0.001:
# Looking right:
rx = ((int(self.player.x) // self.map.tilesize) * self.map.tilesize) - 0.0001
ry = (self.player.x - rx) * Tan + self.player.y
xo = -self.map.tilesize
yo = -xo * Tan
else:
# Looking up or down. no hit:
rx = self.player.x
ry = self.player.y
dof = 8
while dof < 8:
mx = int(rx) // self.map.tilesize
my = int(ry) // self.map.tilesize
if mx >= 0 and mx < self.map.mapX and my >= 0 and my < self.map.mapY and self.map.textmap[my][mx] == WALLSYMBOL:
# Hit:
dof = 8
disV = math.cos(ray_angle) * (rx - self.player.x) - math.sin(ray_angle) * (ry - self.player.y)
else:
# Check next horizontal:
rx += xo
ry += yo
dof += 1
vx = rx
vy = ry
# ---Horizontal---
dof = 0
disH = 100000
if Tan == 0:
Tan = 100000
else:
Tan = 1.0 / Tan
if math.sin(ray_angle) > 0.001:
# Looking up:
ry = ((int(self.player.y) // self.map.tilesize) * self.map.tilesize) - 0.0001
rx = (self.player.y - ry) * Tan + self.player.x
yo = -self.map.tilesize
xo = -yo * Tan
elif math.sin(ray_angle) < -0.001:
# Looking down:
# ry = ((int(self.player.y) // self.map.tilesize) * self.map.tilesize) + self.map.tilesize
ry = ((int(self.player.y) // self.map.tilesize) * self.map.tilesize) + self.map.tilesize
rx = (self.player.y - ry) * Tan + self.player.x
yo = self.map.tilesize
xo = -yo * Tan
else:
# Looking straight left or right:
rx = self.player.x
ry = self.player.y
dof = 8
while dof < 8:
mx = int(rx) // self.map.tilesize
my = int(ry) // self.map.tilesize
if mx >= 0 and mx < self.map.mapX and my >= 0 and my < self.map.mapY and self.map.textmap[my][mx] == WALLSYMBOL:
# Hit:
dof = 8
disH = math.cos(ray_angle) * (rx - self.player.x) - math.sin(ray_angle) * (ry - self.player.y)
else:
# Check next horizontal:
rx += xo
ry += yo
dof += 1
if NUMRAYS == 1:
beamcolor = COLORS["red"]
else:
beamcolor = COLORS["bluegreen"]
if not SHADING:
wallcolor = COLORS["lightgrey"]
if disV < disH:
rx = vx
ry = vy
disH = disV
# Horizontal hit first:
if not SHADING:
wallcolor = COLORS["grey"]
beamcolor = COLORS["skyblue"]
# Draw 2D beam:
self.d2window.drawRay(beamcolor, rx, ry)
ca = self.fixAngle(self.player.angle - ray_angle)
# Fix fish eye:
disH = disH * math.cos(ca)
lineH = int((self.map.tilesize * 320) / (disH))
# Line height and limit:
if lineH > 320:
lineH = 320
# Line offset:
lineOff = 160 - (lineH // 2)
if SHADING:
# Simple but effective shading code from:
# https://github.com/StanislavPetrovV/Raycasting-3d-game-tutorial
c = 255 / (1 + disH * disH * 0.00002)
wallcolor = (c, c, c)
# Draw 3D walls:
self.d3window.drawWallRay(wallcolor, r, lineOff, | |
# Repository: pombredanne/synapse-3
import asyncio
import logging
import binascii
import collections
import regex
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.lib.chop as s_chop
import synapse.lib.node as s_node
import synapse.lib.time as s_time
import synapse.lib.cache as s_cache
import synapse.lib.layer as s_layer
import synapse.lib.config as s_config
import synapse.lib.msgpack as s_msgpack
import synapse.lib.grammar as s_grammar
logger = logging.getLogger(__name__)
class Type:
    '''
    Base class for all synapse model types.

    A Type knows how to normalize python values into their storage form
    (norm), render them for display (repr), and construct the comparison
    and lift helpers used by the storm runtime.
    '''
    _opt_defs = ()
    stortype: int = None # type: ignore
    # a fast-access way to determine if the type is an array
    # ( due to hot-loop needs in the storm runtime )
    isarray = False
    def __init__(self, modl, name, info, opts):
        '''
        Construct a new Type object.

        Args:
            modl (synapse.datamodel.DataModel): The data model instance.
            name (str): The name of the type.
            info (dict): The type info (docs etc).
            opts (dict): Options that are specific to the type.
        '''
        # these fields may be referenced by callers
        self.modl = modl
        self.name = name
        self.info = info
        self.form = None # this will reference a Form() if the type is a form
        self.subof = None # This references the name that a type was extended from.
        self.info.setdefault('bases', ())
        # instance options are the class-level defaults overlaid with caller opts
        self.opts = dict(self._opt_defs)
        self.opts.update(opts)
        self._type_norms = {}   # python type to norm function map str: _norm_str
        self._cmpr_ctors = {}   # cmpr string to filter function constructor map
        self._cmpr_ctor_lift = {} # if set, create a cmpr which is passed along with indx ops
        self.setCmprCtor('=', self._ctorCmprEq)
        self.setCmprCtor('!=', self._ctorCmprNe)
        self.setCmprCtor('~=', self._ctorCmprRe)
        self.setCmprCtor('^=', self._ctorCmprPref)
        self.setCmprCtor('in=', self._ctorCmprIn)
        self.setCmprCtor('range=', self._ctorCmprRange)
        self.setNormFunc(s_node.Node, self._normStormNode)
        # storage-layer lift handlers keyed by comparison operator
        self.storlifts = {
            '=': self._storLiftNorm,
            '~=': self._storLiftRegx,
            '?=': self._storLiftSafe,
            'in=': self._storLiftIn,
            'range=': self._storLiftRange,
        }
        self.locked = False
        self.deprecated = bool(self.info.get('deprecated', False))
        self.postTypeInit()
    def _storLiftSafe(self, cmpr, valu):
        '''
        Best-effort '=' lift: a value which fails to norm produces no lift
        operations rather than raising.
        '''
        try:
            return self.storlifts['=']('=', valu)
        except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
            raise
        except Exception:
            # any norm/lift failure becomes "match nothing"
            return ()
    def _storLiftIn(self, cmpr, valu):
        # expand an 'in=' lift into one '=' lift per member value
        retn = []
        for realvalu in valu:
            retn.extend(self.getStorCmprs('=', realvalu))
        return retn
    def _storLiftNorm(self, cmpr, valu):
        # NOTE: this may also be used for any other supported
        # lift operation that requires a simple norm(valu)
        norm, info = self.norm(valu)
        return ((cmpr, norm, self.stortype),)
    def _storLiftRange(self, cmpr, valu):
        # valu is a (min, max) pair; both bounds are normed
        minv, minfo = self.norm(valu[0])
        maxv, maxfo = self.norm(valu[1])
        return ((cmpr, (minv, maxv), self.stortype),)
    def _storLiftRegx(self, cmpr, valu):
        # regex lifts pass the raw pattern through without norming
        return ((cmpr, valu, self.stortype),)
    def getStorCmprs(self, cmpr, valu):
        '''
        Return the storage layer lift tuples for the given cmpr and valu.

        Raises:
            s_exc.NoSuchCmpr: If the type has no lift for the given cmpr.
        '''
        func = self.storlifts.get(cmpr)
        if func is None:
            mesg = f'Type ({self.name}) has no cmpr: "{cmpr}".'
            raise s_exc.NoSuchCmpr(mesg=mesg)
        return func(cmpr, valu)
    def getStorNode(self, form):
        '''
        Construct a (buid, node info) tuple representing this type as a
        runtime node of the given form.
        '''
        ndef = (form.name, form.type.norm(self.name)[0])
        buid = s_common.buid(ndef)
        ctor = '.'.join([self.__class__.__module__, self.__class__.__qualname__])
        props = {
            'doc': self.info.get('doc'),
            'ctor': ctor,
        }
        opts = {k: v for k, v in self.opts.items()}
        if opts:
            props['opts'] = opts
        if self.subof is not None:
            props['subof'] = self.subof
        # norm each property value using the form's own property types
        pnorms = {}
        for prop, valu in props.items():
            formprop = form.props.get(prop)
            if formprop is not None and valu is not None:
                pnorms[prop] = formprop.type.norm(valu)[0]
        return (buid, {
            'ndef': ndef,
            'props': pnorms,
        })
    def getCompOffs(self, name):
        '''
        If this type is a compound, return the field offset for the given
        property name or None.
        '''
        return None
    def _normStormNode(self, node):
        # norm a storm Node by norming its primary property value
        return self.norm(node.ndef[1])
    def pack(self):
        '''
        Return a packed (serializable) representation of the type.
        '''
        return {
            'info': dict(self.info),
            'opts': dict(self.opts),
            'stortype': self.stortype,
        }
    def getTypeDef(self):
        '''
        Return the (name, (basename, opts), info) tuple which defines this type.
        '''
        basename = self.info['bases'][-1]
        info = self.info.copy()
        info['stortype'] = self.stortype
        return (self.name, (basename, self.opts), info)
    def getTypeVals(self, valu):
        # by default a value expands only to itself
        yield valu
    def setCmprCtor(self, name, func):
        '''
        Set a comparator ctor for a given named comparison operation.

        Args:
            name (str): Name of the comparison operation.
            func: Function which returns a comparator.

        Notes:
            Comparator ctors should expect to get the right-hand-side of the
            comparison as their argument, and the returned function should
            expect to get the left hand side of the comparison and return a
            boolean from there.
        '''
        self._cmpr_ctors[name] = func
    def getCmprCtor(self, name):
        # returns None when the comparison is not supported by this type
        return self._cmpr_ctors.get(name)
    def setLiftHintCmprCtor(self, name, func):
        # register a lift-hint comparator ctor for the given cmpr name
        self._cmpr_ctor_lift[name] = func
    def getLiftHintCmprCtor(self, name):
        return self._cmpr_ctor_lift.get(name)
    def getLiftHintCmpr(self, valu, cmpr):
        # construct a lift-hint comparator for valu, or None if unsupported
        ctor = self.getLiftHintCmprCtor(cmpr)
        if ctor:
            return ctor(valu)
        return None
    def cmpr(self, val1, name, val2):
        '''
        Compare the two values using the given type specific comparator.
        '''
        ctor = self.getCmprCtor(name)
        if ctor is None:
            raise s_exc.NoSuchCmpr(cmpr=name, name=self.name)
        # both sides are normed before being compared
        norm1 = self.norm(val1)[0]
        norm2 = self.norm(val2)[0]
        return ctor(norm2)(norm1)
    def _ctorCmprEq(self, text):
        norm, info = self.norm(text)
        def cmpr(valu):
            return norm == valu
        return cmpr
    def _ctorCmprNe(self, text):
        norm, info = self.norm(text)
        def cmpr(valu):
            return norm != valu
        return cmpr
    def _ctorCmprPref(self, valu):
        # prefix match is performed against the repr form of the value
        text = str(valu)
        def cmpr(valu):
            vtxt = self.repr(valu)
            return vtxt.startswith(text)
        return cmpr
    def _ctorCmprRe(self, text):
        # regex search is performed against the repr form of the value
        regx = regex.compile(text)
        def cmpr(valu):
            vtxt = self.repr(valu)
            return regx.search(vtxt) is not None
        return cmpr
    def _ctorCmprIn(self, vals):
        # membership test against the normed right-hand-side values
        norms = [self.norm(v)[0] for v in vals]
        def cmpr(valu):
            return valu in norms
        return cmpr
    def _ctorCmprRange(self, vals):
        # vals must be a (min, max) pair; bounds are normed and inclusive
        if not isinstance(vals, (list, tuple)):
            raise s_exc.BadCmprValu(name=self.name, valu=vals, cmpr='range=')
        if len(vals) != 2:
            raise s_exc.BadCmprValu(name=self.name, valu=vals, cmpr='range=')
        minv = self.norm(vals[0])[0]
        maxv = self.norm(vals[1])[0]
        def cmpr(valu):
            return minv <= valu <= maxv
        return cmpr
    def setNormFunc(self, typo, func):
        '''
        Register a normalizer function for a given python type.

        Args:
            typo (type): A python type/class to normalize.
            func (function): A callback which normalizes a python value.
        '''
        self._type_norms[typo] = func
    def postTypeInit(self):
        # subclass hook: called at the end of __init__
        pass
    def norm(self, valu):
        '''
        Normalize the value for a given type.

        Args:
            valu (obj): The value to normalize.

        Returns:
            ((obj,dict)): The normalized valu, info tuple.

        Notes:
            The info dictionary uses the following key conventions:
                subs (dict): The normalized sub-fields as name: valu entries.
        '''
        func = self._type_norms.get(type(valu))
        if func is None:
            raise s_exc.BadTypeValu(name=self.name, mesg='no norm for type: %r.' % (type(valu),))
        return func(valu)
    def repr(self, norm):
        '''
        Return a printable representation for the value.
        This may return a string or a tuple of values for display purposes.
        '''
        return str(norm)
    def merge(self, oldv, newv):
        '''
        Allow types to "merge" data from two sources based on value precedence.

        Args:
            oldv (object): The current value.
            newv (object): The updated value.

        Returns:
            (object): The merged value.
        '''
        return newv
    def extend(self, name, opts, info):
        '''
        Extend this type to construct a sub-type.

        Args:
            name (str): The name of the new sub-type.
            opts (dict): The type options for the sub-type.
            info (dict): The type info for the sub-type.

        Returns:
            (synapse.types.Type): A new sub-type instance.
        '''
        tifo = self.info.copy()
        tifo.update(info)
        # record the inheritance chain so getTypeDef() can name the base
        bases = self.info.get('bases') + (self.name,)
        tifo['bases'] = bases
        topt = self.opts.copy()
        topt.update(opts)
        tobj = self.__class__(self.modl, name, tifo, topt)
        tobj.subof = self.name
        return tobj
    def clone(self, opts):
        '''
        Create a new instance of this type with the specified options.

        Args:
            opts (dict): The type specific options for the new instance.
        '''
        topt = self.opts.copy()
        topt.update(opts)
        return self.__class__(self.modl, self.name, self.info, topt)
class Bool(Type):
    '''
    A boolean type which norms to the integer 1 or 0 (stored as a u8).
    '''
    stortype = s_layer.STOR_TYPE_U8
    # word forms accepted when norming a non-numeric string
    _TRUE_WORDS = ('true', 't', 'y', 'yes', 'on')
    _FALSE_WORDS = ('false', 'f', 'n', 'no', 'off')
    def postTypeInit(self):
        # strings get word parsing; ints and bools collapse to 0/1 directly
        for pytype, func in ((str, self._normPyStr),
                             (int, self._normPyInt),
                             (bool, self._normPyInt)):
            self.setNormFunc(pytype, func)
    def _normPyStr(self, valu):
        # numeric strings ("1", "0", "42") norm by truthiness first
        ival = s_common.intify(valu)
        if ival is not None:
            return int(bool(ival)), {}
        word = valu.lower().strip()
        if word in self._TRUE_WORDS:
            return 1, {}
        if word in self._FALSE_WORDS:
            return 0, {}
        raise s_exc.BadTypeValu(name=self.name, valu=valu,
                                mesg='Failed to norm bool')
    def _normPyInt(self, valu):
        return int(bool(valu)), {}
    def repr(self, valu):
        # render as the python literal 'True' / 'False'
        flag = bool(valu)
        return repr(flag)
class Array(Type):
isarray = True
def postTypeInit(self):
self.isuniq = self.opts.get('uniq', False)
self.issorted = self.opts.get('sorted', False)
self.splitstr = self.opts.get('split', None)
typename = self.opts.get('type')
if typename is None:
mesg = 'Array type requires type= option.'
raise s_exc.BadTypeDef(mesg=mesg)
typeopts = self.opts.get('typeopts', {})
basetype = self.modl.type(typename)
if basetype is None:
mesg = f'Array type ({self.name}) based on unknown type: {typename}.'
raise s_exc.BadTypeDef(mesg=mesg)
self.arraytype = basetype.clone(typeopts)
if isinstance(self.arraytype, Array):
mesg = 'Array type of array values is not (yet) supported.'
raise s_exc.BadTypeDef(mesg)
if self.arraytype.deprecated:
if self.info.get('custom'):
mesg = f'The Array type {self.name} is based on a deprecated type {self.arraytype.name} type which ' \
f'which will be removed in 3.0.0'
logger.warning(mesg)
self.setNormFunc(str, self._normPyStr)
self.setNormFunc(list, self._normPyTuple)
self.setNormFunc(tuple, self._normPyTuple)
self.stortype = s_layer.STOR_FLAG_ARRAY | self.arraytype.stortype
def _normPyStr(self, text):
if self.splitstr is None:
mesg = f'{self.name} type has no split-char defined.'
raise s_exc.BadTypeValu(name=self.name, mesg=mesg)
parts = [p.strip() for p in text.split(self.splitstr)]
return self._normPyTuple(parts)
def _normPyTuple(self, valu):
adds = []
norms = []
for item in valu:
norm, info = self.arraytype.norm(item)
adds.extend(info.get('adds', ()))
norms.append(norm)
form = self.modl.form(self.arraytype.name)
if form is not None:
adds.extend([(form.name, n) for n in norms])
adds = list(set(adds))
if self.isuniq:
uniqs = []
uniqhas = | |
@property
def pitchSet(self):
r'''
Gets the pitch set of all elements in a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> for pitch in sorted(verticality.pitchSet):
... pitch
...
<music21.pitch.Pitch F#3>
<music21.pitch.Pitch C#4>
<music21.pitch.Pitch F#4>
<music21.pitch.Pitch A4>
'''
pitchNameSet = set()
pitchSet = set()
for timespan in self.startAndOverlapTimespans:
if not hasattr(timespan, 'pitches'):
continue
for p in timespan.pitches:
pName = p.nameWithOctave
if pName in pitchNameSet:
continue
pitchNameSet.add(pName)
pitchSet.add(p)
return pitchSet
@property
def pitchClassSet(self):
r'''
Gets a set of all pitches in a verticality with distinct pitchClasses
>>> n1 = note.Note('C4')
>>> n2 = note.Note('B#5')
>>> s = stream.Stream()
>>> s.insert(4.0, n1)
>>> s.insert(4.0, n2)
>>> scoreTree = s.asTimespans()
>>> verticality = scoreTree.getVerticalityAt(4.0)
>>> pitchSet = verticality.pitchSet
>>> list(sorted(pitchSet))
[<music21.pitch.Pitch C4>, <music21.pitch.Pitch B#5>]
PitchClassSet will return only one pitch. Which of these
is returned is arbitrary.
>>> pitchClassSet = verticality.pitchClassSet
>>> #_DOCS_SHOW list(sorted(pitchClassSet))
>>> print('[<music21.pitch.Pitch B#5>]') #_DOCS_HIDE
[<music21.pitch.Pitch B#5>]
'''
outPitchSet = set()
pitchClassSet = set()
for currentPitch in self.pitchSet:
pitchClass = currentPitch.pitchClass
if pitchClass in pitchClassSet:
continue
pitchClassSet.add(pitchClass)
outPitchSet.add(currentPitch)
return outPitchSet
@property
def previousVerticality(self):
r'''
Gets the previous verticality before a verticality.
>>> score = corpus.parse('bwv66.6')
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> print(verticality)
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
>>> previousVerticality = verticality.previousVerticality
>>> print(previousVerticality)
<music21.tree.verticality.Verticality 0.5 {G#3 B3 E4 B4}>
Continue it:
>>> v = scoreTree.getVerticalityAt(1.0)
>>> while v is not None:
... print(v)
... v = v.previousVerticality
<music21.tree.verticality.Verticality 1.0 {F#3 C#4 F#4 A4}>
<music21.tree.verticality.Verticality 0.5 {G#3 B3 E4 B4}>
<music21.tree.verticality.Verticality 0.0 {A3 E4 C#5}>
Verticality objects created by an offset-tree hold a reference back to
that offset-tree. This means that they determine their next or previous
verticality dynamically based on the state of the offset-tree only when
asked. Because of this, it is safe to mutate the offset-tree by
inserting or removing timespans while iterating over it.
>>> scoreTree.removeTimespanList(previousVerticality.startTimespans)
>>> verticality.previousVerticality
<music21.tree.verticality.Verticality 0.0 {A3 E4 C#5}>
'''
tree = self.timespanTree
if tree is None:
return None
offset = tree.getPositionBefore(self.offset)
if offset is None:
return None
return tree.getVerticalityAt(offset)
@property
def startAndOverlapTimespans(self):
'''
Return a tuple adding the start and overlap timespans into one.
>>> n1 = note.Note('C4')
>>> n2 = note.Note('D4')
>>> s = stream.Stream()
>>> s.insert(4.0, n1)
>>> s.insert(4.5, n2)
>>> scoreTree = s.asTimespans()
>>> verticality = scoreTree.getVerticalityAt(4.5)
>>> verticality.startTimespans
(<PitchedTimespan (4.5 to 5.5) <music21.note.Note D>>,)
>>> verticality.overlapTimespans
(<PitchedTimespan (4.0 to 5.0) <music21.note.Note C>>,)
>>> verticality.startAndOverlapTimespans
(<PitchedTimespan (4.5 to 5.5) <music21.note.Note D>>,
<PitchedTimespan (4.0 to 5.0) <music21.note.Note C>>)
>>> verticality = scoreTree.getVerticalityAt(4.0)
>>> verticality.startAndOverlapTimespans
(<PitchedTimespan (4.0 to 5.0) <music21.note.Note C>>,)
'''
if self.overlapTimespans is None:
return tuple(self.startTimespans)
return tuple(self.startTimespans[:] + self.overlapTimespans[:])
# makeElement
def makeElement(self,
quarterLength=1.0,
*,
addTies=True,
addPartIdAsGroup=False,
removeRedundantPitches=True,
gatherArticulations='single',
gatherExpressions='single',
copyPitches=True,
):
r'''
Makes a Chord or Rest from this verticality and quarterLength.
>>> score = tree.makeExampleScore()
>>> scoreTree = tree.fromStream.asTimespans(score, flatten=True,
... classList=(note.Note, chord.Chord))
>>> verticality = scoreTree.getVerticalityAt(4.0)
>>> verticality
<music21.tree.verticality.Verticality 4.0 {E#3 G3}>
>>> verticality.startTimespans
(<PitchedTimespan (4.0 to 5.0) <music21.note.Note G>>,
<PitchedTimespan (4.0 to 6.0) <music21.note.Note E#>>)
>>> el = verticality.makeElement(2.0)
>>> el
<music21.chord.Chord E#3 G3>
>>> el.duration.quarterLength
2.0
>>> el.duration.type
'half'
If there is nothing there, then a Rest is created
>>> verticality = scoreTree.getVerticalityAt(400.0)
>>> verticality
<music21.tree.verticality.Verticality 400.0 {}>
>>> el = verticality.makeElement(1/3)
>>> el
<music21.note.Rest 1/3ql>
>>> el.duration.fullName
'Eighth Triplet (1/3 QL)'
>>> n1 = note.Note('C4')
>>> n2 = note.Note('C4')
>>> s = stream.Score()
>>> s.insert(0, n1)
>>> s.insert(0.5, n2)
>>> scoreTree = s.asTimespans()
>>> verticality = scoreTree.getVerticalityAt(0.5)
>>> c = verticality.makeElement(0.5)
>>> c
<music21.chord.Chord C4>
>>> c = verticality.makeElement(0.5, removeRedundantPitches=False)
>>> c
<music21.chord.Chord C4 C4>
Generally the pitches of the new element are not connected to the original pitch:
>>> c[0].pitch.name = 'E'
>>> c[1].pitch.name = 'F'
>>> (n1.name, n2.name)
('C', 'C')
But if `copyPitches` is False then the original pitch will be used:
>>> n1.name = 'D'
>>> n2.name = 'E'
>>> c = verticality.makeElement(0.5, removeRedundantPitches=False, copyPitches=False)
>>> c
<music21.chord.Chord D4 E4>
>>> c[0].pitch.name = 'F'
>>> c[1].pitch.name = 'G'
>>> (n1.name, n2.name)
('F', 'G')
gatherArticulations and gatherExpressions can be True, False, or (default) 'single'.
* If False, no articulations (or expressions) are transferred to the chord.
* If True, all articulations are transferred to the chord.
* If 'single', then no more than one articulation of each class (chosen from the lowest
note) will be added. This way, the chord does not get 4 fermatas, etc.
>>> n1 = note.Note('C4')
>>> n2 = note.Note('D4')
>>> s = stream.Stream()
>>> s.insert(0, n1)
>>> s.insert(0.5, n2)
>>> class AllAttachArticulation(articulations.Articulation):
... def __init__(self):
... super().__init__()
... self.tieAttach = 'all'
>>> class OtherAllAttachArticulation(articulations.Articulation):
... def __init__(self):
... super().__init__()
... self.tieAttach = 'all'
>>> n1.articulations.append(articulations.Accent())
>>> n1.articulations.append(AllAttachArticulation())
>>> n1.expressions.append(expressions.Fermata())
>>> n2.articulations.append(articulations.Staccato())
>>> n2.articulations.append(AllAttachArticulation())
>>> n2.articulations.append(OtherAllAttachArticulation())
>>> n2.expressions.append(expressions.Fermata())
>>> scoreTree = s.asTimespans()
>>> verticality = scoreTree.getVerticalityAt(0.0)
>>> c = verticality.makeElement(1.0)
>>> c.expressions
[<music21.expressions.Fermata>]
>>> c.articulations
[<music21.articulations.Accent>, <...AllAttachArticulation>]
>>> verticality = scoreTree.getVerticalityAt(0.5)
Here there will be no expressions, because there is no note ending
at 0.75 and Fermatas attach to the last note:
>>> c = verticality.makeElement(0.25)
>>> c.expressions
[]
>>> c = verticality.makeElement(0.5)
>>> c.expressions
[<music21.expressions.Fermata>]
Only two articulations, since accent attaches to beginning and staccato attaches to last
and we are beginning after the start of the first note (with an accent)
and cutting right through the second note (with a staccato)
>>> c.articulations
[<...AllAttachArticulation>,
<...OtherAllAttachArticulation>]
>>> c = verticality.makeElement(0.5, gatherArticulations=True)
>>> c.articulations
[<...AllAttachArticulation>,
<...AllAttachArticulation>,
<...OtherAllAttachArticulation>]
>>> c = verticality.makeElement(0.5, gatherArticulations=False)
>>> c.articulations
[]
>>> verticality = scoreTree.getVerticalityAt(1.0)
>>> c = verticality.makeElement(0.5)
>>> c.expressions
[<music21.expressions.Fermata>]
>>> c.articulations
[<music21.articulations.Staccato>,
<...AllAttachArticulation>,
<...OtherAllAttachArticulation>]
Added in v6.3: copyPitches option
OMIT_FROM_DOCS
Test that copyPitches works with expressions:
>>> c = verticality.makeElement(0.5, copyPitches=False)
>>> c
<music21.chord.Chord D4>
>>> c.pitches[0].accidental = pitch.Accidental('sharp')
>>> n2
<music21.note.Note D#>
'''
if not self.pitchSet:
r = note.Rest()
r.duration.quarterLength = common.opFrac(quarterLength)
return r
# easy stuff done, time to get to the hard stuff...
c = chord.Chord()
c.duration.quarterLength = common.opFrac(quarterLength)
dur = c.duration
seenPitches = set()
notesToAdd = {}
startStopSet = {'start', 'stop'}
pitchBust = 0 # used if removeRedundantPitches is False.
def newNote(ts, n):
'''
Make a copy of the note and clear some settings
'''
nNew = copy.deepcopy(n)
nNew.duration = dur
if not copyPitches:
nNew.pitch = n.pitch
if nNew.stemDirection != 'noStem':
nNew.stemDirection = None
if not addTies:
return nNew
offsetDifference = common.opFrac(self.offset - ts.offset)
endTimeDifference = common.opFrac(ts.endTime - (self.offset + quarterLength))
if offsetDifference == 0 and endTimeDifference <= 0:
addTie = None
elif offsetDifference > 0:
if endTimeDifference > 0:
addTie = 'continue'
else:
addTie = 'stop'
elif endTimeDifference > 0:
addTie = 'start'
else:
raise VerticalityException('What possibility was missed?',
offsetDifference, endTimeDifference, ts, self)
if nNew.tie is not None and {nNew.tie.type, addTie} == startStopSet:
nNew.tie.type = 'continue'
elif nNew.tie is not None and nNew.tie.type == 'continue':
nNew.tie.placement = None
elif addTie is None and nNew.tie is not None:
nNew.tie.placement = None
elif addTie:
nNew.tie = tie.Tie(addTie)
return nNew
def conditionalAdd(ts, n):
'''
Add an element only if it is not already in the chord.
If it has more tie information than the previously
added note, then remove the previously added note and add it
'''
nonlocal pitchBust # love Py3!!!
p = n.pitch
pitchKey = p.nameWithOctave
pitchGroup = None
if addPartIdAsGroup:
partContext = n.getContextByClass('Part')
if partContext is not None:
pidStr = str(partContext.id)
pitchGroup = pidStr.replace(' ', '_') # spaces are not allowed as group names
n.pitch.groups.append(pitchGroup)
n.groups.append(pitchGroup)
if pitchKey not in seenPitches:
seenPitches.add(pitchKey)
notesToAdd[pitchKey] = newNote(ts, n)
return
elif not removeRedundantPitches:
notesToAdd[pitchKey + str(pitchBust)] = newNote(ts, n)
pitchBust += 1
return
elif addPartIdAsGroup:
notesToAdd[pitchKey].groups.append(pitchGroup)
notesToAdd[pitchKey].pitch.groups.append(pitchGroup)
if not addTies:
return
# else add derivation once multiple derivations are allowed.
oldNoteTie = notesToAdd[pitchKey].tie
if oldNoteTie is not None and oldNoteTie.type == 'continue':
return # previous note was as good or better
possibleNewNote = newNote(ts, n)
possibleNewNote.groups = notesToAdd[pitchKey].groups
if possibleNewNote.tie | |
# MUNIT/networks.py
"""
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
from torch import nn
from torch.autograd import Variable
import torch
import torch.nn.functional as F
import utils
import functools
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from torch.nn import init
##################################################################################
# Discriminator
##################################################################################
# Defines the PatchGAN discriminator with the specified arguments.
class PatchDis(nn.Module):
    '''
    PatchGAN discriminator (as in pix2pix), optionally replicated as
    several towers.

    Args:
        input_dim (int): Number of channels in the input image.
        params (dict): Config with keys 'patch_n_layer', 'dim', 'norm',
            'activ', 'num_scales', 'gan_type' and 'pad_type'.
    '''
    def __init__(self, input_dim, params):
        super(PatchDis, self).__init__()
        self.n_layer = params['patch_n_layer']
        self.dim = params['dim']
        self.norm = params['norm']
        self.activ = params['activ']
        self.num_scales = params['num_scales']
        self.gan_type = params['gan_type']
        self.pad_type = params['pad_type']
        # lsgan regresses raw scores; every other mode ends the net with a
        # Sigmoid, so its outputs are already probabilities.
        self.use_sigmoid = not (self.gan_type =='lsgan')
        self.input_dim = input_dim
        self.cnns = nn.ModuleList()
        for _ in range(self.num_scales):
            self.cnns.append(self._make_net())
    def _make_net(self):
        '''Build one PatchGAN tower: strided conv stack -> 1-channel map.'''
        dim = self.dim
        kw = 4
        padw = 1
        sequence = [nn.Conv2d(self.input_dim, self.dim, kernel_size=kw, stride=2, padding=padw),
                    nn.LeakyReLU(0.2, True)]
        for i in range(self.n_layer - 1):
            sequence += [
                Conv2dBlock(dim, dim * 2, kw, 2, padw, norm=self.norm, activation=self.activ, pad_type='zero')]
            dim *= 2
        sequence +=[
            Conv2dBlock(dim, dim * 2, kw, 1, padw, norm=self.norm, activation=self.activ, pad_type='zero')]
        sequence += [nn.Conv2d(dim * 2, 1, kernel_size=kw, stride=1, padding=padw)]
        if self.use_sigmoid:
            sequence += [nn.Sigmoid()]
        sequence = nn.Sequential(*sequence)
        return sequence
    def forward(self, x):
        '''Return the list of per-tower patch score maps for x.'''
        # NOTE(review): unlike MsImageDis there is no downsampling between
        # towers, so every tower sees the same resolution -- confirm intended.
        outputs = []
        for model in self.cnns:
            outputs.append(model(x))
        return outputs
    def calc_dis_loss(self, input_fake, input_real):
        '''Discriminator loss: fake patches -> 0, real patches -> 1.'''
        outs0 = self.forward(input_fake)
        outs1 = self.forward(input_real)
        loss = 0
        for it, (out0, out1) in enumerate(zip(outs0, outs1)):
            if self.gan_type == 'lsgan':
                loss += torch.mean((out0 - 0)**2) + torch.mean((out1 - 1)**2)
            elif self.gan_type == 'nsgan':
                # The towers already end in Sigmoid (use_sigmoid), so feed the
                # outputs to BCE directly.  The old code applied F.sigmoid here
                # a second time (double-squashing the scores) and built the
                # targets with a hard-coded .cuda() call that crashed on CPU;
                # *_like() keeps the output's device instead.
                all0 = torch.zeros_like(out0)
                all1 = torch.ones_like(out1)
                loss += torch.mean(F.binary_cross_entropy(out0, all0) +
                                   F.binary_cross_entropy(out1, all1))
            else:
                assert 0, "Unsupported GAN type: {}".format(self.gan_type)
        return loss
    def calc_gen_loss(self, input_fake):
        '''Generator loss: fake patches should be scored as real (-> 1).'''
        outs0 = self.forward(input_fake)
        loss = 0
        for it, (out0) in enumerate(outs0):
            if self.gan_type == 'lsgan':
                loss += torch.mean((out0 - 1)**2) # LSGAN
            elif self.gan_type == 'nsgan':
                # outputs are already sigmoid probabilities; see calc_dis_loss
                all1 = torch.ones_like(out0)
                loss += torch.mean(F.binary_cross_entropy(out0, all1))
            else:
                assert 0, "Unsupported GAN type: {}".format(self.gan_type)
        return loss
class MsImageDis(nn.Module):
    '''
    Multi-scale image discriminator: the same conv tower architecture is
    applied to the input and to progressively average-pooled versions of it.

    Args:
        input_dim (int): Number of channels in the input image.
        params (dict): Config with keys 'n_layer', 'gan_type', 'dim',
            'norm', 'activ', 'num_scales' and 'pad_type'.
    '''
    def __init__(self, input_dim, params):
        super(MsImageDis, self).__init__()
        self.n_layer = params['n_layer']
        self.gan_type = params['gan_type']
        self.dim = params['dim']
        self.norm = params['norm']
        self.activ = params['activ']
        self.num_scales = params['num_scales']
        self.pad_type = params['pad_type']
        self.input_dim = input_dim
        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
        self.cnns = nn.ModuleList()
        for _ in range(self.num_scales):
            self.cnns.append(self._make_net())
    def _make_net(self):
        '''Build one discriminator tower emitting a 1-channel logit map.'''
        dim = self.dim
        cnn_x = []
        cnn_x += [Conv2dBlock(self.input_dim, dim, 4, 2, 1, norm='none', activation=self.activ, pad_type=self.pad_type)]
        for i in range(self.n_layer - 1):
            cnn_x += [Conv2dBlock(dim, dim * 2, 4, 2, 1, norm=self.norm, activation=self.activ, pad_type=self.pad_type)]
            dim *= 2
        cnn_x += [nn.Conv2d(dim, 1, 1, 1, 0)]
        cnn_x = nn.Sequential(*cnn_x)
        return cnn_x
    def forward(self, x):
        '''Return per-scale logit maps, halving x between scales.'''
        outputs = []
        for model in self.cnns:
            outputs.append(model(x))
            x = self.downsample(x)
        return outputs
    def calc_dis_loss(self, input_fake, input_real):
        '''Discriminator loss: fake -> 0, real -> 1, summed over scales.'''
        outs0 = self.forward(input_fake)
        outs1 = self.forward(input_real)
        loss = 0
        for it, (out0, out1) in enumerate(zip(outs0, outs1)):
            if self.gan_type == 'lsgan':
                loss += torch.mean((out0 - 0)**2) + torch.mean((out1 - 1)**2)
            elif self.gan_type == 'nsgan':
                # torch.sigmoid replaces the deprecated F.sigmoid, and the
                # *_like() targets keep the logits' device instead of the old
                # hard-coded .cuda() calls which crashed on CPU-only runs.
                prob0 = torch.sigmoid(out0)
                prob1 = torch.sigmoid(out1)
                loss += torch.mean(F.binary_cross_entropy(prob0, torch.zeros_like(prob0)) +
                                   F.binary_cross_entropy(prob1, torch.ones_like(prob1)))
            else:
                assert 0, "Unsupported GAN type: {}".format(self.gan_type)
        return loss
    def calc_gen_loss(self, input_fake):
        '''Generator loss: fake outputs should be classified as real.'''
        outs0 = self.forward(input_fake)
        loss = 0
        for it, (out0) in enumerate(outs0):
            if self.gan_type == 'lsgan':
                loss += torch.mean((out0 - 1)**2) # LSGAN
            elif self.gan_type == 'nsgan':
                prob0 = torch.sigmoid(out0)
                loss += torch.mean(F.binary_cross_entropy(prob0, torch.ones_like(prob0)))
            else:
                assert 0, "Unsupported GAN type: {}".format(self.gan_type)
        return loss
##################################################################################
# Generator
##################################################################################
class AdaINGen(nn.Module):
    '''
    AdaIN auto-encoder generator: a style encoder, a content encoder, and a
    decoder whose residual blocks are modulated by AdaIN parameters that an
    MLP predicts from the style code.
    '''
    def __init__(self, input_dim, params):
        super(AdaINGen, self).__init__()
        dim = params['dim']
        style_dim = params['style_dim']
        n_downsample = params['n_downsample']
        n_res = params['n_res']
        activ = params['activ']
        pad_type = params['pad_type']
        mlp_dim = params['mlp_dim']
        n_mlp_blocks = params['mlp_n_blk']
        # style encoder (no normalization so the style statistics survive)
        self.enc_style = StyleEncoder(4, input_dim, dim, style_dim, norm='none', activ=activ, pad_type=pad_type)
        # content encoder (instance norm) and AdaIN decoder
        self.enc_content = ContentEncoder(n_downsample, n_res, input_dim, dim, 'in', activ, pad_type=pad_type)
        self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, input_dim, res_norm='adain', activ=activ, pad_type=pad_type)
        # MLP mapping the style code to one (mean, std) pair per AdaIN feature
        self.mlp = MLP(style_dim, self.get_num_adain_params(self.dec), mlp_dim, n_mlp_blocks, norm='none', activ=activ)
    def forward(self, images):
        # auto-encode: image -> (content, style) -> image
        content, style = self.encode(images)
        return self.decode(content, style)
    def encode(self, images):
        # factor an image into its (content code, style code) pair
        return self.enc_content(images), self.enc_style(images)
    def decode(self, content, style):
        # predict AdaIN parameters from the style code, install them in the
        # decoder, then decode the content code
        self.assign_adain_params(self.mlp(style), self.dec)
        return self.dec(content)
    def assign_adain_params(self, adain_params, model):
        # Walk the model in module order, peeling one (mean, std) slice off
        # the flat parameter tensor for each AdaIN layer encountered.
        remaining = adain_params
        for layer in model.modules():
            if layer.__class__.__name__ != "AdaptiveInstanceNorm2d":
                continue
            nfeat = layer.num_features
            layer.bias = remaining[:, :nfeat].contiguous().view(-1)
            layer.weight = remaining[:, nfeat:2 * nfeat].contiguous().view(-1)
            if remaining.size(1) > 2 * nfeat:
                remaining = remaining[:, 2 * nfeat:]
    def get_num_adain_params(self, model):
        # each AdaIN layer consumes 2 * num_features parameters (mean + std)
        return sum(2 * layer.num_features
                   for layer in model.modules()
                   if layer.__class__.__name__ == "AdaptiveInstanceNorm2d")
class AdaINGanilla(nn.Module):
# ------------ ADAIN WITH GANILLA GENRATOR ------------ #
# AdaIN Ganilla Generator auto-encoder architecture
# Generator architector taken from:
# https://github.com/giddyyupp/ganilla
# ------------------------------------------------------- #
def __init__(self, input_dim, params):
super(AdaINGanilla, self).__init__()
dim = params['dim']
style_dim = params['style_dim']
n_downsample = params['n_downsample']
n_res = params['n_res']
activ = params['activ']
pad_type = params['pad_type']
mlp_dim = params['mlp_dim']
ganilla_ngf = params['ganilla_ngf']
ganilla_block_nf = params['ganilla_block_nf']
ganilla_layer_nb = params['ganilla_layer_nb']
use_dropout = params['use_dropout']
output_dim = params['output_dim']
use_style_enc_simple = params['use_style_enc_simple']
# Ganilla Style Encoder
if use_style_enc_simple:
self.enc_style = StyleEncoder(4, input_dim, dim, style_dim, norm='none', activ=activ, pad_type=pad_type)
else:
self.enc_style = GanillaStyleEncoder(input_dim, style_dim, ganilla_ngf, ganilla_block_nf, ganilla_layer_nb,
use_dropout, norm = 'none', pad_type =pad_type)
# Ganilla Content Encoder
self.enc_content = GanillaContentEncoder(input_dim, ganilla_ngf, ganilla_block_nf, ganilla_layer_nb,
use_dropout, norm = 'in', pad_type =pad_type)
sk_sizes = [self.enc_content.layer1[ganilla_layer_nb[0] - 1].conv2.out_channels,
self.enc_content.layer2[ganilla_layer_nb[1] - 1].conv2.out_channels,
self.enc_content.layer3[ganilla_layer_nb[2] - 1].conv2.out_channels,
self.enc_content.layer4[ganilla_layer_nb[3] - 1].conv2.out_channels]
self.dec = GanillaDecoder(output_dim, *sk_sizes, res_norm='adain', activ=activ, pad_type=pad_type)
#self.dec = GanillaDecoder2(n_res,output_dim, *sk_sizes, res_norm='adain', activ=activ, pad_type=pad_type)
# MLP to generate AdaIN parameters
self.mlp = MLP(style_dim, self.get_num_adain_params(self.dec), mlp_dim, 3, norm='none', activ=activ)
# input_dim, output_dim, dim, n_blk, norm = 'none', activ = 'relu'
def forward(self, images):
# reconstruct an image
content, style_fake = self.encode(images)
images_recon = self.decode(content, style_fake)
return images_recon
def encode(self, images):
# encode an image to its content and style codes
style_fake = self.enc_style(images)
content = self.enc_content(images)
return content, style_fake
def decode(self, content, style):
# decode content and style codes to an image
adain_params = self.mlp(style)
self.assign_adain_params(adain_params, self.dec)
images = self.dec(content)
return images
def assign_adain_params(self, adain_params, model):
# assign the adain_params to the AdaIN layers in model
for m in model.modules():
if m.__class__.__name__ == "AdaptiveInstanceNorm2d":
mean = adain_params[:, :m.num_features]
std = adain_params[:, m.num_features:2*m.num_features]
m.bias = mean.contiguous().view(-1)
m.weight = std.contiguous().view(-1)
if adain_params.size(1) > 2*m.num_features:
adain_params = adain_params[:, 2*m.num_features:]
def get_num_adain_params(self, model):
    """Return how many AdaIN parameters ``model`` needs: two vectors of
    ``num_features`` (bias and scale) per AdaptiveInstanceNorm2d layer."""
    return sum(2 * m.num_features
               for m in model.modules()
               if m.__class__.__name__ == "AdaptiveInstanceNorm2d")
class VAEGen(nn.Module):
    """Reduced VAE generator: content encoder + decoder where the posterior
    is modeled as N(hiddens, I) -- mean = encoder output, std-dev fixed at 1.

    ``params`` must provide 'dim', 'n_downsample', 'n_res', 'activ' and
    'pad_type'.
    """
    def __init__(self, input_dim, params):
        super(VAEGen, self).__init__()
        dim = params['dim']
        n_downsample = params['n_downsample']
        n_res = params['n_res']
        activ = params['activ']
        pad_type = params['pad_type']
        # content encoder / decoder pair
        self.enc = ContentEncoder(n_downsample, n_res, input_dim, dim, 'in', activ, pad_type=pad_type)
        self.dec = Decoder(n_downsample, n_res, self.enc.output_dim, input_dim, res_norm='in', activ=activ, pad_type=pad_type)

    def forward(self, images):
        """Return (images_recon, hiddens); adds unit-Gaussian noise to the
        latent during training (reparameterization with std-dev = 1)."""
        # BUG FIX: encode() returns a (hiddens, noise) tuple; the previous
        # code bound the whole tuple to ``hiddens`` and then crashed on
        # ``hiddens.size()``. Unpack it and reuse the sampled noise.
        hiddens, noise = self.encode(images)
        if self.training:
            images_recon = self.decode(hiddens + noise)
        else:
            images_recon = self.decode(hiddens)
        return images_recon, hiddens

    def encode(self, images):
        """Return (hiddens, noise): the latent mean and a unit-Gaussian
        sample of the same shape, allocated on the hiddens' device."""
        hiddens = self.enc(images)
        noise = Variable(torch.randn(hiddens.size()).cuda(hiddens.data.get_device()))
        return hiddens, noise

    def decode(self, hiddens):
        """Decode latent ``hiddens`` back to image space."""
        images = self.dec(hiddens)
        return images
##################################################################################
# Encoder and Decoders
##################################################################################
class StyleEncoder(nn.Module):
    """Style encoder: a downsampling conv stack followed by global average
    pooling and a 1x1 conv producing a ``style_dim``-channel style code."""

    def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, pad_type):
        super(StyleEncoder, self).__init__()
        layers = [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)]
        # Two stride-2 blocks that also double the channel count...
        for _ in range(2):
            layers.append(Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type))
            dim *= 2
        # ...then the remaining downsamples keep the channel count fixed.
        for _ in range(n_downsample - 2):
            layers.append(Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type))
        layers.append(nn.AdaptiveAvgPool2d(1))  # global average pooling
        layers.append(nn.Conv2d(dim, style_dim, 1, 1, 0))
        self.model = nn.Sequential(*layers)
        self.output_dim = dim

    def forward(self, x):
        return self.model(x)
class GanillaStyleEncoder(nn.Module):
def __init__(self, input_dim, style_dim, ganilla_ngf, ganilla_block_nf, ganilla_layer_nb, | |
ctx.invoke(self.hostconfig_set, ip=ip, password=password, port=port)
@hostconfig.command(name="set", aliases=["+", "add"])
@commands.cooldown(rate=1, per=15, type=commands.BucketType.user)
async def hostconfig_set(self, ctx, ip: str, password: str = "<PASSWORD>", port: int = 2333):
    """
    {command_prefix}hostconfig set [ip]
    {command_prefix}hostconfig set [ip] [password]
    {command_prefix}hostconfig set [ip] [password] [port]
    Allows to set your node credentials.
    Default password is <PASSWORD> (more than recommended to change it).
    Default port is 2333.
    """
    # Credentials were posted in a public channel: delete the message and
    # ask the user to use DMs instead (the command still proceeds).
    if ctx.guild and ip:
        try:
            await ctx.message.delete()
            # TODO: Translations
            await ctx.send('Please use this command in DMs for your privacy!')
        except discord.HTTPException:
            pass
    # TODO: Translations
    msg = await ctx.send('Connecting...')
    # Verify the Lavalink node is reachable before persisting anything.
    if not await self.ensure_node_connection(ip, port, password):
        return await msg.edit(content='Failed to connect to this server, please ensure that your credentials are correct! Also make sure that your server is running, and your firewall is not blocking the connection. You can also check if your port is opened correctly. For futher assistance, you can join Watora\'s discord.')  # TODO: Translations
    # TODO: Translations
    await msg.edit(content="Successfully connected to the server!")
    # Persist the credentials in the global settings, keyed by user id.
    settings = await SettingsDB.get_instance().get_glob_settings()
    settings.custom_hosts[str(ctx.author.id)] = {
        'host': ip,
        'port': port,
        'password': password
    }
    await SettingsDB.get_instance().set_glob_settings(settings)
    resume_config = {
        'resume_key': str(ctx.author.id) + str(sum(self.bot.shards.keys())),
        'resume_timeout': 600
    }
    # Replace any previously registered personal node for this user.
    node = self.bot.lavalink.node_manager.get_node_by_name(
        str(ctx.author.id), True)
    if node:
        await self.bot.lavalink.node_manager.destroy_node(node)
    self.bot.lavalink.add_node(
        region=None, host=ip, password=password, name=f'{ctx.author.id}', port=port, is_perso=True, **resume_config)
@hostconfig.command(name="delete", aliases=["remove", "-", "off", "stop", "leave"])
@commands.cooldown(rate=1, per=3, type=commands.BucketType.guild)
async def hostconfig_delete(self, ctx):
    """
    {command_prefix}hostconfig delete
    Removes your node configuration.
    """
    settings = await SettingsDB.get_instance().get_glob_settings()
    if str(ctx.author.id) in settings.custom_hosts.keys():
        # Drop the stored credentials and tear down the live node, if any.
        del settings.custom_hosts[str(ctx.author.id)]
        await SettingsDB.get_instance().set_glob_settings(settings)
        node = self.bot.lavalink.node_manager.get_node_by_name(
            str(ctx.author.id), True)
        if node:
            await self.bot.lavalink.node_manager.destroy_node(node)
    # Always acknowledge, even if there was nothing to remove.
    await ctx.send("☑️")
@hostconfig.command(name="now", aliases=["current", "atm"])
async def hostconfig_now(self, ctx):
    """
    {command_prefix}hostconfig now
    Shows your node configuration.
    """
    settings = await SettingsDB.get_instance().get_glob_settings()
    if str(ctx.author.id) not in settings.custom_hosts.keys():
        # TODO: Translations
        return await ctx.send('No config currently registered! Use `{}hostconfig set` to set one.'.format(get_server_prefixes(self.bot, ctx.guild)))
    info = settings.custom_hosts[str(ctx.author.id)]
    # TODO: Translations
    embed = discord.Embed(description="Your server configuration")
    embed.add_field(name='IP', value=info['host'], inline=False)
    embed.add_field(name='Password', value=info['password'], inline=False)
    embed.add_field(name='Port', value=info['port'], inline=False)
    node = self.bot.lavalink.node_manager.get_node_by_name(
        str(ctx.author.id))
    # TODO: Translations
    text = "Server is currently " + \
        ("connected" if node else "disconnected")
    embed.set_footer(text=text)
    # Credentials are DM'd, never posted in the guild channel.
    try:
        await ctx.author.send(embed=embed)
    except discord.HTTPException:
        return await ctx.send(get_str(ctx, "cant-send-pm"))
    if ctx.guild:
        await ctx.send(get_str(ctx, "message-send-to-mp"))
@checks.has_permissions(manage_guild=True)
@hostconfig.command(name="link", aliases=["connect", "setserver"])
async def hostconfig_link(self, ctx):
    """
    {command_prefix}hostconfig link
    Links your configuration to this server.
    Your node will be used by default when people of this guild
    are trying to listen to music.
    """
    settings = await SettingsDB.get_instance().get_guild_settings(ctx.guild.id)
    # Acts as a toggle: if already linked to this author, unlink instead.
    if settings.defaultnode == str(ctx.author.id):
        settings.defaultnode = None
        await SettingsDB.get_instance().set_guild_settings(settings)
        # TODO: Translations
        return await ctx.send("Your node is not linked to this server anymore.")
    settings_glob = await SettingsDB.get_instance().get_glob_settings()
    if str(ctx.author.id) not in settings_glob.custom_hosts.keys():
        # TODO: Translations
        return await ctx.send('No config currently registered!')
    settings.defaultnode = str(ctx.author.id)
    await SettingsDB.get_instance().set_guild_settings(settings)
    # TODO: Translations
    await ctx.send("Your node is now linked to this server.")
@hostconfig.command(name="switch", aliases=["move", "change"])
@commands.cooldown(rate=1, per=15, type=commands.BucketType.guild)
async def hostconfig_switch(self, ctx):
    """
    {command_prefix}hostconfig switch
    Switch current player to your node, or make it leaves your node.
    """
    settings = await SettingsDB.get_instance().get_glob_settings()
    if str(ctx.author.id) not in settings.custom_hosts.keys():
        # TODO: Translations
        return await ctx.send('No config currently registered!')
    info = settings.custom_hosts[str(ctx.author.id)]
    node = self.bot.lavalink.node_manager.get_node_by_name(
        str(ctx.author.id))
    if not node:
        # TODO: Translations
        return await ctx.send("Your node doesn't seem to be connected!")
    if ctx.guild.id not in self.bot.lavalink.players.players:
        return await ctx.send(get_str(ctx, "not-connected"), delete_after=20)
    player = await self.get_player(ctx.guild)
    # Only DJs may move a player that is not already on the author's node.
    if not await self.is_dj(ctx) and not player.node == node:
        raise commands.errors.CheckFailure
    is_user = True
    if player.node == node:
        # Already on the author's node: move it off to another node
        # (preferring the Premium node for claimed servers).
        # TODO: Translations
        await ctx.send('This player is already on your node! Moving it to another node...')
        node = self.bot.lavalink.node_manager.find_ideal_node(
            str(ctx.guild.id))
        if await self.bot.server_is_claimed(ctx.guild.id, settings):
            node = self.bot.lavalink.node_manager.get_node_by_name(
                str("Premium")) or node
        is_user = False
        if not node:
            # TODO: Translations
            return await ctx.send('No other node available!')
    else:
        await ctx.send('Moving...')  # TODO: Translations
    await player.change_node(node)
    await self.reload_np_msg(player)
    if is_user:
        # TODO: Translations
        return await ctx.send(f'Moved to {ctx.author} node.')
    return await ctx.send(f'Left {ctx.author} node.')  # TODO: Translations
@commands.is_owner()
@commands.command()
async def togglesource(self, ctx, *, source: str):
    """
    {command_prefix}togglesource
    Toggle default video source.
    """
    # Persist the new default source in the global settings.
    db = SettingsDB.get_instance()
    glob_settings = await db.get_glob_settings()
    glob_settings.source = source
    await db.set_glob_settings(glob_settings)
    await ctx.send(f"Source toggled to `{source}`")
@commands.is_owner()
@commands.command()
async def musiceval(self, ctx, *, stmt: str):
    """
    {command_prefix}musiceval
    Evals something.
    """
    # SECURITY: eval() of arbitrary input is acceptable here only because
    # of the is_owner() check above -- do not relax that decorator.
    try:
        result = eval(stmt)
        # Allow evaluating coroutines/awaitables directly.
        if inspect.isawaitable(result):
            result = await result
    except Exception:
        # Report only the last traceback line back to the owner.
        exc = traceback.format_exc().splitlines()
        result = exc[-1]
    # Keep the last result around for follow-up evals.
    self.temp = result
    await ctx.channel.send("```py\n--- In ---\n{}\n--- Out ---\n{}\n```".format(stmt, result))
@commands.Cog.listener()
async def on_guild_update(self, before, after):
    """Log a debug line whenever a guild's voice region changes."""
    if before.region == after.region:
        return
    log.debug("[Guild] \"%s\" changed regions: %s -> %s" %
              (after.name, before.region, after.region))
@commands.Cog.listener()
async def on_guild_remove(self, guild):
    # Tear down the Lavalink player when the bot is removed from a guild.
    if hasattr(self.bot, 'lavalink') and guild.id in self.bot.lavalink.players.players:
        log.debug(
            f"Removing player in removed guild ({guild.id}: {guild.name})")
        player = await self.get_player(guild)
        await self.disconnect_player(player)
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel):
    # If the voice channel the bot was playing in gets deleted, tear down
    # the associated player.
    guild = channel.guild
    if hasattr(self.bot, 'lavalink') and guild.id in self.bot.lavalink.players.players:
        if isinstance(channel, discord.VoiceChannel):
            if guild.me in channel.members:
                log.debug(
                    f"Removing player in removed channel ({guild.id}: {guild.name})")
                player = await self.get_player(guild)
                await self.disconnect_player(player)
async def auto_join_play(self, member, after):
    """Joins and plays a song based on settings, and voice updates"""
    # Ignore bots and muted members.
    if member.bot or (member.voice and member.voice.mute):
        return
    # Per-channel autosong configuration for the channel just joined.
    song_info = self.bot.autosongs_map[member.guild.id][str(
        after.channel.id)]
    if not song_info:
        # No song configured: just join the channel.
        player = await self.get_player(member.guild, True, member.id)
        return await player.connect(after.channel.id)
    # Entries may hold several '|'-separated choices; pick one at random.
    song_info = random.choice(song_info.split('|')).strip()
    if 'autoplaylist:' in song_info:
        # The entry refers to a stored autoplaylist by name.
        settings = await SettingsDB.get_instance().get_glob_settings()
        file_name = song_info.replace('autoplaylist:', '').strip()
        if str(file_name.lower()) not in settings.autoplaylists:
            return
        player = await self.get_player(member.guild, True, member.id)
        if not player.is_connected:
            await player.connect(after.channel.id)
            tries = 0
            while not player.is_connected and tries < 5:
                # Wait till the player connects to discord.. REE..
                await asyncio.sleep(tries)
                tries += 0.5
        else:
            # Already connected elsewhere: move to the triggering channel.
            if int(player.channel_id) != after.channel.id:
                await player.connect(after.channel.id)
        player.autoplaylist = settings.autoplaylists[str(
            file_name.lower())]
        player.authorplaylist = member
        # Start fresh from the autoplaylist.
        player.queue.clear()
        await player.skip()
    else:
        chan_id = after.channel.id
        if 'radio:' in song_info:
            # Resolve a named radio stream to its URL.
            song_info = song_info.replace('radio:', '').strip()
            song_info = self.list_radiolist.get(song_info)
            if not song_info:
                return
        player = await self.get_player(member.guild, True, member.id)
        results = await self.prepare_url(query=song_info, node=player.node)
        if not results or not results['tracks']:
            return
        await player.connect(chan_id)
        if results['playlistInfo']:
            # Queue the whole playlist.
            tracks = results['tracks']
            for track in tracks:
                player.add(requester=member.id, track=track)
        else:
            # Single track result.
            track = results['tracks'][0]
            track = self.prepare_track(track)
            player.add(requester=member.id, track=track)
        await self.player_play(player, song_info)
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
    # Central voice-state hook: handles autosong auto-join, auto pause /
    # resume when the bot ends up alone, and inactivity timeout tasks.
    if not hasattr(self.bot, 'lavalink'):
        return
    if not all([before, after, member]):
        return
    if (member.guild.id not in self.bot.lavalink.players.players):
        # No active player: the only thing of interest is autosong join.
        if member.guild.id not in self.bot.autosongs_map:
            return
        if not all([after.channel, member.guild]):
            return
        if str(after.channel.id) not in self.bot.autosongs_map[member.guild.id]:
            return
        # Only join when at least one non-bot, non-deafened listener is
        # present and the bot itself is not server-muted.
        if after.channel and (sum(1 for m in after.channel.members if not (m.voice.deaf or m.bot or m.voice.self_deaf or m == m.guild.me))) and not (member.guild.me.voice and member.guild.me.voice.mute):
            await self.auto_join_play(member, after)
    try:
        player = self.bot.lavalink.players.players[member.guild.id]
    except KeyError:
        return
    if member == member.guild.me and not after.channel:
        # The bot itself was disconnected from voice: clean up the player.
        log.warning(
            f"[Player] Just left voice for some reason. Disconnecting from {member.guild.id}/{member.guild.name}.")
        await self.disconnect_player(player)
        return
    if not player.connected_channel or not player.channel_id:
        return
    if member.guild.id in self.bot.autosongs_map:
        # Autosong follow: if someone joined a configured channel while the
        # bot's current channel has no listeners left, hop over there.
        if after.channel:
            if str(after.channel.id) in self.bot.autosongs_map[member.guild.id]:
                if member != member.guild.me and member.guild.me.voice:
                    if before.channel != after.channel:
                        if not (sum(1 for m in member.guild.me.voice.channel.members if not (m.voice.deaf or m.bot or m.voice.self_deaf or m == m.guild.me))):
                            if after.channel and (sum(1 for m in after.channel.members if not (m.voice.deaf or m.bot or m.voice.self_deaf or m == m.guild.me))) and not (member.guild.me.voice and member.guild.me.voice.mute):
                                await self.auto_join_play(member, after)
    # We don't care, right ?
    if player.connected_channel not in [after.channel, before.channel]:
        return
    my_voice_channel = player.connected_channel
    guild = self.bot.get_guild(int(player.guild_id))  # member.guild ?
    if (sum(1 for m in my_voice_channel.members if not (m.voice.deaf or m.bot or m.voice.self_deaf or m == guild.me))) and not (guild.me.voice and guild.me.voice.mute):
        # At least one active listener: resume if we auto-paused earlier.
        if player.auto_paused:
            player.auto_paused = False
            if player.paused:
                await player.set_pause(False)
                log.debug(
                    "[Voice] The player is now resumed on {}".format(guild.name))
        # Cancel any pending leave-on-inactivity task.
        if guild.id in self.timeout_tasks:
            self.timeout_tasks[guild.id].cancel()
            self.timeout_tasks.pop(guild.id)
    else:
        # Nobody is listening (or the bot is muted): auto-pause and start
        # the inactivity timeout.
        if not player.auto_paused:
            player.auto_paused = True
            if not player.paused and player.is_playing:
                await player.set_pause(True)
                log.debug(
                    "[Voice] The player is now paused on {}".format(guild.name))
            if player.timer_value is not False:
                task = asyncio.ensure_future(self.timeout_task(
                    player, additional_time=10 if guild.me.voice and guild.me.voice.mute else 0))  # don't instant leave for ever the vc
                self.timeout_tasks[guild.id] = task
async def timeout_task(self, player, additional_time):
guild = self.bot.get_guild(int(player.guild_id))
# if timer_value is at 0, don't instant leave the voice channel for ever if muted otherwise users can't unmute her
await asyncio.sleep(min(1800, max(max(player.timer_value + additional_time, 0), 0)))
if player in dict(self.bot.lavalink.players).values(): # prevent from stupid issues
# if not sum(1 for m in player.connected_channel.members if not (m.voice.deaf or m.bot or | |
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
# disable: accessing protected members, too many methods
# pylint: disable=W0212,R0904
import unittest
import fudge
from hamcrest import is_
from hamcrest import none
from hamcrest import not_none
from hamcrest import assert_that
from hamcrest import has_property
from hamcrest import contains_string
from pyramid.testing import setUp as psetUp
from pyramid.testing import tearDown as ptearDown
from pyramid.interfaces import IRendererFactory
from pyramid_mailer.interfaces import IMailer
from pyramid_mailer.mailer import DummyMailer as _DummyMailer
from repoze.sendmail.interfaces import IMailDelivery
from zope import component
from zope import interface
from zope.i18n.interfaces import IUserPreferredLanguages
from zope.i18nmessageid import MessageFactory
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.testing.cleanup import CleanUp
from zope.security.interfaces import IPrincipal
from nti.app.pyramid_zope import z3c_zpt
from nti.mailer._compat import parseaddr
from nti.mailer._default_template_mailer import _pyramid_message_to_message
from nti.mailer._default_template_mailer import create_simple_html_text_email
from nti.mailer.interfaces import IEmailAddressable
from nti.mailer.interfaces import EmailAddressablePrincipal
from nti.mailer.interfaces import IPrincipalEmailValidation
# Translation domain used by the test message factory; the layer registers
# a TestMessageFallbackDomain under this name.
MSG_DOMAIN = u'nti.mailer.tests'
_ = MessageFactory(MSG_DOMAIN)
class ITestMailDelivery(IMailer, IMailDelivery):
    # Test-only utility interface combining IMailer and IMailDelivery so a
    # single dummy object can be registered for both roles.
    pass
class TestMailDelivery(_DummyMailer):
    # Dummy mailer that records messages instead of sending them.
    # Sender used when a message does not specify one.
    default_sender = '<EMAIL>'
@interface.implementer(IBrowserRequest)
class Request(object):
    # Minimal stand-in for a pyramid/zope browser request, just enough for
    # the template mailer: annotations, a context and an application_url.
    response = None
    application_url = 'foo'

    def __init__(self):
        self.annotations = {}
        self.context = None
@interface.implementer(IUserPreferredLanguages)
class TestPreferredLanguages(object):
    # Adapter preferring the artificial 'test' language (English fallback)
    # so translated subjects are easy to assert on.
    def __init__(self, context):
        self.context = context

    def getPreferredLanguages(self):
        return ('test', 'en')
class PyramidMailerLayer(object):
    # Test layer: stands up a pyramid registry hooked into the global zope
    # component registry, with renderers, a dummy mailer and i18n wired up.

    request = None

    @classmethod
    def setUp(cls):
        import nti.mailer
        from zope.configuration import xmlconfig
        from zope.i18n.testmessagecatalog import TestMessageFallbackDomain
        cls.config = psetUp(registry=component.getGlobalSiteManager(),
                            request=cls.request,
                            hook_zca=True)
        cls.config.setup_registry()
        cls.config.include('pyramid_chameleon')
        cls.config.include('pyramid_mako')
        # Use z3c.pt page templates for .pt renderers.
        component.provideUtility(z3c_zpt.renderer_factory,
                                 IRendererFactory,
                                 name=".pt")
        cls._mailer = mailer = TestMailDelivery()
        component.provideUtility(mailer, ITestMailDelivery)

        # Provide a ITranslationDomain that knows about the 'test' language
        cls.i18n_domain = TestMessageFallbackDomain(MSG_DOMAIN)
        component.provideUtility(cls.i18n_domain, name=cls.i18n_domain.domain)

        # Configure the default INegotiator
        xmlconfig.file('configure.zcml', nti.mailer)

        # Add an adapter for our Request to IUserPreferredLanguages, as used
        # by the default INegotiator
        component.provideAdapter(TestPreferredLanguages, (Request,))

    @classmethod
    def tearDown(cls):
        from zope.testing import cleanup
        cleanup.cleanUp()  # Clear the site manager
        ptearDown()  # unhook ZCA
        cls._mailer = None

    @classmethod
    def testSetUp(cls):
        pass

    @classmethod
    def testTearDown(cls):
        # Must implement
        pass
@interface.implementer(IPrincipalEmailValidation)
class TestEmailAddressablePrincipal(EmailAddressablePrincipal):
    # Principal wrapper whose email validity can be forced from tests.

    def __init__(self, user, is_valid=True, *args, **kwargs):
        super(TestEmailAddressablePrincipal, self).__init__(user, *args, **kwargs)
        self.is_valid = is_valid

    def is_valid_email(self):
        # IPrincipalEmailValidation: report the canned validity flag.
        return self.is_valid
class _User(object):
    # Trivial user fixture carrying only a username.
    def __init__(self, username):
        self.username = username
class _Profile(object):
    # Trivial profile fixture carrying only a real name.
    def __init__(self, realname):
        self.realname = realname
# Sentinel distinguishing "argument omitted" from an explicit None.
_NotGiven = object()
class TestEmail(unittest.TestCase):
    # End-to-end tests for create_simple_html_text_email, driven through the
    # real pyramid/zope wiring provided by PyramidMailerLayer.

    layer = PyramidMailerLayer

    @fudge.patch('nti.mailer._verp._brand_name')
    def test_create_mail_message_with_non_ascii_name_and_string_bcc(self, brand_name):
        brand_name.is_callable().returns(None)

        class User(object):
            username = 'the_user'

        class Profile(object):
            # Note the umlaut e
            realname = u'<NAME>'

        user = User()
        profile = Profile()
        request = Request()
        request.context = user
        token_url = 'url_to_verify_email'
        msg = create_simple_html_text_email('tests/templates/test_new_user_created',
                                            subject='Hi there',
                                            recipients=['<EMAIL>'],
                                            bcc='<EMAIL>',
                                            template_args={'user': user,
                                                           'profile': profile,
                                                           'context': user,
                                                           'href': token_url,
                                                           'support_email': 'support_email' },
                                            package='nti.mailer',
                                            text_template_extension=".mak",
                                            request=request)
        assert_that(msg, is_(not_none()))
        base_msg = _pyramid_message_to_message(msg, ['<EMAIL>'], None)
        base_msg_string = str(base_msg)
        # quoted-printable encoding of iso-8859-1 value of umlaut-e
        assert_that(base_msg_string, contains_string('Hi=20Suz=EB=20Schwartz'))
        # Because we can't get to IPrincipal, no VERP info
        name, email = parseaddr(msg.sender)
        assert_that(name, is_('NextThought'))
        assert_that(email, is_('<EMAIL>'))
        # The string bcc argument is normalized to a list.
        assert_that(msg, has_property('bcc', ['<EMAIL>']))

    @fudge.patch('nti.mailer._verp._brand_name')
    def test_create_email_with_verp(self, brand_name):
        brand_name.is_callable().returns(None)

        @interface.implementer(IPrincipal, IEmailAddressable)
        class User(object):
            username = 'the_user'
            id = 'the_user'
            # this address encodes badly to simple base64
            # XXX: What?
            email = '<EMAIL>'

        class Profile(object):
            realname = u'<NAME>'

        user = User()
        profile = Profile()
        request = Request()
        request.context = user
        token_url = 'url_to_verify_email'
        msg = create_simple_html_text_email('tests/templates/test_new_user_created',
                                            subject='Hi there',
                                            recipients=[TestEmailAddressablePrincipal(user, is_valid=True)],
                                            template_args={'user': user,
                                                           'profile': profile,
                                                           'context': user,
                                                           'href': token_url,
                                                           'support_email': 'support_email' },
                                            package='nti.mailer',
                                            request=request)
        assert_that(msg, is_(not_none()))
        # import pyramid_mailer
        # from pyramid_mailer.interfaces import IMailer
        # from zope import component
        # mailer = pyramid_mailer.Mailer.from_settings(
        #     {'mail.queue_path': '/tmp/ds_maildir',
        #      'mail.default_sender': '<EMAIL>'
        #     } )
        # component.provideUtility( mailer, IMailer )
        # component.provideUtility(mailer.queue_delivery)
        # from .._default_template_mailer import _send_mail
        # _send_mail(msg, [user], None)
        # import transaction
        # transaction.commit()
        _pyramid_message_to_message(msg, [user], None)
        # we can get to IPrincipal, so we have VERP
        # The first part will be predictable, the rest won't
        name, email = parseaddr(msg.sender)
        assert_that(name, is_('NextThought'))
        assert_that(email, contains_string('no-reply+'))
        # Test invalid: an invalid recipient is filtered out entirely, so
        # no message is created at all.
        invalid_user = TestEmailAddressablePrincipal(user, is_valid=False)
        msg = create_simple_html_text_email('tests/templates/test_new_user_created',
                                            subject='Hi there',
                                            recipients=[invalid_user],
                                            template_args={'user': user,
                                                           'profile': profile,
                                                           'context': user,
                                                           'href': token_url,
                                                           'support_email': 'support_email' },
                                            package='nti.mailer',
                                            request=request)
        assert_that(msg, none())

    @fudge.patch('nti.mailer._verp._brand_name')
    def test_create_email_with_mako(self, brand_name):
        brand_name.is_callable().returns(None)
        user = _User('the_user')
        request = Request()
        request.context = user
        msg = self._create_simple_email(request,
                                        text_template_extension=".mak",
                                        user=user)
        assert_that(msg, is_(not_none()))

    @fudge.patch('nti.mailer._verp._brand_name')
    def test_create_email_no_request_context(self, brand_name):
        brand_name.is_callable().returns(None)
        request = Request()
        del request.context
        assert not hasattr(request, 'context')
        msg = self._create_simple_email(request,
                                        text_template_extension=".mak")
        assert_that(msg, is_(not_none()))

    def _create_simple_email(self,
                             request,
                             user=None,
                             profile=None,
                             text_template_extension=".txt",
                             subject=u'Hi there',
                             context=_NotGiven,
                             reply_to=_NotGiven):
        # Shared helper: build a message from the test template, forwarding
        # *context* / *reply_to* only when explicitly given.
        user = user or _User('the_user')
        profile = profile or _Profile(u'Mickey Mouse')
        token_url = 'url_to_verify_email'
        kwargs = {}
        if context is not _NotGiven:
            kwargs['context'] = context
        if reply_to is not _NotGiven:
            kwargs['reply_to'] = reply_to
        msg = create_simple_html_text_email(
            'tests/templates/test_new_user_created',
            subject=subject,
            recipients=['<EMAIL>'],
            template_args={'user': user,
                           'profile': profile,
                           'context': user,
                           'href': token_url,
                           'support_email': 'support_email'},
            package='nti.mailer',
            text_template_extension=text_template_extension,
            request=request,
            **kwargs)
        return msg

    def test_create_email_localizes_subject(self):
        import warnings
        request = Request()
        subject = _(u'Hi there')
        # If we don't provide a `context` object, by default
        # the ``translate`` function won't try to negotiate a language;
        # creating the message works around that by using the `request` as the context.
        msg = self._create_simple_email(request, subject=subject)
        assert_that(msg.subject, is_(u'[[nti.mailer.tests][Hi there]]'))
        # We can be explicit about that
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            msg = self._create_simple_email(request, subject=subject, context=request)
        assert_that(msg.subject, is_(u'[[nti.mailer.tests][Hi there]]'))
        # If we *do* provide a context, but there is no
        # IUserPreferredLanguages available for the context, we
        # fallback to using the request for translation. This can either
        # be in the ``request.context``, or the ``context`` argument
        request.context = self
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            msg = self._create_simple_email(request, subject=subject)
        assert_that(msg.subject, is_(u'[[nti.mailer.tests][Hi there]]'))
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            msg = self._create_simple_email(request, subject=subject, context=request)
        assert_that(msg.subject, is_(u'[[nti.mailer.tests][Hi there]]'))

    def test_warning_about_mismatch_of_context(self):
        # If we pass a context argument we get the warning because the
        # function always puts ``context=User()`` in the arguments.
        import warnings
        with warnings.catch_warnings(record=True) as warns:
            warnings.simplefilter('always')
            self._create_simple_email(Request(), context=self)
        if str is not bytes:  # pragma: no cover
            # There's a bug in the 'always' simple filter on Python 2:
            # It doesn't properly clear the stacklevel cache,
            # so if we've emitted this warning by calling self._create_simple_email
            # before, we don't catch that warning.
            self.assertEqual(len(warns), 1)
            self.assertIn('Mismatch between the explicit', str(warns[0].message))

    def test_create_email_reply_to(self):
        # Without reply_to, no extra headers; with it, a Reply-To header.
        msg = self._create_simple_email(Request())
        assert_that(msg.extra_headers, is_({}))
        msg = self._create_simple_email(Request(), reply_to='<EMAIL>')
        assert_that(msg.extra_headers, is_({'Reply-To': '<EMAIL>'}))
class TestFunctions(CleanUp, unittest.TestCase):
def test_get_renderer_spec_and_package_no_colon_no_slash_no_package(self):
    from nti.mailer import tests
    from .._default_template_mailer import _get_renderer_spec_and_package
    # A bare name (no colon, no slash) resolves under templates/ in the
    # calling package (level=2 -> this test module's package).
    template, package = _get_renderer_spec_and_package('no_colon', '.txt',
                                                       level=2)
    assert_that(template, is_('templates/no_colon.txt'))
    assert_that(package, is_(tests))
def test_get_renderer_spec_and_package_no_colon_no_package(self):
    from nti.mailer import tests
    from .._default_template_mailer import _get_renderer_spec_and_package
    # A name containing a slash is kept as a relative path (no templates/
    # prefix is added), still resolved in the calling package.
    template, package = _get_renderer_spec_and_package('subdir/no_colon', '.txt',
                                                       level=2)
    assert_that(template, is_('subdir/no_colon.txt'))
    assert_that(package, is_(tests))
@fudge.patch('nti.mailer._default_template_mailer.get_renderer')
def test__get_renderer(self, fake_get_renderer):
    from nti.mailer import tests
    from .._default_template_mailer import _get_renderer
    # Replace pyramid's get_renderer with an echo so we can observe
    # exactly what _get_renderer passes through.
    fake_get_renderer.expects_call().calls(lambda *args, **kwargs: (args, kwargs))
    args, kwargs = _get_renderer('no_colon', '.txt', level=2)
    assert_that(args, is_(('templates/no_colon.txt',)))
    assert_that(kwargs, is_({'package': tests}))
@fudge.patch('nti.mailer._default_template_mailer._get_renderer')
def test_do_html_text_templates_exist(self, fake__get_renderer):
    from .._default_template_mailer import do_html_text_templates_exist

    class MyException(Exception):
        pass

    # Stubbed renderer lookup: ValueError marks an expected "missing
    # template" condition; any other exception must propagate.
    def _get_renderer(base_template, extension, package=None, level=3):
        if extension in ('.pt', '.good'):
            return
        if extension == '.mako':
            # Unexpected case should propagate
            raise MyException
        # Expected case should return false.
        raise ValueError

    fake__get_renderer.expects_call().calls(_get_renderer)
    # This will raise ValueError on the second one
    result = do_html_text_templates_exist('base_template')
    self.assertFalse(result)
    with self.assertRaises(MyException):
        do_html_text_templates_exist('base_template', '.mako')
    result = do_html_text_templates_exist('base_template', '.good')
    self.assertTrue(result)
def test_create_no_subject(self):
    """A message with a ``None`` subject is not created (returns None)."""
    result = create_simple_html_text_email(
        'base_template',
        subject=None,
        # BUG FIX: ('<EMAIL>') was a parenthesized *string*, not a
        # one-element tuple -- a single-element tuple needs the comma.
        recipients=('<EMAIL>',)
    )
    assert_that(result, is_(none()))
def test__make_template_args_calls_all_IMailerTemplateArgsUtility(self):
    from ..interfaces import IMailerTemplateArgsUtility
    from .._default_template_mailer import _make_template_args
    the_request = object()

    # Note that we have to use different keys, because
    # the order in which these are called is not specified
    class A(object):
        def get_template_args(self, request):
            assert request is the_request
            return {'A': 1}

    class B(object):
        def get_template_args(self, request):
            assert request is the_request
            return {'B': 2}

    # unnamed
    component.provideUtility(A(), IMailerTemplateArgsUtility)
    # named
    component.provideUtility(B(), IMailerTemplateArgsUtility, u'B')
    template_args = {"C": 3}
    template_args_copy = template_args.copy()
    result = _make_template_args(
        the_request,
        self,
        '.txt',
        '.txt',
        template_args
    )
    # Got a new instance
    self.assertIsNot(result, template_args)
    # template args was left unchanged
    assert_that(template_args, is_(template_args_copy))
    # Result merges the caller's args, every utility's args, and a
    # 'context' entry.
    assert_that(result, is_({
        'context': self,
        'A': 1,
        'B': 2,
        'C': 3
    }))
def test__get_from_address_not_found(self):
    from .._default_template_mailer import _get_from_address
    # With no message, no recipients and no request there is no way to
    # derive a sender, so a RuntimeError is expected.
    with self.assertRaises(RuntimeError) as exc:
        _get_from_address(pyramid_mail_message=None, recipients=(), request=None)
    assert_that(exc.exception.args, is_(('No one to send mail from',)))
def test__send_mail_with_IMailDelivery(self):
from .._default_template_mailer import _send_mail
class MailDelivery(object):
sender = to = email_message = None
def send(self, *args):
self.sender, self.to, self.email_message = args
class MockPyramidMailMessage(object):
sender = '<EMAIL>'
send_to = '<EMAIL>'
email_message = object()
def to_message(self):
return self.email_message
delivery = MailDelivery()
component.provideUtility(delivery, IMailDelivery)
_send_mail(MockPyramidMailMessage())
# Verp gets applied to the sender, which inserts a default
# realname if there is no IMailerPolicy
self.assertEqual(delivery.sender, 'NextThought <%s>' % MockPyramidMailMessage.sender)
self.assertIs(delivery.to, MockPyramidMailMessage.send_to)
self.assertIs(delivery.email_message, MockPyramidMailMessage.email_message)
# If there is no IMailDelivery, but the IMailer has a `queue_delivery`,
# it gets used instead.
result = component.getSiteManager().unregisterUtility(delivery, IMailDelivery)
self.assertTrue(result)
class Mailer(object):
def __init__(self):
self.queue_delivery = MailDelivery()
mailer = Mailer()
delivery = mailer.queue_delivery
| |
# Copyright 2015 Fortinet Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from netaddr import IPAddress
from netaddr import IPNetwork
from oslo_log import log as logging
from oslo_utils import excutils
from neutron_lib import constants as n_consts
from neutron_lib.plugins import directory
from neutron.api import extensions as neutron_extensions
from neutron import context as neutron_context
from neutron.db.models import l3 as l3_db
from neutron.plugins.common import constants as const
from neutron_fwaas.db.firewall import firewall_db
from neutron_fwaas.db.firewall import firewall_router_insertion_db
import neutron_fwaas.extensions as extensions
from neutron_fwaas.extensions import firewall as fw_ext
from bell_fortinet._i18n import _LE
from bell_fortinet.common import config
from bell_fortinet.common import constants as constants
from bell_fortinet.common import utils as utils
from bell_fortinet.db import models as fortinet_db
from bell_fortinet.tasks import constants as t_consts
from bell_fortinet.tasks import tasks
LOG = logging.getLogger(__name__)
FORTINET_FW = "fortinet_fw"
FORTINET_FW_PLUGIN = "fortinet_fw_plugin"
class FortinetFirewallPlugin(
firewall_db.Firewall_db_mixin,
firewall_router_insertion_db.FirewallRouterInsertionDbMixin):
"""Implementation of the Neutron Firewall Service Plugin.
This class manages the workflow of FWaaS request/response.
Most DB related works are implemented in class
firewall_db.Firewall_db_mixin.
"""
neutron_extensions.append_api_extensions_path(extensions.__path__)
supported_extension_aliases = ["fwaas", "fwaasrouterinsertion"]
path_prefix = fw_ext.FIREWALL_PREFIX
    def __init__(self):
        """Do the initialization for the firewall service plugin here."""
        # Static Fortigate device settings and a REST API client built
        # from the plugin configuration.
        self._fortigate = config.fgt_info
        self._driver = config.get_apiclient()
        # Background task manager used for status/rollback bookkeeping;
        # started here so later calls can register tasks immediately.
        self.task_manager = tasks.TaskManager()
        self.task_manager.start()
        # Register the firewall DB callbacks/notifications.
        firewall_db.subscribe()
    def _rpc_update_firewall(self, context, firewall_id):
        """Re-apply a firewall after one of its rules/policies changed.

        Marks the firewall PENDING_UPDATE in the DB, rebuilds the full
        firewall dict (including its rule list) and pushes it to the
        backend via _apply_firewall.
        """
        status_update = {"firewall": {"status": const.PENDING_UPDATE}}
        super(FortinetFirewallPlugin, self).update_firewall(
            context, firewall_id, status_update)
        fw_with_rules = self._make_firewall_dict_with_rules(context,
                                                            firewall_id)
        # this is triggered on an update to fw rule or policy, no
        # change in associated routers.
        fw_with_rules['add-router-ids'] = self.get_firewall_routers(
            context, firewall_id)
        fw_with_rules['del-router-ids'] = []
        self._apply_firewall(context, **fw_with_rules)
def _rpc_update_firewall_policy(self, context, firewall_policy_id):
firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
if firewall_policy:
for firewall_id in firewall_policy['firewall_list']:
self._rpc_update_firewall(context, firewall_id)
def _ensure_update_firewall(self, context, firewall_id):
fwall = self.get_firewall(context, firewall_id)
if fwall['status'] in [const.PENDING_CREATE,
const.PENDING_UPDATE,
const.PENDING_DELETE]:
raise fw_ext.FirewallInPendingState(firewall_id=firewall_id,
pending_state=fwall['status'])
def _ensure_update_firewall_policy(self, context, firewall_policy_id):
firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
if firewall_policy and 'firewall_list' in firewall_policy:
for firewall_id in firewall_policy['firewall_list']:
self._ensure_update_firewall(context, firewall_id)
def _ensure_update_firewall_rule(self, context, firewall_rule_id):
fw_rule = self.get_firewall_rule(context, firewall_rule_id)
if 'firewall_policy_id' in fw_rule and fw_rule['firewall_policy_id']:
self._ensure_update_firewall_policy(context,
fw_rule['firewall_policy_id'])
def _get_routers_for_create_firewall(self, tenant_id, context, firewall):
# pop router_id as this goes in the router association db
# and not firewall db
LOG.debug("# _get_routers_for_create_firewall called Fortinet_plugin")
router_ids = firewall['firewall'].pop('router_ids', None)
if router_ids == n_consts.ATTR_NOT_SPECIFIED:
# old semantics router-ids keyword not specified pick up
# all routers on tenant.
l3_plugin = directory.get_plugin(n_consts.L3)
ctx = neutron_context.get_admin_context()
routers = l3_plugin.get_routers(ctx)
router_ids = [
router['id']
for router in routers
if router['tenant_id'] == tenant_id]
# validation can still fail this if there is another fw
# which is associated with one of these routers.
self.validate_firewall_routers_not_in_use(context, router_ids)
return router_ids
else:
if not router_ids:
# This indicates that user specifies no routers.
return []
else:
# some router(s) provided.
self.validate_firewall_routers_not_in_use(context, router_ids)
return router_ids
    def create_firewall(self, context, firewall):
        """Create a firewall, associate routers and apply it to the device.

        A firewall with no routers is created directly in INACTIVE state
        and never touches the backend.
        """
        LOG.debug("create_firewall() called Fortinet_plugin")
        tenant_id = firewall['firewall']['tenant_id']
        fw_new_rtrs = self._get_routers_for_create_firewall(
            tenant_id, context, firewall)
        if not fw_new_rtrs:
            # no messaging to agent needed, and fw needs to go
            # to INACTIVE(no associated rtrs) state.
            status = const.INACTIVE
            fw = super(FortinetFirewallPlugin, self).create_firewall(
                context, firewall, status)
            fw['router_ids'] = []
            return fw
        else:
            fw = super(FortinetFirewallPlugin, self).create_firewall(
                context, firewall)
            fw['router_ids'] = fw_new_rtrs
            fw_with_rules = (
                self._make_firewall_dict_with_rules(context, fw['id']))
            # Persist the firewall<->router association before pushing
            # the configuration to the device.
            fw_with_rtrs = {'fw_id': fw['id'], 'router_ids': fw_new_rtrs}
            self.set_routers_for_firewall(context, fw_with_rtrs)
            fw_with_rules['add-router-ids'] = fw_new_rtrs
            fw_with_rules['del-router-ids'] = []
            self._apply_firewall(context, **fw_with_rules)
            return fw
    def update_firewall(self, context, id, firewall):
        """Update a firewall and reconcile its router associations.

        Computes the router sets to add/remove, persists the new
        association, and pushes the resulting configuration to the
        backend via _apply_firewall. Raises FirewallInPendingState if
        the firewall is mid-transition.
        """
        LOG.debug("Fortinet_plugin update_firewall() called, "
                  "id is %(id)s, firewall is %(fw)s",
                  {'id': id, 'fw': firewall})
        self._ensure_update_firewall(context, id)
        # pop router_id as this goes in the router association db
        # and not firewall db
        router_ids = firewall['firewall'].pop('router_ids', None)
        fw_current_rtrs = self.get_firewall_routers(context, id)
        if router_ids is not None:
            if router_ids == []:
                # This indicates that user is indicating no routers.
                fw_new_rtrs = []
            else:
                self.validate_firewall_routers_not_in_use(
                    context, router_ids, id)
                fw_new_rtrs = router_ids
            self.update_firewall_routers(context, {'fw_id': id,
                                                   'router_ids': fw_new_rtrs})
        else:
            # router-ids keyword not specified for update pick up
            # existing routers.
            fw_new_rtrs = self.get_firewall_routers(context, id)
        if not fw_new_rtrs and not fw_current_rtrs:
            # no messaging to agent needed, and we need to continue
            # in INACTIVE state
            firewall['firewall']['status'] = const.INACTIVE
            fw = super(FortinetFirewallPlugin, self).update_firewall(
                context, id, firewall)
            fw['router_ids'] = []
            return fw
        else:
            firewall['firewall']['status'] = const.PENDING_UPDATE
            fw = super(FortinetFirewallPlugin, self).update_firewall(
                context, id, firewall)
            fw['router_ids'] = fw_new_rtrs
        fw_with_rules = (
            self._make_firewall_dict_with_rules(context, fw['id']))
        # determine rtrs to add fw to and del from
        fw_with_rules['add-router-ids'] = fw_new_rtrs
        fw_with_rules['del-router-ids'] = list(
            set(fw_current_rtrs).difference(set(fw_new_rtrs)))
        # last-router drives agent to ack with status to set state to INACTIVE
        fw_with_rules['last-router'] = not fw_new_rtrs
        LOG.debug("## update_firewall %s: Add Routers: %s, Del Routers: %s",
                  fw['id'],
                  fw_with_rules['add-router-ids'],
                  fw_with_rules['del-router-ids'])
        self._apply_firewall(context, **fw_with_rules)
        #self.agent_rpc.update_firewall(context, fw_with_rules)
        return fw
def update_firewall_for_delete_router(self, context, router_id):
LOG.debug("fwaas delete_router() called, router_id: %(rtid)s",
{'rtid': router_id})
cls = firewall_router_insertion_db.FirewallRouterAssociation
db_fw_rt = fortinet_db.query_record(context, cls, router_id=router_id)
if not db_fw_rt:
return None
firewall = {u'firewall': {'router_ids': []}}
return self.update_firewall(context, db_fw_rt.fw_id, firewall)
    def delete_db_firewall_object(self, context, id):
        # Remove only the firewall DB record; delete_firewall() tears down
        # the device-side configuration before calling this.
        super(FortinetFirewallPlugin, self).delete_firewall(context, id)
    def delete_firewall(self, context, id):
        """Remove a firewall from the device, then delete its DB record.

        The firewall is marked PENDING_DELETE first; _apply_firewall then
        removes device-side rules for every associated router before the
        DB object itself is deleted.
        """
        LOG.debug("Fortinet_plugin delete_firewall() called, fw_id %(id)s",
                  {'id': id})
        fw_with_rules = (
            self._make_firewall_dict_with_rules(context, id))
        status = {"firewall": {"status": const.PENDING_DELETE}}
        super(FortinetFirewallPlugin, self).update_firewall(
            context, id, status)
        # Reflect state change in fw_with_rules
        fw_with_rules['del-router-ids'] = self.get_firewall_routers(
            context, id)
        self._apply_firewall(context, **fw_with_rules)
        self.delete_db_firewall_object(context, id)
    def update_firewall_policy(self, context, id, firewall_policy):
        """Update a policy and re-apply it to every associated firewall.

        Device-side rules for the old policy contents are deleted first;
        _rpc_update_firewall_policy then re-applies the updated rule set.
        """
        LOG.debug("update_firewall_policy called, "
                  "id =%(id)s, firewall_policy=%(fp)s",
                  {'id': id, 'fp': firewall_policy})
        self._ensure_update_firewall_policy(context, id)
        firewall_policy_old = self.get_firewall_policy(context, id)
        firewall_rule_ids = firewall_policy_old.get('firewall_rules', [])
        tenant_id = firewall_policy_old.get('tenant_id', None)
        fwp = super(FortinetFirewallPlugin,
                    self).update_firewall_policy(context, id, firewall_policy)
        for fwr_id in firewall_rule_ids:
            fw_rule = self.get_firewall_rule(context, fwr_id)
            # Drop the rule from the device; it is re-added below.
            self._delete_firewall_rule(context, tenant_id, **fw_rule)
        self._rpc_update_firewall_policy(context, id)
        return fwp
def create_firewall_rule(self, context, firewall_rule):
"""
:param context:
:param firewall_rule:
firewall_rule={'firewall_rule': {... }}
:return:
"""
LOG.debug("create_firewall_rule() firewall_rule=%(fwr)s",
{'fwr': firewall_rule})
return super(FortinetFirewallPlugin,
self).create_firewall_rule(context, firewall_rule)
    def delete_firewall_rule(self, context, id):
        # Delegates straight to the DB mixin; no device-side call is made
        # here.
        super(FortinetFirewallPlugin, self).delete_firewall_rule(context, id)
    def update_firewall_rule(self, context, id, firewall_rule):
        """Update a firewall rule in the DB and on the device.

        On failure the exception is re-raised after the changes tracked
        by the task manager are rolled back.
        """
        LOG.debug("update_firewall_rule() id: %(id)s, "
                  "firewall_rule: %(firewall_rule)s",
                  {'id': id, 'firewall_rule': firewall_rule})
        try:
            fwr = self._update_firewall_rule_dict(context, id, firewall_rule)
            self._update_firewall_rule(context, id, fwr)
            self._ensure_update_firewall_rule(context, id)
            fwr = super(FortinetFirewallPlugin,
                        self).update_firewall_rule(context, id, firewall_rule)
            utils.update_status(self, context, t_consts.TaskStatus.COMPLETED)
            return fwr
        except Exception as e:
            # Re-raise after undoing partially-applied changes.
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("update_firewall_rule %(fwr)s failed"),
                          {'fwr': firewall_rule})
                utils._rollback_on_err(self, context, e)
    def insert_rule(self, context, id, rule_info):
        """Insert a rule into a policy; undo the insertion on failure."""
        self._ensure_update_firewall_policy(context, id)
        try:
            fwp = super(FortinetFirewallPlugin,
                        self).insert_rule(context, id, rule_info)
            self._rpc_update_firewall_policy(context, id)
            utils.update_status(self, context, t_consts.TaskStatus.COMPLETED)
            return fwp
        except Exception as e:
            with excutils.save_and_reraise_exception():
                # Compensate: take the rule back out before rolling back
                # the tracked device-side changes.
                self.remove_rule(context, id, rule_info)
                utils._rollback_on_err(self, context, e)
    def remove_rule(self, context, id, rule_info):
        """Remove a rule from a policy in the DB and on the device."""
        LOG.debug("Fortinet_plugin remove_rule() called")
        self._ensure_update_firewall_policy(context, id)
        if rule_info.get('firewall_rule_id', None):
            firewall_rule = self._get_firewall_rule(
                context, rule_info['firewall_rule_id'])
            fwr = self._make_firewall_rule_dict(firewall_rule)
            # Delete the device-side rule before updating the DB policy.
            self._delete_firewall_rule(context, fwr['tenant_id'], **fwr)
        fwp = super(FortinetFirewallPlugin, self).remove_rule(
            context, id, rule_info)
        self._rpc_update_firewall_policy(context, id)
        return fwp
def get_firewalls(self, context, filters=None, fields=None):
LOG.debug("fwaas get_firewalls() called, filters=%(filters)s, "
"fields=%(fields)s",
{'filters': filters, 'fields': fields})
fw_list = super(FortinetFirewallPlugin, self).get_firewalls(
context, filters, fields)
for fw in fw_list:
fw_current_rtrs = self.get_firewall_routers(context, fw['id'])
fw['router_ids'] = fw_current_rtrs
return fw_list
def get_firewall(self, context, id, fields=None):
LOG.debug("fwaas get_firewall() called")
res = super(FortinetFirewallPlugin, self).get_firewall(
context, id, fields)
fw_current_rtrs = self.get_firewall_routers(context, id)
res['router_ids'] = fw_current_rtrs
return res
def _apply_firewall(self, context, **fw_with_rules):
tenant_id = fw_with_rules['tenant_id']
default_fwr = self._make_default_firewall_rule_dict(tenant_id)
try:
if fw_with_rules.get('del-router-ids', None):
for fwr in list(fw_with_rules.get('firewall_rule_list', None)):
self._delete_firewall_rule(context, tenant_id, **fwr)
if default_fwr:
self._delete_firewall_rule(
context, tenant_id, **default_fwr)
self.update_firewall_status(
context, fw_with_rules['id'], const.INACTIVE)
if fw_with_rules.get('add-router-ids', None):
vdom = getattr(
fortinet_db.Fortinet_ML2_Namespace.query_one(
context, tenant_id=tenant_id), 'vdom', None)
if not vdom:
raise fw_ext.FirewallInternalDriverError(
driver='Fortinet_fwaas_plugin')
if default_fwr:
self._add_firewall_rule(context, tenant_id, **default_fwr)
for fwr in reversed(
list(fw_with_rules.get('firewall_rule_list', None))):
self._add_firewall_rule(context, tenant_id, **fwr)
self.update_firewall_status(
context, fw_with_rules['id'], const.ACTIVE)
else:
self.update_firewall_status(
context, fw_with_rules['id'], const.INACTIVE)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("apply_firewall %(fws)s failed"),
{'fws': fw_with_rules})
utils._rollback_on_err(self, context, e)
utils.update_status(self, context, t_consts.TaskStatus.COMPLETED)
def _add_firewall_rule(self, context, fwp_tenant_id, **fwr):
"""
:param obj:
:param context:
:param kwargs: dictionary, firewall rule
firewall_rule: {'source_ip_address': u'192.168.3.11/24',... }
:return:
"""
LOG.debug("# _add_firewall_rule() called")
namespace = fortinet_db.Fortinet_ML2_Namespace.query_one(
context, tenant_id=fwp_tenant_id)
vdom = getattr(namespace, 'vdom', None)
if not vdom or not fwr:
return None
inf_int, inf_ext = utils.get_vlink_intf(
self, context, vdom=namespace.vdom)
srcaddr = self._add_fwr_ip_address(
context, vdom, place='source_ip_address', **fwr)
dstaddr = self._add_fwr_ip_address(
context, vdom, place='destination_ip_address', **fwr)
service = self._add_fwr_service(context, vdom, **fwr)
action = self._get_fwr_action(**fwr)
profiles = self._get_fwp_profiles(action)
match_vip = 'enable'
name = fwr.get('name', '')
# add a basic firewall rule('accept': incoming, 'deny': bidirectional)
fortinet_fwp = utils.add_fwpolicy_to_head(self, context,
vdom=vdom,
srcaddr=srcaddr['name'],
srcintf='any',
dstaddr=dstaddr['name'],
dstintf='any',
service=service['name'],
match_vip=match_vip,
| |
<reponame>XanaduAI/xir
# pylint: disable=redefined-outer-name
"""Unit tests for the program class"""
from decimal import Decimal
from typing import Any, Dict, Iterable, List, MutableSet, Optional, Sequence

import pytest

import xir
@pytest.fixture
def program():
    """Returns an empty XIR program."""
    # A fresh instance per test keeps tests independent of each other.
    return xir.Program()
# pylint: disable=protected-access
def make_program(
    called_functions: Optional[MutableSet[str]] = None,
    declarations: Optional[Dict[str, List[xir.Declaration]]] = None,
    gates: Optional[Dict[str, Sequence]] = None,
    includes: Optional[List[str]] = None,
    observables: Optional[Dict[str, Sequence]] = None,
    options: Optional[Dict[str, Any]] = None,
    statements: Optional[List[xir.Statement]] = None,
    variables: Optional[MutableSet[str]] = None,
):
    """Returns an XIR program with the given attributes.

    Every argument may be omitted, in which case the corresponding empty
    container is used. Private ``Program`` attributes are assigned
    directly so tests can build arbitrary program states.
    """
    program = xir.Program()
    # ``x or default`` maps both None and an explicitly-empty argument to
    # a fresh default container.
    program._called_functions = called_functions or set()
    program._declarations = declarations or {"gate": [], "func": [], "out": [], "obs": []}
    program._gates = gates or {}
    program._includes = includes or []
    program._observables = observables or {}
    program._options = options or {}
    program._statements = statements or []
    program._variables = variables or set()
    return program
class TestSerialize:
    """Unit tests for the serialize method of an XIR Program."""
    def test_empty_program(self, program):
        """Tests serializing an empty program."""
        assert program.serialize() == ""
    def test_includes(self, program):
        """Tests serializing a XIR program with includes."""
        program.add_include("xstd")
        program.add_include("randomlib")
        res = program.serialize()
        # Includes serialize in insertion order, one 'use' per line.
        assert res == "use xstd;\nuse randomlib;"
    #####################
    # Test declarations
    #####################
    @pytest.mark.parametrize(
        "params, wires, declaration_type, want_res",
        [
            (["a", "b"], (0,), "gate", "gate name(a, b)[0];"),
            ([], (0, 2, 1), "obs", "obs name[0, 2, 1];"),
            (["theta"], ("a", "b", "c"), "out", "out name(theta)[a, b, c];"),
        ],
    )
    def test_declarations(self, params, wires, declaration_type, want_res):
        """Test serializing gate, operation and output declarations"""
        decl = xir.Declaration("name", declaration_type, params, wires)
        program = xir.Program()
        # Append directly to the private store to isolate serialize().
        program._declarations[declaration_type].append(decl)
        have_res = program.serialize()
        assert have_res == want_res
    @pytest.mark.parametrize(
        "params, want_res",
        [
            (["a", "b"], "func name(a, b);"),
            ([], "func name;"),
            (["theta"], "func name(theta);"),
        ],
    )
    def test_func_declaration(self, params, want_res):
        """Test serializing function declarations."""
        decl = xir.Declaration("name", type_="func", params=params)
        program = xir.Program()
        program._declarations["func"].append(decl)
        have_res = program.serialize()
        assert have_res == want_res
    ###################
    # Test statements
    ###################
    @pytest.mark.parametrize("name", ["ry", "toffoli"])
    @pytest.mark.parametrize("params", [[0, 3.14, -42]])
    @pytest.mark.parametrize("wires", [("w0", "w1"), ("w0",), ("wire0", "anotherWire", "FortyTwo")])
    def test_statements_params(self, program, name, params, wires):
        """Tests serializing an XIR program with general (gate) statements."""
        stmt = xir.Statement(name, params, wires)
        program.add_statement(stmt)
        res = program.serialize()
        params_str = ", ".join(map(str, params))
        wires_str = ", ".join(map(str, wires))
        assert res == f"{name}({params_str}) | [{wires_str}];"
    @pytest.mark.parametrize("name", ["ry", "toffoli"])
    @pytest.mark.parametrize("wires", [("w0", "w1"), ("w0",), ("wire0", "anotherWire", "FortyTwo")])
    def test_statements_no_params(self, program, name, wires):
        """Tests serializing an XIR program with general (gate) statements without parameters."""
        stmt = xir.Statement(name, [], wires)
        program.add_statement(stmt)
        res = program.serialize()
        wires_str = ", ".join(map(str, wires))
        # With no parameters the parentheses are omitted entirely.
        assert res == f"{name} | [{wires_str}];"
    @pytest.mark.parametrize("pref", [42, Decimal("3.14"), "2 * a + 1"])
    @pytest.mark.parametrize("wires", [("w0", "w1"), ("w0",), ("wire0", "anotherWire", "FortyTwo")])
    def test_observable_stmt(self, program, pref, wires):
        """Tests serializing an XIR program with observable statements."""
        xyz = "XYZ"
        # One factor per wire, joined with '@' in the serialized output.
        factors = [xir.ObservableFactor(xyz[i], None, w) for i, w in enumerate(wires)]
        factors_str = " @ ".join(str(t) for t in factors)
        wires_str = ", ".join(wires)
        program.add_observable("H", ["a", "b"], wires, [xir.ObservableStmt(pref, factors)])
        res = program.serialize()
        assert res == f"obs H(a, b)[{wires_str}]:\n {pref}, {factors_str};\nend;"
    #########################
    # Test gate definitions
    #########################
    @pytest.mark.parametrize("name", ["ry", "toffoli"])
    @pytest.mark.parametrize("params", [["a", "b"]])
    @pytest.mark.parametrize("wires", [("w0", "w1"), ("w0",), ("wire0", "anotherWire", "FortyTwo")])
    def test_gates_params_and_wires(self, program, name, params, wires):
        """Tests serializing an XIR program with gates that have both parameters and wires."""
        stmts = [xir.Statement("rz", [0.13], (0,)), xir.Statement("cnot", [], (0, 1))]
        program.add_gate(name, params, wires, stmts)
        res = program.serialize()
        params_str = ", ".join(map(str, params))
        wires_str = ", ".join(map(str, wires))
        assert (
            res == f"gate {name}({params_str})[{wires_str}]:"
            "\n rz(0.13) | [0];\n cnot | [0, 1];\nend;"
        )
    @pytest.mark.parametrize("name", ["ry", "toffoli"])
    @pytest.mark.parametrize("wires", [("w0", "w1"), ("w0",), ("wire0", "anotherWire", "FortyTwo")])
    def test_gates_no_params(self, program, name, wires):
        """Tests serializing an XIR program with gates that have no parameters."""
        stmts = [xir.Statement("rz", [0.13], (0,)), xir.Statement("cnot", [], (0, 1))]
        program.add_gate(name, [], wires, stmts)
        res = program.serialize()
        wires_str = ", ".join(map(str, wires))
        assert res == f"gate {name}[{wires_str}]:\n rz(0.13) | [0];\n cnot | [0, 1];\nend;"
    @pytest.mark.parametrize("name", ["ry", "toffoli"])
    @pytest.mark.parametrize("params", [["a", "b"]])
    def test_gates_no_wires(self, program, name, params):
        """Tests serializing an XIR program with gates that have no wires."""
        stmts = [xir.Statement("rz", [0.13], (0,)), xir.Statement("cnot", [], (0, 1))]
        program.add_gate(name, params, (), stmts)
        res = program.serialize()
        params_str = ", ".join(map(str, params))
        assert res == f"gate {name}({params_str}):\n rz(0.13) | [0];\n cnot | [0, 1];\nend;"
    @pytest.mark.parametrize("name", ["mygate", "a_beautiful_gate"])
    def test_gates_no_params_and_no_wires(self, program, name):
        """Tests serializing an XIR program with gates that have no parameters or wires."""
        stmts = [xir.Statement("rz", [0.13], (0,)), xir.Statement("cnot", [], (0, 1))]
        program.add_gate(name, [], (), stmts)
        res = program.serialize()
        assert res == f"gate {name}:\n rz(0.13) | [0];\n cnot | [0, 1];\nend;"
    ###############################
    # Test observable definitions
    ###############################
    @pytest.mark.parametrize("name", ["H", "my_op"])
    @pytest.mark.parametrize("params", [["a", "b"]])
    @pytest.mark.parametrize("wires", [("w0", "w1"), ("w0",), ("wire0", "anotherWire", "FortyTwo")])
    def test_observables_params_and_wires(self, program, name, params, wires):
        """Tests serializing an XIR program with observables that have both parameters and wires."""
        stmts = [
            xir.ObservableStmt(
                42, [xir.ObservableFactor("X", None, [0]), xir.ObservableFactor("Y", None, [1])]
            )
        ]
        program.add_observable(name, params, wires, stmts)
        res = program.serialize()
        params_str = ", ".join(map(str, params))
        wires_str = ", ".join(map(str, wires))
        assert res == f"obs {name}({params_str})[{wires_str}]:\n 42, X[0] @ Y[1];\nend;"
    @pytest.mark.parametrize("name", ["H", "my_op"])
    @pytest.mark.parametrize("wires", [("w0", "w1"), ("w0",), ("wire0", "anotherWire", "FortyTwo")])
    def test_observables_no_params(self, program, name, wires):
        """Tests serializing an XIR program with observables that have no parameters."""
        stmts = [
            xir.ObservableStmt(
                42, [xir.ObservableFactor("X", None, [0]), xir.ObservableFactor("Y", None, [1])]
            )
        ]
        program.add_observable(name, [], wires, stmts)
        res = program.serialize()
        wires_str = ", ".join(map(str, wires))
        assert res == f"obs {name}[{wires_str}]:\n 42, X[0] @ Y[1];\nend;"
    @pytest.mark.parametrize("name", ["H", "my_op"])
    @pytest.mark.parametrize("params", [["a", "b"]])
    def test_observables_no_wires(self, program, name, params):
        """Tests serializing an XIR program with observables that have no declared wires."""
        stmts = [
            xir.ObservableStmt(
                42, [xir.ObservableFactor("X", None, [0]), xir.ObservableFactor("Y", None, [1])]
            )
        ]
        program.add_observable(name, params, (), stmts)
        res = program.serialize()
        params_str = ", ".join(map(str, params))
        assert res == f"obs {name}({params_str}):\n 42, X[0] @ Y[1];\nend;"
    @pytest.mark.parametrize("name", ["my_op", "op2"])
    def test_observables_no_params_and_no_wires(self, program, name):
        """Tests serializing an XIR program with observables that have no parameters or wires."""
        stmts = [
            xir.ObservableStmt(
                42, [xir.ObservableFactor("X", None, [0]), xir.ObservableFactor("Y", None, [1])]
            )
        ]
        program.add_observable(name, [], (), stmts)
        res = program.serialize()
        assert res == f"obs {name}:\n 42, X[0] @ Y[1];\nend;"
class TestProgram:
"""Unit tests for the xir.Program class."""
    def test_init(self):
        """Tests that an (empty) XIR program can be constructed."""
        program = xir.Program(version="1.2.3", use_floats=False)
        # Constructor arguments round-trip through the attributes.
        assert program.version == "1.2.3"
        assert program.use_floats is False
        # Every collection on a fresh program starts out empty.
        assert list(program.wires) == []
        assert set(program.called_functions) == set()
        assert dict(program.declarations) == {"gate": [], "func": [], "out": [], "obs": []}
        assert dict(program.gates) == {}
        assert list(program.includes) == []
        assert dict(program.observables) == {}
        assert list(program.options) == []
        assert list(program.statements) == []
        assert set(program.variables) == set()
    def test_repr(self):
        """Test that the string representation of an XIR program has the correct format."""
        program = xir.Program(version="1.2.3")
        # Only the version appears in the repr.
        assert repr(program) == "<Program: version=1.2.3>"
    def test_search(self, program):
        """Tests that an XIR program can be searched for the wires or parameters
        of a declaration.
        """
        decl1 = xir.Declaration("spin", type_="gate", params=["x", "y", "z"], wires=(0, 1))
        program.add_declaration(decl1)
        decl2 = xir.Declaration("spin", type_="func", params=["t", "p", "o"])
        program.add_declaration(decl2)
        # Both declarations share the name "spin"; search() must pick the
        # one matching decl_type.
        assert program.search(decl_type="gate", attr_type="wires", name="spin") == (0, 1)
        assert program.search(decl_type="gate", attr_type="params", name="spin") == ["x", "y", "z"]
        assert program.search(decl_type="func", attr_type="params", name="spin") == ["t", "p", "o"]
    def test_search_for_invalid_declaration_type(self, program):
        """Tests that a ValueError is raised when an XIR program is searched
        for an invalid declaration type.
        """
        with pytest.raises(ValueError, match=r"Declaration type 'invalid' must be one of \{.*\}"):
            program.search(decl_type="invalid", attr_type="wires", name="name")
    def test_search_for_invalid_attribute_type(self, program):
        """Tests that a ValueError is raised when an XIR program is searched
        for an invalid attribute type.
        """
        with pytest.raises(ValueError, match=r"Attribute type 'invalid' must be one of \{.*\}"):
            program.search(decl_type="gate", attr_type="invalid", name="name")
    def test_search_for_missing_declaration(self, program):
        """Tests that a ValueError is raised when an XIR program is searched
        for a declaration which does not exist.
        """
        with pytest.raises(ValueError, match=r"No obs declarations with the name 'pos' were found"):
            program.search(decl_type="obs", attr_type="wires", name="pos")
    def test_add_called_function(self, program):
        """Tests that called functions can be added to an XIR program."""
        program.add_called_function("cos")
        assert set(program.called_functions) == {"cos"}
        program.add_called_function("sin")
        assert set(program.called_functions) == {"cos", "sin"}
        # Adding a duplicate is a no-op (set semantics).
        program.add_called_function("cos")
        assert set(program.called_functions) == {"cos", "sin"}
def test_add_declaration(self, program):
"""Tests that declarations can be added to an XIR program."""
tan = xir.Declaration("tan", type_="func", params=["x"])
program.add_declaration(tan)
assert program.declarations == {"func": [tan], "gate": [], "obs": [], "out": []}
u2 = xir.Declaration("U2", type_="gate", params=["a", "b"], wires=(0,))
program.add_declaration(u2)
assert program.declarations == {"func": [tan], "gate": [u2], "obs": | |
x-axis limit
ax.set_xlim([min(xvar), max(xvar)])
for yr in vert_yr:
plt.axvline(yr, linestyle='--', color='k', lw=1.5)
plt.annotate(str(yr), xy=(yr-5, ax.get_ylim()[0] + 0.01 * ax.get_ylim()[1]), color='k', size=8,
bbox=dict(edgecolor='none', fc='white', alpha=0.5))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
gridded_axis(ax)
# Show y-axis in scientific format
formatter = tkr.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1, 1))
ax.yaxis.set_major_formatter(formatter)
simple_legend(ax)
plt.title(title)
plt.tight_layout()
plt.savefig(out_path, dpi=constants.DPI, bbox_inches='tight')
plt.close()
def plot_multiple_ts(ax, np_arr, xvar, out_path='', vert_yr=None, title='', leg_name='', xlabel='', ylabel='',
                     linestyle='-', col=None, pos='first', do_log=False, fill_between=False):
    """
    Produces a single plot showing time-series of multiple variables
    :param ax: matplotlib axis to draw on
    :param np_arr: Numpy array to plot on y-axis
    :param xvar: Time series
    :param out_path: Output path (includes output file name)
    :param vert_yr: Years at which to draw annotated vertical dashed lines (default: none)
    :param title: Title of plot
    :param leg_name: Legend label for this series
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param linestyle: matplotlib line style for this series
    :param col: Line color (None lets matplotlib choose)
    :param pos: if 'first', then set up axis, if 'last' then save image. Refers to whether current time-series is
    first or last
    :param do_log: Use a logarithmic y-axis
    :param fill_between: Fill the area between the series and y=0
    :return: Nothing, side-effect: save an image
    """
    # Bug fix: the original default was a mutable list (vert_yr=[]); a
    # shared default object persists across calls, so use a None sentinel.
    if vert_yr is None:
        vert_yr = []
    logger.info('Plot multiple time-series')
    ax.plot(xvar, np_arr, label=leg_name, color=col, lw=1.75, linestyle=linestyle,
            markevery=int(len(xvar)/5.0), markeredgecolor='none')
    if fill_between:
        ax.fill_between(xvar, np_arr[:len(xvar)], y2=0)
    if do_log:
        ax.set_yscale('log')
    # Axis cosmetics and saving happen only once, on the last series.
    if pos == 'last':
        # Set x-axis limit
        ax.set_xlim([min(xvar), max(xvar)])
        # Annotate
        if vert_yr:
            for yr in vert_yr:
                ax.axvline(yr, linestyle='--', color='k', lw=1.5)
                plt.annotate(str(yr), xy=(yr-5, ax.get_ylim()[0] + 0.01 * ax.get_ylim()[1]), color='k', size=8,
                             bbox=dict(edgecolor='none', fc='white', alpha=0.5))
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        gridded_axis(ax)
        # Show y-axis in scientific format
        formatter = tkr.ScalarFormatter(useMathText=True)
        formatter.set_scientific(True)
        formatter.set_powerlimits((-1, 1))
        ax.yaxis.set_major_formatter(formatter)
        simple_legend(ax)
        plt.title(title.capitalize())
        plt.tight_layout()
        plt.savefig(out_path, dpi=constants.DPI, bbox_inches='tight')
def plot_hovmoller(nc_path, var, out_path, do_latitude=True, xlabel='', ylabel='', title='', cbar=''):
    """
    Plot a Hovmoller diagram of a netCDF variable, averaged over latitude or longitude.
    Ref: http://scitools.org.uk/iris/docs/v0.9.1/examples/graphics/hovmoller.html
    :param nc_path: Path to netCDF file to plot
    :param var: Variable name to load from the file
    :param out_path: Output image path
    :param do_latitude: Average over latitude if True, else over longitude
    :param xlabel: Spatial axis label
    :param ylabel: Time axis label
    :param title: Plot title
    :param cbar: Colorbar label
    :return: Nothing, side-effect: save an image
    """
    logger.info('Plot hovmoller')
    # TODO: Iris install is not working on mac os x
    # Bug fix: the original guard checked os.name in ('mac', 'posix');
    # os.name is 'posix' on Linux too, which silently disabled this plot
    # on every Unix system. Test specifically for macOS instead.
    if sys.platform == 'darwin':
        return
    import iris
    import iris.plot as iplt
    iris.FUTURE.netcdf_promote = True
    cubes = iris.load(nc_path, var)
    # Take the mean over latitude/longitude
    if do_latitude:
        cube = cubes[0].collapsed('latitude', iris.analysis.MEAN)
    else:
        cube = cubes[0].collapsed('longitude', iris.analysis.MEAN)
    # Create the plot contour with 20 levels
    iplt.contourf(cube, 20, cmap=palettable.colorbrewer.diverging.RdYlGn_9.mpl_colormap)
    if not do_latitude:
        plt.ylabel(xlabel)  # Latitude
        plt.xlabel(ylabel)  # Years
    else:
        plt.ylabel(ylabel)  # Years
        plt.xlabel(xlabel)  # Longitude
    plt.title(title)
    plt.colorbar(orientation='horizontal', extend='both', drawedges=False, spacing='proportional').set_label(cbar)
    # Stop matplotlib providing clever axes range padding and do not draw gridlines
    plt.grid(b=False)
    plt.axis('tight')
    plt.tight_layout()
    plt.savefig(out_path, dpi=constants.DPI)
    plt.close()
# @plot_glm.command()
# @click.argument('path_nc')
# @click.argument('out_path')
# @click.argument('var_name')
# @click.option('--xaxis_min', default=0.0, help='')
# @click.option('--xaxis_max', default=1.1, help='')
# @click.option('--xaxis_step', default=0.1, help='')
# @click.option('--annotate_date', help='')
# @click.option('--yr', default=0, help='')
# @click.option('--date', default=-1, help='')
# @click.option('--xlabel', default='', help='')
# @click.option('--title', default='', help='')
# @click.option('--tme_name', default='time', help='')
# @click.option('--show_plot', help='')
# @click.option('--any_time_data', default=True, help='')
# @click.option('--format', default='%.2f', help='')
# @click.option('--land_bg', help='')
# @click.option('--cmap', default=plt.cm.RdBu, help='')
# @click.option('--grid', help='')
# @click.option('--fill_mask', help='')
def plot_map_from_nc(path_nc, out_path, var_name, xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1,
                     annotate_date=False, yr=0, date=-1, xlabel='', title='', tme_name='time', show_plot=False,
                     any_time_data=True, format='%.2f', land_bg=True, cmap=plt.cm.RdBu, grid=False, fill_mask=False):
    """
    Plot var_name variable from netCDF file
    \b
    Args:
        path_nc: Name of netCDF file including path
        out_path: Output directory path + file name
        var_name: Name of variable in netCDF file to plot on map
        xaxis_min, xaxis_max, xaxis_step: Contour level range/step
        annotate_date: If True, draw `yr` as text on the map
        yr: Year label used when annotate_date is True
        date: Year to plot; -1 selects the last time step
        xlabel: Colorbar label
        title: Plot title
        tme_name: Name of the time dimension in the netCDF file
        show_plot: If True show interactively instead of saving
        any_time_data: Whether the variable has a leading time dimension
        format: Colorbar tick format string (NOTE: shadows the builtin)
        land_bg: White land background if True, transparent otherwise
        cmap: matplotlib colormap
        grid: If True draw meridians/parallels
        fill_mask: If True fill masked values with NaN
    Returns:
        Nothing, side-effect: save an image
    """
    logger.info('Plotting ' + var_name + ' in ' + path_nc)
    # Read netCDF file and get time dimension
    nc = util.open_or_die(path_nc, 'r', format='NETCDF4')
    lon = nc.variables['lon'][:]
    lat = nc.variables['lat'][:]
    if any_time_data:
        ts = nc.variables[tme_name][:] # time-series
        if date == -1: # Plot either the last year {len(ts)-1} or whatever year the user wants
            plot_yr = len(ts) - 1
        else:
            # Convert calendar year to an index relative to the first
            # time value.
            plot_yr = date - ts[0]
    # Draw empty basemap
    m = Basemap(projection='robin', resolution='c', lat_0=0, lon_0=0)
    # m.drawcoastlines()
    # m.drawcountries()
    # Find x,y of map projection grid.
    lons, lats = np.meshgrid(lon, lat)
    x, y = m(lons, lats)
    if fill_mask:
        nc_vars = np.ma.filled(nc.variables[var_name], fill_value=np.nan)
    else:
        nc_vars = np.array(nc.variables[var_name])
    # Plot
    # Get data for the last year from the netCDF file array
    if any_time_data:
        mask_data = maskoceans(lons, lats, nc_vars[int(plot_yr), :, :])
    else:
        mask_data = maskoceans(lons, lats, nc_vars[:, :])
    m.etopo()
    if land_bg:
        m.drawlsmask(land_color='white', ocean_color='none', lakes=True) # land_color = (0, 0, 0, 0) for transparent
    else:
        m.drawlsmask(land_color=(0, 0, 0, 0), ocean_color='none', lakes=True)
    cs = m.contourf(x, y, mask_data, np.arange(xaxis_min, xaxis_max, xaxis_step), cmap=cmap)
    if annotate_date:
        plt.annotate(str(yr), xy=(0.45, 0.1), xycoords='axes fraction', size=20)
    if grid:
        # where labels intersect = [left, right, top, bottom]
        m.drawmeridians(np.arange(-180, 180, 60), labels=[0,0,1,0], labelstyle='+/-', linewidth=0.5)
        m.drawparallels([-40, 0, 40], labels=[1, 0, 0, 0], labelstyle='+/-', linewidth=0.5)
    # Add colorbar
    cb = m.colorbar(cs, "bottom", size="3%", pad='2%', extend='both', drawedges=False, spacing='proportional',
                    format=format)
    cb.set_label(xlabel)
    plt.title(title, y=1.08)
    plt.tight_layout()
    if not show_plot:
        plt.savefig(out_path, dpi=constants.DPI)
        plt.close()
    else:
        plt.show()
    nc.close()
    return out_path
def plot_maps_ts(arr_or_nc, ts, lon, lat, out_path, var_name='', xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1,
                 save_name='fig', xlabel='', start_movie_yr=-1, title='', tme_name='time', land_bg=True, do_etopo=False,
                 do_log_cb=False, do_jenks=True, cmap=plt.cm.RdBu, grid=False):
    """
    Render one PNG map per sampled time step (every constants.MOVIE_SEP steps)
    for a variable held either in a numpy array or in a netCDF file.

    Args:
        arr_or_nc: Input can be numpy array or netcdf path. The array is
            indexed as arr[t, :, :], so it is assumed to be
            (time, lat, lon) -- TODO confirm against callers.
        ts: sequence of time values; ts[0] is taken as the base year
        lon: longitudes
        lat: latitudes
        out_path: output directory for the PNG frames
        var_name: netCDF variable name (used only when arr_or_nc is a path)
        xaxis_min: colorbar lower bound
        xaxis_max: colorbar upper bound
        xaxis_step: colorbar step
        save_name: filename prefix for each PNG frame
        xlabel: colorbar label
        start_movie_yr: not used in this function (callers truncate ts beforehand)
        title: plot title
        tme_name: not used in this function
        land_bg: draw white land over aqua ocean (True) or transparent land (False)
        do_etopo: draw the etopo relief background on each frame
        do_log_cb: Draw logarithmic colorbar (true) or not (false). Default: False
        do_jenks: use Jenks natural breaks when computing the colorbar range
        cmap: matplotlib colormap
        grid: draw meridians/parallels
    Returns:
        list of paths of the PNG frames written
    """
    logger.info('Plot time-series of maps')
    # Accept either an in-memory array or a path to a netCDF file; anything
    # else is a fatal usage error.
    if isinstance(arr_or_nc, (np.ndarray, np.generic)):
        is_nc = False
        arr = np.copy(arr_or_nc)
    elif os.path.splitext(arr_or_nc)[1] == '.nc':
        is_nc = True
    else:
        sys.exit(0)
    list_pngs = []
    base_yr = ts[0]
    # Draw empty basemap
    m = Basemap(projection='robin', resolution='c', lat_0=0, lon_0=0)
    # m.drawcoastlines()
    # m.drawcountries()
    # Find x,y of map projection grid.
    lons, lats = np.meshgrid(lon, lat)
    x, y = m(lons, lats)
    # Plot
    # Get data for the last year from the netCDF file array
    # tqdm's progress bar is suppressed when only a single frame is produced
    for yr in tqdm(ts[::constants.MOVIE_SEP], disable=(len(ts[::constants.MOVIE_SEP]) < 2)):
        if do_etopo:
            m.etopo()
        if len(ts) > 1 and not is_nc:
            # In-memory array: slice this year's 2D grid
            mask_data = maskoceans(lons, lats, arr[int(yr - base_yr), :, :])
        else:
            if is_nc:
                # netCDF path: read only this year's slice from disk
                arr = util.get_nc_var3d(arr_or_nc, var_name, int(yr - base_yr))
            mask_data = maskoceans(lons, lats, arr[:, :])
        # NOTE(review): the colorbar range is recomputed per frame from the
        # current arr, so the scale can change between frames -- confirm intended.
        cb_range = get_cb_range(arr, xaxis_min=xaxis_min, xaxis_max=xaxis_max, xaxis_step=xaxis_step, do_jenks=do_jenks)
        if land_bg:
            m.drawlsmask(land_color='white', ocean_color='aqua', lakes=True)  # land_color = (0, 0, 0, 0) transparent
        else:
            m.drawlsmask(land_color=(0, 0, 0, 0), ocean_color='none', lakes=True)
        if np.any(cb_range < 0.0) or not do_log_cb:
            # If any negative values exist in basemap then do not use log scale
            cs = m.contourf(x, y, mask_data, cb_range, extend='both', cmap=cmap)
        else:
            # manually set log levels e.g. http://matplotlib.org/examples/images_contours_and_fields/contourf_log.html
            lev_exp = np.arange(np.floor(np.log10(mask_data.min()) - 1), np.ceil(np.log10(mask_data.max()) + 1))
            levs = np.power(10, lev_exp)
            cs = m.contourf(x, y, mask_data, levs, norm=colors.LogNorm(), cmap=cmap)
        # Stamp the year onto the frame
        plt.annotate(str(int(yr)), xy=(0.45, 0.1), xycoords='axes fraction', size=20)
        if grid:
            # where labels intersect = [left, right, top, bottom]
            m.drawmeridians(np.arange(-180, 180, 60), labels=[0, 0, 1, 0], labelstyle='+/-', linewidth=0.5)
            m.drawparallels([-40, 0, 40], labels=[1, 0, 0, 0], labelstyle='+/-', linewidth=0.5)
        # Add colorbar
        cb = m.colorbar(cs, "bottom", size="3%", pad='2%', extend='both', drawedges=False, spacing='uniform')
        # Add label
        cb.set_label(xlabel)
        plt.title(title, y=1.08)
        out_png_name = out_path + os.sep + save_name + '_' + str(int(yr)) + '.png'
        list_pngs.append(out_png_name)
        plt.tight_layout()
        plt.savefig(out_png_name, dpi=constants.DPI)
        plt.close()
    return list_pngs
def plot_maps_ts_from_path(path_nc, var_name, lon, lat, out_path, xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1,
                           save_name='fig', xlabel='', start_movie_yr=-1, title='', do_jenks=True,
                           tme_name='time', land_bg=True, cmap=plt.cm.RdBu, grid=False):
    """
    Render one map image per time step for var_name in a netCDF file.

    :param path_nc: Name of netCDF file
    :param var_name: Name of variable in netCDF file to plot on map
    :param lon: List of lon's
    :param lat: List of lat's
    :param out_path: Output directory path + file name
    :param xaxis_min: colorbar lower bound
    :param xaxis_max: colorbar upper bound
    :param xaxis_step: colorbar step
    :param start_movie_yr: first year of the movie; <= 0 means use the whole series
    :return: List of paths of images produced, side-effect: save an image(s)
    """
    logger.info('Plotting ' + var_name + ' in ' + path_nc)
    util.make_dir_if_missing(out_path)
    # Only the time axis is needed here; plot_maps_ts re-opens the file
    # per frame for the gridded data.
    nc = util.open_or_die(path_nc)
    ts = nc.variables[tme_name][:]  # time-series
    if start_movie_yr > 0:
        # Drop everything before start_movie_yr so the movie begins there
        ts = ts.astype(int)
        ts = ts[start_movie_yr - ts[0]:]
    nc.close()
    return plot_maps_ts(path_nc, ts, lon, lat, out_path, var_name=var_name,
                        xaxis_min=xaxis_min, xaxis_max=xaxis_max, xaxis_step=xaxis_step,
                        save_name=save_name, xlabel=xlabel, do_jenks=do_jenks,
                        start_movie_yr=start_movie_yr, title=title, tme_name=tme_name, land_bg=land_bg, cmap=cmap,
                        grid=grid)
def plot_arr_to_map(path_arr, lon, lat, out_path, var_name='arr', xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1,
plot_type='sequential', annotate_date=False, yr=0, date=-1, xlabel='', title='', tme_name='time',
any_time_data=True, format='%.2f', land_bg=True, cmap=plt.cm.RdBu, grid=False, fill_mask=False):
"""
Plot var_name variable from netCDF file
:param path_arr: array (2D)
:param lon: List of lon's
:param lat: List of lat's
:param out_path: Output directory path + file name
:param var_name: Name of variable in netCDF file to plot on map
:param xaxis_min:
:param xaxis_max:
:param xaxis_step:
:param plot_type:
:param annotate_date:
:param yr:
:param date:
:param xlabel:
:param title:
:param tme_name:
:param any_time_data: Is there any time dimension?
:param format:
:param land_bg:
:param cmap:
:param grid:
:param fill_mask:
:return: Nothing, side-effect: save an image
"""
logger.info('Plotting ' + xlabel)
# Bail if xaxis_min == xaxis_max
if xaxis_min == | |
args.project)[0].replace("\\:", ":"))
else:
pick_and_set_project(args)
def cd(args):
    """
    Implement 'dx cd': switch the working folder (and, when the path names
    one, the current project).

    Exits via err_exit when no project can be determined or the folder does
    not exist in the workspace.
    """
    # entity_result should be None because expected='folder'
    project, folderpath = try_call(resolve_existing_path, args.path, 'folder')[:2]
    if project is not None:
        project_name = try_call(dxpy.get_handler(project).describe)['name']
        # It is obvious what the project is
        if project != dxpy.WORKSPACE_ID or 'DX_PROJECT_CONTEXT_NAME' not in os.environ:
            # Cache ID and name if necessary
            set_project(project, not state['interactive'], name=project_name)
            state['currentproj'] = project_name
    else:
        err_exit('Error: No current project was given', 3)
    # TODO: attempt to add caching later if it's an issue
    # if project in cached_project_paths and folderpath in cached_project_paths[project]:
    #     set_wd(folderpath, not interactive)
    try:
        dxproj = dxpy.get_handler(dxpy.WORKSPACE_ID)
        dxproj.list_folder(folder=folderpath)
    except Exception:
        # BUGFIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt raised while listing the folder.
        err_exit(fill(folderpath + ': No such file or directory found in project ' + dxpy.WORKSPACE_ID), 3)
    set_wd(folderpath, not state['interactive'])
def cmp_names(entry):
    """Sort key: the lower-cased object name from a describe record."""
    name = entry['describe']['name']
    return name.lower()
def ls(args):
    """
    Implement 'dx ls': list the contents of a project folder, or describe
    the specific data objects the path resolves to.

    Output depends on flags: --brief prints bare IDs, --verbose a long
    listing, otherwise names (IDs are appended when names collide).
    Exits via err_exit on resolution or API failure.
    """
    project, folderpath, entity_results = try_call(resolve_existing_path,  # TODO: this needs to honor "ls -a" (all) (args.obj/args.folders/args.full)
                                                   args.path,
                                                   ask_to_resolve=False)
    if project is None:
        err_exit('Current project must be set or specified before any data can be listed', 3)
    dxproj = dxpy.get_handler(project)
    only = ""
    # Decide which entity classes the API call should return
    if args.obj and not args.folders and not args.full:
        only = "objects"
    elif not args.obj and args.folders and not args.full:
        only = "folders"
    else:
        only = "all"
    resp = None
    if entity_results is None:
        # Path resolved to a folder: list its contents
        try:
            # Request the minimal set of describe fields possible
            if args.brief:
                describe_input = dict(fields={'id': True, 'name': True})
            elif args.verbose:
                describe_input = dict(fields=get_ls_l_desc_fields())
            else:
                describe_input = dict(fields={'id': True, 'class': True, 'name': True})
            resp = dxproj.list_folder(folder=folderpath,
                                      describe=describe_input,
                                      only=only,
                                      includeHidden=args.all)
            # Listing the folder was successful
            if args.verbose:
                print(UNDERLINE('Project:') + ' ' + dxproj.describe()['name'] + ' (' + project + ')')
                print(UNDERLINE('Folder :') + ' ' + folderpath)
            if not args.obj:
                # '-a' also shows pseudo-entries for . and ..
                folders_to_print = ['/.', '/..'] if args.all else []
                folders_to_print += resp['folders']
                for folder in folders_to_print:
                    if args.full:
                        print(BOLD() + BLUE() + folder + ENDC())
                    else:
                        print(BOLD() + BLUE() + os.path.basename(folder) + '/' + ENDC())
            if not args.folders:
                resp["objects"] = sorted(resp["objects"], key=cmp_names)
                if args.verbose:
                    if len(resp['objects']) > 0:
                        print_ls_l_header()
                    else:
                        print("No data objects found in the folder")
                if not args.brief and not args.verbose:
                    # Count duplicate names so ambiguous entries can show their IDs
                    name_counts = collections.Counter(obj['describe']['name'] for obj in resp['objects'])
                for obj in resp['objects']:
                    if args.brief:
                        print(obj['id'])
                    elif args.verbose:
                        print_ls_l_desc(obj['describe'], include_project=False)
                    else:
                        print_ls_desc(obj['describe'], print_id=True if name_counts[obj['describe']['name']] > 1 else False)
        except:
            err_exit()
    else:
        # We have results to describe
        name_counts = collections.Counter(obj['describe']['name'] for obj in entity_results)
        for result in entity_results:
            # TODO: Figure out the right way to reason when to hide hidden files:
            # if result['describe']['hidden'] and not args.all:
            #     continue
            if result['describe']['project'] == project:
                if args.brief:
                    print(result['id'])
                elif args.verbose:
                    print_ls_l_desc(result['describe'], include_project=False)
                else:
                    print_ls_desc(result['describe'], print_id=True if name_counts[result['describe']['name']] > 1 else False)
def mkdir(args):
    """
    Implement 'dx mkdir': create one or more folders, reporting each
    failure and exiting nonzero if any path could not be created.
    """
    had_error = False
    for path in args.paths:
        # Resolve the path and add it to the list
        try:
            project, folderpath, _none = resolve_path(path, expected='folder')
        except ResolutionError as details:
            print(fill('Could not resolve "' + path + '": ' + str(details)))
            had_error = True
            continue
        if project is None:
            # BUGFIX: previously fell through and called project_new_folder
            # with project=None, producing a second, confusing error.
            print(fill('Could not resolve the project of "' + path + '"'))
            had_error = True
            continue
        try:
            dxpy.api.project_new_folder(project, {"folder": folderpath, "parents": args.parents})
        except Exception as details:
            print("Error while creating " + folderpath + " in " + project)
            print(" " + str(details))
            had_error = True
    if had_error:
        err_exit('', 3)
def rmdir(args):
    """
    Implement 'dx rmdir': remove one or more (empty) folders, reporting
    each failure and exiting nonzero if any removal failed.
    """
    had_error = False
    for path in args.paths:
        try:
            project, folderpath, _none = resolve_path(path, expected='folder')
        except ResolutionError as details:
            print(fill('Could not resolve "' + path + '": ' + str(details)))
            had_error = True
            continue
        if project is None:
            # BUGFIX: previously fell through and called project_remove_folder
            # with project=None, producing a second, confusing error.
            print(fill('Could not resolve the project of "' + path + '"'))
            had_error = True
            continue
        try:
            # With partial=True the API may need several calls to finish;
            # loop until the server reports completion.
            completed = False
            while not completed:
                resp = dxpy.api.project_remove_folder(project, {"folder": folderpath,
                                                                "partial": True})
                if 'completed' not in resp:
                    raise DXError('Error removing folder')
                completed = resp['completed']
        except Exception as details:
            print("Error while removing " + folderpath + " in " + project)
            print(" " + str(details))
            had_error = True
    if had_error:
        err_exit('', 3)
def rm(args):
    """
    Implement 'dx rm': remove data objects and, with -r, folders.

    Paths are first resolved and grouped per project; all folder removals
    and one bulk object removal are then issued per project. Per-path
    errors are printed and accumulated; the command exits nonzero if any
    occurred. Recursive removal of a project root without --force aborts
    with a warning banner.
    """
    had_error = False
    projects = {}
    # Caution user when performing a recursive removal before any removal operation takes place
    if args.recursive and not args.force:
        for path in args.paths:
            try:
                with nostderr():
                    project, folderpath, entity_results = resolve_existing_path(path, allow_mult=True, all_mult=args.all)
                if folderpath == '/':
                    # 'rm -r /' would wipe the whole project: refuse unless --force
                    print("")
                    print("===========================================================================")
                    print("* {}: Recursive deletion will remove all files in project! *".format(RED("RED ALERT")))
                    print("* *")
                    print("* {} *".format(project))
                    print("* *")
                    print("* Please issue 'dx rm -r --force' if you are sure you want to do this. *")
                    print("===========================================================================")
                    print("")
                    err_exit('', 3)
            except Exception as details:
                continue
    for path in args.paths:
        # Resolve the path and add it to the list
        try:
            project, folderpath, entity_results = resolve_existing_path(path, allow_mult=True, all_mult=args.all)
        except Exception as details:
            print(fill('Could not resolve "' + path + '": ' + str(details)))
            had_error = True
            continue
        if project is None:
            had_error = True
            print(fill('Could not resolve "' + path + '" to a project'))
            continue
        # Group work per project so removals can be batched below
        if project not in projects:
            projects[project] = {"folders": [], "objects": []}
        if entity_results is None:
            # Path is a folder (or the project root itself)
            if folderpath is not None:
                if not args.recursive:
                    print(fill(u'Did not find "' + path + '" as a data object; if it is a folder, cannot remove it without setting the "-r" flag'))
                    had_error = True
                    continue
                else:
                    projects[project]['folders'].append(folderpath)
            else:
                print(fill('Path ' + path + ' resolved to a project; cannot remove a project using "rm"'))
                had_error = True
                continue
        else:
            projects[project]['objects'] += [result['id'] for result in entity_results]
    for project in projects:
        for folder in projects[project]['folders']:
            try:
                # set force as true so the underlying API requests are idempotent
                # partial=True means the server may need multiple calls to finish
                completed = False
                while not completed:
                    resp = dxpy.api.project_remove_folder(project,
                                                          {"folder": folder, "recurse": True,
                                                           "force": True, "partial": True},
                                                          always_retry=True)
                    if 'completed' not in resp:
                        raise DXError('Error removing folder')
                    completed = resp['completed']
            except Exception as details:
                print("Error while removing " + folder + " from " + project)
                print(" " + str(details))
                had_error = True
        try:
            # set force as true so the underlying API requests are idempotent
            dxpy.api.project_remove_objects(project,
                                            {"objects": projects[project]['objects'], "force": True},
                                            always_retry=True)
        except Exception as details:
            print("Error while removing " + json.dumps(projects[project]['objects']) + " from " + project)
            print(" " + str(details))
            had_error = True
    if had_error:
        # TODO: 'dx rm' and related commands should separate out user error exceptions and internal code exceptions
        err_exit('', 3)
def rmproject(args):
    """
    Implement 'dx rmproject': destroy one or more projects by name or ID.

    With --confirm (interactive), prompts before destroying and again if
    the project still has unfinished jobs. Per-project errors are printed
    and accumulated; the command exits nonzero if any occurred.
    """
    had_error = False
    for project in args.projects:
        # Be forgiving if they offer an extraneous colon
        substrings = split_unescaped(':', project)
        if len(substrings) > 1 or (len(substrings) == 1 and project[0] == ':'):
            print(fill('Unable to remove "' + project + '": a nonempty string was found to the right of an unescaped colon'))
            had_error = True
            continue
        if len(substrings) == 0:
            # NOTE(review): project[0] raises IndexError when project is the
            # empty string -- confirm split_unescaped can return [] only for ':'.
            if project[0] == ':':
                print(fill('Unable to remove ":": to remove the current project, use its name or ID'))
                had_error = True
                continue
        proj_id = try_call(resolve_container_id_or_name, substrings[0])
        if proj_id is None:
            print(fill('Unable to remove "' + project + '": could not resolve to a project ID'))
            had_error = True
            continue
        try:
            proj_desc = dxpy.api.project_describe(proj_id)
            if args.confirm:
                # Interactive confirmation before irreversible destruction
                value = input(fill('About to delete project "' + proj_desc['name'] + '" (' + proj_id + ')') + '\nPlease confirm [y/n]: ')
                if len(value) == 0 or value.lower()[0] != 'y':
                    had_error = True
                    print(fill('Aborting deletion of project "' + proj_desc['name'] + '"'))
                    continue
            try:
                dxpy.api.project_destroy(proj_id, {"terminateJobs": not args.confirm})
            except dxpy.DXAPIError as apierror:
                if apierror.name == 'InvalidState':
                    # Project still has running jobs: ask before terminating them
                    value = input(fill('WARNING: there are still unfinished jobs in the project.') + '\nTerminate all jobs and delete the project? [y/n]: ')
                    if len(value) == 0 or value.lower()[0] != 'y':
                        had_error = True
                        print(fill('Aborting deletion of project "' + proj_desc['name'] + '"'))
                        continue
                    dxpy.api.project_destroy(proj_id, {"terminateJobs": True})
                else:
                    raise apierror
            if not args.quiet:
                print(fill('Successfully deleted project "' + proj_desc['name'] + '"'))
        except EOFError:
            err_exit('', 3)
        except KeyboardInterrupt:
            err_exit('', 3)
        except Exception as details:
            print(fill('Was unable to remove ' + project + ', ' + str(details)))
            had_error = True
    if had_error:
        err_exit('', 3)
# ONLY for within the SAME project. Will exit fatally otherwise.
def mv(args):
dest_proj, dest_path, _none = try_call(resolve_path, args.destination, expected='folder')
try:
if dest_path is None:
raise ValueError()
dx_dest = dxpy.get_handler(dest_proj)
dx_dest.list_folder(folder=dest_path, only='folders')
except:
if dest_path is None:
err_exit('Cannot move to a hash ID', | |
"----------------\n"
st = st+ str(self.extent_c)+'\n'
if hasattr(self, 'Gs'):
st = st + "----------------\n"
st = st + "Gs : "+str(len(self.Gs.node))+"("+str(self.Np)+'/'+str(self.Ns)+'/'+str(len(self.lsss))+') :'+str(len(self.Gs.edges()))+'\n'
if hasattr(self,'Gt'):
st = st + "Gt : "+str(len(self.Gt.node))+' : '+str(len(self.Gt.edges()))+'\n'
if hasattr(self,'Gv'):
st = st + "Gv : "+str(len(self.Gv.node))+' : '+str(len(self.Gv.edges()))+'\n'
if hasattr(self,'Gi'):
st = st + "Gi : "+str(len(self.Gi.node))+' : '+str(len(self.Gi.edges()))+'\n'
if hasattr(self,'Gr'):
st = st + "Gr : "+str(len(self.Gr.node))+' : '+str(len(self.Gr.edges()))+'\n'
if hasattr(self,'Gw'):
st = st + "Gw : "+str(len(self.Gw.node))+' : '+str(len(self.Gw.edges()))+'\n'
st = st + "----------------\n\n"
if hasattr(self, 'degree'):
for k in self.degree:
if (k < 2) or (k > 3):
st = st + 'degree ' + \
str(k) + ' : ' + str(self.degree[k]) + "\n"
else:
st = st + 'number of node points of degree ' + \
str(k) + ' : ' + str(len(self.degree[k])) + "\n"
st = st + "\n"
st = st + "xrange : " + str(self.ax[0:2]) + "\n"
st = st + "yrange : " + str(self.ax[2:]) + "\n"
if hasattr(self,'pg'):
st = st + "center : " + "( %.2f,%.2f)" % (self.pg[0],self.pg[1]) + "\n"
if hasattr(self,'radius'):
st = st + "radius : %.2f " % self.radius + "\n"
# st = st + "\nUseful dictionnaries" + "\n----------------\n"
# if hasattr(self,'dca'):
# st = st + "dca {cycle : []} cycle with an airwall" +"\n"
# if hasattr(self,'di'):
# st = st + "di {interaction : [nstr,typi]}" +"\n"
# if hasattr(self,'sl'):
# st = st + "sl {slab name : slab dictionary}" +"\n"
# if hasattr(self,'name'):
# st = st + "name : {slab :seglist} " +"\n"
# st = st + "\nUseful arrays"+"\n----------------\n"
# if hasattr(self,'pt'):
# st = st + "pt : numpy array of points " +"\n"
# if hasattr(self,'normal'):
# st = st + "normal : numpy array of normal " +"\n"
# if hasattr(self,'offset'):
# st = st + "offset : numpy array of offset " +"\n"
# if hasattr(self,'tsg'):
# st = st + "tsg : get segment index in Gs from tahe" +"\n"
# if hasattr(self,'isss'):
# st = st + "isss : sub-segment index above Nsmax"+"\n"
# if hasattr(self,'tgs'):
# st = st + "tgs : get segment index in tahe from self.Gs" +"\n"
# if hasattr(self,'upnt'):
# st = st + "upnt : get point id index from self.pt"+"\n"
# #if hasattr(self,'iupnt'):
# # st = st + "iupnt : get point index in self.pt from point id "+"\n"
# if hasattr(self,'lsss'):
# st = st + "lsss : list of segments with sub-segment"+"\n"
# if hasattr(self,'sridess'):
# st = st + "stridess : stride to calculate the index of a subsegment" +"\n"
# if hasattr(self,'sla'):
# st = st + "sla : list of all slab names (Nsmax+Nss+1)" +"\n"
# if hasattr(self,'degree'):
# st = st + "degree : degree of nodes " +"\n"
# st = st + "\nUseful tip" + "\n----------------\n"
# st = st + "Point p in Gs => p_coord:\n"
# #st = st + "p -> u = self.iupnt[-p] -> p_coord = self.pt[:,u]\n\n"
#st = st + "Segment s in Gs => s_ab coordinates \n"
#st = st + "s2pc : segment to point coordinates (sparse) [p1,p2] = L.s2pc.toarray().reshape(2,2).T \n"
#st = st + \
# "s -> u = self.tgs[s] -> v = self.tahe[:,u] -> s_ab = self.pt[:,v]\n\n"
return(st)
    def __add__(self, other):
        """ addition

        One can add either a numpy array or an other layout

        Parameters
        ----------
        other : np.ndarray or Layout
            If an ndarray, other[0:2] is added to every node position
            (translation). Otherwise other's node indices are shifted past
            this layout's range and its graph data merged in.

        Returns
        -------
        Ls : Layout
            deep copy of self with the addition applied

        Notes
        -----
        Uses the networkx 1.x ``.node``/``.edge`` attribute API.
        NOTE(review): in the Layout case ``other.offset_index`` mutates the
        right-hand operand in place -- confirm this is intended.
        """
        Ls = copy.deepcopy(self)
        if type(other) == np.ndarray:
            # Translate every node position by the 2D offset
            for k in Ls.Gs.pos:
                Ls.Gs.pos[k] = Ls.Gs.pos[k] + other[0:2]
        else:
            # Shift other's point (negative ids) and segment (positive ids)
            # indices so they do not collide with ours before merging
            offp = -min(Ls.Gs.nodes())
            offs = max(Ls.Gs.nodes())
            other.offset_index(offp=offp, offs=offs)
            Ls.Gs.node.update(other.Gs.node)
            Ls.Gs.edge.update(other.Gs.edge)
            Ls.Gs.adj.update(other.Gs.adj)
            Ls.Gs.pos.update(other.Gs.pos)
            Ls.Np = Ls.Np + other.Np
            Ls.Ns = Ls.Ns + other.Ns
            Ls.Nss = Ls.Nss + other.Nss
        return(Ls)
    def __mul__(self, alpha):
        """ scale the layout

        Parameters
        ----------
        alpha : np.array or int or float
            scaling factor; a scalar scales x, y and z identically, a
            3-element array scales each axis separately

        Returns
        -------
        Ls : Layout
            scaled layout (deep copy; self is unchanged)

        Notes
        -----
        z coordinates are scaled about self.zmin.
        NOTE(review): ``np.array(Gs.pos.values())`` and iterating the result
        of ``filter`` rely on Python 2 returning lists -- confirm before
        porting to Python 3.
        """
        Ls = copy.deepcopy(self)
        Gs = Ls.Gs
        if type(alpha) != np.ndarray:
            assert((type(alpha) == float) or (
                type(alpha) == int)), " not float"
            # Promote the scalar to a uniform 3-axis scale
            alpha = np.array([alpha, alpha, alpha])
        else:
            assert(len(alpha) == 3), " not 3D"
        #
        # scaling x & y
        #
        x = np.array(Gs.pos.values())[:, 0]
        x = x * alpha[0]
        y = np.array(Gs.pos.values())[:, 1]
        y = y * alpha[1]
        xy = np.vstack((x, y)).T
        Ls.Gs.pos = dict(zip(Gs.pos.keys(), tuple(xy)))
        #
        # scaling z
        #
        # Only segment nodes (positive ids) carry z information
        nseg = filter(lambda x: x > 0, Gs.nodes())
        for k in nseg:
            Ls.Gs.node[k]['z'] = tuple(
                (np.array(Ls.Gs.node[k]['z']) - self.zmin) * alpha[2] + self.zmin)
            if 'ss_z' in Ls.Gs.node[k]:
                Ls.Gs.node[k]['ss_z'] = list(
                    (np.array(Ls.Gs.node[k]['ss_z']) - self.zmin) * alpha[2] + self.zmin)
        #
        # updating numpy array from graph
        #
        Ls.g2npy()
        return Ls
def _help(self):
st = ''
st = st + "\nUseful dictionnaries" + "\n----------------\n"
if hasattr(self,'dca'):
st = st + "dca {cycle : []} cycle with an airwall" +"\n"
if hasattr(self,'di'):
st = st + "di {interaction : [nstr,typi]}" +"\n"
if hasattr(self,'sl'):
st = st + "sl {slab name : slab dictionary}" +"\n"
if hasattr(self,'name'):
st = st + "name : {slab :seglist} " +"\n"
st = st + "\nUseful arrays"+"\n----------------\n"
if hasattr(self,'pt'):
st = st + "pt : numpy array of points " +"\n"
if hasattr(self,'normal'):
st = st + "normal : numpy array of normal " +"\n"
if hasattr(self,'offset'):
st = st + "offset : numpy array of offset " +"\n"
if hasattr(self,'tsg'):
st = st + "tsg : get segment index in Gs from tahe" +"\n"
if hasattr(self,'isss'):
st = st + "isss : sub-segment index above Nsmax"+"\n"
if hasattr(self,'tgs'):
st = st + "tgs : get segment index in tahe from self.Gs" +"\n"
if hasattr(self,'upnt'):
st = st + "upnt : get point id index from self.pt"+"\n"
st = st + "\nUseful Sparse arrays"+"\n----------------\n"
if hasattr(self,'sgsg'):
st = st + "sgsg : "+"get common point of 2 segment (usage self.sgsg[seg1,seg2] => return common point \n"
if hasattr(self,'s2pc'):
st = st + "s2pc : "+"from a Gs segment node to its 2 extremal points (tahe) coordinates\n"
if hasattr(self,'s2pu'):
st = st + "s2pc : "+"from a Gs segment node to its 2 extremal points (tahe) index\n"
if hasattr(self,'p2pu'):
st = st + "p2pc : "+"from a Gs point node to its coordinates\n"
st = st + "\nUseful lists"+"\n----------------\n"
#if hasattr(self,'iupnt'):
# st = st + "iupnt : get point index in self.pt from point id "+"\n"
if hasattr(self,'lsss'):
st = st + "lsss : list of segments with sub-segment"+"\n"
if hasattr(self,'sridess'):
st = st + "stridess : stride to calculate the index of a subsegment" +"\n"
if hasattr(self,'sla'):
st = st + "sla : list of all slab names (Nsmax+Nss+1)" +"\n"
if hasattr(self,'degree'):
st = st + "degree : degree of nodes " +"\n"
st = st + "\nUseful tip" + "\n----------------\n"
st = st + "Point p in Gs => p_coord: Not implemented\n"
# st = st + "p -> u = self.upnt[-p] -> p_coord = self.pt[:,-u]\n\n"
st = st + "Segment s in Gs => s_ab coordinates \n"
st = st + \
"s -> u = self.tgs[s] -> v = self.tahe[:,u] -> s_ab = self.pt[:,v]\n\n"
print(st)
def ls(self, typ='lay'):
""" list the available file in dirstruc
Parameters
----------
typ : string optional
{'lay'|'osm'|'wrl'}
Returns
-------
lfile_s : list
sorted list of all the .str file of strdir
Notes
-----
strdir is defined in the Project module
Examples
--------
Display all available structures
>>> from pylayers.gis.layout import *
>>> L = Layout()
>>> fillist = L.ls()
"""
if typ == 'lay':
pathname = os.path.join(pro.pstruc['DIRLAY'], '*.' + typ)
if typ == 'osm':
pathname = os.path.join(pro.pstruc['DIROSM'], '*.' + typ)
if typ == 'wrl':
pathname = os.path.join(pro.pstruc['DIRWRL'], '*.' + typ)
lfile_l = glob.glob(os.path.join(pro.basename, pathname))
lfile_s = []
for fi in lfile_l:
fis = pyu.getshort(fi)
lfile_s.append(fis)
lfile_s.sort()
return lfile_s
def offset_index(self, offp=0, offs=0):
""" offset points and segment index
Parameters
----------
offp : offset points
offs : | |
"""
<NAME>
(<EMAIL>)
Computational Epigenetics Sector
Waterland Lab, BCM 2017
AXTELL
Read-level Methylation Extractor
Changelog:
UPDATE 7-31-2017
Includes a column that shows which CpGs in each read contribute to that read's methylation status
UPDATE 10-30-2017
Fixes a bug where reads report CpGs at slightly different locations, causing extra missing values to appear
Cleaned up the code and removed unnecessary pieces of code
UPDATE 4-26-2018
Added capability to store objects as Bin objects
Sample usage:
python axtell.py -c chr19 -s debug.txt -o debugout.csv
"""
from scipy.stats.mstats import gmean
from collections import defaultdict
import multiprocessing as mp
from CpG_Bin import Bin
import pandas as pd
import numpy as np
import linecache
import argparse
import sklearn
import math
import sys
import os
import ctypes
import cPickle
# Reads a sam file and returns its alignment lines
def get_reads(datafile):
    """
    Input:
        1. datafile - name of a sam file
    Output:
        1. A list of alignment record lines (newline-terminated strings);
           header lines (starting with "@") are excluded.
    """
    # 'with' guarantees the handle is closed even if reading raises
    # (the previous version leaked the handle on error).
    with open(datafile) as handle:
        return [line for line in handle if line[0] != "@"]
# Maps an absolute chromosome coordinate to its position within its bin
def get_rel_pos(x):
    """
    Input
        1. x - absolute position of a CpG in chromosome. Integer.
    Output
        1. relative position of CpG in a bin (1..bin_size). Integer.
    """
    remainder = x % bin_size
    # An exact multiple of bin_size is the last position of its bin
    return bin_size if remainder == 0 else remainder
# Returns -1 if the read reports methylated CpGs over the guanine strand,
# 0 otherwise (including when the two strings cannot be aligned)
def reports_at_G(methy_read, seq_read):
    """Detect whether methylation calls (Z/z) sit over a G/A base."""
    if len(methy_read) != len(seq_read):
        return 0
    for call, base in zip(methy_read, seq_read):
        if call in ("Z", "z") and base in ("G", "g", "A", "a"):
            return -1
    return 0
# Returns the bin that would contain a given dinucleotide position, 1-based coordinates
def get_bin_name(cpg_pos):
    """
    Return the name (inclusive 1-based end coordinate) of the fixed-size
    bin containing cpg_pos, e.g. positions 1..bin_size map to bin_size.

    Uses the module-level ``bin_size``.
    """
    # BUGFIX: '//' keeps floor (integer) division under both Python 2 and 3;
    # the original '/' silently produced float bin names under Python 3.
    bin_num = (cpg_pos-1) // bin_size  # integer division is key
    bin_name = (bin_num*bin_size) + bin_size
    return bin_name
# Aggregates per-read bin records into a per-bin DataFrame
def update_data_frame(bin_data):
    """
    Input:
        1. bin_data - list (one entry per read) of lists of tuples
           (bin_name, average_methylation, cpg_contribution)
    Output:
        1. pandas DataFrame with one row per bin (sorted by bin name) and
           columns "Bin Name", "Average Methylation Values" (list of
           per-read averages) and "Read Contributions".
    """
    methy_by_bin = {}
    contrib_by_bin = {}
    for read_record in bin_data:
        for bin_name, avg_methy, cpg_contribution in read_record:
            methy_by_bin.setdefault(bin_name, []).append(avg_methy)
            contrib_by_bin.setdefault(bin_name, []).append(cpg_contribution)
    sorted_bins = sorted(methy_by_bin)
    frame = pd.DataFrame.from_dict({
        "Bin Name": sorted_bins,
        "Average Methylation Values": [methy_by_bin[b] for b in sorted_bins],
        "Read Contributions": [contrib_by_bin[b] for b in sorted_bins],
    })
    # Fix the column order explicitly
    return frame[["Bin Name", "Average Methylation Values", "Read Contributions"]]
# Processes every read of a SAM file and aggregates per-bin methylation
def analyze_sam_file(filename):
    """
    Input:
        1. filename: path to a SAM file
    Output:
        1. pandas DataFrame from update_data_frame(): one row per bin with
           the per-read average methylation values and CpG contributions.
    """
    # NOTE(review): the previous version copied the reads into a
    # multiprocessing shared array (typed c_double for string data) and
    # called compute_read_methylation(i, def_param=shared_array) -- but
    # compute_read_methylation(read) takes a single read string, so that
    # call raised TypeError. Process the reads serially until a working
    # parallel path is reinstated.
    reads = get_reads(filename)
    bin_data = [compute_read_methylation(read) for read in reads]
    return update_data_frame(bin_data)
# Computes the overlap between a read and the bins it spans. The bin name and
# average read methylation for the overlapping CpG sites are computed.
# Returned is a list of (bin, average methylation, contribution) tuples.
def compute_read_methylation(read):
    """
    Input:
        1. read - one alignment line from a SAM file (tab-separated string)
    Output:
        1. list of tuples (bin_name, read_mean_methy,
           (read_mean_methy, cytosines)) where cytosines is a list of
           (absolute_position, 0/1 methylation) tuples. Reads with
           MAPQ < 3 or a split alignment (more than one 'M' CIGAR op)
           return an empty list.

    Uses the module-level ``chromosome_name`` and (via get_bin_name)
    ``bin_size``.
    """
    informative_bins = []
    data = read.split()
    start_pos = int(data[3])
    seq = data[9]
    methy_call = data[16][5:]  # change 16 back to 13 for Jduryea's reads, 16 for scott
    cigar = data[5]
    # BUGFIX: data[4] is a string; the old "MAPQ < 3" compared str with int,
    # which never filtered anything under Python 2 and raises under Python 3.
    MAPQ = int(data[4])
    if MAPQ < 3:  # bad mapping quality
        return []
    if cigar.count("M") > 1:  # sequence doesn't directly align
        return []
    methy_call = update_methy_with_cigar(methy_call, cigar)
    # correction factor for reads that report methylation at the Guanine
    guanine_correction_factor = reports_at_G(methy_call, seq)
    start_pos += guanine_correction_factor
    # Place CpG's in corresponding bins
    bin_cpg_data = defaultdict(lambda: [])
    for i, dn in enumerate(methy_call):
        if dn in ["Z", "z"]:  # methylation call at a CpG cytosine
            pos = start_pos + i
            bin_name = get_bin_name(pos)
            cpg_tup = (pos, int(dn == "Z"))  # (position, 1 if methylated else 0)
            bin_cpg_data[bin_name].append(cpg_tup)
    for bin_, cytosines in bin_cpg_data.items():
        bin_name = chromosome_name + "_" + str(bin_)
        read_mean_methy = np.mean([float(x[1]) for x in cytosines])
        # CpG-coverage filtering criteria (e.g. "must cover at least half the
        # CpGs in the bin") are currently disabled: every overlapped bin is
        # treated as informative.
        informative_bins.append((bin_, read_mean_methy, (read_mean_methy, cytosines)))
    return informative_bins
def parse_cigar(cigar):
    """Split a CIGAR string into its length+operation components,
    e.g. "10M2I" -> ["10M", "2I"]."""
    components = []
    start = 0
    for idx, ch in enumerate(cigar):
        # Each alphabetic character terminates one component
        if ch.isalpha():
            components.append(cigar[start:idx + 1])
            start = idx + 1
    return components
# Rewrites the methylation call string according to the CIGAR: matched bases
# are kept, inserted bases dropped, deleted positions padded with "."
def update_methy_with_cigar(methy_call, cigar):
    """Align methy_call to the reference using the read's CIGAR string."""
    rebuilt = []
    start = 0
    # Walk the CIGAR components inline (digits followed by one op letter)
    for idx, ch in enumerate(cigar):
        if not ch.isalpha():
            continue
        length = int(cigar[start:idx])
        start = idx + 1
        if ch == "M":  # matched: keep these calls
            rebuilt.append(methy_call[:length])
            methy_call = methy_call[length:]
        elif ch == "I":  # insertion: drop calls not on the reference
            methy_call = methy_call[length:]
        elif ch == "D":  # deletion: pad missing reference positions
            rebuilt.append("." * length)
    return "".join(rebuilt)
# Creates a cpg matrix based on the read contribution data in a bin
def make_cpg_matrix(cpg_data):
    """
    Build a reads x CpG-positions matrix for one bin and wrap it in a Bin.

    Input:
        1. cpg_data - list (one entry per read) of tuples whose second item
           is a list of (absolute_position, 0/1 methylation) tuples.
    Output:
        1. Bin object whose matrix holds 0/1 methylation per (read, CpG),
           with -1 marking positions a read does not cover.

    Note: written for Python 2 (print statements below).
    """
    num_reads = len(cpg_data)
    positions = set([])
    # scan positions is first sweep
    for read_index in range(num_reads):
        read = cpg_data[read_index]
        for cpg_i in range(len(read[1])):
            cpg = read[1][cpg_i]
            position = cpg[0]
            methyStatus = cpg[1]
            positions.update({position})
    # not all reads contain all positions, so finding all possible positions are necessary
    positions = sorted(positions)
    num_cpgs = len(positions)
    # now create the cpg matrix; -1 = read does not cover this CpG
    matrix = -1 * np.ones((num_reads, num_cpgs))
    for read_index in range(num_reads):
        read = cpg_data[read_index]
        for cpg_i in range(len(read[1])):
            cpg = read[1][cpg_i]
            position = cpg[0]
            methyStatus = cpg[1]
            # NOTE(review): positions.index() is O(n) per CpG; a dict from
            # position to column would be O(1) -- fine for small bins.
            matrix[read_index, positions.index(position)] = methyStatus
    bin_name = get_bin_name(positions[0])
    # NOTE(review): get_bin_name returns the bin's end coordinate; the
    # subtraction appears to convert it to a start coordinate -- confirm
    # against the Bin class's binStartInc convention.
    bin_name -= bin_size # correction
    binStart = bin_name
    binEnd = bin_name + bin_size - 1
    mybin = Bin(matrix=matrix, binStartInc=bin_name, binEndInc= binEnd, cpgPositions=positions, chromosome=chromosome_name, binSize=bin_size, verbose=True, tag1="Jack's demo data, April 2018")
    print "---------------"
    print "matrix\n", mybin.matrix
    print "bin start:", mybin.binStartInc
    print "bin end:", mybin.binEndInc
    print "positions:",positions
    return mybin
    # mybin = Bin()
    # cpg_matrix = np.zeros((len(cpg_data), bin_size))#.fill(-1)
    # cpg_matrix.fill(-1)
    # positions = set([])
    # for read_index in range(len(cpg_data)):
    #     read = cpg_data[read_index]
    #     for cpg in read[1]:
    #         position = cpg[0]
    #         positions.update({position})
    #         methyStatus = cpg[1]
    #         cpg_matrix[read_index,position-1] = methyStatus
    # ## filter column
    # cpg_matrix = np.transpose(cpg_matrix)
    # cpg_matrix = cpg_matrix[np.sum(cpg_matrix, axis=1) != -1 *len(cpg_data)]
    # return np.transpose(cpg_matrix), list(sorted(positions))
# Given a sorted list and a target value, efficiently find the element in the list closest in abs value to the target
def inexact_binary_search(alist, target):
    """
    Return the element of ``alist`` closest in absolute difference to target.

    Input:
        1. alist - a sorted list, ascending order (left is low right is high);
           must be non-empty
        2. target - the item we are looking for
    Output:
        1. the element of alist minimizing abs(element - target). When the
           two middle candidates are equidistant, the search descends into
           the right half (behavior preserved from the original).
    """
    l = len(alist)
    # Base case
    if l == 1:
        return alist[0]
    # Recursive case
    else:
        # Split list in half.
        # BUGFIX: '//' keeps floor division under Python 3; the original '/'
        # produced a float slice index and raised TypeError there.
        L = alist[:l // 2]
        R = alist[l // 2:]
        if abs(L[-1] - target) < abs(R[0] - target):
            return inexact_binary_search(L, target)
        elif L[-1] == R[0]:
            # Equal boundary values: pick the side the target falls on
            if target <= L[-1]:
                return inexact_binary_search(L, target)
            else:
                return inexact_binary_search(R, target)
        else:
            return inexact_binary_search(R, target)
# Cleans the dataframe and uses annotation information
def clean_data(dataframe, chromosome_name):
    """Prefix every bin name with the chromosome, e.g. 42 -> "chr1_42".

    Input:
        1. dataframe: pandas DataFrame with a "Bin Name" column
        2. chromosome_name: string, name of the chromosome being analyzed
    Output:
        1. The same DataFrame, with "Bin Name" rewritten in place
    """
    # Stringify and prefix in a single pass over the column.
    dataframe["Bin Name"] = chromosome_name + "_" + dataframe["Bin Name"].apply(str)
    # CpG-count annotation lookup intentionally disabled here.
    return dataframe
# Filters a dataframe by cpg counts in bin and read coverage
def filter_data(dataframe, min_read_coverage, min_cpg_count):
    """Keep only bins covered by at least *min_read_coverage* reads.

    Inputs:
        1. dataframe: pandas DataFrame whose "Average Methylation Values"
           column holds a list of per-read averages for each bin
        2. min_read_coverage: integer, minimum overlapping reads to keep a bin
        3. min_cpg_count: integer, currently unused (CpG-count filter disabled)
    Output:
        1. The filtered DataFrame
    """
    coverage = dataframe["Average Methylation Values"].apply(len)
    return dataframe[coverage >= min_read_coverage]
# Given an index, this returns the position
# and methy call associated with the read in this
# position of the dataframe. This function helps us in
# multiprocessing that we don't have to copy over all the
# data for each process
def get_read_from_index(index):
    """Return (position, methylation_call) for line *index* of sam_file.

    NOTE(review): relies on module globals sam_file and using_super_reads.
    """
    # BUG FIX: str.split takes a literal separator, not a regex, so the
    # original line.split("\s+") almost never split anything. Splitting on
    # runs of whitespace (the intent of "\s+") is str.split() with no args.
    line = linecache.getline(sam_file, index).split()  # Get line efficiently
    # TODO: should we clear the cache at some point?
    linecache.clearcache()
    if using_super_reads:
        return (line[0], line[1])
    else:
        # Field 4 is the SAM POS column; field 17 carries the methylation
        # string after a 5-character tag prefix (e.g. "XM:Z:").
        return (line[3], line[16][5:])
# Similar purpose to the function get_read_from_index,
# Returns the average methylation values
def get_methylation_patter_from_index(index):
    """Return the average-methylation entry for row *index*.

    NOTE(review): relies on module global read_methylation_df.
    """
    # BUG FIX: the original ignored *index* and always returned row 0,
    # so every lookup saw the same methylation pattern.
    return read_methylation_df["Average Methylation Values"].iloc[index]
# Command line arguments
des = "Axtell was written by <NAME> (<EMAIL>), BCM 2017, Waterland Lab. This software is a custom methylation extractor that parses the methylation calls in a SAM file and built"
des += "for the purpose of analyzing read-specific methylation in bis-seq data split by chromosome. "
des += "For each read in the file, the program find the number of CpGs | |
GeneTransfer: f(val, gene) returning species that gene transfers to (or
None if no transfer.)
SpeciesBirth: f(val, species) returning True if species splits.
SpeciesDeath: f(val, species) returning True if species dies.
SpeciesRateChange: f(val, species) resetting species rate matrix given
val.
NOTE: in current implementation, Q only changes when species duplicates.
TimePerStep: branch length (sequence distance units) for each step.
GenesAtStart: number of genes at the beginning of the simulation.
MaxStep: maximum time step before stopping. Default 1000.
MaxGenes: maximum genes before stopping. Default None.
MaxSpecies: maximum species before stopping. Default None.
MaxGenome: maximum number of genes in a genome. Default None.
Sets CurrStep to 0 at the beginning to measure elapsed time.
Note: if both a birth and a death occur in the same timestep, they
will be ignored.
WARNING: If neither MaxStep nor MaxTaxa is set, the simulation will keep
going until all nodes are extinct, or you run out of memory!
"""
self.GeneBirth = GeneBirth
self.GeneDeath = GeneDeath
self.GeneTransfer = GeneTransfer
self.SpeciesBirth = SpeciesBirth
self.SpeciesDeath = SpeciesDeath
self.SpeciesRateChange = SpeciesRateChange
self.TimePerStep = TimePerStep
self.GenesAtStart = GenesAtStart
self.MaxStep = MaxStep
self.MaxGenes = MaxGenes
self.MaxSpecies = MaxSpecies
self.MaxGenome = MaxGenome
self.DEBUG = DEBUG
if TimePerStep <= 0:
raise ValueError, "TimePerStep must be greater than zero"
self._init_vars()
def _init_vars(self):
"""Initialize vars before running the simulation."""
self.CurrStep = 0
self.SpeciesTree = self.SpeciesClass()
self.SpeciesTree.Length = 0
self.SpeciesTree.BirthDeathModel = self
self.CurrSpecies = [self.SpeciesTree]
self.SpeciesTree.CurrSpecies = self.CurrSpecies #ref to same object
self.GeneTrees = [self.GeneClass() for i in range(self.GenesAtStart)]
for i in self.GeneTrees:
i.Length = 0
i.BirthDeathModel = self
self.CurrGenes = self.GeneTrees[:]
#set gene/species references
for i in self.CurrGenes:
i.Species = self.SpeciesTree
self.SpeciesTree.Genes = self.CurrGenes[:]
#note: copy of CurrGenes list, not reference
def timeOk(self):
    """Return True only if the maximum time has not yet been reached.

    With MaxStep unset (None) there is no time limit, so this is always True.
    """
    if self.MaxStep is None:
        return True
    return self.CurrStep < self.MaxStep
def genesOk(self):
    """Return True if the number of genes is > 0 and < self.MaxGenes.

    MaxGenes is exclusive: with MaxGenes == 32 this returns False once
    exactly 32 genes exist, so the simulation can stop at that count.
    With MaxGenes unset (None) any positive count is acceptable.
    """
    count = len(self.CurrGenes)
    if count < 1:
        # Everything is extinct.
        return False
    if self.MaxGenes is None:
        return True
    return count < self.MaxGenes
def speciesOk(self):
    """Return True if the number of species is > 0 and < self.MaxSpecies.

    MaxSpecies is exclusive: with MaxSpecies == 32 this returns False once
    exactly 32 species exist, so the simulation can stop at that count.
    With MaxSpecies unset (None) any positive count is acceptable.
    """
    count = len(self.CurrSpecies)
    if count < 1:
        # Everything is extinct.
        return False
    if self.MaxSpecies is None:
        return True
    return count < self.MaxSpecies
def genomeOk(self):
    """Return True if the largest genome is > 0 and < self.MaxGenome.

    MaxGenome is exclusive: with MaxGenome == 32 this returns False once a
    genome holds exactly 32 genes, so the simulation can stop at that size.
    With MaxGenome unset (None) any positive size is acceptable.
    NOTE(review): raises ValueError when CurrSpecies is empty (max of an
    empty sequence); speciesOk is expected to gate that case.
    """
    # BUG FIX: the original computed max_taxa but then tested and returned
    # the undefined name num_taxa, raising NameError on every call.
    max_genome = max([len(s.Genes) for s in self.CurrSpecies])
    if max_genome < 1:
        # No species carries any genes.
        return False
    if self.MaxGenome is None:
        return True
    return max_genome < self.MaxGenome
def geneStep(self, random_f=random):
    """Advances the state of the genes by one timestep (except speciation).

    Specifically:
    Decides whether each gene will die, duplicate, or transfer.
    If a gene dies, delete it from the list of current genes.
    If a gene gives birth, add two child nodes to the list of current
    genes each with zero branchlength (will increment later), and delete the
    original node from the list of genes.
    If a gene transfers, handle like birth but also change the species.
    WARNING: This method does not increment the branch length or the time
    counter. Handle separately!
    """
    #create list of new current nodes
    #Too complex to do combinations of states. Use three-pass algorithm:
    #1. birth
    #2. transfer
    #3. death
    #i.e. each copy gets a separate chance at death after it is made.
    #note that this differs slightly from what we do in the single
    #birth-death model where each original gene gets a chance at death
    #and a death and a duplication just cancel. Is this a problem with
    #the original model?
    # The pass order below is load-bearing: death must run last so that
    # children created by birth/transfer get their own death chance.
    self._gene_birth_step(random_f)
    self._gene_transfer_step(random_f)
    self._gene_death_step(random_f)
def _duplicate_gene(self, gene, orig_species, new_species=None, \
new_species_2=None):
"""Duplicates a gene, optionally attaching to new species.
When called with only orig_species, duplicates the gene in the same
species (killing the old gene and making two copies).
When called with orig_species and new_species, kills the old gene and
puts one new child into each of the old and new species (i.e. for
horizontal gene transfer).
When called with orig_species, new_species, and new_species_2, kills
the old gene and puts one new child into each of the two new species
(i.e. for speciation where all genes duplicate into new species).
WARNING: Does not update self.CurrGenes (so can use in loop, but
must update self.CurrGenes manually)."""
gc = self.GeneClass
#make new children
first_child, second_child = gc(), gc()
children = [first_child, second_child]
#update gene parent/child refs
gene.Children = children
first_child.Parent = gene
second_child.Parent = gene
#init branch lengths
first_child.Length = 0
second_child.Length = 0
#update species refs
#first, figure out which species to deal with
if new_species is None: #add both to orig species
first_species = orig_species
second_species = orig_species
elif new_species_2 is None: #add first to orig, second to new_species
first_species = orig_species
second_species = new_species
else: #add to the two new species
first_species = new_species
second_species = new_species_2
#then, update the refs
first_child.Species = first_species
second_child.Species = second_species
orig_species.Genes.remove(gene)
first_species.Genes.append(first_child)
second_species.Genes.append(second_child)
#return the new genes for appending or whatever
return first_child, second_child
def _gene_birth_step(self, random_f=random):
"""Implements gene birth sweep."""
gb = self.GeneBirth
new_genes = []
for gene in self.CurrGenes:
if gb(random_f(), gene):
new_genes.extend(self._duplicate_gene(gene, gene.Species))
else:
new_genes.append(gene)
self.CurrGenes[:] = new_genes[:]
def _gene_transfer_step(self, random_f=random):
"""Implements gene transfer sweep."""
gt = self.GeneTransfer
new_genes = []
#step 2: transfer
for gene in self.CurrGenes:
new_species = gt(random_f(), gene)
if new_species:
new_genes.extend(self._duplicate_gene(gene, gene.Species, \
new_species))
else:
new_genes.append(gene)
self.CurrGenes[:] = new_genes[:]
def _gene_death_step(self, random_f=random):
"""Implements gene death sweep."""
gd = self.GeneDeath
new_genes = []
for gene in self.CurrGenes:
if gd(random_f(), gene):
gene.Species.Genes.remove(gene)
else:
new_genes.append(gene)
self.CurrGenes[:] = new_genes[:]
def speciesStep(self, random_f=random):
    """Advances the state of the species by one timestep.

    Specifically:
    For each species in the current species, decides whether it's going to
    produce a birth or a death.
    If a species dies, delete it from the list of current species.
    If a species gives birth, add two child nodes to the list of current
    species, duplicate all their genes, and delete the
    original node from the list of taxa.
    Otherwise, add the timestep to the node's branchlength.
    """
    # Order matters: births first, then deaths, then purge genes whose
    # species no longer exists after the death sweep.
    #make the species that are going to duplicate
    self._species_birth_step(random_f)
    #kill the species that are going to die
    self._species_death_step(random_f)
    self._kill_orphan_genes_step()
def _species_death_step(self, random_f):
"""Kills species in self."""
sd = self.SpeciesDeath
new_list = []
for s in self.CurrSpecies:
if not sd(random_f(), s):
new_list.append(s)
self.CurrSpecies[:] = new_list[:]
def _kill_orphan_genes_step(self):
"""Kills genes whose species has been removed."""
new_list = []
species_dict = dict.fromkeys(map(id, self.CurrSpecies))
for g in self.CurrGenes:
if id(g.Species) in species_dict:
new_list.append(g)
self.CurrGenes[:] = new_list[:]
def _species_birth_step(self, random_f):
sb = self.SpeciesBirth
new_list = []
for s | |
None
self.assertEqual(m.__str__(), "<module '?' (built-in)>")
m.__file__ = []
self.assertEqual(m.__str__(), "<module '?' (built-in)>")
m.__file__ = 'foo.py'
self.assertEqual(m.__str__(), "<module '?' from 'foo.py'>")
def test_cp7007(self):
    # Regression test for CodePlex issue 7007: modules whose file names are
    # not valid Python identifiers ("+", "$", ...) must still be importable
    # through __import__ once their directory is on sys.path.
    file_contents = '''
called = 3.14
'''
    strange_module_names = [ "+",
                             "+a",
                             "a+",
                             "++",
                             "+++",
                             "-",
                             "=",
                             "$",
                             "^",
                           ]
    strange_file_names = [ os.path.join(self.test_dir, "cp7007", x + ".py") for x in strange_module_names ]
    for x in strange_file_names: self.write_to_file(x, file_contents)
    try:
        with path_modifier(os.path.join(self.test_dir, 'cp7007')) as p:
            for x in strange_module_names:
                temp_mod = __import__(x)
                self.assertEqual(temp_mod.called, 3.14)
    finally:
        # Always remove the generated module files, even on failure.
        self.clean_directory(os.path.join(self.test_dir, "cp7007"), remove=True)
def test_relative_control(self):
    """test various flavors of relative/absolute import and ensure the right
    arguments are delivered to __import__"""
    # Replacement __import__ that records its arguments (globals/locals
    # nulled out) and returns a stand-in "module" class.
    def myimport(*args):
        global importArgs
        importArgs = list(args)
        importArgs[1] = None # globals, we don't care about this
        importArgs[2] = None # locals, we don't care about this either
        # we'll pull values out of this class on success, but that's not
        # the important part
        class X:
            abc = 3
            absolute_import = 2
            bar = 5
        return X
    old_import = get_builtins_dict()['__import__']
    try:
        get_builtins_dict()['__import__'] = myimport
        # Each import statement below should translate into a specific
        # (name, globals, locals, fromlist[, level]) argument tuple.
        import abc
        self.assertEqual(importArgs, ['abc', None, None, None])
        from . import abc
        self.assertEqual(importArgs, ['', None, None, ('abc',), 1])
        from .. import abc
        self.assertEqual(importArgs, ['', None, None, ('abc',), 2])
        from ... import abc
        self.assertEqual(importArgs, ['', None, None, ('abc',), 3])
        from ...d import abc
        self.assertEqual(importArgs, ['d', None, None, ('abc',), 3])
        from ...d import (abc, bar)
        self.assertEqual(importArgs, ['d', None, None, ('abc', 'bar'), 3])
        from d import (
            abc,
            bar)
        self.assertEqual(importArgs, ['d', None, None, ('abc', 'bar')])
        # With absolute_import in effect, level becomes 0 (Python 2 exec).
        code = """from __future__ import absolute_import\nimport abc"""
        exec code in globals(), locals()
        self.assertEqual(importArgs, ['abc', None, None, None, 0])
        def f():exec "from import abc"
        self.assertRaises(SyntaxError, f)
    finally:
        get_builtins_dict()['__import__'] = old_import
#TODO:@skip("multiple_execute") #http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=26829
def test_import_relative_error(self):
    # 'from . import *' is not allowed; this pins the ValueError raised by
    # IronPython/CPython 2 for a star-import from a relative package.
    def f(): exec 'from . import *'
    self.assertRaises(ValueError, f)
@unittest.skip('No access to CPython stdlib')
def test_import_hooks_import_precence(self):
    """__import__ takes precedence over import hooks"""
    global myimpCalled
    myimpCalled = None
    # Finder that only records what it was asked for; never returns a loader.
    class myimp(object):
        def find_module(self, fullname, path=None):
            global myimpCalled
            myimpCalled = fullname, path
    def myimport(*args):
        return 'myimport'
    import distutils
    import distutils.command
    mi = myimp()
    sys.meta_path.append(mi)
    builtinimp = get_builtins_dict()['__import__']
    try:
        get_builtins_dict()['__import__'] = myimport
        import abc
        self.assertEqual(abc, 'myimport')
        # The custom __import__ short-circuits, so the hook is never consulted.
        self.assertEqual(myimpCalled, None)
        # reload on a built-in hits the loader protocol
        reload(distutils)
        self.assertEqual(myimpCalled, ('distutils', None))
        reload(distutils.command)
        self.assertEqual(myimpCalled[0], 'distutils.command')
        # NOTE(review): [-7:] yields only 7 chars ('stutils') yet is compared
        # to the 9-char 'distutils' — looks like it should be [-9:]; the test
        # is currently skipped, so this never fires. Confirm before enabling.
        self.assertEqual(myimpCalled[1][0][-7:], 'distutils')
    finally:
        get_builtins_dict()['__import__'] = builtinimp
        sys.meta_path.remove(mi)
def test_import_hooks_bad_importer(self):
    # Defective sys.path entries — an object without find_module, a plain
    # None, and a hook whose find_module raises — must all surface as a
    # normal ImportError rather than crashing the import machinery.
    class bad_importer(object): pass
    mi = bad_importer()
    sys.path.append(mi)
    try:
        def f(): import does_not_exist
        self.assertRaises(ImportError, f)
    finally:
        sys.path.remove(mi)
    sys.path.append(None)
    try:
        def f(): import does_not_exist
        self.assertRaises(ImportError, f)
    finally:
        sys.path.remove(None)
    class inst_importer(object): pass
    mi = inst_importer()
    def f(*args): raise Exception()
    mi.find_module = f
    sys.path.append(mi)
    try:
        def f(): import does_not_exist
        self.assertRaises(ImportError, f)
    finally:
        sys.path.remove(mi)
def test_import_hooks_importer(self):
    """importer tests - verify the importer gets passed correct values, handles
    errors coming back out correctly"""
    global myimpCalled
    myimpCalled = None
    # Finder that records (fullname, path) and can raise on demand.
    class myimp(object):
        def find_module(self, fullname, path=None):
            global myimpCalled
            myimpCalled = fullname, path
            if fullname == 'does_not_exist_throw':
                raise Exception('hello')
    mi = myimp()
    sys.meta_path.append(mi)
    try:
        try:
            import does_not_exist
            AssertUnreachable()
        except ImportError: pass
        # Top-level lookup: path argument is None.
        self.assertEqual(myimpCalled, ('does_not_exist', None))
        try:
            from testpkg1 import blah
            AssertUnreachable()
        except ImportError:
            pass
        # Submodule lookup: path argument is the package's __path__ list.
        self.assertEqual(type(myimpCalled[1]), list)
        self.assertEqual(myimpCalled[0], 'testpkg1.blah')
        self.assertEqual(myimpCalled[1][0][-8:], 'testpkg1')
        # Exceptions raised by find_module must propagate unchanged.
        def f(): import does_not_exist_throw
        self.assertRaisesMessage(Exception, 'hello', f)
    finally:
        sys.meta_path.remove(mi)
#TODO: @skip("multiple_execute")
def test_import_hooks_loader(self):
    """loader tests - verify the loader gets the right values, handles errors correctly"""
    global myimpCalled
    myimpCalled = None
    moduleType = type(sys)
    # Loader that synthesizes modules on demand; loadcount makes each
    # created module's __file__ distinguishable across (re)loads.
    class myloader(object):
        loadcount = 0
        def __init__(self, fullname, path):
            self.fullname = fullname
            self.path = path
        def load_module(self, fullname):
            if fullname == 'does_not_exist_throw':
                raise Exception('hello again')
            elif fullname == 'does_not_exist_return_none':
                return None
            else:
                myloader.loadcount += 1
                module = sys.modules.setdefault(fullname, moduleType(fullname))
                module.__file__ = '<myloader file ' + str(myloader.loadcount) + '>'
                module.fullname = self.fullname
                module.path = self.path
                module.__loader__ = self
                if fullname[-3:] == 'pkg':
                    # create a package
                    module.__path__ = [fullname]
                return module
    class myimp(object):
        def find_module(self, fullname, path=None):
            return myloader(fullname, path)
    mi = myimp()
    sys.meta_path.append(mi)
    try:
        # A loader that raises propagates; one returning None -> ImportError.
        def f(): import does_not_exist_throw
        self.assertRaisesMessage(Exception, 'hello again', f)
        def f(): import does_not_exist_return_none
        self.assertRaises(ImportError, f)
        import does_not_exist_create
        self.assertEqual(does_not_exist_create.__file__, '<myloader file 1>')
        self.assertEqual(does_not_exist_create.fullname, 'does_not_exist_create')
        self.assertEqual(does_not_exist_create.path, None)
        # reload goes back through the loader, bumping loadcount.
        reload(does_not_exist_create)
        self.assertEqual(does_not_exist_create.__file__, '<myloader file 2>')
        self.assertEqual(does_not_exist_create.fullname, 'does_not_exist_create')
        self.assertEqual(does_not_exist_create.path, None)
        # Submodules receive the parent package's __path__ as 'path'.
        import testpkg1.does_not_exist_create_sub
        self.assertEqual(testpkg1.does_not_exist_create_sub.__file__, '<myloader file 3>')
        self.assertEqual(testpkg1.does_not_exist_create_sub.fullname, 'testpkg1.does_not_exist_create_sub')
        self.assertEqual(testpkg1.does_not_exist_create_sub.path[0][-8:], 'testpkg1')
        reload(testpkg1.does_not_exist_create_sub)
        self.assertEqual(testpkg1.does_not_exist_create_sub.__file__, '<myloader file 4>')
        self.assertEqual(testpkg1.does_not_exist_create_sub.fullname, 'testpkg1.does_not_exist_create_sub')
        self.assertEqual(testpkg1.does_not_exist_create_sub.path[0][-8:], 'testpkg1')
        # Names ending in 'pkg' are synthesized as packages (get __path__).
        import does_not_exist_create_pkg.does_not_exist_create_subpkg
        self.assertEqual(does_not_exist_create_pkg.__file__, '<myloader file 5>')
        self.assertEqual(does_not_exist_create_pkg.fullname, 'does_not_exist_create_pkg')
    finally:
        sys.meta_path.remove(mi)
def test_path_hooks(self):
    import toimport
    # Reset the importer cache and install a single path hook, dropping any
    # cached 'toimport' module so the hook is actually consulted.
    def prepare(f):
        sys.path_importer_cache = {}
        sys.path_hooks = [f]
        if 'toimport' in sys.modules: del sys.modules['toimport']
    # A hook raising a non-ImportError exception propagates to the caller.
    def hook(*args): raise Exception('hello')
    prepare(hook)
    def f(): import toimport
    self.assertRaisesMessage(Exception, 'hello', f)
    # ImportError shouldn't propagate out
    def hook(*args): raise ImportError('foo')
    prepare(hook)
    f()
    # returning none should be ok
    def hook(*args): pass
    prepare(hook)
    f()
    sys.path_hooks = []
def common_meta_import_tests(self):
    # Shared assertions driven by the meta_importer fixture; used by both
    # the sys.meta_path and sys.path_hooks test entry points. The magic
    # 'does_not_exist_*' names select behaviors inside meta_importer.
    def f(): import does_not_exist_throw
    self.assertRaisesMessage(Exception, 'hello', f)
    import does_not_exist_abc
    self.assertEqual(does_not_exist_abc, 'abc')
    def f(): import does_not_exist_loader_throw
    self.assertRaisesMessage(Exception, 'loader', f)
    def f(): import does_not_exist_loader_None
    self.assertRaisesMessage(ImportError, 'No module named does_not_exist_loader_None', f)
    from does_not_exist_X import abc
    self.assertEqual(abc, 3)
def test_path_hooks_importer_and_loader(self):
    # Same assertions as test_meta_path, but the importer is delivered via
    # a path hook keyed on the sentinel sys.path entry '<myname>'.
    path = list(sys.path)
    hooks = list(sys.path_hooks)
    try:
        sys.path.append('<myname>')
        def hook(name):
            if name == "<myname>":
                return meta_importer(self)
        sys.path_hooks.append(hook)
        self.common_meta_import_tests()
    finally:
        # Restore global import state no matter what.
        sys.path = path
        sys.path_hooks = hooks
def test_meta_path(self):
    # Install the shared meta importer fixture and run the common
    # assertions; sys.meta_path is restored afterwards.
    metapath = list(sys.meta_path)
    sys.meta_path.append(meta_importer(self))
    try:
        self.common_meta_import_tests()
    finally:
        sys.meta_path = metapath
def test_custom_meta_path(self):
    """most special methods invoked by the runtime from Python only invoke on the type, not the instance.
    the import methods will invoke on instances including using __getattribute__ for resolution or on
    old-style classes. This test verifies we do a full member lookup to find these methods"""
    metapath = list(sys.meta_path)
    # find_module/load_module are resolved dynamically through
    # __getattribute__; these two cells control what the hook returns.
    finder = None
    loader = None
    class K(object):
        def __init__(self):
            self.calls = []
        def __getattribute__(self, name):
            if name != 'calls': self.calls.append(name)
            if name == 'find_module': return finder
            if name == 'load_module': return loader
            return object.__getattribute__(self, name)
    loaderInst = K()
    sys.meta_path.append(loaderInst)
    def ok_finder(name, path):
        loaderInst.calls.append( (name, path) )
        return loaderInst
    def ok_loader(name):
        loaderInst.calls.append(name)
        return 'abc'
    try:
        # dynamically resolve find_module to None
        try:
            import xyz
        except TypeError:
            self.assertEqual(loaderInst.calls[0], 'find_module')
            loaderInst.calls = []
        # dynamically resolve find_module to a function,
        # and load_module to None.
        finder = ok_finder
        try:
            import xyz
        except TypeError:
            self.assertEqual(loaderInst.calls[0], 'find_module')
            self.assertEqual(loaderInst.calls[1], ('xyz', None))
            loaderInst.calls = []
        # Both hooks resolve: the import succeeds and the call order is
        # find_module -> finder args -> load_module -> loader arg.
        loader = ok_loader
        import xyz
        self.assertEqual(xyz, 'abc')
        self.assertEqual(loaderInst.calls[0], 'find_module')
        self.assertEqual(loaderInst.calls[1], ('xyz', None))
        self.assertEqual(loaderInst.calls[2], 'load_module')
        self.assertEqual(loaderInst.calls[3], 'xyz')
    finally:
        sys.meta_path = metapath
def test_import_kw_args(self):
    # __import__ must accept all of its parameters by keyword.
    self.assertEqual(__import__(name = 'sys', globals = globals(), locals = locals(), fromlist = [], level = -1), sys)
def test_import_list_empty_string(self):
    """importing w/ an empty string in the from list should be ignored"""
    # An empty fromlist entry must not create a '' attribute on the package.
    x = __import__('testpkg1', {}, {}, [''])
    self.assertTrue(not '' in dir(x))
def test_cp7050(self):
    '''
    This test case complements CPython's test_import.py
    '''
    # NOTE(review): 'Nt' and 'Lib' presumably exercise case-sensitive module
    # lookup on case-insensitive file systems (vs. builtin 'nt') — confirm.
    try:
        import Nt
        AssertUnreachable("Should not have been able to import 'Nt'")
    except:
        pass
    self.assertRaises(ImportError, __import__, "Nt")
    self.assertRaises(ImportError, __import__, "Lib")
    self.assertRaises(ImportError, __import__, "iptest.Assert_Util")
def test_meta_path_before_builtins(self):
    """the meta path should be consulted before builtins are loaded"""
    class MyException(Exception): pass
    # Old-style class hook that intercepts exactly the 'time' module and
    # raises from its loader so interception is observable.
    class K:
        def find_module(self, name, path):
            if name == "time": return self
            return None
        def load_module(self, name):
            raise MyException
    # Drop any cached copy so the import really goes through the hook.
    if 'time' in sys.modules:
        del sys.modules["time"]
    loader = K()
    sys.meta_path.append(loader)
    try:
        import time
        AssertUnreachable()
    except MyException:
        pass
    sys.meta_path.remove(loader)
    # Without the hook, the builtin module imports normally again.
    import time
def test_file_coding(self):
    # Source-encoding handling: a '# coding:' comment, a UTF-8 BOM combined
    # with a coding comment, and correct traceback line numbers when the
    # first line is a coding comment.
    try:
        import os
        f = file('test_coding_mod.py', 'wb+')
        f.write("# coding: utf-8\nx = '\xe6ble'\n")
        f.close()
        with path_modifier('.'):
            import test_coding_mod
            self.assertEqual(test_coding_mod.x[0], '\xe6')
    finally:
        os.unlink('test_coding_mod.py')
    try:
        # BOM + coding comment: both present, module must still parse.
        f = file('test_coding_2.py', 'wb+')
        f.write("\xef\xbb\xbf# -*- coding: utf-8 -*-\n")
        f.write("x = u'ABCDE'\n")
        f.close()
        with path_modifier('.'):
            import test_coding_2
            self.assertEqual(test_coding_2.x, 'ABCDE')
    finally:
        os.unlink('test_coding_2.py')
    try:
        # The coding line must not shift traceback line numbers: the raise
        # on line 2 must be reported as line 2 (Python 2 except syntax).
        f = file('test_coding_3.py', 'wb+')
        f.write("# -*- coding: utf-8 -*-\n")
        f.write("raise Exception()")
        f.close()
        try:
            with path_modifier('.'):
                import test_coding_3
        except Exception, e:
            self.assertEqual(sys.exc_info()[2].tb_next.tb_lineno, 2)
    finally:
        os.unlink('test_coding_3.py')
def test_module_subtype(self):
class x(type(sys)):
def __init__(self): self.baz = 100
def __getattr__(self, name):
if name == 'qux': raise AttributeError
return 42
def __getattribute__(self, name):
if name == 'foo' or name == 'qux': raise AttributeError
if name == 'baz': return type(sys).__getattribute__(self, name)
return 23
a = x()
self.assertEqual(a.foo, 42)
self.assertEqual(a.bar, 23)
self.assertEqual(a.baz, 100)
self.assertRaises(AttributeError, lambda : a.qux)
#Real *.py file
import testpkg1.mod1
class x(type(testpkg1.mod1)):
def __init__(self): self.baz = 100
| |
# -*- coding: utf-8 -*-
'''
Created on 13.08.2015
@author: rdebeerst
'''
import bkt
import bkt.console
import bkt.library.powerpoint as powerpoint
import bkt.library.algorithms
import System
import bkt.ui
import json
# import ruben as toolbox_rd
class Adjustments(object):
    """Ribbon helpers for reading and writing shape adjustment values.

    PowerPoint shapes expose up to N adjustment handles through the 1-based
    COM collection shape.adjustments; these helpers guard every access
    against shapes with fewer (or broken) adjustment slots.
    """

    @staticmethod
    def adjustment_edit_box(num):
        """Build an EditBox ribbon control bound to adjustment slot *num*."""
        return bkt.ribbon.EditBox(
            id='adjustment-' + str(num),
            label=' value ' + str(num),
            sizeString='#######',
            on_change=bkt.Callback(
                lambda shapes, value: map( lambda shape: Adjustments.set_adjustment(shape, num, value), shapes),
                shapes=True),
            get_text=bkt.Callback(
                lambda shapes : Adjustments.get_adjustment(shapes[0], num),
                shapes=True),
            get_enabled=bkt.Callback(
                lambda shapes : Adjustments.is_enabled(shapes[0], num),
                shapes=True)
        )

    @staticmethod
    def set_adjustment(shape, num, value):
        """Write *value* into adjustment slot *num*, if the shape has one."""
        if num <= shape.adjustments.count:
            shape.adjustments.item[num] = value

    @staticmethod
    def get_adjustment(shape, num):
        """Return adjustment *num*, or None when the slot is missing or
        the COM access fails."""
        try:
            if num <= shape.adjustments.count:
                return shape.adjustments.item[num]
            return None
        except:
            return None

    @staticmethod
    def is_enabled(shape, num):
        """True when the shape exposes at least *num* adjustment slots."""
        try:
            return num <= shape.adjustments.count
        except:
            return False
# Ribbon group exposing one edit box for each of the first 9 adjustment
# handles a shape can have (slots beyond the shape's count are disabled).
group_adjustments = bkt.ribbon.Group(
    label = "Shape Adjustments",
    children=[
        Adjustments.adjustment_edit_box(num)
        for num in range(1,10)
    ]
)
class VariousShapes(object):
    """Miscellaneous shape-creation helpers."""

    @staticmethod
    def create_line(slide, shapes):
        """Draw a 1.5pt horizontal line along the bottom edge of each shape.

        slide: slide receiving the new lines; shapes: shapes to underline.
        The first select() call replaces the current selection, subsequent
        calls extend it, so all created lines end up selected together.
        """
        # Fixed: removed the unused local that read slide.shapes.count.
        reset_selection = True
        for shape in shapes:
            bottom = shape.top + shape.height
            line = slide.shapes.addLine(shape.left, bottom,
                                        shape.left + shape.width, bottom)
            line.line.weight = 1.5
            line.select(reset_selection)
            reset_selection = False
class ShapePoints(object):
    """Inspect and edit the node points of a shape via a JSON console dialog."""

    @classmethod
    def display_points(cls, shape):
        # Opens a console input window pre-filled with the shape's nodes as
        # a JSON array; the callback writes the edited points back.
        if not shape.Type == powerpoint.MsoShapeType['msoFreeform']:
            #convert shape into freeform by adding and deleting node (not sure if this is required)
            shape.Nodes.Insert(1, 0, 0, 0, 0) #msoSegmentLine, msoEditingAuto, x, y
            shape.Nodes.Delete(2)
            # shape.Nodes.SetPosition(1, shape.Left, shape.Top)
        # Build the JSON by hand so the dialog shows one node per line
        # (\r\n line endings for the Windows console control).
        pointlist = "["
        for i,node in enumerate(shape.nodes, start=1):
            if i > 1:
                pointlist += ","
            pointlist += "\r\n"
            pointlist += ' {"i":' + str(i)
            pointlist += ', "x":' + str(node.points[0,0])
            pointlist += ', "y":' + str(node.points[0,1])
            pointlist += ', "segmentType": ' + str(node.segmentType)
            pointlist += ', "editingType": ' + str(node.editingType)
            pointlist += '}'
        pointlist += "\r\n]"
        #bkt.console.show_message(json)
        def json_callback(json_points):
            cls.change_points(shape, json_points=json_points)
        bkt.console.show_input(pointlist, json_callback)
        #shape.textframe.textrange.text = json

    @staticmethod
    def change_points(shape, json_points=None):
        """Apply a JSON list of point dicts back onto the shape's nodes."""
        points = json.loads(json_points)
        # Make the node count match the number of points supplied.
        while len(points) > shape.nodes.count:
            shape.nodes.insert(shape.nodes.count, 0,0, 0.0, 0.0)
        while len(points) < shape.nodes.count:
            shape.nodes.delete(shape.nodes.count)
        # Node collection is 1-based; only positions are written back.
        index = 0
        for p in points:
            shape.nodes.setPosition(index+1, p['x'], p['y'])
            # shape.nodes.setEditingType(index+1, p['editingType'])
            # shape.nodes.setSegmentType(index+1, p['segmentType'])
            index += 1
        # Older insert-based implementation removed; see version history:
        # index = 0
        # for p in points:
        #     shape.nodes.insert(index+1, p['segmentType'], p['editingType'], p['x'], p['y'])
        #     index += 1
        #
        # # while len(points) > shape.nodes.count:
        # #     shape.nodes.insert(shape.nodes.count, 0,0, 0.0, 0.0)
        # #
        # while len(points) < shape.nodes.count:
        #     shape.nodes.delete(shape.nodes.count)
# Ribbon group with a single button opening the shape-points JSON editor.
group_shape_points = bkt.ribbon.Group(
    label=u"Shape Points",
    children=[
        bkt.ribbon.Button(
            label=u'Shape Points',
            size="large",
            imageMso='ObjectEditPoints',
            on_action=bkt.Callback(ShapePoints.display_points)
        )
    ]
)
class CopyPasteStyle(object):
    """Copy formatting from a reference picture shape and paste it onto others.

    Which aspects are pasted is controlled by the boolean flags in
    copy_settings, toggled from the ribbon through the setting_* methods.
    """

    # Attribute groups copied verbatim from the reference shape.
    _COLOR_ATTRS = ('RGB', 'SchemeColor', 'Brightness', 'TintAndShade')
    _LINE_ATTRS = ('Style', 'DashStyle', 'Weight', 'Transparency')
    _CROP_ATTRS = ('ShapeHeight', 'ShapeWidth', 'ShapeTop', 'ShapeLeft',
                   'PictureHeight', 'PictureWidth',
                   'PictureOffsetX', 'PictureOffsetY')

    def __init__(self):
        # Fixed: these were mutable CLASS attributes before; mutating
        # copy_settings in place leaked the change across all instances.
        self.ref_shape = None
        self.copy_settings = {
            'background': True,
            'img': True,
            'line': True,
            'position': False,
            'size': True,
        }

    def copy_style(self, shape):
        """Remember *shape* as the style reference; only pictures qualify."""
        if shape.type == powerpoint.MsoShapeType['msoPicture']:
            self.ref_shape = shape
        else:
            self.ref_shape = None

    def paste_style(self, shapes):
        """Apply the remembered reference style to every shape in *shapes*.

        No-op when no reference shape has been copied yet.
        """
        ref = self.ref_shape
        if ref is None:  # was '== None'
            return
        for shape in shapes:
            if self.copy_settings['size']:
                shape.width = ref.width
                shape.height = ref.height
            if self.copy_settings['position']:
                shape.left = ref.left
                shape.top = ref.top
                shape.rotation = ref.rotation
            if self.copy_settings['background']:
                # NOTE: Fill.Type is deliberately not copied (kept disabled
                # in the original as well).
                for attr in self._COLOR_ATTRS:
                    setattr(shape.Fill.ForeColor, attr,
                            getattr(ref.Fill.ForeColor, attr))
            if self.copy_settings['line']:
                for attr in self._LINE_ATTRS:
                    setattr(shape.Line, attr, getattr(ref.Line, attr))
                for attr in self._COLOR_ATTRS:
                    setattr(shape.Line.ForeColor, attr,
                            getattr(ref.Line.ForeColor, attr))
            if self.copy_settings['img'] and shape.type == powerpoint.MsoShapeType['msoPicture']:
                # Copy the crop geometry so the paste reproduces the framing.
                for attr in self._CROP_ATTRS:
                    setattr(shape.PictureFormat.crop, attr,
                            getattr(ref.PictureFormat.crop, attr))

    def setting_size(self, pressed):
        """Ribbon toggle: copy width/height."""
        self.copy_settings['size'] = (pressed == True)

    def setting_size_pressed(self):
        return self.copy_settings['size'] == True

    def setting_background(self, pressed):
        """Ribbon toggle: copy fill color."""
        self.copy_settings['background'] = (pressed == True)

    def setting_background_pressed(self):
        return self.copy_settings['background'] == True

    def setting_img(self, pressed):
        """Ribbon toggle: copy picture crop settings."""
        self.copy_settings['img'] = (pressed == True)

    def setting_img_pressed(self):
        return self.copy_settings['img'] == True
# Singleton instance backing the ribbon callbacks below.
copy_paste_style = CopyPasteStyle()
# Ribbon group: copy/paste picture style plus toggles selecting what to paste.
group_copy_paste_style = bkt.ribbon.Group(
    label=u"Copy Style",
    children=[
        bkt.ribbon.Button(label='copy', screentip='copy image settings: Zuschneideposition, ...',
            on_action=bkt.Callback(copy_paste_style.copy_style),
            #get_enabled=bkt.Callback(copy_paste_style.image_selected)
        ),
        bkt.ribbon.Button(label='paste', screentip='paste image settings: Zuschneideposition, ...',
            on_action=bkt.Callback(copy_paste_style.paste_style),
            #get_enabled=bkt.Callback(copy_paste_style.paste_style_enabled)
        ),
        bkt.ribbon.ToggleButton(label="SIZE",
            on_toggle_action=bkt.Callback(copy_paste_style.setting_size),
            get_pressed=bkt.Callback(copy_paste_style.setting_size_pressed)
        ),
        bkt.ribbon.ToggleButton(label="BACKGROUND",
            on_toggle_action=bkt.Callback(copy_paste_style.setting_background),
            get_pressed=bkt.Callback(copy_paste_style.setting_background_pressed)
        ),
        bkt.ribbon.ToggleButton(label="IMG",
            on_toggle_action=bkt.Callback(copy_paste_style.setting_img),
            get_pressed=bkt.Callback(copy_paste_style.setting_img_pressed)
        )
    ]
)
class ShapeMetaStyle(object):
    """Tag-based "master styles" for shapes.

    Shapes are tagged with a style name (META_STYLE_NAME tag); apply_style
    then copies the formatting of a selected master shape onto every other
    shape on the given slides carrying the same style name.  Per-style
    settings (which attribute groups to copy) are stored as JSON in a
    presentation-level tag (META_STYLE_SETTINGS).
    """

    META_STYLE_NAME = "RD-METASTYLE"              # shape tag: style name
    META_STYLE_SETTINGS = "RD-METASTYLE-SETTING"  # presentation tag: JSON settings

    style_names = None  # cached list of style names found on the slides
    settings = None     # lazily deserialized {style name: settings dict}

    # Attribute groups copied when no explicit settings exist for a style.
    default_style = {
        "type": True,
        "size": True,
        "background": True,
        "linestyle": True,
        "orientation": True,
        "textformat": True,
        "margin": True
    }

    def set_meta_style(self, shapes, value, slides):
        """ComboBox on_change: tag *shapes* with style name *value*.

        An empty string removes the tag.
        """
        for shape in shapes:
            if value == '':
                shape.tags.delete(self.META_STYLE_NAME)
            else:
                shape.tags.add(self.META_STYLE_NAME, value)
        # Nach Aktion die Liste aktualisieren (refresh cached name list)
        self.update_style_names(slides)

    def set_meta_style_item_count(self, slides):
        """ComboBox get_item_count: number of known style names."""
        # Liste bei jedem Aufruf aktualisieren, damit Wechsel zwischen
        # PowerPoint-Files möglich wird
        self.update_style_names(slides)
        return len(self.style_names)

    def set_meta_style_item_label(self, index):
        """ComboBox get_item_label: style name for the 1-based *index*."""
        return self.style_names[index-1]

    def get_meta_style(self, shapes):
        """ComboBox get_text: common style name of *shapes*, '' if they differ."""
        shapetypes = map(lambda shape: self.get_tag_value(shape, self.META_STYLE_NAME, ''), shapes)
        shapetypes = list(set(shapetypes))
        if len(shapetypes) == 1:
            return shapetypes[0]
        else:
            return ''
        #return self.get_tag_value(shapes[0], 'SEN-RD-METASTYLE', '')

    def get_tag_value(self, obj, tagname, default=''):
        """Return the value of tag *tagname* on *obj*, or *default* if absent.

        Office tag collections are 1-indexed and have no direct lookup,
        hence the linear scan.
        """
        for idx in range(1, obj.tags.count+1):
            if obj.tags.name(idx) == tagname:
                return obj.tags.value(idx)
        return default

    def apply_style(self, shape, slides):
        """Copy the master *shape*'s formatting to all shapes on *slides*
        tagged with the same style name."""
        style = self.get_tag_value(shape, self.META_STYLE_NAME)
        if style == '':
            return
        master_shape = shape
        settings = self.get_style_settings(slides[0].parent, style)
        for slide in slides:
            for shape in slide.shapes:
                if shape != master_shape:
                    if self.get_tag_value(shape, self.META_STYLE_NAME) == style:
                        # BUGFIX: use the style-specific settings; previously
                        # default_style was always passed and the computed
                        # *settings* were ignored.
                        self.copy_style(master_shape, shape, settings)
        self.update_style_names(slides)

    def get_style_settings(self, presentation, stylename):
        """Return the stored settings dict for *stylename*, falling back to
        default_style when no settings are stored."""
        if self.settings is None:
            # Lazily deserialize the JSON stored in the presentation tag;
            # a missing/unparsable tag yields an empty settings dict.
            self.settings = {}
            try:
                self.settings = json.loads(self.get_tag_value(presentation, self.META_STYLE_SETTINGS, None)) or {}
            except:
                pass
        # BUGFIX: always do the lookup.  Previously it only happened when the
        # cache was already initialized, so the very first call returned None.
        if stylename in self.settings:
            return self.settings[stylename]
        return self.default_style

    def copy_style(self, origin, target, settings):
        """Copy the attribute groups enabled in *settings* from *origin* to
        *target* (both PowerPoint shapes)."""
        # Shape Type
        if settings['type']:
            if target.Type == origin.Type and origin.Type == powerpoint.MsoShapeType['msoAutoShape']:
                target.AutoShapeType = origin.AutoShapeType
        # Size
        if settings['size']:
            target.width = origin.width
            target.height = origin.height
        # Background
        if settings['background']:
            target.Fill.ForeColor.RGB = origin.Fill.ForeColor.RGB
        # Line Style
        if settings['linestyle']:
            target.Line.Style = origin.Line.Style
            target.Line.DashStyle = origin.Line.DashStyle
            target.Line.Transparency = origin.Line.Transparency
            target.Line.ForeColor.RGB = origin.Line.ForeColor.RGB
            target.Line.Weight = origin.Line.Weight
        # Text Ausrichtung (alignment / anchoring)
        # NOTE(review): 'TextFrame' vs 'Textframe' spelling varies below; COM
        # dispatch is case-insensitive so both resolve — kept as-is.
        if settings['orientation']:
            target.TextFrame.TextRange.ParagraphFormat.Alignment = origin.TextFrame.TextRange.ParagraphFormat.Alignment
            target.Textframe.HorizontalAnchor = origin.Textframe.HorizontalAnchor
            target.Textframe.VerticalAnchor = origin.Textframe.VerticalAnchor
        # Text Format
        if settings['textformat']:
            target.Textframe.TextRange.Font.Color.RGB = origin.Textframe.TextRange.Font.Color.RGB
            target.Textframe.TextRange.Font.Size = origin.Textframe.TextRange.Font.Size
            target.Textframe.TextRange.Font.Bold = origin.Textframe.TextRange.Font.Bold
            target.Textframe.TextRange.Font.Italic = origin.Textframe.TextRange.Font.Italic
            target.Textframe.TextRange.Font.Underline = origin.Textframe.TextRange.Font.Underline
        # Margins
        if settings['margin']:
            target.Textframe.marginLeft = origin.Textframe.marginLeft
            target.Textframe.marginRight = origin.Textframe.marginRight
            target.Textframe.marginTop = origin.Textframe.marginTop
            target.Textframe.marginBottom = origin.Textframe.marginBottom
        #FIXME: more to come...

    def update_style_names(self, slides):
        """Rebuild the cached list of style names used on *slides*.

        NOTE(review): untagged shapes contribute '' to the list, which then
        appears as an empty ComboBox entry — presumably the "remove style"
        choice; confirm.
        """
        style_names = set()
        for slide in slides:
            for shape in slide.shapes:
                style_names = style_names.union(set([self.get_tag_value(shape, self.META_STYLE_NAME, '')]))
        self.style_names = list(style_names)
# Module-level singleton backing the "Master Styles" ribbon group.
shape_meta_style = ShapeMetaStyle()

# Ribbon group: a ComboBox to view/assign the style name of the selected
# shapes and a button to propagate the master shape's style.
group_meta_style = bkt.ribbon.Group(
    label=u'Master Styles',
    children=[
        bkt.ribbon.ComboBox(
            label='Name', size_string='###############', show_label=True,
            on_change=bkt.Callback(shape_meta_style.set_meta_style),
            get_item_count=bkt.Callback(shape_meta_style.set_meta_style_item_count),
            get_item_label=bkt.Callback(shape_meta_style.set_meta_style_item_label),
            get_text=bkt.Callback(shape_meta_style.get_meta_style),
        ),
        # "Style übertragen" = transfer style to same-tagged shapes
        bkt.ribbon.Button(label='Style übertragen', show_label=True,
            on_action=bkt.Callback(shape_meta_style.apply_style)
        )
    ]
)
class Diverses(object):
    """Miscellaneous helpers (currently: convert shapes to grayscale)."""

    @classmethod
    def make_black_white(cls, shapes):
        """Replace fill, line and font color of each shape by its gray value."""
        to_gray = cls.black_white_rgb
        for shp in shapes:
            shp.Fill.ForeColor.RGB = to_gray(shp.Fill.ForeColor.RGB)
            shp.Line.ForeColor.RGB = to_gray(shp.Line.ForeColor.RGB)
            shp.Textframe.TextRange.Font.Color.RGB = to_gray(shp.Textframe.TextRange.Font.Color.RGB)

    @staticmethod
    def black_white_rgb(rgb):
        """Map a packed Office color int (low byte = red) to the gray color
        with the same luma (0.21 R + 0.72 G + 0.07 B)."""
        red = rgb & 0xFF
        green = (rgb >> 8) & 0xFF
        blue = (rgb >> 16) & 0xFF
        gray = int(round(0.21 * red + 0.72 * green + 0.07 * blue))
        return gray | (gray << 8) | (gray << 16)
# @staticmethod
# def circ_w_connectors(slide):
# kurven = bezier.kreisSegmente(4*10, 100, [200,200])
# # Kurve aus Beziers erstellen
# # start beim ersten Punkt
# P = kurven[0][0][0];
# #bkt.message( "%d/%d" % (P[0], P[1]) )
# ffb = slide.Shapes.BuildFreeform(1, P[0], P[1])
# for kl in kurven:
# k = kl[0]
# # von den nächsten Beziers immer die nächsten Punkte angeben
# ffb.AddNodes(1, 0, k[1][0], k[1][1], k[2][0], k[2][1], k[3][0], k[3][1])
# # Parameter: SegmentType, EditingType, X1,Y1, X2,Y2, X3,Y3
# # SegmentType: 0=Line, 1=Curve
# # EditingType: 0=Auto, 1=Corner (keine Verbindungspunkte), 2=Smooth, 3=Symmetric --> Zweck?
# shp = ffb.ConvertToShape()
#
#
# @staticmethod
# def box_w_connectors(slide):
# #ffb = slide.Shapes.BuildFreeform(1, )
# corners = [ [100,100],
# [300,100],
# [300,200],
# [100,200]]
#
# def line_to(ffb, origin, dest, segments):
# deltaX = dest[0]-origin[0]
# deltaY = dest[1]-origin[1]
# for i in range(0,segments+1):
# # SegmentType=0 (Line), EditingType=1 (Corner)
# #bkt.message( "%d/%d" % (origin[0]+float(i)/segments*deltaX, origin[1]+float(i)/segments*deltaY) )
# # SegmentType=0 (Line), EditingType=1 (Corner)
# #ffb.addNodes(0,1, origin[0]+float(i)/segments*deltaX, origin[1]+float(i)/segments*deltaY)
#
# ffb.addNodes(0,0, origin[0]+float(i)/segments*deltaX, origin[1]+float(i)/segments*deltaY)
#
# segments = 3
#
# # EditingType=1 (Corner)
# ffb = slide.Shapes.BuildFreeform(1, *corners[0])
# last_corner = corners[0]
# for c in corners:
# #ffb.addNodes(0,1,*c)
# line_to(ffb, last_corner, c, segments)
# last_corner = c
#
# line_to(ffb, last_corner, corners[0], segments)
# # ffb.addNodes(0,1,*corners[0])
# shp = ffb.ConvertToShape()
#
# | |
<reponame>danbarla/GTDynamics
"""
* GTDynamics Copyright 2020, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* See LICENSE for the license information
*
* @file jr_simulator.py
* @brief Simulate the jumping robot by solving dynamics of each step.
* @author <NAME>
"""
# Make this script's directory and its parent importable so the local helper
# modules below resolve regardless of the current working directory.
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
sys.path.insert(0, currentdir)

import gtdynamics as gtd
import gtsam
import numpy as np
from helpers import mergeValues
from jumping_robot import Actuator, JumpingRobot
from jr_graph_builder import JRGraphBuilder
from jr_values import JRValues
from jr_visualizer import visualize_jr_trajectory, make_plot
class JRSimulator:
    # TODO(yetong) create pneumatics.py and actuator.py, which contain
    # the factors but also the functionality below that is
    # pneumatics/actuator specific.
    """ Class for jumping robot simulation.
        Refer to `example_simulate` on setup and use. """

    def __init__(self, yaml_file_path, init_config):
        """ Constructor, creates init_config and jr. """
        self.yaml_file_path = yaml_file_path
        self.jr_graph_builder = JRGraphBuilder()
        self.init_config = init_config
        self.jr = JumpingRobot(yaml_file_path, init_config)

    def step_integration(self, k, dt, values, include_actuation=True):
        """ Perform integration, and add results to values.

            Args:
                k (int): current step index
                dt (float): duration of time step
                values (gtsam.Values): values and derivatives of previous step
                include_actuation (bool): also integrate the actuator gas mass
        """
        JRValues.integrate_joints(self.jr, values, k, dt)
        JRValues.integrate_torso(self.jr, values, k, dt)
        if include_actuation:
            JRValues.integrate_mass(self.jr, values, k, dt)
        # integrate time
        t_prev = values.atDouble(gtd.TimeKey(k-1).key())
        t_curr = t_prev + dt
        values.insertDouble(gtd.TimeKey(k).key(), t_curr)

    def step_actuation_dynamics(self, k, values):
        """ Perform actuation dynamics by solving the actuation dynamics factor
            graph of the current step, and add results to values.

            Args:
                k (int): current step index
                values (gtsam.Values): values containing q, v, m_a, m_s of
                    current step and To, Ti, V_s

            Raises:
                Exception: optimization does not converge
        """
        # directly compute source pressure (ideal-gas relation; /1e3 converts
        # the pressure unit — presumably Pa -> kPa, confirm)
        m_s = values.atDouble(Actuator.SourceMassKey(k))
        V_s = values.atDouble(Actuator.SourceVolumeKey())
        P_s = m_s * self.jr.gas_constant / V_s / 1e3
        P_s_key = Actuator.SourcePressureKey(k)
        # perform actuator forward dynamics by solving its dynamics graph
        for actuator in self.jr.actuators:
            j = actuator.j
            # construct graph
            graph = self.jr_graph_builder.actuation_graph_builder.actuator_dynamics_graph(self.jr, actuator, k)
            m_a_key = Actuator.MassKey(j, k)
            q_key = gtd.internal.JointAngleKey(j, k).key()
            v_key = gtd.internal.JointVelKey(j, k).key()
            m_a = values.atDouble(m_a_key)
            q = values.atDouble(q_key)
            v = values.atDouble(v_key)
            # anchor the known quantities of this step with priors
            actuation_graph_builder = self.jr_graph_builder.actuation_graph_builder
            graph.add(gtd.PriorFactorDouble(m_a_key, m_a, actuation_graph_builder.prior_m_cost_model))
            graph.add(gtd.PriorFactorDouble(P_s_key, P_s, actuation_graph_builder.prior_pressure_cost_model))
            graph.add(gtd.PriorFactorDouble(q_key, q, actuation_graph_builder.prior_q_cost_model))
            graph.add(gtd.PriorFactorDouble(v_key, v, actuation_graph_builder.prior_v_cost_model))
            # TODO(yetong): check why adding the massflow graph makes it unable to optimize
            # construct init values and optimize
            if k == 0:
                init_values = JRValues.init_values_from_init_config_actuator(self.jr, j, k, values)
            else:
                init_values = JRValues.init_values_from_prev_actuator(j, k, values)
            results = self.optimize(graph, init_values)
            mergeValues(values, results)
            # compute mass flow
            mdot, mdot_sigma = JRValues.compute_mass_flow(self.jr, values, j, k)
            values.insertDouble(Actuator.MassRateOpenKey(j, k), mdot)
            values.insertDouble(Actuator.MassRateActualKey(j, k), mdot_sigma)

    def step_robot_dynamics_by_layer(self, k, values):
        """ In case solving the entire dynamics graph is hard to converge,
            this function provides a more robust method to solve the dynamics
            graph by layers (q, v, dynamics).
        """
        # construct init values
        if k == 0:
            init_values = JRValues.init_values_from_fk_robot(self.jr, k, values)
        else:
            init_values = JRValues.init_values_from_prev_robot(self.jr.robot, k, values)
        robot_graph_builder = self.jr_graph_builder.robot_graph_builder
        opt = robot_graph_builder.graph_builder.opt()
        torso_i = self.jr.robot.link("torso").id()
        link_names = [link.name() for link in self.jr.robot.links()]
        # solve q level
        graph_q = robot_graph_builder.graph_builder.qFactors(self.jr.robot, k, None)
        pose_key = gtd.internal.PoseKey(torso_i, k).key()
        torso_pose = gtd.Pose(values, torso_i, k)
        graph_q.add(gtsam.PriorFactorPose3(pose_key, torso_pose, opt.p_cost_model))
        if "ground" not in link_names:
            for joint in self.jr.robot.joints():
                j = joint.id()
                # BUGFIX: this line was corrupted in the source
                # ("JointAngleKey(<KEY>()"); reconstructed to match the
                # identical pattern used in the v and dynamics levels below.
                q_key = gtd.internal.JointAngleKey(j, k).key()
                graph_q.add(gtd.PriorFactorDouble(q_key, gtd.JointAngleDouble(values, j, k), opt.prior_q_cost_model))
        init_values_q = gtd.ExtractValues(init_values, graph_q.keys())
        results_q = self.optimize(graph_q, init_values_q)
        mergeValues(init_values, results_q, overwrite=True)
        # solve v level
        graph_v = robot_graph_builder.graph_builder.vFactors(self.jr.robot, k, None)
        twist_key = gtd.internal.TwistKey(torso_i, k).key()
        torso_twist = gtd.Twist(values, torso_i, k)
        graph_v.add(gtd.PriorFactorVector6(twist_key, torso_twist, opt.v_cost_model))
        for joint in self.jr.robot.joints():
            j = joint.id()
            q_key = gtd.internal.JointAngleKey(j, k).key()
            graph_v.add(gtd.PriorFactorDouble(q_key, init_values.atDouble(q_key), opt.prior_q_cost_model))
        if "ground" not in link_names:
            for joint in self.jr.robot.joints():
                j = joint.id()
                v_key = gtd.internal.JointVelKey(j, k).key()
                graph_v.add(gtd.PriorFactorDouble(v_key, gtd.JointVelDouble(values, j, k), opt.prior_qv_cost_model))
        init_values_v = gtd.ExtractValues(init_values, graph_v.keys())
        results_v = self.optimize(graph_v, init_values_v)
        mergeValues(init_values, results_v, overwrite=True)
        # solve dynamics level
        graph_a = robot_graph_builder.graph_builder.aFactors(self.jr.robot, k, None)
        graph_d = robot_graph_builder.graph_builder.dynamicsFactors(self.jr.robot, k, None, None)
        graph_dynamics = graph_a
        graph_dynamics.push_back(graph_d)
        for joint in self.jr.robot.joints():
            j = joint.id()
            q_key = gtd.internal.JointAngleKey(j, k).key()
            v_key = gtd.internal.JointVelKey(j, k).key()
            torque_key = gtd.internal.TorqueKey(j, k).key()
            graph_dynamics.add(gtd.PriorFactorDouble(q_key, init_values.atDouble(q_key), opt.prior_q_cost_model))
            graph_dynamics.add(gtd.PriorFactorDouble(v_key, init_values.atDouble(v_key), opt.prior_qv_cost_model))
            graph_dynamics.add(gtd.PriorFactorDouble(torque_key, init_values.atDouble(torque_key), opt.prior_t_cost_model))
        for link in self.jr.robot.links():
            i = link.id()
            pose_key = gtd.internal.PoseKey(i, k).key()
            twist_key = gtd.internal.TwistKey(i, k).key()
            # only add pose/twist priors for links that appear in the graph
            graph_dynamics_keys = [key for key in gtd.KeySetToKeyVector(graph_dynamics.keys())]
            if pose_key in graph_dynamics_keys:
                graph_dynamics.add(gtsam.PriorFactorPose3(pose_key, init_values.atPose3(pose_key), opt.p_cost_model))
            if twist_key in graph_dynamics_keys:
                graph_dynamics.add(gtd.PriorFactorVector6(twist_key, gtd.Twist(init_values, i, k), opt.v_cost_model))
        init_values_dynamics = gtd.ExtractValues(init_values, graph_dynamics.keys())
        results_dynamics = self.optimize(graph_dynamics, init_values_dynamics)
        mergeValues(init_values, results_dynamics, overwrite=True)
        mergeValues(values, init_values, overwrite=True)

    def step_robot_dynamics(self, k, values):
        """ Perform robot dynamics by first performing forward kinematics,
            then solving the dynamics factor graph of the current step.
            Add results to values.

            Args:
                k (int): current step index
                values (gtsam.Values): values contains q, v, m_a, m_s of
                    current step and To, Ti, V_s

            Raises:
                Exception: forward kinematics disagreement
                Exception: optimization does not converge
        """
        link_names = [link.name() for link in self.jr.robot.links()]
        # construct dynamics graph for the time step
        robot_graph_builder = self.jr_graph_builder.robot_graph_builder
        opt = robot_graph_builder.graph_builder.opt()
        graph = robot_graph_builder.dynamics_graph(self.jr, k)
        # priors on the actuator torques computed by step_actuation_dynamics
        for actuator in self.jr.actuators:
            j = actuator.j
            torque_key = gtd.internal.TorqueKey(j, k).key()
            torque = values.atDouble(torque_key)
            graph.add(gtd.PriorFactorDouble(torque_key, torque, opt.prior_t_cost_model))
        # prior on torso link pose and twist
        i = self.jr.robot.link("torso").id()
        pose_key = gtd.internal.PoseKey(i, k).key()
        torso_pose = gtd.Pose(values, i, k)
        graph.add(gtsam.PriorFactorPose3(pose_key, torso_pose, opt.p_cost_model))
        twist_key = gtd.internal.TwistKey(i, k).key()
        torso_twist = gtd.Twist(values, i, k)
        graph.add(gtd.PriorFactorVector6(twist_key, torso_twist, opt.v_cost_model))
        # prior for joint angles and vels
        if "ground" not in link_names:
            for joint in self.jr.robot.joints():
                j = joint.id()
                q_key = gtd.internal.JointAngleKey(j, k).key()
                graph.add(gtd.PriorFactorDouble(q_key, gtd.JointAngleDouble(values, j, k), opt.prior_q_cost_model))
                v_key = gtd.internal.JointVelKey(j, k).key()
                graph.add(gtd.PriorFactorDouble(v_key, gtd.JointVelDouble(values, j, k), opt.prior_v_cost_model))
        # construct initial values
        if k == 0:
            init_values = JRValues.init_values_from_fk_robot(self.jr, k, values)
        else:
            init_values = JRValues.init_values_from_prev_robot(self.jr.robot, k, values)
        # solve the robot dynamics graph
        results = self.optimize(graph, init_values)
        mergeValues(values, results)

    def optimize(self, graph, init_values, threshold=1e-5):
        """ Run optimization with different optimizers to ensure convergence.
            TODO(yetong): check why each optimizer does not converge for cases

            Args:
                graph (gtsam.NonlinearFactorGraph): graph to solve
                init_values (gtsam.Values): initial estimate
                threshold (float): maximum acceptable final graph error

            Raises:
                Exception: optimization does not converge
        """
        # optimize
        optimizer = gtsam.LevenbergMarquardtOptimizer(graph, init_values)
        results = optimizer.optimize()
        # Check if optimization converges.
        # BUGFIX: compare against the threshold argument (was hard-coded 1e-5,
        # which silently ignored the parameter; default keeps old behavior).
        if graph.error(results) > threshold:
            # dump per-factor errors to help diagnose the divergence
            for f_idx in range(graph.size()):
                factor = graph.at(f_idx)
                print()
                graph_tmp = gtsam.NonlinearFactorGraph()
                graph_tmp.add(factor)
                gtd.DynamicsGraph.printGraph(graph_tmp)
                print(factor.error(init_values))
            print("init error: ", graph.error(init_values))
            # retry with verbose LM, then Dogleg, for diagnostics
            params = gtsam.LevenbergMarquardtParams()
            params.setVerbosityLM("SUMMARY")
            results = gtsam.LevenbergMarquardtOptimizer(
                graph, init_values, params).optimize()
            print("error: ", graph.error(results))
            print("graph size: ", graph.size())
            print('values size: ', init_values.size())
            results = gtsam.DoglegOptimizer(graph, init_values).optimize()
            print("dogleg error: ", graph.error(results))
            raise Exception("optimizing dynamics does not converge")
        return results

    def step_phase_change(self, k: int, phase: int, values: gtsam.Values):
        """ Check if phase change happens in the step.
            We follow event-driven algorithms in
            Brogliato02amr_simulating_non_smooth
            by checking the contact forces.

            Phases: 0 = both feet on ground, 1 = right foot off,
            2 = left foot off, 3 = airborne (inferred from the transitions
            below — confirm).
        """
        threshold = 0
        new_phase = phase
        if phase == 0:
            f_left = JRValues.get_ground_force_z(self.jr, "l", k, values)
            f_right = JRValues.get_ground_force_z(self.jr, "r", k, values)
            if f_left < threshold and f_right < threshold:
                new_phase = 3
            elif f_left < threshold:
                new_phase = 2
            elif f_right < threshold:
                new_phase = 1
        elif phase == 1:
            # BUGFIX: self.get_ground_force_z does not exist; use the JRValues
            # helper exactly as in the phase-0 branch above.
            f_left = JRValues.get_ground_force_z(self.jr, "l", k, values)
            if f_left < threshold:
                new_phase = 3
        elif phase == 2:
            # BUGFIX: same as above for the right foot.
            f_right = JRValues.get_ground_force_z(self.jr, "r", k, values)
            if f_right < threshold:
                new_phase = 3
        if new_phase != phase:
            # rebuild the robot model for the new contact configuration
            self.jr = JumpingRobot(self.yaml_file_path,
                                   self.init_config, new_phase)
        return new_phase

    def simulate(self, num_steps: int, dt: float, controls):
        """ Simulate the trajectory with specified controls.

            Args:
                num_steps (int): total number of simulation steps
                dt (float): duration of each step
                controls (Dict): specify control variables

            Returns:
                (gtsam.Values, list): (values for all steps, list of phases for
                each step)
        """
        self.jr = JumpingRobot(self.yaml_file_path, self.init_config)
        phase = 0
        step_phases = [phase]
        values = JRValues.init_config_values(self.jr, controls)
        for k in range(num_steps):
            print("step", k, "phase", phase)
            if k != 0:
                self.step_integration(k, dt, values)
            self.step_actuation_dynamics(k, values)
            self.step_robot_dynamics_by_layer(k, values)
            phase = self.step_phase_change(k, phase, values)
            step_phases.append(phase)
        return values, step_phases

    def simulate_with_torque_seq(self, num_steps, dt, torques_seq):
        """ Run simulation with specified torque sequence (bypasses the
            pneumatic actuation model). """
        controls = JumpingRobot.create_controls()
        # NOTE(review): passes *controls* where the constructor expects
        # init_config — confirm against JumpingRobot's signature.
        self.jr = JumpingRobot(self.yaml_file_path, controls)
        phase = 0
        step_phases = [phase]
        # BUGFIX: init_config_values lives on JRValues, not on the simulator
        # (see simulate() above); self.init_config_values did not exist.
        values = JRValues.init_config_values(self.jr, controls)
        for k in range(num_steps):
            print("step", k, "phase", phase)
            if k != 0:
                self.step_integration(k, dt, values, False)
            for joint in self.jr.robot.joints():
                j = joint.id()
                gtd.InsertTorqueDouble(values, j, k, torques_seq[k][j])
            self.step_robot_dynamics_by_layer(k, values)
            phase = self.step_phase_change(k, phase, values)
            step_phases.append(phase)
        return values, step_phases
def example_simulate():
""" Show an example robot jumping trajectory """
yaml_file_path = "examples/example_jumping_robot/yaml/robot_config.yaml"
theta = np.pi/3
rest_angles = [-theta, 2 * theta, -theta, -theta, 2*theta, | |
from collections import defaultdict
import numpy as np
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double_or_blank, integer_double_string_or_blank)
from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default
from pyNastran.dev.bdf_vectorized2.cards.elements.bars import init_x_g0
from pyNastran.bdf.cards.base_card import _format_comment
class BeamElement:
    """base class for CBEAM

    Vectorized card storage: newly added cards are buffered in the
    ``_``-prefixed python lists and folded into the numpy arrays (sorted by
    eid) on demand by ``make_current``.
    """
    card_name = ''

    def __init__(self, model):
        """intializes the BeamElement"""
        self.model = model
        self.is_current = True
        self.eid = np.array([], dtype='int32')
        self.pid = np.array([], dtype='int32')
        # BUGFIX: node ids are integers; make_current builds this as int32,
        # so the empty array should match (was float64).
        self.nids = np.array([], dtype='int32')
        self.offt = np.array([], dtype='|U8')
        self.x = np.array([], dtype='float64')
        self.g0 = np.array([], dtype='int32')
        self.pin_flags = np.array([], dtype='int32')
        self.wa_offset = np.array([], dtype='float64')
        self.wb_offset = np.array([], dtype='float64')
        self.sab_warping = np.array([], dtype='int32')
        # pending (not yet vectorized) card data
        self._eid = []
        self._pid = []
        self._nids = []
        self._offt = []
        self._x = []
        self._g0 = []
        self._pin_flags = []
        self._wa_offset = []
        self._wb_offset = []
        self._sab_warping = []
        self.comment = defaultdict(str)

    def check_if_current(self, nid, nids):
        """we split this up to reason about it easier"""
        if self.is_current:
            if nid in nids:
                # card exists, so we use that slot
                add_card = False
            else:
                add_card = True
        else:
            add_card = True
        return add_card

    #def get_element_by_eid(self, eid):
        #self.make_current()
        #ieid = np.searchsorted(eid, self.eid)
        #return self[ieid]

    def make_current(self):
        """creates an array of the GRID points"""
        if not self.is_current:
            if len(self.eid) > 0:  # there are already elements in self.eid
                # BUGFIX: 1-D id arrays (eid/pid/offt/g0) are concatenated with
                # hstack; the per-row 2-D arrays (nids, x, pin_flags, wa/wb
                # offsets) must use vstack.  The previous hstack/vstack choices
                # were swapped, producing wrongly-shaped arrays that broke the
                # ``[isort, :]`` indexing below.
                self.eid = np.hstack([self.eid, self._eid])
                self.pid = np.hstack([self.pid, self._pid])
                self.nids = np.vstack([self.nids, self._nids])
                self.offt = np.hstack([self.offt, self._offt])
                self.x = np.vstack([self.x, self._x])
                self.g0 = np.hstack([self.g0, self._g0])
                self.pin_flags = np.vstack([self.pin_flags, self._pin_flags])
                self.wa_offset = np.vstack([self.wa_offset, self._wa_offset])
                self.wb_offset = np.vstack([self.wb_offset, self._wb_offset])
                # don't need to handle comments
            else:
                self.eid = np.array(self._eid, dtype='int32')
                self.pid = np.array(self._pid, dtype='int32')
                self.nids = np.array(self._nids, dtype='int32')
                self.offt = np.array(self._offt, dtype='|U8')
                self.x = np.array(self._x, dtype='float64')
                self.g0 = np.array(self._g0, dtype='int32')
                self.pin_flags = np.array(self._pin_flags, dtype='int32')
                self.wa_offset = np.array(self._wa_offset, dtype='float64')
                self.wb_offset = np.array(self._wb_offset, dtype='float64')
            assert len(self.eid) == len(np.unique(self.eid))
            # keep the arrays sorted by element id
            isort = np.argsort(self.eid)
            self.eid = self.eid[isort]
            self.pid = self.pid[isort]
            self.nids = self.nids[isort, :]
            self.offt = self.offt[isort]
            self.x = self.x[isort, :]
            self.g0 = self.g0[isort]
            self.pin_flags = self.pin_flags[isort, :]
            self.wa_offset = self.wa_offset[isort, :]
            self.wb_offset = self.wb_offset[isort, :]
            # NOTE(review): _sab_warping is buffered but never folded into
            # self.sab_warping here — confirm whether SA/SB are meant to be
            # vectorized as well.
            self._eid = []
            self._pid = []
            self._nids = []
            self._offt = []
            self._x = []
            self._g0 = []
            self._pin_flags = []
            self._wa_offset = []
            self._wb_offset = []
            self._sab_warping = []
            self.is_current = True

    def cross_reference(self, model):
        """does this do anything?"""
        self.make_current()

    def __len__(self):
        """returns the number of elements (vectorized + pending)"""
        return len(self.eid) + len(self._eid)

    def repr_indent(self, indent=''):
        """summary string; the 'v' suffix marks the vectorized card class"""
        self.make_current()
        neids = len(self.eid)
        if neids == 0:
            return '%s%sv; nelements=%s' % (indent, self.card_name, neids)
        msg = '%s%sv; nelements=%s:\n' % (indent, self.card_name, neids)
        msg += '%s  eid = %s\n' % (indent, self.eid)
        upid = np.unique(self.pid)
        if len(upid) == 1 and upid[0] == 0:
            msg += '%s  upid = %s\n' % (indent, upid)
        else:
            msg += '%s  pid = %s\n' % (indent, self.pid)
        #msg += '  nid =\n%s' % self.nid
        return msg

    def __repr__(self):
        return self.repr_indent('')
class CBEAMv(BeamElement):
"""
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+=====+=====+=====+=====+=====+=====+==========+
| CBEAM | EID | PID | GA | GB | X1 | X2 | X3 | OFFT/BIT |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| | SA | SB | | | | | | |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
or
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+=====+=====+=====+=====+=====+=====+==========+
| CBEAM | EID | PID | GA | GB | G0 | | | OFFT/BIT |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| | SA | SB | | | | | | |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
offt/bit are MSC specific fields
"""
card_name = 'CBEAM'
def add(self, eid, pid, nids, x, g0, offt='GGG', bit=None,
        pin_flags=None, wa=None, wb=None, sa=0, sb=0, comment=''):
    """
    Adds a CBEAM card

    Parameters
    ----------
    eid : int
        element id
    pid : int
        property id
    nids : List[int, int]
        node ids; connected grid points at ends A and B
    x : List[float, float, float]
        Components of orientation vector, from GA, in the displacement
        coordinate system at GA (default), or in the basic coordinate system
    g0 : int
        Alternate method to supply the orientation vector using grid
        point G0. Direction of is from GA to G0. is then transferred
        to End A
    offt : str; default='GGG'
        Offset vector interpretation flag
        None : bit is active
    bit : float; default=None
        Built-in twist of the cross-sectional axes about the beam axis
        at end B relative to end A.
        For beam p-elements ONLY!
        None : offt is active
    pin_flags : List[int, int]; default=None
        None : [0, 0]; don't release the DOFs
        Pin Flag at End A/B. Releases the specified DOFs
    wa / wb : List[float, float, float]; default=None -> [0., 0., 0.]
        Components of offset vectors from the grid points to the end
        points of the axis of the shear center
    sa / sb : int; default=0
        Scalar or grid point identification numbers for the ends A and B,
        respectively. The degrees-of-freedom at these points are the
        warping variables . SA and SB cannot be specified for
        beam p-elements
    comment : str; default=''
        a comment for the card

    offt/bit are MSC specific fields
    """
    # x and G0 are mutually exclusive; blank out x when G0 is supplied
    if g0 is None:
        g0 = -1
    else:
        x = [np.nan, np.nan, np.nan]
    if pin_flags is None:
        pin_flags = [0, 0]
    # BUGFIX/robustness: default the offset vectors to zeros; appending None
    # would poison the float64 arrays later built by make_current.
    if wa is None:
        wa = np.zeros(3)
    if wb is None:
        wb = np.zeros(3)
    # NOTE(review): registers the eid with model.bars — confirm beams should
    # not use a beam-specific tracker instead.
    self.model.bars.add(eid)
    self.is_current = False
    self._eid.append(eid)
    self._pid.append(pid)
    self._nids.append(nids)
    self._x.append(x)
    self._g0.append(g0)
    self._offt.append(offt)
    self._wa_offset.append(wa)
    self._wb_offset.append(wb)
    self._sab_warping.append([sa, sb])
    self._pin_flags.append(pin_flags)
    #self._offset.append(wa_offset)
    if comment:
        self.comment[eid] = _format_comment(comment)
def add_card(self, card, comment=''):
    """
    Adds a CBEAM card from ``BDF.add_card(...)``

    Parameters
    ----------
    card : BDFCard()
        a BDFCard object
    comment : str; default=''
        a comment for the card
    """
    eid = integer(card, 1, 'eid')
    pid = integer_or_blank(card, 2, 'pid', eid)  # PID defaults to EID
    ga = integer(card, 3, 'ga')
    gb = integer(card, 4, 'gb')
    # fields 5-7 hold either the x orientation vector or G0
    x, g0 = init_x_g0(card, eid)
    # NOTE(review): init_offt_bit is not among this module's visible imports —
    # presumably defined elsewhere in this file; confirm.
    offt, bit = init_offt_bit(card, eid)# offt doesn't exist in NX nastran
    pin_flag_a = integer_or_blank(card, 9, 'pa', 0)
    pin_flag_b = integer_or_blank(card, 10, 'pb', 0)
    # offset vectors at ends A and B (fields 11-16), default zero
    wa = np.array([double_or_blank(card, 11, 'w1a', 0.0),
                   double_or_blank(card, 12, 'w2a', 0.0),
                   double_or_blank(card, 13, 'w3a', 0.0)], 'float64')
    wb = np.array([double_or_blank(card, 14, 'w1b', 0.0),
                   double_or_blank(card, 15, 'w2b', 0.0),
                   double_or_blank(card, 16, 'w3b', 0.0)], 'float64')
    # warping scalar points at ends A and B
    sa = integer_or_blank(card, 17, 'sa', 0)
    sb = integer_or_blank(card, 18, 'sb', 0)
    assert len(card) <= 19, 'len(CBEAM card) = %i\ncard=%s' % (len(card), card)
    return self.add(eid, pid, [ga, gb], x, g0, offt, bit,
                    [pin_flag_a, pin_flag_b], wa, wb, sa, sb, comment=comment)
#def update(self, grid):
#"""functions like a dictionary"""
#nid = grid.nid
#add_card = self.check_if_current(eid, self.eid)
#if add_card:
#self.add(nid, grid.xyz, cp=grid.cp, cd=grid.cd, # add_cquad4
#ps=grid.ps, seid=grid.seid, comment=grid.comment)
#self.is_current = False
#else:
#inid = np.where(nid == self.nid)[0]
#self.nid[inid] = grid.nid
#self.xyz[inid] = grid.xyz
#self.cp[inid] = grid.cp
#self.cd[inid] = grid.cd
#self.ps[inid] = grid.ps
#self.seid[inid] = grid.seid
#self.comment[nid] = comment
#self.is_current = True # implicit
#def __iter__(self):
#pass
#def __next__(self):
#pass
#def __items__(self):
#pass
#def __keys__(self):
#pass
#def __values__(self):
#pass
#def __getitem__(self, i):
#"""this works on index"""
#self.make_current()
#eid = self.eid[i]
#return GRID(nid, self.xyz[i], cp=self.cp[i], cd=self.cd[i],
#ps=self.ps[i], seid=self.seid[i], comment=self.comment[nid])
#def __setitem__(self, i, value):
#pass
#def __delitem__(self, i):
#pass
@classmethod
def get_x_g0_defaults(cls, x, g0):
    """
    X and G0 compete for the same card fields, so this helper decides
    which triple gets written.

    Returns
    -------
    x_g0 : varies
        (g0, None, None) when G0 is defined;
        otherwise the orientation vector as [x1, x2, x3]
    """
    if g0 is None:
        return list(x)
    return (g0, None, None)
def write_card(self, size=8, is_double=False, bdf_file=None):
assert bdf_file is not None
self.make_current()
msg = ''
for eid, pid, nodes, x, g0, offt, pin_flags, wa_offset, wb_offset in zip(
self.eid, self.pid, self.nids, self.x, self.g0, self.offt, self.pin_flags, self.wa_offset, self.wb_offset):
x1, x2, x3 = self.get_x_g0_defaults(x, g0)
#pa = set_blank_if_default(self.pa, 0)
#pb = set_blank_if_default(self.pb, 0)
#w1a = set_blank_if_default(self.wa[0], 0.0)
#w2a = set_blank_if_default(self.wa[1], 0.0)
#w3a = set_blank_if_default(self.wa[2], 0.0)
#w1b = set_blank_if_default(self.wb[0], 0.0)
#w2b = set_blank_if_default(self.wb[1], 0.0)
#w3b = set_blank_if_default(self.wb[2], 0.0)
ga, gb = nodes
pin_flag_a, pin_flag_b = | |
<reponame>il-dionigi/crazyflie_ros_cyphy<gh_stars>0
#!/usr/bin/env python
import rospy
import tf
import numpy as np
import matplotlib.pyplot as plt
import atexit
from crazyflie_driver.msg import Position
from crazyflie_driver.msg import ConsoleMessage
from crazyflie_driver.msg import Hover
from crazyflie_driver.msg import GenericLogData
from std_msgs.msg import Empty
from crazyflie_driver.srv import UpdateParams
from threading import Thread
from geometry_msgs.msg import PointStamped, TransformStamped, PoseStamped #PoseStamped added to support vrpn_client
# --- experiment / behavior flags ---
dont_move = True            # when True, hover/position commands are suppressed
Fly = False                 # master flight-enable flag
enc_trace_test = False
Order = 'F'                 # presumably a numpy array-order flag — confirm
time_series_test = False
inf_loop = True
delta_p_param = 0
# LPS/DW1000 timestamp clock rate in ticks per second: 499.2 MHz * 128
LOCODECK_TS_FREQ = 499.2*(10**6) * 128
# --- UWB anchor/beacon state (8 anchors) ---
delta_bs = [0,0,0,0, 0,0,0,0]
beaconPos = [0,0,0]
beaconPos2 = [0,0,0]
cameraPos = [0,0,0]         # latest externally tracked (mocap/vrpn) position
beaconDists = [0,0,0,0, 0,0,0,0]   # latest measured distance per anchor
beaconMins = [0,0,0,0, 0,0,0,0]
beaconMaxs = [0,0,0,0, 0,0,0,0]
beaconMeans = [0,0,0,0, 0,0,0,0]
# --- ranging timing accumulators; *_h8 presumably hold the high byte of the
# 40-bit DW1000 timestamps (low 32 bits in the plain names) — confirm ---
delta_pl32 = 0
delta_ph8 = 0
singleRanging = 0
allRangings = 0
betweenRounds = 0
betweenRangings = 0
singleRanging_h8 = 0
allRangings_h8 = 0
betweenRounds_h8 = 0
betweenRangings_h8 = 0
ts = [0,0,0, 0,0,0,0]
count1 = 0
count2 = 0
samples = 100               # window size for the (disabled) min/max/mean stats
#x, y, z, yaw
currPos = [0,0,0,0] # current setpoint
shift = [0,0]               # corrective x/y shift set when leaving the volume
STOP = False                # set once a stop command has been issued
delta_d_list = []
delta_p_list = []
delta_b_list = []
bc_diffx = []
bc_diffy = []
bc_diffz = []
error = False               # latched by err_handler to make it one-shot
tofs = [1,2,3, 4]
encStates = []
#x,y,z,K
encState = [0,0,0,0]
Ktotal = 0.000001
K1count = 0
obstacles = [ [1,1,0.5], [0,2,0.2], [2,0,0.3], [-1, -6, 5] ] # drone canNOT go through these, list of [x, y, radius]
waypoints = [ ] # drone MUST go through these, list of [x,y, radius]
mirror_pts = [0,0,0]
def err_handler():
    """Latch the error flag and broadcast a single stop command.

    Idempotent: once `error` is set, further calls are no-ops.
    """
    global error, STOP
    if error:
        return
    rospy.loginfo("///|Exiting. sending stop command|///")
    publisher = rospy.Publisher("cmd_stop", Empty, queue_size=1)
    STOP = True
    publisher.publish(Empty())
    error = True
def exit_handler():
    """atexit hook: command the origin setpoint, then publish a stop."""
    global STOP
    rospy.loginfo("///Exiting. sending stop command///")
    positionMove([0,0,0,0],0.1)
    publisher = rospy.Publisher("cmd_stop", Empty, queue_size=1)
    STOP = True
    publisher.publish(Empty())
def check_within_bounds():
    """Return True when cameraPos lies inside the safe flight box.

    Outside the box, nudge the global setpoint `shift` back toward the
    centre and return False.  NOTE(review): x is *tested* against +/-2.4
    but *shifted* at +/-2 -- presumably deliberate hysteresis; confirm.
    """
    global cameraPos, shift
    x, y, z = cameraPos
    if (-2.4 < x < 2.4) and (-1.7 < y < 1.7) and (z < 1.3):
        return True
    if y < -1.7:
        shift[1] = 0.5
    elif y > 1.7:
        shift[1] = -0.5
    if x < -2:
        shift[0] = 0.5
    elif x > 2:
        shift[0] = -0.5
    #err_handler()
    return False
def callback_beacon_ranging1(data):
    """Store the latest ranges for anchors 0-3 from a ranging log message."""
    global beaconDists, count1, beaconMins, beaconMaxs, beaconMeans, samples
    for i in range(4):
        beaconDists[i] = data.values[i]
    # Min/max/mean bookkeeping over `samples` messages -- currently disabled.
    """
    count1 = count1 + 1
    if (count1 == samples):
        count1 = 0
        for i in range(4):
            rospy.loginfo("B{} Mean: {} Max: {} Min: {}".format(i, beaconMeans[i]/samples, beaconMaxs[i], beaconMins[i]))
            beaconMaxs[i] = data.values[i]
            beaconMins[i] = data.values[i]
            beaconMeans[i] = data.values[i]
    else:
        for i in range(4):
            if beaconDists[i] > beaconMaxs[i]:
                beaconMaxs[i] = beaconDists[i]
            if beaconDists[i] < beaconMins[i]:
                beaconMins[i] = beaconDists[i]
            beaconMeans[i] += beaconDists[i]
    """
def callback_beacon_ranging2(data):
    """Store the latest ranges for anchors 4-7 from a ranging log message."""
    global beaconDists, count2
    for i in range(4,8):
        beaconDists[i] = data.values[i-4]
    # Min/max/mean bookkeeping over `samples` messages -- currently disabled.
    """
    count2 = count2 + 1
    if (count2 == samples):
        count2 = 0
        for i in range(4,8):
            rospy.loginfo("B{} Mean: {} Max: {} Min: {}".format(i, beaconMeans[i]/samples, beaconMaxs[i], beaconMins[i]))
            beaconMaxs[i] = data.values[i-4]
            beaconMins[i] = data.values[i-4]
            beaconMeans[i] = data.values[i-4]
    else:
        for i in range(4,8):
            if beaconDists[i] > beaconMaxs[i]:
                beaconMaxs[i] = beaconDists[i]
            if beaconDists[i] < beaconMins[i]:
                beaconMins[i] = beaconDists[i]
            beaconMeans[i] += beaconDists[i]
    """
def callback_pos_beacons(data):
    """Cache the first anchor-derived position estimate (x, y, z)."""
    global beaconPos
    for axis in range(3):
        beaconPos[axis] = data.values[axis]
def callback_pos_beacons2(data):
    """Cache the second anchor-derived position estimate (x, y, z)."""
    global beaconPos2
    for axis in range(3):
        beaconPos2[axis] = data.values[axis]
def callback_pos_camera(data):
    """Cache the motion-capture position and re-check the flight box."""
    global cameraPos
    point = data.point
    cameraPos[0] = point.x
    cameraPos[1] = point.y
    cameraPos[2] = point.z
    check_within_bounds()
def callback_twr_time(data):
    """Copy the raw TWR timestamps and record the drone-side delay (ms)."""
    global ts, delta_d_list, delta_p_list, delta_b_list, delta_pl32, delta_ph8
    for idx, val in enumerate(data.values):
        ts[idx] = val
    #delta_p_list.append(1000*((ts[5])/LOCODECK_TS_FREQ + ts[6]*(2**32/LOCODECK_TS_FREQ)) )
    delta_d_list.append(1000*(ts[4]-ts[3])/LOCODECK_TS_FREQ)
def callback_twr_beacon(data):
    """Copy the beacon-side deltas and record the first one in ms."""
    global delta_bs
    for idx, val in enumerate(data.values):
        delta_bs[idx] = val
    delta_b_list.append(1000*delta_bs[0]/LOCODECK_TS_FREQ)
def callback_twr_other(data):
    """Cache the TWR timing diagnostics published by the deck firmware."""
    global singleRanging, allRangings, betweenRounds, betweenRangings, singleRanging_h8, allRangings_h8, betweenRounds_h8, betweenRangings_h8
    vals = data.values
    singleRanging = vals[0]
    allRangings = vals[1]
    betweenRounds = vals[2]
    betweenRangings = vals[3]
    singleRanging_h8 = vals[4]
    allRangings_h8 = vals[5]
    betweenRounds_h8 = vals[6]
    # betweenRangings_h8 (vals[7]) deliberately not consumed here.
    #betweenRangings_h8 = data.values[7]
def callback_twr_eve(data):
    """Cache the four time-of-flight values seen by the eavesdropper."""
    global tofs
    for idx in range(4):
        tofs[idx] = data.values[idx]
#est, est2 actual_add, actual_mult
def callback_twr_enc(data):
    """Accumulate decoded encrypted-trace states [x, y, z, K].

    Duplicate x readings are dropped.  A jump of more than 3 units
    between consecutive states is treated as a flip across the mirror
    plane and feeds a running estimate of the mirror point.
    """
    global encStates, encState, Ktotal, K1count
    if (encState[0] == data.values[0]):
        return
    elif ( dist(encState, data.values) > 3 ):
        for i in range(3):
            if mirror_pts[i] != 0:
                # EMA: 10% of the new midpoint-sum estimate, 90% of the old.
                mirror_pts[i] = (encState[i] + data.values[i])*0.1 + 0.9*mirror_pts[i]
            else:
                # First estimate: seed with the raw sum of old and new state.
                mirror_pts[i] = (encState[i] + data.values[i])
    for i in range(4):
        encState[i] = data.values[i]
    encStates.append(np.copy(encState))
    Ktotal += 1
    K1count += (encState[3] == 1)
def callback_twr_encTime(data):
    """Encrypted-trace timing callback; the printouts are disabled, so no-op."""
    # Previously printed data.values[0] (mirror time) and data.values[1]
    # (AES time); kept as a stub so the subscriber wiring stays valid.
    pass
def publisherThread():
    """Continuously publish the current setpoint at `rate` until shutdown.

    When STOP is latched, a zero setpoint is published ten times (so at
    least one lands the drone) and the thread returns.  Relies on the
    module-level msgPos/pubPos/rate objects created in the main script.
    """
    global currPos, shift
    sequence = 0
    while not rospy.is_shutdown():
        if STOP:
            msgPos.x = 0
            msgPos.y = 0
            msgPos.z = 0
            msgPos.header.seq = sequence
            msgPos.header.stamp = rospy.Time.now()
            # Repeat the stop setpoint so the command is not lost.
            for j in range(10):
                pubPos.publish(msgPos)
                sequence += 1
                rate.sleep()
            return
        else:
            # Apply the out-of-bounds correction shift on x/y only.
            msgPos.x = currPos[0]+shift[0]
            msgPos.y = currPos[1]+shift[1]
            msgPos.z = currPos[2]
            msgPos.yaw = currPos[3]
            msgPos.header.seq = sequence
            msgPos.header.stamp = rospy.Time.now()
            pubPos.publish(msgPos)
            sequence += 1
            rate.sleep()
def positionMove(pos=None, t=1, N=1):
    """Command a move to *pos* = [x, y, z, yaw] and dwell there t seconds.

    The dwell is split into N sleep slices; for N > 2 a camera/beacon
    difference sample is taken after each slice.

    Args:
        pos: target setpoint [x, y, z, yaw]; defaults to the origin.
        t: total dwell time at the setpoint, in seconds.
        N: number of sleep slices (and diff samples when N > 2).
    """
    global currPos, STOP
    # Fix: the original used a mutable default argument ([0,0,0,0]),
    # which is shared across calls; build the default per call instead.
    if pos is None:
        pos = [0, 0, 0, 0]
    if not dont_move:
        currPos = pos
    if (STOP):
        return
    for i in range(N):
        rospy.sleep(1.0*t/N)
        if (N > 2):
            get_diff()
            #print_beacon_camera_diff()
def get_diff():
    """Record per-axis |camera - beacon| error and the protocol delay.

    Triggers err_handler() (emergency stop) when the two position
    estimates disagree by more than 2.5 units in x or y.
    """
    global cameraPos, beaconPos2, bc_diffx, bc_diffy, bc_diffz, delta_p_list, ts
    dx = cameraPos[0] - beaconPos2[0]
    dy = cameraPos[1] - beaconPos2[1]
    dz = cameraPos[2] - beaconPos2[2]
    if (np.abs(dx) > 2.5 or np.abs(dy) > 2.5):
        err_handler()
    bc_diffx.append(np.abs(dx))
    bc_diffy.append(np.abs(dy))
    bc_diffz.append(np.abs(dz))
    # Protocol delay in ms: ts[5] is the low 32 bits of the tick count,
    # ts[6] counts 2**32-tick wraparounds.
    delta_p_list.append(1000*((ts[5])/LOCODECK_TS_FREQ + ts[6]*(2**32/LOCODECK_TS_FREQ)) )
def get_stats():
    """Log camera/beacon statistics and append them to a per-run text file.

    Refuses to save when an error was latched or when fewer than two
    protocol-delay samples were collected (the statistics would be
    meaningless).
    """
    # Fix: the original global statement listed bc_diffy twice and
    # omitted bc_diffz.
    global error, delta_b_list, delta_d_list, delta_p_list, bc_diffx, bc_diffy, bc_diffz
    if (error or len(delta_p_list) < 2 or np.std(delta_p_list) == 0):
        rospy.loginfo("XXXX ERROR XXXX DID NOT SAVE XXXX")
        return
    else:
        rospy.loginfo("XXXX SAVED XXXX")
    rospy.loginfo("STATS: delta_b, delta_d, delta_p in ms, dx, dy, dz in m")
    #rospy.loginfo("db mean:{}, stddev:{}, max-min:{}".format(np.mean(delta_b_list), np.std(delta_b_list), np.max(delta_b_list)-np.min(delta_b_list)))
    #rospy.loginfo("dd mean:{}, stddev:{}, max-min:{}".format(np.mean(delta_d_list), np.std(delta_d_list), np.max(delta_d_list)-np.min(delta_d_list)))
    rospy.loginfo("dp mean:{}, stddev:{}, max-min:{}".format(np.mean(delta_p_list), np.std(delta_p_list), np.max(delta_p_list)-np.min(delta_p_list)))
    #rospy.loginfo(str(delta_b_list))
    rospy.loginfo("dx mean:{}, stddev:{}, max-min:{}".format(np.mean(bc_diffx), np.std(bc_diffx), np.max(bc_diffx)-np.min(bc_diffx)))
    rospy.loginfo("dy mean:{}, stddev:{}, max-min:{}".format(np.mean(bc_diffy), np.std(bc_diffy), np.max(bc_diffy)-np.min(bc_diffy)))
    rospy.loginfo("dz mean:{}, stddev:{}, max-min:{}".format(np.mean(bc_diffz), np.std(bc_diffz), np.max(bc_diffz)-np.min(bc_diffz)))
    # Fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open("order:"+Order+str(np.mean(delta_p_list))+".txt", "a") as f:
        f.write("\nNEW RUN: \ndp=" + str(np.mean(delta_p_list) ) + "\n")
        f.write("\nstddevdp:{}\n".format(np.std(delta_p_list) ))
        f.write("\nmeanx:{}\n".format(np.mean(bc_diffx) ))
        f.write("\nstddevx:{}\n".format(np.std(bc_diffx) ))
        f.write("\nmeany:{}\n".format(np.mean(bc_diffy) ))
        f.write("\nstddevy:{}\n".format(np.std(bc_diffy) ))
        f.write("\nmeanz:{}\n".format(np.mean(bc_diffz) ))
        f.write("\nstddevz:{}\n".format(np.std(bc_diffz) ))
        for num in bc_diffx:
            f.write("\ndx:"+str(num))
        for num in bc_diffy:
            f.write("\ndy:"+str(num))
        for num in bc_diffz:
            f.write("\ndz:"+str(num))
def save_enc_trace():
    """Persist the encrypted trace, then evaluate the eavesdropper attack.

    Splits the trace into the two mirror-candidate trajectories, picks
    the one consistent with the obstacle/waypoint map, and reports the
    mean distance of that guess from the true (de-mirrored) trajectory.
    """
    global encStates, K1count, Ktotal, mirror_pts, waypoints
    print("MIRROR POINTS:" + str(mirror_pts) )
    if (len(encStates) < 2 or K1count < 2):
        rospy.loginfo("XXXX ERROR XXXX DID NOT SAVE XXXX")
        return
    else:
        rospy.loginfo("XXXX SAVED XXXX")
    # Fix: context manager closes the trace file (original leaked the
    # handle); also dropped an `actual` computed but never used in this
    # loop and a dead `mse = 0` that was immediately recomputed later.
    with open("EncK1percent:{}.txt".format(1.0*K1count/Ktotal), "a") as f:
        f.write("NEW RUN\n")
        for estate in encStates:
            for i in range(3):
                f.write(str(estate[i]) + ",")
            f.write(str(estate[3]) + "\n")
    p1, p2 = eve_split_trajs()
    path = p1
    if (check_collision(p1) ):
        path = p2
        print("p1 crashed")
    elif (check_collision(p2) ):
        path = p1
        print("p2 crashed")
    else:
        print("NO CRASHES, GUESS")
        if not check_waypoints(p1):
            print("p1 didnt pass waypoint")
            if check_waypoints(p2):
                print("and p2 did pass!")
                path = p2
        elif not check_waypoints(p2):
            print("p2 didnt pass waypoint")
            if check_waypoints(p1):
                print("and p1 did pass!")
                path = p1
        else:
            print("both went through waypoints!")
    mse = 0
    for i in range(len(path)):
        actual = encStates[i]
        if actual[3]:
            # De-mirror the true state (the K flag marks mirrored points).
            actual = mirror(np.copy(actual), known=True)
        mse += dist(path[i], actual)
    mse = mse / len(path)
    print("ERROR:{}".format(mse))
    if (mse <= 0.5):
        print("Attack succesful!")
    else:
        print("Attack failed")
    print("FILE: EncK1percent:{}.txt".format(1.0*K1count/Ktotal) )
    print("Waypoints:" + str(waypoints))
def check_collision(path):
    """Return True if any path point enters an obstacle's bounding square.

    Obstacles are [x, y, radius]; the test is per-axis (square), not
    Euclidean, and ignores z.
    """
    global obstacles
    for px, py, pz in path:
        for ox, oy, radius in obstacles:
            if abs(px - ox) < radius and abs(py - oy) < radius:
                return True
    return False
def check_waypoints(path):
    """Return True only if every waypoint is visited by some path point.

    Waypoints are [x, y, radius]; the hit test is per-axis and ignores z.
    """
    global waypoints
    for wx, wy, radius in waypoints:
        hit = any(
            abs(px - wx) < radius and abs(py - wy) < radius
            for px, py, pz in path
        )
        if not hit:
            return False
    return True
def mirror(point, known=False):
    """Reflect *point* across the mirror plane.

    With known=True the true plane is used (negate x and y, reflect z
    about z = 1.5); otherwise the estimated mirror point is used.
    """
    global mirror_pts
    if known:
        return [-point[0], -point[1], 3 - point[2]]
    mx, my, mz = mirror_pts
    return [mx - point[0], my - point[1], mz - point[2]]
def eve_split_trajs():
    """Split the ambiguous trace into its two candidate trajectories.

    Each state could be real or mirrored; greedily assign every state
    (and its mirror image) to whichever running path it is closer to.
    """
    global encStates
    path1 = [encStates[0][:3]]
    path2 = [mirror(encStates[0])]
    for state in encStates[1:]:
        if dist(path1[-1], state) < dist(path2[-1], state):
            near, far = path1, path2
        else:
            near, far = path2, path1
        near.append(state[:3])
        far.append(mirror(state))
    return path1, path2
def eve_split_trajs_multiple(num_paths):
#get the num_paths possible trajectories, hopefully
global encStates, clusterStates
paths = [ ] #num_paths dimensional
for i in range(len(encStates)):
nextPoint = encStates[i][:3]
shortestDist = 100
bestPath = None
for j in range(len(paths)):
path = paths[j]
shortestPathDist = 100
for point in path:
d = dist(point,nextPoint)
if (d < shortestPathDist):
shortestPathDist = d
if shortestPathDist < shortestDist:
shortestDist = shortestPathDist
bestPath = j
if (bestPath is None or shortestDist > 1) and len(paths) < num_paths:
| |
get_preserved_filters_querystring(self):
return urlencode({
'_changelist_filters': self.get_changelist_filters_querystring()
})
def get_sample_user_id(self):
    """Return the pk of the fixture user targeted by the detail URLs."""
    return self.joepublicuser.pk
def get_changelist_url(self):
    """User changelist URL with the test filters applied."""
    base = reverse('admin:auth_user_changelist',
                   current_app=self.admin_site.name)
    return '%s?%s' % (base, self.get_changelist_filters_querystring())
def get_add_url(self, add_preserved_filters=True):
    """User add-form URL, optionally carrying the preserved filters."""
    url = reverse('admin:auth_user_add', current_app=self.admin_site.name)
    if not add_preserved_filters:
        return url
    return '%s?%s' % (url, self.get_preserved_filters_querystring())
def get_change_url(self, user_id=None, add_preserved_filters=True):
    """Change-form URL for *user_id* (default: the sample user)."""
    pk = self.get_sample_user_id() if user_id is None else user_id
    url = reverse('admin:auth_user_change', args=(pk,), current_app=self.admin_site.name)
    if add_preserved_filters:
        url = '%s?%s' % (url, self.get_preserved_filters_querystring())
    return url
def get_history_url(self, user_id=None):
    """History-view URL for *user_id*, carrying the preserved filters."""
    pk = self.get_sample_user_id() if user_id is None else user_id
    base = reverse('admin:auth_user_history', args=(pk,),
                   current_app=self.admin_site.name)
    return "%s?%s" % (base, self.get_preserved_filters_querystring())
def get_delete_url(self, user_id=None):
    """Delete-view URL for *user_id*, carrying the preserved filters."""
    pk = self.get_sample_user_id() if user_id is None else user_id
    base = reverse('admin:auth_user_delete', args=(pk,),
                   current_app=self.admin_site.name)
    return "%s?%s" % (base, self.get_preserved_filters_querystring())
def test_changelist_view(self):
    """Changelist rows link to change views that keep the filters."""
    response = self.client.get(self.get_changelist_url())
    self.assertEqual(response.status_code, 200)
    # Check the `change_view` link has the correct querystring.
    detail_link = re.search(
        '<a href="(.*?)">{}</a>'.format(self.joepublicuser.username),
        response.content.decode()
    )
    self.assertURLEqual(detail_link[1], self.get_change_url())
def test_change_view(self):
    """Change view keeps preserved filters in form action, links, redirects."""
    # Get the `change_view`.
    response = self.client.get(self.get_change_url())
    self.assertEqual(response.status_code, 200)
    # Check the form action.
    form_action = re.search(
        '<form action="(.*?)" method="post" id="user_form" novalidate>',
        response.content.decode()
    )
    self.assertURLEqual(form_action[1], '?%s' % self.get_preserved_filters_querystring())
    # Check the history link.
    history_link = re.search(
        '<a href="(.*?)" class="historylink">History</a>',
        response.content.decode()
    )
    self.assertURLEqual(history_link[1], self.get_history_url())
    # Check the delete link.
    delete_link = re.search(
        '<a href="(.*?)" class="deletelink">Delete</a>',
        response.content.decode()
    )
    self.assertURLEqual(delete_link[1], self.get_delete_url())
    # Test redirect on "Save".
    post_data = {
        'username': 'joepublic',
        'last_login_0': '2007-05-30',
        'last_login_1': '13:20:10',
        'date_joined_0': '2007-05-30',
        'date_joined_1': '13:20:10',
    }
    post_data['_save'] = 1
    response = self.client.post(self.get_change_url(), data=post_data)
    self.assertRedirects(response, self.get_changelist_url())
    post_data.pop('_save')
    # Test redirect on "Save and continue".
    post_data['_continue'] = 1
    response = self.client.post(self.get_change_url(), data=post_data)
    self.assertRedirects(response, self.get_change_url())
    post_data.pop('_continue')
    # Test redirect on "Save and add new".
    post_data['_addanother'] = 1
    response = self.client.post(self.get_change_url(), data=post_data)
    self.assertRedirects(response, self.get_add_url())
    post_data.pop('_addanother')
def test_change_view_without_preserved_filters(self):
    """Without preserved filters the form action attribute is omitted."""
    response = self.client.get(self.get_change_url(add_preserved_filters=False))
    # The action attribute is omitted.
    self.assertContains(response, '<form method="post" id="user_form" novalidate>')
def test_add_view(self):
    """Add view keeps preserved filters in the form action and redirects."""
    # Get the `add_view`.
    response = self.client.get(self.get_add_url())
    self.assertEqual(response.status_code, 200)
    # Check the form action.
    form_action = re.search(
        '<form action="(.*?)" method="post" id="user_form" novalidate>',
        response.content.decode()
    )
    self.assertURLEqual(form_action[1], '?%s' % self.get_preserved_filters_querystring())
    post_data = {
        'username': 'dummy',
        'password1': '<PASSWORD>',
        'password2': '<PASSWORD>',
    }
    # Test redirect on "Save".
    post_data['_save'] = 1
    response = self.client.post(self.get_add_url(), data=post_data)
    self.assertRedirects(response, self.get_change_url(User.objects.get(username='dummy').pk))
    post_data.pop('_save')
    # Test redirect on "Save and continue".
    post_data['username'] = 'dummy2'
    post_data['_continue'] = 1
    response = self.client.post(self.get_add_url(), data=post_data)
    self.assertRedirects(response, self.get_change_url(User.objects.get(username='dummy2').pk))
    post_data.pop('_continue')
    # Test redirect on "Save and add new".
    post_data['username'] = 'dummy3'
    post_data['_addanother'] = 1
    response = self.client.post(self.get_add_url(), data=post_data)
    self.assertRedirects(response, self.get_add_url())
    post_data.pop('_addanother')
def test_add_view_without_preserved_filters(self):
    """Without preserved filters the add-form action attribute is omitted."""
    response = self.client.get(self.get_add_url(add_preserved_filters=False))
    # The action attribute is omitted.
    self.assertContains(response, '<form method="post" id="user_form" novalidate>')
def test_delete_view(self):
    """Confirming a delete redirects back to the filtered changelist."""
    # Test redirect on "Delete".
    response = self.client.post(self.get_delete_url(), {'post': 'yes'})
    self.assertRedirects(response, self.get_changelist_url())
def test_url_prefix(self):
    """Preserved filters survive non-empty (including non-ASCII) script prefixes."""
    context = {
        'preserved_filters': self.get_preserved_filters_querystring(),
        'opts': User._meta,
    }
    prefixes = ('', '/prefix/', '/後台/')
    for prefix in prefixes:
        with self.subTest(prefix=prefix), override_script_prefix(prefix):
            url = reverse('admin:auth_user_changelist', current_app=self.admin_site.name)
            self.assertURLEqual(
                self.get_changelist_url(),
                add_preserved_filters(context, url),
            )
class NamespacedAdminKeepChangeListFiltersTests(AdminKeepChangeListFiltersTests):
    """Re-run the preserved-filters tests against the second admin site."""
    admin_site = site2
@override_settings(ROOT_URLCONF='admin_views.urls')
class TestLabelVisibility(TestCase):
    """ #11277 -Labels of hidden fields in admin were not hidden. """
    @classmethod
    def setUpTestData(cls):
        cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')
    def setUp(self):
        self.client.force_login(self.superuser)
    def test_all_fields_visible(self):
        """Model with no hidden fields renders every field and the field line."""
        response = self.client.get(reverse('admin:admin_views_emptymodelvisible_add'))
        self.assert_fieldline_visible(response)
        self.assert_field_visible(response, 'first')
        self.assert_field_visible(response, 'second')
    def test_all_fields_hidden(self):
        """Model with all fields hidden hides both fields and the field line."""
        response = self.client.get(reverse('admin:admin_views_emptymodelhidden_add'))
        self.assert_fieldline_hidden(response)
        self.assert_field_hidden(response, 'first')
        self.assert_field_hidden(response, 'second')
    def test_mixin(self):
        """Mixed model: the field line shows, only the hidden field is hidden."""
        response = self.client.get(reverse('admin:admin_views_emptymodelmixin_add'))
        self.assert_fieldline_visible(response)
        self.assert_field_hidden(response, 'first')
        self.assert_field_visible(response, 'second')
    # Helper assertions on the rendered fieldBox / form-row markup.
    def assert_field_visible(self, response, field_name):
        self.assertContains(response, '<div class="fieldBox field-%s">' % field_name)
    def assert_field_hidden(self, response, field_name):
        self.assertContains(response, '<div class="fieldBox field-%s hidden">' % field_name)
    def assert_fieldline_visible(self, response):
        self.assertContains(response, '<div class="form-row field-first field-second">')
    def assert_fieldline_hidden(self, response):
        self.assertContains(response, '<div class="form-row hidden')
@override_settings(ROOT_URLCONF='admin_views.urls')
class AdminViewOnSiteTests(TestCase):
    """Tests for the `view_on_site` ModelAdmin option and inline validation."""
    @classmethod
    def setUpTestData(cls):
        cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')
        cls.s1 = State.objects.create(name='New York')
        cls.s2 = State.objects.create(name='Illinois')
        cls.s3 = State.objects.create(name='California')
        cls.c1 = City.objects.create(state=cls.s1, name='New York')
        cls.c2 = City.objects.create(state=cls.s2, name='Chicago')
        cls.c3 = City.objects.create(state=cls.s3, name='San Francisco')
        cls.r1 = Restaurant.objects.create(city=cls.c1, name='Italian Pizza')
        cls.r2 = Restaurant.objects.create(city=cls.c1, name='Boulevard')
        cls.r3 = Restaurant.objects.create(city=cls.c2, name='Chinese Dinner')
        cls.r4 = Restaurant.objects.create(city=cls.c2, name='Angels')
        cls.r5 = Restaurant.objects.create(city=cls.c2, name='Take Away')
        cls.r6 = Restaurant.objects.create(city=cls.c3, name='The Unknown Restaurant')
        cls.w1 = Worker.objects.create(work_at=cls.r1, name='Mario', surname='Rossi')
        cls.w2 = Worker.objects.create(work_at=cls.r1, name='Antonio', surname='Bianchi')
        cls.w3 = Worker.objects.create(work_at=cls.r1, name='John', surname='Doe')
    def setUp(self):
        self.client.force_login(self.superuser)
    def test_add_view_form_and_formsets_run_validation(self):
        """
        Issue #20522
        Verifying that if the parent form fails validation, the inlines also
        run validation even if validation is contingent on parent form data.
        Also, assertFormError() and assertFormsetError() is usable for admin
        forms and formsets.
        """
        # The form validation should fail because 'some_required_info' is
        # not included on the parent form, and the family_name of the parent
        # does not match that of the child
        post_data = {
            'family_name': 'Test1',
            'dependentchild_set-TOTAL_FORMS': '1',
            'dependentchild_set-INITIAL_FORMS': '0',
            'dependentchild_set-MAX_NUM_FORMS': '1',
            'dependentchild_set-0-id': '',
            'dependentchild_set-0-parent': '',
            'dependentchild_set-0-family_name': 'Test2',
        }
        response = self.client.post(reverse('admin:admin_views_parentwithdependentchildren_add'), post_data)
        self.assertFormError(response, 'adminform', 'some_required_info', ['This field is required.'])
        msg = "The form 'adminform' in context 0 does not contain the non-field error 'Error'"
        with self.assertRaisesMessage(AssertionError, msg):
            self.assertFormError(response, 'adminform', None, ['Error'])
        self.assertFormsetError(
            response, 'inline_admin_formset', 0, None,
            ['Children must share a family name with their parents in this contrived test case']
        )
        msg = "The formset 'inline_admin_formset' in context 12 does not contain any non-form errors."
        with self.assertRaisesMessage(AssertionError, msg):
            self.assertFormsetError(response, 'inline_admin_formset', None, None, ['Error'])
    def test_change_view_form_and_formsets_run_validation(self):
        """
        Issue #20522
        Verifying that if the parent form fails validation, the inlines also
        run validation even if validation is contingent on parent form data
        """
        pwdc = ParentWithDependentChildren.objects.create(some_required_info=6, family_name='Test1')
        # The form validation should fail because 'some_required_info' is
        # not included on the parent form, and the family_name of the parent
        # does not match that of the child
        post_data = {
            'family_name': 'Test2',
            'dependentchild_set-TOTAL_FORMS': '1',
            'dependentchild_set-INITIAL_FORMS': '0',
            'dependentchild_set-MAX_NUM_FORMS': '1',
            'dependentchild_set-0-id': '',
            'dependentchild_set-0-parent': str(pwdc.id),
            'dependentchild_set-0-family_name': 'Test1',
        }
        response = self.client.post(
            reverse('admin:admin_views_parentwithdependentchildren_change', args=(pwdc.id,)), post_data
        )
        self.assertFormError(response, 'adminform', 'some_required_info', ['This field is required.'])
        self.assertFormsetError(
            response, 'inline_admin_formset', 0, None,
            ['Children must share a family name with their parents in this contrived test case']
        )
    def test_check(self):
        "The view_on_site value is either a boolean or a callable"
        try:
            admin = CityAdmin(City, AdminSite())
            CityAdmin.view_on_site = True
            self.assertEqual(admin.check(), [])
            CityAdmin.view_on_site = False
            self.assertEqual(admin.check(), [])
            CityAdmin.view_on_site = lambda obj: obj.get_absolute_url()
            self.assertEqual(admin.check(), [])
            CityAdmin.view_on_site = []
            self.assertEqual(admin.check(), [
                Error(
                    "The value of 'view_on_site' must be a callable or a boolean value.",
                    obj=CityAdmin,
                    id='admin.E025',
                ),
            ])
        finally:
            # Restore the original values for the benefit of other tests.
            CityAdmin.view_on_site = True
    def test_false(self):
        "The 'View on site' button is not displayed if view_on_site is False"
        response = self.client.get(reverse('admin:admin_views_restaurant_change', args=(self.r1.pk,)))
        content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
        self.assertNotContains(response, reverse('admin:view_on_site', args=(content_type_pk, 1)))
    def test_true(self):
        "The default behavior is followed if view_on_site is True"
        response = self.client.get(reverse('admin:admin_views_city_change', args=(self.c1.pk,)))
        content_type_pk = ContentType.objects.get_for_model(City).pk
        self.assertContains(response, reverse('admin:view_on_site', args=(content_type_pk, self.c1.pk)))
    def test_callable(self):
        "The right link is displayed if view_on_site is a callable"
        response = self.client.get(reverse('admin:admin_views_worker_change', args=(self.w1.pk,)))
        self.assertContains(response, '"/worker/%s/%s/"' % (self.w1.surname, self.w1.name))
    def test_missing_get_absolute_url(self):
        "None is returned if model doesn't have get_absolute_url"
        model_admin = ModelAdmin(Worker, None)
        self.assertIsNone(model_admin.get_view_on_site_url(Worker()))
@override_settings(ROOT_URLCONF='admin_views.urls')
class InlineAdminViewOnSiteTest(TestCase):
    """Tests for the `view_on_site` option on inline model admins."""
    @classmethod
    def setUpTestData(cls):
        cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')
        cls.s1 = State.objects.create(name='New York')
        cls.s2 = State.objects.create(name='Illinois')
        cls.s3 = State.objects.create(name='California')
        cls.c1 = City.objects.create(state=cls.s1, name='New York')
        cls.c2 = City.objects.create(state=cls.s2, name='Chicago')
        cls.c3 = City.objects.create(state=cls.s3, name='San Francisco')
        cls.r1 = Restaurant.objects.create(city=cls.c1, name='Italian Pizza')
        cls.r2 = Restaurant.objects.create(city=cls.c1, name='Boulevard')
        cls.r3 = Restaurant.objects.create(city=cls.c2, name='Chinese Dinner')
        cls.r4 = Restaurant.objects.create(city=cls.c2, name='Angels')
        cls.r5 = Restaurant.objects.create(city=cls.c2, name='Take Away')
        cls.r6 = Restaurant.objects.create(city=cls.c3, name='The Unknown Restaurant')
        cls.w1 = Worker.objects.create(work_at=cls.r1, name='Mario', surname='Rossi')
        cls.w2 = Worker.objects.create(work_at=cls.r1, name='Antonio', surname='Bianchi')
        cls.w3 = Worker.objects.create(work_at=cls.r1, name='John', surname='Doe')
    def setUp(self):
        self.client.force_login(self.superuser)
    def test_false(self):
        "The 'View on site' button is not displayed if view_on_site is False"
        response = self.client.get(reverse('admin:admin_views_state_change', args=(self.s1.pk,)))
        content_type_pk = ContentType.objects.get_for_model(City).pk
        self.assertNotContains(response, reverse('admin:view_on_site', args=(content_type_pk, self.c1.pk)))
    def test_true(self):
        "The 'View on site' button is displayed if view_on_site is True"
        response = self.client.get(reverse('admin:admin_views_city_change', args=(self.c1.pk,)))
        content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
        self.assertContains(response, reverse('admin:view_on_site', args=(content_type_pk, self.r1.pk)))
    def test_callable(self):
        "The right link is displayed if view_on_site is a callable"
        response = self.client.get(reverse('admin:admin_views_restaurant_change', args=(self.r1.pk,)))
        self.assertContains(response, '"/worker_inline/%s/%s/"' % (self.w1.surname, self.w1.name))
@override_settings(ROOT_URLCONF='admin_views.urls')
class GetFormsetsWithInlinesArgumentTest(TestCase):
"""
#23934 - When adding a new model instance in the admin, the 'obj' argument
of get_formsets_with_inlines() should be None. When changing, it should be
equal to the existing model instance.
The GetFormsetsArgumentCheckingAdmin ModelAdmin throws an exception
if obj is not None during add_view or obj is None during change_view.
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='<PASSWORD>', email='<EMAIL>')
def setUp(self):
self.client.force_login(self.superuser)
def test_explicitly_provided_pk(self):
post_data = {'name': '1'}
response = self.client.post(reverse('admin:admin_views_explicitlyprovidedpk_add'), post_data)
self.assertEqual(response.status_code, 302)
post_data = {'name': '2'}
response = self.client.post(reverse('admin:admin_views_explicitlyprovidedpk_change', args=(1,)), post_data)
self.assertEqual(response.status_code, 302)
def test_implicitly_generated_pk(self):
post_data = {'name': '1'}
response = self.client.post(reverse('admin:admin_views_implicitlygeneratedpk_add'), post_data)
self.assertEqual(response.status_code, 302)
post_data = {'name': '2'}
response | |
# File: tests/test_dumper.py
# -*- coding: utf-8 -*-
# Zinc dumping and parsing module
# See the accompanying LICENSE Apache V2.0 file.
# (C) 2016 VRT Systems
# (C) 2021 Engie Digital
#
# vim: set ts=4 sts=4 et tw=78 sw=4 si:
import datetime
import json
from csv import reader
import pytz
import haystackapi
from haystackapi import dump_scalar
from .test_parser import SIMPLE_EXAMPLE_ZINC, SIMPLE_EXAMPLE_JSON, \
METADATA_EXAMPLE_JSON, SIMPLE_EXAMPLE_CSV, METADATA_EXAMPLE_CSV
# The metadata example is a little different, as we generate the grid without
# spaces around the commas.
# Expected ZINC text for the grid built by make_metadata_grid().
METADATA_EXAMPLE = '''ver:"2.0" database:"test" dis:"Site Energy Summary"
siteName dis:"Sites",val dis:"Value" unit:"kW"
"Site 1",356.214kW
"Site 2",463.028kW
'''
def make_simple_grid(version=haystackapi.VER_2_0):
    """Build a two-row grid with firstName/bday columns for the dump tests."""
    grid = haystackapi.Grid(version=version)
    grid.column['firstName'] = {}
    grid.column['bday'] = {}
    people = [
        ('Jack', datetime.date(1973, 7, 23)),
        ('Jill', datetime.date(1975, 11, 15)),
    ]
    grid.extend([{'firstName': name, 'bday': bday} for name, bday in people])
    return grid
def test_simple_zinc():
    """ZINC dump of the simple grid matches the reference string."""
    dumped = haystackapi.dump(make_simple_grid())
    assert dumped == SIMPLE_EXAMPLE_ZINC
def test_simple_json():
    """JSON dump of the simple grid parses back to the reference dict."""
    dumped = haystackapi.dump(make_simple_grid(), mode=haystackapi.MODE_JSON)
    assert json.loads(dumped) == SIMPLE_EXAMPLE_JSON
def test_simple_csv():
    """CSV dump of the simple grid is well-formed and matches the reference."""
    dumped = haystackapi.dump(make_simple_grid(), mode=haystackapi.MODE_CSV)
    assert list(reader(dumped.splitlines()))
    assert dumped == SIMPLE_EXAMPLE_CSV
def make_metadata_grid(version=haystackapi.VER_2_0):
    """Build a grid with grid-level and column-level metadata for dump tests."""
    grid = haystackapi.Grid(version=version)
    grid.metadata['database'] = 'test'
    grid.metadata['dis'] = 'Site Energy Summary'
    grid.column['siteName'] = {'dis': 'Sites'}
    val_meta = haystackapi.MetadataObject()
    val_meta['dis'] = 'Value'
    val_meta['unit'] = 'kW'
    grid.column['val'] = val_meta
    grid.extend([
        {'siteName': 'Site 1', 'val': haystackapi.Quantity(356.214, 'kW')},
        {'siteName': 'Site 2', 'val': haystackapi.Quantity(463.028, 'kW')},
    ])
    return grid
def test_metadata_zinc():
    """ZINC dump of the metadata grid matches the local reference string."""
    dumped = haystackapi.dump(make_metadata_grid())
    assert dumped == METADATA_EXAMPLE
def test_metadata_json():
    """JSON dump of the metadata grid parses back to the reference dict."""
    dumped = haystackapi.dump(make_metadata_grid(), mode=haystackapi.MODE_JSON)
    assert json.loads(dumped) == METADATA_EXAMPLE_JSON
def test_metadata_csv():
    """CSV dump of the metadata grid is well-formed and matches the reference."""
    dumped = haystackapi.dump(make_metadata_grid(), mode=haystackapi.MODE_CSV)
    assert list(reader(dumped.splitlines()))
    assert dumped == METADATA_EXAMPLE_CSV
def test_multi_grid_zinc():
    """Dumping a list of grids joins their ZINC forms with a newline."""
    dumped = haystackapi.dump([make_simple_grid(), make_metadata_grid()])
    assert dumped == SIMPLE_EXAMPLE_ZINC + '\n' + METADATA_EXAMPLE
def test_multi_grid_json():
    """JSON dump of a grid list yields one JSON object per grid, in order."""
    dumped = haystackapi.dump([make_simple_grid(), make_metadata_grid()],
                              mode=haystackapi.MODE_JSON)
    parsed = json.loads(dumped)
    assert parsed[0] == SIMPLE_EXAMPLE_JSON
    assert parsed[1] == METADATA_EXAMPLE_JSON
def test_multi_grid_csv():
    """CSV dump of a grid list concatenates both grids' CSV forms."""
    grids = [make_simple_grid(), make_metadata_grid()]
    grid_csv = haystackapi.dump(grids, mode=haystackapi.MODE_CSV)
    assert list(reader(grid_csv.splitlines()))
    assert grid_csv == '''firstName,bday
"Jack",1973-07-23
"Jill",1975-11-15
siteName,val
"Site 1",356.214kW
"Site 2",463.028kW
'''
def make_grid_meta(version=haystackapi.VER_2_0):
    """Build a grid whose grid metadata exercises each scalar metadata type."""
    grid = haystackapi.Grid(version=version)
    for key, value in (
        ('aString', 'aValue'),
        ('aNumber', 3.14159),
        ('aNull', None),
        ('aMarker', haystackapi.MARKER),
        ('aQuantity', haystackapi.Quantity(123, 'Hz')),
    ):
        grid.metadata[key] = value
    grid.column['empty'] = {}
    return grid
def test_grid_meta():
    """ZINC dump renders every grid-metadata scalar on the version line."""
    grid_str = haystackapi.dump(make_grid_meta())
    assert grid_str == '''ver:"2.0" aString:"aValue" aNumber:3.14159 aNull:N aMarker aQuantity:123Hz
empty
'''
def test_grid_meta_json():
    """JSON dump encodes grid metadata with the typed s:/n:/m: prefixes."""
    grid_json = json.loads(haystackapi.dump(make_grid_meta(),
                                            mode=haystackapi.MODE_JSON))
    assert grid_json == {
        'meta': {
            'ver': '2.0',
            'aString': 's:aValue',
            'aNumber': 'n:3.141590',
            'aNull': None,
            'aMarker': 'm:',
            'aQuantity': 'n:123.000000 Hz',
        },
        'cols': [
            {'name': 'empty'},
        ],
        'rows': [],
    }
def test_grid_meta_csv():
    """Grid metadata is not emitted in CSV mode -- only the header row."""
    dumped = haystackapi.dump(make_grid_meta(), mode=haystackapi.MODE_CSV)
    assert list(reader(dumped.splitlines()))
    assert dumped == 'empty\n'
def make_col_meta(version=haystackapi.VER_2_0):
    """Build a grid whose single column carries every scalar metadata type."""
    grid = haystackapi.Grid(version=version)
    col_meta = haystackapi.MetadataObject()
    for key, value in (
        ('aString', 'aValue'),
        ('aNumber', 3.14159),
        ('aNull', None),
        ('aMarker', haystackapi.MARKER),
        ('aQuantity', haystackapi.Quantity(123, 'Hz')),
    ):
        col_meta[key] = value
    grid.column['empty'] = col_meta
    return grid
def test_col_meta_zinc():
    """ZINC dump renders column metadata scalars on the column line."""
    grid_str = haystackapi.dump(make_col_meta(), mode=haystackapi.MODE_ZINC)
    assert grid_str == '''ver:"2.0"
empty aString:"aValue" aNumber:3.14159 aNull:N aMarker aQuantity:123Hz
'''
def test_col_meta_json():
    """JSON dump encodes column metadata with the typed s:/n:/m: prefixes."""
    grid_json = json.loads(haystackapi.dump(make_col_meta(),
                                            mode=haystackapi.MODE_JSON))
    assert grid_json == {
        'meta': {
            'ver': '2.0',
        },
        'cols': [
            {'name': 'empty',
             'aString': 's:aValue',
             'aNumber': 'n:3.141590',
             'aNull': None,
             'aMarker': 'm:',
             'aQuantity': 'n:123.000000 Hz',
             },
        ],
        'rows': [],
    }
def test_col_meta_csv():
    """CSV dump drops column metadata, leaving only the header row."""
    dumped = haystackapi.dump(make_col_meta(), mode=haystackapi.MODE_CSV)
    # the dump must itself be parseable as CSV
    assert list(reader(dumped.splitlines()))
    assert dumped == 'empty\n'
def test_data_types_zinc_v2():
    """Dump one row per supported scalar type and compare against the v2.0 ZINC reference."""
    # (comment, value) pairs, one per data type the v2.0 ZINC encoder supports
    cases = [
        ('A null value', None),
        ('A marker', haystackapi.MARKER),
        ('A "remove" object', haystackapi.REMOVE),
        ('A boolean, indicating False', False),
        ('A boolean, indicating True', True),
        ('A reference, without value', haystackapi.Ref('a-ref')),
        ('A reference, with value', haystackapi.Ref('a-ref', 'a value')),
        ('A binary blob', haystackapi.Bin('text/plain')),
        ('A quantity', haystackapi.Quantity(500, 'miles')),
        ('A quantity without unit', haystackapi.Quantity(500, None)),
        ('A coordinate', haystackapi.Coordinate(-27.4725, 153.003)),
        ('A URI', haystackapi.Uri('http://www.example.com#`unicode:\u1234\u5678`')),
        ('A string', 'This is a test\n'
                     'Line two of test\n'
                     '\tIndented with "quotes", \\backslashes\\ and '
                     'Unicode characters: \u1234\u5678 and a $ dollar sign'),
        ('A date', datetime.date(2016, 1, 13)),
        ('A time', datetime.time(7, 51, 43, microsecond=12345)),
        ('A timestamp (non-UTC)', pytz.timezone('Europe/Berlin').localize(
            datetime.datetime(2016, 1, 13, 7, 51, 42, 12345))),
        ('A timestamp (UTC)', pytz.timezone('UTC').localize(
            datetime.datetime(2016, 1, 13, 7, 51, 42, 12345))),
    ]
    grid = haystackapi.Grid(version=haystackapi.VER_2_0)
    grid.column['comment'] = {}
    grid.column['value'] = {}
    grid.extend([{'comment': comment, 'value': value} for comment, value in cases])
    grid_str = haystackapi.dump(grid, mode=haystackapi.MODE_ZINC)
    ref_str = '''ver:"2.0"
comment,value
"A null value",N
"A marker",M
"A \\"remove\\" object",R
"A boolean, indicating False",F
"A boolean, indicating True",T
"A reference, without value",@a-ref
"A reference, with value",@a-ref "a value"
"A binary blob",Bin(text/plain)
"A quantity",500miles
"A quantity without unit",500
"A coordinate",C(-27.472500,153.003000)
"A URI",`http://www.example.com#\\`unicode:\\u1234\\u5678\\``
"A string","This is a test\\nLine two of test\\n\\tIndented with \\"quotes\\", \\\\backslashes\\\\ and Unicode characters: \\u1234\\u5678 and a \\$ dollar sign"
"A date",2016-01-13
"A time",07:51:43.012345
"A timestamp (non-UTC)",2016-01-13T07:51:42.012345+01:00 Berlin
"A timestamp (UTC)",2016-01-13T07:51:42.012345+00:00 UTC
'''
    assert grid_str == ref_str
def test_data_types_json_v2():
    """Dump one row per supported scalar type and compare against the v2.0 JSON reference."""
    # (comment, value) pairs, one per data type the v2.0 JSON encoder supports
    cases = [
        ('A null value', None),
        ('A marker', haystackapi.MARKER),
        ('A remove (2.0 version)', haystackapi.REMOVE),
        ('A boolean, indicating False', False),
        ('A boolean, indicating True', True),
        ('A reference, without value', haystackapi.Ref('a-ref')),
        ('A reference, with value', haystackapi.Ref('a-ref', 'a value')),
        ('A binary blob', haystackapi.Bin('text/plain')),
        ('A quantity', haystackapi.Quantity(500, 'miles')),
        ('A quantity without unit', haystackapi.Quantity(500, None)),
        ('A coordinate', haystackapi.Coordinate(-27.4725, 153.003)),
        ('A URI', haystackapi.Uri('http://www.example.com')),
        ('A string', 'This is a test\n'
                     'Line two of test\n'
                     '\tIndented with "quotes" and \\backslashes\\'),
        ('A date', datetime.date(2016, 1, 13)),
        ('A time', datetime.time(7, 51, 43, microsecond=12345)),
        ('A timestamp (non-UTC)', pytz.timezone('Europe/Berlin').localize(
            datetime.datetime(2016, 1, 13, 7, 51, 42, 12345))),
        ('A timestamp (UTC)', pytz.timezone('UTC').localize(
            datetime.datetime(2016, 1, 13, 7, 51, 42, 12345))),
    ]
    grid = haystackapi.Grid(version=haystackapi.VER_2_0)
    grid.column['comment'] = {}
    grid.column['value'] = {}
    grid.extend([{'comment': comment, 'value': value} for comment, value in cases])
    grid_json = json.loads(haystackapi.dump(grid, mode=haystackapi.MODE_JSON))
    assert grid_json == {
        'meta': {'ver': '2.0'},
        'cols': [
            {'name': 'comment'},
            {'name': 'value'},
        ],
        'rows': [
            {'comment': 's:A null value',
             'value': None},
            {'comment': 's:A marker',
             'value': 'm:'},
            {'comment': 's:A remove (2.0 version)',
             'value': 'x:'},
            {'comment': 's:A boolean, indicating False',
             'value': False},
            {'comment': 's:A boolean, indicating True',
             'value': True},
            {'comment': 's:A reference, without value',
             'value': 'r:a-ref'},
            {'comment': 's:A reference, with value',
             'value': 'r:a-ref a value'},
            {'comment': 's:A binary blob',
             'value': 'b:text/plain'},
            {'comment': 's:A quantity',
             'value': 'n:500.000000 miles'},
            {'comment': 's:A quantity without unit',
             'value': 'n:500.000000'},
            {'comment': 's:A coordinate',
             'value': 'c:-27.472500,153.003000'},
            {'comment': 's:A URI',
             'value': 'u:http://www.example.com'},
            {'comment': 's:A string',
             'value': 's:This is a test\n'
                      'Line two of test\n'
                      '\tIndented with "quotes" '
                      'and \\backslashes\\'},
            {'comment': 's:A date',
             'value': 'd:2016-01-13'},
            {'comment': 's:A time',
             'value': 'h:07:51:43.012345'},
            {'comment': 's:A timestamp (non-UTC)',
             'value': 't:2016-01-13T07:51:42.012345+01:00 Berlin'},
            {'comment': 's:A timestamp (UTC)',
             'value': 't:2016-01-13T07:51:42.012345+00:00 UTC'},
        ],
    }
def test_data_types_csv_v2():
    """Dump one row per supported scalar type and compare against the v2.0 CSV reference."""
    # (comment, value) pairs, one per data type the v2.0 CSV encoder supports
    cases = [
        ('A null value', None),
        ('A marker', haystackapi.MARKER),
        ('A remove (2.0 version)', haystackapi.REMOVE),
        ('A boolean, indicating False', False),
        ('A boolean, indicating True', True),
        ('A reference, without value', haystackapi.Ref('a-ref')),
        ('A reference, with value', haystackapi.Ref('a-ref', 'a value')),
        ('A binary blob', haystackapi.Bin('text/plain')),
        ('A quantity', haystackapi.Quantity(500, 'miles')),
        ('A quantity without unit', haystackapi.Quantity(500, None)),
        ('A coordinate', haystackapi.Coordinate(-27.4725, 153.003)),
        ('A URI', haystackapi.Uri('http://www.example.com')),
        ('A string', 'This is a test\n'
                     'Line two of test\n'
                     '\tIndented with "quotes" and \\backslashes\\'),
        ('A date', datetime.date(2016, 1, 13)),
        ('A time', datetime.time(7, 51, 43, microsecond=12345)),
        ('A timestamp (non-UTC)', pytz.timezone('Europe/Berlin').localize(
            datetime.datetime(2016, 1, 13, 7, 51, 42, 12345))),
        ('A timestamp (UTC)', pytz.timezone('UTC').localize(
            datetime.datetime(2016, 1, 13, 7, 51, 42, 12345))),
    ]
    grid = haystackapi.Grid(version=haystackapi.VER_2_0)
    grid.column['comment'] = {}
    grid.column['value'] = {}
    grid.extend([{'comment': comment, 'value': value} for comment, value in cases])
    grid_csv = haystackapi.dump(grid, mode=haystackapi.MODE_CSV)
    # the dump must itself be parseable as CSV
    assert list(reader(grid_csv.splitlines()))
    assert grid_csv == '''comment,value
"A null value",
"A marker",\u2713
"A remove (2.0 version)",R
"A boolean, indicating False",false
"A boolean, indicating True",true
"A reference, without value",@a-ref
"A reference, with value",@a-ref a value
"A binary blob",Bin(text/plain)
"A quantity",500miles
"A quantity without unit",500
"A coordinate","C(-27.472500,153.003000)"
"A URI",`http://www.example.com`
"A string","This is a test\nLine two of test\n\tIndented with ""quotes"" and \\backslashes\\"
"A date",2016-01-13
"A time",07:51:43.012345
"A timestamp (non-UTC)",2016-01-13T07:51:42.012345+01:00
"A timestamp (UTC)",2016-01-13T07:51:42.012345+00:00
'''
def test_data_types_zinc_v3():
grid = haystackapi.Grid(version=haystackapi.VER_3_0)
grid.column['comment'] = {}
grid.column['value'] = {}
grid.extend([
{
'comment': 'A NA',
'value': haystackapi.NA,
},
| |
<filename>hst.py
from __future__ import division, print_function, absolute_import
from . import data_structures
from astropy.io import fits as _fits
import astropy.time as _time
import astropy.units as _u
import astropy.constants as _const
import astropy.table as _tbl
from time import strftime as _strftime
import numpy as _np
import scipy.interpolate as _interp
import os as _os
import re as _re
from . import utils as _utils
from functools import reduce
# TODO: test with COS NUV data
def readtagset(directory_or_tagfiles, traceloc='stsci', fluxed='tag_vs_x1d', divvied=True, clipends=True, flux_bins=2.0,
               a_or_b='both'):
    """
    Read a set of HST time-tag exposures into a single Photons object.

    Parameters
    ----------
    directory_or_tagfiles : str or list of str
        A directory to search for tag/x1d file pairs, or an explicit list of
        tag files (matching x1d names are then derived by filename substitution).
    traceloc
        Passed through to `readtag` (trace-location method).
    fluxed
        Passed through to `readtag` (fluxing method).
    divvied : bool
        Passed through to `readtag` (divvy events into signal/background).
    clipends : bool
        Passed through to `readtag` (clip bad wavelength ranges at the ends).
    flux_bins
        Passed through to `readtag`.
    a_or_b : {'a'|'b'|'both'}
        For COS FUV data with separate segment-A/segment-B corrtag files,
        which segment(s) to read.

    Returns
    -------
    Photons object
    """
    if type(directory_or_tagfiles) is str:
        # find all the tag files and matching x1d files
        tagfiles, x1dfiles = obs_files(directory_or_tagfiles)
    else:
        tagfiles = directory_or_tagfiles
        # derive each x1d filename from its tag filename, e.g. 'corrtag_a' -> 'x1d'
        x1dfiles = [_re.sub('(corr)?tag(_[ab])?', 'x1d', tf) for tf in tagfiles]

    def readfiles(tagfiles, x1dfiles):
        # Read each tag/x1d pair and concatenate the Photons objects in time order.
        # start by parsing photons from first observation
        photons = readtag(tagfiles[0], x1dfiles[0], traceloc, fluxed, divvied, clipends, flux_bins=flux_bins)
        # now prepend/append other observations (if available) in order of time
        if len(tagfiles) > 1:
            for tagfile, x1dfile in zip(tagfiles[1:], x1dfiles[1:]):
                photons2 = readtag(tagfile, x1dfile, traceloc, fluxed, divvied, clipends, flux_bins=flux_bins)
                # add in order of time
                if photons2.time_datum < photons.time_datum:
                    photons = photons2 + photons
                else:
                    photons = photons + photons2
        return photons

    # COS FUV exposures come as separate segment-A and segment-B corrtag files
    if any([('corrtag_b' in tf) for tf in tagfiles]):
        file_pairs = list(zip(tagfiles, x1dfiles))
        if a_or_b in ['a', 'b']:
            # keep only files for the requested segment
            filter_ab = lambda seg: [g for g in file_pairs if 'corrtag_' + seg in g[0]]
            file_pairs = filter_ab(a_or_b)
            return readfiles(*list(zip(*file_pairs)))
        elif a_or_b == 'both':
            # read both segments, then merge observations taken at the same time
            p = readfiles(*list(zip(*file_pairs)))
            p.merge_like_observations()
            return p
        else:
            raise ValueError("a_or_b should be one of ['a', 'b', 'both']")
    else:
        return readfiles(tagfiles, x1dfiles)
def readtag(tagfile, x1dfile, traceloc='stsci', fluxed='tag_vs_x1d', divvied=True, clipends=True, flux_bins=2.0):
    """
    Read a single HST time-tag exposure (STIS or COS) into a Photons object.

    Parameters
    ----------
    tagfile : str
        Path of the tag/corrtag FITS file.
    x1dfile : str or None
        Path of the matching x1d FITS file. Required for fluxing, divvying,
        or traceloc='stsci'.
    traceloc : {'stsci'|int|float|None}
        Location of the spectral trace. 'stsci' uses the STScI x1d trace.
        None/'none'/False is treated as 0.
    fluxed : {'tag_vs_x1d'|...|None}
        Method for computing effective areas (fluxing). None/'none'/False
        disables fluxing.
    divvied : bool
        Divvy events into signal and background regions (implied by fluxing).
    clipends : bool
        Clip wavelength ranges where the x1d flags indicate bad data at the ends.
    flux_bins : float
        Passed through to the effective-area computation.

    Returns
    -------
    Photons object

    Raises
    ------
    ValueError
        For inconsistent combinations of traceloc/fluxed/divvied/x1dfile.
    NotImplementedError
        For HST instruments other than STIS and COS.
    """
    # open tag file
    tag = _fits.open(tagfile)

    # is it a STIS or COS observation?
    stis = _isstis(tag)
    cos = _iscos(tag)

    if traceloc in [None, 'none', False]:
        traceloc = 0.0
    fluxit = fluxed not in ['none', None, False]
    # fluxing requires knowing which events are signal, so it implies divvying
    divvyit = divvied or fluxit
    if divvyit and traceloc == 0.0:
        raise ValueError('Cannot automatically divvy events into signal and background regions if a trace '
                         'location is not specified or set to 0.')
    if traceloc != 'stsci' and fluxit:
        raise ValueError('Proper computation of effective area of photon wavelengths (and thus flux) requires '
                         'that traceloc==\'stsci\'.')
    if x1dfile is None:
        if fluxed or divvied:
            raise ValueError('Cannot flux or divvy the events if no x1d is available.')
        if traceloc == 'stsci':
            raise ValueError('If x1d is not provided, the STScI trace location (traceloc) is not known.')

    # open x1d file
    x1d = _fits.open(x1dfile) if x1dfile is not None else None

    # empty photons object
    photons = data_structures.Photons()

    # parse observation metadata
    hdr = tag[0].header + tag[1].header
    if x1d is not None:
        hdr += x1d[1].header
    photons.obs_metadata = [hdr]

    # parse observation time datum
    photons.time_datum = _time.Time(hdr['expstart'], format='mjd')

    # parse observation time range
    gti = tag['GTI'].data
    time_ranges = _np.array([gti['start'], gti['stop']]).T
    photons.obs_times = [time_ranges]

    # parse observation wavelength ranges.
    if x1d is None:
        if stis:
            wave_ranges = _np.array([[hdr['minwave'], hdr['maxwave']]])
        if cos:
            # fall back to the range of nonzero event wavelengths
            w = tag[1].data['wavelength']
            nonzero = w > 0
            wave_ranges = _np.array([[_np.min(w[nonzero]), _np.max(w[nonzero])]])
    else:
        # if x1d is available, areas where every pixel has at least one flag matching clipflags will be
        # clipped. for STIS almost every pixel is flagged with bits 2 and 9, so these are ignored
        clipflags = 2 + 128 + 256 if stis else 8 + 128 + 256
        wave_ranges = good_waverange(x1d, clipends=clipends, clipflags=clipflags)
    photons.obs_bandpasses = [wave_ranges]

    if cos:
        # keep only the wavelength range of the appropriate segment if FUV detector
        if hdr['detector'] == 'FUV':
            i = 0 if hdr['segment'] == 'FUVA' else 1
            wave_ranges = wave_ranges[[i], :]
            photons.obs_bandpasses[0] = photons.obs_bandpasses[0][[i], :]
        # if hdr['detector'] == 'NUV':
        #     raise NotImplementedError('Gotta do some work on this. Fluxing is not working well.')

        # parse photons. I'm going to use sneaky list comprehensions and such. sorry. this is nasty because
        # supposedly stsci sometimes puts tags into multiple 'EVENTS' extensions
        t, w, e, q, ph, y, o = _get_photon_info_COS(tag, x1d, traceloc)
        photons.photons = _tbl.Table([t, w, e, q, ph, y, o], names=['t', 'w', 'e', 'q', 'pulse_height', 'y', 'o'])

        # cull anomalous events (the dq column is stored under the short name 'q')
        bad_dq = 64 | 512 | 2048
        bad = (_np.bitwise_and(photons['q'], bad_dq) > 0)
        photons.photons = photons.photons[~bad]

        # reference photons to trace location(s) and divvy into signal and background regions
        if divvyit:
            if hdr['detector'] == 'NUV':
                limits = [stsci_extraction_ranges(x1d, seg) for seg in ['A', 'B', 'C']]
                ysignal, yback = list(zip(*limits))
                # round to avoid spurious mismatches from values like 0.5 vs 0.49999999...
                ysignal, yback = [_np.round(a, 2) for a in (ysignal, yback)]
                list(map(photons.divvy, ysignal, yback))
            elif hdr['detector'] == 'FUV':
                seg = hdr['segment']
                ysignal, yback = stsci_extraction_ranges(x1d, seg)
                ysignal, yback = [_np.round(a, 2) for a in (ysignal, yback)]
                photons.divvy(ysignal, yback)

        # add effective area to photons
        if fluxit:
            if hdr['detector'] == 'FUV':
                segments = [0] if hdr['segment'] == 'FUVA' else [1]
            else:
                segments = [0, 1, 2]
            Aeff = _np.zeros_like(photons['t'])
            for i in segments:
                try:
                    Aeff_i = _get_Aeff_x1d(photons, x1d, x1d_row=i, order=i, method=fluxed, flux_bins=flux_bins)
                except _utils.LowSNError:
                    raise _utils.LowSNError('S/N is too low to flux the counts for {} in segment {}.'
                                            ''.format(x1dfile, 'ABC'[i]))
                Aeff[photons['o'] == i] = Aeff_i
            photons['a'] = Aeff

        # merge orders for FUV
        if hdr['detector'] == 'FUV':
            photons.merge_orders()

    elif stis:
        # nothing comes for free with STIS
        time, wave, xdisp, order, dq = _get_photon_info_STIS(tag, x1d, traceloc)
        photons.photons = _tbl.Table([time, wave, xdisp, order, dq], names=['t', 'w', 'y', 'o', 'q'])

        # get number of orders and the order numbers
        Norders = x1d['sci'].header['naxis2']
        order_nos = x1d['sci'].data['sporder']

        # add signal/background column to photons
        if divvyit:
            ysignal, yback = stsci_extraction_ranges(x1d)
            for order, ys, yb in zip(order_nos, ysignal, yback):
                photons.divvy(ys, yb, order=int(order))

        # add effective area to photons
        if fluxit:
            Aeff = _np.zeros_like(photons['t'])
            for x1d_row, order in zip(list(range(Norders)), order_nos):
                Aeff_i = _get_Aeff_x1d(photons, x1d, x1d_row, order, method=fluxed, flux_bins=flux_bins)
                Aeff[photons['o'] == order] = Aeff_i
            photons['a'] = Aeff
            # FIXME: this is shoddy -- I'm trying to deal with having user-defined flux bins which don't really match
            # up with the bins of the orders and I end up with photons that don't get proper areas
            keep = _np.isfinite(photons['a'])
            photons.photons = photons.photons[keep]
    else:
        raise NotImplementedError('HST instrument {} not recognized/code not written to handle it.'
                                  ''.format(hdr['instrume']))

    # cull photons outside of wavelength and time ranges
    keep_w = (photons['w'] >= wave_ranges.min()) & (photons['w'] <= wave_ranges.max())
    keep_t = (photons['t'] >= time_ranges.min()) & (photons['t'] <= time_ranges.max())
    photons.photons = photons.photons[keep_w & keep_t]

    # add appropriate units
    photons['t'].unit = _u.s
    photons['w'].unit = _u.AA
    if 'a' in photons:
        photons['a'].unit = _u.cm**2

    tag.close()
    if x1d:
        x1d.close()

    return photons
def x2dspec(x2dfile, traceloc='max', extrsize='stsci', bksize='stsci', bkoff='stsci', x1dfile=None, fitsout=None,
overwrite=True, bkmask=0):
"""
Creates a spectrum from HST STIS (or maybe also COS?) data from HST using the x2d file provided by the default
STScI pipeline.
Parameters
----------
x2dfile : str
Path of the x2d file.
traceloc : {int|'max'|'lya'}, optional
Location of the spectral trace.
int : the midpoint pixel
'max' : use the mean y-location of the pixel with highest S/N
extrsize, bksize, bkoff : {int|'stsci'}, optional
The height of the signal extraction region, the height of the
background extraction regions, and the offset above and below the
spectral trace at which to center the background extraction regions.
'stsci' : use the value used by STScI in making the x1d (requires
x1dfile)
int : user specified value in pixels
x1dfile : str, optional if 'stsci' is not specfied for any other keyword
Path of the x1d file.
fitsout : str, optional
Path for saving a FITS file version of the spectrum.
overwrite : {True|False}, optional
Whether to overwrite the existing FITS file.
bkmask : int, optional
Data quality flags to mask the background. Background pixels that have
at least one of these flags will be discarded.
Returns
-------
spectbl : astropy table
The wavelength, flux, error, and data quality flag values of the extracted
spectrum.
Cautions
--------
Using a non-stsci extraction size will cause a systematic error | |
try:
result.success = self._handler.iterationsUntilConvergence(args.modelIds, args.tolerance)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except ServerLogicException as svEx:
msg_type = TMessageType.REPLY
result.svEx = svEx
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("iterationsUntilConvergence", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_rankModels(self, seqid, iprot, oprot):
    """Decode a rankModels RPC, dispatch to the handler, and write the reply."""
    call = rankModels_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = rankModels_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.rankModels(call.modelIds, call.metric)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx  # declared exception travels back inside the result struct
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("rankModels", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_confidenceIntervals(self, seqid, iprot, oprot):
    """Decode a confidenceIntervals RPC, dispatch to the handler, and write the reply."""
    call = confidenceIntervals_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = confidenceIntervals_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.confidenceIntervals(call.modelId, call.sigLevel)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ResourceNotFoundException as rnfEx:
        reply.rnfEx = rnfEx
    except IllegalOperationException as ioEx:
        reply.ioEx = ioEx
    except BadRequestException as brEx:
        reply.brEx = brEx
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("confidenceIntervals", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_modelsWithFeatures(self, seqid, iprot, oprot):
    """Decode a modelsWithFeatures RPC, dispatch to the handler, and write the reply."""
    call = modelsWithFeatures_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = modelsWithFeatures_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.modelsWithFeatures(call.featureNames)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("modelsWithFeatures", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_modelsDerivedFromDataFrame(self, seqid, iprot, oprot):
    """Decode a modelsDerivedFromDataFrame RPC, dispatch to the handler, and write the reply."""
    call = modelsDerivedFromDataFrame_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = modelsDerivedFromDataFrame_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.modelsDerivedFromDataFrame(call.dfId)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ResourceNotFoundException as rnfEx:
        reply.rnfEx = rnfEx
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("modelsDerivedFromDataFrame", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getProjectIds(self, seqid, iprot, oprot):
    """Decode a getProjectIds RPC, dispatch to the handler, and write the reply."""
    call = getProjectIds_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = getProjectIds_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.getProjectIds(call.keyValuePairs)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getProjectIds", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getModelIds(self, seqid, iprot, oprot):
    """Decode a getModelIds RPC, dispatch to the handler, and write the reply."""
    call = getModelIds_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = getModelIds_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.getModelIds(call.keyValuePairs)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getModelIds", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_updateProject(self, seqid, iprot, oprot):
    """Decode an updateProject RPC, dispatch to the handler, and write the reply."""
    call = updateProject_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = updateProject_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.updateProject(call.projectId, call.key, call.value)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("updateProject", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_createOrUpdateScalarField(self, seqid, iprot, oprot):
    """Decode a createOrUpdateScalarField RPC, dispatch to the handler, and write the reply."""
    call = createOrUpdateScalarField_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = createOrUpdateScalarField_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.createOrUpdateScalarField(call.modelId, call.key, call.value, call.valueType)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("createOrUpdateScalarField", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_createVectorField(self, seqid, iprot, oprot):
    """Decode a createVectorField RPC, dispatch to the handler, and write the reply."""
    call = createVectorField_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = createVectorField_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.createVectorField(call.modelId, call.vectorName, call.vectorConfig)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("createVectorField", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_updateVectorField(self, seqid, iprot, oprot):
    """Decode an updateVectorField RPC, dispatch to the handler, and write the reply."""
    call = updateVectorField_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = updateVectorField_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.updateVectorField(call.modelId, call.key, call.valueIndex, call.value, call.valueType)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("updateVectorField", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_appendToVectorField(self, seqid, iprot, oprot):
    """Decode an appendToVectorField RPC, dispatch to the handler, and write the reply."""
    call = appendToVectorField_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = appendToVectorField_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.appendToVectorField(call.modelId, call.vectorName, call.value, call.valueType)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("appendToVectorField", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getModel(self, seqid, iprot, oprot):
    """Decode a getModel RPC, dispatch to the handler, and write the reply."""
    call = getModel_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = getModel_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.getModel(call.modelId)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ResourceNotFoundException as rnfEx:
        reply.rnfEx = rnfEx
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getModel", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getRunsInExperiment(self, seqid, iprot, oprot):
    """Decode a getRunsInExperiment RPC, dispatch to the handler, and write the reply."""
    call = getRunsInExperiment_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = getRunsInExperiment_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.getRunsInExperiment(call.experimentId)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getRunsInExperiment", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getRunsAndExperimentsInProject(self, seqid, iprot, oprot):
    """Decode a getRunsAndExperimentsInProject RPC, dispatch to the handler, and write the reply."""
    call = getRunsAndExperimentsInProject_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = getRunsAndExperimentsInProject_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.getRunsAndExperimentsInProject(call.projId)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getRunsAndExperimentsInProject", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getProjectOverviews(self, seqid, iprot, oprot):
    """Decode a getProjectOverviews RPC, dispatch to the handler, and write the reply."""
    call = getProjectOverviews_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = getProjectOverviews_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.getProjectOverviews()
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getProjectOverviews", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_getExperimentRunDetails(self, seqid, iprot, oprot):
    """Decode a getExperimentRunDetails RPC, dispatch to the handler, and write the reply."""
    call = getExperimentRunDetails_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = getExperimentRunDetails_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.getExperimentRunDetails(call.experimentRunId)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except ResourceNotFoundException as rnfEx:
        reply.rnfEx = rnfEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("getExperimentRunDetails", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_originalFeatures(self, seqid, iprot, oprot):
    """Decode an originalFeatures RPC, dispatch to the handler, and write the reply."""
    call = originalFeatures_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = originalFeatures_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.originalFeatures(call.modelId)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ResourceNotFoundException as rnfEx:
        reply.rnfEx = rnfEx
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("originalFeatures", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_storeTreeModel(self, seqid, iprot, oprot):
    """Decode a storeTreeModel RPC, dispatch to the handler, and write the reply."""
    call = storeTreeModel_args()
    call.read(iprot)
    iprot.readMessageEnd()
    reply = storeTreeModel_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.storeTreeModel(call.modelId, call.model)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # transport failures and shutdown signals propagate to the server loop
    except ResourceNotFoundException as rnfEx:
        reply.rnfEx = rnfEx
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("storeTreeModel", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_storePipelineTransformEvent(self, seqid, iprot, oprot):
    """Serve one 'storePipelineTransformEvent' RPC: decode the transform
    event argument, dispatch it to the handler, and send the reply.
    """
    request = storePipelineTransformEvent_args()
    request.read(iprot)
    iprot.readMessageEnd()
    reply = storePipelineTransformEvent_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.storePipelineTransformEvent(request.te)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Never mask transport errors or interpreter shutdown.
        raise
    except InvalidExperimentRunException as ierEx:
        # Declared exceptions ride inside the result struct as a REPLY.
        reply.ierEx = ierEx
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("storePipelineTransformEvent", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_computeModelAncestry(self, seqid, iprot, oprot):
    """Serve one 'computeModelAncestry' RPC for the given model id."""
    request = computeModelAncestry_args()
    request.read(iprot)
    iprot.readMessageEnd()
    reply = computeModelAncestry_result()
    msg_type = TMessageType.REPLY
    try:
        reply.success = self._handler.computeModelAncestry(request.modelId)
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        # Never mask transport errors or interpreter shutdown.
        raise
    except ResourceNotFoundException as rnfEx:
        # Declared exceptions ride inside the result struct as a REPLY.
        reply.rnfEx = rnfEx
    except ServerLogicException as svEx:
        reply.svEx = svEx
    except Exception as ex:
        logging.exception(ex)
        msg_type = TMessageType.EXCEPTION
        reply = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("computeModelAncestry", msg_type, seqid)
    reply.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
def process_extractPipeline(self, seqid, iprot, oprot):
args = extractPipeline_args()
args.read(iprot)
iprot.readMessageEnd()
result = extractPipeline_result()
try:
result.success = self._handler.extractPipeline(args.modelId)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except ResourceNotFoundException as rnfEx:
msg_type = TMessageType.REPLY
result.rnfEx = rnfEx
except ServerLogicException as svEx:
msg_type = | |
# Source repository: sobkulir/web
# -*- coding: utf-8 -*-
import datetime
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.sites.models import Site
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
import trojsten.submit.constants as submit_constants
from trojsten.contests.models import Category, Competition, Round, Semester, Task
from trojsten.events.models import Event, EventParticipant, EventPlace, EventType
from trojsten.people.constants import SCHOOL_YEAR_END_MONTH
from trojsten.people.models import User, UserProperty, UserPropertyKey
from trojsten.rules.kms import (
COEFFICIENT_COLUMN_KEY,
KMS_ALFA,
KMS_BETA,
KMS_CAMP_TYPE,
KMS_MO_FINALS_TYPE,
KMSResultsGenerator,
KMSRules,
)
from trojsten.rules.ksp import KSP_ALL, KSP_L1, KSP_L2, KSP_L3, KSP_L4
from trojsten.rules.models import KSPLevel
from trojsten.rules.susi import SUSIResultsGenerator, SUSIRules
from trojsten.rules.susi_constants import (
PUZZLEHUNT_PARTICIPATIONS_KEY_NAME,
SUSI_AGAT,
SUSI_BLYSKAVICA,
SUSI_CAMP_TYPE,
SUSI_CIFERSKY_CECH,
SUSI_OUTDOOR_ROUND_NUMBER,
)
from trojsten.submit.models import Submit
SOURCE = submit_constants.SUBMIT_TYPE_SOURCE
DESCRIPTION = submit_constants.SUBMIT_TYPE_DESCRIPTION
ZIP = submit_constants.SUBMIT_TYPE_TESTABLE_ZIP
class DictObject(object):
    """Wrap a dictionary so its keys are readable as attributes.

    Nested dicts are wrapped recursively. Inside list values, dict
    elements are wrapped too, while every non-dict element is dropped
    from the resulting list.
    """

    def __init__(self, d):
        """@param d: Dictionary to expose as attributes."""
        self.__dict__.update(d)
        for key, value in d.items():
            if isinstance(value, dict):
                setattr(self, key, DictObject(value))
            elif isinstance(value, list):
                setattr(self, key, [DictObject(e) for e in value if isinstance(e, dict)])
def get_scoreboard(scoreboards, tag_key):
    """Return the first wrapped scoreboard whose tag equals *tag_key*.

    Returns None when no scoreboard in the sequence matches.
    """
    for wrapper in scoreboards:
        board = wrapper.scoreboard
        if board.tag == tag_key:
            return board
    return None
def get_row_for_user(scoreboard, user):
    """Return *user*'s row from the scoreboard as a DictObject, or None.

    None is returned both for a missing scoreboard and for a user without
    a row in the serialized results table.
    """
    if not scoreboard:
        return None
    rows = scoreboard.serialized_results["rows"]
    for row in rows:
        if row["user"]["id"] == user.id:
            return DictObject(row)
    return None
def get_col_to_index_map(scoreboard):
    """Map each column key of the scoreboard to its positional index.

    Returns an empty dict when no scoreboard is given.
    """
    if not scoreboard:
        return {}
    columns = scoreboard.serialized_results["cols"]
    return {column["key"]: index for index, column in enumerate(columns)}
class KMSCoefficientTest(TestCase):
    """Tests for KMSResultsGenerator.get_user_coefficient.

    Per the per-test comments below, the coefficient combines the user's
    school year, the number of "successful semesters" (KMS camp
    participations) and MO-finals participations.
    """

    def setUp(self):
        # Fixed reference time; all fixture events and the round are
        # placed relative to it.
        time = datetime.datetime(2047, 4, 7, 12, 47)
        self.time = timezone.make_aware(time)
        group = Group.objects.create(name="skupina")
        self.place = EventPlace.objects.create(name="<NAME>")
        self.type_camp = EventType.objects.create(
            name=KMS_CAMP_TYPE, organizers_group=group, is_camp=True
        )
        self.type_mo = EventType.objects.create(
            name=KMS_MO_FINALS_TYPE, organizers_group=group, is_camp=False
        )
        competition = Competition.objects.create(name="TestCompetition")
        competition.sites.add(Site.objects.get(pk=settings.SITE_ID))
        # Six semesters (3 years x 2 semesters), each with one camp; an
        # MO-finals event is created only after each year's 2nd semester.
        self.semesters = []
        self.camps = []
        self.mo_finals = []
        for (year, semester_number) in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 1), (3, 2)]:
            self.semesters.append(
                Semester.objects.create(
                    year=year, number=semester_number, name="Test semester", competition=competition
                )
            )
            self.camps.append(
                Event.objects.create(
                    name="KMS camp alpha",
                    type=self.type_camp,
                    semester=self.semesters[-1],
                    place=self.place,
                    start_time=self.time,
                    end_time=self.time,
                )
            )
            if semester_number == 2:
                # MO finals are spread ~one year apart (366-day steps),
                # all in the past relative to self.time.
                self.mo_finals.append(
                    Event.objects.create(
                        name="CKMO",
                        type=self.type_mo,
                        place=self.place,
                        start_time=self.time + timezone.timedelta((year - 3) * 366),
                        end_time=self.time + timezone.timedelta((year - 3) * 366),
                    )
                )
        self.current_semester = self.semesters[-1]
        # The round under test opens after every fixture event above.
        self.start = self.time + timezone.timedelta(2)
        self.end = self.time + timezone.timedelta(4)
        self.round = Round.objects.create(
            number=1,
            semester=self.current_semester,
            visible=True,
            solutions_visible=False,
            start_time=self.start,
            end_time=self.end,
        )
        graduation_year = self.round.end_time.year + int(
            self.round.end_time.month > SCHOOL_YEAR_END_MONTH
        )
        # graduation + 3 makes the user a first-year student relative to
        # the round (per the "year = 1" comments in the tests below).
        self.test_user = User.objects.create(
            username="test_user",
            password="password",
            first_name="Jozko",
            last_name="Mrkvicka",
            graduation=graduation_year + 3,
        )
        self.tag = KMSRules.RESULTS_TAGS[KMS_BETA]

    def test_year_only(self):
        # Coefficient = 3: year = 3, successful semesters = 0, mo = 0
        self.test_user.graduation -= 2
        self.test_user.save()
        generator = KMSResultsGenerator(self.tag)
        self.assertEqual(generator.get_user_coefficient(self.test_user, self.round), 3)

    def test_camps_only(self):
        # Coefficient = 3: year = 1, successful semesters = 2, mo = 0
        EventParticipant.objects.create(
            event=self.camps[5], user=self.test_user, type=EventParticipant.PARTICIPANT, going=True
        )
        EventParticipant.objects.create(
            event=self.camps[4], user=self.test_user, type=EventParticipant.PARTICIPANT, going=True
        )
        generator = KMSResultsGenerator(self.tag)
        self.assertEqual(generator.get_user_coefficient(self.test_user, self.round), 3)

    def test_camps_mo(self):
        # Coefficient = 7: year = 4, successful semesters = 1, mo = 2
        self.test_user.graduation -= 3
        self.test_user.save()
        EventParticipant.objects.create(
            event=self.camps[5], user=self.test_user, type=EventParticipant.PARTICIPANT, going=True
        )
        EventParticipant.objects.create(
            event=self.mo_finals[1],
            user=self.test_user,
            type=EventParticipant.PARTICIPANT,
            going=True,
        )
        EventParticipant.objects.create(
            event=self.mo_finals[0],
            user=self.test_user,
            type=EventParticipant.PARTICIPANT,
            going=True,
        )
        generator = KMSResultsGenerator(self.tag)
        self.assertEqual(generator.get_user_coefficient(self.test_user, self.round), 7)

    def test_invited_to_both_camps(self):
        # Coefficient = 2: year = 1, successful semesters = 1, mo = 0
        # Two camps in the same semester count as one successful semester.
        beta_camp = Event.objects.create(
            name="KMS camp beta",
            type=self.type_camp,
            semester=self.semesters[4],
            place=self.place,
            start_time=self.time,
            end_time=self.time,
        )
        EventParticipant.objects.create(
            event=self.camps[4], user=self.test_user, type=EventParticipant.PARTICIPANT, going=False
        )
        EventParticipant.objects.create(
            event=beta_camp, user=self.test_user, type=EventParticipant.PARTICIPANT, going=True
        )
        generator = KMSResultsGenerator(self.tag)
        self.assertEqual(generator.get_user_coefficient(self.test_user, self.round), 2)

    def test_ignore_mo_in_same_semester(self):
        # Coefficient = 3: year = 1, successful semesters = 0, mo = 2
        # Three MO finals are created but only two count; the latest one
        # falls in the current semester and is ignored.
        for mo_finals in self.mo_finals:
            EventParticipant.objects.create(
                event=mo_finals, user=self.test_user, type=EventParticipant.PARTICIPANT, going=True
            )
        generator = KMSResultsGenerator(self.tag)
        self.assertEqual(generator.get_user_coefficient(self.test_user, self.round), 3)

    def test_ignore_not_going_reserve(self):
        # Coefficient = 1: year = 1, successful semesters = 0, mo = 0
        # A RESERVE who did not go does not earn a successful semester.
        EventParticipant.objects.create(
            event=self.camps[4], user=self.test_user, type=EventParticipant.RESERVE, going=False
        )
        generator = KMSResultsGenerator(self.tag)
        self.assertEqual(generator.get_user_coefficient(self.test_user, self.round), 1)

    def test_count_not_going_participant(self):
        # Coefficient = 2: year = 1, successful semesters = 1, mo = 0
        # Being invited as PARTICIPANT counts even when not going.
        EventParticipant.objects.create(
            event=self.camps[4], user=self.test_user, type=EventParticipant.PARTICIPANT, going=False
        )
        generator = KMSResultsGenerator(self.tag)
        self.assertEqual(generator.get_user_coefficient(self.test_user, self.round), 2)

    def test_many_camps(self):
        # Coefficient = 6: year = 1, successful semesters = 5, mo = 0
        for i in range(5):
            EventParticipant.objects.create(
                event=self.camps[i],
                user=self.test_user,
                type=EventParticipant.PARTICIPANT,
                going=True,
            )
        generator = KMSResultsGenerator(self.tag)
        self.assertEqual(generator.get_user_coefficient(self.test_user, self.round), 6)
class KMSRulesTest(TestCase):
def setUp(self):
    """Build a KMS competition with one open round and ten tasks."""
    time = datetime.datetime(2004, 4, 7, 12, 47)
    self.time = timezone.make_aware(time)
    # pk = 7 sets rules to KMSRules
    self.competition = Competition.objects.create(name="TestCompetition", pk=7)
    self.competition.sites.add(Site.objects.get(pk=settings.SITE_ID))
    self.competition.save()
    self.semester = Semester.objects.create(
        number=1, name="Test semester", competition=self.competition, year=47
    )
    # The round is currently running: opened 4 days ago, closes in 4 days.
    self.start = self.time + timezone.timedelta(-4)
    self.end = self.time + timezone.timedelta(4)
    self.round = Round.objects.create(
        number=1,
        semester=self.semester,
        visible=True,
        solutions_visible=False,
        start_time=self.start,
        end_time=self.end,
    )
    category_alfa = Category.objects.create(name=KMS_ALFA, competition=self.competition)
    category_beta = Category.objects.create(name=KMS_BETA, competition=self.competition)
    # Ten tasks: numbers 1-7 belong to Alfa, 3-10 to Beta (3-7 to both).
    self.tasks = []
    for i in range(1, 11):
        self.tasks.append(
            Task.objects.create(
                number=i,
                name="Test task {}".format(i),
                round=self.round,
                description_points_visible=True,
            )
        )
        cat = []
        if i <= 7:
            cat += [category_alfa]
        if i >= 3:
            cat += [category_beta]
        self.tasks[-1].categories.set(cat)
        self.tasks[-1].save()
    self.group = Group.objects.create(name="skupina")
    self.url = reverse("view_latest_results")
def _create_submits(self, user, points):
    """Create one reviewed submit of points[i] on tasks[i] for *user*.

    A negative entry means "no submit for that task". Submits are
    timestamped one day before the round's end.
    """
    submit_time = self.end + timezone.timedelta(-1)
    for task, score in zip(self.tasks, points):
        if score < 0:
            continue  # negative score marks a task without a submit
        submit = Submit.objects.create(
            task=task,
            user=user,
            submit_type=1,
            points=score,
            testing_status="reviewed",
        )
        submit.time = submit_time
        submit.save()
def _create_user_with_coefficient(self, coefficient, username="test_user"):
    """Create a user whose KMS coefficient equals *coefficient*.

    Coefficients below 4 are produced purely by the school year (earlier
    graduation); from 4 upward the user is in the highest counted year
    and each extra point is added via one past MO-finals participation
    (verified by test_create_user_with_coefficient below).
    """
    # Graduation year of a current first-year student.
    graduation_year = (
        self.round.end_time.year + 3 + int(self.round.end_time.month > SCHOOL_YEAR_END_MONTH)
    )
    if coefficient < 4:
        graduation_year -= coefficient - 1
    else:
        graduation_year -= 3
    type_mo = EventType.objects.create(
        name=KMS_MO_FINALS_TYPE, is_camp=False, organizers_group=self.group
    )
    place = EventPlace.objects.create(name="Horna dolna")
    user = User.objects.create(
        username=username,
        password="password",
        first_name="Jozko",
        last_name="Mrkvicka",
        graduation=graduation_year,
    )
    # One MO-finals participation per point above 4, each event ending in
    # a different past year (366-day steps back).
    for i in range(coefficient - 4):
        ckmo = Event.objects.create(
            name="CKMO",
            type=type_mo,
            place=place,
            start_time=self.time,
            end_time=self.time + timezone.timedelta(-(i + 1) * 366),
        )
        EventParticipant.objects.create(
            event=ckmo, user=user, type=EventParticipant.PARTICIPANT, going=True
        )
    return user
def test_create_user_with_coefficient(self):
    """Sanity-check the fixture helper across a wide coefficient range."""
    tag = KMSRules.RESULTS_TAGS[KMS_BETA]
    for coefficient in range(-4, 12):
        user = self._create_user_with_coefficient(coefficient, "testuser%d" % coefficient)
        generator = KMSResultsGenerator(tag)
        self.assertEqual(generator.get_user_coefficient(user, self.round), coefficient)
def test_only_best_five(self):
    """Only the five best of seven Alfa submits stay active in the sum."""
    points = [9, 7, 0, 8, 4, 5, 4]
    active = [True] * 7
    # The 0-point submit (task 3) is never among the best five.
    active[2] = False
    user = self._create_user_with_coefficient(1)
    self._create_submits(user, points)
    response = self.client.get("%s?single_round=True" % self.url)
    self.assertEqual(response.status_code, 200)
    scoreboard = get_scoreboard(response.context["scoreboards"], KMS_ALFA)
    col_to_index_map = get_col_to_index_map(scoreboard)
    row = get_row_for_user(scoreboard, user)
    self.assertEqual(row.cell_list[col_to_index_map[COEFFICIENT_COLUMN_KEY]].points, "1")
    # 9 + 7 + 8 + 5 + 4 (one of the two 4s) = 33.
    self.assertEqual(row.cell_list[col_to_index_map["sum"]].points, "33")
    for i in range(1, 8):
        self.assertEqual(row.cell_list[col_to_index_map[i]].points, str(points[i - 1]))
        if i not in [5, 7]:
            self.assertEqual(row.cell_list[col_to_index_map[i]].active, active[i - 1])
    # Tasks 5 and 7 both scored 4 points; exactly one of them may count.
    self.assertTrue(
        row.cell_list[col_to_index_map[5]].active ^ row.cell_list[col_to_index_map[7]].active
    )
def test_only_best_five_halved_points(self):
    """Best-five selection for a coefficient-9 user in the Beta results."""
    points = [-1, -1, -1, 9, 6, 8, 9, 9, 2, 10]
    # NOTE(review): here `active` is indexed by task number (active[i]),
    # while test_only_best_five indexes by position (active[i - 1]) —
    # confirm the [True] * 11 sizing and indexing are intentional.
    active = [True] * 11
    active[5] = False
    active[9] = False
    user = self._create_user_with_coefficient(9)
    self._create_submits(user, points)
    response = self.client.get("%s?single_round=True" % self.url)
    self.assertEqual(response.status_code, 200)
    scoreboard = get_scoreboard(response.context["scoreboards"], KMS_BETA)
    col_to_index_map = get_col_to_index_map(scoreboard)
    row = get_row_for_user(scoreboard, user)
    self.assertEqual(row.cell_list[col_to_index_map[COEFFICIENT_COLUMN_KEY]].points, "9")
    self.assertEqual(row.cell_list[col_to_index_map["sum"]].points, "41")
    for i in range(4, 11):
        self.assertEqual(row.cell_list[col_to_index_map[i]].points, str(points[i - 1]))
        self.assertEqual(row.cell_list[col_to_index_map[i]].active, active[i])
    # Exactly one of tasks 5 and 7 may be active.
    self.assertTrue(
        row.cell_list[col_to_index_map[5]].active ^ row.cell_list[col_to_index_map[7]].active
    )
def test_alfa_coeff_2(self):
    """Coefficient-2 user in Alfa: all five submits active, sum 19."""
    points = [9, 2, 3, 4, 5]
    user = self._create_user_with_coefficient(2)
    self._create_submits(user, points)
    response = self.client.get("%s?single_round=True" % self.url)
    self.assertEqual(response.status_code, 200)
    scoreboard = get_scoreboard(response.context["scoreboards"], KMS_ALFA)
    columns = get_col_to_index_map(scoreboard)
    row = get_row_for_user(scoreboard, user)
    for task_number in range(1, 6):
        self.assertTrue(row.cell_list[columns[task_number]].active)
    self.assertEqual(row.cell_list[columns["sum"]].points, "19")
def test_alfa_coeff_3(self):
    """Coefficient-3 user in Alfa: task 1 is dropped, tasks 2-6 count."""
    points = [1, 2, 3, 4, 5, 6]
    user = self._create_user_with_coefficient(3)
    self._create_submits(user, points)
    response = self.client.get("%s?single_round=True" % self.url)
    self.assertEqual(response.status_code, 200)
    scoreboard = get_scoreboard(response.context["scoreboards"], KMS_ALFA)
    columns = get_col_to_index_map(scoreboard)
    row = get_row_for_user(scoreboard, user)
    # Task 1 is inactive for this coefficient.
    self.assertFalse(row.cell_list[columns[1]].active)
    for task_number in range(2, 7):
        self.assertTrue(row.cell_list[columns[task_number]].active)
    self.assertEqual(row.cell_list[columns["sum"]].points, "19")
def test_beta_coeff_4(self):
    """Coefficient-4 user in Beta: tasks 3-7 all active, sum 26."""
    points = [-1, -1, 8, 4, 5, 6, 7]
    user = self._create_user_with_coefficient(4)
    self._create_submits(user, points)
    response = self.client.get("%s?single_round=True" % self.url)
    self.assertEqual(response.status_code, 200)
    scoreboard = get_scoreboard(response.context["scoreboards"], KMS_BETA)
    columns = get_col_to_index_map(scoreboard)
    row = get_row_for_user(scoreboard, user)
    for task_number in range(3, 8):
        self.assertTrue(row.cell_list[columns[task_number]].active)
    self.assertEqual(row.cell_list[columns["sum"]].points, "26")
def test_beta_coeff_8(self):
    """Coefficient-8 user in Beta: task 3 inactive, 4-7 active, sum 28."""
    points = [-1, -1, 3, 4, 5, 6, 7, 8]
    user = self._create_user_with_coefficient(8)
    self._create_submits(user, points)
    response = self.client.get("%s?single_round=True" % self.url)
    self.assertEqual(response.status_code, 200)
    scoreboard = get_scoreboard(response.context["scoreboards"], KMS_BETA)
    columns = get_col_to_index_map(scoreboard)
    row = get_row_for_user(scoreboard, user)
    self.assertFalse(row.cell_list[columns[3]].active)
    for task_number in range(4, 8):
        self.assertTrue(row.cell_list[columns[task_number]].active)
    self.assertEqual(row.cell_list[columns["sum"]].points, "28")
def test_beta_coeff_9(self):
    """Coefficient-9 user in Beta: task 3 inactive, 4-7 active, sum 26."""
    points = [-1, -1, 3, 4, 6, 6, 7, 8]
    user = self._create_user_with_coefficient(9)
    self._create_submits(user, points)
    response = self.client.get("%s?single_round=True" % self.url)
    self.assertEqual(response.status_code, 200)
    scoreboard = get_scoreboard(response.context["scoreboards"], KMS_BETA)
    columns = get_col_to_index_map(scoreboard)
    row = get_row_for_user(scoreboard, user)
    self.assertFalse(row.cell_list[columns[3]].active)
    for task_number in range(4, 8):
        self.assertTrue(row.cell_list[columns[task_number]].active)
    self.assertEqual(row.cell_list[columns["sum"]].points, "26")
def test_beta_only_user(self):
    """A coefficient-7 solver's row is active in Beta but not in Alfa."""
    points = [-1, -1, 2, 3, 4, 5, 6, 7, 8]
    user = self._create_user_with_coefficient(7)
    self._create_submits(user, points)
    response = self.client.get("%s?single_round=True" % self.url)
    self.assertEqual(response.status_code, 200)
    scoreboards = response.context["scoreboards"]
    row_beta = get_row_for_user(get_scoreboard(scoreboards, KMS_BETA), user)
    row_alfa = get_row_for_user(get_scoreboard(scoreboards, KMS_ALFA), user)
    self.assertTrue(row_beta.active)
    self.assertFalse(row_alfa.active)
def test_alfa_only_user(self):
    """A coefficient-1 solver's row is active in Alfa but not in Beta."""
    points = [1, 2, 3, 4, 5]
    user = self._create_user_with_coefficient(1)
    self._create_submits(user, points)
    response = self.client.get("%s?single_round=True" % self.url)
    self.assertEqual(response.status_code, 200)
    scoreboards = response.context["scoreboards"]
    row_beta = get_row_for_user(get_scoreboard(scoreboards, KMS_BETA), user)
    row_alfa = get_row_for_user(get_scoreboard(scoreboards, KMS_ALFA), user)
    self.assertTrue(row_alfa.active)
    self.assertFalse(row_beta.active)
def test_alfa_beta_user(self):
points = [1, 2, | |
node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL514/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL514/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL514/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL514/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL514/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL514/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL514/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL514/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL514/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif | |
access roads.
'favorite_tree', # The best connected (most nodes) tree in the forest.
# To help with matching, we distinguish real Expressway Ramps from those
# that don't actually connect to an Expressway.
'expressway_endpts',
#
# Used by the workers.
'hydrated_items',
'processed_sids',
'problem_items',
'analyzed_sids',
'create_feats',
'update_feats',
'create_lvals',
'update_lvals',
'delete_nodes_for',
'insert_brats',
'brats_dbldict',
'insert_aadts',
'aadts_dbldict',
#
'attr_to_field',
'field_attr_cache_name',
'field_attr_cache_sid',
'bad_tag_sids',
#
'mndot_geom',
'mndot_region',
'ccp_region',
)
# *** Constructor
def __init__(self):
    """Initialize the import script.

    Chains to Ccp_Script_Base with this script's argument parser and
    resets every per-run attribute (declared in __slots__) to None so
    each processing phase explicitly creates what it needs.
    """
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    # Per-run statistics, keyed by stat name.
    self.stats = {}
    #
    self.target_path = None
    #
    self.target_schema = None
    self.intermed_feats = None
    self.slayers = None
    self.everylayer = None
    # Stack ID bookkeeping: use/no-op/delete counts and lists.
    self.sid_use_count = None
    self.sid_use_noops = None
    self.sid_del_count = None
    self.sid_del_lists = None
    self.recorded_sids = None
    self.sid_delete_froms = None
    # Connectivity graphs and artifacts derived from them.
    self.the_forest = None
    self.sub_forest = None
    self.favorite_tree = None
    self.expressway_endpts = None
    # Used by the workers.
    self.hydrated_items = None
    self.processed_sids = None
    self.problem_items = None
    self.analyzed_sids = None
    self.create_feats = None
    self.update_feats = None
    self.create_lvals = None
    self.update_lvals = None
    self.delete_nodes_for = None
    self.insert_brats = None
    self.brats_dbldict = None
    self.insert_aadts = None
    self.aadts_dbldict = None
    # Attribute/field mapping caches.
    self.attr_to_field = None
    self.field_attr_cache_name = None
    self.field_attr_cache_sid = None
    self.bad_tag_sids = None
    # MnDOT / Cyclopath region geometry lookups.
    self.mndot_geom = None
    self.mndot_region = None
    self.ccp_region = None
# ***
# This script's main() is very simple: it makes one of these objects and
# calls go(). Our base class reads the user's command line arguments and
# creates a query_builder object for us at self.qb before thunking to
# go_main().
#
def go_main(self):
    """Dispatch to the requested import/export phase and commit on success.

    The database transaction is committed only when the selected phase
    completes without raising (and debug_skip_commit is unset); any
    exception is logged and the transaction is rolled back via
    close_query(False).
    """
    # Skipping: Ccp_Script_Base.go_main(self)
    do_commit = False
    try:
        if self.cli_opts.init_importer:
            self.init_importer()
        else:
            self.setup_importer()
        if self.cli_opts.instance_master:
            self.instance_master()
        else:
            if self.cli_opts.try_matching:
                g.assurt(self.cli_opts.item_type == 'byway')
                self.try_matching()
            elif (self.cli_opts.process_edits
                  or self.cli_opts.do_import):
                self.process_edits()
            elif self.cli_opts.do_export:
                self.do_export_non_byway()
            else:
                g.assurt(False)
        if debug_skip_commit:
            raise Exception('DEBUG: Skipping commit: Debugging')
        do_commit = True
    # 'except Exception as e' replaces the Python-2-only 'Exception, e'
    # spelling; the 'as' form works on Python 2.6+ and Python 3.
    except Exception as e:
        # FIXME: g.assurt()s that are caught here have empty msgs?
        log.error('Exception!: "%s" / %s' % (str(e), traceback.format_exc(),))
    finally:
        self.cli_args.close_query(do_commit)
#
# *** Initial setup phase.
#
#
def init_importer(self):
    """Run the one-time initialization phase.

    Wraps the work in initer stats; for byways, also (re)creates the
    hausdorff_cache table first and pre-populates the match cache after
    the source Shapefiles are consumed.
    """
    self.stats_init_initer()
    if self.cli_opts.item_type == 'byway':
        self.init_cache_tables()
    self.consume_source_shps()
    if self.cli_opts.item_type == 'byway':
        # TODO: gf_type_bucket='controlled', to fix freeways...
        self.match_cache_populate(gf_type_bucket='shared_use')
    self.stats_show_initer()
#
def init_cache_tables(self):
    """Drop and recreate the hausdorff_cache table and its helper views.

    The views (_hm, _hm2) are dropped before the table they reference.
    The log messages previously claimed to be dropping
    'hausdorff_cache' for both views; they now name the actual view.
    The geometry column is added via AddGeometryColumn so PostGIS
    registers it in geometry_columns.
    """
    # Drop dependent views before the table they select from.
    for view_name in ('_hm', '_hm2',):
        log.info('Dropping view maybe: %s.' % (view_name,))
        drop_sql = "DROP VIEW IF EXISTS %s" % (view_name,)
        self.qb.db.sql(drop_sql)
    log.info('Dropping table maybe: hausdorff_cache.')
    drop_sql = "DROP TABLE IF EXISTS hausdorff_cache"
    self.qb.db.sql(drop_sql)
    log.info('Creating table certainly: hausdorff_cache.')
    create_sql = (
        """
        CREATE TABLE hausdorff_cache (
        /* MAYBE: Use a branch ID? Or is the cache
        always just created, processed,
        and then dumped?
        branch_id INTEGER NOT NULL */
        stack_id INTEGER NOT NULL
        , name TEXT
        , gf_lyr_nom TEXT
        , gf_lyr_id INTEGER
        , gf_lyr_bucket TEXT
        , match_cmd TEXT
        , one_way INTEGER
        , well_connected BOOLEAN
        , nneighbr_1 INTEGER
        , nneighbr_n INTEGER
        )
        """)
    self.qb.db.sql(create_sql)
    table_name = 'hausdorff_cache'
    geometry_col = 'geometry'
    dimension = 2
    addgeom_sql = (
        """
        SELECT AddGeometryColumn('%s', '%s', %d, 'LINESTRING', %d)
        """ % (table_name, geometry_col, conf.default_srid, dimension,))
    self.qb.db.sql(addgeom_sql)
#
# https://en.wikipedia.org/wiki/Connectivity_%28graph_theory%29
#
# The main Cyclopath tree in the forest is considered the connected tree.
# All other disjoint trees' features are considered, well, disjoint.
# Connectivity classification buckets for byway features.
byway_00_tree_type = set([
    'connected',
    'sovereign',
])
# Geofeature-type buckets used when matching byways.
byway_01_geof_type = set([
    'segregated',
    'shared_use',
    'controlled',
    'transition', # Verified Expressway Ramp.
    'fifth_hand',
])
# Per-feature guidance values assigned by the user or by preprocessing.
byway_02_guidance = set([
    'update', # A feature the user has marked as keeping/updating
              # and doesn't need to be matched against other items
              # (though other items might be matched against it).
    'repeat', # Basically an 'update', but when CCP_FROMS_ is set;
              # really just used for auditing the import process.
    'delete', # A user can mark a feature deleted, and we'll mark features
              # deleted during preprocesssing. While applying edits,
              # features marked delete will have their corresponding
              # Cyclopath items marked delete.
    'donate', # A line marked as a duplicate as other lines, where the
              # user wants us to programmatically determine the matching
              # lines. We can be a lot more confident about a donated line's
              # matches since the user told us the donated line must go.
    'reject', # A donation for which we cannot find matches; user must fix.
    'ignore', # Deleted freshies marked deleted are ignored, since they don't
              # exist in Cyclopath; and deleted split-froms that have one
              # or more siblings not deleted are marked 'ignore', since the
              # stack ID lives on.
    'nogeom', # This problem is so serious it deserves its own category.
    'noring', # Ditto.
    #
    # 'detain', # During the design phase, an option to quarantine items
    #           # was introducted. These items would be temporarily deleted
    #           # from Cyclopath, and then someone would clean up the data
    #           # using Shapefiles and finally re-import the clean data.
    #           # However, the problem data is just disconected byways --
    #           # those that screw up the geocoder (finding points in the
    #           # network) and that screw up the route finder (finding edges
    #           # between points) -- and we've since introduce the geofeature
    #           # attribute, is_disconnected, so we don't need this option
    #           # to solve that problem.
])
# # Network analysis states:
# layer_02_analysis = set([
#    'pending', # waiting for Hausdorff et al
#    'no_match',
#    'no_haus',
#    'too_short',
#    'good_haus',
#    'good_frag',
#    'poor_haus',
#    'couplet',
#    'same_endpts',
#    'undeadended_old',
#    'undeadended_new',
#    'extendeadend_old',
#    'extendeadend_new',
#    'deadends_same_len',
#    ])
# Fiona field definitions (name, type) for the intermediate byway
# Shapefile written between preprocessing and import.
intermediate_fields_byway = [
    # 1234567890  <- Shapefile field names are limited to 10 characters.
    (u'GUIDANCE', 'str',),
    (u'OPERATION', 'str',),
    (u'CCP_ID', 'int:9',),
    (u'CCP_NAME', 'str',),
    (u'gf_lyr_id', 'int:9',),
    (u'gf_lyr_nom', 'str',),
    (u'CCP_FROMS_', 'str',),
    (u'import_err', 'str',),
    (u'speedlimit', 'int:9',),
    (u'lane_count', 'int:9',),
    (u'item_tags', 'str',),
    (u'z_level', 'int:9',),
    (u'one_way', 'int:9',),
    (u'out_ln_wid', 'int:9',),
    (u'shld_width', 'int:9',),
    (u'bike_facil', 'str',),
    (u'cautionary', 'str',),
    #
    (u'wconnected', 'str',),
    (u'wangdangle', 'str',),
    (u'nneighbr_1', 'int:9',),
    (u'nneighbr_n', 'int:9',),
    (u'gfl_typish', 'str',),
    #
    (u'CCP_SYS', 'int:9',),
    (u'CCP_VERS', 'int:9',),
    (u'new_length', 'float:19.11',),
    #
    #(u'OBJECTID', 'int:9',),
    # Deprecated:
    #(u'ACTION_', 'str',),
    #(u'CONTEXT_', 'str',),
]
# Fiona schema for the intermediate byway Shapefile.
intermediate_schema_byway = {
    'geometry': 'LineString',
    'properties': OrderedDict(intermediate_fields_byway),
}
# MAYBE: Move all these item-specific pieces of data to the appropriate
#        item classes? For now, to make editing this file easier, [lb]
#        is keeping everything here.
# MAYBE: Are more item-specific lookups really necessary?
# Region-type buckets for non-byway (region) features.
region_00_tree_type = set([
    # Skipping: state/province/administrative district and beyond
    'county',
    'township',
    'city', # municipality
    'neighborhood',
    'yogabbagabba', # everything else
    'non-byway', # also everything else, e.g., non-byway, for shapefiles not
                 # originally exported from Cyclopath (so no item_tags field
                 # with which to discern the layer name).
])
#region_01_geof_type = set([
#   ])
# We at least need a Shapefile definition for each item type.
intermediate_fields_non_byway = [
    (u'GUIDANCE', 'str',),
    (u'OPERATION', 'str',),
    (u'CCP_ID', 'int:9',),
    (u'CCP_NAME', 'str',),
    (u'gf_lyr_id', 'int:9',),
    (u'gf_lyr_nom', 'str',),
    (u'CCP_FROMS_', 'str',),
    (u'import_err', 'str',),
    (u'item_tags', 'str',),
    (u'z_level', 'int:9',),
    (u'gfl_typish', 'str',),
    (u'CCP_SYS', 'int:9',),
    (u'CCP_VERS', 'int:9',),
    (u'AREA', 'float:19.11',),
    (u'PERIMETER', 'float:19.11',),
    (u'POPULATION', 'int:9',),
    #(u'OBJECTID', 'int:9',),
    # missing a few...
    # Example source feature, for reference:
    # {'geometry': {'type': 'Polygon', 'coordinates': [[(222512.76, 5050709.15), (221708.37, 5050747.78), (221726.82, 5051153.33), (221732.2, 5051553.23), (222539.39, 5051515.79), (222534.78, 5051114.33), (222512.76, 5050709.15)]]}, 'type': 'Feature', 'id': '0', 'properties': OrderedDict([(u'AREA', 651350.6103), (u'PERIMETER', 3226.5449), (u'MUN_', 1.0), (u'MUN_ID', 328.0), (u'MUNI_NAME', u'Barry'), (u'FIPS', u'03718'), (u'MCD', u'0210'), (u'POPULATION', 25)])}
]
# Fiona schema for non-byway items; 'geometry' is a placeholder that the
# caller must replace with the concrete geometry type before use.
intermediate_schema_non_byway = {
    'geometry': 'SET_ME_BUDDY',
    'properties': OrderedDict(intermediate_fields_non_byway),
}
# ***
#
def consume_source_shps(self):
self.sid_use_count = {}
self.sid_use_noops = {}
self.sid_del_count = {}
self.sid_del_lists = {}
self.sid_delete_froms = set()
self.the_forest = networkx.Graph()
self.sub_forest = networkx.Graph()
self.expressway_endpts = set()
#
self.mndot_geom = {}
self.mndot_geom['city'] = {}
self.mndot_geom['township'] = {}
self.mndot_region = {}
self.mndot_region['city'] = {}
self.mndot_region['township'] = {}
self.ccp_region = {}
self.ccp_region['city'] = {}
self.ccp_region['township'] = {}
source_files = self.get_source_files('Source')
common_crs = None
for source_shp in source_files:
try:
log.info('Preprocessing Shapefile: %s'
% (os.path.basename(source_shp),))
with fiona.open(source_shp, 'r') as source_data:
if len(source_data) == 0:
log.warning('Skipping empty Shapefile: %s' % (source_shp,))
else:
common_crs = self.preprocess_source_shp_feats(
source_data, common_crs)
if self.cli_opts.item_type == 'byway':
self.preprocess_source_build_network(source_data)
elif self.cli_opts.item_type == 'region':
self.preprocess_source_regions(source_data)
except Exception, e:
log.error('Unable to process source: %s' % (str(e),))
raise
if self.cli_opts.item_type == 'byway':
self.analyze_graph()
try:
try:
if self.cli_opts.item_type == 'byway':
target_schema = Hausdorff_Import.intermediate_schema_byway
else:
target_schema = Hausdorff_Import.intermediate_schema_non_byway
self.prepare_target_shapefiles(target_schema,
touch_note='note_prepared')
except Exception, e:
log.error('Unable to prepare targets: %s' % (str(e),))
raise
try:
self.cache_process_sources(source_files)
if self.cli_opts.item_type == 'byway':
self.cache_table_indices_create()
except Exception, e:
log.error('Unable to process inputs: %s' % (str(e),))
raise
finally:
| |
from ipaddress import IPv4Network
import re
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.management import call_command
from rest_framework import status
from desecapi.models import RRset
from desecapi.tests.base import DesecTestCase, AuthenticatedRRSetBaseTestCase
class UnauthenticatedRRSetTestCase(DesecTestCase):

    def test_unauthorized_access(self):
        """Every HTTP verb on the RRset endpoint must reject anonymous requests."""
        url = self.reverse('v1:rrsets', name='example.com')
        verbs = (
            self.client.get,
            self.client.post,
            self.client.put,
            self.client.delete,
            self.client.patch,
        )
        for request in verbs:
            self.assertStatus(request(url), status.HTTP_401_UNAUTHORIZED)
class AuthenticatedRRSetTestCase(AuthenticatedRRSetBaseTestCase):
def test_subname_validity(self):
    """Mixed-case and non-ASCII subnames are rejected; lowercase ASCII saves fine."""
    invalid_subnames = ('aEroport', 'AEROPORT', 'aéroport')
    for bad_subname in invalid_subnames:
        with self.assertRaises(ValidationError):
            RRset(domain=self.my_domain, subname=bad_subname, ttl=60, type='A').save()
    # The all-lowercase spelling must save without raising.
    RRset(domain=self.my_domain, subname='aeroport', ttl=60, type='A').save()
def test_retrieve_my_rr_sets(self):
    """Listing my RRsets works both with and without an empty subname filter."""
    responses = (
        self.client.get_rr_sets(self.my_domain.name),
        self.client.get_rr_sets(self.my_domain.name, subname=''),
    )
    for response in responses:
        self.assertStatus(response, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1, response.data)
def test_retrieve_my_rr_sets_pagination(self):
    """Exercise cursor pagination: forced pagination plus first/next/prev links."""

    def convert_links(links):
        # Parse an RFC 5988 Link header into {rel_label: url}.
        mapping = {}
        for link in links.split(', '):
            _url, label = link.split('; ')
            label = re.search('rel="(.*)"', label).group(1)
            _url = _url[1:-1]  # strip the surrounding angle brackets
            assert label not in mapping
            mapping[label] = _url
        return mapping

    # FIX: the default was a mutable list ([]); an immutable tuple avoids
    # the shared-mutable-default pitfall while behaving identically here.
    def assertPaginationResponse(response, expected_length, expected_directional_links=()):
        self.assertStatus(response, status.HTTP_200_OK)
        self.assertEqual(len(response.data), expected_length)
        _links = convert_links(response['Link'])
        self.assertEqual(len(_links), len(expected_directional_links) + 1)  # directional links, plus "first"
        self.assertTrue(_links['first'].endswith('/?cursor='))
        for directional_link in expected_directional_links:
            self.assertEqual(_links['first'].find('/?cursor='), _links[directional_link].find('/?cursor='))
            self.assertTrue(len(_links[directional_link]) > len(_links['first']))

    # Prepare extra records so that we get three pages (total: n + 1)
    n = int(settings.REST_FRAMEWORK['PAGE_SIZE'] * 2.5)
    RRset.objects.bulk_create(
        [RRset(domain=self.my_domain, subname=str(i), ttl=123, type='A') for i in range(n)]
    )
    # Without a cursor, a too-large list must be refused with a hint.
    response = self.client.get_rr_sets(self.my_domain.name)
    self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(response.data['detail'],
                     f'Pagination required. You can query up to {settings.REST_FRAMEWORK["PAGE_SIZE"]} items at a time ({n+1} total). '
                     'Please use the `first` page link (see Link header).')
    links = convert_links(response['Link'])
    self.assertEqual(len(links), 1)
    self.assertTrue(links['first'].endswith('/?cursor='))
    # First page
    url = links['first']
    response = self.client.get(url)
    assertPaginationResponse(response, settings.REST_FRAMEWORK['PAGE_SIZE'], ['next'])
    # Next
    url = convert_links(response['Link'])['next']
    response = self.client.get(url)
    assertPaginationResponse(response, settings.REST_FRAMEWORK['PAGE_SIZE'], ['next', 'prev'])
    data_next = response.data.copy()
    # Next-next (last) page: the remaining n/5 records plus the pre-existing one.
    url = convert_links(response['Link'])['next']
    response = self.client.get(url)
    assertPaginationResponse(response, n/5 + 1, ['prev'])
    # Prev
    url = convert_links(response['Link'])['prev']
    response = self.client.get(url)
    assertPaginationResponse(response, settings.REST_FRAMEWORK['PAGE_SIZE'], ['next', 'prev'])
    # Make sure that one step forward equals two steps forward and one step back
    self.assertEqual(response.data, data_next)
def test_retrieve_other_rr_sets(self):
    """Another user's RRsets must look like they do not exist (404)."""
    for query_kwargs in ({}, {'subname': 'test'}, {'type': 'A'}):
        response = self.client.get_rr_sets(self.other_domain.name, **query_kwargs)
        self.assertStatus(response, status.HTTP_404_NOT_FOUND)
def test_retrieve_my_rr_sets_filter(self):
    """RRset lists can be filtered by subname and by type."""
    page_size = settings.REST_FRAMEWORK['PAGE_SIZE']
    resp = self.client.get_rr_sets(self.my_rr_set_domain.name, query='?cursor=')
    self.assertStatus(resp, status.HTTP_200_OK)
    self.assertEqual(len(resp.data), min(len(self._test_rr_sets()), page_size))
    for subname in self.SUBNAMES:
        resp = self.client.get_rr_sets(self.my_rr_set_domain.name, subname=subname)
        self.assertStatus(resp, status.HTTP_200_OK)
        expected = len(self._test_rr_sets(subname=subname))
        self.assertRRSetsCount(resp.data, [dict(subname=subname)], count=expected)
    for type_ in self.ALLOWED_TYPES:
        resp = self.client.get_rr_sets(self.my_rr_set_domain.name, type=type_)
        self.assertStatus(resp, status.HTTP_200_OK)
def test_create_my_rr_sets(self):
    """Create RRsets of several types at various subnames (including apex).

    Covers the happy path, the uniqueness constraint on (subname, type),
    and that a created RRset is visible through list and detail views.
    """
    for subname in [None, 'create-my-rr-sets', 'foo.create-my-rr-sets', 'bar.baz.foo.create-my-rr-sets']:
        for data in [
            {'subname': subname, 'records': ['1.2.3.4'], 'ttl': 3660, 'type': 'A'},
            {'subname': '' if subname is None else subname, 'records': ['desec.io.'], 'ttl': 36900, 'type': 'PTR'},
            {'subname': '' if subname is None else subname, 'ttl': 3650, 'type': 'TXT', 'records': ['"foo"']},
        ]:
            # Try POST with missing subname
            if data['subname'] is None:
                data.pop('subname')
            # Creation triggers a pdns zone update, which must be mocked.
            with self.assertPdnsRequests(self.requests_desec_rr_sets_update(name=self.my_empty_domain.name)):
                response = self.client.post_rr_set(domain_name=self.my_empty_domain.name, **data)
                self.assertStatus(response, status.HTTP_201_CREATED)
            # Check for uniqueness on second attempt
            response = self.client.post_rr_set(domain_name=self.my_empty_domain.name, **data)
            self.assertContains(response, 'Another RRset with the same subdomain and type exists for this domain.',
                                status_code=status.HTTP_400_BAD_REQUEST)
            # The new RRset must appear in both the list and the detail view.
            response = self.client.get_rr_sets(self.my_empty_domain.name)
            self.assertStatus(response, status.HTTP_200_OK)
            self.assertRRSetsCount(response.data, [data])
            response = self.client.get_rr_set(self.my_empty_domain.name, data.get('subname', ''), data['type'])
            self.assertStatus(response, status.HTTP_200_OK)
            self.assertRRSet(response.data, **data)
def test_create_my_rr_sets_type_restriction(self):
    """Lower-case, dead, and restricted record types must all be rejected.

    After each rejected POST, the domain is re-read to ensure nothing
    was created.
    """
    for subname in ['', 'create-my-rr-sets', 'foo.create-my-rr-sets', 'bar.baz.foo.create-my-rr-sets']:
        for data in [
            # Lower-case type names are invalid.
            {'subname': subname, 'ttl': 60, 'type': 'a'},
            {'subname': subname, 'records': ['10 example.com.'], 'ttl': 60, 'type': 'txt'}
        ] + [
            # Types the API considers dead.
            {'subname': subname, 'records': ['10 example.com.'], 'ttl': 60, 'type': type_}
            for type_ in self.DEAD_TYPES
        ] + [
            # Types reserved for internal use.
            {'subname': subname, 'records': ['set.an.example. get.desec.io. 2584 10800 3600 604800 60'],
             'ttl': 60, 'type': type_}
            for type_ in self.RESTRICTED_TYPES
        ]:
            response = self.client.post_rr_set(self.my_domain.name, **data)
            self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
            # Nothing may have been created by the rejected request.
            response = self.client.get_rr_sets(self.my_domain.name)
            self.assertStatus(response, status.HTTP_200_OK)
            self.assertRRSetsCount(response.data, [data], count=0)
def test_create_my_rr_sets_without_records(self):
    """Creating an RRset with empty or missing records is a 400 and creates nothing."""
    subnames = ('', 'create-my-rr-sets', 'foo.create-my-rr-sets', 'bar.baz.foo.create-my-rr-sets')
    for subname in subnames:
        payloads = (
            {'subname': subname, 'records': [], 'ttl': 60, 'type': 'A'},
            {'subname': subname, 'ttl': 60, 'type': 'A'},
        )
        for payload in payloads:
            response = self.client.post_rr_set(self.my_empty_domain.name, **payload)
            self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
            # The rejected request must not have created anything.
            response = self.client.get_rr_sets(self.my_empty_domain.name)
            self.assertStatus(response, status.HTTP_200_OK)
            self.assertRRSetsCount(response.data, [], count=0)
def test_create_other_rr_sets(self):
    """Creating an RRset in someone else's domain must 404."""
    response = self.client.post_rr_set(
        self.other_domain.name, records=['1.2.3.4'], ttl=60, type='A')
    self.assertStatus(response, status.HTTP_404_NOT_FOUND)
def test_create_my_rr_sets_too_long_content(self):
    """TXT content of exactly 500 chars is accepted; 501 chars is rejected."""

    def txt_payload(length):
        body = 'A' * (length - 2)  # we have two quotes
        return {'records': [f'"{body}"'], 'ttl': 3600, 'type': 'TXT', 'subname': f'name{length}'}

    with self.assertPdnsRequests(self.requests_desec_rr_sets_update(self.my_empty_domain.name)):
        self.assertStatus(
            self.client.post_rr_set(self.my_empty_domain.name, **txt_payload(500)),
            status.HTTP_201_CREATED)
    response = self.client.post_rr_set(self.my_empty_domain.name, **txt_payload(501))
    self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
    self.assertIn('Ensure this field has no more than 500 characters.', str(response.data))
def test_create_my_rr_sets_too_large_rrset(self):
    """An RRset whose total wire length exceeds the limit is rejected with the excess."""
    hosts = IPv4Network('127.0.0.0/20')  # size: 4096 IP addresses
    payload = {'records': [str(ip) for ip in hosts], 'ttl': 3600, 'type': 'A', 'subname': 'name'}
    response = self.client.post_rr_set(self.my_empty_domain.name, **payload)
    self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
    excess_length = 28743 + len(self.my_empty_domain.name)
    self.assertIn(f'Total length of RRset exceeds limit by {excess_length} bytes.', str(response.data))
def test_create_my_rr_sets_twice(self):
    """A second POST for the same subname/type fails, even with different records."""
    payload = {'records': ['172.16.58.3'], 'ttl': 3660, 'type': 'A'}
    with self.assertPdnsRequests(self.requests_desec_rr_sets_update(self.my_empty_domain.name)):
        response = self.client.post_rr_set(self.my_empty_domain.name, **payload)
        self.assertStatus(response, status.HTTP_201_CREATED)
    payload['records'][0] = '172.16.31.10'
    response = self.client.post_rr_set(self.my_empty_domain.name, **payload)
    self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
def test_create_my_rr_sets_upper_case(self):
    """Subnames containing upper-case letters are rejected with a lowercase hint."""
    for subname in ('asdF', 'cAse', 'asdf.FOO', '--F', 'ALLCAPS'):
        response = self.client.post_rr_set(
            self.my_empty_domain.name,
            records=['1.2.3.4'], ttl=60, type='A', subname=subname)
        self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
        self.assertIn('Subname can only use (lowercase)', str(response.data))
def test_create_my_rr_sets_empty_payload(self):
    """POSTing no data at all yields a 400 with 'No data provided'."""
    response = self.client.post_rr_set(self.my_empty_domain.name)
    self.assertContains(
        response, 'No data provided', status_code=status.HTTP_400_BAD_REQUEST)
def test_create_my_rr_sets_unknown_type(self):
    """Record types unknown to pdns are rejected with a 400."""
    for _type in ('AA', 'ASDF'):
        pdns_mock = self.request_pdns_zone_update_unknown_type(
            name=self.my_domain.name, unknown_types=_type)
        with self.assertPdnsRequests(pdns_mock):
            response = self.client.post_rr_set(
                self.my_domain.name, records=['1234'], ttl=3660, type=_type)
            self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
def test_create_my_rr_sets_insufficient_ttl(self):
    """A TTL just below the domain minimum is rejected; the minimum itself works."""
    ttl = settings.MINIMUM_TTL_DEFAULT - 1
    response = self.client.post_rr_set(
        self.my_empty_domain.name, records=['172.16.58.3'], ttl=ttl, type='A')
    self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
    detail = f'Ensure this value is greater than or equal to {self.my_empty_domain.minimum_ttl}.'
    self.assertEqual(response.data['ttl'][0], detail)
    # Bumping the TTL to the minimum must succeed (and hit pdns).
    ttl += 1
    with self.assertPdnsRequests(self.requests_desec_rr_sets_update(name=self.my_empty_domain.name)):
        response = self.client.post_rr_set(
            self.my_empty_domain.name, records=['172.16.17.32'], ttl=ttl, type='A')
        self.assertStatus(response, status.HTTP_201_CREATED)
def test_retrieve_my_rr_sets_apex(self):
    """The apex A record is retrievable via the empty subname."""
    response = self.client.get_rr_set(self.my_rr_set_domain.name, subname='', type_='A')
    self.assertStatus(response, status.HTTP_200_OK)
    self.assertEqual(response.data['ttl'], 3620)
    self.assertEqual(response.data['records'][0], '1.2.3.4')
def test_retrieve_my_rr_sets_restricted_types(self):
    """Restricted types may not be listed, with or without a subname filter."""
    for type_ in self.RESTRICTED_TYPES:
        for query_kwargs in ({'type': type_}, {'type': type_, 'subname': ''}):
            response = self.client.get_rr_sets(self.my_domain.name, **query_kwargs)
            self.assertStatus(response, status.HTTP_403_FORBIDDEN)
def test_update_my_rr_sets(self):
    """PUT replaces an RRset; incomplete PUT payloads are rejected.

    A full payload (records + ttl + type + subname) succeeds (hitting
    pdns) and reads back correctly; payloads missing records or ttl
    must 400.
    """
    for subname in self.SUBNAMES:
        # The successful replace triggers a pdns zone update (mocked).
        with self.assertPdnsRequests(self.requests_desec_rr_sets_update(name=self.my_rr_set_domain.name)):
            data = {'records': ['172.16.31.10'], 'ttl': 3630, 'type': 'A', 'subname': subname}
            response = self.client.put_rr_set(self.my_rr_set_domain.name, subname, 'A', data)
            self.assertStatus(response, status.HTTP_200_OK)
        response = self.client.get_rr_set(self.my_rr_set_domain.name, subname, 'A')
        self.assertStatus(response, status.HTTP_200_OK)
        self.assertEqual(response.data['records'], ['172.16.31.10'])
        self.assertEqual(response.data['ttl'], 3630)
        # PUT requires a complete representation: missing ttl or records -> 400.
        response = self.client.put_rr_set(self.my_rr_set_domain.name, subname, 'A', {'records': ['172.16.31.10']})
        self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
        response = self.client.put_rr_set(self.my_rr_set_domain.name, subname, 'A', {'ttl': 3637})
        self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
def test_update_my_rr_set_with_invalid_payload_type(self):
    """PUT with a list or plain-string payload must 400 with a clear error.

    Uses assertEqual instead of the deprecated assertEquals alias
    (removed in Python 3.12).
    """
    for subname in self.SUBNAMES:
        # A list where a dict is expected:
        data = [{'records': ['172.16.31.10'], 'ttl': 30, 'type': 'A', 'subname': subname}]
        response = self.client.put_rr_set(self.my_rr_set_domain.name, subname, 'A', data)
        self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data['non_field_errors'][0],
                         'Invalid data. Expected a dictionary, but got list.')
        # A bare string where a dict is expected:
        data = 'foobar'
        response = self.client.put_rr_set(self.my_rr_set_domain.name, subname, 'A', data)
        self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data['non_field_errors'][0],
                         'Invalid data. Expected a dictionary, but got str.')
def test_partially_update_my_rr_sets(self):
    """PATCH updates records and/or ttl; an empty PATCH is a no-op 200."""
    for subname in self.SUBNAMES:
        current_rr_set = self.client.get_rr_set(self.my_rr_set_domain.name, subname, 'A').data
        for data in [
            {'records': ['172.16.31.10'], 'ttl': 3630},
            {'records': ['172.16.58.3']},
            {'records': ['172.16.58.3', '192.168.127.12']},
            {'ttl': 3637},
        ]:
            # Each effective PATCH triggers a (mocked) pdns zone update.
            with self.assertPdnsRequests(self.requests_desec_rr_sets_update(name=self.my_rr_set_domain.name)):
                response = self.client.patch_rr_set(self.my_rr_set_domain.name, subname, 'A', data)
                self.assertStatus(response, status.HTTP_200_OK)
            response = self.client.get_rr_set(self.my_rr_set_domain.name, subname, 'A')
            self.assertStatus(response, status.HTTP_200_OK)
            # PATCH changes only the supplied keys; mirror that in the expectation.
            current_rr_set.update(data)
            self.assertEqual(response.data['records'], current_rr_set['records'])
            self.assertEqual(response.data['ttl'], current_rr_set['ttl'])
        # An empty patch payload leaves the RRset untouched (no pdns call).
        response = self.client.patch_rr_set(self.my_rr_set_domain.name, subname, 'A', {})
        self.assertStatus(response, status.HTTP_200_OK)
def test_partially_update_other_rr_sets(self):
    """PATCHing RRsets of a foreign domain must 404 for every subname."""
    payload = {'records': ['3.2.3.4'], 'ttl': 334}
    for subname in self.SUBNAMES:
        self.assertStatus(
            self.client.patch_rr_set(self.other_rr_set_domain.name, subname, 'A', payload),
            status.HTTP_404_NOT_FOUND)
def test_update_other_rr_sets(self):
    """Updating RRsets of a foreign domain must 404 for every subname."""
    # NOTE(review): despite the name ("update"), this calls patch_rr_set —
    # the same verb as test_partially_update_other_rr_sets above.
    # Presumably put_rr_set was intended here; confirm against the
    # client helper API before changing.
    data = {'ttl': 305}
    for subname in self.SUBNAMES:
        response = self.client.patch_rr_set(self.other_rr_set_domain.name, subname, 'A', data)
        self.assertStatus(response, status.HTTP_404_NOT_FOUND)
def test_update_essential_properties(self):
    """subname and type are read-only on update; created/name/domain are ignored.

    Uses assertEqual instead of the deprecated assertEquals alias
    (removed in Python 3.12).
    """
    # Changing the subname is expected to cause an error
    url = self.reverse('v1:rrset', name=self.my_rr_set_domain.name, subname='test', type='A')
    data = {'records': ['3.2.3.4'], 'ttl': 3620, 'subname': 'test2', 'type': 'A'}
    response = self.client.patch(url, data)
    self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(response.data['subname'][0].code, 'read-only-on-update')
    response = self.client.put(url, data)
    self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(response.data['subname'][0].code, 'read-only-on-update')
    # Changing the type is expected to cause an error
    data = {'records': ['3.2.3.4'], 'ttl': 3620, 'subname': 'test', 'type': 'TXT'}
    response = self.client.patch(url, data)
    self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(response.data['type'][0].code, 'read-only-on-update')
    response = self.client.put(url, data)
    self.assertStatus(response, status.HTTP_400_BAD_REQUEST)
    self.assertEqual(response.data['type'][0].code, 'read-only-on-update')
    # Changing "created" is no-op
    response = self.client.get(url)
    data = response.data
    created = data['created']
    data['created'] = '2019-07-19T17:22:49.575717Z'
    response = self.client.patch(url, data)
    self.assertStatus(response, status.HTTP_200_OK)
    response = self.client.put(url, data)
    self.assertStatus(response, status.HTTP_200_OK)
    # Check that nothing changed
    response = self.client.get(url)
    self.assertStatus(response, status.HTTP_200_OK)
    self.assertEqual(response.data['records'][0], '2.2.3.4')
    self.assertEqual(response.data['ttl'], 3620)
    self.assertEqual(response.data['name'], 'test.' + self.my_rr_set_domain.name + '.')
    self.assertEqual(response.data['subname'], 'test')
    self.assertEqual(response.data['type'], 'A')
    self.assertEqual(response.data['created'], created)
    # This is expected to work, but the fields are ignored
    data = {'records': ['172.16.58.3'], 'name': 'example.com.', 'domain': 'example.com'}
    with self.assertPdnsRequests(self.requests_desec_rr_sets_update(name=self.my_rr_set_domain.name)):
        response = self.client.patch(url, data)
        self.assertStatus(response, status.HTTP_200_OK)
    response = self.client.get(url)
    self.assertStatus(response, status.HTTP_200_OK)
    # NOTE(review): the expected record values ('2.2.3.4' above,
    # '3.2.3.4' here after patching records=['172.16.58.3']) look
    # mutually inconsistent — possibly mangled fixtures; verify against
    # the test fixtures before relying on these literals.
    self.assertEqual(response.data['records'][0], '3.2.3.4')
    self.assertEqual(response.data['domain'], self.my_rr_set_domain.name)
    self.assertEqual(response.data['name'], 'test.' + self.my_rr_set_domain.name + '.')
def test_update_unknown_rrset(self):
    """PATCH and PUT on a non-existent RRset both return 404."""
    url = self.reverse('v1:rrset', name=self.my_rr_set_domain.name, subname='doesnotexist', type='A')
    payload = {'records': ['3.2.3.4'], 'ttl': 3620}
    for request in (self.client.patch, self.client.put):
        self.assertStatus(request(url, payload), status.HTTP_404_NOT_FOUND)
def test_delete_my_rr_sets_with_patch(self):
data = {'records': []}
for subname in self.SUBNAMES:
with self.assertPdnsRequests(self.requests_desec_rr_sets_update(name=self.my_rr_set_domain.name)):
response = self.client.patch_rr_set(self.my_rr_set_domain.name, subname, 'A', data)
self.assertStatus(response, status.HTTP_204_NO_CONTENT)
# Deletion is only idempotent via DELETE. For PATCH/PUT, | |
<gh_stars>10-100
#!/usr/bin/env python
# encoding: utf-8
"""
script to install all the necessary things
for working on a linux machine with nothing
Installing minimum dependencies
"""
import sys
import os
import logging
import subprocess
import xml.etree.ElementTree as ElementTree
import xml.dom.minidom as minidom
import socket
import time
import pwd
###---------------------------------------------------##
#  Configuration Section, will be modified by script   #
###---------------------------------------------------##
# apt packages installed on every node.
node_apt_packages = [
    'emacs',
    'git',
    'g++',
    'make',
    'python-numpy',
    'libprotobuf-dev',
    'libcurl4-openssl-dev']
# master only packages
master_apt_packages = [
    'protobuf-compiler']
# List of r packages to be installed in master
master_r_packages = [
    'r-base-dev',
    'r-base',
    'r-cran-statmod',
    'r-cran-RCurl',
    'r-cran-rjson'
]
# download link of hadoop.
hadoop_url = 'http://apache.claz.org/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz'
# Directory name the hadoop tarball unpacks to.
hadoop_dir = 'hadoop-2.8.0'
# customized installation script.
# See optional installation scripts for options.
def custom_master_install():
    """Hook for extra master-only install steps.

    Uncomment the optional installers below to enable them.
    """
    #install_spark()
    #install_r()
    pass
# customized installation script for all nodes.
def custom_all_nodes_install():
    """Hook for extra install steps that run on every node (currently g++-4.9)."""
    install_gcc()
    pass
###---------------------------------------------------##
#  Automatically set by script                         #
###---------------------------------------------------##
# Login user on the nodes.
USER_NAME = 'ubuntu'
# setup variables
# DNS of the master node (from MY_MASTER_DNS; empty if unset).
MASTER = os.getenv('MY_MASTER_DNS', '')
# node type the type of current node
NODE_TYPE = os.getenv('MY_NODE_TYPE', 'm3.xlarge')
# Node memory (MB) and virtual CPU count, read from the environment.
NODE_VMEM = int(os.getenv('MY_NODE_VMEM', str(1024*15)))
NODE_VCPU = int(os.getenv('MY_NODE_VCPU', '4'))
# AWS credentials read from the environment ('undefined' when absent).
AWS_ID = os.getenv('AWS_ACCESS_KEY_ID', 'undefined')
AWS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', 'undefined')
JAVA_HOME = os.getenv('JAVA_HOME')
HADOOP_HOME = os.getenv('HADOOP_HOME')
# Candidate block device names: xvdb .. xvdk.
DISK_LIST = [('xvd' + chr(ord('b') + i)) for i in range(10)]
# Snapshot of the environment, passed to every subprocess call.
ENVIRON = os.environ.copy()
def install_r():
    """Install R (CRAN trusty repo) and the master R packages, if any are configured."""
    if not master_r_packages:
        return
    sudo("apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E084DAB9")
    sudo("echo deb https://cran.r-project.org/bin/linux/ubuntu trusty/ >>/etc/apt/sources.list")
    sudo('apt-get -y update')
    sudo('apt-get -y install %s' % (' '.join(master_r_packages)))
def install_spark():
    """Download Spark 2.1.1 (Hadoop 2.7 build), unpack it, and extend PATH via .bashrc."""
    run('wget https://www.apache.org/dist/spark/spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz')
    run('tar xf spark-2.1.1-bin-hadoop2.7.tgz')
    run('rm -rf spark-2.1.1-bin-hadoop2.7.tgz')
    with open('.bashrc', 'a') as fo:
        # NOTE(review): this appends the (relative) distribution directory
        # itself to PATH, not its bin/ subdirectory — presumably
        # ".../spark-2.1.1-bin-hadoop2.7/bin" was intended; confirm
        # before relying on spark binaries being on PATH.
        fo.write('\nexport PATH=${PATH}:spark-2.1.1-bin-hadoop2.7\n')
def install_xgboost():
    """Clone xgboost (with submodules) and build it with S3 support enabled."""
    run('git clone --recursive https://github.com/dmlc/xgboost')
    build_steps = '; '.join([
        'cd xgboost',
        'cp make/config.mk .',
        'echo USE_S3=1 >> config.mk',
        'make -j4',
    ])
    run(build_steps)
### Script section ###
def run(cmd):
    """Run a shell command, echoing and logging its output; never raises.

    Nonzero exit codes and exceptions are logged and swallowed so a
    single failing step does not abort the whole bootstrap.
    """
    try:
        # Parenthesized single-argument print works on Python 2 and 3
        # (the original bare 'print cmd' statement was Python-2-only).
        print(cmd)
        logging.info(cmd)
        proc = subprocess.Popen(cmd, shell=True, env=ENVIRON,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        retcode = proc.poll()
        if retcode != 0:
            logging.error('Command %s returns %d' % (cmd, retcode))
            logging.error(out)
            logging.error(err)
        else:
            print(out)
    except Exception as e:
        print(str(e))
        logging.error('Exception running: %s' % cmd)
        logging.error(str(e))
        pass
def sudo(cmd):
    """Run *cmd* through run(), prefixed with sudo."""
    run('sudo ' + cmd)
### Installation helpers ###
def install_packages(pkgs):
    """Refresh the apt index, then install every package named in *pkgs*."""
    sudo('apt-get -y update')
    pkg_list = ' '.join(pkgs)
    sudo('apt-get -y install %s' % pkg_list)
# install g++4.9, needed for regex match.
def install_gcc():
    """Install g++-4.9 from the ubuntu-toolchain-r test PPA."""
    for step in ('add-apt-repository -y ppa:ubuntu-toolchain-r/test',
                 'apt-get -y update',
                 'apt-get -y install g++-4.9'):
        sudo(step)
def install_java():
    """
    Install Oracle JDK 8u131 and set up JAVA_HOME.

    Returns
    -------
    list of (str, str)
        Environment variable pairs that need to be exported.
    """
    # Skip the download if a previous run already unpacked the JDK.
    if not os.path.exists('jdk1.8.0_131'):
        # The Cookie header accepts the Oracle license non-interactively.
        run('wget --no-check-certificate --no-cookies'\
            ' --header \"Cookie: oraclelicense=accept-securebackup-cookie\"'\
            ' http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz')
        run('tar xf jdk-8u131-linux-x64.tar.gz')
        run('rm -f jdk-8u131-linux-x64.tar.gz')
    # Point the module-level JAVA_HOME at the unpacked JDK when the
    # environment did not already provide one.
    global JAVA_HOME
    if JAVA_HOME is None:
        JAVA_HOME = os.path.abspath('jdk1.8.0_131')
    return [('JAVA_HOME', JAVA_HOME)]
def install_hadoop(is_master):
    """Download Hadoop 2.8.0, write all its site config files, and
    return the environment variables the caller must export.

    Mutates the module-level HADOOP_HOME when the environment did not
    already define it.

    Parameters
    ----------
    is_master : bool
        Whether this node is the cluster master (reserves cores and
        stops existing daemons accordingly).

    Returns
    -------
    list of (str, str)
        Environment variable pairs to export.
    """
    def update_site(fname, rmap):
        """
        Update (or create) a Hadoop *-site.xml file in place.

        Existing <property> entries named in ``rmap`` get their value
        replaced; missing ones are appended, then the file is rewritten
        pretty-printed.
        """
        try:
            tree = ElementTree.parse(fname)
            root = tree.getroot()
        except Exception:
            # Missing or unparsable file: start from an empty config.
            cfg = ElementTree.Element("configuration")
            tree = ElementTree.ElementTree(cfg)
            root = tree.getroot()
        rset = set()
        # First pass: overwrite values of properties already present.
        for prop in root.getiterator('property'):
            prop = dict((p.tag, p) for p in prop)
            name = prop['name'].text.strip()
            if name in rmap:
                prop['value'].text = str(rmap[name])
                rset.add(name)
        # Second pass: append properties that were not in the file.
        # NOTE: dict.iteritems() makes this script Python-2 only.
        for name, text in rmap.iteritems():
            if name in rset:
                continue
            prop = ElementTree.SubElement(root, 'property')
            ElementTree.SubElement(prop, 'name').text = name
            ElementTree.SubElement(prop, 'value').text = str(text)
        # Round-trip through minidom to get pretty-printed output.
        rough_string = ElementTree.tostring(root, 'utf-8')
        reparsed = minidom.parseString(rough_string)
        pretty = reparsed.toprettyxml(indent='\t')
        fo = open(fname, 'w')
        fo.write(pretty)
        fo.close()
    def setup_hadoop_site(master, hadoop_dir, hdfs_dir, vcpu, vmem):
        """
        setup hadoop side given the parameters
        Parameters
        ----------
        master: the dns to master uri
        hadoop_dir: the directory to store temp files
        hdfs_dir: the directories for hdfs
        vcpu: the number of cpus current machine have
        vmem: the memory(MB) current machine have
        """
        # RAM held back for the OS and daemons, stepped by machine size
        # (presumably following common YARN sizing guidance — the 24 GB
        # and 48 GB tiers both reserve 2 GB; confirm that is intended).
        if vmem < 4 * 1024:
            reserved_ram = 256
        elif vmem < 8 * 1024:
            reserved_ram = 1 * 1024
        elif vmem < 24 * 1024 :
            reserved_ram = 2 * 1024
        elif vmem < 48 * 1024:
            reserved_ram = 2 * 1024
        elif vmem < 64 * 1024:
            reserved_ram = 6 * 1024
        else:
            reserved_ram = 8 * 1024
        # Python-2 integer division: whole MB per container.
        ram_per_container = (vmem - reserved_ram) / vcpu
        if is_master:
            # Leave two cores free on the master for its own daemons.
            vcpu = vcpu - 2
        tmp_dir = hadoop_dir[0]
        core_site = {
            'fs.defaultFS': 'hdfs://%s:9000/' % master,
            'fs.s3n.impl': 'org.apache.hadoop.fs.s3native.NativeS3FileSystem',
            'hadoop.tmp.dir': tmp_dir
        }
        # Only write AWS credentials when they were actually configured.
        if AWS_ID != 'undefined':
            core_site['fs.s3n.awsAccessKeyId'] = AWS_ID
            core_site['fs.s3n.awsSecretAccessKey'] = AWS_KEY
        update_site('%s/etc/hadoop/core-site.xml' % HADOOP_HOME, core_site)
        hdfs_site = {
            'dfs.data.dir': ','.join(['%s/data' % d for d in hdfs_dir]),
            'dfs.permissions': 'false',
            'dfs.replication': '1'
        }
        update_site('%s/etc/hadoop/hdfs-site.xml' % HADOOP_HOME, hdfs_site)
        yarn_site = {
            'yarn.resourcemanager.resource-tracker.address': '%s:8025' % master,
            'yarn.resourcemanager.scheduler.address': '%s:8030' % master,
            'yarn.resourcemanager.address': '%s:8032' % master,
            'yarn.scheduler.minimum-allocation-mb': 512,
            'yarn.scheduler.maximum-allocation-mb': 640000,
            'yarn.scheduler.minimum-allocation-vcores': 1,
            'yarn.scheduler.maximum-allocation-vcores': 32,
            'yarn.nodemanager.resource.memory-mb': vcpu * ram_per_container,
            'yarn.nodemanager.resource.cpu-vcores': vcpu,
            'yarn.log-aggregation-enable': 'true',
            'yarn.nodemanager.vmem-check-enabled': 'false',
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.nodemanager.aux-services.mapreduce.shuffle.class': 'org.apache.hadoop.mapred.ShuffleHandler',
            'yarn.nodemanager.remote-app-log-dir': os.path.join(tmp_dir, 'logs'),
            'yarn.nodemanager.log-dirs': os.path.join(tmp_dir, 'userlogs'),
            'yarn.nodemanager.local-dirs': ','.join(['%s/yarn/nm-local-dir' % d for d in hadoop_dir])
        }
        update_site('%s/etc/hadoop/yarn-site.xml' % HADOOP_HOME, yarn_site)
        mapred_site = {
            'mapreduce.application.classpath' : ':'.join(['$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*',
                                                          '$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*',
                                                          '$HADOOP_MAPRED_HOME/share/hadoop/tools/lib/*']),
            'yarn.app.mapreduce.am.resource.mb': 2 * ram_per_container,
            'yarn.app.mapreduce.am.command-opts': '-Xmx%dm' % int(0.8 * 2 * ram_per_container),
            'mapreduce.framework.name': 'yarn',
            'mapreduce.map.cpu.vcores': 1,
            'mapreduce.map.memory.mb': ram_per_container,
            'mapreduce.map.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container),
            'mapreduce.reduce.cpu.vcores': 1,
            'mapreduce.reduce.memory.mb': 2 * ram_per_container,
            'mapreduce.reduce.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container)
        }
        update_site('%s/etc/hadoop/mapred-site.xml' % HADOOP_HOME, mapred_site)
        capacity_site = {
            'yarn.scheduler.capacity.resource-calculator': 'org.apache.hadoop.yarn.util.resource.DominantResourceCalculator'
        }
        update_site('%s/etc/hadoop/capacity-scheduler.xml' % HADOOP_HOME, capacity_site)
        # hadoop-env.sh: classpath, log dirs and JAVA_HOME for the daemons.
        fo = open('%s/etc/hadoop/hadoop-env.sh' % HADOOP_HOME, 'w')
        fo.write('export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$HADOOP_PREFIX/share/hadoop/tools/lib/*\n')
        fo.write('export HADOOP_LOG_DIR=%s/log\n' % tmp_dir)
        fo.write('export YARN_LOG_DIR=%s/log\n' % tmp_dir)
        fo.write('export JAVA_HOME=\"%s\"\n' % JAVA_HOME)
        fo.close()
        # NOTE(review): the slaves file is written with only the master
        # host — confirm worker hosts are registered elsewhere.
        fo = open('%s/etc/hadoop/slaves' % HADOOP_HOME, 'w')
        fo.write(master + '\n')
        fo.close()
    def run_install():
        # Download and unpack Hadoop once; hadoop_url is defined
        # elsewhere in this file.
        if not os.path.exists('hadoop-2.8.0'):
            run('wget %s' % hadoop_url)
            run('tar xf hadoop-2.8.0.tar.gz')
            run('rm -f hadoop-2.8.0.tar.gz')
        global HADOOP_HOME
        if HADOOP_HOME is None:
            HADOOP_HOME = os.path.abspath('hadoop-2.8.0')
        env = [('HADOOP_HOME', HADOOP_HOME)]
        env += [('HADOOP_PREFIX', HADOOP_HOME)]
        env += [('HADOOP_MAPRED_HOME', HADOOP_HOME)]
        env += [('HADOOP_COMMON_HOME', HADOOP_HOME)]
        env += [('HADOOP_HDFS_HOME', HADOOP_HOME)]
        env += [('YARN_HOME', HADOOP_HOME)]
        env += [('YARN_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)]
        env += [('HADOOP_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)]
        # Use every attached disk that is actually present.
        disks = ['/disk/%s' % d for d in DISK_LIST if os.path.exists('/dev/%s' % d)]
        setup_hadoop_site(MASTER,
                          ['%s/hadoop' % d for d in disks],
                          ['%s/hadoop/dfs' % d for d in disks],
                          NODE_VCPU, NODE_VMEM)
        return env
    return run_install()
def regsshkey(fname):
    """Refresh known_hosts entries for every host listed in *fname*.

    For each host in the file (plus 'localhost' and '0.0.0.0'), any
    stale key is removed with ssh-keygen -R and the host is re-scanned
    into ~/.ssh/known_hosts, so passwordless ssh between nodes does not
    trip on changed host keys.

    Parameters
    ----------
    fname : str
        Path to a file with one hostname per line (the Hadoop slaves file).
    """
    # Close the file handle deterministically (the original leaked it).
    with open(fname) as fo:
        hosts = fo.readlines()
    for dns in hosts + ['localhost', '0.0.0.0']:
        dns = dns.strip()
        try:
            run('ssh-keygen -R %s' % dns)
        except Exception:
            # Best effort: a missing known_hosts entry is fine.  The
            # original used a bare except, which would also swallow
            # KeyboardInterrupt/SystemExit.
            pass
        run('ssh-keyscan %s >> ~/.ssh/known_hosts' % dns)
# main script to install all dependencies
def install_main(is_master):
    """Install packages, Java and Hadoop, then persist the environment.

    Writes every exported variable to ~/.hadoop_env (sourced from
    ~/.bashrc) and mirrors it into the module-level ENVIRON so commands
    spawned later by run() see the same environment.  Finally sets up
    passwordless ssh between the nodes listed in the slaves file.
    """
    if is_master:
        install_packages(master_apt_packages + node_apt_packages)
    else:
        install_packages(node_apt_packages)
    env = []
    env += install_java()
    env += install_hadoop(is_master)
    path = ['$HADOOP_HOME/bin', '$HADOOP_HOME/sbin', '$JAVA_HOME/bin']
    # LD_LIBRARY_PATH is built up across several entries; later entries
    # reference the earlier value via ${LD_LIBRARY_PATH} when expanded.
    env += [('LD_LIBRARY_PATH', '$HADOOP_HOME/native/lib')]
    env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:$HADOOP_HDFS_HOME/lib/native:$JAVA_HOME/jre/lib/amd64/server')]
    env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:/usr/local/lib')]
    # NOTE(review): '--Xmx128m' looks like a typo for the JVM flag
    # '-Xmx128m' — confirm before relying on libhdfs heap sizing.
    env += [('LIBHDFS_OPTS', '--Xmx128m')]
    env += [('MY_MASTER_DNS', MASTER)]
    env += [('MY_NODE_TYPE', NODE_TYPE)]
    env += [('MY_NODE_VMEM', str(NODE_VMEM))]
    env += [('MY_NODE_VCPU', str(NODE_VCPU))]
    if AWS_ID != 'undefined':
        env += [('AWS_ACCESS_KEY_ID', AWS_ID)]
    if AWS_KEY != 'undefined':
        env += [('AWS_SECRET_ACCESS_KEY', AWS_KEY)]
    # setup environments
    fo = open('.hadoop_env', 'w')
    for k, v in env:
        fo.write('export %s=%s\n' % (k,v))
        ENVIRON[k] = v
    fo.write('export PATH=$PATH:%s\n' % (':'.join(path)))
    fo.write('export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib\n')
    fo.close()
    # Skip the rest (bashrc hook + ssh key setup) when a previous run
    # already wired .hadoop_env into .bashrc.
    for l in open('.bashrc'):
        if l.find('.hadoop_env') != -1:
            return
    run('echo source ~/.hadoop_env >> ~/.bashrc')
    # allow ssh, if they already share the key.
    key_setup = """
    [ -f ~/.ssh/id_rsa ] ||
        (ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
            cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
    """
    run(key_setup)
    regsshkey('%s/etc/hadoop/slaves' % HADOOP_HOME)
# end of installation.
# Make startup script for building
def make_startup_script(is_master):
    """Generate startup.sh (format disks, start daemons) and run it.

    WARNING: every attached /dev/xvd[b-k] device is re-created with
    mkfs, so any data on those disks is destroyed on each bootstrap.
    """
    assert JAVA_HOME is not None
    assert HADOOP_HOME is not None
    assert NODE_VCPU is not None
    assert NODE_VMEM is not None
    disks = []
    cmds = []
    if is_master:
        # Stop any running cluster before re-provisioning.
        cmds.append('$HADOOP_HOME/sbin/stop-all.sh')
    # Re-format and mount every attached data disk under /disk/<dev>.
    for d in DISK_LIST:
        if os.path.exists('/dev/%s' % d):
            cmds.append('sudo umount /dev/%s' % d)
            cmds.append('sudo mkfs -t ext4 /dev/%s' % d)
            cmds.append('sudo mkdir -p /disk/%s' % d)
            cmds.append('sudo mount /dev/%s /disk/%s' % (d, d))
            disks.append('/disk/%s' % d)
    # Lay out hadoop/tmp directories and fresh HDFS data dirs per disk.
    # NOTE(review): 'ubuntu:ubuntu' assumes an Ubuntu AMI login user.
    for d in disks:
        cmds.append('sudo mkdir -p %s/hadoop' %d)
        cmds.append('sudo chown ubuntu:ubuntu %s/hadoop' % d)
        cmds.append('sudo mkdir -p %s/tmp' %d)
        cmds.append('sudo chown ubuntu:ubuntu %s/tmp' % d)
        cmds.append('rm -rf %s/hadoop/dfs' % d)
        cmds.append('mkdir %s/hadoop/dfs' % d)
        cmds.append('mkdir %s/hadoop/dfs/name' % d)
        cmds.append('mkdir %s/hadoop/dfs/data' % d)
    # run command
    if is_master:
        # Master formats the namenode and starts the whole cluster.
        cmds.append('$HADOOP_HOME/bin/hadoop namenode -format')
        cmds.append('$HADOOP_HOME/sbin/start-all.sh')
    else:
        # Workers only start their local nodemanager.
        cmds.append('export HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec &&'\
                    ' $HADOOP_HOME/sbin/yarn-daemon.sh --config $HADOOP_HOME/etc/hadoop start nodemanager')
    with open('startup.sh', 'w') as fo:
        fo.write('#!/bin/bash\n')
        fo.write('set -v\n')
        fo.write('\n'.join(cmds))
    run('chmod +x startup.sh')
    run('./startup.sh')
def main():
    """Bootstrap entry point: install everything, start daemons, run hooks.

    Determines whether this node is the master (falling back to "I am
    the master" when MASTER is unset), runs the installation and startup
    steps, then the custom install hooks, logging elapsed times.
    """
    global MASTER
    logging.basicConfig(filename = 'bootstrap.log', level = logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')
    if MASTER == '':
        # No master configured: assume this node is the master.
        is_master = True
        MASTER = socket.getfqdn()
        logging.info('assuming master is myself as %s' % MASTER)
    else:
        is_master = socket.getfqdn() == MASTER
    tstart = time.time()
    install_main(is_master)
    tmid = time.time()
    logging.info('installation finishes in %g secs' % (tmid - tstart))
    make_startup_script(is_master)
    ENVIRON['HADOOP_HOME'] = HADOOP_HOME
    ENVIRON['JAVA_HOME'] = JAVA_HOME
    if is_master:
        custom_master_install()
    custom_all_nodes_install()
    # Sample the end time *after* the custom hooks so the reported
    # durations include them (the original sampled it before the hooks,
    # under-reporting both totals).  Also fixes the 'boostrap' typo.
    tend = time.time()
    logging.info('bootstrap finishes in %g secs' % (tend - tmid))
    logging.info('all finishes in %g secs' % (tend - tstart))
if __name__ == '__main__':
pw_record = pwd.getpwnam(USER_NAME)
user_name = pw_record.pw_name
user_home_dir = pw_record.pw_dir
user_uid = pw_record.pw_uid
user_gid = pw_record.pw_gid
env = os.environ.copy()
cwd = | |
0.03
1 2.17e+06 273.50 | 255.95 0.0 1000 0 | 0.05 0.00 78.21 0.03
1 2.18e+06 273.50 | 249.94 0.0 1000 0 | 0.05 0.00 78.31 0.03
1 2.20e+06 273.50 | 253.91 0.0 1000 0 | 0.05 0.00 78.00 0.03
1 2.21e+06 274.55 |
1 2.21e+06 274.55 | 274.55 0.3 1000 0 | 0.05 0.00 77.51 0.03
1 2.22e+06 274.55 | 267.84 0.0 1000 0 | 0.05 0.57 76.41 0.03
1 2.23e+06 279.16 |
1 2.23e+06 279.16 | 279.16 2.5 1000 0 | 0.06 0.00 75.55 0.03
1 2.24e+06 279.16 | 273.00 0.0 1000 0 | 0.05 0.26 74.95 0.03
1 2.26e+06 279.16 | 264.01 0.0 1000 0 | 0.05 0.30 73.58 0.03
1 2.27e+06 279.16 | 267.32 0.0 1000 0 | 0.05 0.00 73.75 0.03
1 2.28e+06 279.16 | 269.79 0.0 1000 0 | 0.05 0.00 72.23 0.03
1 2.29e+06 279.16 | 268.52 0.0 1000 0 | 0.05 0.00 72.87 0.03
1 2.30e+06 279.16 | 265.92 0.0 1000 0 | 0.05 0.25 72.11 0.03
1 2.32e+06 279.16 | 276.36 0.0 1000 0 | 0.05 0.00 71.94 0.03
1 2.33e+06 279.16 | 270.18 0.0 1000 0 | 0.05 0.00 71.86 0.03
1 2.34e+06 279.16 | 268.08 0.0 1000 0 | 0.05 0.00 72.68 0.03
1 2.35e+06 279.16 | 254.55 0.0 1000 0 | 0.05 0.00 71.80 0.03
1 2.36e+06 279.16 | 271.47 0.0 1000 0 | 0.05 0.30 71.55 0.03
1 2.38e+06 279.16 | 263.83 0.0 1000 0 | 0.05 0.00 72.41 0.03
1 2.39e+06 279.16 | 265.38 0.0 1000 0 | 0.05 0.00 72.12 0.03
1 2.40e+06 279.16 | 267.98 0.0 1000 0 | 0.05 0.00 71.99 0.03
1 2.41e+06 279.16 | 268.60 0.0 1000 0 | 0.05 0.00 71.62 0.03
1 2.43e+06 279.16 | 264.70 0.0 1000 0 | 0.05 0.00 72.49 0.03
1 2.44e+06 279.16 | 262.57 0.0 1000 0 | 0.05 0.00 72.16 0.03
1 2.46e+06 279.16 | 256.97 0.0 1000 0 | 0.05 0.00 72.79 0.03
1 2.47e+06 279.16 | 263.02 0.0 1000 0 | 0.05 0.00 72.96 0.03
1 2.48e+06 279.16 | 271.66 0.0 1000 0 | 0.05 0.00 73.30 0.03
1 2.50e+06 279.16 | 275.71 0.0 1000 0 | 0.05 0.00 72.58 0.03
1 3.00e+06 282.71 | 272.30 0.0 1000 0 | 0.06 0.00 75.52 0.03
1 3.01e+06 282.71 | 277.51 0.0 1000 0 | 0.05 0.00 75.08 0.03
1 3.03e+06 282.71 | 277.76 0.0 1000 0 | 0.06 0.00 75.68 0.03
1 3.04e+06 282.71 | 279.86 0.0 1000 0 | 0.06 0.00 74.71 0.03
1 3.05e+06 282.71 | 270.92 0.0 1000 0 | 0.06 0.00 75.04 0.03
1 3.07e+06 282.71 | 271.82 0.0 1000 0 | 0.06 0.00 75.17 0.03
1 3.08e+06 282.71 | 272.38 0.0 1000 0 | 0.06 0.00 74.64 0.03
1 3.09e+06 282.71 | 278.07 0.0 1000 0 | 0.06 0.00 74.59 0.03
1 3.11e+06 282.71 | 278.19 0.0 1000 0 | 0.06 0.00 74.55 0.03
1 3.12e+06 282.71 | 274.46 0.0 1000 0 | 0.06 0.00 74.46 0.03
1 3.13e+06 282.71 | 277.75 0.0 1000 0 | 0.06 0.00 74.46 0.03
1 3.15e+06 282.71 | 273.52 0.0 1000 0 | 0.06 0.30 74.14 0.03
1 3.16e+06 282.71 | 277.32 0.0 1000 0 | 0.06 0.00 73.91 0.03
1 3.17e+06 284.77 |
1 3.17e+06 284.77 | 284.77 0.6 1000 0 | 0.06 0.00 74.40 0.03
1 3.19e+06 284.77 | 282.03 0.0 1000 0 | 0.06 0.25 74.23 0.03
1 3.20e+06 284.77 | 34.92 0.0 1000 0 | 0.01 0.31 82.79 0.07
1 3.21e+06 284.77 | 189.63 0.0 1000 0 | 0.02 0.00 83.09 0.03
1 3.23e+06 284.77 | 265.40 0.0 1000 0 | 0.05 0.00 82.80 0.03
1 3.24e+06 284.77 | 280.59 0.0 1000 0 | 0.06 0.00 81.42 0.03
1 3.25e+06 286.19 |
1 3.25e+06 286.19 | 286.19 0.8 1000 0 | 0.06 0.00 81.49 0.03
1 3.26e+06 286.19 | 279.21 0.0 1000 0 | 0.06 0.27 79.98 0.03
1 3.28e+06 286.19 | 277.80 0.0 1000 0 | 0.06 0.00 80.00 0.03
1 3.29e+06 286.19 | 281.87 0.0 1000 0 | 0.05 0.00 79.75 0.03
1 3.30e+06 286.19 | 284.48 0.0 1000 0 | 0.06 0.32 79.12 0.03
1 3.31e+06 286.19 | 277.54 0.0 1000 0 | 0.06 0.63 78.93 0.03
1 3.33e+06 286.19 | 274.09 0.0 1000 0 | 0.05 0.00 78.19 0.03
1 3.34e+06 286.19 | 277.04 0.0 1000 0 | 0.01 0.00 77.78 0.03
1 3.35e+06 286.19 | 283.19 0.0 1000 0 | 0.06 0.00 77.21 0.03
1 3.36e+06 286.19 | 280.71 0.0 1000 0 | 0.06 0.00 77.49 0.03
1 3.38e+06 286.19 | 277.38 0.0 1000 0 | 0.06 0.00 76.72 0.03
1 3.39e+06 286.19 | 283.72 0.0 1000 0 | 0.06 0.00 76.55 0.03
1 3.40e+06 286.19 | 278.91 0.0 1000 0 | 0.06 0.00 76.37 0.03
1 3.41e+06 286.19 | 273.63 0.0 1000 0 | 0.06 0.00 75.69 0.03
1 3.43e+06 286.19 | 283.73 0.0 1000 0 | 0.06 0.00 76.27 0.03
1 3.44e+06 286.19 | 266.33 0.0 1000 0 | 0.05 0.00 76.06 0.03
1 3.45e+06 288.79 |
1 3.45e+06 288.79 | 288.79 1.5 1000 0 | 0.06 0.00 76.05 0.03
1 3.46e+06 288.79 | 279.64 0.0 1000 0 | 0.06 0.00 76.04 0.03
1 3.48e+06 288.79 | 288.71 1.8 1000 0 | 0.06 0.00 75.77 0.03
1 3.49e+06 288.79 | 282.01 0.0 1000 0 | 0.06 0.00 75.53 0.03
1 3.50e+06 288.79 | 277.28 0.0 1000 0 | 0.06 0.00 75.07 0.03
1 4.00e+06 288.79 | 273.84 0.0 1000 0 | 0.05 0.00 80.36 0.03
1 4.01e+06 288.79 | 279.71 0.0 1000 0 | 0.06 0.00 79.80 0.03
1 4.03e+06 288.79 | 279.59 0.0 1000 0 | 0.06 0.00 78.89 0.03
| UsedTime: 64403 | SavedDir: ./Swimmer-v3_SAC_1
| Learner: Save in ./Swimmer-v3_SAC_1
################################################################################
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
2 4.00e+03 -6.28 |
2 4.00e+03 -6.28 | -6.28 2.6 1000 0 | -0.00 0.79 0.93 0.50
2 2.10e+05 -6.28 | -11.70 0.0 1000 0 | -0.00 0.06 33.38 0.14
2 3.02e+05 23.82 |
2 3.02e+05 23.82 | 23.82 9.6 1000 0 | 0.00 0.09 53.78 0.11
2 3.74e+05 23.82 | 14.50 0.0 1000 0 | 0.00 0.10 68.90 0.09
2 4.34e+05 23.82 | -58.80 0.0 1000 0 | -0.00 0.09 75.72 0.07
2 4.88e+05 47.66 |
2 4.88e+05 47.66 | 47.66 5.9 1000 0 | 0.01 0.09 80.90 0.06
2 5.36e+05 47.66 | 33.35 0.0 1000 0 | 0.01 0.11 84.02 0.05
2 5.82e+05 47.66 | 30.58 0.0 1000 0 | 0.01 0.09 86.48 0.05
2 6.24e+05 47.66 | -7.63 0.0 1000 0 | -0.00 0.10 88.60 0.04
2 6.64e+05 47.66 | 18.22 0.0 1000 0 | 0.00 0.10 89.48 0.04
2 7.02e+05 47.66 | 34.68 0.0 1000 0 | 0.01 0.11 87.96 0.04
2 7.38e+05 59.89 |
2 7.38e+05 59.89 | 59.89 0.5 1000 0 | 0.01 0.08 87.32 0.04
2 7.72e+05 59.89 | 30.50 0.0 1000 0 | 0.01 0.09 84.90 0.03
2 8.04e+05 59.89 | 51.30 0.0 1000 0 | 0.01 0.08 81.95 0.03
2 8.36e+05 59.89 | 42.13 0.0 1000 0 | 0.01 0.10 78.65 0.02
2 8.66e+05 59.89 | 41.95 0.0 1000 0 | 0.01 0.10 76.30 0.03
2 8.96e+05 59.89 | 48.86 0.0 1000 0 | 0.01 0.08 74.73 0.03
2 9.24e+05 59.89 | 12.75 0.0 1000 0 | 0.01 0.08 71.93 0.02
2 9.52e+05 59.89 | 43.57 0.0 1000 0 | 0.01 0.08 69.69 0.02
2 9.78e+05 59.89 | 52.49 0.0 1000 0 | 0.01 0.08 67.37 0.02
2 1.00e+06 59.89 | 57.30 0.0 1000 0 | 0.01 0.06 65.46 0.02
2 1.03e+06 59.89 | 36.17 0.0 1000 0 | 0.01 0.07 63.51 0.02
2 1.05e+06 62.24 |
2 1.05e+06 62.24 | 62.24 4.4 1000 0 | 0.02 0.07 61.56 0.02
2 1.08e+06 99.75 |
2 1.08e+06 99.75 | 99.75 0.5 1000 0 | 0.02 0.07 59.61 0.02
2 1.10e+06 99.75 | 36.13 0.0 1000 0 | 0.03 0.06 57.29 0.02
2 1.12e+06 179.45 |
2 1.12e+06 179.45 | 179.45 1.3 1000 0 | 0.03 0.05 56.03 0.02
2 1.15e+06 179.45 | 58.21 0.0 1000 0 | 0.03 0.06 55.73 0.02
2 1.17e+06 179.45 | 110.22 0.0 1000 0 | 0.04 0.06 55.51 0.03
2 1.19e+06 207.64 |
2 1.19e+06 207.64 | 207.64 2.1 1000 0 | 0.04 0.06 55.21 0.03
2 1.21e+06 216.00 |
2 1.21e+06 216.00 | 216.00 0.4 1000 0 | 0.05 0.05 55.78 0.03
2 1.23e+06 216.00 | 215.36 0.0 1000 0 | 0.04 0.06 55.62 0.03
2 1.25e+06 220.60 |
2 1.25e+06 220.60 | |
# Source repository: PsycleResearch/django-url-filter
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import abc
import re
from functools import wraps
import six
from cached_property import cached_property
from django import forms
from django.core.exceptions import ValidationError
from .constants import StrictMode
from .fields import MultipleValuesField
from .utils import FilterSpec, dict_pop
# Lookups that take several values: the filter's own form field is
# wrapped in a MultipleValuesField so each value is cleaned by the
# child field.  Values are factory callables because the wrapper needs
# per-filter kwargs (e.g. the child field) at call time.
MANY_LOOKUP_FIELD_OVERWRITES = {
    "in": lambda **kwargs: MultipleValuesField(min_values=1, **kwargs),
    "iin": lambda **kwargs: MultipleValuesField(min_values=1, **kwargs),
    # range takes exactly two values (lower and upper bound);
    # dict_pop presumably strips 'all_valid' from the kwargs since the
    # field does not accept it here — see .utils.dict_pop.
    "range": lambda **kwargs: MultipleValuesField(
        min_values=2, max_values=2, **dict_pop("all_valid", kwargs)
    ),
}
# Per-lookup form-field overrides: regardless of the filter's own form
# field, these lookups always validate their value with the field here
# (e.g. `isnull` takes a boolean even on a CharField filter).
LOOKUP_FIELD_OVERWRITES = {
    "isnull": forms.BooleanField(required=False),
    "second": forms.IntegerField(min_value=0, max_value=59),
    "minute": forms.IntegerField(min_value=0, max_value=59),
    "hour": forms.IntegerField(min_value=0, max_value=23),
    "week_day": forms.IntegerField(min_value=1, max_value=7),
    "day": forms.IntegerField(min_value=1, max_value=31),
    # Bound to the calendar range for consistency with the other
    # datetime components (the original accepted any integer).
    "month": forms.IntegerField(min_value=1, max_value=12),
    "year": forms.IntegerField(min_value=0, max_value=9999),
    "regex": forms.CharField(),
    "iregex": forms.CharField(),
    "len": forms.IntegerField(min_value=0),
}
# Matches custom filter-callable method names such as
# "filter_email_for_django" -> filter="email", backend="django".
# \w already includes digits, so the original class [\w\d] was redundant.
LOOKUP_CALLABLE_FROM_METHOD_REGEX = re.compile(
    r"^filter_(?P<filter>\w+)_for_(?P<backend>\w+)$"
)
class BaseFilter(six.with_metaclass(abc.ABCMeta, object)):
    """
    Common base for both filters and filtersets.

    Only the bare-minimum shared plumbing lives here (binding to a
    parent, source resolution, root lookup); everything else is left to
    subclasses.  Sharing a single base class also lets the filterset
    metaclass discover declared filters with a simple instance check.

    Parameters
    ----------
    source : str
        Name of the attribute on the filtered queryset's model which
        this filter targets, as given to the :class:`.FilterSet`.

    Attributes
    ----------
    parent : :class:`.FilterSet`
        The :class:`.FilterSet` this filter is bound to
    name : str
        The attribute name under which this filter was declared in the
        parent :class:`.FilterSet`
    is_bound : bool
        Whether :meth:`bind` has been called yet
    """

    def __init__(self, source=None, *args, **kwargs):
        self._source = source
        self.is_bound = False
        self.name = None
        self.parent = None

    def __repr__(self):
        text = self.repr()
        if six.PY3:
            return text
        return text.encode("utf-8")

    @abc.abstractmethod
    def repr(self, prefix=""):
        """
        Build the representation of the filter or its subclasses.

        Subclasses **must** implement this method.

        .. note::
            This method should return unicode text data

        Parameters
        ----------
        prefix : str
            Indentation prefix prepended by parent filtersets when they
            render nested descendants, so the combined representation of
            a whole filterset tree keeps its visual structure.
        """

    @property
    def source(self):
        """
        Source field/attribute in queryset model to be used for filtering.

        Falls back to the declared filter name when no explicit
        ``source`` was given at construction time::

            >>> from .filtersets import FilterSet
            >>> class MyFilterSet(FilterSet):
            ...     foo = Filter(form_field=forms.CharField())
            ...     bar = Filter(source='stuff', form_field=forms.CharField())
            >>> fs = MyFilterSet()
            >>> print(fs.filters['foo'].source)
            foo
            >>> print(fs.filters['bar'].source)
            stuff
        """
        if self._source:
            return self._source
        return self.name

    @property
    def components(self):
        """
        Source names of every ancestor filterset plus this filter.
        """
        if self.parent is None:
            return []
        return self.parent.components + [self.source]

    def bind(self, name, parent):
        """
        Attach this filter to *parent* under the attribute *name*.

        Meant to be called by the parent :class:`.FilterSet` while it is
        being constructed.
        """
        self.is_bound = True
        self.name = name
        self.parent = parent

    @property
    def root(self):
        """
        Topmost filterset in the parent chain (``self`` when unbound).
        """
        node = self
        while node.parent is not None:
            node = node.parent
        return node
class Filter(BaseFilter):
"""
Class which job is to convert leaf :class:`.LookupConfig` to
:class:`.FilterSpec`
Each filter by itself is meant to be used a "filter" (field) in the
:class:`.FilterSet`.
Examples
--------
::
>>> from .filtersets import FilterSet
>>> class MyFilterSet(FilterSet):
... foo = Filter(forms.CharField())
... bar = Filter(forms.IntegerField())
Parameters
----------
form_field : Field
Instance of Django's ``forms.Field`` which will be used
to clean the filter value as provided in the queryset.
For example if field is ``IntegerField``, this filter
will make sure to convert the filtering value to integer
before creating a :class:`.FilterSpec`.
lookups : list, optional
List of strings of allowed lookups for this filter.
By default all supported lookups are allowed.
default_lookup : str, optional
If the lookup is not provided in the querystring lookup key,
this lookup will be used. By default ``exact`` lookup is used.
For example the default lookup is used when querystring key is
``user__profile__email`` which is missing the lookup so ``exact``
will be used.
is_default : bool, optional
Boolean specifying if this filter should be used as a default
filter in the parent :class:`.FilterSet`.
By default it is ``False``.
Primarily this is used when querystring lookup key
refers to a nested :class:`.FilterSet` however it does not specify
which filter to use. For example lookup key ``user__profile``
intends to filter something in the user's profile however
it does not specify by which field to filter on.
In that case the default filter within profile :class:`.FilterSet`
will be used. At most, one default filter should be provided
in the :class:`.FilterSet`.
no_lookup : bool, optional
When ``True``, this filter does not allow to explicitly specify
lookups in the URL. For example ``id__gt`` will not be allowed.
This is useful when a given filter should only support a single
lookup and that lookup name should not be exposed in the URL.
This is of particular use when defining custom callable filters.
By default it is ``False``.
Attributes
----------
form_field : Field
Django form field which is provided in initialization which
should be used to validate data as provided in the querystring
default_lookup : str
Default lookup to be used as provided in initialization
is_default : bool
If this filter should be a default filter as provided in initialization
no_lookup : str
If this filter should not support explicit lookups as provided in initialization
"""
def __init__(
self,
form_field,
lookups=None,
default_lookup="exact",
is_default=False,
no_lookup=False,
*args,
**kwargs
):
super(Filter, self).__init__(*args, **kwargs)
self.form_field = form_field
self._given_lookups = lookups
self.default_lookup = default_lookup or self.default_lookup
self.is_default = is_default
self.no_lookup = no_lookup
    def repr(self, prefix=""):
        """
        Get custom representation of the filter
        The representation includes the following information:
        * filter class name
        * source name (same as :attr:`.source`) when filter is bound to parent
        * primary form field (same as :attr:`.form_field`)
        * which lookups this filter supports
        * default lookup (same as :attr:`.default_lookup`)
        * if the filter is a default filter (same as :attr:`.is_default`) when
          filter is bound to parent
        * if this filter does not support explicit lookups (same as :attr:`.no_lookup`)

        Parameters
        ----------
        prefix : str
            Unused here; accepted for signature compatibility with
            :meth:`BaseFilter.repr`.
        """
        # The source/is_default segments are rendered only once the
        # filter is bound, since they are unknown before bind().
        return (
            "{name}("
            "{source}"
            "form_field={form_field}, "
            "lookups={lookups}, "
            'default_lookup="{default_lookup}", '
            "{is_default}"
            "no_lookup={no_lookup}"
            ")"
            "".format(
                name=self.__class__.__name__,
                source='source="{}", '.format(self.source) if self.is_bound else "",
                form_field=self.form_field.__class__.__name__,
                lookups=self._given_lookups or "ALL",
                default_lookup=self.default_lookup,
                is_default="is_default={}, ".format(self.is_default)
                if self.is_bound
                else "",
                no_lookup=self.no_lookup,
            )
        )
@cached_property
def lookups(self):
"""
Cached property for getting lookups this filter supports
The reason why we need as a property is because lookups
cant be hardcoded. There are 3 main distinct possibilities
which drive which lookups are supported:
* lookups were explicitly provided in the filter instantiation
in which case we use those lookups. For example::
>>> f = Filter(forms.CharField(), lookups=['exact', 'contains'])
* when filter is already bound to a parent filterset and root
filterset has a defined ``filter_backend`` we use supported
lookups as explicitly defined by the backend. This is necessary
since different backends support different sets of lookups.
* when nether lookups are explicitly provided and filter is not bound
yet we have no choice but not support any lookups and so we
use empty set as supported lookups
"""
if self._given_lookups:
return set(self._given_lookups)
if hasattr(self.root, "filter_backend"):
return self.root.filter_backend.supported_lookups
return set()
def get_form_field(self, lookup):
"""
Get the form field for a particular lookup.
This method does not blindly return :attr:`.form_field` attribute
since some lookups require to use different validations.
For example if the :attr:`.form_field` is ``CharField`` but
the lookup is ``isnull``, it makes more sense to use
``BooleanField`` as form field.
Parameters
----------
lookup : str
Name of the lookup
Returns
-------
Field
Instantiated form field appropriate for the given lookup.
"""
if lookup in MANY_LOOKUP_FIELD_OVERWRITES:
return MANY_LOOKUP_FIELD_OVERWRITES[lookup](
child=self.form_field,
all_valid=getattr(self.root, "strict_mode", StrictMode.fail)
== StrictMode.fail,
)
elif lookup in LOOKUP_FIELD_OVERWRITES:
| |
authenticated user')
    def test_detail_show_flags_for_not_int(self):
        """A non-integer show_flags_for value is rejected with HTTP 400."""
        rating = Rating.objects.create(
            addon=self.addon, body='review', user=user_factory())
        detail_url = reverse_ns(self.detail_url_name, kwargs={'pk': rating.pk})
        response = self.client.get(detail_url, {'show_flags_for': 'nope'})
        assert response.status_code == 400
        assert response.data['detail'] == (
            'show_flags_for parameter value should be equal to the user '
            'id of the authenticated user')
    def test_detail_show_flags_for_not_right_user(self):
        """show_flags_for pointing at another user's id returns HTTP 400."""
        self.user = user_factory()
        self.client.login_api(self.user)
        rating = Rating.objects.create(
            addon=self.addon, body='review', user=user_factory())
        detail_url = reverse_ns(self.detail_url_name, kwargs={'pk': rating.pk})
        # pk + 42 is guaranteed not to be the authenticated user's id.
        response = self.client.get(
            detail_url, {'show_flags_for': self.user.pk + 42})
        assert response.status_code == 400
        assert response.data['detail'] == (
            'show_flags_for parameter value should be equal to the user '
            'id of the authenticated user')
    def test_detail_rating_flags(self):
        """Detail response exposes the requesting user's flags on a rating."""
        self.user = user_factory()
        self.client.login_api(self.user)
        rating = Rating.objects.create(
            addon=self.addon, body='review 1', user=user_factory(),
            rating=2)
        detail_url = reverse_ns(self.detail_url_name, kwargs={'pk': rating.pk})
        params = {'show_flags_for': self.user.pk}
        # First, not flagged
        response = self.client.get(detail_url, params)
        assert response.status_code == 200
        data = json.loads(force_text(response.content))
        assert data['flags'] == []
        # then add some RatingFlag - one for a rating, the other a reply
        RatingFlag.objects.create(
            rating=rating, user=self.user, flag=RatingFlag.LANGUAGE)
        response = self.client.get(detail_url, params)
        assert response.status_code == 200
        data = json.loads(force_text(response.content))
        assert 'flags' in data
        assert data['flags'] == [
            {'flag': RatingFlag.LANGUAGE, 'note': None}]
    def test_detail_rating_flags_absent_in_v3(self):
        """The flags field is not serialized for the legacy v3 API."""
        self.user = user_factory()
        self.client.login_api(self.user)
        rating = Rating.objects.create(
            addon=self.addon, body='review', user=user_factory(),
            rating=1)
        RatingFlag.objects.create(
            rating=rating, user=self.user, flag=RatingFlag.OTHER,
            note=u'foo')
        detail_url = reverse_ns(
            self.detail_url_name, kwargs={'pk': rating.pk}, api_version='v3')
        params = {'show_flags_for': self.user.pk}
        response = self.client.get(detail_url, params)
        assert response.status_code == 200
        data = json.loads(force_text(response.content))
        # Even though a flag exists, v3 responses omit the key entirely.
        assert 'flags' not in data
    def test_list_by_admin_does_not_show_deleted_by_default(self):
        """Admins see only live ratings unless they opt in to deleted ones."""
        self.user = user_factory()
        self.grant_permission(self.user, 'Addons:Edit')
        self.client.login_api(self.user)
        review1 = Rating.objects.create(
            addon=self.addon, body='review 1', user=user_factory())
        review2 = Rating.objects.create(
            addon=self.addon, body='review 2', user=user_factory())
        review1.update(created=self.days_ago(1))
        # Add a review belonging to a different add-on, a reply and a deleted
        # review. They should not be present in the list.
        review_deleted = Rating.objects.create(
            addon=self.addon, body='review deleted', user=review1.user)
        review_deleted.delete()
        Rating.objects.create(
            addon=self.addon, body='reply to review 2', reply_to=review2,
            user=user_factory())
        Rating.objects.create(
            addon=addon_factory(), body='review other addon',
            user=review1.user)
        # Also add a deleted reply to the first review, it should not be shown.
        deleted_reply = Rating.objects.create(
            addon=self.addon, body='reply to review 1', reply_to=review1,
            user=user_factory())
        deleted_reply.delete()
        assert Rating.unfiltered.count() == 6
        response = self.client.get(self.url, {'addon': self.addon.pk})
        assert response.status_code == 200
        data = json.loads(force_text(response.content))
        # Only the two live reviews for this add-on, newest first.
        assert data['count'] == 2
        assert data['results']
        assert len(data['results']) == 2
        assert data['results'][0]['id'] == review2.pk
        assert data['results'][0]['reply'] is not None
        assert data['results'][1]['id'] == review1.pk
        assert data['results'][1]['reply'] is None
    def test_list_admin_show_deleted_if_requested(self):
        """With filter=with_deleted, admins also get deleted ratings/replies."""
        self.user = user_factory()
        self.grant_permission(self.user, 'Addons:Edit')
        self.client.login_api(self.user)
        review1 = Rating.objects.create(
            addon=self.addon, body='review 1', user=user_factory())
        review2 = Rating.objects.create(
            addon=self.addon, body='review 2', user=user_factory())
        review1.update(created=self.days_ago(1))
        # Add a review belonging to a different add-on, a reply and a deleted
        # review. The deleted review should be present, not the rest.
        review_deleted = Rating.objects.create(
            addon=self.addon, body='review deleted', user=review1.user)
        review_deleted.update(created=self.days_ago(2))
        review_deleted.delete()
        Rating.objects.create(
            addon=self.addon, body='reply to review 2', reply_to=review2,
            user=user_factory())
        Rating.objects.create(
            addon=addon_factory(), body='review other addon',
            user=review1.user)
        # Also add a deleted reply to the first review, it should be shown
        # as a child of that review.
        deleted_reply = Rating.objects.create(
            addon=self.addon, body='reply to review 1', reply_to=review1,
            user=user_factory())
        deleted_reply.delete()
        assert Rating.unfiltered.count() == 6
        response = self.client.get(
            self.url, {'addon': self.addon.pk, 'filter': 'with_deleted'})
        assert response.status_code == 200
        data = json.loads(force_text(response.content))
        # Both live reviews plus the deleted one, newest first.
        assert data['count'] == 3
        assert data['results']
        assert len(data['results']) == 3
        assert data['results'][0]['id'] == review2.pk
        assert data['results'][0]['reply'] is not None
        assert data['results'][1]['id'] == review1.pk
        assert data['results'][1]['reply'] is not None
        assert data['results'][1]['reply']['id'] == deleted_reply.pk
        assert data['results'][2]['id'] == review_deleted.pk
def test_list_weird_parameters(self):
    """Empty addon/user parameters are tolerated (200); non-integer pk
    parameters are rejected with a 400 and an explanatory detail."""
    self.addon.update(slug=u'my-slûg')
    user = user_factory()
    Rating.objects.create(addon=self.addon, body='A review.', user=user)
    # No user, but addon is present.
    response = self.client.get(
        self.url, {'addon': self.addon.pk, 'user': u''})
    assert response.status_code == 200
    # No addon, but user is present.
    response = self.client.get(self.url, {'addon': u'', 'user': user.pk})
    assert response.status_code == 200
    # Addon parameter is utf-8.
    response = self.client.get(self.url, {'addon': u'my-slûg'})
    assert response.status_code == 200
    # User parameter is weird (it should be a pk, as string): 400.
    response = self.client.get(
        self.url, {'addon': self.addon.pk, 'user': u'çæ→'})
    assert response.status_code == 400
    data = json.loads(force_text(response.content))
    assert data == {'detail': 'user parameter should be an integer.'}
    # Version parameter is weird (it should be a pk, as string): 400.
    response = self.client.get(
        self.url, {'addon': self.addon.pk, 'version': u'çæ→'})
    assert response.status_code == 400
    data = json.loads(force_text(response.content))
    assert data == {'detail': 'version parameter should be an integer.'}
def test_get_then_post_then_get_any_caching_is_cleared(self):
    """Make sure there is no overzealous caching going on when requesting
    the list of reviews for a given user+addon+version combination.
    Regression test for #5006."""
    self.user = user_factory()
    self.client.login_api(self.user)
    # Do a get filtering on both addon and user: it should not find
    # anything.
    response = self.client.get(self.url, {
        'addon': self.addon.pk,
        'version': self.addon.current_version.pk,
        'user': self.user.pk
    })
    assert response.status_code == 200
    data = json.loads(force_text(response.content))
    assert len(data['results']) == 0
    assert data['count'] == 0
    # Do a post to add a review by this user.
    response = self.client.post(self.url, {
        'addon': self.addon.pk, 'body': u'test bodyé',
        'score': 5, 'version': self.addon.current_version.pk})
    assert response.status_code == 201
    # Re-do the same get as before, should now find something since the
    # view is avoiding count() caching in this case.
    response = self.client.get(self.url, {
        'addon': self.addon.pk,
        'version': self.addon.current_version.pk,
        'user': self.user.pk
    })
    assert response.status_code == 200
    data = json.loads(force_text(response.content))
    assert len(data['results']) == 1
    assert data['count'] == 1
def test_no_throttle(self):
    """Listing ratings is not rate-limited: back-to-back GETs all
    succeed."""
    self.user = user_factory()
    self.client.login_api(self.user)
    Rating.objects.create(
        addon=self.addon, body='review 1', user=user_factory(),
        rating=1)
    # We should be able to get as quick as we want.
    for _ in range(2):
        response = self.client.get(self.url, {'addon': self.addon.pk})
        assert response.status_code == 200
class TestRatingViewSetDelete(TestCase):
    """DELETE on the rating detail endpoint: which users may delete a
    rating, and that deletion is soft (the row survives in
    Rating.unfiltered)."""
    client_class = APITestClient
    detail_url_name = 'rating-detail'

    def setUp(self):
        # One add-on, one author, one rating, and the detail URL for it.
        self.addon = addon_factory(
            guid=generate_addon_guid(), name=u'My Addôn', slug='my-addon')
        self.user = user_factory()
        self.rating = Rating.objects.create(
            addon=self.addon, version=self.addon.current_version, rating=1,
            body='My review', user=self.user)
        self.url = reverse_ns(
            self.detail_url_name, kwargs={'pk': self.rating.pk})

    def test_delete_anonymous(self):
        # Unauthenticated requests are rejected outright.
        response = self.client.delete(self.url)
        assert response.status_code == 401

    def test_delete_no_rights(self):
        # An unrelated authenticated user may not delete someone's rating.
        other_user = user_factory()
        self.client.login_api(other_user)
        response = self.client.delete(self.url)
        assert response.status_code == 403

    def test_delete_admin(self):
        # Admins (Addons:Edit) can delete; the rating is soft-deleted.
        admin_user = user_factory()
        self.grant_permission(admin_user, 'Addons:Edit')
        self.client.login_api(admin_user)
        response = self.client.delete(self.url)
        assert response.status_code == 204
        assert Rating.objects.count() == 0
        assert Rating.unfiltered.count() == 1

    def test_delete_moderator_flagged(self):
        # Moderators can delete a rating flagged for editor review.
        self.rating.update(editorreview=True)
        admin_user = user_factory()
        self.grant_permission(admin_user, 'Ratings:Moderate')
        self.client.login_api(admin_user)
        response = self.client.delete(self.url)
        assert response.status_code == 204
        assert Rating.objects.count() == 0
        assert Rating.unfiltered.count() == 1

    def test_delete_moderator_not_flagged(self):
        # ...but not an unflagged one.
        admin_user = user_factory()
        self.grant_permission(admin_user, 'Ratings:Moderate')
        self.client.login_api(admin_user)
        response = self.client.delete(self.url)
        assert response.status_code == 403
        assert Rating.objects.count() == 1

    def test_delete_moderator_but_addon_author(self):
        # Moderation rights do not apply on the moderator's own add-on.
        admin_user = user_factory()
        self.addon.addonuser_set.create(user=admin_user)
        self.grant_permission(admin_user, 'Ratings:Moderate')
        self.client.login_api(admin_user)
        response = self.client.delete(self.url)
        assert response.status_code == 403
        assert Rating.objects.count() == 1

    def test_delete_owner(self):
        # Authors can delete their own rating (soft delete).
        self.client.login_api(self.user)
        response = self.client.delete(self.url)
        assert response.status_code == 204
        assert Rating.objects.count() == 0
        assert Rating.unfiltered.count() == 1

    def test_delete_owner_reply(self):
        # Add-on authors can delete their own reply to a rating.
        addon_author = user_factory()
        self.addon.addonuser_set.create(user=addon_author)
        self.client.login_api(addon_author)
        reply = Rating.objects.create(
            addon=self.addon, reply_to=self.rating,
            body=u'Reply that will be delêted...', user=addon_author)
        self.url = reverse_ns(self.detail_url_name, kwargs={'pk': reply.pk})
        response = self.client.delete(self.url)
        assert response.status_code == 204
        assert Rating.objects.count() == 1
        assert Rating.unfiltered.count() == 2

    def test_delete_404(self):
        # Deleting a non-existent pk is a 404 and leaves data untouched.
        self.client.login_api(self.user)
        self.url = reverse_ns(
            self.detail_url_name, kwargs={'pk': self.rating.pk + 42})
        response = self.client.delete(self.url)
        assert response.status_code == 404
        assert Rating.objects.count() == 1

    def test_no_throttle(self):
        # Deletion is not rate-limited: two rapid deletes both succeed.
        # Add two reviews for different versions.
        rating_a = self.rating
        version_b = version_factory(addon=self.addon)
        rating_b = Rating.objects.create(
            addon=self.addon, version=version_b, rating=2,
            body='Second Review to delete', user=self.user)
        # And confirm we can rapidly delete them.
        self.client.login_api(self.user)
        response = self.client.delete(
            reverse_ns(self.detail_url_name, kwargs={'pk': rating_a.pk}))
        assert response.status_code == 204
        response = self.client.delete(
            reverse_ns(self.detail_url_name, kwargs={'pk': rating_b.pk}))
        assert response.status_code == 204
        assert Rating.objects.count() == 0
class TestRatingViewSetEdit(TestCase):
client_class = APITestClient
detail_url_name = 'rating-detail'
def setUp(self):
    """Create an add-on, a user and one rating by that user, and build
    the detail URL targeting that rating."""
    self.addon = addon_factory(
        guid=generate_addon_guid(), name=u'My Addôn', slug='my-addon')
    self.user = user_factory(username='areviewuser')
    self.rating = Rating.objects.create(
        addon=self.addon, version=self.addon.current_version, rating=1,
        body=u'My revïew', user=self.user)
    self.url = reverse_ns(
        self.detail_url_name, kwargs={'pk': self.rating.pk})
def test_edit_anonymous(self):
    """Anonymous users cannot edit: PATCH is unauthorized (401) and PUT
    is not an allowed method at all (405)."""
    payload = {'body': u'løl!'}
    assert self.client.patch(self.url, payload).status_code == 401
    assert self.client.put(self.url, payload).status_code == 405
def test_edit_no_rights(self):
    """A user who is not the author gets 403 on PATCH; PUT stays 405."""
    other_user = user_factory()
    self.client.login_api(other_user)
    payload = {'body': u'løl!'}
    assert self.client.patch(self.url, payload).status_code == 403
    assert self.client.put(self.url, payload).status_code == 405
def test_edit_no_rights_even_reviewer(self):
    # Only admins can edit a review they didn't write themselves.
    reviewer = user_factory()
    self.grant_permission(reviewer, 'Addons:Review')
    self.client.login_api(reviewer)
    payload = {'body': u'løl!'}
    assert self.client.patch(self.url, payload).status_code == 403
    assert self.client.put(self.url, payload).status_code == 405
def test_edit_owner_partial(self):
original_created_date = self.days_ago(1)
self.rating.update(created=original_created_date)
self.client.login_api(self.user)
response = self.client.patch(self.url, {'score': 2, 'body': u'løl!'})
assert response.status_code == 200
self.rating.reload()
assert response.data['id'] == self.rating.pk
assert response.data['body'] == str(self.rating.body) == u'løl!'
assert response.data['score'] == self.rating.rating == 2
assert response.data['version'] == {
'id': self.rating.version.id,
'version': self.rating.version.version
}
| |
)
)
# NOTE(review): auto-generated CMSSW (HLT-style) configuration dump of
# ESProducer wiring — values are conditions/geometry parameters, not code.

# Detector geometry, mapping and calibration providers (payloads from DB).
DTObjectMapESProducer = cms.ESProducer( "DTObjectMapESProducer",
    appendToDataLabel = cms.string( "" )
)
EcalBarrelGeometryFromDBEP = cms.ESProducer( "EcalBarrelGeometryFromDBEP",
    applyAlignment = cms.bool( True )
)
EcalElectronicsMappingBuilder = cms.ESProducer( "EcalElectronicsMappingBuilder" )
EcalEndcapGeometryFromDBEP = cms.ESProducer( "EcalEndcapGeometryFromDBEP",
    applyAlignment = cms.bool( True )
)
EcalLaserCorrectionService = cms.ESProducer( "EcalLaserCorrectionService" )
EcalPreshowerGeometryFromDBEP = cms.ESProducer( "EcalPreshowerGeometryFromDBEP",
    applyAlignment = cms.bool( True )
)
HcalGeometryFromDBEP = cms.ESProducer( "HcalGeometryFromDBEP",
    applyAlignment = cms.bool( False ),
    hcalTopologyConstants = cms.PSet(
        maxDepthHE = cms.int32( 3 ),
        maxDepthHB = cms.int32( 2 ),
        mode = cms.string( "HcalTopologyMode::LHC" )
    )
)
HcalTopologyIdealEP = cms.ESProducer( "HcalTopologyIdealEP",
    Exclude = cms.untracked.string( "" ),
    appendToDataLabel = cms.string( "" )
)
# Track propagators: same producer type, varied by direction, magnetic
# field model, assumed particle mass and pt/dphi limits.
MaterialPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
    SimpleMagneticField = cms.string( "" ),
    PropagationDirection = cms.string( "alongMomentum" ),
    ComponentName = cms.string( "PropagatorWithMaterial" ),
    Mass = cms.double( 0.105 ),
    ptMin = cms.double( -1.0 ),
    MaxDPhi = cms.double( 1.6 ),
    useRungeKutta = cms.bool( False )
)
MaterialPropagatorForHI = cms.ESProducer( "PropagatorWithMaterialESProducer",
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    PropagationDirection = cms.string( "alongMomentum" ),
    ComponentName = cms.string( "PropagatorWithMaterialForHI" ),
    Mass = cms.double( 0.139 ),
    ptMin = cms.double( -1.0 ),
    MaxDPhi = cms.double( 1.6 ),
    useRungeKutta = cms.bool( False )
)
MaterialPropagatorParabolicMF = cms.ESProducer( "PropagatorWithMaterialESProducer",
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    PropagationDirection = cms.string( "alongMomentum" ),
    ComponentName = cms.string( "PropagatorWithMaterialParabolicMf" ),
    Mass = cms.double( 0.105 ),
    ptMin = cms.double( -1.0 ),
    MaxDPhi = cms.double( 1.6 ),
    useRungeKutta = cms.bool( False )
)
OppositeMaterialPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
    SimpleMagneticField = cms.string( "" ),
    PropagationDirection = cms.string( "oppositeToMomentum" ),
    ComponentName = cms.string( "PropagatorWithMaterialOpposite" ),
    Mass = cms.double( 0.105 ),
    ptMin = cms.double( -1.0 ),
    MaxDPhi = cms.double( 1.6 ),
    useRungeKutta = cms.bool( False )
)
OppositeMaterialPropagatorForHI = cms.ESProducer( "PropagatorWithMaterialESProducer",
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    PropagationDirection = cms.string( "oppositeToMomentum" ),
    ComponentName = cms.string( "PropagatorWithMaterialOppositeForHI" ),
    Mass = cms.double( 0.139 ),
    ptMin = cms.double( -1.0 ),
    MaxDPhi = cms.double( 1.6 ),
    useRungeKutta = cms.bool( False )
)
OppositeMaterialPropagatorParabolicMF = cms.ESProducer( "PropagatorWithMaterialESProducer",
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    PropagationDirection = cms.string( "oppositeToMomentum" ),
    ComponentName = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
    Mass = cms.double( 0.105 ),
    ptMin = cms.double( -1.0 ),
    MaxDPhi = cms.double( 1.6 ),
    useRungeKutta = cms.bool( False )
)
OppositePropagatorWithMaterialForMixedStep = cms.ESProducer( "PropagatorWithMaterialESProducer",
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    PropagationDirection = cms.string( "oppositeToMomentum" ),
    ComponentName = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
    Mass = cms.double( 0.105 ),
    ptMin = cms.double( 0.1 ),
    MaxDPhi = cms.double( 1.6 ),
    useRungeKutta = cms.bool( False )
)
ParametrizedMagneticFieldProducer = cms.ESProducer( "AutoParametrizedMagneticFieldProducer",
    version = cms.string( "Parabolic" ),
    valueOverride = cms.int32( -1 ),
    label = cms.untracked.string( "ParabolicMf" )
)
PropagatorWithMaterialForLoopers = cms.ESProducer( "PropagatorWithMaterialESProducer",
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    PropagationDirection = cms.string( "alongMomentum" ),
    ComponentName = cms.string( "PropagatorWithMaterialForLoopers" ),
    Mass = cms.double( 0.1396 ),
    ptMin = cms.double( -1.0 ),
    MaxDPhi = cms.double( 4.0 ),
    useRungeKutta = cms.bool( False )
)
PropagatorWithMaterialForMixedStep = cms.ESProducer( "PropagatorWithMaterialESProducer",
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    PropagationDirection = cms.string( "alongMomentum" ),
    ComponentName = cms.string( "PropagatorWithMaterialForMixedStep" ),
    Mass = cms.double( 0.105 ),
    ptMin = cms.double( 0.1 ),
    MaxDPhi = cms.double( 1.6 ),
    useRungeKutta = cms.bool( False )
)
RPCGeometryESModule = cms.ESProducer( "RPCGeometryESModule",
    useDDD = cms.untracked.bool( False ),
    compatibiltyWith11 = cms.untracked.bool( True )
)
# SiStrip calibration/quality: gains merged from two records; quality
# assembled from several DB records listed below.
SiStripGainESProducer = cms.ESProducer( "SiStripGainESProducer",
    printDebug = cms.untracked.bool( False ),
    appendToDataLabel = cms.string( "" ),
    APVGain = cms.VPSet(
        cms.PSet( Record = cms.string( "SiStripApvGainRcd" ),
            NormalizationFactor = cms.untracked.double( 1.0 ),
            Label = cms.untracked.string( "" )
        ),
        cms.PSet( Record = cms.string( "SiStripApvGain2Rcd" ),
            NormalizationFactor = cms.untracked.double( 1.0 ),
            Label = cms.untracked.string( "" )
        )
    ),
    AutomaticNormalization = cms.bool( False )
)
SiStripQualityESProducer = cms.ESProducer( "SiStripQualityESProducer",
    appendToDataLabel = cms.string( "" ),
    PrintDebugOutput = cms.bool( False ),
    ThresholdForReducedGranularity = cms.double( 0.3 ),
    UseEmptyRunInfo = cms.bool( False ),
    ReduceGranularity = cms.bool( False ),
    ListOfRecordToMerge = cms.VPSet(
        cms.PSet( record = cms.string( "SiStripDetVOffRcd" ),
            tag = cms.string( "" )
        ),
        cms.PSet( record = cms.string( "SiStripDetCablingRcd" ),
            tag = cms.string( "" )
        ),
        cms.PSet( record = cms.string( "SiStripBadChannelRcd" ),
            tag = cms.string( "" )
        ),
        cms.PSet( record = cms.string( "SiStripBadFiberRcd" ),
            tag = cms.string( "" )
        ),
        cms.PSet( record = cms.string( "SiStripBadModuleRcd" ),
            tag = cms.string( "" )
        )
    )
)
SiStripRecHitMatcherESProducer = cms.ESProducer( "SiStripRecHitMatcherESProducer",
    PreFilter = cms.bool( False ),
    ComponentName = cms.string( "StandardMatcher" ),
    NSigmaInside = cms.double( 3.0 )
)
SiStripRegionConnectivity = cms.ESProducer( "SiStripRegionConnectivity",
    EtaDivisions = cms.untracked.uint32( 20 ),
    PhiDivisions = cms.untracked.uint32( 20 ),
    EtaMax = cms.untracked.double( 2.5 )
)
SimpleSecondaryVertex3TrkComputer = cms.ESProducer( "SimpleSecondaryVertexESProducer",
    minTracks = cms.uint32( 3 ),
    minVertices = cms.uint32( 1 ),
    use3d = cms.bool( True ),
    unBoost = cms.bool( False ),
    useSignificance = cms.bool( True )
)
# L1 trigger hardware constants.
StableParameters = cms.ESProducer( "StableParametersTrivialProducer",
    NumberL1JetCounts = cms.uint32( 12 ),
    NumberL1NoIsoEG = cms.uint32( 4 ),
    NumberL1CenJet = cms.uint32( 4 ),
    NumberL1Tau = cms.uint32( 8 ),
    NumberConditionChips = cms.uint32( 1 ),
    NumberL1EGamma = cms.uint32( 12 ),
    TotalBxInEvent = cms.int32( 5 ),
    NumberL1Mu = cms.uint32( 4 ),
    PinsOnConditionChip = cms.uint32( 512 ),
    WordLength = cms.int32( 64 ),
    PinsOnChip = cms.uint32( 512 ),
    OrderOfChip = cms.vint32( 1 ),
    IfMuEtaNumberBits = cms.uint32( 6 ),
    OrderConditionChip = cms.vint32( 1 ),
    appendToDataLabel = cms.string( "" ),
    NumberL1TauJet = cms.uint32( 4 ),
    NumberL1Jet = cms.uint32( 12 ),
    NumberPhysTriggers = cms.uint32( 512 ),
    NumberL1Muon = cms.uint32( 12 ),
    UnitLength = cms.int32( 8 ),
    NumberL1IsoEG = cms.uint32( 4 ),
    NumberTechnicalTriggers = cms.uint32( 64 ),
    NumberL1ForJet = cms.uint32( 4 ),
    IfCaloEtaNumberBits = cms.uint32( 4 ),
    NumberPsbBoards = cms.int32( 7 ),
    NumberChips = cms.uint32( 5 ),
    NumberPhysTriggersExtended = cms.uint32( 64 )
)
SteppingHelixPropagatorAny = cms.ESProducer( "SteppingHelixPropagatorESProducer",
    NoErrorPropagation = cms.bool( False ),
    endcapShiftInZPos = cms.double( 0.0 ),
    PropagationDirection = cms.string( "anyDirection" ),
    useTuningForL2Speed = cms.bool( False ),
    useIsYokeFlag = cms.bool( True ),
    endcapShiftInZNeg = cms.double( 0.0 ),
    SetVBFPointer = cms.bool( False ),
    AssumeNoMaterial = cms.bool( False ),
    returnTangentPlane = cms.bool( True ),
    useInTeslaFromMagField = cms.bool( False ),
    VBFName = cms.string( "VolumeBasedMagneticField" ),
    useEndcapShiftsInZ = cms.bool( False ),
    sendLogWarning = cms.bool( False ),
    useMatVolumes = cms.bool( True ),
    debug = cms.bool( False ),
    ApplyRadX0Correction = cms.bool( True ),
    useMagVolumes = cms.bool( True ),
    ComponentName = cms.string( "SteppingHelixPropagatorAny" )
)
TrackerDigiGeometryESModule = cms.ESProducer( "TrackerDigiGeometryESModule",
    appendToDataLabel = cms.string( "" ),
    fromDDD = cms.bool( False ),
    applyAlignment = cms.bool( True ),
    alignmentsLabel = cms.string( "" )
)
TrackerGeometricDetESModule = cms.ESProducer( "TrackerGeometricDetESModule",
    appendToDataLabel = cms.string( "" ),
    fromDDD = cms.bool( False )
)
TransientTrackBuilderESProducer = cms.ESProducer( "TransientTrackBuilderESProducer",
    ComponentName = cms.string( "TransientTrackBuilder" )
)
VolumeBasedMagneticFieldESProducer = cms.ESProducer( "VolumeBasedMagneticFieldESProducerFromDB",
    debugBuilder = cms.untracked.bool( False ),
    valueOverride = cms.int32( -1 ),
    label = cms.untracked.string( "" )
)
ZdcGeometryFromDBEP = cms.ESProducer( "ZdcGeometryFromDBEP",
    applyAlignment = cms.bool( False )
)
# DetId associators: eta/phi binning used to match tracks to detector cells.
caloDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
    ComponentName = cms.string( "CaloDetIdAssociator" ),
    etaBinSize = cms.double( 0.087 ),
    nEta = cms.int32( 70 ),
    nPhi = cms.int32( 72 ),
    includeBadChambers = cms.bool( False )
)
cosmicsNavigationSchoolESProducer = cms.ESProducer( "NavigationSchoolESProducer",
    ComponentName = cms.string( "CosmicNavigationSchool" ),
    SimpleMagneticField = cms.string( "" )
)
ecalDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
    ComponentName = cms.string( "EcalDetIdAssociator" ),
    etaBinSize = cms.double( 0.02 ),
    nEta = cms.int32( 300 ),
    nPhi = cms.int32( 360 ),
    includeBadChambers = cms.bool( False )
)
# ECAL rechit severity classification: per-flag / per-DB-status buckets.
ecalSeverityLevel = cms.ESProducer( "EcalSeverityLevelESProducer",
    dbstatusMask = cms.PSet(
        kGood = cms.vstring( 'kOk' ),
        kProblematic = cms.vstring( 'kDAC',
            'kNoLaser',
            'kNoisy',
            'kNNoisy',
            'kNNNoisy',
            'kNNNNoisy',
            'kNNNNNoisy',
            'kFixedG6',
            'kFixedG1',
            'kFixedG0' ),
        kRecovered = cms.vstring( ),
        kTime = cms.vstring( ),
        kWeird = cms.vstring( ),
        kBad = cms.vstring( 'kNonRespondingIsolated',
            'kDeadVFE',
            'kDeadFE',
            'kNoDataNoTP' )
    ),
    timeThresh = cms.double( 2.0 ),
    flagMask = cms.PSet(
        kGood = cms.vstring( 'kGood' ),
        kProblematic = cms.vstring( 'kPoorReco',
            'kPoorCalib',
            'kNoisy',
            'kSaturated' ),
        kRecovered = cms.vstring( 'kLeadingEdgeRecovered',
            'kTowerRecovered' ),
        kTime = cms.vstring( 'kOutOfTime' ),
        kWeird = cms.vstring( 'kWeird',
            'kDiWeird' ),
        kBad = cms.vstring( 'kFaultyHardware',
            'kDead',
            'kKilled' )
    )
)
hcalDDDRecConstants = cms.ESProducer( "HcalDDDRecConstantsESModule",
    appendToDataLabel = cms.string( "" )
)
hcalDDDSimConstants = cms.ESProducer( "HcalDDDSimConstantsESModule",
    appendToDataLabel = cms.string( "" )
)
hcalDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
    ComponentName = cms.string( "HcalDetIdAssociator" ),
    etaBinSize = cms.double( 0.087 ),
    nEta = cms.int32( 70 ),
    nPhi = cms.int32( 72 ),
    includeBadChambers = cms.bool( False )
)
hcalRecAlgos = cms.ESProducer( "HcalRecAlgoESProducer",
RecoveredRecHitBits = cms.vstring( 'TimingAddedBit',
'TimingSubtractedBit' ),
SeverityLevels = cms.VPSet(
cms.PSet( RecHitFlags = cms.vstring( ),
ChannelStatus = cms.vstring( ),
Level = cms.int32( 0 )
),
cms.PSet( RecHitFlags = cms.vstring( ),
ChannelStatus = cms.vstring( 'HcalCellCaloTowerProb' ),
Level = cms.int32( 1 )
),
cms.PSet( RecHitFlags = cms.vstring( 'HSCP_R1R2',
'HSCP_FracLeader',
'HSCP_OuterEnergy',
'HSCP_ExpFit',
'ADCSaturationBit',
'HBHEIsolatedNoise',
'AddedSimHcalNoise' ),
ChannelStatus = cms.vstring( 'HcalCellExcludeFromHBHENoiseSummary' ),
Level = cms.int32( 5 )
),
cms.PSet( RecHitFlags = cms.vstring( 'HBHEHpdHitMultiplicity',
'HBHEPulseShape',
'HOBit',
'HFInTimeWindow',
'ZDCBit',
'CalibrationBit',
'TimingErrorBit',
'HBHETriangleNoise',
'HBHETS4TS5Noise' ),
ChannelStatus = cms.vstring( ),
Level = cms.int32( 8 )
),
cms.PSet( RecHitFlags = cms.vstring( 'HFLongShort',
'HFPET',
'HFS8S1Ratio',
'HFDigiTime' ),
ChannelStatus = cms.vstring( | |
in range(self.nr_pipelets)
])
@property
def solos_full_utilization(self):
    """One solo Run per pipelet: same per-pipelet cache allocation,
    utilization forced to 100%."""
    solo_runs = [
        Run(pipelets=(pipelet, ),
            cbms=(ways_to_cbm(nr_ways), ),
            run_number=self.run_number,
            traffics=(traffic_spec, ),
            utilizations=(100, ))
        for pipelet, nr_ways, traffic_spec in zip(
            self.pipelets, self.l3_ways, self.traffics)
    ]
    return np.array(solo_runs)
@property
def solos_full_cbm(self):
    """One solo Run per pipelet: keep the pipelet's utilization but grant
    it the full cache (cbm_max)."""
    solo_runs = [
        Run(pipelets=(pipelet, ),
            cbms=(config.cat['cbm_max'], ),
            run_number=self.run_number,
            traffics=(traffic_spec, ),
            utilizations=(utilization, ))
        # _cbm is unused (kept in the zip so the iteration length still
        # matches the original truncation semantics).
        for pipelet, _cbm, traffic_spec, utilization in zip(
            self.pipelets, self.cbms, self.traffics, self.utilizations)
    ]
    return np.array(solo_runs)
@property
def solos_same_cbm(self):
    """One solo Run per pipelet: same cache allocation and same
    utilization as in this (shared) run."""
    solo_runs = [
        Run(pipelets=(pipelet, ),
            cbms=(ways_to_cbm(nr_ways), ),
            run_number=self.run_number,
            traffics=(traffic_spec, ),
            utilizations=(utilization, ))
        for pipelet, nr_ways, traffic_spec, utilization in zip(
            self.pipelets, self.l3_ways, self.traffics, self.utilizations)
    ]
    return np.array(solo_runs)
@property
def tx_mbps_request(self):
    """Per-pipelet TX rate (Mbps) to request from the traffic generator.

    At 100% utilization the request is the port speed; otherwise it is
    derived from the measured RX packet rate of the all-100% reference
    run, scaled by the wanted utilization. -1 for a pipelet whose
    reference run has not completed yet.
    """
    l = []
    # Reference run: identical configuration, every pipelet at 100%.
    e = self.copy(utilizations=tuple([100] * self.nr_pipelets))
    for i, (u, t, nf, speed) in \
            enumerate(zip(self.utilizations, self.traffics, self.pipelets,
                          self.port_speeds)):
        # First packet size (bytes) of the traffic spec; * 8 -> bits.
        pkt_size = Traffic(t).size[0][0]
        # FIXME: find a better way
        l.append(speed if u == 100 else -1 if not e.is_done else
                 e.rx_mpps_mean[i] * pkt_size * 8 * u / 100)
        #e.rx_mbps_mean[i] * u / 100)
    return np.array(l)
# minimum cache allocation that yields a given normalized throughput
def min_l3ways(self, pps_normalized):
    """Return the smallest number of L3 ways whose normalized throughput
    exceeds `pps_normalized`. Only valid for single-pipelet runs."""
    assert (self.nr_pipelets == 1)
    runs = self.all_cbms
    # Distance above the target; allocations that miss the target get a
    # large sentinel (100) so argmin never selects them.
    xput = [
        e.pps_normalized_full_cbm[0] - pps_normalized
        if e.pps_normalized_full_cbm[0] > pps_normalized else 100
        for e in runs
    ]
    idx = np.argmin(xput)
    return cbm_to_ways(runs[idx].cbms[0])
def next_utilization(self, rtt=80):
    """If this run is fully real and its first pipelet sits at 100%
    utilization, return a copy with that pipelet backed off to the
    highest utilization that meets the `rtt` (us) target. Otherwise
    (or when no utilization qualifies) return None."""
    #self.cbms[0] == '3' and \
    if self.utilizations[0] == 100 and \
            self.is_real.all():
        next_util = self.rtt_to_utilization(rtt)
        if next_util:
            # Only the first pipelet is throttled; the rest stay at 100%.
            utilizations = tuple([next_util] + [100] *
                                 (len(self.utilizations) - 1))
            return self.copy(utilizations=utilizations)
    return None
# NOTE(review): functools.lru_cache on an instance method keys the cache
# on `self` and keeps every Run alive for the cache's lifetime (ruff
# B019). Presumably intentional here to memoize the recursion across
# calls — confirm before reusing the pattern elsewhere.
@functools.lru_cache()
def optimal_calloc(self, nr_ways, policy):
    """Distribute `nr_ways` L3 cache ways across this run's pipelets
    according to `policy` ('equal', 'minmax' or 'minsum') and return a
    copy of this Run carrying the chosen CBMs. The search policies
    recurse pipelet by pipelet over all feasible first allocations."""
    def neighbors():
        # Candidates: give the first pipelet every feasible way count and
        # allocate the remainder optimally (recursively) to the others.
        # per class min and max for number of ways
        nr_ways_min = config.cat['nr_ways_min']
        nr_ways_max = nr_ways - nr_ways_min * len(self.pipelets)
        e = self.copy(
            pipelets=self.pipelets[1:],
            cbms=self.cbms[1:],
            traffics=self.traffics[1:],
            utilizations=self.utilizations[1:])
        neighbors = []
        for nr_ways_first in range(nr_ways_min, nr_ways_max + 1):
            e_opt = e.optimal_calloc(nr_ways - nr_ways_first, policy)
            cbms = [ways_to_cbm(nr_ways_first)]
            nr_ways_used = nr_ways_first
            # Re-base the sub-solution's CBMs after the first allocation.
            for cbm in e_opt.cbms:
                nr_ways_this = cbm_to_ways(cbm)
                cbms.append(
                    ways_to_cbm(nr_ways_this, nr_ways_used=nr_ways_used))
                nr_ways_used += nr_ways_this
            neighbors.append(self.copy(cbms=tuple(cbms)))
        return neighbors

    # this is the max pps for incomplete cbms
    # note that this is definitely an overestimate
    # NOTE(review): `heuristic` is defined but never called in this method.
    def heuristic(nr_ways, policy):
        return objective_func(policy, self.pps_solos_full_cbm)

    def objective_func(policy, l):
        # Pick the candidate minimizing the policy's aggregate predicted
        # throughput loss (max loss for 'minmax', total loss for 'minsum').
        def eval_func(e):
            v = 100 - e.pps_predict_cat_normalized_full_cbm
            if policy == 'minmax':
                return max(v)
            elif policy == 'minsum':
                return sum(v)
        return min(l, key=eval_func)

    if policy == 'equal':
        # Even split; requires nr_ways to divide evenly among pipelets.
        assert (nr_ways % self.nr_pipelets == 0)
        cbms = []
        nr_ways_used = 0
        for i in range(self.nr_pipelets):
            nr_ways_this = int(nr_ways / self.nr_pipelets)
            cbms.append(
                ways_to_cbm(nr_ways_this, nr_ways_used=nr_ways_used))
            nr_ways_used += nr_ways_this
        return self.copy(cbms=tuple(cbms))
    # base case
    if self.nr_pipelets == 1:
        return self.copy(cbms=(ways_to_cbm(nr_ways), ))
    return objective_func(policy, neighbors())
def rtt_to_utilization(self, rtt):
    """Return the highest utilization of the first pipelet whose 95th
    percentile RTT stays below `rtt` (us), taking the minimum over the
    evaluated cache allocations; None when no allocation qualifies.

    Only completed copies of this run (first pipelet at the minimum and
    maximum way counts, other pipelets unchanged) are considered; copies
    with fewer than two utilization points are skipped.
    """
    u_list = []
    all_cbms = \
        filter(lambda e: e.is_done,
               [self.copy(cbms=tuple([ways_to_cbm(nr_ways)] + list(self.cbms[1:])))
                for nr_ways in [config.cat['nr_ways_min'],
                                config.cat['nr_ways_max']]])
    for e in all_cbms:
        e_u = e.all_utils
        if len(e_u) <= 1:
            continue
        rtt_l = np.array([run.rtt_95[0] for run in e_u])
        e_u_select = e_u[rtt_l < rtt]
        if len(e_u_select) == 0:
            print(rtt_l)
            return None
        # Highest utilization still meeting the RTT target. The original
        # min(heapq.nlargest(1, ...)) is exactly max(...), without the
        # in-loop import.
        u = max(run.utilizations[0] for run in e_u_select)
        u_list.append(u)
    if len(u_list) == 0:
        return None
    # Be conservative across allocations: take the smallest qualifying
    # utilization found.
    return min(u_list)
def selector(self, best=None, is_done=None):
    """Select runs: the single best run when `best` is truthy; copies for
    every configured run number when `is_done` is None; otherwise the
    completed or the pending runs."""
    if best:
        best_run = self.best_run
        return np.array([best_run] if best_run else [])
    if is_done is None:
        copies = [self.copy(run_number=n) for n in config.run_numbers]
        return np.array(copies)
    return self.all_runs if is_done else self.all_runs_pending
def start(self, retry=0, ignore=False, force=False):
    """Execute this run via the RuntimeManager and record its results.

    Retries up to `retry` additional times (by recursing with retry-1).
    `force` runs even when results already exist; `ignore` skips
    persisting results to the database. Returns True on success or when
    nothing had to be done, False once retries are exhausted.
    """
    def _format_list(fmt, l):
        # Render a sequence as comma-separated fixed-format fields.
        return ', '.join([fmt % i for i in l])

    if retry < 0:
        return False
    if not force and self.is_done:
        return True
    if not self.is_necessary:
        return True
    log_info(self.__str__())
    self.results = {}
    # A failed launch consumes one retry via recursion.
    if not RuntimeManager().start(self):
        return self.start(retry=retry - 1, ignore=ignore, force=force)
    if self.has_results:
        MB = 1024 * 1024
        log_info('rx (Mpps): %s' % _format_list('%6.3f', self.mpps))
        log_info(
            'tx (Mpps): %s' % _format_list('%6.3f', self.tx_mpps_mean))
        log_info(
            'rx (Mbps): %s' % _format_list('%6d', self.rx_mbps_mean))
        log_info(
            'tx (Mbps): %s' % _format_list('%6d', self.tx_mbps_mean))
        log_info('rtt_95 (us): %s' % _format_list('%6d', self.rtt_95))
        log_info(
            'l3mr (%%): %s' % _format_list('%6.2f', self.l3missrate))
        #log_info('l3oc (MB): %s' % _format_list('%6.2f', self.llc_occupancy / MB))
        #log_info('mem (MBps): %s' % _format_list('%6d', self.local_bytes / MB))
        log_info('mem (MBps): r=%6d w=%6d' %
                 (self.llc_misses__mem_read / MB,
                  self.llc_misses__mem_write / MB))
        log_info('pcie (MBps): r=%6d w=%6d' %
                 (self.llc_references__pcie_read / MB,
                  self.llc_references__pcie_write / MB))
        log_info('ddiomr (%%): r=%6.2f w=%6.2f' %
                 (100 * self.llc_misses__pcie_read / self.
                  llc_references__pcie_read,
                  100 * self.llc_misses__pcie_write / self.
                  llc_references__pcie_write))
        # log_info('all :\n%s' % pformat(self.results, width=250))
    # is_done, fail_msgs = self.is_sane()
    # Sanity checking is currently reduced to "did we get any results".
    is_done, fail_msgs = (True, ('',)) if self.has_results else (False, ('no results', ))
    if not is_done:
        log_error('Run sanity check failed')
        log_error(fail_msgs)
        log_debug(pformat(self.results, width=250))
        return self.start(retry=retry - 1, ignore=ignore, force=force)
    if not ignore:
        # Persist the run; `results` is a tracked JSON column, so mark it
        # modified explicitly before flushing.
        db.session.merge(self)
        flag_modified(self, 'results')
        db.session.flush()
        db.session.commit()
    return True
# FIXME
def stop(self):
    """Stop every live NF instance started by this run, if any were."""
    for instance in getattr(self, '_instances', ()):
        if instance:
            instance.stop()
class RuntimeManager(object, metaclass=Singleton):
def __init__(self):
    """One-time host setup: NF/port managers, the traffic generator, and
    machine tuning (power states, IRQ affinity, DPDK, hugepages, ...).

    Runs once per process (class uses a Singleton metaclass).
    """
    from resq.nf import NFInstanceManager
    from resq.port import PortManager
    self._loop = asyncio.get_event_loop()
    #_loop.set_debug(True)
    self.nfi_manager = NFInstanceManager()
    self.port_manager = PortManager()
    # Physical (non-ring) ports on the NUMA socket the runs are pinned to.
    local_ports = self.port_manager.list(
        numa_node=config.run_socket_id, ring=False)
    self.tgen = MelvinGen(local_ports=local_ports)
    # clean up after a previous run
    cmds = [#['pkill', '-9', '-f', 'userlevel/click'],
            ['service', 'lldpd', 'stop'],
            ['service', 'irqbalance', 'stop']]
    for cmd in cmds:
        call(cmd, stdin=DEVNULL, stdout=DEVNULL, stderr=DEVNULL)
    # Disable sources of measurement jitter, then bring up the dataplane.
    disable_c_state()
    disable_p_state()
    disable_nmi_watchdog()
    disable_core_frequency_scaling()
    disable_uncore_frequency_scaling()
    configure_ddio()
    configure_irq_affinity()
    configure_pcie()
    init_dpdk()
    init_hugepages()
    init_lxc()
    init_netmap()
    init_rdt()
@asyncio.coroutine
def _monitor_pmu(self,
                 run,
                 warmup_sec=config.warmup_sec,
                 duration_sec=config.duration_sec):
    """Collect PMU counters for `run` over the measurement window (after
    `warmup_sec`) and merge them into run.results: per-NF counters plus
    socket-local uncore events via `perf stat`; optional pcm-memory /
    pcm-pcie collection is currently disabled (see FIXMEs)."""
    # FIXME: assumes NF has exactly one core
    # revert to the old model of one perf per NF
    cmd = {}
    proc = {}
    out = {}
    cmd['uncore'] = [
        'perf', 'stat', '--field-separator', ',', '--delay',
        str(warmup_sec), '--repeat',
        str(duration_sec - warmup_sec), '--event',
        ','.join(config.perf_events['uncore']), '--per-socket', '-a',
        'sleep', '1'
    ]
    # FIXME: pcm-memory takes 1s to start
    #cmd['membw'] = [config.pcm_memory_binary, '-csv', '--', 'sleep',
    #                str(duration_sec - 1)]
    # FIXME: pcm-pcie takes 1.36s to start
    #cmd['pcibw'] = [config.pcm_pcie_binary, '-csv', '-B', '--', 'sleep',
    #                str(duration_sec - 1.37)]
    nf_results = []
    for inst in self.nfi_manager.list():
        fut = asyncio.ensure_future(inst.monitor_pmu_async())
        nf_results.append(fut)
    for t in cmd.keys():
        proc[t] = asyncio.ensure_future(
            asyncio.
            create_subprocess_exec(  #'cgexec', '--sticky', '-g', 'cpuset:/',
                *cmd[t],
                stdin=DEVNULL,
                stdout=PIPE,
                stderr=PIPE))
    # Start from a cold L3 so measurements are comparable across runs.
    l3_flush()
    try:
        yield from asyncio.sleep(warmup_sec)
        nf_results = yield from asyncio.gather(*nf_results)
        # Convert the per-NF list of dicts into a dict of lists.
        nf_results = {
            k: [r[k] for r in nf_results]
            for k in nf_results[0].keys()
        }
        run.results.update(nf_results)
        proc['uncore'] = yield from proc['uncore']
        out = yield from proc['uncore'].stderr.read()
        for line in out.decode().splitlines():
            socket, _, value, unit, name, *etc = line.split(',')
            # Keep only counters from the socket the run is pinned to.
            if socket != 'S%d' % config.run_socket_id:
                continue
            # Normalize perf event names into valid identifier-like keys.
            name = re.sub(r'^[^/]*/', '', name).\
                replace(':pp', '').\
                replace('/', '').\
                replace('-', '_').\
                replace('.', '__')
            if name not in run.results:
                run.results[name] = 0
            run.results[name] += int(float(value))
        # FIXME: mbps and bps here are bytes/sec
        if 'membw' in proc:
            proc['membw'] = yield from proc['membw']
            out = yield from proc['membw'].stdout.read()
            lines = [l.split(';') for l in out.decode().splitlines()]
            assert (len(lines) == 3)
            index_skt = lines[0].index('SKT%d' % config.run_socket_id)
            index_r = lines[1].index('Mem Read (MB/s)', index_skt)
            index_w = lines[1].index('Mem Write (MB/s)', index_skt)
            run.results['mem_read_mbps'] = int(float(lines[2][index_r]))
            run.results['mem_write_mbps'] = int(float(lines[2][index_w]))
        if 'pcibw' in proc:
            proc['pcibw'] = yield from proc['pcibw']
            out = yield from proc['pcibw'].stdout.read()
            lines = [l.split(',') for l in out.decode().splitlines()]
            assert (len(lines) == 3 and int(lines[config.run_socket_id + 1]
                                            [0]) == config.run_socket_id)
            for name, value in zip(lines[0][1:], lines[config.run_socket_id
                                                       + 1][1:]):
                # Normalize pcm-pcie column headers into result keys.
                name = name.replace(' ', '_') \
                    .replace('(B)', 'mbps') \
                    .replace('Rd', 'Read') \
                    .replace('Wr', 'Write') \
                    .lower()
                value = int(value)
                if 'mbps' in name:
                    value = value / 1000000.
                run.results[name] = value
    except asyncio.CancelledError:
        log_error('monitor_pmu failed: Timeout')
    except Exception as e:
        log_error('monitor_pmu failed: %s' % e)
    # All monitoring subprocesses must have exited cleanly.
    for p in proc.values():
        exitcode = yield from p.wait()
        assert (exitcode == 0)
@asyncio.coroutine
def _monitor_traffic(self, run):
    """Drive the traffic generator for `run` and merge its measurements
    into run.results. Failures and timeouts are logged, not raised."""
    # FIXME: doesn't work for MLC
    try:
        tx_mbps_l = run.tx_mbps_request
        # First port of each NF instance; None for port-less instances.
        ports = [
            i.ports[0].name if i.nr_ports > 0 else None
            for i in self.nfi_manager.list()
        ]
        randomize_payloads = [
            NF(nf).randomize_payload for nf in run.pipelets
        ]
        r = yield from \
            self.tgen.measure(peer_ports=ports,
                              tx_mbps_l=tx_mbps_l,
                              traffics=run.traffics,
                              randomize_payloads=randomize_payloads)
        if r:
            # convert a list of dicts to a dict of lists (missing keys -> -1)
            keys = sorted(list(set([k for i in r for k in i.keys()])))
            r = {k: [d[k] if k in d else -1 for d in r] for k in keys}
            run.results.update(r)
    except asyncio.CancelledError:
        log_error('monitor_traffic failed: Timeout')
    except Exception as e:
        log_error('monitor_traffic failed: %s' % e)
def monitor(self, run):
subtasks = [self._monitor_pmu(run), self._monitor_traffic(run)]
timeout = config.duration_sec + 1.5
done, pending = self._loop.run_until_complete(
asyncio.wait(subtasks, timeout=timeout))
for i in pending:
if not i.cancelled():
i.cancel()
if len(pending) | |
# Repository: isabella232/eclipse2017
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import traceback
from datetime import datetime
import logging
from multiprocessing import Pool
import os
import io
import json
from functools import partial
import shutil
from google.cloud import datastore, storage, vision
from common.geometry import getRescaledDimensions
from common import config
from common import constants
from common import datastore_schema as ds
from common.eclipse2017_exceptions import CouldNotObtainCredentialsError
import common.service_account as sa
from common import util
from PIL import Image
from common.geometry import ratio_to_decimal
from common.exif import _extract_exif_metadata, _extract_image_metadata
import exifread
from rawkit.raw import Raw
from rawkit.options import WhiteBalance
class UploadErrors(object):
    """Buckets of file paths that hit some failure during an upload pass.

    Attributes:
        failed_to_upload: paths that failed to upload to Cloud Storage.
        failed_to_delete: paths uploaded OK that could not be deleted locally.
        datastore_success: paths uploaded OK whose success could not be
            recorded in datastore.
        datastore_failure: paths that failed to upload and whose failure
            could not be recorded in datastore.
    """
    def __init__(self):
        # List of file paths that failed to upload to Cloud Storage
        self.failed_to_upload = list()
        # List of files that failed to delete
        self.failed_to_delete = list()
        # List of files that uploaded successfully however this failed to be
        # captured in datastore
        self.datastore_success = list()
        # List of files that failed to upload and this failed to be
        # captured in datastore
        self.datastore_failure = list()

    def __eq__(self, other):
        # BUG FIX: comparing against a non-UploadErrors object used to raise
        # AttributeError; returning NotImplemented lets Python fall back to
        # the reflected comparison / identity semantics instead.
        if not isinstance(other, UploadErrors):
            return NotImplemented
        return (self.failed_to_upload == other.failed_to_upload
                and self.failed_to_delete == other.failed_to_delete
                and self.datastore_success == other.datastore_success
                and self.datastore_failure == other.datastore_failure)
def heal(errors):
    """Retry every category of failure recorded in *errors*.

    *errors* is an UploadErrors instance; each non-empty bucket is replayed
    through the matching operation (upload, delete, datastore record).
    """
    logging.info('Attempting to heal any upload errors that occured...')
    if errors.failed_to_upload:
        upload(errors.failed_to_upload)
    if errors.failed_to_delete:
        _delete_all_files(errors.failed_to_delete)
    if errors.datastore_success:
        _record_status_in_datastore(errors.datastore_success, success=True)
    if errors.datastore_failure:
        _record_status_in_datastore(errors.datastore_failure, success=False)
def scan(directory, file_ready):
    """Return full paths of the files in *directory* considered ready.

    A file is ready when file_ready(name) is truthy for its bare name.
    """
    ready = [
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if file_ready(name)
    ]
    if ready:
        logging.info('Scanned {0}. Found {1} files to upload'.format(
            directory, len(ready)))
    return ready
def upload(fpaths):
    """Upload each path in *fpaths* to GCS and reconcile the outcome.

    Successfully uploaded files are deleted from local disk, and every file's
    upload status is recorded in datastore. Returns an UploadErrors instance
    listing files that: failed to upload; uploaded but could not be deleted
    locally; uploaded but whose status could not be recorded; failed to
    upload and whose failure could not be recorded.
    """
    errors = UploadErrors()
    if not len(fpaths) > 0:
        return errors
    logging.info('Uploading {0} files'.format(len(fpaths)))
    results = [_upload_single(path) for path in fpaths]
    logging.info('Uploaded {0} files'.format(len(fpaths)))
    # Partition the (ok, path) pairs by outcome.
    uploaded_files = [path for ok, path in results if ok is True]
    errors.failed_to_upload = [path for ok, path in results if ok is False]
    # Remove what made it to GCS; remember anything that would not delete.
    errors.failed_to_delete = _delete_all_files(uploaded_files)
    errors.datastore_success = _record_status_in_datastore(
        uploaded_files, success=True)
    errors.datastore_failure = _record_status_in_datastore(
        errors.failed_to_upload, success=False)
    return errors
def _delete_all_files(fpaths):
    """Remove every file in *fpaths*; return the subset that failed to delete."""
    leftovers = list()
    for path in fpaths:
        try:
            # retry_func re-raises RuntimeError once all retries are exhausted.
            util.retry_func(os.remove, constants.RETRYS, (OSError, ), path)
        except RuntimeError:
            leftovers.append(path)
    return leftovers
def _get_client(client_type='storage'):
    """Return a gcloud client: datastore when client_type is 'datastore',
    otherwise storage (including for unrecognised client_type values).

    Raises CouldNotObtainCredentialsError when credentials are unavailable.
    """
    credentials = sa.get_credentials()  # may raise CouldNotObtainCredentialsError
    factory = datastore.Client if client_type == 'datastore' else storage.client.Client
    return factory(project=config.PROJECT_ID, credentials=credentials)
def _get_ds_key_for_file(fpath):
    """Build the datastore Photo key for *fpath* (keyed by its base name)."""
    basename = os.path.basename(fpath)
    return datastore.key.Key(ds.DATASTORE_PHOTO, basename,
                             project=config.PROJECT_ID)
def _insert_missing_entities(entities, fpaths):
    """Append a fresh datastore entity for every path in *fpaths* that has no
    matching entity in *entities*; return the (mutated) entities list."""
    def entity_name(entity):
        # Entities without a usable key compare as the empty string.
        try:
            return entity.key.name
        except AttributeError:
            return ''

    for path in fpaths:
        if util.in_list(entities, os.path.basename(path), key=entity_name):
            continue
        entity = datastore.entity.Entity(key=_get_ds_key_for_file(path))
        entity['uploaded_date'] = datetime.now()
        entities.append(entity)
    return entities
def _record_status_in_datastore(fpaths, success):
    """
    Records GCS upload status in datastore for each file fpaths.
    `success` is a boolean corresponding to whether the files in fpaths were
    uploaded successfully to GCS or not. A list of files that failed to have
    their upload status updated are returned (empty list on full success).
    """
    # Staged pipeline: each `if not error` block runs only when every earlier
    # stage succeeded; the first failure records a message and short-circuits
    # the rest, so there is a single error-reporting path at the bottom.
    error_msg = ''
    error = False
    try:
        client = _get_client('datastore')
    except CouldNotObtainCredentialsError as e:
        error_msg = 'Could not obtain datastore credentials: {0}'.format(e)
        error = True
    if not error:
        # Stage 1: fetch existing entities for every file's key in one batch.
        keys = list()
        for p in fpaths:
            key = _get_ds_key_for_file(p)
            keys.append(key)
        try:
            entities = client.get_multi(keys)
        except Exception as e:
            error_msg = str(e)
            error = True
    if not error:
        # Add new entities as necessary
        if len(entities) != len(fpaths):
            entities = _insert_missing_entities(entities, fpaths)
        # Stage 2: build the status payload; the two flags are always set
        # together and are mutually exclusive.
        if success is False:
            new_data = {'gcs_upload_failed': True, 'in_gcs': False}
        else:
            new_data = {'in_gcs': True, 'gcs_upload_failed': False}
        # We only want to validate the new data, as there may be restricted
        # fields in the entities we pulled from datastore. All new data must
        # be validated as follows before adding it to the entities that will be
        # pushed to datastore.
        if not ds.validate_data(new_data, allow_restricted_fields=False,
                                kind=ds.DATASTORE_PHOTO):
            error_msg = 'Invalid data: {0}'.format(new_data)
            error = True
    if not error:
        # Update entities
        for i in range(len(entities)):
            entities[i].update(new_data)
        # Save to datastore
        try:
            client.put_multi(entities)
        except Exception as e:
            error_msg = str(e)
            error = True
    if error:
        msg = 'Failed to record {0} upload statuses in datastore: {1}'
        logging.error(msg.format(len(fpaths), error_msg))
    # Any failure returns the whole batch so the caller can retry all of it.
    return fpaths if error else list()
def _check_adult_content(img):
    """
    Checks if img contains adult content via the Cloud Vision SafeSearch API.
    Returns True if img contains (or must be assumed to contain) adult content.
    """
    width, height = getRescaledDimensions(img.width, img.height, 640, 480)
    try:
        resize = img.resize((width, height), Image.ANTIALIAS)
    except IOError:
        logging.error("Invalid image cannot be resized.")
        # Have to assume image is adult content
        return True
    out = io.BytesIO()
    resize.convert('RGB').save(out, format='JPEG')
    vision_client = vision.Client()
    vc_img = vision_client.image(content=out.getvalue())
    safe = vc_img.detect_safe_search()
    # BUG FIX: the original compared only against LIKELY and POSSIBLE, so an
    # image rated VERY_LIKELY — the strongest adult signal — slipped through.
    flagged = (vision.likelihood.Likelihood.POSSIBLE,
               vision.likelihood.Likelihood.LIKELY,
               vision.likelihood.Likelihood.VERY_LIKELY)
    if safe.adult in flagged:
        logging.error("Detected likely adult content upload.")
        return True
    return False
def _upload_derived(derived_file, bucket):
    """Upload *derived_file* into *bucket* under its base name.

    Returns True on success, False when the upload raised.
    """
    blob = storage.Blob(os.path.basename(derived_file), bucket)
    try:
        blob.upload_from_filename(derived_file)
    except Exception as e:
        logging.error('Derived {0} failed to upload to GCS: {1}'.format(
            derived_file, e))
        return False
    logging.info('Successfully uploaded derived {0} to GCS'.format(derived_file))
    return True
def _upload_single(fpath):
"""
Uploads single file to GCS. Returns a tuple containing
(upload_success, fpath).
"""
try:
bucket_name = config.GCS_BUCKET
success = True
try:
datastore_client = _get_client('datastore')
except CouldNotObtainCredentialsError as e:
error_msg = 'Could not obtain datastore credentials: {0}'.format(str(e))
logging.error(error_msg)
return False, fpath
try:
client = _get_client('storage')
except CouldNotObtainCredentialsError as e:
logging.error('Could not obtain GCS credentials: {0}'.format(str(e)))
return False, fpath
bucket = client.bucket(bucket_name)
# Verify that filename already exists as key in database
filename = os.path.basename(fpath)
key = datastore_client.key('Photo', filename)
entity = datastore_client.get(key)
if entity is None:
logging.error('Failed to find file: ' + filename)
return False, fpath
try:
img = Image.open(fpath)
format_ = img.format
if format_ == 'TIFF':
output_file = "/tmp/" + filename + ".jpg"
img.save(output_file)
_upload_derived(output_file, bucket)
os.unlink(output_file)
except IOError as e:
try:
with Raw(filename=fpath) as raw:
tiff_output_file = "/tmp/" + filename + ".tiff"
raw.save(filename=tiff_output_file)
except Exception as e:
logging.error("Failed to parse file with PIL or rawkit: %s (error: %s)" % (fpath, str(e)))
# move the file out of the pending tree so it won't be processed next loop
try:
shutil.move(fpath, "/tmp/%s" % os.path.basename(fpath))
except IOError as e:
logging.error("Unable to move bad file out of the way: %s (error: %s)" % (fpath, str(e)))
return False, fpath
jpg_output_file = "/tmp/" + filename + ".jpg"
img = Image.open(tiff_output_file)
img.save(jpg_output_file)
_upload_derived(jpg_output_file, bucket)
os.unlink(tiff_output_file)
| |
from io import FileIO
import re
import sqlite3
import argparse
import os.path
from enum import Enum, auto
from sqlite3 import Error
from locale import atof
from datetime import datetime
import subprocess
import sys
# Bootstrap: openpyxl is a hard requirement. If it is missing, install it into
# the running interpreter on the fly, then import it either way.
# NOTE(review): this installs packages at import time as a side effect —
# confirm that is acceptable for how this script is deployed.
try:
    from openpyxl import Workbook
except ImportError:
    subprocess.check_call([sys.executable, "-m", "pip", "install", 'openpyxl'])
finally:
    from openpyxl import Workbook
# --- SQLite DDL -------------------------------------------------------------
# Each statement is idempotent (IF NOT EXISTS) and is executed by the db_*
# helper functions below before their inserts.
# Master list of agents, keyed by the externally supplied agent_no.
sql_create_agents_table = """CREATE TABLE IF NOT EXISTS "agents" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
"agent_no" TEXT,
"agent_name" TEXT
);"""
# One row per settlement document (header), items reference it by id.
sql_create_commission_settlements_table = """CREATE TABLE IF NOT EXISTS "commission_settlements" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"agent_no" TEXT NOT NULL,
"periode" TEXT,
"description" TEXT
);"""
sql_create_commission_settlement_items_table = """CREATE TABLE IF NOT EXISTS "commission_settlement_items" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"commission_settlement_id" NUMERIC,
"agent_no" TEXT NOT NULL,
"description" TEXT,
"amount" NUMERIC
);"""
# One row per report document (header); "type" distinguishes report layouts.
sql_create_commission_reports_table = """CREATE TABLE IF NOT EXISTS "commission_reports" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"agent_no" TEXT NOT NULL,
"periode" TEXT,
"commission_source" NUMERIC,
"total_premium" NUMERIC,
"total_commission" NUMERIC,
"description" TEXT,
"type" TEXT
);"""
# Detail rows for all three report layouts; unused columns stay NULL.
# NOTE(review): "NUMBER" is not a standard SQLite type name — SQLite accepts
# it with NUMERIC affinity, but "NUMERIC" (as used above) would be consistent.
sql_create_commission_report_items_table = """CREATE TABLE IF NOT EXISTS "commission_report_items" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"commission_report_id" NUMERIC,
"agent_no" TEXT NOT NULL,
"contract_no" TEXT,
"name" TEXT,
"product" TEXT,
"rn" TEXT,
"akt" TEXT,
"ok" TEXT,
"tk" TEXT,
"from_date" TEXT,
"premium" NUMBER,
"commission_source" NUMBER,
"commission" NUMBER
);"""
# Functions and declarations
def slices(s, *args):
    """Yield consecutive slices of *s*, one per length given in *args*."""
    offset = 0
    for size in args:
        piece = s[offset:offset + size]
        offset += size
        yield piece
class ProcessingStep(Enum):
    """Parser state while walking extracted report text.

    Default until a report section is recognised, Header while heading lines
    are consumed, Details once data rows start (see pdf_parser).
    """
    # auto() values depend on declaration order — do not reorder members.
    Default = auto()
    Header = auto()
    Details = auto()
def get_str(data: any):
    """Return *data* as a string with surrounding whitespace removed."""
    return str(data).strip()
def get_float(data: any):
    """Parse a number written with '.' as thousands separator and ',' as the
    decimal separator; a trailing '-' marks a negative value."""
    normalized = str(data).replace(".", "").replace(",", ".")
    last = len(normalized) - 1
    if texist(normalized, last, "-"):
        normalized = "-" + get_str(normalized[0:last])
    return atof(normalized)
def texist(source, start, substr):
    """Return True when *substr* occurs in *source* starting exactly at *start*."""
    needle_len = len(substr)
    if not substr:
        return False
    if start < 0 or start > len(source) - 1 or start + needle_len > len(source):
        return False
    return source[start:start + needle_len] == substr
def ss(source, start, length=None):
    """Substring of *source* from *start*, spanning *length* characters, or to
    the end when *length* is omitted."""
    if length is None:
        return source[start:]
    return source[start:start + length]
def db_create_connection(db_file):
    """ create a database connection to the SQLite database
        specified by db_file
    :param db_file: database file
    :return: Connection object or None
    """
    try:
        return sqlite3.connect(db_file)
    except Error as e:
        print(e)
        return None
def db_create_table(conn, create_table_sql):
    """ create a table from the create_table_sql statement
    :param conn: Connection object
    :param create_table_sql: a CREATE TABLE statement
    :return:
    """
    try:
        conn.cursor().execute(create_table_sql)
    except Error as e:
        print(e)
def db_query(database, sql):
    """Run *sql* against *database*.

    :return: [column_names, rows] on success, None on any error.
    """
    conn = None
    try:
        conn = db_create_connection(database)
        if conn is None:
            print("Error! cannot create the database connection.")
            return None
        cursor = conn.cursor()
        cursor.execute(sql)
        rows = cursor.fetchall()
        columns = [descriptor[0] for descriptor in cursor.description]
        return [columns, rows]
    except Error as e:
        print(e)
    finally:
        if conn:
            conn.close()
def db_add_agent(database, agent_no, agentName):
    """ Save agent to database.

    Inserts (agent_no, agentName) into the agents table unless a row with the
    same trimmed agent_no already exists. Returns the row id of the inserted
    or existing row, or None when the connection/queries fail.
    """
    conn = None
    try:
        conn = db_create_connection(database)
        if conn is not None:
            # Ensure the table exists before querying it (idempotent DDL).
            db_create_table(conn, sql_create_agents_table)
            sql = """SELECT MAX(id) AS id, agent_no, agent_name FROM agents WHERE trim(agent_no)=trim(?) GROUP BY agent_no, agent_name """
            cur = conn.cursor()
            cur.execute(sql, (agent_no,))
            agents = cur.fetchone()
            if not agents:
                sql = """INSERT INTO agents(agent_no, agent_name) VALUES(?,?)"""
                cur = conn.cursor()
                cur.execute(sql, (agent_no, agentName))
                conn.commit()
                return cur.lastrowid
            else:
                # NOTE(review): when the agent_no already exists, agentName is
                # NOT updated — the stored name wins; confirm this is intended.
                return agents[0]
        else:
            print("Error! cannot create the database connection.")
    except Error as e:
        print(e)
    finally:
        if conn:
            conn.close()
def db_save_commission_settlement(database, description, periode, agent_no, items):
    """ Save Commission settlement to the database.

    Writes one commission_settlements header row plus one
    commission_settlement_items row per raw text line in *items*; the item
    fields are cut from fixed column offsets of each line (see ss()).
    Returns the last inserted row id, or None on error.
    """
    conn = None
    try:
        conn = db_create_connection(database)
        if conn is not None:
            db_create_table(conn, sql_create_commission_settlements_table)
            db_create_table(conn, sql_create_commission_settlement_items_table)
            sql1 = """INSERT INTO commission_settlements(agent_no, periode, description) VALUES(?,?,?)"""
            sql2 = """INSERT INTO commission_settlement_items(commission_settlement_id, agent_no, description, amount) VALUES(?,?,?,?)"""
            cur = conn.cursor()
            cur.execute(sql1, (agent_no, periode, description))
            commission_settlement_id = cur.lastrowid
            # Fixed-width layout: description at cols 5-43, amount from col 44.
            # NOTE(review): amount is stored as the raw substring, not run
            # through get_float like the report savers — confirm intended.
            for item in items:
                cur.execute(sql2, (commission_settlement_id,
                                   agent_no, ss(item, 5, 39), ss(item, 44)))
            conn.commit()
            return cur.lastrowid
        else:
            print("Error! cannot create the database connection.")
    except Error as e:
        print(e)
    finally:
        if conn:
            conn.close()
    return
def db_save_commission_report_1(database, agent_no, name, periode, type, total_premium, total_commission, items):
    """ Save Commission report 1 to the database.

    Writes one commission_reports header row plus one commission_report_items
    row per raw text line in *items*, slicing fields from fixed column
    offsets (contract_no, name, product, rn, akt, from_date, premium,
    commission). Returns the last inserted row id, or None on error.
    """
    conn = None
    try:
        conn = db_create_connection(database)
        if conn is not None:
            db_create_table(conn, sql_create_commission_reports_table)
            db_create_table(conn, sql_create_commission_report_items_table)
            sql1 = """INSERT INTO commission_reports(agent_no, periode, total_premium, total_commission, type, description) VALUES(?,?,?,?,?,?)"""
            sql2 = """INSERT INTO commission_report_items(commission_report_id, agent_no, contract_no, name, product, rn, akt, from_date, premium, commission) VALUES(?,?,?,?,?,?,?,?,?,?)"""
            cur = conn.cursor()
            cur.execute(sql1, (agent_no, periode, total_premium,
                               total_commission, type, name))
            commission_report_id = cur.lastrowid
            # Fixed-width offsets are specific to the "report 1" layout.
            for item in items:
                cur.execute(sql2, (commission_report_id, agent_no, ss(item, 5, 13), ss(item, 18, 24), ss(item, 42, 9), ss(
                    item, 51, 1), ss(item, 53, 4), ss(item, 57, 6), get_float(ss(item, 63, 13)), get_float(ss(item, 76))))
            conn.commit()
            return cur.lastrowid
        else:
            print("Error! cannot create the database connection.")
    except Error as e:
        print(e)
    finally:
        if conn:
            conn.close()
    return
def db_save_commission_report_2(database, agent_no, name, periode, type, total_commission, items):
    """ Save Commission report 2 to the database.

    Like report 1 but the layout has no premium columns: only contract_no,
    name, product, from_date and commission are sliced from each line.
    Returns the last inserted row id, or None on error.
    """
    conn = None
    try:
        conn = db_create_connection(database)
        if conn is not None:
            db_create_table(conn, sql_create_commission_reports_table)
            db_create_table(conn, sql_create_commission_report_items_table)
            sql1 = """INSERT INTO commission_reports(agent_no, periode, total_commission, type, description) VALUES(?,?,?,?,?)"""
            sql2 = """INSERT INTO commission_report_items(commission_report_id, agent_no, contract_no, name, product, from_date, commission) VALUES(?,?,?,?,?,?,?)"""
            cur = conn.cursor()
            cur.execute(sql1, (agent_no, periode,
                               total_commission, type, name))
            commission_report_id = cur.lastrowid
            # Fixed-width offsets are specific to the "report 2" layout.
            for item in items:
                cur.execute(sql2, (commission_report_id, agent_no, ss(item, 5, 13), ss(item, 18, 34), ss(item, 52, 30),
                                   ss(item, 82, 6), get_float(ss(item, 88, 7))))
            conn.commit()
            return cur.lastrowid
        else:
            print("Error! cannot create the database connection.")
    except Error as e:
        print(e)
    finally:
        if conn:
            conn.close()
    return
def db_save_commission_report_3(database, agent_no, name, periode, type, commission_source, total_commission, items):
    """ Save Commission report 3 to the database.

    Layout variant with ok/tk flags and a commission_source column; fields
    are sliced from fixed offsets of each raw line. Returns the last inserted
    row id, or None on error.
    """
    conn = None
    try:
        conn = db_create_connection(database)
        if conn is not None:
            db_create_table(conn, sql_create_commission_reports_table)
            db_create_table(conn, sql_create_commission_report_items_table)
            sql1 = """INSERT INTO commission_reports(agent_no, periode, commission_source, total_commission, type, description) VALUES(?,?,?,?,?,?)"""
            sql2 = """INSERT INTO commission_report_items(commission_report_id, agent_no, contract_no, name, product, ok, tk, from_date, commission_source, commission) VALUES(?,?,?,?,?,?,?,?,?,?)"""
            cur = conn.cursor()
            cur.execute(sql1, (agent_no, periode, commission_source,
                               total_commission, type, name))
            commission_report_id = cur.lastrowid
            # Fixed-width offsets are specific to the "report 3" layout.
            for item in items:
                cur.execute(sql2, (commission_report_id, agent_no, ss(item, 5, 10), ss(item, 15, 16), ss(item, 31, 16),
                                   ss(item, 47, 1), ss(item, 50, 2), ss(item, 53, 6), get_float(ss(item, 59, 16)), get_float(ss(item, 75))))
            conn.commit()
            return cur.lastrowid
        else:
            print("Error! cannot create the database connection.")
    except Error as e:
        print(e)
    finally:
        if conn:
            conn.close()
    return
def pdf_parser(input_file, database):
# Testing purposes only
# data = slices('Dette er en test', 6, 3, 3, 10)
# for d in data:
# print(d)
# Main program
encodings = ["utf-8", "ISO-8859-1", "windows-1250", "windows-1252"]
regex_page = r"((.?)stream(.|\n)BT(?P<page>.*?)ET(.|\n)endstream)|(\/Title(?P<title>.*?)\n)"
regex_objects = r"(?P<object>[0-9]+\s+[0-9]+\s+obj\s.+?endobj)"
regex_text = r"\((?P<text>.*)\)\s*?Tj"
for e in encodings:
try:
with open("test.pdf", "r", encoding=e) as strm:
pdf = strm.read()
strm.close()
except UnicodeDecodeError:
print('got unicode error with %s , trying different encoding' % e)
else:
print('opening the file with encoding: %s ' % e)
matches = re.finditer(regex_page, pdf, re.DOTALL)
matches_enum = enumerate(matches, start=1)
elements = []
for matchNum, match in matches_enum:
page = match.group("page")
pageTitle = match.group("title")
if not page:
continue
sub_matches = re.finditer(
regex_text, page, re.MULTILINE | re.IGNORECASE)
sub_matches_enum = enumerate(sub_matches, start=1)
for subMatchNum, sub_match in sub_matches_enum:
sub_group_text = sub_match.group("text")
elements.append(sub_group_text)
elements.append("### END ###")
with open("test.txt", "w") as strm:
strm.writelines('\n'.join(elements) + '\n')
# TODO: Convert these variable to appropriate classes.
agent_no = ""
agent_name = ""
processing_step: ProcessingStep = ProcessingStep.Default
commission_settlemenmt = False
commission_settlemenmt_name = ""
commission_settlemenmt_periode = ""
commission_settlemenmt_items = []
commission_report = False
commission_report_name = ""
commission_report_periode = ""
commission_report_type = ""
commission_report_items = []
for element in elements:
if texist(element, 53, "AGENT:"):
agent_name = get_str(ss(element, 60))
continue
if texist(element, 40, "AGENTNR:"):
if get_str(ss(element, 50, 10)) != agent_no:
agent_no = get_str(ss(element, 50, 10))
db_add_agent(database, agent_no, agent_name)
continue
# Commission settlement
if texist(element, 10, "PROVISJONSAVREGNING FRA"):
commission_settlemenmt = True
commission_settlemenmt_name = get_str(ss(element, 10, 58))
commission_settlemenmt_periode = get_str(ss(element, 76))
commission_settlemenmt_items.clear()
continue
if commission_settlemenmt and not texist(element, 5, "PROVISJON") and not texist(element, 5, "TOTALT"):
commission_settlemenmt_items.append(element)
continue
if commission_settlemenmt and texist(element, 5, "PROVISJON"):
commission_amount = get_float(ss(element, 31))
continue
if commission_settlemenmt and texist(element, 5, "TOTALT"):
commission_settlemenmt = False
total_amount = get_float(ss(element, 31))
db_save_commission_settlement(
database, commission_settlemenmt_name, commission_settlemenmt_periode, agent_no, commission_settlemenmt_items)
continue
# Commission report
if texist(element, 10, "PROVISJONSOPPGAVE FRA"):
commission_report = True
commission_report_name = get_str(ss(element, 10, 56))
commission_report_periode = get_str(ss(element, 75))
commission_report_items.clear()
commission_report_type = ""
processing_step = ProcessingStep.Default
continue
if commission_report and not texist(element, 50, "TOTALT") and not texist(element, 53, "TOTALT") and not texist(element, 76, "TOTALT"):
if processing_step == ProcessingStep.Default:
commission_report_type = get_str(element).strip("* ")
processing_step = ProcessingStep.Header
continue
if processing_step == ProcessingStep.Header and texist(element, 5, "AVTALE-NR NAVN"):
processing_step = ProcessingStep.Details
| |
<reponame>intel/RAAD
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
# @package gatherMeta
from __future__ import annotations
import optparse, datetime, traceback, os, json, tempfile, random, sys, pprint, os.path, shutil, pathlib, fnmatch, hashlib, difflib, time, copy
from src.software.debug import whoami
def checkPythonVersion(expect_major=3, expect_minor=9):
    """ Checking Python version.
    To upgrade do the following with Anaconda:
        conda update conda
        conda install python=3.9
        conda install anaconda-client
        conda update anaconda
        conda install -c anaconda python
    Args:
        expect_major: python major value.
        expect_minor: python minor value.
    Returns: bool if version matches candidates.
    """
    current_version = str(sys.version_info[0]) + "." + str(sys.version_info[1]) + "." + str(sys.version_info[2])
    print("Python version is " + current_version)
    matchBool = sys.version_info[:2] == (expect_major, expect_minor)
    if not matchBool:
        # BUG FIX: the original if/elif pair covered both outcomes, so its
        # trailing else (this mismatch notice) was unreachable dead code.
        print("Current version is different: Python " + current_version)
    return matchBool
def tryFolder(path=None, walkUpLimit=4):
    """
    Helper function to walk up a file path to the destination folder.
    Args:
        path: directory name
        walkUpLimit: levels allowed to walk up in directory chain
    Returns: Full path of the directory, or None when it cannot be found.
    """
    if path is None:
        return None
    try:
        candidate = path
        for _ in range(walkUpLimit):
            resolved = os.path.abspath(os.path.join(os.getcwd(), candidate))
            if os.path.exists(resolved):
                return resolved
            # Retry one directory level higher on the next pass.
            candidate = f"../{candidate}"
        return None
    except:
        return None
def tryFile(path=None, fileName=None, walkUpLimit=4):
    """
    Helper function to walk up a directory chain looking for a file.
    Args:
        path: directory to start with (defaults to the working directory).
        fileName: file to look for in directory walk up path.
        walkUpLimit: levels allowed to walk up in directory chain
    Returns: Full path of the file, or None when it cannot be found.
    """
    if fileName is None:
        return None
    # NOTE(review): this check was meant to flag absolute-style names, but
    # fileName[0:] is the whole string, so both branches use the name as-is.
    if fileName[0] == '/':
        candidateName = fileName[0:]
    else:
        candidateName = fileName
    try:
        baseDir = os.getcwd() if path is None else path
        relative = ''
        for _ in range(walkUpLimit):
            candidate = os.path.abspath(os.path.join(baseDir, relative, candidateName))
            if os.path.isfile(candidate):
                return candidate
            relative = f'../{relative}'
        return None
    except BaseException as errorContext:
        pprint.pprint(whoami())
        print(errorContext)
        return None
def getPathToRootCount(selectPath: str = None):
    """Count how many os.path.split steps separate *selectPath* from the
    filesystem root; falls back to this module's directory when the path is
    missing or None."""
    if selectPath is None or not os.path.exists(selectPath):
        current = os.path.join(os.path.dirname(__file__))
    else:
        current = selectPath
    hops = 0
    while len(current) > 1:
        current, _ = os.path.split(current)
        hops += 1
    return hops
def tryFolderDetect(cPath: str = None):
    """Locate *cPath*, allowing as many walk-up levels as separate it from root."""
    return tryFolder(path=cPath, walkUpLimit=getPathToRootCount(selectPath=cPath))
def tryFileDetect(cPath=None, fileName=None):
    """Locate *fileName* from *cPath*, allowing walk-up all the way to root."""
    return tryFile(path=cPath, fileName=fileName,
                   walkUpLimit=getPathToRootCount(selectPath=cPath))
def findAll(fileType=None, directoryTreeRootNode=None, debug=False, verbose=False, doIt=False,
            excludeFolderList=None,
            excludeFileList=None):
    """
    Find all files of a type given a directory.
    Args:
        fileType: file extension to look for.
        directoryTreeRootNode: filesystem main root node
        debug: debug mode for adding functionality.
        verbose: Add more information to debug.
        doIt: Append every discovered directory to sys.path.
        excludeFolderList: Folder names to skip entirely (not descended into).
        excludeFileList: file extensions to skip.
    Returns: (fileTypeTree, directoryTree)
    """
    if fileType is None:
        fileType = '.fpcore'
    if excludeFileList is None:
        # BUG FIX: the original literal was missing a comma (".rc" ".cache"
        # silently concatenated to ".rc.cache"), losing both extensions.
        excludeFileList = [".pyc", ".bin", ".svg", ".png", ".mbs", ".rc", ".cache", ".pdb", ".md", ".cs", ".bat"]
    if excludeFolderList is None:
        excludeFolderList = ["__pycache__", "exclude", "data"]
    if directoryTreeRootNode is None or os.path.exists(directoryTreeRootNode) is False:
        directoryTreeRootNode = (os.path.dirname(__file__) or '.')
    if debug is True:
        print('Directory Root Node: {0}'.format(directoryTreeRootNode), flush=True)
    # Walk Entire local tree and save contents
    directoryTree = []
    fileTypeTree = []
    excludedSuffixes = tuple(excludeFileList)
    for root, dirs, files in os.walk(directoryTreeRootNode):
        try:
            # BUG FIX: prune excluded folders in place so os.walk does not
            # descend into them (the original only skipped recording them,
            # still collecting their files).
            dirs[:] = [d for d in dirs if d not in excludeFolderList]
            for dirSelect in dirs:
                dirLocation = os.path.abspath(os.path.join(root, dirSelect))
                directoryTree.append(dirLocation)
                if debug is True and verbose is True:
                    print('Directory Node: {0}'.format(dirLocation), flush=True)
            for file in files:
                # BUG FIX: exclusion used `file not in excludeFileList`,
                # comparing whole filenames against bare extensions, so no
                # file was ever excluded; match by suffix instead.
                if file.endswith(excludedSuffixes):
                    continue
                if file.endswith(fileType):
                    fileLocation = os.path.abspath(os.path.join(root, file))
                    fileTypeTree.append(fileLocation)
                    if debug is True and verbose is True:
                        print('{0} File Node: {1}'.format(fileType, fileLocation), flush=True)
                elif debug is True and verbose is True:
                    otherFileLocation = os.path.abspath(os.path.join(root, file))
                    print('Other {0} File Node: {1}'.format(file.endswith(fileType), otherFileLocation), flush=True)
        except BaseException as ErrorContext:
            pprint.pprint(whoami())
            print('Exception {0}'.format(ErrorContext))
            pass
    systemPath = sys.path
    if debug is True and verbose is True:
        print('System path is: ')
        pprint.pprint(systemPath)
    if doIt is True:
        for loc in directoryTree:
            sys.path.append(loc)
    return (fileTypeTree, directoryTree)
def generateString(characterSet=None, fileStringNameSize=None):
    """
    Generate a pseudo-random token string.
    Args:
        characterSet: Character set to be used (default hex-like alphabet).
        fileStringNameSize: Length of the generated token (default 16).
    Returns: Token generated.
    """
    chars = 'abcdef0123456789' if characterSet is None else characterSet
    size = 16 if fileStringNameSize is None else fileStringNameSize
    # Fixed seed: output is intentionally reproducible across calls.
    random.seed(1)
    return ''.join(random.choice(chars) for _ in range(size))
def getTempPathName(genPath=True):
    """Return the system temp directory, or '' when genPath is not True."""
    return tempfile.gettempdir() if genPath is True else ""
def getDateTimeFileFormatString():
    """strftime format used to embed UTC timestamps in generated file names."""
    return '%Y-%m-%d-%H-%M-%S-%f'
def getTempFileName(genFile=True):
    """
    Create a uniquely named temporary file prefixed with a UTC timestamp.
    Args:
        genFile: proceed flag with generation.
    Returns: the (fd, path) pair from tempfile.mkstemp, or "" when genFile
        is not True.
    """
    # NOTE(review): the open descriptor from mkstemp is handed back to the
    # caller, who is responsible for closing it — confirm callers do so.
    stamp = datetime.datetime.utcnow().strftime(getDateTimeFileFormatString())
    if genFile is not True:
        return ""
    prefix = "".join([stamp, "_"])
    return tempfile.mkstemp(suffix="", prefix=prefix, dir=None)
def getTempPathAndFileName(extensionName=None, genPath=True, genFile=True):
    """
    Generate path and file name.
    Args:
        extensionName: Extension name desired (default "_debug.tex").
        genPath: proceed flag with path generation.
        genFile: proceed flag with file-name generation.
    Returns: "<temp path>/<generated name><extension>" string.
    """
    if extensionName is None:
        extensionName = "_debug.tex"
    fileToken = getTempFileName(genFile=genFile)
    # BUG FIX: getTempFileName returns the (fd, path) pair from mkstemp, and
    # the original passed that tuple straight into str.join, raising
    # TypeError whenever genFile was True (the default). Close the descriptor
    # and keep only the generated base name.
    if isinstance(fileToken, tuple):
        fd, tempPath = fileToken
        os.close(fd)
        fileToken = os.path.basename(tempPath)
    return "".join([getTempPathName(genPath=genPath), "/", fileToken, extensionName])
def createFilePath(requestPath):
    """
    Create *requestPath* including intermediate directories; an existing
    directory is not an error.
    Args:
        requestPath: path to request creation.
    Returns: Void
    """
    os.makedirs(requestPath, exist_ok=True)
def getTimeStamp(inTime=None):
    """
    Return a timestamp string: a fresh UTC stamp when inTime is None, the
    value itself when it is already a string, otherwise None.
    Args:
        inTime: date time desired to be set.
    Returns: timestamp string or None.
    """
    if isinstance(inTime, str):
        return inTime
    if inTime is None:
        return datetime.datetime.utcnow().strftime(getDateTimeFileFormatString())
    return None
def getFileNameUTCTime():
    """ Get timestamp for a file name
    Returns: string with the current UTC time in file-name format.
    """
    return getTimeStamp()
def getBytesSize(bytesIn=0, suffix="B"):
    """
    Scale bytes to its proper format
    e.g:
        1253656 => '1.20MB'
        1253656678 => '1.17GB'
    Returns int(0) for None, non-numeric, zero or negative input.
    """
    # BUG FIX: the original guard read `bytesValue is None or 0`, where the
    # `or 0` operand is always falsy and therefore dead; the intended check
    # (reject None / non-numbers / non-positive values) is explicit below.
    if not isinstance(bytesIn, (int, float)) or bytesIn <= 0:
        return int(0)
    factor = 1024
    value = float(bytesIn)
    for unit in ["", "K", "M", "G", "T", "P"]:
        if value < factor:
            return f"{value:.2f}{unit}{suffix}"
        value /= factor
    # BUG FIX: values of 1024 PB or more used to leak the raw float quotient;
    # report them in the largest unit instead.
    return f"{value * factor:.2f}P{suffix}"
def strip_end(stringInput='', suffix=''):
    """Return *stringInput* with *suffix* removed from the end (at most once)."""
    try:  # str.removesuffix requires Python 3.9+
        return stringInput.removesuffix(suffix)
    except:
        if stringInput.endswith(suffix):
            return stringInput[:len(stringInput) - len(suffix)]
        return stringInput
def strip_start(stringInput='', prefix=''):
    """Return *stringInput* with *prefix* removed from the start (at most once)."""
    try:  # str.removeprefix requires Python 3.9+
        return stringInput.removeprefix(prefix)
    except:
        if stringInput.startswith(prefix):
            return stringInput[len(prefix):len(stringInput)]
        return stringInput
def strip_StartEnd(stringInput='', prefix='', suffix=''):
    """Strip *prefix* from the start, then *suffix* from the end."""
    trimmed = strip_start(stringInput=stringInput, prefix=prefix)
    return strip_end(stringInput=trimmed, suffix=suffix)
def cleanFileName(fileName='defaultName'):
    """ Processes a string name into a filename removing invalid tokens.

    Letters, digits and the characters in "-_.() " are kept; every other
    character is replaced with an underscore.
    Args:
        fileName: string name
    Returns: clean name
    """
    allowed = "-_.() "
    return "".join(
        ch if (ch.isalpha() or ch.isdigit() or ch in allowed) else "_"
        for ch in fileName
    )
def readFiles(directory_files):
    """Print the contents of each readable text file, best-effort.

    Args:
        directory_files: iterable of file paths.

    Returns:
        None. Files that cannot be opened or decoded are skipped silently.
    """
    for file_path in directory_files:
        try:
            with open(file_path, 'r') as infile:
                print(infile.read())
        except (OSError, UnicodeDecodeError):
            # BUG FIX: was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit. Keep the best-effort skip for
            # unreadable or undecodable files only.
            continue
    return
def matchFiles(ext, directory_files):
    """Prints files with extension ext
    Args:
        directory_files:
        ext:
    Returns:
    """
    # fnmatch wildcard reference:
    #   *      matches everything
    #   ?      matches any single character
    #   [seq]  matches any character in seq
    #   [!seq] matches any character not in seq
    pattern = "*" + ext
    for name in directory_files:
        if fnmatch.fnmatch(name, pattern):
            print(name)
    return
def cleanAndRecreatePath(locationOutput: str = None):
    """Delete a directory tree (if any) and recreate it empty.

    Args:
        locationOutput: directory path; the call is a no-op when None.
    """
    if locationOutput is None:
        return
    # Remove any previous contents of the output folder.
    try:
        shutil.rmtree(path=locationOutput, ignore_errors=True)
    except OSError as removeError:
        # NOTE(review): with ignore_errors=True rmtree suppresses OSError
        # itself, so this handler is defensive only -- confirm intent.
        print(f"Error delete: {locationOutput} {removeError.strerror}")
    # Recreate the (now empty) directory.
    try:
        os.makedirs(locationOutput, mode=0o777, exist_ok=True)
    except OSError as createError:
        print(f"Error create: {locationOutput} {createError.strerror}")
    return
def getFileFormats():
    """ Dictionary contains file types as keys and lists of their corresponding file formats
    Returns: Expected classes of files.
    """
    return {
        "Images": ["jpg", "gif", "png", "jpeg", "bmp"],
        "Audio": ["mp3", "wav", "aiff", "flac", "aac"],
        "Video": ["m4v", "flv", "mpeg", "mov", "mpg", "mpe", "wmv", "MOV", "mp4"],
        "Documents": ["doc", "docx", "txt", "ppt", "pptx", "pdf", "rtf", "epub", "xls", "xlsx"],
        # NOTE(review): ".sh" keeps a leading dot unlike every other entry -- confirm intended.
        "Exe": ["exe", ".sh"],
        "Compressed": ["zip", "tar", "7", "rar", "gz", "7z"],
        "Virtual_Machine_and_iso": ["vmdk", "ova", "iso"],
    }
def organizeDirectory(downloadDirectory=None, fileTypes=None):
""" Download of directory by file types.
Args:
downloadDirectory: Directory to | |
<reponame>doraskayo/buildstream
#!/usr/bin/env python3
#
# Copyright (C) 2018 Bloomberg Finance LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# <NAME> <<EMAIL>>
"""
FileBasedDirectory
==================
Implementation of the Directory class which backs onto a normal POSIX filing system.
See also: :ref:`sandboxing`.
"""
import os
import shutil
import stat
from .directory import Directory, VirtualDirectoryError, _FileType
from .. import utils
from ..utils import link_files, copy_files, list_relative_paths, _get_link_mtime, BST_ARBITRARY_TIMESTAMP
from ..utils import _set_deterministic_user, _set_deterministic_mtime
from ..utils import FileListResult
# FileBasedDirectory intentionally doesn't call its superclass constructor,
# which is meant to be unimplemented.
# pylint: disable=super-init-not-called
class FileBasedDirectory(Directory):
    def __init__(self, external_directory=None, *, parent=None):
        # Path on the host filesystem that this object wraps.
        self.external_directory = external_directory
        # Containing FileBasedDirectory, or None at the root of the tree.
        self.parent = parent
    def descend(self, *paths, create=False, follow_symlinks=False):
        """ See superclass Directory for arguments

        Walks down one path component at a time, wrapping each subdirectory
        in a new FileBasedDirectory linked back via `parent`. Raises
        VirtualDirectoryError when a component is not a directory or does
        not exist (unless `create` is set, in which case it is created).
        """
        current_dir = self
        for path in paths:
            # Skip empty path segments
            if not path:
                continue
            if path == ".":
                continue
            if path == "..":
                if current_dir.parent is not None:
                    current_dir = current_dir.parent
                # In POSIX /.. == / so just stay at the root dir
                continue
            new_path = os.path.join(current_dir.external_directory, path)
            try:
                # lstat so symlinks are detected rather than silently followed.
                st = os.lstat(new_path)
                if stat.S_ISDIR(st.st_mode):
                    current_dir = FileBasedDirectory(new_path, parent=current_dir)
                elif follow_symlinks and stat.S_ISLNK(st.st_mode):
                    linklocation = os.readlink(new_path)
                    newpaths = linklocation.split(os.path.sep)
                    if os.path.isabs(linklocation):
                        # Absolute link targets resolve against the tree root,
                        # keeping resolution inside this directory tree.
                        current_dir = current_dir._find_root().descend(*newpaths, follow_symlinks=True)
                    else:
                        current_dir = current_dir.descend(*newpaths, follow_symlinks=True)
                else:
                    raise VirtualDirectoryError(
                        "Cannot descend into '{}': '{}' is not a directory".format(path, new_path),
                        reason="not-a-directory",
                    )
            except FileNotFoundError:
                if create:
                    os.mkdir(new_path)
                    current_dir = FileBasedDirectory(new_path, parent=current_dir)
                else:
                    raise VirtualDirectoryError("Cannot descend into '{}': '{}' does not exist".format(path, new_path))
        return current_dir
    def import_files(
        self,
        external_pathspec,
        *,
        filter_callback=None,
        report_written=True,
        update_mtime=None,
        can_link=False,
        properties=None
    ):
        """ See superclass Directory for arguments """
        # Imported here to avoid a module-level import cycle with the
        # CAS-backed Directory implementation.
        from ._casbaseddirectory import CasBasedDirectory  # pylint: disable=cyclic-import
        if isinstance(external_pathspec, CasBasedDirectory):
            # Source is CAS-backed: walk its index and materialise each entry
            # on the local filesystem (hard-link when allowed, copy otherwise).
            if can_link:
                actionfunc = utils.safe_link
            else:
                actionfunc = utils.safe_copy
            import_result = FileListResult()
            self._import_files_from_cas(
                external_pathspec, actionfunc, filter_callback, update_mtime=update_mtime, result=import_result,
            )
        else:
            # Source is another filesystem path or file-based Directory.
            if isinstance(external_pathspec, Directory):
                source_directory = external_pathspec.external_directory
            else:
                source_directory = external_pathspec
            # Hard links share inodes, so linking is skipped when per-file
            # mtime updates are requested below.
            if can_link and not update_mtime:
                import_result = link_files(
                    source_directory,
                    self.external_directory,
                    filter_callback=filter_callback,
                    ignore_missing=False,
                    report_written=report_written,
                )
            else:
                import_result = copy_files(
                    source_directory,
                    self.external_directory,
                    filter_callback=filter_callback,
                    ignore_missing=False,
                    report_written=report_written,
                )
        if update_mtime:
            # Stamp every written file with the requested atime/mtime pair.
            for f in import_result.files_written:
                os.utime(os.path.join(self.external_directory, f), times=(update_mtime, update_mtime))
        return import_result
def import_single_file(self, external_pathspec, properties=None):
dstpath = os.path.join(self.external_directory, os.path.basename(external_pathspec))
result = FileListResult()
if os.path.exists(dstpath):
result.ignored.append(dstpath)
else:
shutil.copyfile(external_pathspec, dstpath, follow_symlinks=False)
return result
    def _mark_changed(self):
        # Intentional no-op: a plain filesystem directory keeps no cached
        # state that needs invalidating after modification.
        pass
    def set_deterministic_user(self):
        """Apply deterministic ownership via utils._set_deterministic_user."""
        _set_deterministic_user(self.external_directory)
    def export_files(self, to_directory, *, can_link=False, can_destroy=False):
        """Export this directory's contents into ``to_directory``.

        With ``can_destroy``, a cheap os.rename of the whole tree is tried
        first; on failure (e.g. presumably a cross-filesystem move -- the
        OSError is not inspected) it falls back to linking or copying.
        """
        if can_destroy:
            # Try a simple rename of the sandbox root; if that
            # doesnt cut it, then do the regular link files code path
            try:
                os.rename(self.external_directory, to_directory)
                return
            except OSError:
                # Proceed using normal link/copy
                pass
        os.makedirs(to_directory, exist_ok=True)
        if can_link:
            link_files(self.external_directory, to_directory)
        else:
            copy_files(self.external_directory, to_directory)
    # Add a directory entry deterministically to a tar file
    #
    # This function takes extra steps to ensure the output is deterministic.
    # First, it sorts the results of os.listdir() to ensure the ordering of
    # the files in the archive is the same. Second, it sets a fixed
    # timestamp for each entry. See also https://bugs.python.org/issue24465.
    def export_to_tar(self, tarfile, destination_dir, mtime=BST_ARBITRARY_TIMESTAMP):
        # We need directories here, including non-empty ones,
        # so list_relative_paths is not used.
        for filename in sorted(os.listdir(self.external_directory)):
            source_name = os.path.join(self.external_directory, filename)
            arcname = os.path.join(destination_dir, filename)
            tarinfo = tarfile.gettarinfo(source_name, arcname)
            # Normalise ownership and timestamp so identical trees always
            # produce byte-identical archives.
            tarinfo.mtime = mtime
            tarinfo.uid = 0
            tarinfo.gid = 0
            tarinfo.uname = ""
            tarinfo.gname = ""
            if tarinfo.isreg():
                with open(source_name, "rb") as f:
                    tarfile.addfile(tarinfo, f)
            elif tarinfo.isdir():
                tarfile.addfile(tarinfo)
                # Recurse to archive the subdirectory's contents too.
                self.descend(*filename.split(os.path.sep)).export_to_tar(tarfile, arcname, mtime)
            else:
                tarfile.addfile(tarinfo)
def is_empty(self):
it = os.scandir(self.external_directory)
return next(it, None) is None
    def mark_unmodified(self):
        """ Marks all files in this directory (recursively) as unmodified.
        """
        # Baseline for list_modified_paths(): stamps the tree with the
        # deterministic mtime that list_modified_paths() compares against.
        _set_deterministic_mtime(self.external_directory)
def list_modified_paths(self):
"""Provide a list of relative paths which have been modified since the
last call to mark_unmodified.
Return value: List(str) - list of modified paths
"""
return [
f
for f in list_relative_paths(self.external_directory)
if _get_link_mtime(os.path.join(self.external_directory, f)) != BST_ARBITRARY_TIMESTAMP
]
    def list_relative_paths(self):
        """Provide a list of all relative paths.
        Return value: List(str) - list of all paths
        """
        # Delegates to the shared utils helper over the backing directory.
        return list_relative_paths(self.external_directory)
    def get_size(self):
        """Return the directory's total size as computed by utils._get_dir_size."""
        return utils._get_dir_size(self.external_directory)
    def stat(self, *path, follow_symlinks=False):
        """lstat() the entry addressed by the given path components.

        With follow_symlinks=True, symlink targets are re-resolved through
        descend()/_find_root() so resolution cannot escape this tree's root.
        Raises FileNotFoundError (from os.lstat) for missing entries.
        """
        subdir = self.descend(*path[:-1], follow_symlinks=follow_symlinks)
        newpath = os.path.join(subdir.external_directory, path[-1])
        st = os.lstat(newpath)
        if follow_symlinks and stat.S_ISLNK(st.st_mode):
            linklocation = os.readlink(newpath)
            newpath = linklocation.split(os.path.sep)
            if os.path.isabs(linklocation):
                # Absolute target: resolve relative to the tree root.
                return subdir._find_root().stat(*newpath, follow_symlinks=True)
            return subdir.stat(*newpath, follow_symlinks=True)
        else:
            return st
def exists(self, *path, follow_symlinks=False):
try:
self.stat(*path, follow_symlinks=follow_symlinks)
return True
except (VirtualDirectoryError, FileNotFoundError):
return False
    def file_digest(self, *path):
        """Return the SHA-256 hex digest of the regular file at `path`.

        Raises VirtualDirectoryError for existing non-regular-file entries.
        """
        # Use descend() to avoid following symlinks (potentially escaping the sandbox)
        subdir = self.descend(*path[:-1])
        if subdir.exists(path[-1]) and not subdir.isfile(path[-1]):
            raise VirtualDirectoryError("Unsupported file type for digest")
        newpath = os.path.join(subdir.external_directory, path[-1])
        return utils.sha256sum(newpath)
    def readlink(self, *path):
        """Return the target string of the symlink at `path`.

        Raises VirtualDirectoryError for existing non-symlink entries.
        """
        # Use descend() to avoid following symlinks (potentially escaping the sandbox)
        subdir = self.descend(*path[:-1])
        if subdir.exists(path[-1]) and not subdir.islink(path[-1]):
            raise VirtualDirectoryError("Unsupported file type for readlink")
        newpath = os.path.join(subdir.external_directory, path[-1])
        return os.readlink(newpath)
def open_file(self, *path: str, mode: str = "r"):
# Use descend() to avoid following symlinks (potentially escaping the sandbox)
subdir = self.descend(*path[:-1])
newpath = os.path.join(subdir.external_directory, path[-1])
if mode not in ["r", "rb", "w", "wb", "w+", "w+b", "x", "xb", "x+", "x+b"]:
raise ValueError("Unsupported mode: `{}`".format(mode))
if "b" in mode:
encoding = None
else:
encoding = "utf-8"
if "r" in mode:
return open(newpath, mode=mode, encoding=encoding) # pylint: disable=consider-using-with
else:
if "x" in mode:
# This check is not atomic, however, we're operating with a
# single thread in a private directory tree.
if subdir.exists(path[-1]):
raise FileExistsError("{} already exists in {}".format(path[-1], str(subdir)))
mode = "w" + mode[1:]
return utils.save_file_atomic(newpath, mode=mode, encoding=encoding)
def remove(self, *path, recursive=False):
# Use descend() to avoid following symlinks (potentially escaping the sandbox)
subdir = self.descend(*path[:-1])
newpath = os.path.join(subdir.external_directory, path[-1])
if subdir._get_filetype(path[-1]) == _FileType.DIRECTORY:
if recursive:
shutil.rmtree(newpath)
else:
os.rmdir(newpath)
else:
os.unlink(newpath)
    def rename(self, src, dest):
        """Move `src` to `dest` (both sequences of path components),
        removing any existing destination entry first."""
        # Use descend() to avoid following symlinks (potentially escaping the sandbox)
        srcdir = self.descend(*src[:-1])
        destdir = self.descend(*dest[:-1])
        srcpath = os.path.join(srcdir.external_directory, src[-1])
        destpath = os.path.join(destdir.external_directory, dest[-1])
        if destdir.exists(dest[-1]):
            destdir.remove(dest[-1])
        os.rename(srcpath, destpath)
def __iter__(self):
yield from os.listdir(self.external_directory)
    def __str__(self):
        """Return the real filesystem path backing this directory."""
        # This returns the whole path (since we don't know where the directory started)
        # which exposes the sandbox directory; we will have to assume for the time being
        # that people will not abuse __str__.
        return self.external_directory
    def _get_underlying_directory(self) -> str:
        """ Returns the underlying (real) file system directory this
        object refers to. """
        return self.external_directory
def _find_root(self):
""" Finds the root of this directory tree by following 'parent' until there is
no parent. """
if self.parent:
return self.parent._find_root()
else:
return self
def _get_filetype(self, name=None):
path = self.external_directory
if name:
path = os.path.join(path, name)
st = os.lstat(path)
if stat.S_ISDIR(st.st_mode):
return _FileType.DIRECTORY
elif stat.S_ISLNK(st.st_mode):
return _FileType.SYMLINK
elif stat.S_ISREG(st.st_mode):
return _FileType.REGULAR_FILE
else:
return _FileType.SPECIAL_FILE
def _import_files_from_cas(
self, source_directory, actionfunc, filter_callback, *, path_prefix="", update_mtime=None, result
):
""" Import files from a CAS-based directory. """
for name, entry in source_directory.index.items():
# The destination filename, relative to the root where the import started
relative_pathname = os.path.join(path_prefix, name)
# The full destination path
dest_path = os.path.join(self.external_directory, name)
is_dir = entry.type == _FileType.DIRECTORY
if is_dir:
src_subdir = source_directory.descend(name)
try:
create_subdir = not os.path.lexists(dest_path)
dest_subdir = self.descend(name, create=create_subdir)
except VirtualDirectoryError:
filetype = self._get_filetype(name)
raise VirtualDirectoryError(
"Destination is a {}, not a directory: /{}".format(filetype, relative_pathname)
)
dest_subdir._import_files_from_cas(
src_subdir,
actionfunc,
filter_callback,
path_prefix=relative_pathname,
result=result,
update_mtime=update_mtime,
)
if filter_callback and not filter_callback(relative_pathname):
if is_dir and create_subdir and dest_subdir.is_empty():
# Complete subdirectory has been filtered out, remove it
os.rmdir(dest_subdir.external_directory)
# Entry filtered out, move to next
continue
if not is_dir:
if os.path.lexists(dest_path):
# Collect overlaps
if not os.path.isdir(dest_path):
result.overwritten.append(relative_pathname)
if not utils.safe_remove(dest_path):
result.ignored.append(relative_pathname)
continue
if entry.type == _FileType.REGULAR_FILE:
src_path = source_directory.cas_cache.objpath(entry.digest)
# fallback to copying if we require mtime support on this file
if update_mtime or entry.mtime is not None:
utils.safe_copy(src_path, dest_path, result=result)
mtime = update_mtime
# mtime property will override specified mtime
if entry.mtime is not None:
mtime = utils._parse_protobuf_timestamp(entry.mtime)
if mtime:
utils._set_file_mtime(dest_path, mtime)
else:
actionfunc(src_path, dest_path, result=result)
if entry.is_executable:
| |
instance that provides the text.
text (str):
Plain text without formatting.
entities (telethon.types.TypeMessageEntity list):
List of Telegram entity objects.
Returns:
.TelegramRichText:
Parsed rich text container.
"""
if not text:
return None
elif not entities:
return immp.RichText([immp.Segment(text)])
changes = defaultdict(dict)
for entity in entities:
value = True
if isinstance(entity, tl.types.MessageEntityBold):
key = "bold"
elif isinstance(entity, tl.types.MessageEntityItalic):
key = "italic"
elif isinstance(entity, tl.types.MessageEntityUnderline):
key = "underline"
elif isinstance(entity, tl.types.MessageEntityStrike):
key = "strike"
elif isinstance(entity, tl.types.MessageEntityCode):
key = "code"
elif isinstance(entity, tl.types.MessageEntityPre):
key = "pre"
elif isinstance(entity, tl.types.MessageEntityUrl):
key = "link"
value = text[entity.offset:entity.offset + entity.length]
elif isinstance(entity, tl.types.MessageEntityTextUrl):
key = "link"
value = entity.url
elif isinstance(entity, tl.types.MessageEntityEmail):
key = "link"
value = "mailto:{}".format(text[entity.offset:entity.offset + entity.length])
elif isinstance(entity, tl.types.MessageEntityMention):
key = "mention"
username = text[entity.offset + 1:entity.offset + entity.length]
value = await telegram.user_from_username(username)
elif isinstance(entity, tl.types.MessageEntityMentionName):
key = "mention"
value = await telegram.user_from_id(entity.user_id)
else:
continue
clear = False if value is True else None
changes[entity.offset][key] = value
changes[entity.offset + entity.length][key] = clear
return cls._from_changes(text, changes)
class TelegramFile(immp.File):
    """
    File attachment originating from Telegram.
    """
    @classmethod
    async def from_id(cls, telegram, id_, type_=immp.File.Type.unknown, name=None):
        """
        Generate a file using the bot API URL for a Telegram file.
        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the file.
            id (str):
                File ID as provided in the bot API, or constructed from a raw MTProto file.
            type (.File.Type):
                Corresponding file type.
            name (str):
                Original filename, if available for the file format.
        Returns:
            .TelegramFile:
                Parsed file object.
        """
        # Resolve the file ID to a server-side path via the bot API.
        file = await telegram._api("getFile", _Schema.file, params={"file_id": id_})
        # NOTE(review): this URL embeds the bot token -- treat it as sensitive.
        url = ("https://api.telegram.org/file/bot{}/{}"
               .format(telegram.config["token"], file["file_path"]))
        # NOTE(review): returns a plain immp.File rather than cls(...), despite
        # the documented .TelegramFile return type -- confirm intended.
        return immp.File(name, type_, url)
class TelegramMessage(immp.Message):
    """
    Message originating from Telegram.
    """
    # Media-bearing message fields, checked in this order by from_bot_message().
    _file_types = ("animation", "video", "video_note", "audio", "voice", "document")
    @classmethod
    async def from_bot_message(cls, telegram, json):
        """
        Convert an API message :class:`dict` to a :class:`.Message`.
        Args:
            telegram (.TelegramPlug):
                Related plug instance that provides the event.
            json (dict):
                Telegram API `message <https://core.telegram.org/bots/api#message>`_ object.
        Returns:
            .TelegramMessage:
                Parsed message object.
        """
        # NOTE(review): field access below assumes _Schema.message normalises
        # missing keys (e.g. to None) -- confirm against the schema definition.
        message = _Schema.message(json)
        # Message IDs are just a sequence, only unique to their channel and not the whole network.
        # Pair with the chat ID for a network-unique value.
        id_ = "{}:{}".format(message["chat"]["id"], message["message_id"])
        revision = message["edit_date"] or message["date"]
        at = datetime.fromtimestamp(message["date"], timezone.utc)
        channel = immp.Channel(telegram, message["chat"]["id"])
        edited = bool(message["edit_date"])
        text = await TelegramRichText.from_bot_entities(telegram, message["text"],
                                                        message["entities"])
        user = None
        action = False
        reply_to = None
        joined = None
        left = None
        title = None
        attachments = []
        if message["from"]:
            user = TelegramUser.from_bot_user(telegram, message["from"])
        if message["reply_to_message"]:
            # Recursively parse the quoted message.
            reply_to = await cls.from_bot_message(telegram, message["reply_to_message"])
        # At most one of these fields will be set.
        if message["group_chat_created"]:
            action = True
            text = immp.RichText([immp.Segment("created the group "),
                                  immp.Segment(message["chat"]["title"], bold=True)])
        elif message["new_chat_members"]:
            joined = [(TelegramUser.from_bot_user(telegram, member))
                      for member in message["new_chat_members"]]
            action = True
            if joined == [user]:
                text = "joined group via invite link"
            else:
                text = immp.RichText()
                for join in joined:
                    text.append(immp.Segment(", " if text else "invited "),
                                immp.Segment(join.real_name, bold=True, link=join.link))
        elif message["left_chat_member"]:
            left = [TelegramUser.from_bot_user(telegram, message["left_chat_member"])]
            action = True
            if left == [user]:
                text = "left group"
            else:
                part = left[0]
                text = immp.RichText([immp.Segment("removed "),
                                      immp.Segment(part.real_name, bold=True, link=part.link)])
        elif message["new_chat_title"]:
            title = message["new_chat_title"]
            action = True
            text = immp.RichText([immp.Segment("changed group name to "),
                                  immp.Segment(title, bold=True)])
        elif message["new_chat_photo"]:
            action = True
            text = "changed group photo"
            photo = max(message["new_chat_photo"], key=lambda photo: photo["height"])
            attachments.append(await TelegramFile.from_id(telegram, photo["file_id"],
                                                          immp.File.Type.image))
        elif message["delete_chat_photo"]:
            action = True
            text = "removed group photo"
        elif message["pinned_message"]:
            action = True
            text = "pinned a message"
            attachments.append(await cls.from_bot_message(telegram, message["pinned_message"]))
        elif message["photo"]:
            # This is a list of resolutions, find the original sized one to return.
            photo = max(message["photo"], key=lambda photo: photo["height"])
            attachments.append(await TelegramFile.from_id(telegram, photo["file_id"],
                                                          immp.File.Type.image))
            if message["caption"]:
                text = await TelegramRichText.from_bot_entities(telegram, message["caption"],
                                                                message["caption_entities"])
        elif message["sticker"]:
            attachments.append(await TelegramFile.from_id(telegram, message["sticker"]["file_id"],
                                                          immp.File.Type.image))
            # All real stickers should have an emoji, but webp images uploaded as photos are
            # incorrectly categorised as stickers in the API response.
            if not text and message["sticker"]["emoji"]:
                action = True
                text = "sent {} sticker".format(message["sticker"]["emoji"])
        elif any(message[key] for key in cls._file_types):
            # Generic media types: take the first populated field.
            for key in cls._file_types:
                if message[key]:
                    obj = message[key]
                    break
            if key == "animation":
                type_ = immp.File.Type.image
            elif key in ("video", "video_note"):
                type_ = immp.File.Type.video
            else:
                type_ = immp.File.Type.unknown
            attachments.append(await TelegramFile.from_id(telegram, obj["file_id"], type_,
                                                          obj["file_name"]))
        elif message["venue"]:
            attachments.append(immp.Location(latitude=message["venue"]["location"]["latitude"],
                                             longitude=message["venue"]["location"]["longitude"],
                                             name=message["venue"]["title"],
                                             address=message["venue"]["address"]))
        elif message["location"]:
            attachments.append(immp.Location(latitude=message["location"]["latitude"],
                                             longitude=message["location"]["longitude"]))
        elif message["poll"]:
            action = True
            prefix = "closed the" if message["poll"]["is_closed"] else "opened a"
            text = immp.RichText([immp.Segment("{} poll: ".format(prefix)),
                                  immp.Segment(message["poll"]["question"], bold=True)])
        elif not text:
            # No support for this message type.
            raise NotImplementedError
        common = dict(id_=id_,
                      revision=revision,
                      at=at,
                      channel=channel,
                      user=user,
                      raw=message)
        if message["forward_date"]:
            # Event is a message containing another message. Forwarded messages have no ID, so we
            # use a Message instead of a SentMessage here, unless they come from a channel.
            forward_id = forward_channel = forward_user = None
            if message["forward_from_chat"]:
                forward_channel = immp.Channel(telegram, message["forward_from_chat"]["id"])
                try:
                    forward_user = TelegramUser.from_bot_channel(telegram,
                                                                 message["forward_from_chat"])
                except _HiddenSender:
                    if message["forward_signature"]:
                        forward_user = immp.User(real_name=message["forward_signature"])
                if message["forward_from_message_id"]:
                    forward_id = "{}:{}".format(message["forward_from_chat"]["id"],
                                                message["forward_from_message_id"])
            elif message["forward_from"]:
                forward_user = TelegramUser.from_bot_user(telegram, message["forward_from"])
            elif message["forward_sender_name"]:
                forward_user = immp.User(real_name=message["forward_sender_name"])
            forward_common = dict(text=text,
                                  user=forward_user,
                                  edited=edited,
                                  action=action,
                                  reply_to=reply_to,
                                  joined=joined,
                                  left=left,
                                  title=title,
                                  attachments=attachments,
                                  raw=message)
            if forward_id:
                forward = immp.SentMessage(id_=forward_id,
                                           channel=forward_channel,
                                           **forward_common)
            else:
                forward = immp.Message(**forward_common)
            # Embed the inner message as an attachment.
            return immp.SentMessage(attachments=[forward], **common)
        else:
            return immp.SentMessage(text=text,
                                    edited=edited,
                                    action=action,
                                    reply_to=reply_to,
                                    joined=joined,
                                    left=left,
                                    title=title,
                                    attachments=attachments,
                                    **common)
@classmethod
async def from_bot_update(cls, telegram, update):
"""
Convert an API update :class:`dict` to a :class:`.Message`.
Args:
telegram (.TelegramPlug):
Related plug instance that provides the event.
update (dict):
Telegram API `update <https://core.telegram.org/bots/api#update>`_ object.
Returns:
.TelegramMessage:
Parsed message object.
"""
for key in ("message", "channel_post"):
if update.get(key):
return await cls.from_bot_message(telegram, update[key])
elif update.get("edited_{}".format(key)):
return await cls.from_bot_message(telegram, update["edited_{}".format(key)])
@classmethod
async def from_proto_message(cls, telegram, message):
"""
Convert a Telegram message event to a :class:`.Message`.
Args:
telegram (.TelegramPlug):
Related plug instance that provides the event.
message (telethon.tl.custom.Message):
Received message from an event or get request.
Returns:
.TelegramMessage:
Parsed message object.
"""
id_ = "{}:{}".format(message.chat_id, message.id)
channel = immp.Channel(telegram, message.chat_id)
edited = bool(message.edit_date)
if edited:
revision = int(message.edit_date.timestamp())
elif message.date:
revision = int(message.date.timestamp())
else:
revision = None
text = await TelegramRichText.from_proto_entities(telegram, message.message,
message.entities)
sender = await message.get_sender()
if isinstance(sender, tl.types.Channel):
user = TelegramUser.from_proto_channel(telegram, sender)
else:
user = TelegramUser.from_proto_user(telegram, sender)
action = False
reply_to = None
joined = []
left = []
title = None
attachments = []
if message.reply_to_msg_id:
receipt = immp.Receipt(message.reply_to_msg_id, channel)
reply_to = await telegram.resolve_message(receipt)
if message.photo:
try:
attach = await TelegramFile.from_id(telegram, pack_bot_file_id(message.photo),
immp.File.Type.image)
except TelegramAPIRequestError as e:
log.warning("Unable to fetch attachment", exc_info=e)
else:
attachments.append(attach)
elif message.document:
type_ = immp.File.Type.unknown
name = None
for attr in message.document.attributes:
if isinstance(attr, tl.types.DocumentAttributeSticker):
type_ = immp.File.Type.image
if attr.alt and not text:
text = "sent {} sticker".format(attr.alt)
action = True
elif isinstance(attr, tl.types.DocumentAttributeAnimated):
type_ = immp.File.Type.image
elif isinstance(attr, tl.types.DocumentAttributeVideo):
type_ = immp.File.Type.video
if isinstance(attr, tl.types.DocumentAttributeFilename):
name = attr.file_name
try:
attach = await TelegramFile.from_id(telegram, pack_bot_file_id(message.document),
type_, name)
except TelegramAPIRequestError as e:
log.warning("Unable to fetch attachment", exc_info=e)
else:
attachments.append(attach)
elif message.poll:
action = True
prefix = "closed the" if message.poll.poll.closed else "opened a"
text = immp.RichText([immp.Segment("{} poll: ".format(prefix)),
immp.Segment(message.poll.poll.question, bold=True)])
if message.action:
action = True
if isinstance(message.action, tl.types.MessageActionChatCreate):
text = immp.RichText([immp.Segment("created the group "),
immp.Segment(message.action.title, bold=True)])
elif isinstance(message.action, tl.types.MessageActionChatJoinedByLink):
joined = [user]
text = "joined group via invite link"
elif isinstance(message.action, tl.types.MessageActionChatAddUser):
joined = await gather(*(telegram.user_from_id(id_) for id_ in message.action.users))
if joined == [user]:
text = "joined group"
else:
text = immp.RichText()
for join in joined:
text.append(immp.Segment(", " if text else "invited "),
immp.Segment(join.real_name, link=join.link))
elif isinstance(message.action, tl.types.MessageActionChatDeleteUser):
left = [await telegram.user_from_id(message.action.user_id)]
if left == [user]:
text = "left group"
else:
part = left[0]
text = immp.RichText([immp.Segment("removed "),
immp.Segment(part.real_name, bold=True, link=part.link)])
elif isinstance(message.action, tl.types.MessageActionChatEditTitle):
title = message.action.title
text = immp.RichText([immp.Segment("changed group name to "),
immp.Segment(title, bold=True)])
elif isinstance(message.action, tl.types.MessageActionChatEditPhoto):
text = "changed group photo"
elif isinstance(message.action, tl.types.MessageActionChatDeletePhoto):
text = "removed group photo"
elif isinstance(message.action, tl.types.MessageActionPinMessage):
attachments.append(reply_to)
reply_to = None
text = "pinned message"
else:
raise NotImplementedError
if not text and not attachments:
# No support for this message type.
raise NotImplementedError
common = dict(id_=id_,
revision=revision,
at=message.date,
channel=channel,
user=user,
raw=message)
if message.forward:
# | |
an "X" or "0" at BOX/SQUARE no. 2. It has co-ordinates of BOX/SQUARE no. 2.
#(4.6.3)p3 -> self.p3() is used to decide if PLAYER1 or PLAYER2 will make an "X" or "0" at BOX/SQUARE no. 3. It has co-ordinates of BOX/SQUARE no. 3.
#(4.6.4)p4 -> self.p4() is used to decide if PLAYER1 or PLAYER2 will make an "X" or "0" at BOX/SQUARE no. 4. It has co-ordinates of BOX/SQUARE no. 4.
#(4.6.5)p5 -> self.p5() is used to decide if PLAYER1 or PLAYER2 will make an "X" or "0" at BOX/SQUARE no. 5. It has co-ordinates of BOX/SQUARE no. 5.
#(4.6.6)p6 -> self.p6() is used to decide if PLAYER1 or PLAYER2 will make an "X" or "0" at BOX/SQUARE no. 6. It has co-ordinates of BOX/SQUARE no. 6.
#(4.6.7)p7 -> self.p7() is used to decide if PLAYER1 or PLAYER2 will make an "X" or "0" at BOX/SQUARE no. 7. It has co-ordinates of BOX/SQUARE no. 7.
#(4.6.8)p8 -> self.p8() is used to decide if PLAYER1 or PLAYER2 will make an "X" or "0" at BOX/SQUARE no. 8. It has co-ordinates of BOX/SQUARE no. 8.
#(4.6.9)p9 -> self.p9() is used to decide if PLAYER1 or PLAYER2 will make an "X" or "0" at BOX/SQUARE no. 9. It has co-ordinates of BOX/SQUARE no. 9.
#(4.7) pos -> self.pos() function is used to move turtle using *self.t vertically downwards i.e. from top to bottom and call self.points() or self.tie()
#function based on the outcome of the ongoing GAME. If the ongoing game is won by any PLAYER then self.points() is called or if the GAME
#is a draw or tie, then self.tie() is called. Basically, this function moves the turtle down the scoreboard to set its position so that
#using self.points() or self.tie() function, the turtle can write down the points of the PLAYERS for that specific match.
#(4.8) win -> self.win() function checks if someone has won the game or not. If any of the lists, i.e. *self.top, *self.mid, etc become ['X','X','X'] or
#['0','0','0'], then the PLAYER or SYSTEM wins. It also calls the self.pos() function.
#(4.9) tie -> self.tie() function just checks if the ongoing GAME is a TIE/DRAW or not. If it is then it gives 0 points to both NAME1 and NAME2 too. It also
#calls the self.new_game() function.
#(4.10) points -> self.points() function gives NAME1 or NAME2 "1" or "0" points on the main SCOREBOARD depending on the value stored in variables *self.game1
#and *self.game2. It also calls the self.new_game() function.
#(4.11) new_game -> self.new_game() function's job is to start a new game after the ongoing GAME is finished. It checks if "zero" games are played or not, and if
#*self.Ngames are played or not or "10" games are completed (the maximum number of playable games) are complete or not. If they are not
#complete then it calls self.reset() function or if it is complete then it calls the self.score() function. It also asks the user if
#they would want to keep playing till *self.Ngames are complete or "10" games are complete or stop midway and call self.score() function.
#(4.12) score -> self.score() is the finisher, that is, it finishes the game. It checks if the game is finished just like self.new_game function and also
#displays the final score of both PLAYERS or SYSTEM and hence quits the game after doing so.
#(4.13) play_with_friend1 -> This function takes input from *self.player1 (PLAYER1) and asks them where they would want to move on TTT Board. If PLAYER1 choses
#position 7 for example, then it calls self.p7() function and makes "X" or "0" whichever PLAYER1 chose. It also calls self.win()
#function and checks if filling BOX no. 7 makes PLAYER1 win or not. This is obviously true for all BOX no.s. If it isn't True, then
#it calls self.play_with_friend2() function if *choice value is "1" (against "FRIEND") or calls self.play_with_sys() function if
#*choice value is "2" (against "SYSTEM").
#(4.14) play_with_friend2 -> This function is similar to self.play_with_friend1() function except that it takes input from PLAYER2 and the letter to move
#"X" or "0" is already set based on what PLAYER1 chose as it was randomly selected first. It also calls self.p5() if PLAYER2 chose
#to move at BOX no. 5 after PLAYER1's chance, and calls self.win() function to check if PLAYER2 won or not. It calls function
#self.play_with_friend1() after PLAYER2 is done moving and doesn't win in that chance.
#(4.15) play_with_sys -> This function moves in place of self.play_with_friend2() function if the *choice variable value is "2" (against "SYSTEM"), i.e.
#after self.play_with_friend1() function, self.play_with_friend2() function is not called but self.play_with_sys() function is
#called. Unlike self.play_with_friend2(), self.play_with_sys() can move first if it's PLAYER1 (i.e., if it is selected to move first),
#and it randomly moves to any available BOX no. instead of using artificial intelligence (for now).
#It also calls self.win() function to check if SYSTEM won or not and if not it calls self.play_with_friend1() function.
#(5) END -> Not really required, but its important feature is calling the TTT() class and beginning the game. Everything else is irrelevant and exists
#only for reference purposes, in case it is ever reused in the future for another program.
#(1) IMPORT FUNCTIONS
import turtle as x
import random as r
import sys
if sys.version_info[0]>2:
import tkinter as tk
from tkinter.simpledialog import askstring as askstring
from tkinter.simpledialog import askinteger as askinteger
from tkinter.messagebox import showerror as showerror
from tkinter.messagebox import showinfo as showinfo
from tkinter.messagebox import askyesno as askyesno
elif sys.version_info[0]<3:
import Tkinter as tk
from tkSimpleDialog import askstring as askstring
from tkSimpleDialog import askinteger as askinteger
from tkMessageBox import showerror as showerror
from tkMessageBox import showinfo as showinfo
from tkMessageBox import askyesno as askyesno
#(2) TKINTER AND TURTLE MODULE INITIALIZATION
# Hidden Tk root window: it exists only to host the dialog boxes.
root = tk.Tk()
root.overrideredirect(1)
root.withdraw()
root.focus()
root.lift()
# Full-screen turtle canvas used to draw the board.
window = x.Screen()
window.screensize(1920, 1080)
window.setup(width=1.0, height=1.0, startx=None, starty=None)
#(3) GLOBAL VARIABLES DECLARATION
showinfo("WELCOME !","\t WELCOME TO THE TIC-TAC-TOE GAME!\n\t THIS GAME IS A WORK OF SAKET SAVARN ~\n\nKindly NOTE that while \"PLAYING AGAINST THE SYSTEM\"\nThe system randomly moves even if it's losing which makes the game very easy.\n\n Kindly read the instructions by clicking on the pop-up boxes and entering the data in fields required.\n\n\t\tHope you will like the game!")
void = None
# askinteger returns None when the user presses Cancel, so `choice` may be None.
choice = askinteger("CHOOSE YOUR OPPONENT!!","Would you like to play against SYSTEM or against a FRIEND?\n\n\tENTER \"1\" TO PLAY AGAINST YOUR FRIEND!\n\t\t OR\n\t\"2\" TO PLAY AGAINT THE SYSTEM")
if choice != 1 and choice != 2:
    showerror("ERROR!","YOU HAVE CHOOSEN A WRONG RESPONSE!!\nYOU HAVE ONE MORE TRY TO CHOOSE THE RIGHT CHOICE OR THE GAME WILL QUIT BY DEFAULT!")
    choice = askinteger("CHOOSE YOUR OPPONENT!!","Would you like to play against SYSTEM or against a FRIEND?\n\n\tENTER \"2\" TO PLAY AGAINT THE SYSTEM\n\t\t OR\n\t\"1\" TO PLAY AGAINST YOUR FRIEND!")
    # BUG FIX: the original called int(choice) here, which raised TypeError
    # when the second dialog was cancelled (choice is None). Compare directly.
    if choice not in (1, 2):
        showerror("ERROR!!","YOU HAVE INPUTTED THE WRONG OPTION TWICE!\n\tQUITTING GAME BY DEFAULT!\nKINDLY RESTART THE GAME AGAIN TO PLAY!")
        quit()
if choice == 1:
    # Two-player mode: collect both player names via dialog boxes.
    temp1 = askstring("ENTER NAME!","KINLDY ENTER THE NAME OF PLAYER 1 - ")
    temp2 = askstring("ENTER NAME!","KINDLY ENTER THE NAME OF PLAYER 2 - ")
    # askstring returns None on Cancel -> offer one chance to abort or retry.
    if temp1 is None or temp2 is None:
        f = askyesno("ATTENTION!","YOU HAVE CHOSEN TO CANCEL THE GAME!\nDO YOU REALLY WANT TO END THE GAME?")
        if f == True:
            showinfo("THANK YOU!","THANK YOU FOR TRYING THE TIC-TAC-TOE GAME!")
            quit()
        else:
            showinfo("ATTENTION!","THIS IS THE LAST CHANCE FOR ENTERING YOUR NAMES\nAND CONTINUING THE GAME!")
            temp1 = askstring("ENTER NAME!","KINLDY ENTER THE NAME OF PLAYER 1 - ")
            temp2 = askstring("ENTER NAME!","KINDLY ENTER THE NAME OF PLAYER 2 - ")
            if temp1 is None or temp2 is None:
                showerror("ATTENTION!","YOU HAVE CHOSEN TO CANCEL THE GAME ABRUPTLY!\nQUITTING THE GAME BY DEFAULT!\nRESTART THE GAME TO PLAY AGAIN!")
                quit()
    # BUG FIX: the original used `and`, so the re-prompt fired only when BOTH
    # names were empty; the stated rule is that EACH name must be non-empty.
    while len(temp1) < 1 or len(temp2) < 1:
        showerror("ATTENTION!","Name of both players has to be at least one character long! Kindly re-enter! ")
        temp1 = askstring("ENTER NAME!","KINLDY ENTER THE NAME OF PLAYER 1 - ")
        temp2 = askstring("ENTER NAME!","KINDLY ENTER THE NAME OF PLAYER 2 - ")
        # BUG FIX: cancelling here previously crashed on len(None);
        # treat Cancel as an abrupt quit, matching the earlier branch.
        if temp1 is None or temp2 is None:
            showerror("ATTENTION!","YOU HAVE CHOSEN TO CANCEL THE GAME ABRUPTLY!\nQUITTING THE GAME BY DEFAULT!\nRESTART THE GAME TO PLAY AGAIN!")
            quit()
    name1 = temp1.upper(); name2 = temp2.upper()
    # Scoreboard font shrinks when either name is long so both still fit.
    font_list = [12, 24]
    font_size1 = len(name1); font_size2 = len(name2)
    if font_size1 > 8 or font_size2 > 8:
        Font = font_list[0]
    else:
        Font = font_list[1]
    showinfo("ATTENTION!","\t %s VS %s\n\nKINDLY WAIT TILL THE GAME SETUP IS COMPLETED!"%(name1,name2))
elif choice==2:
level="EASY"
"""
quality=askinteger("CHOOSE LEVEL!","CHOOSE THE LEVEL | |
# Step 2: Exponentiate the diagonals
tfb.TransformDiagonal(tfb.Exp(validate_args=self.VALIDATE_ARGS)),
# Step 1: Expand the vector to a lower triangular matrix
tfb.FillTriangular(validate_args=self.VALIDATE_ARGS),
])
self.prior = PriorModel(
n_length=self.n_length, num_electrode=self.num_electrode)
self.rearrange = ReArrangeBetaSigma(
n_multiple=self.n_multiple,
num_electrode=self.num_electrode,
flash_and_pause_length=self.flash_and_pause_length)
@staticmethod
def session_options(enable_gpu_ram_resizing=False):
    """Build a common `tf.Session` configuration.

    Args:
        enable_gpu_ram_resizing: when True, let TensorFlow grow GPU memory
            on demand instead of reserving it all up front.

    Returns:
        A `tf.ConfigProto` ready to pass to a session constructor.
    """
    session_config = tf.ConfigProto()
    # Only touch the GPU option when requested; the proto default stays
    # untouched otherwise.
    if enable_gpu_ram_resizing:
        session_config.gpu_options.allow_growth = True
    return session_config
def reset_sess(self, config=None):
    """Create (or re-create) the global TF graph and InteractiveSession.

    Args:
        config: optional `tf.ConfigProto`; defaults to `session_options()`.
    """
    if config is None:
        config = self.session_options()
    tf.reset_default_graph()
    global sess
    # Close a previous session if one exists. `sess` may be undefined on the
    # first call (NameError) or already closed; both are safely ignored.
    # BUG FIX: narrowed the original bare `except:` so KeyboardInterrupt /
    # SystemExit are no longer swallowed here.
    try:
        sess.close()
    except Exception:
        pass
    sess = tf.InteractiveSession(config=config)
def print_test_info(self, test_repetition):
    """Log which subject is processed and how many repetitions are predicted."""
    print(f'This is subject {self.sub_folder_name}.')
    print(f'We are predicting {test_repetition} repetitions for testing purpose.')
# Import datafiles with WLS specific requirement:
def import_eeg_processed_dat_wls(self, file_subscript,
                                 letter_dim=None, trn_repetition=None,
                                 reshape_to_1d=True):
    """Load processed EEG data and shape it for the WLS fit.

    Args:
        file_subscript: suffix forwarded to import_eeg_processed_dat.
        letter_dim: number of letters to keep; defaults to all in the file.
        trn_repetition: number of training repetitions to keep; defaults to all.
        reshape_to_1d: if True, flatten eeg_code/eeg_type to 1-D arrays.

    Returns:
        [eeg_signals, eeg_signals_trun, eeg_code, eeg_type], all cast to
        self.DAT_TYPE.
    """
    [eeg_signals, eeg_code, eeg_type] = \
        self.import_eeg_processed_dat(file_subscript, reshape_1d_bool=False)
    shape1, shape2, _ = eeg_type.shape
    # Validate/default the letter dimension against what the file provides.
    if letter_dim is not None:
        assert letter_dim <= shape1, 'Incorrect letter dimension, ' \
                                     'should not be greater than {}.'.format(shape1)
    else:
        letter_dim = shape1
    # Validate/default the repetition dimension likewise.
    if trn_repetition is not None:
        assert trn_repetition <= shape2, 'Incorrect repetition dimension, ' \
                                         'should not be greater than {}.'.format(shape2)
    else:
        trn_repetition = shape2
    # eeg_signals = eeg_signals / 10
    eeg_signals_trun, _ = self.create_truncate_segment_batch(
        eeg_signals, eeg_type, letter_dim, trn_repetition)
    # Raw sequence length covered by the requested repetitions, including the
    # tail of the last overlapping response window.
    trn_total_seq_length = (trn_repetition*self.num_rep+self.n_multiple-1)*self.flash_and_pause_length
    # Swap the last two axes — presumably (letter, time, electrode) ->
    # (letter, electrode, time); TODO confirm against import_eeg_processed_dat.
    eeg_signals = np.transpose(eeg_signals[:letter_dim, :trn_total_seq_length, :],
                               [0, 2, 1])
    if reshape_to_1d:
        eeg_code = np.reshape(eeg_code[:letter_dim, :trn_repetition, :],
                              [letter_dim*trn_repetition*self.num_rep])
        eeg_type = np.reshape(eeg_type[:letter_dim, :trn_repetition, :],
                              [letter_dim*trn_repetition*self.num_rep])
    return [eeg_signals.astype(self.DAT_TYPE),
            eeg_signals_trun.astype(self.DAT_TYPE),
            eeg_code.astype(self.DAT_TYPE),
            eeg_type.astype(self.DAT_TYPE)]
# Construct design matrix X (letter-specific, intercept excluded)
def construct_design_matrix_per_letter(
        self, total_seq_length, eeg_code, target_row_col):
    """Build the 0/1 design matrix mapping flash windows onto time samples.

    Each flash contributes an n_length-long response window; target flashes
    (code in `target_row_col`) map to coefficient ids n_length+1..2*n_length,
    non-target flashes to ids 1..n_length.

    Args:
        total_seq_length: number of time samples the design covers.
        eeg_code: per-flash stimulus codes (row/column ids).
        target_row_col: the codes corresponding to the target letter.

    Returns:
        (total_seq_length, 2*n_length) 0/1 array cast to self.DAT_TYPE.
    """
    # Assume no letter effect, nor row/column effect
    params_type_num = 2
    # z[k] holds the 1-based coefficient ids for class k (0=non-target, 1=target).
    z = np.zeros([params_type_num, self.n_length], dtype=np.int32)
    z[0, :] = np.arange(1, self.n_length+1)  # Non-target
    z[1, :] = np.arange(self.n_length+1, 2*self.n_length+1)  # Target
    # print('can multiple assignment')
    # Number of flash windows that fit in the sequence.
    total_seq_num = int((total_seq_length - self.n_length) / self.flash_and_pause_length + 1)
    # 1 where the flash is a target stimulus, else 0.
    bool_index = np.in1d(eeg_code, target_row_col) * 1
    design_x = np.zeros([total_seq_num, total_seq_length], dtype=np.int32)
    for i in range(total_seq_num):
        # Window i starts at flash_and_pause_length*i and spans n_length samples.
        low_num = self.flash_and_pause_length * i
        upp_num = self.flash_and_pause_length * i + self.n_length
        design_x[i, low_num:upp_num] = z[bool_index[i], :]
    design_x0 = np.zeros([total_seq_length, self.n_length*2])
    # print('design_x0 done!')
    # Scatter coefficient ids into one-hot columns; overlapping windows are
    # effectively OR'd (the cell is set to 1 regardless of multiplicity).
    for i in range(total_seq_length):
        for j in range(total_seq_num):
            if design_x[j, i] > 0:
                design_x0[i, design_x[j, i]-1] = 1
    return design_x0.astype(self.DAT_TYPE)
def create_penalty_fn(self):
    """Return [P_smooth, P_zero] penalty matrices for the 2*n_length betas.

    P_smooth is the identity with D'D written into both diagonal n_length
    blocks, where D is the second-order difference operator. P_zero carries
    an identity in its lower-left block.
    NOTE(review): per the original comment P_zero is the "smoothing around
    zero" penalty — confirm the lower-left placement is intended.
    """
    n = self.n_length
    # Second-order difference operator applied to the identity, then D'D.
    diff2 = np.diff(np.eye(n, dtype=np.int32), n=2, axis=0)
    rough = np.matmul(diff2.T, diff2)
    P_smooth = np.eye(N=2 * n, M=2 * n, dtype=np.int32)
    P_smooth[:n, :n] = rough
    P_smooth[n:, n:] = rough
    P_zero = np.zeros([2 * n, 2 * n], dtype=np.int32)
    P_zero[n:, :n] = np.eye(N=n, M=n, dtype=self.DAT_TYPE)
    return [P_smooth, P_zero]
def from_weights_to_beta(self, design_x0, eeg_signals, l_cholesky_inv,
                         lambda_s, lambda_0, P_smooth, P_zero):
    """One WLS step: solve the penalized normal equations for beta.

    Solves (X'WX + lambda_s*P_smooth + lambda_0*P_zero) beta = X'WY, where
    W = (L L')^{-1} is supplied through `l_cholesky_inv` = L^{-1}.

    Args:
        design_x0: per-letter design matrix X.
        eeg_signals: observed responses Y.
        l_cholesky_inv: inverse Cholesky factor of the weight covariance.
        lambda_s: smoothness penalty weight (applies P_smooth).
        lambda_0: shrinkage penalty weight (applies P_zero).
        P_smooth, P_zero: penalty matrices from create_penalty_fn().

    Returns:
        The penalized weighted-least-squares estimate beta_mle.
    """
    # X^t W X = (L^-1X)^t (L^-1X)
    # Broadcast the channel-level L^{-1} over the letter dimension.
    l_cholesky_inv = np.tile(l_cholesky_inv[:, np.newaxis, :, :],
                             reps=[1, self.num_letter, 1, 1])
    l_inv_X = np.matmul(l_cholesky_inv, design_x0)
    l_inv_X_t = np.transpose(l_inv_X, axes=(0, 1, 3, 2))
    # Sum over letters (axis=1) to pool information across letters.
    XtWX = np.sum(np.matmul(l_inv_X_t, l_inv_X), axis=1)
    # X^t W Y = (L^-1X)^t (L^-1Y)
    XtWY = np.sum(np.matmul(l_inv_X_t, np.matmul(l_cholesky_inv, eeg_signals)), axis=1)
    # Use cholesky decomposition to solve beta_mle
    # XtWX beta = XtWY
    # Step 1: Obtain Cholesky decomposition of XtWX with the penalty term
    l_XWX = np.linalg.cholesky(XtWX + lambda_s * P_smooth + lambda_0 * P_zero)
    # NOTE(review): explicit inversion; a triangular solve would be cheaper
    # and more stable, but would change floating-point results slightly.
    l_XWX_inv = np.linalg.inv(l_XWX)
    # l_XWX @ l_XWX^t beta = XtWY
    # Step 2: Solve l_XWX theta = XtWY
    theta = np.matmul(l_XWX_inv, XtWY)
    # Step 3: Solve l_XWX^t beta = theta
    beta_mle = np.matmul(np.transpose(l_XWX_inv, axes=(0, 2, 1)), theta)
    return beta_mle
# MLE iterations for WLS algorithm
def from_beta_to_weights(self, design_x0, eeg_signals, beta_mle, jitter, trn_repetition=-1):
    """Given beta, re-estimate the inverse Cholesky factor of the weights.

    Computes the empirical residual covariance pooled over letters, imposes
    a block-diagonal structure on it, and returns L^{-1} of its Cholesky
    factor L.

    Args:
        design_x0: per-letter design matrix X.
        eeg_signals: observed responses Y.
        beta_mle: current coefficient estimate.
        jitter: diagonal stabilizer added to the covariance.
        trn_repetition: forwarded to block_diagonal_weights; NOTE(review):
            the default -1 makes the per-repetition block loop there a no-op,
            so callers appear expected to pass the real value — TODO confirm.

    Returns:
        Inverse Cholesky factor of the structured residual covariance.
    """
    _, x_dim, _ = design_x0.shape
    x0_beta = np.matmul(design_x0,
                        np.tile(beta_mle[:, np.newaxis, :, :],
                                reps=[1, self.num_letter, 1, 1]))
    # Empirical residual outer products, averaged over letters.
    w_mle_inv = np.matmul(eeg_signals - x0_beta, np.transpose(eeg_signals - x0_beta, axes=(0, 1, 3, 2))) \
        / self.num_letter
    w_mle_inv = np.sum(w_mle_inv, axis=1)
    # Place different structure on w_mle_inv to simplify the training model
    w_mle_inv = self.block_diagonal_weights(w_mle_inv, jitter, x_dim, trn_repetition)
    # Apply cholesky decomposition
    l_cholesky = np.linalg.cholesky(w_mle_inv)
    l_cholesky_inv = np.linalg.inv(l_cholesky)
    return l_cholesky_inv
@staticmethod
def unstructured_weights(w_mle_inv, jitter, x_dim):
    """Add `jitter` to every matrix diagonal, in place, and return the array.

    Args:
        w_mle_inv: (channel, x_dim, x_dim) stack of square matrices.
        jitter: scalar added to each diagonal entry for numerical stability.
        x_dim: side length of each matrix.
    """
    # The in-place += broadcasts the (x_dim, x_dim) jitter matrix over the
    # leading channel axis; the caller receives the same array object back.
    assert w_mle_inv.ndim == 3
    w_mle_inv += np.eye(N=x_dim, M=x_dim) * jitter
    return w_mle_inv
# Assume different across channels
def block_diagonal_weights(self, w_mle_inv, jitter, x_dim, trn_repetition, channel_dim=1):
    """Zero cross-repetition covariance, keeping per-repetition blocks.

    Copies each repetition-sized diagonal block of `w_mle_inv` into a fresh
    zero array, copies the trailing remainder block, then adds diagonal
    jitter via `unstructured_weights`.

    Args:
        w_mle_inv: (channel, x_dim, x_dim) empirical matrices.
        jitter: diagonal stabilizer added at the end.
        x_dim: matrix side length.
        trn_repetition: number of full repetition blocks to keep.
        channel_dim: leading (channel) dimension of the output.

    Returns:
        (channel_dim, x_dim, x_dim) block-diagonal version of `w_mle_inv`.
    """
    assert len(w_mle_inv.shape) == 3
    w_mle_inv_block_diag = np.zeros([channel_dim, x_dim, x_dim])
    for i in range(trn_repetition):
        block_low = i * self.flash_and_pause_length * self.num_rep
        block_upp = (i + 1) * self.flash_and_pause_length * self.num_rep
        w_mle_inv_block_diag[:, block_low:block_upp, block_low:block_upp] = \
            w_mle_inv[:, block_low:block_upp, block_low:block_upp]
    # The last, smaller block (the tail not covered by a full repetition).
    # BUG FIX: the original indexed with only two subscripts
    # (w_mle_inv_block_diag[block_low_2:, block_low_2:]), which slices the
    # CHANNEL axis instead of the two matrix axes and — whenever
    # block_low_2 >= channel_dim — silently copied nothing.
    block_low_2 = trn_repetition * self.num_rep * self.flash_and_pause_length
    w_mle_inv_block_diag[:, block_low_2:, block_low_2:] = \
        w_mle_inv[:, block_low_2:, block_low_2:]
    # Add jitter to the diagonal to make it more positive definite.
    w_mle_inv_block_diag = self.unstructured_weights(w_mle_inv_block_diag, jitter, x_dim)
    return w_mle_inv_block_diag
@staticmethod
# Assume different across channels
def multi_diagonal_weights(w_mle_inv, jitter, x_dim, max_lag, channel_dim=1):
    """Banded approximation: keep the main and first max_lag-1 off-diagonals.

    Args:
        w_mle_inv: (channel, x_dim, x_dim) matrices to band.
        jitter: stabilizer added to the main diagonal.
        x_dim: matrix side length.
        max_lag: band half-width (number of diagonals kept on each side).
        channel_dim: number of channels in the leading axis.

    Returns:
        (channel, x_dim, x_dim) banded symmetric matrices.
    """
    assert len(w_mle_inv.shape) == 3
    # Add jitter and digonal term together
    w_mle_inv_md = np.diagonal(w_mle_inv, offset=0, axis1=1, axis2=2)[:, :, np.newaxis] \
        * np.eye(N=x_dim, M=x_dim)[np.newaxis, :, :]
    w_mle_inv_md += jitter * np.eye(N=x_dim, M=x_dim)[np.newaxis, :, :]
    for i in range(1, max_lag):
        # i-th superdiagonal of the source, written symmetrically into the band.
        off_diagonal_val = np.diagonal(w_mle_inv, offset=i, axis1=1, axis2=2)
        # print('off_diagonal_val has shape {}'.format(off_diagonal_val.shape))
        for j in range(channel_dim):
            # Offsetting the row (column) start writes the sub(super)diagonal;
            # both have length x_dim - i, matching off_diagonal_val.
            np.fill_diagonal(w_mle_inv_md[j, i:, :], off_diagonal_val[j, :])
            np.fill_diagonal(w_mle_inv_md[j, :, i:], off_diagonal_val[j, :])
    return w_mle_inv_md
# Compute the loss function
def compute_mahalanobis_dist_sq(self, design_x0, eeg_signals, beta_mle, l_cholesky_inv):
    """Squared Mahalanobis distance of the residuals under the weights.

    Computes (Y - X beta)' W (Y - X beta) via L^{-1}(Y - X beta), where
    `l_cholesky_inv` is L^{-1}.

    Args:
        design_x0: per-letter design matrix X.
        eeg_signals: observed responses Y.
        beta_mle: fitted coefficients.
        l_cholesky_inv: inverse Cholesky factor of the weight covariance.

    Returns:
        Squeezed array of summed squared weighted residuals.
    """
    Xbeta_l = np.matmul(design_x0, np.tile(beta_mle[:, np.newaxis, :, :],
                                           reps=[1, self.num_letter, 1, 1]))
    # print('Xbeta_l has shape {}'.format(Xbeta_l.shape))
    Y_Xb = eeg_signals - Xbeta_l
    # print('Y_Xb has shape {}'.format(Y_Xb.shape))
    l_cholesky_inv = np.tile(l_cholesky_inv[:, np.newaxis, :, :],
                             reps=[1, self.num_letter, 1, 1])
    L_inv_res = np.matmul(l_cholesky_inv, Y_Xb)
    # print('L^-1 (Y-Xb) has shape {}'.format(L_inv_res.shape))
    # NOTE(review): this reduces over axis=0, whereas sibling methods pool
    # over the letter axis (axis=1) — confirm the intended reduction axis.
    L_inv_res_sq = np.sum(np.matmul(np.transpose(L_inv_res, axes=[0, 1, 3, 2]),
                                    L_inv_res), axis=0)
    return np.squeeze(L_inv_res_sq)
# def get_log_prob_eeg_convol_fn(
# self, letter_dim, repet_dim,
# hyper_param_dict, eeg_signals, id_beta, design_x):
#
# mean_vec_1 = hyper_param_dict['mean_vec_1']
# mean_vec_0 = hyper_param_dict['mean_vec_0']
# hyper_delta_var = hyper_param_dict['hyper_delta_var']
# hyper_sigma_r_sq = hyper_param_dict['hyper_sigma_r_sq']
#
# def _log_prob_eeg_convol_fn(delta_1_value,
# delta_0_value,
# pres_array_1_value,
# pres_array_0_value):
#
# # def _print_precision(pres_chky_1, pres_chky_0):
# # print('precision_chky_1:\n {}'.format(pres_chky_1))
# # print('precision_chky_0:\n {}'.format(pres_chky_0))
# # return False # operations must return something!
# # # Turn our method into a tensorflow operation
# # prec_chky_op = tf.numpy_function(_print_precision, [pres_chky_1_value, pres_chky_0_value], tf.bool)
# #
# # assertion_op_1 = tf.compat.v1.assert_equal(
# # tf.reduce_sum(tf.linalg.band_part(pres_chky_1_value, -1, 0)), tf.cast(0, dtype=self.DAT_TYPE),
# # message='Not lower triangular for pres', summarize=4, name='low-tri-check-1'
# # )
# #
# # assertion_op_0 = tf.assert_equal(
# # tf.reduce_sum(tf.linalg.band_part(pres_chky_0_value, -1, 0)), tf.cast(0, dtype=self.DAT_TYPE),
# # message='Not symmetrical', summarize=4, name='low-tri-check-0'
# # )
#
# delta_1_log_prob = self.prior.compute_delta_log_lhd(
# mean_vec_1, hyper_delta_var, delta_1_value)
# delta_0_log_prob = self.prior.compute_delta_log_lhd(
# mean_vec_0, hyper_delta_var, delta_0_value)
#
# pres_chky_1_log_prob = self.prior.compute_pres_chky_log_lhd_2(
# hyper_sigma_r_sq, pres_array_1_value)
# pres_chky_0_log_prob = self.prior.compute_pres_chky_log_lhd_2(
# hyper_sigma_r_sq, pres_array_0_value)
# # Intermediate variable eta
# # Convert pres_chky_1/0 to upper-triangular matrices
# pres_array_1_value = self.prior.convert_1d_array_to_upper_triangular(pres_array_1_value)
# pres_array_0_value = self.prior.convert_1d_array_to_upper_triangular(pres_array_0_value)
#
# eta_1, eta_1_log_prob = self.prior.generate_eta_and_compute_log_lhd(
# pres_array_1_value, letter_dim, repet_dim, self.flash_sum)
# eta_0, eta_0_log_prob = self.prior.generate_eta_and_compute_log_lhd(
# pres_array_0_value, letter_dim, repet_dim, self.non_flash_sum)
#
# delta_combined = self.rearrange.tile_and_combine_delta(
# letter_dim, repet_dim, delta_1_value, delta_0_value)
# eta_combined = tf.transpose(tf.concat([eta_1, eta_0], axis=0),
# perm=[1, 0, 2])
# beta_combined = delta_combined + eta_combined
# beta_tilta = self.rearrange.create_joint_beta_tilta(
# letter_dim, repet_dim, beta_combined, id_beta, design_x)
# beta_tilta = tf.squeeze(beta_tilta, axis=-1)
#
# residuals = eeg_signals - beta_tilta
# eeg_signals_log_prob = -0.5 * tf.reduce_sum(tf.pow(tf.linalg.norm(
# residuals, ord='fro', axis=[-2, -1]), 2))
#
# total_log_prob = delta_1_log_prob + delta_0_log_prob \
# + pres_chky_1_log_prob + pres_chky_0_log_prob \
# + eta_1_log_prob + eta_0_log_prob \
# + eeg_signals_log_prob
#
# return total_log_prob
#
# return _log_prob_eeg_convol_fn
# Note that 'eeg_t_mean_init' and 'eeg_nt_mean_init' are random.
def create_initial_chain(self,
                         eeg_t_mean_init, eeg_nt_mean_init,
                         eeg_t_cov, eeg_nt_cov):
    """Build the initial MCMC chain state from empirical moments.

    Args:
        eeg_t_mean_init: initial mean for the target-response state.
        eeg_nt_mean_init: initial mean for the non-target-response state.
        eeg_t_cov: empirical target covariance, turned into an
            upper-triangular precision factor.
        eeg_nt_cov: empirical non-target covariance, likewise.

    Returns:
        List of four tensors: [target mean draw, non-target mean draw,
        target precision 1-D array, non-target precision 1-D array].
    """
    # Upper-triangular precision factors; adding a batched identity keeps
    # the diagonal away from zero.
    upper_tri_1 = self.prior.compute_pres_upper_tri(eeg_t_cov) + \
        tf.eye(self.n_length, self.n_length, [1])
    upper_tri_0 = self.prior.compute_pres_upper_tri(eeg_nt_cov) + \
        tf.eye(self.n_length, self.n_length, [1])
    upper_array_1 = self.prior.convert_upper_triangular_to_1d_array(upper_tri_1)
    upper_array_0 = self.prior.convert_upper_triangular_to_1d_array(upper_tri_0)
    # NOTE(review): shape=[] draws a single scalar around each initial mean —
    # confirm a scalar (not per-coefficient) perturbation is intended here.
    initial_chain_states = [
        tf.random.normal(mean=eeg_t_mean_init, shape=[], dtype=self.DAT_TYPE),
        tf.random.normal(mean=eeg_nt_mean_init, shape=[], dtype=self.DAT_TYPE),
        # tf.random.normal(shape=[self.num_electrode,
        #                         int(self.n_length * (1 + self.n_length) / 2)],
        #                  dtype=self.DAT_TYPE),
        # tf.random.normal(shape=[self.num_electrode,
        #                         int(self.n_length * (1 + self.n_length) / 2)],
        #                  dtype=self.DAT_TYPE)
        upper_array_1,
        upper_array_0
    ]
    return initial_chain_states
@staticmethod
def _mala_kernel(target_log_prob_fn, step_size_init):
    """Construct a Metropolis-adjusted Langevin (MALA) transition kernel."""
    return tfp.mcmc.MetropolisAdjustedLangevinAlgorithm(
        target_log_prob_fn=target_log_prob_fn,
        step_size=step_size_init)
@staticmethod
def _random_walk_kernel(target_log_prob_fn, scale):
    """Construct a random-walk Metropolis kernel with normal proposals."""
    return tfp.mcmc.RandomWalkMetropolis(
        target_log_prob_fn=target_log_prob_fn,
        new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=scale))
# Add transformed kernel and bijector
def _ttk_hmc_kernel(self, target_log_prob_fn,
num_burnin_steps, num_leapfrog_steps,
step_size_init, target_accept_prob):
step_size_init = tf.convert_to_tensor(step_size_init, dtype=self.DAT_TYPE)
target_accept_prob = | |
# Repository: ThanksBoomerang/graphql-core-legacy
from pytest import raises
from graphql import GraphQLInt, parse
from graphql.utils.build_ast_schema import build_ast_schema
from graphql.utils.schema_printer import print_schema
from ...type import (
GraphQLDeprecatedDirective,
GraphQLIncludeDirective,
GraphQLSkipDirective,
)
def cycle_output(body):
    """Round-trip a schema: SDL text -> AST -> GraphQLSchema -> printed SDL.

    Parsing, materializing, and re-printing should reproduce the input, so
    the tests below assert ``cycle_output(body) == body``.
    """
    schema_ast = parse(body)
    rebuilt = build_ast_schema(schema_ast)
    return "\n" + print_schema(rebuilt)
# Round-trip: built-in scalar field types survive SDL -> schema -> SDL.
def test_simple_type():
    body = """
schema {
  query: HelloScalars
}

type HelloScalars {
  str: String
  int: Int
  float: Float
  id: ID
  bool: Boolean
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: a user-defined directive declaration is preserved.
def test_with_directives():
    body = """
schema {
  query: Hello
}

directive @foo(arg: Int) on FIELD

type Hello {
  str: String
}
"""
    output = cycle_output(body)
    assert output == body


# The three built-in directives exist even when the SDL declares none.
def test_maintains_skip_and_include_directives():
    body = """
schema {
  query: Hello
}

type Hello {
  str: String
}
"""
    schema = build_ast_schema(parse(body))
    assert len(schema.get_directives()) == 3
    assert schema.get_directive("skip") == GraphQLSkipDirective
    assert schema.get_directive("include") == GraphQLIncludeDirective
    assert schema.get_directive("deprecated") == GraphQLDeprecatedDirective


# Redeclaring all built-in directives replaces every one of them.
def test_overriding_directives_excludes_specified():
    body = """
schema {
  query: Hello
}

directive @skip on FIELD
directive @include on FIELD
directive @deprecated on FIELD_DEFINITION

type Hello {
  str: String
}
"""
    schema = build_ast_schema(parse(body))
    assert len(schema.get_directives()) == 3
    # Each user declaration shadows (is distinct from) the built-in singleton.
    assert schema.get_directive("skip") != GraphQLSkipDirective
    assert schema.get_directive("skip") is not None
    assert schema.get_directive("include") != GraphQLIncludeDirective
    assert schema.get_directive("include") is not None
    assert schema.get_directive("deprecated") != GraphQLDeprecatedDirective
    assert schema.get_directive("deprecated") is not None


# Redeclaring only @skip replaces it but keeps the other two built-ins.
def test_overriding_skip_directive_excludes_built_in_one():
    body = """
schema {
  query: Hello
}

directive @skip on FIELD

type Hello {
  str: String
}
"""
    schema = build_ast_schema(parse(body))
    assert len(schema.get_directives()) == 3
    assert schema.get_directive("skip") != GraphQLSkipDirective
    assert schema.get_directive("skip") is not None
    assert schema.get_directive("include") == GraphQLIncludeDirective
    assert schema.get_directive("deprecated") == GraphQLDeprecatedDirective


# Redeclaring only @include replaces it but keeps @skip and @deprecated.
def test_overriding_include_directive_excludes_built_in_one():
    body = """
schema {
  query: Hello
}

directive @include on FIELD

type Hello {
  str: String
}
"""
    schema = build_ast_schema(parse(body))
    assert len(schema.get_directives()) == 3
    assert schema.get_directive("skip") == GraphQLSkipDirective
    assert schema.get_directive("deprecated") == GraphQLDeprecatedDirective
    assert schema.get_directive("include") != GraphQLIncludeDirective
    assert schema.get_directive("include") is not None


# A new custom directive is added alongside the three built-ins.
def test_adding_directives_maintains_skip_and_include_directives():
    body = """
schema {
  query: Hello
}

directive @foo(arg: Int) on FIELD

type Hello {
  str: String
}
"""
    schema = build_ast_schema(parse(body))
    assert len(schema.get_directives()) == 4
    assert schema.get_directive("skip") == GraphQLSkipDirective
    assert schema.get_directive("include") == GraphQLIncludeDirective
    assert schema.get_directive("deprecated") == GraphQLDeprecatedDirective
# Round-trip: non-null and list type modifiers in every combination.
def test_type_modifiers():
    body = """
schema {
  query: HelloScalars
}

type HelloScalars {
  nonNullStr: String!
  listOfStrs: [String]
  listOfNonNullStrs: [String!]
  nonNullListOfStrs: [String]!
  nonNullListOfNonNullStrs: [String!]!
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: a type referring to itself.
def test_recursive_type():
    body = """
schema {
  query: Recurse
}

type Recurse {
  str: String
  recurse: Recurse
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: two types referring to each other.
def test_two_types_circular():
    body = """
schema {
  query: TypeOne
}

type TypeOne {
  str: String
  typeTwo: TypeTwo
}

type TypeTwo {
  str: String
  typeOne: TypeOne
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: single-argument fields of each scalar argument type.
def test_single_argument_field():
    body = """
schema {
  query: Hello
}

type Hello {
  str(int: Int): String
  floatToStr(float: Float): String
  idToStr(id: ID): String
  booleanToStr(bool: Boolean): String
  strToStr(bool: String): String
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: a field taking multiple arguments.
def test_simple_type_with_multiple_arguments():
    body = """
schema {
  query: Hello
}

type Hello {
  str(int: Int, bool: Boolean): String
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: a type implementing an interface.
def test_simple_type_with_interface():
    body = """
schema {
  query: HelloInterface
}

type HelloInterface implements WorldInterface {
  str: String
}

interface WorldInterface {
  str: String
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: an enum used as an output type.
def test_simple_output_enum():
    body = """
schema {
  query: OutputEnumRoot
}

enum Hello {
  WORLD
}

type OutputEnumRoot {
  hello: Hello
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: an enum used as a field-argument (input) type.
def test_simple_input_enum():
    body = """
schema {
  query: InputEnumRoot
}

enum Hello {
  WORLD
}

type InputEnumRoot {
  str(hello: Hello): String
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: an enum with more than one value.
def test_multiple_value_enum():
    body = """
schema {
  query: OutputEnumRoot
}

enum Hello {
  WO
  RLD
}

type OutputEnumRoot {
  hello: Hello
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: a union with a single member.
def test_simple_union():
    body = """
schema {
  query: Root
}

union Hello = World

type Root {
  hello: Hello
}

type World {
  str: String
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: a union with multiple members.
def test_multiple_union():
    body = """
schema {
  query: Root
}

union Hello = WorldOne | WorldTwo

type Root {
  hello: Hello
}

type WorldOne {
  str: String
}

type WorldTwo {
  str: String
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: a custom scalar declaration.
def test_custom_scalar():
    body = """
schema {
  query: Root
}

scalar CustomScalar

type Root {
  customScalar: CustomScalar
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: an input object used as a field argument.
def test_input_object():
    body = """
schema {
  query: Root
}

input Input {
  int: Int
}

type Root {
  field(in: Input): String
}
"""
    output = cycle_output(body)
    assert output == body


# Input-object field types are materialized into real GraphQL types.
def test_input_types_are_read():
    schema = build_ast_schema(
        parse(
            """
            schema {
              query: Query
            }

            type Query {
              field(input: Input): Int
            }

            input Input {
              id: Int
            }
            """
        )
    )
    input_type = schema.get_type("Input")
    assert input_type.fields["id"].type == GraphQLInt


# An input object may reference itself in its own fields.
def test_input_types_can_be_recursive():
    schema = build_ast_schema(
        parse(
            """
            schema {
              query: Query
            }

            type Query {
              field(input: Input): Int
            }

            input Input {
              id: Input
            }
            """
        )
    )
    input_type = schema.get_type("Input")
    assert input_type.fields["id"].type == input_type


# Round-trip: an argument with a default value.
def test_simple_argument_field_with_default():
    body = """
schema {
  query: Hello
}

type Hello {
  str(int: Int = 2): String
}
"""
    output = cycle_output(body)
    assert output == body
# Round-trip: a schema that also declares a mutation root type.
def test_simple_type_with_mutation():
    body = """
schema {
  query: HelloScalars
  mutation: Mutation
}

type HelloScalars {
  str: String
  int: Int
  bool: Boolean
}

type Mutation {
  addHelloScalars(str: String, int: Int, bool: Boolean): HelloScalars
}
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: a schema that also declares a subscription root type.
def test_simple_type_with_subscription():
    body = """
schema {
  query: HelloScalars
  subscription: Subscription
}

type HelloScalars {
  str: String
  int: Int
  bool: Boolean
}

type Subscription {
  subscribeHelloScalars(str: String, int: Int, bool: Boolean): HelloScalars
}
"""
    output = cycle_output(body)
    assert output == body


# A type only reachable via an interface it implements is still kept.
def test_unreferenced_type_implementing_referenced_interface():
    body = """
schema {
  query: Query
}

type Concrete implements Iface {
  key: String
}

interface Iface {
  key: String
}

type Query {
  iface: Iface
}
"""
    output = cycle_output(body)
    assert output == body


# A type only reachable via a union membership is still kept.
def test_unreferenced_type_implementing_referenced_union():
    body = """
schema {
  query: Query
}

type Concrete {
  key: String
}

type Query {
  union: Union
}

union Union = Concrete
"""
    output = cycle_output(body)
    assert output == body


# Round-trip: @deprecated on fields and enum values, with/without a reason.
def test_supports_deprecated_directive():
    body = """
schema {
  query: Query
}

enum MyEnum {
  VALUE
  OLD_VALUE @deprecated
  OTHER_VALUE @deprecated(reason: "Terrible reasons")
}

type Query {
  field1: String @deprecated
  field2: Int @deprecated(reason: "Because I said so")
  enum: MyEnum
}
"""
    output = cycle_output(body)
    assert output == body


# Error: a document without a schema definition is rejected.
def test_requires_a_schema_definition():
    body = """
type Hello {
  bar: Bar
}
"""
    doc = parse(body)
    with raises(Exception) as excinfo:
        build_ast_schema(doc)
    assert "Must provide a schema definition." == str(excinfo.value)


# Error: duplicate schema definitions are rejected.
def test_allows_only_a_single_schema_definition():
    body = """
schema {
  query: Hello
}

schema {
  query: Hello
}

type Hello {
  bar: Bar
}
"""
    doc = parse(body)
    with raises(Exception) as excinfo:
        build_ast_schema(doc)
    assert "Must provide only one schema definition." == str(excinfo.value)


# Error: the schema definition must name a query type.
def test_requires_a_query_type():
    body = """
schema {
  mutation: Hello
}

type Hello {
  bar: Bar
}
"""
    doc = parse(body)
    with raises(Exception) as excinfo:
        build_ast_schema(doc)
    assert "Must provide schema definition with query type." == str(excinfo.value)


# Error: duplicate query operation types are rejected.
def test_allows_only_a_single_query_type():
    body = """
schema {
  query: Hello
  query: Yellow
}

type Hello {
  bar: Bar
}

type Yellow {
  isColor: Boolean
}
"""
    doc = parse(body)
    with raises(Exception) as excinfo:
        build_ast_schema(doc)
    assert "Must provide only one query type in schema." == str(excinfo.value)


# Error: duplicate mutation operation types are rejected.
def test_allows_only_a_single_mutation_type():
    body = """
schema {
  query: Hello
  mutation: Hello
  mutation: Yellow
}

type Hello {
  bar: Bar
}

type Yellow {
  isColor: Boolean
}
"""
    doc = parse(body)
    with raises(Exception) as excinfo:
        build_ast_schema(doc)
    assert "Must provide only one mutation type in schema." == str(excinfo.value)


# Error: duplicate subscription operation types are rejected.
def test_allows_only_a_single_subscription_type():
    body = """
schema {
  query: Hello
  subscription: Hello
  subscription: Yellow
}

type Hello {
  bar: Bar
}

type Yellow {
  isColor: Boolean
}
"""
    doc = parse(body)
    with raises(Exception) as excinfo:
        build_ast_schema(doc)
    assert "Must provide only one subscription type in schema." == str(excinfo.value)


# Error: a referenced type that is never defined is reported.
def test_unknown_type_referenced():
    body = """
schema {
  query: Hello
}

type Hello {
  bar: Bar
}
"""
    doc = parse(body)
    with raises(Exception) as excinfo:
        build_ast_schema(doc)
    assert 'Type "Bar" not found in document' in str(excinfo.value)


# Error: an undefined union member type is reported.
def test_unknown_type_in_union_list():
    body = """
schema {
  query: Hello
}

union TestUnion = Bar

type Hello { testUnion: TestUnion }
"""
    doc = parse(body)
    with raises(Exception) as excinfo:
        build_ast_schema(doc)
    assert 'Type "Bar" not found in document' in str(excinfo.value)


# Error: the declared query type must exist in the document.
def test_unknown_query_type():
    body = """
schema {
  query: Wat
}

type Hello {
  str: String
}
"""
    doc = parse(body)
    with raises(Exception) as excinfo:
        build_ast_schema(doc)
    assert 'Specified query type "Wat" not found in document' in str(excinfo.value)


# Error: the declared mutation type must exist in the document.
def test_unknown_mutation_type():
    body = """
schema {
  query: Hello
  mutation: Wat
}

type Hello {
  str: String
}
"""
    doc = parse(body)
    with raises(Exception) as excinfo:
        build_ast_schema(doc)
    assert 'Specified mutation type "Wat" not found in document' in str(excinfo.value)


# Error: the declared subscription type must exist in the document.
def test_unknown_subscription_type():
    body = """
schema {
  query: Hello
  mutation: Wat
  subscription: Awesome
}

type Hello {
  str: String
}

type Wat {
  str: String
}
"""
    doc = parse(body)
    with raises(Exception) as excinfo:
        build_ast_schema(doc)
    assert 'Specified subscription type "Awesome" not found in document' in str(
        excinfo.value
    )
def test_does_not_consider_query_names():
body = """
schema {
query: Foo
}
type Hello {
str: String
}
"""
doc = parse(body)
with raises(Exception) as excinfo:
build_ast_schema(doc)
assert 'Specified query type | |
#!/usr/bin/env python
"""
..__main__.py
~~~~~~~~~~~~~~~~
Picasso command line interface
:author: <NAME>, 2015
:copyright: Copyright (c) 2015 Jungmann Lab, Max Planck Institute of Biochemistry
"""
import os.path
def _average(args):
    """CLI handler: average the localizations in each file matching args.file."""
    from glob import glob
    from .io import load_locs, NoMetadataFileError
    from .postprocess import average
    kwargs = {'iterations': args.iterations,
              'oversampling': args.oversampling}
    paths = glob(args.file)
    if paths:
        for path in paths:
            print('Averaging {}'.format(path))
            try:
                locs, info = load_locs(path)
            except NoMetadataFileError:
                # Skip files without the required metadata sidecar.
                continue
            # Output basename, e.g. foo.hdf5 -> foo_avg
            kwargs['path_basename'] = os.path.splitext(path)[0] + '_avg'
            average(locs, info, **kwargs)
def _hdf2visp(path, pixel_size):
    """Convert Picasso HDF5 localization files to ViSP '.3d' text files.

    Args:
        path: glob pattern selecting the input .hdf5 files.
        pixel_size: camera pixel size, used to scale x/y into real units.
    """
    from glob import glob
    matches = glob(path)
    if not matches:
        return
    from .io import load_locs
    import os.path
    from numpy import savetxt
    for in_path in matches:
        print('Converting {}'.format(in_path))
        locs, info = load_locs(in_path)
        # ViSP expects columns: x, y, z, brightness, frame.
        locs = locs[['x', 'y', 'z', 'photons', 'frame']].copy()
        locs.x *= pixel_size
        locs.y *= pixel_size
        out_path = os.path.splitext(in_path)[0] + '.3d'
        savetxt(out_path, locs, fmt=['%.1f', '%.1f', '%.1f', '%.1f', '%d'], newline='\r\n')
def _link(files, d_max, tolerance):
    """Link localizations across consecutive frames for each matching file.

    Args:
        files: glob pattern of input .hdf5 localization files.
        d_max: maximum spatial distance for linking two localizations.
        tolerance: maximum transient dark time (frames) allowed in a link.
    """
    import glob
    paths = glob.glob(files)
    if paths:
        from . import io, postprocess
        for path in paths:
            try:
                locs, info = io.load_locs(path)
            except io.NoMetadataFileError:
                # No metadata sidecar -> cannot interpret the file; skip it.
                continue
            linked_locs = postprocess.link(locs, info, d_max, tolerance)
            base, ext = os.path.splitext(path)
            # Provenance entry stored in the output metadata.
            link_info = {'Maximum Distance': d_max,
                         'Maximum Transient Dark Time': tolerance,
                         'Generated by': 'Picasso Link'}
            info.append(link_info)
            io.save_locs(base + '_link.hdf5', linked_locs, info)
def _cluster(files):
    """Cluster the localizations in every file matching the `files` pattern."""
    import glob
    matched = glob.glob(files)
    if not matched:
        return
    from . import io, postprocess
    for path in matched:
        try:
            locs, info = io.load_locs(path)
        except io.NoMetadataFileError:
            # Files without metadata cannot be processed; skip them.
            continue
        clustered = postprocess.cluster(locs)
        root, ext = os.path.splitext(path)
        # Record provenance in the metadata saved alongside the output.
        info.append({'Generated by': 'Picasso Cluster'})
        io.save_locs(root + '_clust.hdf5', clustered, info)
def _clusterdist(files):
    """Run distance-based clustering on each file matching `files`."""
    import glob
    paths = glob.glob(files)
    if paths:
        from . import io, postprocess
        for path in paths:
            try:
                locs, info = io.load_locs(path)
            except io.NoMetadataFileError:
                # No metadata sidecar -> skip this file.
                continue
            clustered_locs = postprocess.clusterdist(locs)
            base, ext = os.path.splitext(path)
            # Provenance entry stored in the output metadata.
            cluster_info = {'Generated by': 'Picasso Clusterdist'}
            info.append(cluster_info)
            io.save_locs(base + '_cdist.hdf5', clustered_locs, info)
def _clusterfilter(files, clusterfile, parameter, minval, maxval):
    """Split localizations into two files by a per-cluster parameter range.

    For every file matching `files`, localizations whose cluster `parameter`
    lies in (minval, maxval) are written to *_filter_in.hdf5; the remaining
    groups go to *_filter_out.hdf5.

    Args:
        files: glob pattern of input .hdf5 localization files.
        clusterfile: file with per-cluster statistics (one row per group).
        parameter: cluster field name to filter on.
        minval, maxval: exclusive bounds of the "in range" interval.
    """
    # Unused imports from the original (itertools.chain, postprocess.align,
    # os.path.splitext, io.load_locs/save_locs) were removed.
    from glob import glob
    from tqdm import tqdm
    import numpy as np
    from . import io

    def _collect_and_save(locs, info, groups, base, label, suffix):
        # Gather all locs belonging to `groups` in one pass, then concatenate
        # once. BUG FIX: the original grew the array with np.append per group
        # (O(n^2) copying) and raised NameError when `groups` was empty.
        parts = [locs[locs['group'] == group] for group in tqdm(groups)]
        all_locs = np.concatenate(parts) if parts else locs[:0]
        # 'Paramter' key kept misspelled on purpose: existing metadata readers
        # may rely on it.
        info.append({'Generated by': 'Picasso Clusterfilter - {}'.format(label),
                     'Paramter': parameter, 'Minval': minval, 'Maxval': maxval})
        # Stable sort so locs within a frame keep their relative order.
        all_locs.sort(kind='mergesort', order='frame')
        all_locs = all_locs.view(np.recarray)
        out_path = base + suffix
        io.save_locs(out_path, all_locs, info)
        print('Complete. Saved to: {}'.format(out_path))

    paths = glob(files)
    for path in paths:
        try:
            locs, info = io.load_locs(path)
        except io.NoMetadataFileError:
            continue
        clusters = io.load_clusters(clusterfile)
        try:
            selector = (clusters[parameter] > minval) & (clusters[parameter] < maxval)
        except (KeyError, ValueError):
            # Structured-array field lookup failure: unknown cluster field
            # (numpy raises KeyError in recent versions, ValueError in older).
            print('Error: Field {} not found.'.format(parameter))
            continue
        base, ext = os.path.splitext(path)
        print('Isolating locs.. Step 1: in range')
        _collect_and_save(locs, info, clusters['groups'][selector],
                          base, 'in', '_filter_in.hdf5')
        print('Isolating locs.. Step 2: out of range')
        _collect_and_save(locs, info, clusters['groups'][~selector],
                          base, 'out', '_filter_out.hdf5')
def _undrift(files, segmentation, display, fromfile):
    """
    Correct drift in localization files, either from a drift text file or by
    estimating it with the given segmentation; saves *_undrift.hdf5 and
    *_drift.txt next to each input.

    :param files: glob pattern for .hdf5 localization files
    :param segmentation: frames per segment for drift estimation
    :param display: whether to plot the drift
    :param fromfile: optional text file with per-frame drift (columns: dy, dx)
    """
    import glob
    import os.path
    from . import io, postprocess
    from numpy import genfromtxt, savetxt
    paths = glob.glob(files)
    undrift_info = {'Generated by': 'Picasso Undrift'}
    if fromfile is not None:
        undrift_info['From File'] = fromfile
        drift = genfromtxt(fromfile)
    else:
        undrift_info['Segmentation'] = segmentation
    for path in paths:
        try:
            locs, info = io.load_locs(path)
        except io.NoMetadataFileError:
            continue
        info.append(undrift_info)
        if fromfile is not None:
            # this works for mingjies drift files but not for the own ones
            locs.x -= drift[:, 1][locs.frame]
            locs.y -= drift[:, 0][locs.frame]
            if display:
                import matplotlib.pyplot as plt
                plt.style.use('ggplot')
                plt.figure(figsize=(17, 6))
                plt.suptitle('Estimated drift')
                plt.subplot(1, 2, 1)
                plt.plot(drift[:, 1], label='x')
                plt.plot(drift[:, 0], label='y')
                plt.legend(loc='best')
                plt.xlabel('Frame')
                plt.ylabel('Drift (pixel)')
                plt.subplot(1, 2, 2)
                plt.plot(drift[:, 1], drift[:, 0],
                         color=list(plt.rcParams['axes.prop_cycle'])[2]['color'])
                plt.axis('equal')
                plt.xlabel('x')
                plt.ylabel('y')
                plt.show()
        else:
            print('Undrifting file {}'.format(path))
            # BUG FIX: honor the caller's display flag (was hard-coded True)
            drift, locs = postprocess.undrift(locs, info, segmentation, display=display)
        base, ext = os.path.splitext(path)
        io.save_locs(base + '_undrift.hdf5', locs, info)
        savetxt(base + '_drift.txt', drift, header='dx\tdy', newline='\r\n')
def _density(files, radius):
    """Compute local density within *radius* for each file; save as *_density.hdf5."""
    import glob
    paths = glob.glob(files)
    if paths:
        from . import io, postprocess
        for file_path in paths:
            locs, info = io.load_locs(file_path)
            locs = postprocess.compute_local_density(locs, info, radius)
            base = os.path.splitext(file_path)[0]
            info.append({'Generated by': 'Picasso Density', 'Radius': radius})
            io.save_locs(base + '_density.hdf5', locs, info)
def _dbscan(files, radius, min_density):
    """Run DBSCAN on each file; save clustered locs (*_dbscan.hdf5) and the
    cluster table (*_clusters.hdf5)."""
    import glob
    paths = glob.glob(files)
    if paths:
        from . import io, postprocess
        from h5py import File
        for file_path in paths:
            print('Loading {} ...'.format(file_path))
            locs, info = io.load_locs(file_path)
            clusters, locs = postprocess.dbscan(locs, radius, min_density)
            base = os.path.splitext(file_path)[0]
            info.append({'Generated by': 'Picasso DBSCAN',
                         'Radius': radius,
                         'Minimum local density': min_density})
            io.save_locs(base + '_dbscan.hdf5', locs, info)
            with File(base + '_clusters.hdf5', 'w') as clusters_file:
                clusters_file.create_dataset('clusters', data=clusters)
def _nneighbor(files):
    """
    For each clusters file, compute every cluster's distance to its nearest
    neighboring cluster (based on center of mass) and save the values to
    *_minval.txt.

    :param files: glob pattern for .hdf5 clusters files
    """
    import glob
    import os.path
    import h5py
    import numpy as np
    from scipy.spatial import distance
    for path in glob.glob(files):
        print('Loading {} ...'.format(path))
        with h5py.File(path, 'r') as clusters_file:
            data = clusters_file['clusters'][...]
        clusters = np.rec.array(data, dtype=data.dtype)
        points = np.array(clusters[['com_x', 'com_y']].tolist())
        pairwise = distance.cdist(points, points)
        # a point's zero distance to itself must not count as a neighbor
        # NOTE(review): this also discards coincident distinct clusters — confirm intended
        pairwise[pairwise == 0] = float('inf')
        nearest = np.amin(pairwise, axis=0)
        out_path = os.path.splitext(path)[0] + '_minval.txt'
        np.savetxt(out_path, nearest, newline='\r\n')
        # typo fixed: was 'Saved filest o:'
        print('Saved files to: {}'.format(out_path))
def _dark(files):
    """Compute dark times for grouped localizations; save as *_dark.hdf5."""
    import glob
    paths = glob.glob(files)
    if paths:
        from . import io, postprocess
        for file_path in paths:
            locs, info = io.load_locs(file_path)
            locs = postprocess.compute_dark_times(locs)
            base = os.path.splitext(file_path)[0]
            dark_info = {'Generated by': 'Picasso Dark'}
            info.append(dark_info)
            io.save_locs(base + '_dark.hdf5', locs, info)
def _align(files, display):
    """Align multiple localization files to each other; save each as *_align.hdf5."""
    from glob import glob
    from itertools import chain
    from os.path import splitext
    from .io import load_locs, save_locs
    from .postprocess import align
    # expand every pattern and flatten into one list of paths
    paths = list(chain.from_iterable(glob(pattern) for pattern in files))
    print('Aligning files:')
    for p in paths:
        print(' ' + p)
    loaded = [load_locs(p) for p in paths]
    all_locs = [pair[0] for pair in loaded]
    all_infos = [pair[1] for pair in loaded]
    aligned = align(all_locs, all_infos, display=display)
    align_info = {'Generated by': 'Picasso Align',
                  'Files': paths}
    for p, locs_aligned, info in zip(paths, aligned, all_infos):
        info.append(align_info)
        save_locs(splitext(p)[0] + '_align.hdf5', locs_aligned, info)
def _join(files):
    """Concatenate several localization files into one, sorted by frame;
    saved next to the first file as *_join.hdf5."""
    from os.path import splitext
    import numpy as np
    from .io import load_locs, save_locs
    locs, info = load_locs(files[0])
    join_info = {'Generated by': 'Picasso Join',
                 'Files': [files[0]]}
    for path in files[1:]:
        more_locs, _ = load_locs(path)
        locs = np.append(locs, more_locs)
        join_info['Files'].append(path)
    info.append(join_info)
    locs.sort(kind='mergesort', order='frame')
    save_locs(splitext(files[0])[0] + '_join.hdf5', locs.view(np.recarray), info)
def _groupprops(files):
    """Compute per-group statistics for each file; save as *_groupprops.hdf5."""
    import glob
    paths = glob.glob(files)
    if paths:
        from os.path import splitext
        from .io import load_locs, save_datasets
        from .postprocess import groupprops
        for file_path in paths:
            locs, info = load_locs(file_path)
            groups = groupprops(locs)
            base = splitext(file_path)[0]
            save_datasets(base + '_groupprops.hdf5', info, locs=locs, groups=groups)
def _pair_correlation(files, bin_size, r_max):
    """Plot the pair-correlation function of each matching localization file."""
    from glob import glob
    paths = glob(files)
    if paths:
        import matplotlib.pyplot as plt
        from .io import load_locs
        from .postprocess import pair_correlation
        plt.style.use('ggplot')
        for file_path in paths:
            print('Loading {}...'.format(file_path))
            locs, info = load_locs(file_path)
            print('Calculating pair-correlation...')
            bins_lower, pc = pair_correlation(locs, info, bin_size, r_max)
            plt.plot(bins_lower, pc)
            plt.xlabel('r (pixel)')
            plt.ylabel('pair-correlation (pixel^-2)')
            plt.show()
def _localize(files):
from glob import glob
from .io import load_movie, save_locs
from .localize import identify_async, identifications_from_futures, fit_async, locs_from_fits
from os.path import splitext
from time import sleep
paths = glob(files)
if paths:
def prompt_info():
info = {}
info['Byte Order'] = input('Byte Order (< or >): ')
info['Data Type'] = input('Data Type (e.g. "uint16"): ')
info['Frames'] = int(input('Frames: '))
info['Height'] = int(input('Height: '))
info['Width'] = int(input('Width: '))
save = input('Save info to yaml file (y/n): ') == 'y'
return info, save
box = int(input('Box side length: '))
min_net_gradient = float(input('Min. net gradient: '))
camera_info = {}
camera_info['baseline'] = float(input('Baseline: '))
camera_info['sensitivity'] = float(input('Sensitivity: '))
camera_info['gain'] = int(input('EM Gain: '))
camera_info['qe'] = float(input('Quantum efficiency: '))
convergence = float(input('Convergence criterion: '))
max_iterations = int(input('Max. iterations: '))
for path in paths:
print('Processing {}'.format(path))
movie, info = load_movie(path, prompt_info=prompt_info)
current, futures = identify_async(movie, min_net_gradient, box)
n_frames = len(movie)
while current[0] < n_frames:
print('Identifying in frame {:,} of {:,}'.format(current[0]+1, n_frames), end='\r')
sleep(0.2)
print('Identifying in frame {:,} of {:,}'.format(n_frames, n_frames))
ids = identifications_from_futures(futures)
current, thetas, CRLBs, likelihoods, iterations = fit_async(movie,
camera_info,
ids,
box,
convergence,
max_iterations)
n_spots = len(ids)
while current[0] < n_spots:
| |
# Repository: RaphaelOlivier/armory
"""
Metrics for scenarios
Outputs are lists of python variables amenable to JSON serialization:
e.g., bool, int, float
numpy data types and tensors generally fail to serialize
"""
import logging
import numpy as np
import time
from contextlib import contextmanager
import io
from collections import defaultdict, Counter
import cProfile
import pstats
from armory.data.adversarial_datasets import ADV_PATCH_MAGIC_NUMBER_LABEL_ID
from armory.data.adversarial.apricot_metadata import APRICOT_PATCHES
logger = logging.getLogger(__name__)
def categorical_accuracy(y, y_pred):
    """
    Return the categorical accuracy of the predictions as a list of 0/1 ints.

    y may be a scalar label or a 1D batch of labels; y_pred may be labels of
    the same shape, or per-class scores with one extra trailing dimension
    (argmax over the last axis is compared against y).
    """
    y = np.asarray(y)
    y_pred = np.asarray(y_pred)
    if y.ndim == 0:
        # promote scalar input to a batch of one
        y = np.array([y])
        y_pred = np.array([y_pred])
    if y.shape == y_pred.shape:
        return [int(x) for x in (y == y_pred)]
    if y.ndim + 1 == y_pred.ndim:
        # y_pred holds per-class scores; compare argmax against the labels
        # (removed dead `y.ndim == 0` branch: unreachable after promotion above)
        return [int(x) for x in (y == np.argmax(y_pred, axis=-1))]
    raise ValueError(f"{y} and {y_pred} have mismatched dimensions")
def top_5_categorical_accuracy(y, y_pred):
    """
    Return the top-5 categorical accuracy of the predictions
    (1 if the true label is among the five highest-scored classes).
    """
    return top_n_categorical_accuracy(y, y_pred, n=5)
def top_n_categorical_accuracy(y, y_pred, n):
    """
    Return 0/1 ints indicating whether each label is among the top-n scores.

    :param y: scalar label or 1D batch of labels
    :param y_pred: per-class scores with one extra trailing dimension
    :param n: number of top-ranked classes to accept (positive integer)
    :raises ValueError: on non-positive n, mismatched lengths/dimensions,
        or when y_pred does not contain per-class scores
    """
    if n < 1:
        raise ValueError(f"n must be a positive integer, not {n}")
    n = int(n)
    if n == 1:
        # degenerate case: identical to plain categorical accuracy
        return categorical_accuracy(y, y_pred)
    y = np.asarray(y)
    y_pred = np.asarray(y_pred)
    if y.ndim == 0:
        # promote scalar input to a batch of one
        y = np.array([y])
        y_pred = np.array([y_pred])
    if len(y) != len(y_pred):
        raise ValueError("y and y_pred are of different length")
    if y.shape == y_pred.shape:
        # error message now reflects the actual n (was hard-coded "top 5")
        raise ValueError(f"Must supply multiple predictions for top {n} accuracy")
    if y.ndim + 1 == y_pred.ndim:
        # indices of the n highest scores per sample
        # (removed dead `y.ndim == 0` branch: unreachable after promotion above)
        y_pred_top_n = np.argsort(y_pred, axis=-1)[:, -n:]
        return [int(y[i] in y_pred_top_n[i]) for i in range(len(y))]
    raise ValueError(f"{y} and {y_pred} have mismatched dimensions")
def norm(x, x_adv, ord):
    """
    Return the given norm over a batch, outputting a list of floats.

    The l0 "norm" (ord == 0) is normalized by the per-sample element count.
    """
    x = np.asarray(x)
    x_adv = np.asarray(x_adv)
    assert not (
        np.iscomplexobj(x) ^ np.iscomplexobj(x_adv)
    ), "x and x_adv mix real/complex types"
    # elevate to 64-bit types first to prevent overflow errors
    dtype = complex if np.iscomplexobj(x) else float
    diff = (x.astype(dtype) - x_adv.astype(dtype)).reshape(len(x), -1)
    values = np.linalg.norm(diff, ord=ord, axis=1)
    if ord == 0:
        # l0 counts nonzero entries; normalize by elements per sample
        return [float(v) / diff.shape[1] for v in values]
    return [float(v) for v in values]
def linf(x, x_adv):
    """
    Return the L-infinity norm over a batch of inputs as a list of floats.
    """
    return norm(x, x_adv, ord=np.inf)
def l2(x, x_adv):
    """
    Return the L2 norm over a batch of inputs as a list of floats.
    """
    return norm(x, x_adv, ord=2)
def l1(x, x_adv):
    """
    Return the L1 norm over a batch of inputs as a list of floats.
    """
    return norm(x, x_adv, ord=1)
def lp(x, x_adv, p):
    """
    Return the Lp norm over a batch of inputs as a list of floats.

    :raises ValueError: if p is not positive
    """
    if p <= 0:
        raise ValueError(f"p must be positive, not {p}")
    return norm(x, x_adv, ord=p)
def l0(x, x_adv):
    """
    Return the L0 'norm' over a batch of inputs as a list of floats,
    normalized by the number of elements per sample.
    """
    return norm(x, x_adv, ord=0)
def _snr(x_i, x_adv_i):
assert not (
np.iscomplexobj(x_i) ^ np.iscomplexobj(x_adv_i)
), "x_i and x_adv_i mix real/complex types"
dtype = complex if np.iscomplexobj(x_i) else float
x_i = np.asarray(x_i, dtype=dtype)
x_adv_i = np.asarray(x_adv_i, dtype=dtype)
if x_i.shape != x_adv_i.shape:
raise ValueError(f"x_i.shape {x_i.shape} != x_adv_i.shape {x_adv_i.shape}")
signal_power = (np.abs(x_i) ** 2).mean()
noise_power = (np.abs(x_i - x_adv_i) ** 2).mean()
if noise_power == 0:
return np.inf
return signal_power / noise_power
def snr(x, x_adv):
    """
    Return the SNR of a batch of samples with raw audio input,
    as a list of floats.
    """
    if len(x) != len(x_adv):
        raise ValueError(f"len(x) {len(x)} != len(x_adv) {len(x_adv)}")
    return [float(_snr(clean, adv)) for clean, adv in zip(x, x_adv)]
def snr_db(x, x_adv):
    """
    Return the SNR of a batch of samples with raw audio input,
    in decibels (dB), as a list of floats.
    """
    ratios = snr(x, x_adv)
    return [float(value) for value in 10 * np.log10(ratios)]
def _snr_spectrogram(x_i, x_adv_i):
x_i = np.asarray(x_i, dtype=float)
x_adv_i = np.asarray(x_adv_i, dtype=float)
if x_i.shape != x_adv_i.shape:
raise ValueError(f"x_i.shape {x_i.shape} != x_adv_i.shape {x_adv_i.shape}")
signal_power = np.abs(x_i).mean()
noise_power = np.abs(x_i - x_adv_i).mean()
return signal_power / noise_power
def word_error_rate(y, y_pred):
    """
    Return (word edit distance, reference length) pairs for a batch of
    transcriptions.
    """
    if len(y) != len(y_pred):
        raise ValueError(f"len(y) {len(y)} != len(y_pred) {len(y_pred)}")
    return [_word_error_rate(truth, hyp) for truth, hyp in zip(y, y_pred)]
def _word_error_rate(y_i, y_pred_i):
if isinstance(y_i, str):
reference = y_i.split()
elif isinstance(y_i, bytes):
reference = y_i.decode("utf-8").split()
else:
raise TypeError(f"y_i is of type {type(y_i)}, expected string or bytes")
hypothesis = y_pred_i.split()
r_length = len(reference)
h_length = len(hypothesis)
matrix = np.zeros((r_length + 1, h_length + 1))
for i in range(r_length + 1):
for j in range(h_length + 1):
if i == 0:
matrix[0][j] = j
elif j == 0:
matrix[i][0] = i
for i in range(1, r_length + 1):
for j in range(1, h_length + 1):
if reference[i - 1] == hypothesis[j - 1]:
matrix[i][j] = matrix[i - 1][j - 1]
else:
substitute = matrix[i - 1][j - 1] + 1
insertion = matrix[i][j - 1] + 1
deletion = matrix[i - 1][j] + 1
matrix[i][j] = min(substitute, insertion, deletion)
return (matrix[r_length][h_length], r_length)
def char_error_rate(y, y_pred):
    """
    Return the character error rate for a batch of transcriptions, as
    (character edit distance, reference length) pairs.

    (Docstring fixed: previously said "word error rate".)
    """
    if len(y) != len(y_pred):
        raise ValueError(f"len(y) {len(y)} != len(y_pred) {len(y_pred)}")
    return [_char_error_rate(y_i, y_pred_i) for (y_i, y_pred_i) in zip(y, y_pred)]
def _char_error_rate(y_i, y_pred_i):
if isinstance(y_i, str):
reference = list(y_i)
elif isinstance(y_i, bytes):
reference = list(y_i.decode("utf-8"))
else:
raise TypeError(f"y_i is of type {type(y_i)}, expected string or bytes")
hypothesis = list(y_pred_i)
r_length = len(reference)
h_length = len(hypothesis)
matrix = np.zeros((r_length + 1, h_length + 1))
for i in range(r_length + 1):
for j in range(h_length + 1):
if i == 0:
matrix[0][j] = j
elif j == 0:
matrix[i][0] = i
for i in range(1, r_length + 1):
for j in range(1, h_length + 1):
if reference[i - 1] == hypothesis[j - 1]:
matrix[i][j] = matrix[i - 1][j - 1]
else:
substitute = matrix[i - 1][j - 1] + 1
insertion = matrix[i][j - 1] + 1
deletion = matrix[i - 1][j] + 1
matrix[i][j] = min(substitute, insertion, deletion)
return (matrix[r_length][h_length], r_length)
# Metrics specific to MARS model preprocessing in video UCF101 scenario
def verify_mars(x, x_adv):
    """
    Validate that x and x_adv are matched batches of MARS-preprocessed video
    stacks with per-stack shape (3, 16, 112, 112); raises ValueError otherwise.
    """
    if len(x) != len(x_adv):
        raise ValueError(f"len(x) {len(x)} != {len(x_adv)} len(x_adv)")
    for clean, adv in zip(x, x_adv):
        if clean.shape[1:] != adv.shape[1:]:
            raise ValueError(f"Shape {clean.shape[1:]} != {adv.shape[1:]}")
        if clean.shape[1:] != (3, 16, 112, 112):
            raise ValueError(f"Shape {clean.shape[1:]} != (3, 16, 112, 112)")
def mars_mean_l2(x, x_adv):
    """
    Mean per-stack L2 distance for MARS video inputs.

    Input dimensions: (n_batch, n_stacks, channels, stack_frames, height, width)
    Typically: (1, variable, 3, 16, 112, 112)
    """
    verify_mars(x, x_adv)
    return [np.mean(l2(clean, adv)) for clean, adv in zip(x, x_adv)]
def mars_reshape(x_i):
    """
    Reshape (n_stacks, 3, 16, 112, 112) into (n_stacks * 16, 112, 112, 3):
    move the channel axis last, then flatten stacks and frames together.
    """
    channels_last = np.transpose(x_i, (0, 2, 3, 4, 1))
    return channels_last.reshape((-1, 112, 112, 3))
def mars_mean_patch(x, x_adv):
    """
    Mean circular-patch diameter per batch element for MARS video inputs,
    computed on the frame-wise (H, W, C) reshaped stacks.
    """
    verify_mars(x, x_adv)
    return [
        np.mean(
            image_circle_patch_diameter(mars_reshape(clean), mars_reshape(adv))
        )
        for clean, adv in zip(x, x_adv)
    ]
@contextmanager
def resource_context(name="Name", profiler=None, computational_resource_dict=None):
    """
    Context manager that accumulates execution count and wall time (and, for
    the "Deterministic" profiler, cProfile stats) under
    computational_resource_dict[name].

    :param name: key under which measurements are accumulated
    :param profiler: None (no-op), "Basic" (timing only), or "Deterministic"
        (timing + cProfile; may produce a large results file)
    :param computational_resource_dict: dict to accumulate results in;
        required whenever profiler is not None
    :raises ValueError: if profiler is not None, "Basic" or "Deterministic"
    """
    if profiler is None:
        # no profiling requested: run the body unmeasured
        yield
        return
    profiler_types = ["Basic", "Deterministic"]
    if profiler not in profiler_types:
        raise ValueError(f"Profiler {profiler} is not one of {profiler_types}.")
    if profiler == "Deterministic":
        # logger.warn is a deprecated alias; use logger.warning
        logger.warning(
            "Using Deterministic profiler. This may reduce timing accuracy and result in a large results file."
        )
        pr = cProfile.Profile()
        pr.enable()
    start_time = time.perf_counter()
    yield
    elapsed_time = time.perf_counter() - start_time
    if profiler == "Deterministic":
        pr.disable()
        s = io.StringIO()
        ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
        ps.print_stats()
        stats = s.getvalue()
    if name not in computational_resource_dict:
        computational_resource_dict[name] = defaultdict(lambda: 0)
    if profiler == "Deterministic":
        # NOTE(review): "stats" is reset on every entry, so only the most
        # recent run's profile is kept while count/time accumulate — confirm intended
        computational_resource_dict[name]["stats"] = ""
    comp = computational_resource_dict[name]
    comp["execution_count"] += 1
    comp["total_time"] += elapsed_time
    if profiler == "Deterministic":
        comp["stats"] += stats
def snr_spectrogram(x, x_adv):
"""
Return the SNR of a batch of samples with spectrogram input
NOTE: Due to phase effects, this is only an estimate of the SNR.
For instance, if x[0] = sin(t) and x_adv[0] = sin(t + 2*pi/3),
Then the SNR will be calculated as infinity, when it should be 1.
However, the spectrograms will look identical, so as long as the
model uses spectrograms and not the underlying raw signal,
this should not have a significant effect on the results.
"""
if x.shape != x_adv.shape:
raise ValueError(f"x.shape {x.shape} != x_adv.shape {x_adv.shape}")
return [float(_snr_spectrogram(x_i, | |
is one realization)
(image is None if mpds_geosClassicOutput->outputImage is NULL)
nwarning:
(int) total number of warning(s) encountered
(same warnings can be counted several times)
warnings:
(list of strings) list of distinct warnings encountered
(can be empty)
"""
# --- Set grid geometry and varname
# Set grid geometry
nx, ny, nz = dimension
sx, sy, sz = spacing
ox, oy, oz = origin
nxy = nx * ny
nxyz = nxy * nz
# spatial dimension
space_dim = 3
# Set varname
varname = 'V0'
# --- Check and prepare parameters
# category_values and ncategory (computed)
try:
category_values = np.asarray(category_values, dtype='float').reshape(-1)
except:
print("ERROR (SIMUL_INDIC_3D): 'category_values' is not valid")
return None
ncategory = len(category_values)
if ncategory <= 0:
print("ERROR (SIMUL_INDIC_3D): 'category_values' is empty")
return None
# cov_model_for_category
cov_model_for_category = np.asarray(cov_model_for_category).reshape(-1)
if len(cov_model_for_category) == 1:
cov_model_for_category = np.repeat(cov_model_for_category, ncategory)
elif len(cov_model_for_category) != ncategory:
print("ERROR (SIMUL_INDIC_3D): 'cov_model_for_category' of invalid length")
return None
if not np.all([isinstance(c, gcm.CovModel3D) for c in cov_model_for_category]):
print("ERROR (SIMUL_INDIC_3D): 'cov_model_for_category' should contains CovModel3D objects")
return None
for cov_model in cov_model_for_category:
for el in cov_model.elem:
# weight
w = el[1]['w']
if np.size(w) != 1 and np.size(w) != nxyz:
print("ERROR (SIMUL_INDIC_3D): covariance model: weight ('w') not compatible with simulation grid")
return None
# ranges
if 'r' in el[1].keys():
for r in el[1]['r']:
if np.size(r) != 1 and np.size(r) != nxyz:
print("ERROR (SIMUL_INDIC_3D): covariance model: range ('r') not compatible with simulation grid")
return None
# additional parameter (s)
if 's' in el[1].keys():
s = el[1]['s']
if np.size(s) != 1 and np.size(s) != nxyz:
print("ERROR (SIMUL_INDIC_3D): covariance model: parameter ('s') not compatible with simulation grid")
return None
# alpha
angle = cov_model.alpha
if np.size(angle) != 1 and np.size(angle) != nxyz:
print("ERROR (SIMUL_INDIC_3D): covariance model: angle (alpha) not compatible with simulation grid")
return None
# beta
angle = cov_model.beta
if np.size(angle) != 1 and np.size(angle) != nxyz:
print("ERROR (SIMUL_INDIC_3D): covariance model: angle (beta) not compatible with simulation grid")
return None
# gamma
angle = cov_model.gamma
if np.size(angle) != 1 and np.size(angle) != nxyz:
print("ERROR (SIMUL_INDIC_3D): covariance model: angle (gamma) not compatible with simulation grid")
return None
# method
# computationMode=0: GEOS_CLASSIC_OK
# computationMode=1: GEOS_CLASSIC_SK
# computationMode=2: GEOS_CLASSIC_SIM_OK
# computationMode=3: GEOS_CLASSIC_SIM_SK
# if method not in ('simple_kriging', 'ordinary_kriging'):
# print("ERROR (SIMUL_INDIC_3D): 'method' is not valid")
# return None
if method == 'simple_kriging':
computationMode = 3
elif method == 'ordinary_kriging':
computationMode = 2
else:
print("ERROR (SIMUL_INDIC_3D): 'method' is not valid")
return None
# data points: x, v
dataPointSet = []
# data point set from x, v
if x is not None:
x = np.asarray(x, dtype='float').reshape(-1, 3) # cast in 2-dimensional array if needed
v = np.asarray(v, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
if len(v) != x.shape[0]:
print("(ERROR (SIMUL_3D): length of 'v' is not valid")
return None
xc = x[:,0]
yc = x[:,1]
zc = x[:,2]
dataPointSet.append(
PointSet(npt=v.shape[0], nv=4, val=np.array((xc, yc, zc, v)), varname=['X', 'Y', 'Z', varname])
)
# Check parameters - mask
if mask is not None:
try:
mask = np.asarray(mask).reshape(nz, ny, nx)
except:
print("ERROR (SIMUL_INDIC_3D): 'mask' is not valid")
return None
# Check parameters - searchRadiusRelative
searchRadiusRelative = np.asarray(searchRadiusRelative, dtype='float').reshape(-1)
if len(searchRadiusRelative) == 1:
searchRadiusRelative = np.repeat(searchRadiusRelative, ncategory)
elif len(searchRadiusRelative) != ncategory:
print("ERROR (SIMUL_INDIC_3D): 'searchRadiusRelative' of invalid length")
return None
for srr in searchRadiusRelative:
if srr < geosclassic.MPDS_GEOSCLASSIC_SEARCHRADIUSRELATIVE_MIN:
print("ERROR (SIMUL_INDIC_3D): a 'searchRadiusRelative' is too small (should be at least {})".format(geosclassic.MPDS_GEOSCLASSIC_SEARCHRADIUSRELATIVE_MIN))
return None
# Check parameters - nneighborMax
nneighborMax = np.asarray(nneighborMax, dtype='intc').reshape(-1)
if len(nneighborMax) == 1:
nneighborMax = np.repeat(nneighborMax, ncategory)
elif len(nneighborMax) != ncategory:
print("ERROR (SIMUL_INDIC_3D): 'nneighborMax' of invalid length")
return None
for nn in nneighborMax:
if nn != -1 and nn <= 0:
print("ERROR (SIMUL_INDIC_3D): any 'nneighborMax' should be greater than 0 or equal to -1 (unlimited)")
return None
# Check parameters - searchNeighborhoodSortMode
searchNeighborhoodSortMode = np.asarray(searchNeighborhoodSortMode).reshape(-1)
if len(searchNeighborhoodSortMode) == 1:
searchNeighborhoodSortMode = np.repeat(searchNeighborhoodSortMode, ncategory)
elif len(searchNeighborhoodSortMode) != ncategory:
print("ERROR (SIMUL_INDIC_3D): 'searchNeighborhoodSortMode' of invalid length")
return None
for i in range(ncategory):
if searchNeighborhoodSortMode[i] is None:
# set greatest possible value
if cov_model_for_category[i].is_stationary():
searchNeighborhoodSortMode[i] = 2
elif cov_model_for_category[i].is_orientation_stationary() and cov_model_for_category[i].is_range_stationary():
searchNeighborhoodSortMode[i] = 1
else:
searchNeighborhoodSortMode[i] = 0
else:
if searchNeighborhoodSortMode[i] == 2:
if not cov_model_for_category[i].is_stationary():
print("ERROR (SIMUL_INDIC_3D): 'searchNeighborhoodSortMode set to 2' not allowed with non-stationary covariance model")
return None
elif searchNeighborhoodSortMode[i] == 1:
if not cov_model_for_category[i].is_orientation_stationary() or not cov_model_for_category[i].is_range_stationary():
print("ERROR (SIMUL_INDIC_3D): 'searchNeighborhoodSortMode set to 1' not allowed with non-stationary range or non-stationary orientation in covariance model")
return None
searchNeighborhoodSortMode = np.asarray(searchNeighborhoodSortMode, dtype='intc')
# Check parameters - probability
if probability is not None:
# if method == 'ordinary_kriging':
# print("ERROR (SIMUL_INDIC_3D): specifying 'probability' not allowed with ordinary kriging")
# return None
probability = np.asarray(probability, dtype='float').reshape(-1) # cast in 1-dimensional array if needed
if probability.size not in (ncategory, ncategory*nxyz):
print("ERROR (SIMUL_INDIC_3D): size of 'probability' is not valid")
return None
# Check parameters - nreal
nreal = int(nreal) # cast to int if needed
if nreal <= 0:
if verbose >= 1:
print('SIMUL_INDIC_3D: nreal <= 0: nothing to do!')
return None
# --- Fill mpds_geosClassicInput structure (C)
mpds_geosClassicIndicatorInput, flag = fill_mpds_geosClassicIndicatorInput(
space_dim,
nx, ny, nz,
sx, sy, sz,
ox, oy, oz,
varname,
ncategory,
category_values,
outputReportFile,
computationMode,
cov_model_for_category,
None,
dataPointSet,
mask,
probability,
searchRadiusRelative,
nneighborMax,
searchNeighborhoodSortMode,
seed,
nreal)
if not flag:
print("ERROR (SIMUL_INDIC_3D): can not fill input structure!")
return None
# --- Prepare mpds_geosClassicIOutput structure (C)
# Allocate mpds_geosClassicOutput
mpds_geosClassicOutput = geosclassic.malloc_MPDS_GEOSCLASSICOUTPUT()
# Init mpds_geosClassicOutput
geosclassic.MPDSGeosClassicInitGeosClassicOutput(mpds_geosClassicOutput)
# --- Set progress monitor
mpds_progressMonitor = geosclassic.malloc_MPDS_PROGRESSMONITOR()
geosclassic.MPDSInitProgressMonitor(mpds_progressMonitor)
# Set function to update progress monitor:
# according to geosclassic.MPDS_SHOW_PROGRESS_MONITOR set to 4 for compilation of py module
# the function
# mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitorAllOnlyPercentStdout_ptr
# should be used, but the following function can also be used:
# mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitor0_ptr: no output
# mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitorWarningOnlyStdout_ptr: warning only
if verbose == 0:
mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitor0_ptr
elif verbose == 1:
mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitor0_ptr
# mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitorWarningOnlyStdout_ptr
else:
mpds_updateProgressMonitor = geosclassic.MPDSUpdateProgressMonitorAllOnlyPercentStdout_ptr
# --- Set number of threads
if nthreads <= 0:
nth = max(os.cpu_count() + nthreads, 1)
else:
nth = nthreads
if verbose >= 1:
print('Geos-Classic running... [VERSION {:s} / BUILD NUMBER {:s} / OpenMP {:d} thread(s)]'.format(geosclassic.MPDS_GEOS_CLASSIC_VERSION_NUMBER, geosclassic.MPDS_GEOS_CLASSIC_BUILD_NUMBER, nth))
sys.stdout.flush()
sys.stdout.flush() # twice!, so that the previous print is flushed before launching GeosClassic...
# --- Launch "GeosClassicSim" (launch C code)
# err = geosclassic.MPDSGeosClassicIndicatorSim(mpds_geosClassicIndicatorInput, mpds_geosClassicOutput, mpds_progressMonitor, mpds_updateProgressMonitor )
err = geosclassic.MPDSOMPGeosClassicIndicatorSim(mpds_geosClassicIndicatorInput, mpds_geosClassicOutput, mpds_progressMonitor, mpds_updateProgressMonitor, nth)
# Free memory on C side: mpds_geosClassicIndicatorInput
geosclassic.MPDSGeosClassicFreeGeosClassicIndicatorInput(mpds_geosClassicIndicatorInput)
#geosclassic.MPDSFree(mpds_geosClassicIndicatorInput)
geosclassic.free_MPDS_GEOSCLASSICINDICATORINPUT(mpds_geosClassicIndicatorInput)
if err:
err_message = geosclassic.mpds_get_error_message(-err)
err_message = err_message.replace('\n', '')
print(err_message)
geosclassic_output = None
else:
geosclassic_output = geosclassic_output_C2py(mpds_geosClassicOutput, mpds_progressMonitor)
# Free memory on C side: mpds_geosClassicOutput
geosclassic.MPDSGeosClassicFreeGeosClassicOutput(mpds_geosClassicOutput)
#geosclassic.MPDSFree (mpds_geosClassicOutput)
geosclassic.free_MPDS_GEOSCLASSICOUTPUT(mpds_geosClassicOutput)
# Free memory on C side: mpds_progressMonitor
#geosclassic.MPDSFree(mpds_progressMonitor)
geosclassic.free_MPDS_PROGRESSMONITOR(mpds_progressMonitor)
if verbose >= 1 and geosclassic_output:
print('Geos-Classic run complete')
# Show (print) encountered warnings
if verbose >= 1 and geosclassic_output and geosclassic_output['nwarning']:
print('\nWarnings encountered ({} times in all):'.format(geosclassic_output['nwarning']))
for i, warning_message in enumerate(geosclassic_output['warnings']):
print('#{:3d}: {}'.format(i+1, warning_message))
return geosclassic_output
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def estimateIndicator1D(
category_values,
cov_model_for_category,
dimension, spacing=1.0, origin=0.0,
method='simple_kriging',
probability=None,
x=None, v=None,
mask=None,
use_unique_neighborhood=False,
searchRadiusRelative=1.,
nneighborMax=12,
searchNeighborhoodSortMode=None,
seed=None,
outputReportFile=None,
nthreads=-1, verbose=1):
"""
Computes estimate probabilities of categories (indicators) for 1D grid
based on simple or ordinary kriging.
:param category_values:
(sequence of floats or ints) list of the category values;
let ncategory be the length of the list, then:
- if ncategory == 1:
- the unique category value given must not be
equal to 0
- it is used for a binary case with values
("unique category value", 0), where 0 indicates
the absence of the considered medium
- conditioning data values should be
"unique category value" or 0
- if ncategory >= 2:
- it is used for a multi-category case with given
values (distinct)
- conditioning data values should be in the list
of given values
:param cov_model_for_category:
(sequence of CovModel1D class of length ncategory (see
see category_values), or one CovModel1D, recycled)
covariance model in 1D per category, see definition of
the class in module geone.covModel
:param dimension: (int) nx, | |
'.png', out + str(n) + '.png')
n += 1
os.rename(f'{pngdir}/top' + str(nfr) + '.png', out + str(n) + '.png')
else:
os.rename(f'{pngdir}/top' + str(nfr) + '.png', out + str(n) + '.png')
n += 1
os.rename(f'{pngdir}/bot' + str(nfr) + '.png', out + str(n) + '.png')
except:
end = True
nim = n - 1
elif binning > 1:
# binning bin*bin for reducing file size
command = f"ffmpeg -i {avifile} -frames {maxim} -vf scale=iw/{binning}:-1 {out}%d.png -loglevel quiet"
subprocess.call(command, shell=cshell)
nim = check_files(out, maxim)
else:
# regular processing of frames
command = f"ffmpeg -i {avifile} -frames {maxim} {out}%d.png -loglevel quiet"
subprocess.call(command, shell=cshell)
nim = check_files(out, maxim)
if debug:
print(f'last file written: {out}' + str(nim) + '.png')
# get dattim from filename
dattim, sta = tfits(avifile)
except Exception as e:
info = 'problem with ffmpeg, no images converted'
sg.PopupError(info + f'\n{e}', title='AVI conversion')
logging.error(info)
logging.error({e})
return nim, dattim, sta, out
# -------------------------------------------------------------------
def create_file_list(file, n, ext='.png', start=1):
    """
    Create a file name series according to IRIS convention.

    :param file: filebase
    :param n: number of files
    :param ext: extension, default = .png
    :param start: index of first file
    :return: list [file+start+ext, ..., file+(start+n-1)+ext]
    """
    return [file + str(index) + ext for index in range(start, start + n)]
# -------------------------------------------------------------------
def check_files(file, n, ext='.png'):
    """
    check if files in file series file+index+ext exist, starting with index 1

    Counts consecutively existing files; counting stops at the first missing
    index, so gaps later in the series are ignored.

    :param file: filebase
    :param n: last index to check
    :param ext: file extension, default = .png
    :return: number of consecutive files found, 0 if no file exists
    """
    # NOTE: the previous version built the complete file-name list only to
    # take its length and contained a duplicated `return index`; checking
    # each candidate name directly avoids the intermediate list.
    index = 0
    for i in range(1, n + 1):
        if not path.exists(file + str(i) + ext):
            break
        index = i
    return index
# -------------------------------------------------------------------
def delete_old_files(file, n, ext='.png'):
    """
    delete files in order to clean up directory before new calculation

    Asks the user for confirmation before anything is removed.

    :param file: filebase
    :param n: last index to check
    :param ext: file extension, default = .png
    :return:
        number of files found
        number of deleted files
        the user's popup answer ('' if no files were found)
    """
    found = check_files(file, n, ext)
    removed = 0
    answer = ''
    if found:
        # ask before destroying anything
        answer = sg.PopupOKCancel(f'delete {found} existing files {file}, \nARE YOU SURE?', title='Delete old Files')
        if answer == 'OK':
            for idx in range(1, found + 1):
                os.remove(file + str(idx) + ext)
            removed = found
    return found, removed, answer
# -------------------------------------------------------------------
def create_background_image(im, nb, colorflag=False):  # returns background image
    """
    creates background image from first nb png images extracted from
    video with VirtualDub

    Parameters:
        im: filebase of image without number and .png extension
            e.g. m_ for series m_1.png, m_2.png,...
        nb: number of images, starting with index 1,
            for calculation of background image
            n = 0: zero intensity background image
        colorflag: True: color image, False: b/w image output
    Return:
        background image, average of input images, as image array
    """
    if nb <= 0:
        # zero background: same shape/dtype as the first image, all zeros
        return 0 * get_png_image(im + '1.png', colorflag)
    # accumulate the first nb frames and average them
    total = None
    for name in create_file_list(im, nb):
        frame = get_png_image(name, colorflag)
        total = frame if total is None else total + frame
    return total / nb
# -------------------------------------------------------------------
def apply_dark_distortion(im, backfile, outpath, mdist, first, nm, window, fits_dict, dist=False,
background=False, center=None, a3=0, a5=0, rotation=0, yscale=1, colorflag=False,
show_images=True, cval=0):
# subtracts background and transforms images in a single step
"""
subtracts background image from png images and stores the result
as fit-images
(peak image from series, sum image from series)
Perform a dist transformation
Parameters:
im: filebase of image without number and .bmp extension
e.g. m_ for series m_1.bmp, m_2.bmp,...
backfile: background fit-file created in previous step without extension
outpath: path to mdist (output files)
mdist: file base of output files, appended with number starting from 1
(IRIS convention) and .fit
first: index of first image converted
nm: number of images created (if exist)
dist: flag, if True the distortion is calculated,
with additional parameters
background: flag, if True the background image (backfile) is subtracted
center : (column, row) tuple or (2,) ndarray, optional
Center coordinate of transformation, corresponds to optical axis.
If None, the image center is assumed
a3 : float, optional
The cubic coefficient of radial transformation
a5 : float, optional
The quintic coefficient of radial transformation
(the linear coefficient is set equal 1 to preserve image scale
at center, even order coefficients are equal zero due to the
symmetry of the transformation
rotation : float, optional
Additional rotation applied to the image.
yscale : float, optional
scales image by a factor in y-direction to compensate for non-square
pixels. The center coordinate y0 is scaled as well
colorflag: True for colour images, False for b/w images
fits_dict: dictionary with fits-header info
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Return:
actual number of images created
peak image from series, sum image from series
disttext: multiline info about success
The distortion was adapted from skimage.transform.swirl.
Instead of a swirl transformation a rotation symmetric radial transformation
for converting tangential projection to orthographic projection and/or to
correct lens distorsion described by
r =rp*(1+a3*rp^2 +a5*rp^4)
Other parameters, as used in swirl
----------------
# output_shape : tuple (rows, cols), optional
# Shape of the output image generated. By default the shape of the input
# image is preserved.
order : int, optional
The order of the spline interpolation, default is 1. The order has to
be in the range 0-5. See `skimage.transform.warp` for detail.
0: Nearest-neighbor
1: Bi-linear (default)
2: Bi-quadratic
# 3: Bi-cubic
# 4: Bi-quartic
# 5: Bi-quintic
mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional
Points outside the boundaries of the input are filled according
to the given mode, with 'constant' used as the default.
clip : bool, optional
Whether to clip the output to the range of values of the input image.
This is enabled by default, since higher order interpolation may
produce values outside the given input range.
# preserve_range : bool, optional
# Whether to keep the original range of values. Otherwise, the input
# image is converted according to the conventions of `img_as_float`.
Also see
http://scikit-image.org/docs/dev/user_guide/data_types.html
"""
def _distortion_mapping(xy, center, rotation, a3, a5, yscale=1.0):
"""
the original images are converted to square pixels by scaling y
with factor yscale
if yscale is omitted, square pixels are assumed
Calculate shifted coordinates: xs,ys =x',y' – x0,y0
Calculate r', phi': r' =sqrt(xs^2+ys^2)
phi' =phi = arctan2(ys,xs)
Calculate r: r =r'*(1+a3*(r'/f)^2 +...)
Calculate x,y: x=x0+r*cos(phi)
y= y0 + r*sin(phi)
(Pixel value at x',y': I'(x',y') = I(x,y) in the original image)
"""
x, y = xy.T
x0, y0 = center
y0 = y0 * yscale # the center in the original image has to be scaled as well
# y has been scaled in a previous step with resize image
rp = np.sqrt((x - x0) ** 2 + (y - y0) ** 2)
phi = np.arctan2(y - y0, x - x0) + rotation
r = rp * (1 + rp ** 2 * (a3 + a5 * rp ** 2)) # 8sec, 2.9217, 2.906 for single image png
xy[..., 0] = x0 + r * np.cos(phi)
xy[..., 1] = y0 + r * np.sin(phi)
return xy
idg = None
dattim = ''
sta = ''
# scale image
back, header = get_fits_image(backfile)
# notice order of coordinates in rescale
if center is None:
center = np.array(back.shape)[:2][::-1] / 2
warp_args = {'center': center,
'a3': a3,
'a5': a5,
'rotation': rotation,
'yscale': yscale}
# warnings.filterwarnings('ignore') # ignore warnings for cleaner output
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# create list of filenames
image_list = create_file_list(im, nm, start=first)
a = 0
if dist:
if len(back.shape) == 3:
multichannel = True
else:
multichannel = False
ima = my_rescale(back, (yscale, 1), multichannel=multichannel) # scale sum and peak image start
if debug:
print('imy imx , x00 y00: ', ima.shape, | |
<reponame>EzzEddin/bayesloop
#!/usr/bin/env python
"""
Transition models refer to stochastic or deterministic models that describe how the time-varying parameter values of a
given time series model change from one time step to another. The transition model can thus be compared to the state
transition matrix of Hidden Markov models. However, instead of explicitly stating transition probabilities for all
possible states, a transformation is defined that alters the distribution of the model parameters in one time step
according to the transition model. This altered distribution is subsequently used as a prior distribution in the next
time step.
"""
from __future__ import division, print_function
import numpy as np
from scipy.signal import fftconvolve
from scipy.ndimage.filters import gaussian_filter1d
from scipy.ndimage.interpolation import shift
from collections import Iterable
from inspect import getargspec
from copy import deepcopy
from .exceptions import ConfigurationError, PostProcessingError
class TransitionModel:
    """
    Parent class for transition models. All transition models inherit from this class. It carries no behavior of
    its own and is currently only used to identify transition models as such (e.g. via isinstance checks).
    """
class Static(TransitionModel):
    """
    Constant parameters over time. This trivial model assumes no change of parameter values over time.
    """
    def __init__(self):
        self.study = None
        self.latticeConstant = None
        self.hyperParameterNames = []
        self.hyperParameterValues = []
        self.prior = None
        # set to the time of the last Breakpoint by the SerialTransition model
        self.tOffset = 0

    def __str__(self):
        return 'Static/constant parameter values'

    def computeForwardPrior(self, posterior, t):
        """
        Compute new prior from old posterior (moving forwards in time).

        For the static model the parameter distribution does not change, so the
        posterior of one time step directly serves as the prior of the next.

        Args:
            posterior(ndarray): Parameter distribution from current time step
            t(int): integer time step

        Returns:
            ndarray: Prior parameter distribution for subsequent time step
        """
        return posterior

    def computeBackwardPrior(self, posterior, t):
        # moving backwards in time is identical to the forward step here
        return self.computeForwardPrior(posterior, t - 1)
class GaussianRandomWalk(TransitionModel):
    """
    Gaussian parameter fluctuations. This model assumes that parameter changes are Gaussian-distributed. The standard
    deviation can be set individually for each model parameter.

    Args:
        name(str): custom name of the hyper-parameter sigma
        value(float, list, tuple, ndarray): standard deviation(s) of the Gaussian random walk for target parameter
        target(str): parameter name of the observation model to apply transition model to
        prior: hyper-prior distribution that may be passed as a(lambda) function, as a SymPy random variable, or
            directly as a Numpy array with probability values for each hyper-parameter value
    """
    def __init__(self, name='sigma', value=None, target=None, prior=None):
        # Online study expects a Numpy array of hyper-parameter values
        if isinstance(value, (list, tuple)):
            value = np.array(value)

        self.study = None
        self.latticeConstant = None
        self.hyperParameterNames = [name]
        self.hyperParameterValues = [value]
        self.prior = prior
        self.selectedParameter = target
        # set to the time of the last Breakpoint by the SerialTransition model
        self.tOffset = 0

        if target is None:
            raise ConfigurationError('No parameter set for transition model "GaussianRandomWalk"')

    def __str__(self):
        return 'Gaussian random walk'

    def computeForwardPrior(self, posterior, t):
        """
        Compute new prior from old posterior (moving forwards in time).

        The posterior is blurred along the selected parameter axis with a
        Gaussian kernel whose width is sigma expressed in lattice units.

        Args:
            posterior(ndarray): Parameter distribution from current time step
            t(int): integer time step

        Returns:
            ndarray: Prior parameter distribution for subsequent time step
        """
        axis = self.study.observationModel.parameterNames.index(self.selectedParameter)
        sigma = self.hyperParameterValues[0] / self.latticeConstant[axis]
        if sigma > 0.:
            return gaussian_filter1d(posterior, sigma, axis=axis)
        # zero width: distribution is unchanged; copy to avoid aliasing
        return posterior.copy()

    def computeBackwardPrior(self, posterior, t):
        # the Gaussian blur is symmetric in time
        return self.computeForwardPrior(posterior, t - 1)
class AlphaStableRandomWalk(TransitionModel):
    """
    Parameter changes follow alpha-stable distribution. This model assumes that parameter changes are distributed
    according to the symmetric alpha-stable distribution. For each parameter, two hyper-parameters can be set: the
    width of the distribution (c) and the shape (alpha).

    Args:
        name1(str): custom name of the hyper-parameter c
        value1(float, list, tuple, ndarray): width(s) of the distribution (c >= 0).
        name2(str): custom name of the hyper-parameter alpha
        value2(float, list, tuple, ndarray): shape(s) of the distribution (0 < alpha <= 2).
        target(str): parameter name of the observation model to apply transition model to
        prior: list of two hyper-prior distributions, where each may be passed as a(lambda) function, as a SymPy random
            variable, or directly as a Numpy array with probability values for each hyper-parameter value
    """
    def __init__(self, name1='c', value1=None, name2='alpha', value2=None, target=None, prior=(None, None)):
        # Online study expects Numpy arrays of hyper-parameter values
        if isinstance(value1, (list, tuple)):
            value1 = np.array(value1)
        if isinstance(value2, (list, tuple)):
            value2 = np.array(value2)

        self.study = None
        self.latticeConstant = None
        self.hyperParameterNames = [name1, name2]
        self.hyperParameterValues = [value1, value2]
        self.prior = prior
        self.selectedParameter = target
        self.kernel = None  # cached convolution kernel
        self.kernelParameters = None  # hyper-parameter values the cached kernel was built from
        self.tOffset = 0  # is set to the time of the last Breakpoint by SerialTransition model

        if target is None:
            raise ConfigurationError('No parameter set for transition model "AlphaStableRandomWalk"')

    def __str__(self):
        return 'Alpha-stable random walk'

    def computeForwardPrior(self, posterior, t):
        """
        Compute new prior from old posterior (moving forwards in time).

        Args:
            posterior(ndarray): Parameter distribution from current time step
            t(int): integer time step

        Returns:
            ndarray: Prior parameter distribution for subsequent time step
        """
        # if hyper-parameter values have changed, a new convolution kernel needs to be created
        # NOTE(review): this list comparison raises for array-valued hyper-parameter
        # entries (elementwise ndarray comparison has no single truth value) --
        # confirm that only scalar values reach this point
        if not self.kernelParameters == self.hyperParameterValues:
            normedC = []
            for lc in self.latticeConstant:
                normedC.append(self.hyperParameterValues[0] / lc)
            alpha = [self.hyperParameterValues[1]] * len(normedC)

            # apply the random walk only along the selected parameter axis;
            # every other axis gets a zero-width (identity) kernel
            axisToTransform = self.study.observationModel.parameterNames.index(self.selectedParameter)
            selectedC = normedC[axisToTransform]
            normedC = [0.]*len(normedC)
            normedC[axisToTransform] = selectedC

            self.kernel = self.createKernel(normedC[0], alpha[0], 0)
            for i, (a, c) in enumerate(zip(alpha[1:], normedC[1:])):
                self.kernel *= self.createKernel(c, a, i+1)

            self.kernel = self.kernel.T
            self.kernelParameters = deepcopy(self.hyperParameterValues)

        newPrior = self.convolve(posterior)
        newPrior /= np.sum(newPrior)
        return newPrior

    def computeBackwardPrior(self, posterior, t):
        # the symmetric alpha-stable kernel is time-symmetric
        return self.computeForwardPrior(posterior, t - 1)

    def createKernel(self, c, alpha, axis):
        """
        Create alpha-stable distribution on a grid as a kernel for convolution.

        Args:
            c(float): Scale parameter.
            alpha(float): Tail parameter (alpha = 1: Cauchy, alpha = 2: Gauss)
            axis(int): Axis along which the distribution is defined, for 2D-Kernels

        Returns:
            ndarray: kernel
        """
        gs = self.study.gridSize
        if len(gs) == 2:
            if axis == 1:
                l1 = gs[1]
                l2 = gs[0]
            elif axis == 0:
                l1 = gs[0]
                l2 = gs[1]
            else:
                raise ConfigurationError('Transformation axis must either be 0 or 1.')
        elif len(gs) == 1:
            l1 = gs[0]
            l2 = 0
            axis = 0
        else:
            raise ConfigurationError('Parameter grid must either be 1- or 2-dimensional.')

        # build the kernel from the characteristic function of the symmetric
        # alpha-stable distribution, exp(-|c*w|^alpha), via inverse real FFT
        kernel_fft = np.exp(-np.abs(c*np.linspace(0, np.pi, int(3*l1/2+1)))**alpha)
        kernel = np.fft.irfft(kernel_fft)
        kernel = np.roll(kernel, int(3*l1/2-1))

        if len(gs) == 2:
            # replicate the 1D kernel along the second grid dimension
            kernel = np.array([kernel]*(3*l2))
            if axis == 1:
                kernel = kernel.T
        # BUG FIX: the return statements used to be nested inside the 2D branch,
        # so 1D parameter grids fell through and returned None; return the
        # kernel for both the 1D and the 2D case
        return kernel

    def convolve(self, distribution):
        """
        Convolves distribution with alpha-stable kernel.

        The distribution is zero-padded to three times the grid size before the
        FFT-based convolution, then the central part is cut out again.

        Args:
            distribution(ndarray): Discrete probability distribution to convolve.

        Returns:
            ndarray: convolution
        """
        gs = np.array(self.study.gridSize)
        padded_distribution = np.zeros(3*np.array(gs))
        if len(gs) == 2:
            padded_distribution[gs[0]:2*gs[0], gs[1]:2*gs[1]] = distribution
        elif len(gs) == 1:
            padded_distribution[gs[0]:2*gs[0]] = distribution

        padded_convolution = fftconvolve(padded_distribution, self.kernel, mode='same')
        if len(gs) == 2:
            convolution = padded_convolution[gs[0]:2*gs[0], gs[1]:2*gs[1]]
        elif len(gs) == 1:
            convolution = padded_convolution[gs[0]:2*gs[0]]
        return convolution
class ChangePoint(TransitionModel):
    """
    Abrupt parameter change at a specified time step. Parameter values are allowed to change only at a single point in
    time, right after a specified time step (Hyper-parameter tChange). Note that a uniform parameter distribution is
    used at this time step to achieve this "reset" of parameter values.

    Args:
        name(str): custom name of the hyper-parameter tChange
        value(int, list, tuple, ndarray): Integer value(s) of the time step of the change point
        prior: hyper-prior distribution that may be passed as a(lambda) function, as a SymPy random variable, or
            directly as a Numpy array with probability values for each hyper-parameter value
    """
    def __init__(self, name='tChange', value=None, prior=None):
        # Online study expects a Numpy array of hyper-parameter values
        if isinstance(value, (list, tuple)):
            value = np.array(value)

        self.study = None
        self.latticeConstant = None
        self.hyperParameterNames = [name]
        self.hyperParameterValues = [value]
        self.prior = prior
        # set to the time of the last Breakpoint by the SerialTransition model
        self.tOffset = 0

    def __str__(self):
        return 'Change-point'

    def computeForwardPrior(self, posterior, t):
        """
        Compute new prior from old posterior (moving forwards in time).

        At the change point the posterior is discarded and the parameter prior
        of the observation model is restored; at every other time step the
        posterior passes through unchanged.

        Args:
            posterior(ndarray): Parameter distribution from current time step
            t(int): integer time step

        Returns:
            ndarray: Prior parameter distribution for subsequent time step
        """
        if t != self.hyperParameterValues[0]:
            return posterior

        # check if custom prior is used by observation model
        om_prior = self.study.observationModel.prior
        if hasattr(om_prior, '__call__'):
            prior = om_prior(*self.study.grid)
        elif isinstance(om_prior, np.ndarray):
            prior = deepcopy(om_prior)
        else:
            prior = np.ones(self.study.gridSize)  # flat prior

        # normalize prior (necessary in case an improper prior is used)
        prior /= np.sum(prior)
        prior *= np.prod(self.study.latticeConstant)
        return prior

    def computeBackwardPrior(self, posterior, t):
        return self.computeForwardPrior(posterior, t - 1)
class Independent(TransitionModel):
"""
Observations are treated as independent. This transition model restores the prior distribution for the parameters
at each time step, effectively assuming independent observations.
Note:
| |
<gh_stars>0
import os
from os import listdir
from os.path import isfile, join
# File system
# Absolute directory that contains this script; used as the root for the
# file listings and size calculations below.
dir_path = os.path.dirname(os.path.realpath(__file__))
# Names of the regular files (directories excluded) in the script's directory.
filesys = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
def get_dir_size(path=None):
    """Return the total size, in kibibytes, of all files under *path* (recursive).

    :param path: directory to measure; defaults to the directory containing
        this script (same value as the module-level ``dir_path``)
    :return: cumulative file size in KiB (float)
    """
    # BUG FIX: the previous version scanned the module-global dir_path instead
    # of the `path` parameter, so every recursive call re-scanned the root
    # directory (infinite recursion for any subdirectory), and it also added
    # KiB values from recursive calls into a byte accumulator before dividing
    # by 1024 again.
    if path is None:
        path = os.path.dirname(os.path.realpath(__file__))
    return _get_dir_size_bytes(path) / 1024


def _get_dir_size_bytes(path):
    """Return the total size in bytes of all files under *path* (recursive)."""
    total = 0
    with os.scandir(path) as it:
        for entry in it:
            if entry.is_file():
                total += entry.stat().st_size
            elif entry.is_dir():
                total += _get_dir_size_bytes(entry.path)
    return total
# Total size in bytes of every file under dir_path, including subdirectories.
size = sum(
    os.path.getsize(os.path.join(root, name))
    for root, _subdirs, names in os.walk(dir_path)
    for name in names
)
while True:
command_line=input("C:\> ")
if command_line=="help":
print("")
print("help = Prints commands currently usable, ")
print("listdir = Prints a list of available directories, ")
print("dir = Prints a list of all available files in the current directory, ")
print("run (file) = Executes the file entered, ")
print("end = Closes and resets the EYN-DOS terminal, ")
print("browser = Takes you to the EYN-DOS browser terminal, ")
print("ver = Prints the EYN-DOS version that is running, ")
print("credits = Prints a list of all the people who worked on EYN-DOS, ")
print("cd (File/Folder location) = Takes you to the directory entered, ")
print("cdate = Prints the current date and time, ")
print("read = Prints the contents of the file entered, ")
print("find = Prints the directory path of the file entered, ")
print("write = Writes 1 line of custom text to the file entered (creates new file), ")
print("del = Deletes any newly writtten file entered, ")
print("size = Prints the size of the file entered, ")
print("clear = Clears the screen of all previously printed lines, ")
print("errorlist = Prints all error codes and their meanings.")
print("A: = Takes you to the A drive (Floppy disk drive 1)")
print("B: = Takes you to the B drive (Floppy disk drive 2)")
print("C: = Takes you to the C drive (Hard drive)")
print("D: = Takes you to the D drive (Recovery drive)")
print("E: = Takes you to the E drive (Compact Disc drive)")
print("")
print("Misc:")
print("")
print(" insert(1-9).py = You can add a custom Python file into the EYN-DOS folder and execute it by typing 'run insert(Number in the filename (1-9)).py, ")
print("")
if command_line=="listdir":
print("")
print("DIR1 - ", float(size)/1024, " Kilobytes")
print("")
print("DIR2 - ", "0.0", " Kilobytes")
print("")
print("DIR3 - ", "0.0", " Kilobytes")
print("")
if command_line=="dir":
print("")
print(filesys)
print("")
print(get_dir_size('data/src'))
print(" | Kilobytes")
print("")
if command_line=="run eyndos.py":
print("")
print("This is already running!")
print("")
if command_line=="end":
print("")
exit()
if command_line=="run calculator.py":
print("")
os.system('python3 calculator.py')
print("")
if command_line==("run minesweeper.py"):
print("")
os.system('python3 minesweeper.py')
print("")
if command_line==("run notebook.py"):
print("")
os.system("python3 notebook.py")
print("")
if command_line==("lgr"):
print("")
print("Hey, that's a good YouTube channel!")
print("")
if command_line==("fdisk"):
print("")
print("ERROR EYN_C3-FNI")
print("")
if command_line==("win"):
print("")
print("No.")
print("")
if command_line==("run solitaire.py"):
"Credit to 'shomikj' on GitHub for this code!"
print("")
os.system('python3 solitaire.py')
print("")
if command_line==("run weight_converter.py"):
print("")
os.system("python3 weight_converter.py")
print("")
if command_line==("run gui_calculator.py"):
print("")
os.system('python3 gui_calculator.py')
print("")
if command_line==("run clock.py"):
print("")
os.system('python3 clock.py')
print("")
if command_line==("count"):
print("")
count_1=input("WARNING: THIS WILL MAKE EYN-DOS UNUSABLE FOR THE REST OF THE SESSION. CONTINUE? (y/n) ")
print("")
if count_1==("y"):
print("")
os.system('python3 counter.py')
print("")
if count_1==("n"):
print("")
print("Command disbanded")
print("")
if command_line==("run insert1.py"):
print("")
os.system('python3 insert1.py')
print("")
if command_line==("troll"):
print("")
print("░░░░░░▄▄▄▄▀▀▀▀▀▀▀▀▄▄▄▄▄▄▄")
print("░░░░░█░░░░░░░░░░░░░░░░░░▀▀▄")
print("░░░░█░░░░░░░░░░░░░░░░░░░░░░█")
print("░░░█░░░░░░▄██▀▄▄░░░░░▄▄▄░░░░█")
print("░▄▀░▄▄▄░░█▀▀▀▀▄▄█░░░██▄▄█░░░░█")
print("█░░█░▄░▀▄▄▄▀░░░░░░░░█░░░░░░░░░█")
print("█░░█░█▀▄▄░░░░░█▀░░░░▀▄░░▄▀▀▀▄░█")
print("░█░▀▄░█▄░█▀▄▄░▀░▀▀░▄▄▀░░░░█░░█")
print("░░█░░░▀▄▀█▄▄░█▀▀▀▄▄▄▄▀▀█▀██░█")
print("░░░█░░░░██░░▀█▄▄▄█▄▄█▄▄██▄░░█")
print("░░░░█░░░░▀▀▄░█░░░█░█▀█▀█▀██░█")
print("░░░░░▀▄░░░░░▀▀▄▄▄█▄█▄█▄█▄▀░░█")
print("░░░░░░░▀▄▄░░░░░░░░░░░░░░░░░░░█")
print("░░░░░░░░░░▀▀▄▄░░░░░░░░░░░░░░░█")
print("░░░░░░░░░░░░░░▀▄▄▄▄▄░░░░░░░░█")
print("░░░░░░░░░░░░░░░░░░█▄▄▄▄▄▄▄▄▀")
print("")
if command_line==("run oregon_trail.py"):
print("")
os.system('python3 oregon_trail.py')
print("")
if command_line==("run snake.py"):
print("")
os.system('python3 snake.py')
print("")
if command_line==("run pong.py"):
print("")
os.system('python3 pong.py')
print("")
if command_line==("run tetris.py"):
print("")
print("Use A to go left, D to go right and spacebar to rotate.")
os.system('python3 tetris.py')
print("")
if command_line==('run invaders.py'):
print("")
print("Use the left arrow to go left, the right arrow to go right, and spacebar to shoot.")
os.system('python3 invaders.py')
print("")
if command_line==("run paintbrush.py"):
print("")
os.system('python3 paintbrush.py')
print("")
if command_line==("!devdebug1!"):
print("")
dev_ver=input("THIS OPTION IS FOR DEVELOPERS AND TESTERS ONLY. IF YOU ARE NOT A DEVELOPER OR TESTER, YOU WILL BE REPORTED TO A HR. CONTINUE? (y/n) ")
print("")
if dev_ver==("n"):
print("")
print("Command disbanded")
print("")
if dev_ver==("y"):
print("")
dev_ver1=input("Enter your provided username: ")
if dev_ver1==("kg2"):
print("")
print("Welcome back, Kian.")
print("")
dev_ver2=input("Enter your provided password: ")
if dev_ver2==("celerysticksfiddlebottom20"):
print("")
print("Welcome to the EYN-DOS development terminal, Kian!")
print("")
if dev_ver2!=("celerysticksfiddlebottom20"):
exit()
if dev_ver1==("cj9"):
print("")
print("Welcome back, Cayden.")
print("")
dev_ver3=input("Enter your provided password: ")
if dev_ver3==("carrotfarmmule90"):
print("")
print("Welcome to the EYN=DOS development terminal, Cayden!")
print("")
if dev_ver3!=("carrotfarmmule90"):
exit()
if dev_ver1==("ig1"):
print("")
print("Welcome back, Ian.")
print("")
dev_ver4=input("Enter your provided password: ")
if dev_ver4==("isaacboatorange30"):
print("")
print("Welcome to the EYN-DOS development terminal, Ian!")
print("")
if dev_ver4!=("isaacboatorange30"):
exit()
if dev_ver1==(""):
exit()
while True:
command_line1=input("C:\DEVDEBUG1\> ")
if command_line1==("debug"):
print("")
print("Coming soon...")
print("")
if command_line1==("end"):
exit()
if command_line1==("eyn_os"):
print("")
print("Welcome to...")
print(" (Built on EYN-DOS)")
print(" ██████████████████████████")
print(" ███░█████░██░░░██░██░░░█░██")
print("██ ██ ██ ██░░█░░░░░░░███░░░██░░░█░░██")
print(" ██ ██ ██░░█████░░░░█░░░░█░█░░█░░██")
print("██ ██ ██ ██░░█░░░░░░░░█░░░░█░░█░█░░██")
print(" ██ ██ ███░█████░░░░█░░░░█░░░██░░██")
print("██ ██ ██ ████████████████████████████")
print(" ██ ██ ███░░░█████░░░░░░█████░░░░██")
print(" ██ ██ ██░░░█░░░░░█░░░░█░░░░░░░░░██")
print(" ██ ██ ██░░░█░░░░░█░░░░░█████░░░░██")
print(" ██ ██░░░█░░░░░█░░░░░░░░░░█░░░██")
print(" ██ ███░░░█████░░░░░░█████░░░██")
print(" ██████████████████████████")
print(" A nostalgic, yet modern")
print(" O.S...")
print("")
os.system('python3 eyn_os_0_1.py')
print("")
if command_line1==("calculate"):
print("")
gc1=input("GUI based or CLI based? (g/c) ")
if gc1==("g"):
print("")
os.system('python3 gui_calculator.py')
print("")
if gc1==("c"):
print("")
os.system('python3 calculator.py')
print("")
if command_line1==("time"):
print("")
os.system('python3 clock.py')
print("")
if command_line1==("coder"):
print("")
print("Coming soon...")
print("")
if command_line1==("count"):
print("")
countperm=input("WARNING: counter.py WILL LOCK YOUR PC UNTIL RESTARTED PHYSICALLY. CONTINUE? (y/n) ")
if countperm==("n"):
print("")
print("Command disbanded")
print("")
if countperm==("y"):
print("")
os.system('python3 counter.py')
print("")
if command_line1==("eynos01 files"):
print("")
print(" - - - - EYNOS1 - - - - ")
print("")
print(" eyn_os_0_1.py - 3kb")
print(" user.folder - 0kb")
print("")
print(" TOTAL: 3kb")
print("")
if command_line1==("dir1 files"):
print("")
print(" - - - - DIR1 - - - - ")
print("")
print(" eyndos.py - 29kb")
print(" calculator.py - 1kb")
print(" minesweeper.py - 9kb")
print(" notebook.py - 1kb")
print(" solitaire.py - 12kb")
print(" test1.py - 1kb")
print(" weight_converter.py - 1kb")
print(" gui_calculator.py - 4kb")
print(" clock.py - 1kb")
print(" oregon_trail.py - 8kb")
print(" snake.py - 4kb")
print(" pong.py - 3kb")
print(" tetris.py - 7kb")
print(" paintbrush.py - 3kb")
print(" test3.py - 15kb")
print(" mouse_detection.py - 1kb")
print("")
print(" TOTAL: 100kb - LEFT: 900kb - 16 Files")
print("")
if command_line1==("return"):
print("")
print("Returning to main terminal...")
print("")
break
if command_line1==("help"):
print("")
print("help = Prints a list of available commands, end = Ends the current EYN-DOS session, eyn_os = Runs the latest version of EYN-OS, calculate = Runs a calculator program, time = Runs a clock program, count = Counts infinitely (locks current EYN-DOS session), (directory) files = Prints files and information about the entered directory, return = Returns you to the main EYN-DOS terminal. Attempting to type an unknown command results in a blank response.")
print("")
if command_line1==("run mouse_detection.py"):
print("")
os.system('python3 mouse_detection.py')
print("")
if command_line==("ver"):
print("")
print("█████████ ███ ███ ███ ███ ██████ ██████ ██████")
print("███ ███ ███ ██████ ███ ███ ███ ███ ███ ███")
print("█████████ ███ ███ ███ ███ ██████ ███ ███ ███ ███ ██████")
print("███ ███ ███ █████ ███ ███ ███ ███ ███")
print("█████████ ███ ███ ███ ██████ ██████ ██████")
print("")
print(" ████ ████████")
print(" ███ ███ ███")
print(" ███ ███")
print(" ███ ███")
print(" ███ ███")
print(" █████████ ██ ███")
print("")
print("EYN-DOS 1.7 (2022)")
print("")
if command_line==("credits"):
print("")
print("The EYN-DOS Team:")
print("")
print(" Primary coder: <NAME> (Founder and CEO of J.K Incorporated)")
print(" Secondary coder: <NAME> (Musician and Lead Artist of J.K Incorporated.")
print(" Logo designer: <NAME>.")
print(" Staff commander: <NAME>")
print(" Everyone involved: <NAME>, <NAME>, <NAME>. and other J.K Incorporated employees.")
print("")
print("-----------------------------------------------------------------------------------------")
print("")
print(" Honorable mentions:")
print("")
print(" <NAME>: Coder of the 'Snake' game included with EYN-DOS.")
print(" shomikj: Coder of the command line version of 'Solitaire' for EYN-DOS.")
print(" <NAME>: Supporter.")
print(" <NAME>: Supporter and artist.")
print(" Github, StackOverflow & GeeksForGeeks: Saver of countless hours of research.")
print(" You: For using EYN-DOS.")
print(" Linux: Just awesome")
print("")
print(" Thank you for using EYN-DOS!")
print("")
if command_line==("run insert2.py"):
print("")
os.system("python3 insert2.py")
print("")
if | |
# pybids has to be greater than 0.5
from .interfaces import BIDSDataGrabberPatch
from nipype.pipeline import engine as pe
from argparse import ArgumentParser
from nipype.interfaces import utility as niu
import os
def main():
    """Command-line entry point for the atlas-correlation pipeline.

    Parses the command line, creates the output directory
    (``<deriv parent>/atlasCorrelations``) and work directory
    (``<deriv parent>/work``), and runs either the participant-level
    connectivity workflow or the group-level collection workflow.

    Raises:
        OSError: at group level if the participant-level output directory
            does not exist yet.
        NameError: if analysis_level is neither 'participant' nor 'group'
            (argparse normally prevents this).
    """
    opts = get_parser().parse_args()

    # define and create the output directory
    outdir = os.path.join(
        os.path.dirname(
            os.path.abspath(opts.deriv_pipeline)), 'atlasCorrelations')
    os.makedirs(outdir, exist_ok=True)

    # define and create the work directory
    workdir = os.path.join(
        os.path.dirname(
            os.path.abspath(opts.deriv_pipeline)), 'work')
    os.makedirs(workdir, exist_ok=True)

    if opts.analysis_level == 'participant':
        # initialize participant workflow
        participant_wf = pe.Workflow(name='participant_wf', base_dir=workdir)
        # initialize connectivity workflow
        connectivity_wf = init_connectivity_wf(workdir, outdir, opts.hp, opts.lp, os.path.abspath(opts.atlas_img),
                                               os.path.abspath(opts.atlas_lut), opts.confounds)

        imgs_criteria = {
            'imgs':
            {
                'space': 'MNI152NLin2009cAsym',
                'modality': 'func',
                'type': 'preproc'
            }
        }

        # add in optional search criteria
        # BUG FIX: these filters were previously written to
        # imgs_criteria['matrices'], a key that does not exist in this dict
        # (KeyError at runtime); the query key here is 'imgs'
        if opts.session:
            imgs_criteria['imgs']['session'] = opts.session
        if opts.task:
            imgs_criteria['imgs']['task'] = opts.task
        if opts.run:
            imgs_criteria['imgs']['run'] = opts.run
        if opts.variant:
            imgs_criteria['imgs']['variant'] = opts.variant

        input_node = pe.Node(
            BIDSDataGrabberPatch(
                domains=['bids', 'derivatives'],
                output_query=imgs_criteria,
                base_dir=os.path.abspath(opts.deriv_pipeline)),
            name='input_node')

        participant_wf.connect([
            (input_node, connectivity_wf,
                [('imgs', 'input_node.img')]),
        ])

        # run the participant workflow
        participant_wf.run()

    elif opts.analysis_level == 'group':
        # set the input dir (assumed participant level already run).
        input_dir = os.path.join(
            os.path.dirname(
                os.path.abspath(opts.deriv_pipeline)), 'atlasCorrelations')

        # catch if directory doesn't exist
        if not os.path.isdir(input_dir):
            raise OSError('DOES NOT EXIST: {input_dir}'.format(input_dir=input_dir))

        matrices_criteria = {
            'matrices':
            {
                'space': 'MNI152NLin2009cAsym',
                'modality': 'func',
                'type': 'corrMatrix',
            }
        }

        # add in optional search criteria
        if opts.session:
            matrices_criteria['matrices']['session'] = opts.session
        if opts.task:
            matrices_criteria['matrices']['task'] = opts.task
        if opts.run:
            matrices_criteria['matrices']['run'] = opts.run
        if opts.variant:
            matrices_criteria['matrices']['variant'] = opts.variant

        group_wf = pe.Workflow(name='group_wf', base_dir=workdir)
        group_collection_wf = init_group_collection_wf(work_dir=workdir,
                                                       outdir=input_dir)

        input_node = pe.Node(
            BIDSDataGrabberPatch(
                domains=['bids', 'derivatives'],
                output_query=matrices_criteria,
                base_dir=input_dir),
            name='input_node')

        group_wf.connect([
            (input_node, group_collection_wf,
                [('matrices', 'input_node.matrix_tsv')]),
        ])

        group_wf.run()
    else:
        raise NameError('specify either participant or group for analysis level')
def get_parser():
    """Build the command line argument parser for atlas_correlations."""
    parser = ArgumentParser(description='atlas_correlations')
    parser.add_argument('--deriv-pipeline', '-d', action='store', required=True,
                        help='input derivative directory (e.g. fmriprep). '
                             'I assume the inputs are in MNI space.')
    # NOTE(review): participant level uses opts.atlas_img unconditionally;
    # consider required=True once group-level usage is confirmed not to need it.
    parser.add_argument('--atlas-img', '-a', action='store',
                        help='input atlas nifti')
    parser.add_argument('--atlas-lut', '-l', action='store', required=True,
                        help='atlas look up table formatted with the columns: '
                             'index, regions')
    parser.add_argument('--confounds', '-c', action='store', nargs='+',
                        help='names of confounds to be included in analysis')
    parser.add_argument('analysis_level', choices=['participant', 'group'],
                        help='run participant level analysis, or aggregate '
                             'group level results')
    parser.add_argument('--participant_label', '--participant-label',
                        action='store', nargs='+',
                        help='one or more participant identifiers with the '
                             'sub- prefix removed')
    # Band-pass cutoffs are optional; downstream code converts them to float.
    for cutoff, direction in (('hp', 'high'), ('lp', 'low')):
        parser.add_argument('--{}'.format(cutoff), action='store', default=None,
                            help='{}pass filter to apply to the data'.format(direction))
    # The four BIDS entity filters share one help-text template.
    for entity in ('variant', 'run', 'session', 'task'):
        parser.add_argument('--{}'.format(entity), action='store',
                            help='only analyze files with a specific {} label'.format(entity))
    return parser
def init_connectivity_wf(work_dir, output_dir, hp, lp,
                         atlas_img, atlas_lut, confounds):
    """
    Generates a connectivity matrix for a bold file

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from atlascorr.atlas_correlations import init_connectivity_wf
        wf = init_connectivity_wf(
            work_dir='.',
            output_dir='.',
            hp=None,
            lp=None,
            atlas_img='',
            atlas_lut='',
            confounds=[''],
        )

    Parameters
    ----------
    work_dir : str
        full path to directory where intermediate files will be written
    output_dir : str
        full path to directory where output files will be written
    hp : float or None
        high pass filter (frequencies higher than this pass)
    lp : float or None
        low pass filter (frequencies lower than this pass)
    atlas_img : str
        full path and name of the atlas file
    atlas_lut : str
        full path and name to atlas lookup tsv with two columns
        (regions and index)
    confounds : list
        list of confounds to include in the model

    Inputs
    ------
    img : str
        full path and name of the bold file
    atlas_img : str
        full path and name of the atlas file
    atlas_lut : str
        full path and name to atlas lookup tsv with two columns
        (regions and index)

    Outputs
    -------
    dst : str
        full path and name of the correlation matrix
    """
    connectivity_wf = pe.Workflow(name='connectivity_wf')
    connectivity_wf.base_dir = work_dir
    # Entry point of the graph: iterates over bold images ('img' is the
    # iterfield); the atlas image/lut are set once and broadcast to every
    # iteration.
    input_node = pe.MapNode(
        niu.IdentityInterface(
            fields=['img', 'atlas_img', 'atlas_lut']),
        iterfield=['img'],
        name='input_node')
    input_node.inputs.atlas_img = atlas_img
    input_node.inputs.atlas_lut = atlas_lut
    # Derive the sibling confounds tsv and brainmask paths from each bold
    # filename (see get_files).
    get_files_node = pe.MapNode(
        niu.Function(
            function=get_files,
            input_names=['img'],
            output_names=['confounds', 'brainmask']),
        iterfield=['img'],
        name='get_files_node')
    # Load each confounds tsv and keep only the requested confound columns
    # (see proc_confounds); the confound list itself is a fixed input.
    confounds2df_node = pe.MapNode(
        niu.Function(
            function=proc_confounds,
            input_names=['confounds', 'confound_file'],
            output_names=['confounds_df']),
        iterfield=['confound_file'],
        name='confounds2df_node')
    confounds2df_node.inputs.confounds = confounds
    # Extract one timeseries per atlas region with optional band-pass
    # filtering and confound regression (see extract_ts).
    extract_ts_node = pe.MapNode(
        niu.Function(
            function=extract_ts,
            input_names=['img',
                         'brainmask',
                         'atlas_img',
                         'confounds_df',
                         'hp',
                         'lp'],
            output_names=['ts_matrix']),
        iterfield=['img', 'confounds_df', 'brainmask'],
        name='extract_ts_node')
    # initialize highpass and lowpass
    extract_ts_node.inputs.lp = lp
    extract_ts_node.inputs.hp = hp
    # Turn each region-by-time matrix into an r->z transformed correlation
    # matrix (see make_corr_matrix).
    make_corr_matrix_node = pe.MapNode(
        niu.Function(
            function=make_corr_matrix,
            input_names=['ts_matrix'],
            output_names=['zcorr_matrix']),
        iterfield=['ts_matrix'],
        name='make_corr_matrix_node')
    # Write each matrix to a tsv under output_dir.
    write_out_corr_matrix_node = pe.MapNode(
        niu.Function(
            function=write_out_corr_matrix,
            input_names=['corr_matrix', 'atlas_lut', 'img', 'output_dir'],
            output_names=['matrix_tsv']),
        iterfield=['corr_matrix', 'img'],
        name='write_out_corr_matrix_node')
    write_out_corr_matrix_node.inputs.output_dir = output_dir
    # Wire the pipeline: img -> (confounds, brainmask) -> timeseries ->
    # correlation matrix -> tsv on disk.
    connectivity_wf.connect([
        (input_node, get_files_node,
         [('img', 'img')]),
        (get_files_node, confounds2df_node,
         [('confounds', 'confound_file')]),
        (get_files_node, extract_ts_node,
         [('brainmask', 'brainmask')]),
        (confounds2df_node, extract_ts_node,
         [('confounds_df', 'confounds_df')]),
        (input_node, extract_ts_node,
         [('atlas_img', 'atlas_img'),
          ('img', 'img')]),
        (extract_ts_node, make_corr_matrix_node,
         [('ts_matrix', 'ts_matrix')]),
        (make_corr_matrix_node, write_out_corr_matrix_node,
         [('zcorr_matrix', 'corr_matrix')]),
        (input_node, write_out_corr_matrix_node,
         [('atlas_lut', 'atlas_lut'),
          ('img', 'img')]),
    ])
    return connectivity_wf
def init_group_collection_wf(work_dir, outdir):
    """
    Combines correlation matrices derived from the individual
    bold files.

    .. workflow::
        :graph2use: orig
        :simple_form: yes

        from atlascorr.atlas_correlations import init_group_collection_wf
        wf = init_group_collection_wf(
            work_dir='.',
            outdir='.',
        )

    Parameters
    ----------
    work_dir : str
        full path to directory where intermediate files will be written
    outdir : str
        full path to directory where the group tsv will be written

    Inputs
    ------
    matrix_tsv : str
        full path and name to correlation matrix
    """
    group_collection_wf = pe.Workflow(name='group_collection_wf')
    group_collection_wf.base_dir = work_dir
    # Entry point: one iteration per participant-level correlation matrix.
    input_node = pe.MapNode(
        niu.IdentityInterface(
            fields=['matrix_tsv']),
        iterfield=['matrix_tsv'],
        name='input_node')
    # Convert each matrix tsv into a per-participant dataframe
    # (see proc_matrix).
    matrix_proc_node = pe.MapNode(
        niu.Function(
            function=proc_matrix,
            input_names=['matrix_tsv'],
            output_names=['participant_df']),
        iterfield=['matrix_tsv'],
        name='matrix_proc_node')
    # Single (non-Map) node: receives the full list of per-participant
    # dataframes and merges them into one.
    merge_dfs_node = pe.Node(
        niu.Function(
            function=merge_dfs,
            input_names=['dfs'],
            output_names=['df']),
        name='merge_dfs_node')
    # Write the merged group dataframe to a tsv under outdir.
    write_out_group_tsv_node = pe.Node(
        niu.Function(
            function=write_out_group_tsv,
            input_names=['outdir', 'df'],
            output_names=['out_file']),
        name='write_out_group_tsv_node')
    write_out_group_tsv_node.inputs.outdir = outdir
    # Wire: matrix tsvs -> per-participant dfs -> merged df -> group tsv.
    group_collection_wf.connect([
        (input_node, matrix_proc_node,
         [('matrix_tsv', 'matrix_tsv')]),
        (matrix_proc_node, merge_dfs_node,
         [('participant_df', 'dfs')]),
        (merge_dfs_node, write_out_group_tsv_node,
         [('df', 'df')]),
    ])
    return group_collection_wf
def get_files(img):
    """
    Find the brainmask and confound files given the bold file.

    Parameters
    ----------
    img : str
        full path and name of the preprocessed bold file
        (``..._bold[_space-*][_variant-*]_preproc.nii.gz``)

    Returns
    -------
    confound : str
        full path and name of the confounds file
    brainmask : str
        full path and name of the brainmask file

    Raises
    ------
    IOError
        if the derived confounds or brainmask file does not exist on disk
    """
    # Imports live inside the function so it can run as a nipype Function node.
    import re
    import os
    # FIX: the dots in "_preproc.nii.gz" are now escaped and the pattern is
    # anchored at the end, so the literal suffix is required instead of any
    # character matching '.'.
    PROC_EXPR = re.compile(
        r'^(?P<path>.*/)?'
        r'(?P<subject_id>sub-[a-zA-Z0-9]+)'
        r'(_(?P<session_id>ses-[a-zA-Z0-9]+))?'
        r'(_(?P<task_id>task-[a-zA-Z0-9]+))?'
        r'(_(?P<acq_id>acq-[a-zA-Z0-9]+))?'
        r'(_(?P<rec_id>rec-[a-zA-Z0-9]+))?'
        r'(_(?P<run_id>run-[a-zA-Z0-9]+))?'
        r'_bold'
        r'(_(?P<space_id>space-[a-zA-Z0-9]+))?'
        r'(_(?P<variant_id>variant-[a-zA-Z0-9]+))?'
        r'_preproc\.nii\.gz$')

    def _derive(template):
        # Substitute into the bold filename, then collapse the doubled
        # underscores left behind by absent optional entities (unmatched
        # groups expand to '' in re.sub since Python 3.5).
        candidate = re.sub('_+', '_', PROC_EXPR.sub(template, img))
        if os.path.isfile(candidate):
            return candidate
        raise IOError('cannot find {candidate}'.format(candidate=candidate))

    confound = _derive(r'\g<path>'
                       r'\g<subject_id>'
                       r'_\g<session_id>'
                       r'_\g<task_id>'
                       r'_\g<run_id>'
                       r'_bold_confounds.tsv')
    brainmask = _derive(r'\g<path>'
                        r'\g<subject_id>'
                        r'_\g<session_id>'
                        r'_\g<task_id>'
                        r'_\g<run_id>'
                        r'_bold_\g<space_id>_brainmask.nii.gz')
    return confound, brainmask
def proc_confounds(confounds, confound_file):
    """
    Filter confounds file to selected confounds &
    replace "n/a"s in confounds file with the column mean.

    Parameters
    ----------
    confounds : list
        list of confound column names to include in the model
    confound_file : str
        full path and name of the confounds tsv file

    Returns
    -------
    confounds_df : pandas.core.frame.DataFrame
        dataframe containing only the selected confounds, with missing
        values imputed by each column's mean
    """
    # Import lives inside the function so it can run as a nipype Function node.
    import pandas as pd
    confounds_df = pd.read_csv(confound_file, sep='\t', na_values='n/a')
    # Keep only the requested columns (KeyError here means a requested
    # confound is absent from the file).
    confounds_df = confounds_df[confounds]
    # FIX: impute every selected column, not just FramewiseDisplacement —
    # the docstring promised mean-imputation of all "n/a"s, and NaNs would
    # otherwise propagate into the timeseries extraction.
    return confounds_df.fillna(confounds_df.mean())
def extract_ts(img, brainmask, atlas_img, confounds_df, hp=None, lp=None,
               t_r=2.0):
    """
    Extract timeseries from each region of interest described by an atlas.

    Parameters
    ----------
    img : str
        full path and name of the bold file
    brainmask : str
        full path and name of the brainmask file
    atlas_img : str
        full path and name of the atlas file
    confounds_df : pandas.core.frame.DataFrame
        dataframe containing confound measures
    hp : float, str, or None
        high pass filter (frequencies higher than this pass)
    lp : float, str, or None
        low pass filter (frequencies lower than this pass)
    t_r : float
        repetition time of the bold series in seconds; defaults to 2.0,
        the previously hard-coded value, so existing callers are unchanged

    Returns
    -------
    signals : numpy.ndarray
        2D numpy array with each column representing an atlas region
        and each row representing a volume (time point)
    """
    from nilearn.input_data import NiftiLabelsMasker
    # argparse delivers hp/lp as strings; test against None (rather than
    # truthiness) so an explicit 0.0 cutoff is not silently dropped.
    if hp is not None:
        hp = float(hp)
    if lp is not None:
        lp = float(lp)
    masker = NiftiLabelsMasker(
        labels_img=atlas_img, standardize=True, mask_img=brainmask,
        low_pass=lp, high_pass=hp, t_r=float(t_r))
    return masker.fit_transform(img, confounds=confounds_df.values)
def make_corr_matrix(ts_matrix):
"""
Make a symmetric pearson's r->z transforme correlation matrix.
Parameters
----------
ts_matrix : numpy.ndarray
2D numpy array with each column representing an atlas region
and each row representing a volume (time point)
Returns
-------
zcorr_matrix : numpy.ndarray
2D symmetric matrix measuring region-region correlations
main diagnal is all zeros
"""
from nilearn.connectome import ConnectivityMeasure
import numpy as np
def fisher_r_to_z(r):
import math
if r == 1.:
return 0.
else:
return math.log((1. + r)/(1. - r))/2.
correlation_measure = ConnectivityMeasure(kind='correlation')
corr_matrix = correlation_measure.fit_transform([ts_matrix])[0]
vfisher_r_to_z = np.vectorize(fisher_r_to_z)
# fisher's r | |
<filename>src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_math_ops.py<gh_stars>1-10
import numpy as np
import tensorflow as tf
from tensorflow.core.framework.types_pb2 import DT_FLOAT
from webdnn.frontend.tensorflow.converter import TensorFlowConverter
from webdnn.frontend.tensorflow.util import elementwise_binary_op_handler, unary_op_handler
from webdnn.frontend.util import check_broadcast_constraints
from webdnn.graph.operators.abs import Abs
from webdnn.graph.operators.acos import Acos
from webdnn.graph.operators.acosh import Acosh
from webdnn.graph.operators.asin import Asin
from webdnn.graph.operators.asinh import Asinh
from webdnn.graph.operators.atan import Atan
from webdnn.graph.operators.atanh import Atanh
from webdnn.graph.operators.cos import Cos
from webdnn.graph.operators.cosh import Cosh
from webdnn.graph.operators.elementwise_add import ElementwiseAdd
from webdnn.graph.operators.elementwise_div import ElementwiseDiv
from webdnn.graph.operators.elementwise_mul import ElementwiseMul
from webdnn.graph.operators.elementwise_pow import ElementwisePow
from webdnn.graph.operators.exp import Exp
from webdnn.graph.operators.log import Log
from webdnn.graph.operators.max import Max
from webdnn.graph.operators.min import Min
from webdnn.graph.operators.prod import Prod
from webdnn.graph.operators.rsqrt import Rsqrt
from webdnn.graph.operators.scalar_add import ScalarAdd
from webdnn.graph.operators.scalar_mul import ScalarMul
from webdnn.graph.operators.select import Select
from webdnn.graph.operators.sigmoid import Sigmoid
from webdnn.graph.operators.sin import Sin
from webdnn.graph.operators.sinh import Sinh
from webdnn.graph.operators.sum import Sum
from webdnn.graph.operators.tan import Tan
from webdnn.graph.operators.tanh import Tanh
from webdnn.graph.operators.tensordot import Tensordot
from webdnn.graph.order import Order
from webdnn.graph.variables.constant_variable import ConstantVariable
from webdnn.util import console
# Ops that map 1:1 onto existing WebDNN operators are registered through
# the generic unary/binary handler factories.
TensorFlowConverter.register_handler("Abs")(unary_op_handler(Abs))
TensorFlowConverter.register_handler("Acos")(unary_op_handler(Acos))
TensorFlowConverter.register_handler("Acosh")(unary_op_handler(Acosh))
TensorFlowConverter.register_handler("Add")(elementwise_binary_op_handler(ElementwiseAdd, ScalarAdd))
# NOTE(review): AddN is n-ary in TensorFlow but is registered with a binary
# handler here — confirm inputs beyond the second are not silently dropped.
TensorFlowConverter.register_handler("AddN")(elementwise_binary_op_handler(ElementwiseAdd, ScalarAdd))
# The handlers below are stubs: these ops have no WebDNN lowering yet, so
# each fails fast to surface the unsupported op at conversion time.
@TensorFlowConverter.register_handler("All")
def all_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Any")
def any_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("ApproximateEqual")
def approximate_equal_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("ArgMax")
def arg_max_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("ArgMin")
def arg_min_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


# Simple unary trig ops map straight onto WebDNN operators.
TensorFlowConverter.register_handler("Asin")(unary_op_handler(Asin))
TensorFlowConverter.register_handler("Asinh")(unary_op_handler(Asinh))
TensorFlowConverter.register_handler("Atan")(unary_op_handler(Atan))


@TensorFlowConverter.register_handler("Atan2")
def atan2_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


TensorFlowConverter.register_handler("Atanh")(unary_op_handler(Atanh))


@TensorFlowConverter.register_handler("BatchMatMul")
def batch_mat_mul_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Betainc")
def betainc_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Bincount")
def bincount_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Bucketize")
def bucketize_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Cast")
def cast_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Pass the tensor through unchanged; WebDNN computes in float only."""
    # Warn when a non-float destination type was requested, since the cast
    # itself is dropped either way.
    if tf_op.get_attr("DstT") != DT_FLOAT:
        console.warning("[TensorFlowConverter] Operator 'Cast' is ignored.")
    variable = converter.get_variable(tf_op.inputs[0])
    converter.set_variable(tf_op.outputs[0], variable)
# More stubs: no WebDNN lowering yet, fail fast at conversion time.
@TensorFlowConverter.register_handler("Ceil")
def ceil_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Complex")
def complex_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("ComplexAbs")
def complex_abs_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Conj")
def conj_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


TensorFlowConverter.register_handler("Cos")(unary_op_handler(Cos))
TensorFlowConverter.register_handler("Cosh")(unary_op_handler(Cosh))


@TensorFlowConverter.register_handler("Cross")
def cross_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Cumprod")
def cumprod_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Cumsum")
def cumsum_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Digamma")
def digamma_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


TensorFlowConverter.register_handler("Div")(elementwise_binary_op_handler(ElementwiseDiv))


@TensorFlowConverter.register_handler("Equal")
def equal_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Erf")
def erf_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Erfc")
def erfc_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


TensorFlowConverter.register_handler("Exp")(unary_op_handler(Exp))
@TensorFlowConverter.register_handler("Expm1")
def expm1_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Lower Expm1(x) as Exp(x) - 1 (less accurate for very small x)."""
    console.warning(
        "[TensorFlowConverter] In WebDNN, \"Expm1(x)\" is converted into \"Exp(x)-1\", which is not enough accurate as Expm1 when"
        "x is so small that \"Exp(x) == 1\" in floating point accuracy.")
    v_in = converter.get_variable(tf_op.inputs[0])
    exp_v, = Exp(None)(v_in)
    converter.set_variable(tf_op.outputs[0], exp_v - 1)
# Floor-family ops: stubs, no WebDNN lowering yet.
@TensorFlowConverter.register_handler("Floor")
def floor_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("FloorDiv")
def floor_div_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("FloorMod")
def floor_mod_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Greater")
def greater_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Element-wise comparison: output is lhs > rhs."""
    lhs = converter.get_variable(tf_op.inputs[0])
    rhs = converter.get_variable(tf_op.inputs[1])
    # The two operands must satisfy WebDNN's broadcasting constraints.
    check_broadcast_constraints(lhs, rhs)
    converter.set_variable(tf_op.outputs[0], lhs > rhs)
@TensorFlowConverter.register_handler("GreaterEqual")
def greater_equal_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Element-wise comparison: output is lhs >= rhs."""
    lhs = converter.get_variable(tf_op.inputs[0])
    rhs = converter.get_variable(tf_op.inputs[1])
    # The two operands must satisfy WebDNN's broadcasting constraints.
    check_broadcast_constraints(lhs, rhs)
    converter.set_variable(tf_op.outputs[0], lhs >= rhs)
# Gamma-family and complex-part ops: stubs, no WebDNN lowering yet.
@TensorFlowConverter.register_handler("Igamma")
def igamma_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Igammac")
def igammac_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Imag")
def imag_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Inv")
def inv_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Element-wise reciprocal: 1 / x."""
    v = converter.get_variable(tf_op.inputs[0])
    converter.set_variable(tf_op.outputs[0], 1 / v)
# Gradient and float-classification ops: stubs, no WebDNN lowering yet.
@TensorFlowConverter.register_handler("InvGrad")
def inv_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("IsFinite")
def is_finite_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("IsInf")
def is_inf_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("IsNan")
def is_nan_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Less")
def less_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Element-wise comparison: lhs < rhs, expressed as rhs > lhs."""
    lhs = converter.get_variable(tf_op.inputs[0])
    rhs = converter.get_variable(tf_op.inputs[1])
    # The two operands must satisfy WebDNN's broadcasting constraints.
    check_broadcast_constraints(lhs, rhs)
    converter.set_variable(tf_op.outputs[0], rhs > lhs)
@TensorFlowConverter.register_handler("LessEqual")
def less_equal_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Element-wise comparison: lhs <= rhs, expressed as rhs >= lhs."""
    lhs = converter.get_variable(tf_op.inputs[0])
    rhs = converter.get_variable(tf_op.inputs[1])
    # The two operands must satisfy WebDNN's broadcasting constraints.
    check_broadcast_constraints(lhs, rhs)
    converter.set_variable(tf_op.outputs[0], rhs >= lhs)
# Stubs: no WebDNN lowering yet.
@TensorFlowConverter.register_handler("Lgamma")
def lgamma_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("LinSpace")
def lin_space_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Log")
def log_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Element-wise natural logarithm."""
    v_in = converter.get_variable(tf_op.inputs[0])
    v_out, = Log(None)(v_in)
    converter.set_variable(tf_op.outputs[0], v_out)
@TensorFlowConverter.register_handler("Log1p")
def log1p_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Lower Log1p(x) as Log(1 + x) (less accurate for very small x)."""
    console.warning(
        "[TensorFlowConverter] In WebDNN, \"Log1p(x)\" is converted into \"Log(1+x)\", which is not enough accurate as Log1p when"
        "x is so small that \"1 + x == 1\" in floating point accuracy.")
    v_in = converter.get_variable(tf_op.inputs[0])
    v_out, = Log(None)(1 + v_in)
    converter.set_variable(tf_op.outputs[0], v_out)
# Boolean logic ops: stubs, no WebDNN lowering yet.
@TensorFlowConverter.register_handler("LogicalAnd")
def logical_and_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("LogicalNot")
def logical_not_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("LogicalOr")
def logical_or_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("MatMul")
def matmul_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """2D matrix multiply, honoring the transpose_a / transpose_b attrs."""
    a = converter.get_variable(tf_op.inputs[0])
    b = converter.get_variable(tf_op.inputs[1])
    if a.ndim > 2 or b.ndim > 2:
        raise NotImplementedError("[TensorFlowConverter] Currently, MatMul is supported only 2D * 2D case.")
    # The contracted axis is the "inner" dimension of each operand; a
    # transpose flag flips which axis that is.
    axis_a = a.order.axes[0] if tf_op.get_attr("transpose_a") else a.order.axes[1]
    axis_b = b.order.axes[1] if tf_op.get_attr("transpose_b") else b.order.axes[0]
    c, = Tensordot(None, axes=[axis_a, axis_b])(a, b)
    converter.set_variable(tf_op.outputs[0], c)
@TensorFlowConverter.register_handler("Max")
def max_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # Reduce the input over each requested axis in turn; axes must be
    # known at conversion time.
    x = converter.get_variable(tf_op.inputs[0])
    axis = converter.get_variable(tf_op.inputs[1])
    assert isinstance(axis, ConstantVariable), "[TensorFlowConverter] Operation 'Max' with dynamic axis is not supported yet."
    for axis in [x.order.axes[i] for i in axis.data.astype(int).flatten().tolist()]:
        x, = Max(None, axis=axis)(x)
        # Drop the reduced axis unless keep_dims was requested; never
        # squeeze away the final remaining axis.
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # source — confirm the squeeze runs once per reduced axis.
        if not tf_op.get_attr("keep_dims") and x.ndim > 1:
            x = x.squeeze(axis)
    converter.set_variable(tf_op.outputs[0], x)
@TensorFlowConverter.register_handler("Maximum")
def maximum_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Element-wise maximum, built from the comparison mask (lhs > rhs)."""
    lhs = converter.get_variable(tf_op.inputs[0])
    rhs = converter.get_variable(tf_op.inputs[1])
    check_broadcast_constraints(lhs, rhs)
    # mask is 1 where lhs wins; on ties it is 0 and rhs (== lhs) is taken.
    mask = lhs > rhs
    converter.set_variable(tf_op.outputs[0], lhs * mask + rhs * (1 - mask))
# Stub: no WebDNN lowering yet.
@TensorFlowConverter.register_handler("Mean")
def mean_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Min")
def min_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # Reduce the input over each requested axis in turn; axes must be
    # known at conversion time.
    x = converter.get_variable(tf_op.inputs[0])
    axis = converter.get_variable(tf_op.inputs[1])
    assert isinstance(axis, ConstantVariable), "[TensorFlowConverter] Operation 'Min' with dynamic axis is not supported yet."
    for axis in [x.order.axes[i] for i in axis.data.astype(int).flatten().tolist()]:
        x, = Min(None, axis=axis)(x)
        # Drop the reduced axis unless keep_dims was requested; never
        # squeeze away the final remaining axis.
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # source — confirm the squeeze runs once per reduced axis.
        if not tf_op.get_attr("keep_dims") and x.ndim > 1:
            x = x.squeeze(axis)
    converter.set_variable(tf_op.outputs[0], x)
@TensorFlowConverter.register_handler("Minimum")
def minimum_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Element-wise minimum, built from the comparison mask (lhs > rhs)."""
    lhs = converter.get_variable(tf_op.inputs[0])
    rhs = converter.get_variable(tf_op.inputs[1])
    check_broadcast_constraints(lhs, rhs)
    # mask is 1 where lhs is larger, so lhs is suppressed and rhs kept.
    mask = lhs > rhs
    converter.set_variable(tf_op.outputs[0], lhs * (1 - mask) + rhs * mask)
# Stub: no WebDNN lowering yet.
@TensorFlowConverter.register_handler("Mod")
def mod_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


TensorFlowConverter.register_handler("Mul")(elementwise_binary_op_handler(ElementwiseMul, ScalarMul))
@TensorFlowConverter.register_handler("Neg")
def neg_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Element-wise negation."""
    v = converter.get_variable(tf_op.inputs[0])
    converter.set_variable(tf_op.outputs[0], -v)
# Stubs: no WebDNN lowering yet.
@TensorFlowConverter.register_handler("NotEqual")
def not_equal_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Polygamma")
def polygamma_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


TensorFlowConverter.register_handler("Pow")(elementwise_binary_op_handler(ElementwisePow))
@TensorFlowConverter.register_handler("Prod")
def prod_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    # Reduce the input over each requested axis in turn; axes must be
    # known at conversion time.
    x = converter.get_variable(tf_op.inputs[0])
    axis = converter.get_variable(tf_op.inputs[1])
    assert isinstance(axis, ConstantVariable), "[TensorFlowConverter] Operation 'Prod' with dynamic axis is not supported yet."
    for axis in [x.order.axes[i] for i in axis.data.astype(int).flatten().tolist()]:
        x, = Prod(None, axis=axis)(x)
        # Drop the reduced axis unless keep_dims was requested; never
        # squeeze away the final remaining axis.
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # source — confirm the squeeze runs once per reduced axis.
        if not tf_op.get_attr("keep_dims") and x.ndim > 1:
            x = x.squeeze(axis)
    converter.set_variable(tf_op.outputs[0], x)
# Quantized ops: stubs, no WebDNN lowering yet.
@TensorFlowConverter.register_handler("QuantizeDownAndShrinkRange")
def quantize_down_and_shrink_range_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("QuantizedAdd")
def quantized_add_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("QuantizedMatMul")
def quantized_mat_mul_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("QuantizedMul")
def quantized_mul_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Range")
def range_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Constant-fold tf.range into a ConstantVariable via np.arange."""
    bounds = [converter.get_variable(t) for t in tf_op.inputs[:3]]
    # start, limit, and delta must all be known at conversion time.
    for bound in bounds:
        if not isinstance(bound, ConstantVariable):
            raise NotImplementedError("[TensorFlowConverter] 'Range' operator with dynamic range is not supported yet")
    start, limit, delta = (bound.data.flatten()[0] for bound in bounds)
    result = ConstantVariable(np.arange(start, limit, delta), Order([None]))
    converter.set_variable(tf_op.outputs[0], result)
# Stubs: no WebDNN lowering yet.
@TensorFlowConverter.register_handler("Real")
def real_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


TensorFlowConverter.register_handler("RealDiv")(elementwise_binary_op_handler(ElementwiseDiv))


@TensorFlowConverter.register_handler("Reciprocal")
def reciprocal_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("ReciprocalGrad")
def reciprocal_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("RequantizationRange")
def requantization_range_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Requantize")
def requantize_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Rint")
def rint_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")


@TensorFlowConverter.register_handler("Round")
def round_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("Rsqrt")
def rsqrt_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Element-wise reciprocal square root."""
    v_in = converter.get_variable(tf_op.inputs[0])
    v_out, = Rsqrt(None)(v_in)
    converter.set_variable(tf_op.outputs[0], v_out)
@TensorFlowConverter.register_handler("RsqrtGrad")
def rsqrt_grad_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("SegmentMax")
def segment_max_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Stub handler: conversion of the "SegmentMax" op is not implemented yet."""
    message = f"[TensorFlowConverter] {tf_op.type} is not supported yet."
    raise NotImplementedError(message)
@TensorFlowConverter.register_handler("SegmentMean")
def segment_mean_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
    """Stub handler: conversion of the "SegmentMean" op is not implemented yet."""
    message = f"[TensorFlowConverter] {tf_op.type} is not supported yet."
    raise NotImplementedError(message)
@TensorFlowConverter.register_handler("SegmentMin")
def segment_min_handler(converter: | |
<reponame>scvannost/multilang<gh_stars>0
"""Run Python, R, Matlab, and bash in the same file.
Expected uses
-------------
1. Natively in Python:
>>> import multilang
This allows for both script and interactive use
>>> # run a script
>>> fname = 'path/to/file.mul'
>>> ml = multilang.as_multilang(fname)
>>> ml = multilang.as_multilang('''#! multilang
... <code>
... ''')
>>> # use interactively
>>> ml = multilang.Master()
>>> ml.r('# code here')
2. Running scripts from Terminal:
$ python -m multilang path/to/file.mul
Warning
-------
The underlying connection relies on `pexpect`, which does not
support a Windows environment.
Some future release will be Windows-compatible.
Scripts
-------
Scripts contain source code in Python, R, Matlab, and/or bash.
These files are marked by the first line of code:
#! multilang [<lang>]
Switching between environments is done using:
#! <lang> -> [<vars>]
See the docs for as_multilang to learn more.
Interactive
-----------
Using the `multilang.Master()` class, you can interact with multiple
environments without having to write a script.
The Python environment here is only a dictionary to load/store variables.
All Python code is expected to be run directly by the user.
See the docs for Master to learn more.
How It Works
------------
Passing variables between most environments uses temporary .mat files.
Python's file interactions use scipy.io.
R's file interactions use R.matlab.
Matlab's file interactions use the `load` and `save` commands.
Bash's interactions are done using a dict, starting with `os.environ`.
Bash commands are run using:
subprocess.run(<code>, shell=True, executable='/bin/bash')
Matlab is running as a script, so function definitions are not allowed.
Subpackages
-----------
All imported directly into the main module for convenience.
objects
Underlying classes for R and Matlab environments
Attributes
----------
DEFAULT_DUMP : func
The function called by `multilang.dump`
Default: `multilang.dump_dict`
DEFAULT_DUMPS : func
The function called by `multilang.dumps`
Default: `multilang.dumps_json`
_VERSION : str
The current version of multilang
_SUPPORTED : list[str]
The currently supported languages
_VARIABLES : dict[str, object]
The storage of variables in the Python environment
_IMPORTED : dict[name: object]
Things imported by multilang which are available without
import in Scripts.
Major Functions
---------------
as_multilang
Either as_multilang_unix or as_multilang_windows as detected
by platform.system
as_multilang_unix
Run multilang code on a Unix-based system; eg. Ubuntu, Mac
as_multilang_windows
Not implemented
Run multilang code on Windows
Classes
-------
Master
An interactive object for multilang coding
RObject
An interactive R environment
MatlabObject
An interactive Matlab environment
Builtin Functions for Scripting
-------------------------------
as_array
For passing Python variables as arrays
mod
Replaces Python's modulo operator %
Minor Functions
---------------
py_to_bash
py_to_r
py_to_mat
Move variables from the Python variable dict to the given environment
r_to_bash
r_to_py
r_to_mat
Move variables from R to the given environment
mat_to_bash
mat_to_py
mat_to_r
Move variables from Matlab to the given environment
bash_to_py
bash_to_mat
bash_to_r
Move variables from the bash env dict to the given environment
"""
# ------------------------------- Imports ------------------------------- #
import json
import numpy as np
import os
import pandas as pd
from platform import system
from random import choices
import re
import scipy.io as sio
import sys
import subprocess
from tempfile import NamedTemporaryFile
from .objects import RObject, MatlabObject
# ------------------------------ Constants ------------------------------ #
# NOTE(review): `global` at module scope is a no-op; kept as a statement of
# intent that _VARIABLES is the shared cross-language variable store.
global _VARIABLES
# Storage for variables in the Python environment of a multilang session.
_VARIABLES = {}
# Names available inside multilang scripts without an explicit import.
_IMPORTED = {
'json' : json,
'np' : np,
'os' : os,
'pd' : pd,
'system': system,
'choices': choices,
're' : re,
'sio' : sio,
'subprocess': subprocess,
'NamedTemporaryFile': NamedTemporaryFile,
'RObject': RObject,
'MatlabObject': MatlabObject
}
# Languages a script may switch to with a '#! <lang>' directive.
_SUPPORTED = ['python3', 'matlab', 'r', 'bash']
# Current package version.
_VERSION = '0.1.3a1'
# Defaults at bottom
# --------------------------- Helper Functions --------------------------- #
def py_to_bash(_line, _environ : dict = None):
"""Move variables from Python to bash.
Parameters
----------
_line : str, Iterable[str]
If str, one of the following:
1. '#! b[ash] -> <vars>'
2. '<vars>'
where <vars> is a comma separated list of Python variable names
If Iterable[str]: [<var1>, <var2>, ...]
where <varX> is the name of a Python variable
All variables must be str, int, float.
_environ : optional[dict]
The dict to which the variables are added
Default: os.environ
Returns
-------
dict[str: object]
The requested variables and their corresponding values
Meant to be used as @env in `subprocess.run`
Raises
------
ValueError
If _line is not the right format
NameError
If a requested variable is not in the Python environment
TypeError
If a requested variable is not str, int, float
"""
## input validation
if not _environ: _environ = os.environ.copy() # default
if type(_line) is str and ('#!' in _line or '%!' in _line):
# _line = '#! <lang> -> <vars>'
if not '->' in _line:
raise ValueError('Misformatted _line: ' + _line)
_to_load = _line.split('->')[1].replace(' ','').split(',')
elif type(_line) is str:
# _line = '<vars>'
_to_load = _line.replace(' ','').split(',')
elif hasattr(_line, '__iter__') and all([type(i) is str for i in _line]):
# _line = [<var i>, ...]
_to_load = list(_line)
else:
raise ValueError('Unrecognized _line: ' + str(_line))
if _to_load[0] == '':
# null case
return _environ
## get the variables
_out = {}
for _i in _to_load:
if _i not in _VARIABLES:
raise NameError(_i+' not in Python environment')
elif type(_VARIABLES[_i]) not in [str, int, float]:
raise TypeError('Only str, int, float can be passed to bash')
else:
_out[_i] = _VARIABLES[_i]
# move the variables
_environ.update(_out)
return _environ
def bash_to_py(_line, _environ : dict, _load : bool = True):
    """Import variables from a bash environment dict into Python.

    Parameters
    ----------
    _line : str, Iterable[str]
        One of: a '#! p[y[thon]] -> <vars>' directive, a bare comma-separated
        '<vars>' string, or an iterable of bash variable names.
    _environ : dict
        The dictionary where the variables are stored.
        Generally, comes from `multilang.bash`.
    _load : optional[bool]
        If True, also stores the variables in `multilang._VARIABLES`.

    Returns
    -------
    dict[str, object]
        The requested variables and their corresponding values.

    Raises
    ------
    ValueError
        If _line is not one of the accepted forms.
    NameError
        If a requested variable is not in the given @_environ.
    """
    ## input validation
    if type(_line) is str and ('#!' in _line or '%!' in _line):
        # _line = '#! <lang> -> <vars>'
        if not '->' in _line:
            raise ValueError('Misformatted line: "' + _line + '"')
        _to_load = _line.split('->')[1].replace(' ','').split(',')
    elif type(_line) is str:
        # _line = '<vars>'
        _to_load = _line.replace(' ','').split(',')
    elif hasattr(_line, '__iter__') and all([type(i) is str for i in _line]):
        # _line = [<var i>, ...]
        _to_load = list(_line)
    else:
        raise ValueError('Unrecognized _line')
    if _to_load[0] == '':
        # null case: nothing requested
        return {}
    # get the variables
    _out = {}
    for _i in _to_load:
        if _i not in _environ:
            raise NameError(str(_i) + ' not in bash environment.')
        _out[_i] = _environ[_i]
    if _load:
        # BUG FIX: was `_VARIABLES.updat(_out)`, which raised AttributeError
        # whenever _load was left at its default value.
        _VARIABLES.update(_out)
    return _out
def bash_to_r(_line, _environ : dict, _r_object : "RObject" = None):
    """Import variables from a bash environment dict into an R session.

    Parameters
    ----------
    _line : str, Iterable[str]
        One of: a '#! r[lang] -> <vars>' directive, a bare comma-separated
        '<vars>' string, or an iterable of bash variable names.
    _environ : dict
        The dictionary where the variables are stored.
        Generally, comes from `multilang.bash`.
    _r_object : optional[RObject]
        The R environment to load the variables into; a fresh RObject() is
        created when omitted or no longer alive.
        BUG FIX: the default used to be `RObject()` evaluated once at import
        time and shared across every call (mutable default argument).

    Returns
    -------
    RObject
        An R environment with the given variables loaded.

    Raises
    ------
    ValueError
        If _line is not one of the accepted forms.
    NameError
        If a requested variable is not in the given @_environ.
    """
    ## input validation
    if type(_line) is str and ('#!' in _line or '%!' in _line):
        # _line = '#! <lang> -> <vars>'
        if not '->' in _line:
            raise ValueError('Misformatted line: "' + _line + '"')
        _to_load = _line.split('->')[1].replace(' ','').split(',')
    elif type(_line) is str:
        # _line = '<vars>'
        _to_load = _line.replace(' ','').split(',')
    elif hasattr(_line, '__iter__') and all([type(i) is str for i in _line]):
        # _line = [<var i>, ...]
        _to_load = list(_line)
    else:
        raise ValueError('Unrecognized _line')
    if _r_object is None or not _r_object.isalive:
        # create (or replace) the R session on demand
        _r_object = RObject()
    if _to_load[0] == '':
        # null case: nothing requested
        return _r_object
    # get the variables
    _out = {}
    for _i in _to_load:
        if _i not in _environ:
            raise NameError(str(_i) + ' not in bash environment.')
        _out[_i] = _environ[_i]
    # send to R, quoting string values
    _r_object.sendlines([
        _k + ' <- ' + ('"' + _v + '"' if type(_v) is str else str(_v))
        for _k, _v in _out.items()
    ]
    )
    return _r_object
def bash_to_mat(_line, _environ : dict, _mat_object : "MatlabObject" = None):
    """Import variables from a bash environment dict into a Matlab session.

    Parameters
    ----------
    _line : str, Iterable[str]
        One of: a '#! m[at[lab]] -> <vars>' directive, a bare comma-separated
        '<vars>' string, or an iterable of bash variable names.
    _environ : dict
        The dictionary where the variables are stored.
        Generally, comes from `multilang.bash`.
    _mat_object : optional[MatlabObject]
        The Matlab environment to load the variables into; a fresh
        MatlabObject() is created when omitted or no longer alive.
        BUG FIX: the default used to be `MatlabObject()` evaluated once at
        import time and shared across every call (mutable default argument).

    Returns
    -------
    MatlabObject
        A Matlab environment with the given variables loaded.

    Raises
    ------
    ValueError
        If _line is not one of the accepted forms.
    NameError
        If a requested variable is not in the given @_environ.
    """
    ## input validation
    if type(_line) is str and ('#!' in _line or '%!' in _line):
        # _line = '#! <lang> -> <vars>'
        if not '->' in _line:
            raise ValueError('Misformatted line: "' + _line + '"')
        _to_load = _line.split('->')[1].replace(' ','').split(',')
    elif type(_line) is str:
        # _line = '<vars>'
        _to_load = _line.replace(' ','').split(',')
    elif hasattr(_line, '__iter__') and all([type(i) is str for i in _line]):
        # _line = [<var i>, ...]
        _to_load = list(_line)
    else:
        raise ValueError('Unrecognized _line')
    if _mat_object is None or not _mat_object.isalive:
        # create (or replace) the Matlab session on demand
        _mat_object = MatlabObject()
    if _to_load[0] == '':
        # null case: nothing requested
        return _mat_object
    # get the variables
    _out = {}
    for _i in _to_load:
        if _i not in _environ:
            raise NameError(str(_i) + ' not in bash environment.')
        _out[_i] = _environ[_i]
    # bundle them into a temporary .mat file
    _temp_file = NamedTemporaryFile(suffix='.mat')
    sio.savemat(_temp_file, _out)
    _temp_file.seek(0)
    # load them inside Matlab
    _mat_object.sendline('load \'' + _temp_file.name + '\';')
    return _mat_object
def r_to_bash(_line, _r_object : MatlabObject, _environ : dict = None):
"""Move variables from R to bash.
Parameters
----------
_line : str, Iterable[str]
If str, one of the following:
1. '#! b[ash] -> <vars>'
2. '<vars>'
where <vars> is a comma separated list of R variable names
If Iterable[str]: [<var1>, <var2>, ...]
where <varX> is the name of an R variable
All variables must be | |
<gh_stars>0
import tensorflow as tf
import numpy as np
import os
import h5py
import time
from PIL import Image
class SRCNN:
    """Three-layer SRCNN super-resolution network (TF1 graph mode).

    Bicubically upscaled low-resolution images are passed through
    conv(9x9) -> ReLU -> conv(f2xf2) -> ReLU -> conv(5x5), all VALID padding,
    trained with pixelwise MSE against ground truth and evaluated with PSNR
    against the bicubic baseline.

    NOTE(review): images are assumed to be channels-last float arrays scaled
    to [0, 1] -- confirm against the data-preparation scripts.
    """

    def __init__(self, args, sess):
        """Store the configuration and build the full graph.

        Parameters
        ----------
        args : argparse-style namespace carrying directories, scale,
            learning_rate, epochs, n_channels, batch_size, momentum,
            colour_format ('ych'|'ycbcr'|'rgb'), network_filters
            ('9-1-5'|'9-3-5'|'9-5-5') and prepare_data ('matlab'|'octave').
        sess : tf.Session used for every run.
        """
        self.sess = sess
        self.do_train = args.do_train
        self.do_test = args.do_test
        self.train_dir = args.train_dir
        self.test_dir = args.test_dir
        self.valid_dir = args.valid_dir
        self.model_dir = args.model_dir
        self.result_dir = args.result_dir
        self.scale = args.scale
        self.learning_rate = args.learning_rate
        self.epochs = args.epochs
        self.n_channels = args.n_channels
        self.batch_size = args.batch_size
        self.momentum = args.momentum
        self.colour_format = args.colour_format
        self.network_filters = args.network_filters
        self.prepare_data = args.prepare_data
        # The checkpoint name records which colour format the model was trained on.
        if self.colour_format == 'ych':
            self.model_name = 'srcnn_ych'
        elif self.colour_format == 'ycbcr':
            self.model_name = 'srcnn_ycbcr'
        elif self.colour_format == 'rgb':
            self.model_name = 'srcnn_rgb'
        else:
            # BUG FIX: previously left model_name unset, failing much later
            # with an AttributeError at checkpoint time.
            raise ValueError("Invalid argument for colour_format")
        # Spatial dims stay dynamic so full images of any size can be fed at test time.
        self.X = tf.placeholder(dtype=tf.float32, shape=[None, None, None, self.n_channels])
        self.y = tf.placeholder(dtype=tf.float32, shape=[None, None, None, self.n_channels])
        # Only the middle kernel size differs between the supported layouts, so
        # the three near-identical if-branches collapse into a lookup table.
        try:
            middle = {'9-1-5': 1, '9-3-5': 3, '9-5-5': 5}[self.network_filters]
        except KeyError:
            # BUG FIX: previously left self.weights unset on a bad value.
            raise ValueError("Invalid argument for network_filters")
        self.weights = {
            'w1': tf.Variable(initial_value=tf.random_normal(shape=[9, 9, self.n_channels, 64], stddev=1e-3),
                              dtype=tf.float32),
            'w2': tf.Variable(initial_value=tf.random_normal(shape=[middle, middle, 64, 32], stddev=1e-3),
                              dtype=tf.float32),
            'w3': tf.Variable(initial_value=tf.random_normal(shape=[5, 5, 32, self.n_channels], stddev=1e-3),
                              dtype=tf.float32)
        }
        self.biases = {
            'b1': tf.Variable(initial_value=tf.zeros(shape=[64], dtype=tf.float32)),
            'b2': tf.Variable(initial_value=tf.zeros(shape=[32], dtype=tf.float32)),
            'b3': tf.Variable(initial_value=tf.zeros(shape=[self.n_channels], dtype=tf.float32))
        }
        self.output = self.model()
        # Pixelwise MSE; the clipped output is what PSNR and saving consume.
        self.loss = tf.reduce_mean(tf.squared_difference(self.output, self.y))
        self.result = tf.clip_by_value(self.output, clip_value_min=0., clip_value_max=1.)
        self.saver = tf.train.Saver()
        self.optimizer = self.optimize()

    def optimize(self):
        """Build one grouped Momentum step over all layers.

        NOTE(review): both optimizers use the same learning rate, so splitting
        the variables into two lists only matters if the rates are meant to
        differ (as in the original SRCNN paper) -- confirm the intent.
        """
        var_list_1 = [self.weights['w1'], self.biases['b1'], self.weights['w2'], self.biases['b2']]
        var_list_2 = [self.weights['w3'], self.biases['b3']]
        optimizer_1 = tf.train.MomentumOptimizer(
            learning_rate=self.learning_rate, momentum=self.momentum
        ).minimize(loss=self.loss, var_list=var_list_1)
        optimizer_2 = tf.train.MomentumOptimizer(
            learning_rate=self.learning_rate, momentum=self.momentum
        ).minimize(loss=self.loss, var_list=var_list_2)
        return tf.group(optimizer_1, optimizer_2)

    def model(self):
        """Forward pass: two ReLU conv layers plus a linear reconstruction layer."""
        conv1 = tf.nn.conv2d(input=self.X, filters=self.weights['w1'], strides=[1, 1, 1, 1],
                             padding='VALID')
        activ1 = tf.nn.relu(tf.nn.bias_add(conv1, self.biases['b1']))
        conv2 = tf.nn.conv2d(input=activ1, filters=self.weights['w2'], strides=[1, 1, 1, 1],
                             padding='VALID')
        activ2 = tf.nn.relu(tf.nn.bias_add(conv2, self.biases['b2']))
        conv3 = tf.nn.conv2d(input=activ2, filters=self.weights['w3'], strides=[1, 1, 1, 1],
                             padding='VALID')
        # No activation on the last layer: it reconstructs pixel values directly.
        return tf.nn.bias_add(conv3, self.biases['b3'])

    def train(self):
        """Train for self.epochs epochs, printing validation PSNR after each."""
        print("Training Will Start Shortly")
        if self.prepare_data == 'matlab':
            train_X, train_y = load_matlab_data(self.train_dir, self.colour_format)
            valid_X_bc, valid_y_bc = make_matlab_bc_data(self.valid_dir, self.scale, self.colour_format)
            valid_y_gt = make_matlab_gt_data(self.valid_dir, self.colour_format)
        elif self.prepare_data == 'octave':
            train_X, train_y = load_octave_data(self.train_dir, self.colour_format)
            valid_X_bc, valid_y_bc = make_octave_bc_data(self.valid_dir, self.scale, self.colour_format)
            valid_y_gt = make_octave_gt_data(self.valid_dir, self.colour_format)
        else:
            # BUG FIX: previously only printed a warning and then crashed
            # with a NameError on the unset training arrays.
            raise ValueError("Invalid arguments for prepare_data")
        start_time = time.time()
        self.sess.run(tf.global_variables_initializer())
        # BUG FIX: the original re-ran self.model() and self.optimize() here,
        # adding orphan (and uninitialized) ops to the graph; everything is
        # already built in __init__.
        total_batches = len(train_X) // self.batch_size
        batch_size = self.batch_size
        for i in range(self.epochs):
            epoch_start = time.time()  # BUG FIX: was sampled before any work ran
            loss = 0
            for j in range(total_batches):
                batch_X = train_X[j * batch_size:(j + 1) * batch_size]
                batch_y = train_y[j * batch_size:(j + 1) * batch_size]
                _, batch_error = self.sess.run([self.optimizer, self.loss],
                                               feed_dict={self.X: batch_X, self.y: batch_y})
                loss = loss + batch_error
            # Per-epoch validation: PSNR of the network output and of the
            # plain bicubic upscale, both against ground truth.
            valid_psnr = []
            bicubic_psnr = []
            for k in range(len(valid_X_bc)):
                h1, w1, c1 = valid_X_bc[k].shape
                h2, w2, c2 = valid_y_bc[k].shape
                h3, w3, c3 = valid_y_gt[k].shape
                if self.n_channels == 1:
                    # Luminance-only training: feed just channel 0.
                    valid_X_bc_ych = valid_X_bc[k][:, :, 0].reshape([1, h1, w1, 1])
                    valid_y_bc_ych = valid_y_bc[k][:, :, 0].reshape([1, h2, w2, 1])
                    valid_y_gt_ych = valid_y_gt[k][:, :, 0].reshape([1, h3, w3, 1])
                    results = self.sess.run(self.result, feed_dict={self.X: valid_X_bc_ych, self.y: valid_y_gt_ych})
                    # Crop borders so all images share the VALID-conv extent.
                    valid_y_bc_ych = border_crop(valid_y_bc_ych[0])
                    valid_y_gt_ych = border_crop(valid_y_gt_ych[0])
                    results = results[0]
                    bicubic_psnr.append(psnr(valid_y_bc_ych, valid_y_gt_ych))
                    valid_psnr.append(psnr(results, valid_y_gt_ych))
                elif self.n_channels == 3:
                    valid_X_bc_ = valid_X_bc[k].reshape([1, h1, w1, c1])
                    valid_y_bc_ = valid_y_bc[k].reshape([1, h2, w2, c2])
                    valid_y_gt_ = valid_y_gt[k].reshape([1, h3, w3, c3])
                    results = self.sess.run(self.result, feed_dict={self.X: valid_X_bc_, self.y: valid_y_gt_})
                    valid_y_bc_ = border_crop(valid_y_bc_[0])
                    valid_y_gt_ = border_crop(valid_y_gt_[0])
                    results = results[0]
                    bicubic_psnr.append(psnr(valid_y_bc_, valid_y_gt_))
                    valid_psnr.append(psnr(results, valid_y_gt_))
                else:
                    raise ValueError("Invalid Argument for n_channels")
            # BUG FIX: the two f-strings were concatenated without a
            # separator, and the reported time never measured the epoch.
            print(f"Epoch: {i + 1}, Bicubic PSNR: {np.mean(bicubic_psnr)}, SRCNN PSNR: {np.mean(valid_psnr)}, "
                  f"Time: {time.time() - epoch_start}")
            self.save()
        print("Training Complete")
        end_time = time.time()
        print("Time Taken: {}".format(end_time - start_time))

    def test(self):
        """Restore the latest checkpoint and report PSNR over the test set."""
        print("Testing will commence")
        if self.prepare_data == 'matlab':
            test_X_bc, test_y_bc = make_matlab_bc_data(self.test_dir, self.scale, self.colour_format)
            test_y_gt = make_matlab_gt_data(self.test_dir, self.colour_format)
        elif self.prepare_data == 'octave':
            test_X_bc, test_y_bc = make_octave_bc_data(self.test_dir, self.scale, self.colour_format)
            test_y_gt = make_octave_gt_data(self.test_dir, self.colour_format)
        else:
            # BUG FIX: there was no else branch, so a bad value crashed later
            # with a NameError instead of a clear error.
            raise ValueError("Invalid arguments for prepare_data")
        self.sess.run(tf.global_variables_initializer())
        self.load()
        bicubic_psnr = []
        test_psnr = []
        start_time = time.time()
        for i in range(len(test_X_bc)):
            h1, w1, c1 = test_X_bc[i].shape
            h2, w2, c2 = test_y_bc[i].shape
            h3, w3, c3 = test_y_gt[i].shape
            if self.n_channels == 1:
                test_X_bc_ych = test_X_bc[i][:, :, 0].reshape([1, h1, w1, 1])
                test_y_bc_ych = test_y_bc[i][:, :, 0].reshape([1, h2, w2, 1])
                test_y_gt_ych = test_y_gt[i][:, :, 0].reshape([1, h3, w3, 1])
                # Chroma channels bypass the network and are re-attached below.
                test_y_bc_cbcr = test_y_bc[i][:, :, 1:3]
                results = self.sess.run(self.result, feed_dict={self.X: test_X_bc_ych, self.y: test_y_gt_ych})
                test_y_bc_ych = border_crop(test_y_bc_ych[0])
                test_y_gt_ych = border_crop(test_y_gt_ych[0])
                results = results[0]
                gt = test_y_gt[i]
                bc = test_y_bc[i]
                srcnn = np.concatenate((results, test_y_bc_cbcr), axis=2)
                # BUG FIX: this call was missing the result_dir argument that
                # the 3-channel branch passes to save_res.
                save_res(self.result_dir, gt, bc, srcnn, i, self.colour_format)
                bicubic_psnr.append(psnr(test_y_bc_ych, test_y_gt_ych))
                test_psnr.append(psnr(results, test_y_gt_ych))
            elif self.n_channels == 3:
                test_X_bc_ = test_X_bc[i].reshape([1, h1, w1, c1])
                test_y_bc_ = test_y_bc[i].reshape([1, h2, w2, c2])
                test_y_gt_ = test_y_gt[i].reshape([1, h3, w3, c3])
                results = self.sess.run(self.result, feed_dict={self.X: test_X_bc_, self.y: test_y_gt_})
                test_y_bc_ = border_crop(test_y_bc_[0])
                test_y_gt_ = border_crop(test_y_gt_[0])
                results = results[0]
                gt = test_y_gt[i]
                bc = test_y_bc[i]
                srcnn = results
                save_res(self.result_dir, gt, bc, srcnn, i, self.colour_format)
                bicubic_psnr.append(psnr(test_y_bc_, test_y_gt_))
                test_psnr.append(psnr(results, test_y_gt_))
            else:
                raise ValueError("Invalid Argument for n_channels")
        for p in range(len(bicubic_psnr)):
            print("Bicubic PSNR of Image {}: {:.2f}".format(p, bicubic_psnr[p]))
        for q in range(len(test_psnr)):
            print("SRCNN PSNR of Image {}: {:.2f}".format(q, test_psnr[q]))
        print("Average Bicubic PSNR: {:.2f}".format(np.mean(bicubic_psnr)))
        print("Average SRCNN PSNR: {:.2f}".format(np.mean(test_psnr)))
        end_time = time.time()
        print("Time taken: {}".format(end_time - start_time))

    def save(self):
        """Write a checkpoint under model_dir, creating the directory if needed."""
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)
        self.saver.save(self.sess, self.model_dir + self.model_name, global_step=self.epochs)

    def load(self):
        """Restore the most recent checkpoint from model_dir, if any."""
        path = self.model_dir
        if path:
            checkpoint_path = tf.train.latest_checkpoint(path)
            self.saver.restore(self.sess, checkpoint_path)
            print("Model Loaded from {}".format(self.model_dir))
        else:
            print("No model to load")
def psnr(x, y):
    """Peak signal-to-noise ratio in dB for signals scaled to [0, 1].

    Identical inputs yield 100 as a saturation value instead of infinity.
    """
    mse = np.mean(np.subtract(x, y) ** 2)
    return 100 if mse == 0 else 10 * np.log10(1. / mse)
def load_matlab_data(train_dir, colour_format):
    """Load the (data, label) training patch arrays from the MATLAB-made HDF5 file.

    The file name is selected by colour format; an unknown format falls back
    to reading train_dir itself, exactly as before.
    """
    suffixes = {
        'ych': '/train_91_ychannels_matlab.h5',
        'ycbcr': '/train_91_ycbcrchannels_matlab.h5',
        'rgb': '/train_91_rgbchannels_matlab.h5',
    }
    path = train_dir + suffixes.get(colour_format, '')
    with h5py.File(path, 'r') as f:
        x = np.array(f.get('data'))
        y = np.array(f.get('label'))
    return x, y
def load_octave_data(train_dir, colour_format):
    """Load the (data, label) training patch arrays from the Octave-made HDF5 file.

    Octave nests each dataset under a 'value' subgroup, unlike the MATLAB
    export.  An unknown colour format falls back to reading train_dir itself,
    exactly as before.
    """
    suffixes = {
        'ych': '/train_91_ychannels_octave.h5',
        'ycbcr': '/train_91_ycbcrchannels_octave.h5',
        'rgb': '/train_91_rgbchannels_octave.h5',
    }
    path = train_dir + suffixes.get(colour_format, '')
    with h5py.File(path, 'r') as f:
        x = np.array(f.get('data').get('value'))
        y = np.array(f.get('label').get('value'))
    return x, y
def imread(path):
    """Open the image at *path* with PIL (decoded lazily on first access)."""
    return Image.open(path)
def make_matlab_bc_data(train_dir, scale, colour_format):
    """Load bicubically upscaled images prepared by MATLAB.

    Returns (x, y): two lists of float arrays scaled to [0, 1], both read
    from the same upscaled directory -- x feeds the network, y is the
    bicubic baseline for the comparison PSNR.

    NOTE(review): images are returned in os.listdir order, which is not
    guaranteed to match the ground-truth directory's order -- confirm the
    pairing with make_matlab_gt_data.

    Raises
    ------
    ValueError
        If scale is not 2, 3 or 4, or colour_format is unknown.
    """
    if colour_format in ('ych', 'ycbcr'):
        # BUG FIX: the original tested `colour_format == 'ych' or 'ycbcr'`,
        # which is always truthy, so the rgb branch was unreachable.
        suffix = '_{}x_upscaled_mat_ycbcr/'
    elif colour_format == 'rgb':
        suffix = '_{}x_upscaled_mat_rgb/'
    else:
        raise ValueError("Invalid value for colour_format")
    if scale not in (2, 3, 4):
        # BUG FIX: previously only printed and then crashed with a NameError
        # on the unbound directory path.
        raise ValueError("Invalid value for scale")
    lr_us_path = train_dir + suffix.format(scale)
    x = []
    y = []
    for file in os.listdir(lr_us_path):
        full_path = os.path.join(lr_us_path, file)
        x.append(np.array(imread(full_path)) / 255.)
        y.append(np.array(imread(full_path)) / 255.)
    return x, y
def make_octave_bc_data(train_dir, scale, colour_format):
    """Load bicubically upscaled images prepared by Octave.

    Returns (x, y): two lists of float arrays scaled to [0, 1], both read
    from the same upscaled directory -- x feeds the network, y is the
    bicubic baseline for the comparison PSNR.

    NOTE(review): images are returned in os.listdir order, which is not
    guaranteed to match the ground-truth directory's order -- confirm the
    pairing with the gt loader.

    Raises
    ------
    ValueError
        If scale is not 2, 3 or 4, or colour_format is unknown.
    """
    if colour_format in ('ych', 'ycbcr'):
        # BUG FIX: the original tested `colour_format == 'ych' or 'ycbcr'`,
        # which is always truthy, so the rgb branch was unreachable.
        suffix = '_{}x_upscaled_oct_ycbcr/'
    elif colour_format == 'rgb':
        suffix = '_{}x_upscaled_oct_rgb/'
    else:
        raise ValueError("Invalid value for colour_format")
    if scale not in (2, 3, 4):
        # BUG FIX: previously only printed and then crashed with a NameError
        # on the unbound directory path.
        raise ValueError("Invalid value for scale")
    lr_us_path = train_dir + suffix.format(scale)
    x = []
    y = []
    for file in os.listdir(lr_us_path):
        full_path = os.path.join(lr_us_path, file)
        x.append(np.array(imread(full_path)) / 255.)
        y.append(np.array(imread(full_path)) / 255.)
    return x, y
def make_matlab_gt_data(train_dir, colour_format):
    """Load ground-truth images prepared by MATLAB as [0, 1] float arrays.

    NOTE(review): images are returned in os.listdir order, which is not
    guaranteed to match the upscaled directory's order -- confirm the
    pairing with make_matlab_bc_data.

    Raises
    ------
    ValueError
        If colour_format is unknown.
    """
    if colour_format in ('ych', 'ycbcr'):
        # BUG FIX: the original tested `colour_format == 'ych' or 'ycbcr'`,
        # which is always truthy, so the rgb branch was unreachable.
        gt_path = train_dir + '_gt_mat_ycbcr/'
    elif colour_format == 'rgb':
        gt_path = train_dir + '_gt_mat_rgb/'
    else:
        raise ValueError("Invalid value for colour_format")
    y = []
    for file in os.listdir(gt_path):
        y.append(np.array(imread(os.path.join(gt_path, file))) / 255.)
    return y
def | |
<filename>LPES-video/08.02-rezystor+kondensator/08.02.01-rezystor.py
# Copyright (c) 2020-2021 Matematyka dla Ciekawych Świata (http://ciekawi.icm.edu.pl/)
# Copyright (c) 2020-2021 <NAME> <<EMAIL>>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Accumulate clips across scenario files: reuse clipData if an earlier file
# in the build already defined it, otherwise start with an empty list.
try: clipData
except NameError: clipData = []
clipData += [
{ 'title': [ "#08.2", "Rezystor i ", "kondensator", "" ] },
{ 'comment': 'rezystor' },
{
'image': [
[0.0, eduMovie.convertFile("rezystor.sch", negate=True)],
],
'text' : [
'Mianem elementów biernych określamy takie elementy dla których <m> spełnione jest prawo Ohma, bądź jego uogólnienia dla prądów przemiennych, <m> oczywiście z granicą stałości temperaturowej parametrów danego elementu. <m>'
'Podstawowym takim elementem jest opornik, zwany też rezystorem, <m> jest to taki element który w najlepszym stopniu ze wszystkich innych elementów <m> spełnia nam prawo Ohma, <m>'
'ponieważ jego ideą jest reprezentować właśnie tą oporność w obwodzie. <m>'
'Rezystor wprowadzony w obwód wprowadza do niego jakąś rezystancję, <m> inaczej opór, związany ze swoją wartością nominalną. <m>'
'Wartość oporu wyrażamy w omach - najczęściej są to setki omów lub kilo omy. <m>'
'Na ekranie widzimy też symbol rezystora w obu notacjach, <m> czyli tak zwanej europejskiej i amerykańskiej. <m>'
'Należy zaznaczyć że ten amerykański zygzaczek nie może być zaokrąglony, <m> musi mieć ostre zęby, bo inaczej zacząłby przypominać symbol innego elementu. <m>'
'Rezystor często służy do ograniczenia wartości prądu <m> przepływającego w obwodzie, bo jak wprowadzamy większy opór <m>'
'to przy stałym napięciu maleje nam prąd płynący w takim obwodzie, <m> co wynika z prawa Ohma. <m>'
'Na ogół większość układów elektronicznych rozważamy właśnie <m> z punktu widzenia napięciowego (a rzadziej prądowego), <m>'
'czyli to wartość napięcia ma charakter pierwotny, <m> a prąd jest wynikowym tej wartości i wartości elementów w układzie. <m>'
'W takich przypadkach rezystor ogranicza prąd <m> – im większa rezystancja tym mniejszy prąd. <m>'
'Są jednak układy w których to wartość prądu jest pierwotna, <m> a napięcia wynikają z niej i wartości elementów. <m>'
'W tych przypadkach rezystor służy do uzyskania <m> spadku napięcia proporcjonalnego do płynącego prądu. <m>'
]
},
{
'image': [
[0.0, eduMovie.convertFile('moc.tex', margins=12, dpi=260, viaCairo=True)],
],
'text' : [
'Jako że potencjały tożsame są z pracą <m> związaną z przenoszeniem ładunku do nieskończoności, <m> to ze spadkiem napięcia związana jest pewna różnica energetyczna. <m>'
'Jako że mamy prawo zachowania energii, to ta energia gdzieś musi się podziać. <m>'
'W związku z czym taki rezystor zamienia nam tą energię prądu elektrycznego <m> na ciepło zgodnie z zależnością, że moc to jest napięcie razy prąd. <m>'
'Możemy też powiedzieć że to jest <m> napięcie w kwadracie podzielone przez rezystancję takiego rezystora <m> lub prąd w kwadracie pomnożony przez oporność takiego rezystora. <m>'
'Warto zauważyć że o ile przy stałym napięciu jeżeli zwiększamy rezystancję <m> to moc wydzielona na takim rezystorze będzie malała, <m>'
'to przy stałym prądzie jeżeli zwiększamy rezystancję <m> to moc wydzielana będzie rosła. <m>'
]
},
{
'image': [
[0.0, eduMovie.convertFile('rezystory.svg', margins=0)],
],
'text' : [
'Rzeczywisty rezystor oprócz samej rezystancji posiada też inne parametry. <m>'
'Jednym z nich jest maksymalna moc, która może się na tym elemencie wydzielić. <m>'
'Na przykład jeżeli mamy mały rezystorek, stosowany powszechnie <m> w układach elektronicznych, to nie jesteśmy w stanie wydzielić na nim <m> kilowatów energii elektrycznej, ponieważ on się po prostu spali. <m>'
'Na ogół dla takich powszechnie używanych rezystorów <m> maksymalna moc wydzielana jest poniżej połówki wata. <m>'
'Bardzo często są to rezystory <0,25>[jedna czwarta] wata, niekiedy <0,5>[jedna druga] lub <0,75>[trzy czwarte] wata. <m>'
'Zasadniczo powyżej jednego, czy nawet pół wata mówimy o rezystorach dużej mocy <m> i wtedy na schemacie oprócz rezystancji danego elementu często podaje się to <m>'
'jaka powinna być maksymalna moc która może się na nim wydzielić, <m> gdyż wymaga to zastosowania elementu niestandardowego. <m>',
'Kolejnym parametrem jest dokładność, czyli to jak bardzo <m> opór konkretnego elementu może odbiegać od wartości nominalnej. <m>'
'Jeżeli mówimy że rezystor ma na przykład 1000 Omów, <m> to rzeczywiście wyprodukowane elementy nie będą miały dokładnie 1000 Omów, <m> tylko będą bliskie tej wartości <m>'
'i dokładność mówi nam jak bliskie tej wartości powinny być. <m>'
'Na ogół jest to około pięciu procent, jeżeli wymagamy większej dokładności <m> to również takie rzeczy oznaczane są na schemacie albo w wykazie elementów. <m>',
'Innym aspektem również powiązanym poniekąd z dokładnością danego elementu <m> jest stabilność oporu w funkcji temperatury oraz w funkcji przyłożonego napięcia. <m>'
'Typowo chcemy aby opór nie zależał znacznie od tych parametrów. <m>'
'Niekiedy jednak w elektronice stosuje się elementy, <m> których rezystancja celowo zależy od temperatury. <m>'
'Mają one dobrze określoną charakterystykę oporu w funkcji temperatury <m> i to może być zarówno charakterystyka rosnąca, malejąca, jak też nie liniowa, <m>'
'czyli opór danego elementu może rosnąć wraz z temperaturą, może maleć <m> albo może zmienić się dość gwałtownie <m> przy przekroczeniu pewnej krytycznej temperatury. <m>'
'Są to różnego rodzaju termistory. <m>'
'Takie o charakterystyce liniowej służą często do pomiaru temperatury. <m>'
'Natomiast elementy których opór rośnie, a powyżej pewnej temperatury bardzo <m> gwałtownie osiąga duże wartości mogą być używane jako bezpieczniki termiczne, <m>'
'gdyż taki element wraz z przepływem dużego prądu nagrzewa się, <m> co powoduje wzrost oporu i ograniczenie tego prądu <m> lub całkowite wyłączenie gdy temperatura wzrośnie powyżej granicznej. <m>'
'Po ostygnięciu zaczyna przewodzić ponownie. <m>'
'Stosuje się także elementy których opór zmienia się <m> (i to dość gwałtownie) z napięciem do nich przyłożonym. <m>'
'Są to warystory, używane niekiedy jako zabezpieczenia przepięciowe. <m>'
]
},
{
'image': [
[0.0, eduMovie.circuitjs("dzielnik", 0, 6)],
["out3v", eduMovie.circuitjs("dzielnik", 4, 6, [("setSlider", 2, 1)])],
["load1k", eduMovie.circuitjs("dzielnik", 4, 6, [("switch", 510, 300)])],
["load100", eduMovie.circuitjs("dzielnik", 4, 6, [("switch", 600, 300)])],
["proporcj", eduMovie.circuitjs("dzielnik", 4, 10, preActions = [("setSlider", 2, 1), ("setSlider", 1, 0)], actions = [
("setSlider", 1, 0.1), ("wait", 0.7), ("setSlider", 1, 0.2), ("wait", 0.7), ("setSlider", 1, 0.4), ("wait", 0.7), ("setSlider", 1, 0.7), ("wait", 0.7), ("setSlider", 1, 1.0)
])],
],
'text' : [
'Jednym z najistotniejszych układów złożonych z samych rezystorów <m> jest rezystancyjny dzielnik napięcia. <m>'
'Są to dwa rezystory połączone ze sobą w szereg <m> i podłączone typowo do jakiegoś źródła napięcia. <m>'
'Czyli tak jak widzimy w lewej gałęzi symulacji pokazanej na ekranie. <m>'
'W efekcie między tymi rezystorami mamy napięcie <m> związane ze stosunkiem ich rezystancji. <m>'
'W pokazanym przykładzie są one równe sobie, <m> zatem mamy tam połowę napięcia zasilania, czyli 6 woltów. <mark name="out3v" />'
'Jeżeli wartość górnego zmienimy na przykład na 1500, <m> to będziemy mieli podział napięcia zasilania w proporcjach <1/4>[jedna czwarta] do <3/4>[trzech czwartych]. <m>'
'Czyli napięcie wyjściowe wyniesie 3 wolty. <m>'
'Dzieje się tak dlatego że prąd płynący przez oba rezystory jest taki sam <m>'
'(i równy prądowi jaki płynąłby przez opornik o rezystancji równej sumie ich rezystancji), a przy stałym prądzie większe napięcie odkłada się na większej rezystancji. <m>'
'Czyli na górnym, większym rezystorze mamy spadek dziewięciu woltów. <m>'
'W dzielniku suma rezystancji określa prąd płynący w takiej gałęzi, <m> a poszczególne rezystancje określają nam spadki napięcia na sobie. <m>'
'Oczywiście suma tych spadków musi być równa napięciu przyłożonemu do dzielnika, <m>'
'w efekcie napięcie wyjściowe na dzielniku zależy tylko od <m> stosunku rezystancji i napięcia wejściowego, <m> a nie zależy od łącznej rezystancji tego dzielnika. <mark name="load1k" />'
'Układ taki może być używany jako źródło jakiegoś napięcia. <m>'
'Wadą takiego źródła napięcia, jest to, że <m> napięcie wyjściowe mocno zależy od obciążenia. <m>'
'Jeżeli na przykład ten dzielnik obciążymy rezystorem jedno kiloomowym, <m> to napięcie na wyjściu spadło do 4.8 wolta, <m> a jest | |
patched_module.__dict__)
except Exception:
# TODO: syntaxerror, do not produce those mutations
exec("", patched_module.__dict__)
sys.modules[module_path] = patched_module
def pytest_configure(config):
    """Pytest hook: when a ``--mutation`` uid is given, install the
    module loader that patches the mutated module on import."""
    selected = config.getoption("mutation", default=None)
    if selected is None:
        return
    install_module_loader(UUID(hex=selected))
def pytest_addoption(parser, pluginmanager):
    """Pytest hook: register the ``--mutation`` option (mutation uid hex).

    ``pluginmanager`` is unused; it is part of pytest's hook signature.
    """
    option = dict(dest="mutation", type=str)
    parser.addoption("--mutation", **option)
def for_each_par_map(loop, pool, inc, proc, items):
    """Sequential reference implementation of the parallel map helper.

    ``loop`` and ``pool`` are accepted for interface parity with the
    parallel version but are not used: each item is passed through
    ``proc`` then ``inc``, preserving order.
    """
    return [inc(proc(item)) for item in items]
def mutation_pass(args):  # TODO: rename
    """Run the test suite against one mutation.

    ``args`` is a ``(command, uid, timeout)`` triple.  Returns True when
    the suite caught the mutation (non-zero exit), False when the
    mutation survived (tests stayed green).
    """
    command, uid, timeout = args
    command = command + ["--mutation={}".format(uid.hex)]
    out = run(command, timeout=timeout, silent=True)
    if out == 0:
        # Tests passed: the mutation was NOT detected; record it under
        # key prefix 2 so it can be reported / replayed later.
        msg = "no error with mutation: {} ({})"
        log.trace(msg, " ".join(command), out)
        with database_open(".") as db:
            db[lexode.pack([2, uid])] = b"\x00"
        return False
    else:
        # TODO: pass root path...
        # Tests failed: mutation killed; drop any stale survivor record.
        # NOTE(review): assumes deleting an absent key is a no-op in the
        # LSM store — confirm.
        with database_open(".") as db:
            del db[lexode.pack([2, uid])]
        return True
# Base pytest invocation used whenever the user supplies no TEST-COMMAND.
PYTEST = shlex.split(
    "pytest --exitfirst --no-header --tb=no --quiet --assert=plain"
)
def coverage_read(root):
    """Load the ``.coverage`` database and return a mapping of
    root-relative file path -> set of covered line numbers.

    Fix: removed a leftover ``print(key)`` debug statement that polluted
    stdout on every measured file.
    """
    coverage = Coverage(".coverage")  # TODO: use pathlib
    coverage.load()
    data = coverage.get_data()
    filepaths = data.measured_files()
    root = root.resolve()
    out = dict()
    for filepath in filepaths:
        key = str(Path(filepath).relative_to(root))
        out[key] = set(data.lines(filepath))
    return out
def database_open(root, recreate=False):
    """Open the mutation database under *root*.

    With ``recreate=True`` any existing database files are deleted first;
    otherwise a missing database is a fatal error.
    """
    base = Path(root) if not isinstance(root, Path) else root
    path = base / ".mutation.okvslite"
    if recreate:
        if path.exists():
            log.trace("Deleting existing database...")
            for leftover in base.glob(".mutation.okvslite*"):
                leftover.unlink()
    elif not path.exists():
        log.error("No database, can not proceed!")
        sys.exit(1)
    return LSM(str(path))
def run(command, timeout=None, silent=True):
    """Execute *command* through the shell, returning the os.system status.

    NOTE: *command* is mutated in place (env prefix, timeout wrapper and
    output redirection are added); callers rely on that when they log the
    final command line afterwards.
    """
    if timeout:
        # Never allow a timeout shorter than one minute.
        timeout = max(timeout, 60)
        command.insert(0, "timeout {}".format(timeout))
    command.insert(0, "PYTHONDONTWRITEBYTECODE=1")
    suppress_output = silent and not os.environ.get("DEBUG")
    if suppress_output:
        command.append("> /dev/null 2>&1")
    return os.system(" ".join(command))
def sampling_setup(sampling, total):
    """Build a sampler over the mutation stream.

    ``sampling`` may be None (no sampling), ``"NN%"`` (keep each mutation
    with probability NN/100) or ``"NN"`` (keep only the first NN).
    Returns ``(sampler, total)`` where ``sampler`` filters an iterable and
    ``total`` is the adjusted mutation count.  Exits with status 2 on a
    malformed value.
    """
    if sampling is None:
        # Identity sampler.
        return lambda x: x, total
    if sampling.endswith("%"):
        # randomly choose percent mutations
        cutoff = float(sampling[:-1]) / 100
        def sampler(iterable):
            for item in iterable:
                value = random.random()
                if value < cutoff:
                    yield item
        total = int(total * cutoff)
    elif sampling.isdigit():
        # otherwise, it is the first COUNT mutations that are used.
        # NOTE: sampler reads `total` at call time, i.e. the int assigned
        # on the previous line.
        total = int(sampling)
        def sampler(iterable):
            remaining = total
            for item in iterable:
                yield item
                remaining -= 1
                if remaining == 0:
                    return
    else:
        msg = "Sampling passed via --sampling option must be a positive"
        msg += " integer or a percentage!"
        log.error(msg)
        sys.exit(2)
    if sampling:
        log.info("Taking into account sampling there is {} mutations.", total)
    return sampler, total
# TODO: the `command` is a hack, maybe there is a way to avoid the
# following code: `if command is not None.
def check_tests(root, seed, arguments, command=None):
    """Verify the test suite is green before mutating anything.

    Runs the suite (in parallel via pytest-xdist when possible, falling
    back to a single worker when the parallel run fails) and returns
    ``(alpha, max_workers)`` where ``alpha`` is the estimated wall time of
    one sequential run.  Exits when the suite is red even sequentially.
    """
    max_workers = arguments["--max-workers"] or (os.cpu_count() - 1) or 1
    max_workers = int(max_workers)
    log.info("Let's check that the tests are green...")
    if arguments["<file-or-directory>"] and arguments["TEST-COMMAND"]:
        log.error("<file-or-directory> and TEST-COMMAND are exclusive!")
        sys.exit(1)
    if command is not None:
        command = list(command)
        if max_workers > 1:
            command.extend(
                [
                    # Use pytest-xdist to make sure it is possible to run the
                    # tests in parallel
                    "--numprocesses={}".format(max_workers),
                ]
            )
    else:
        if arguments["TEST-COMMAND"]:
            command = list(arguments["TEST-COMMAND"])
        else:
            command = list(PYTEST)
            command.extend(arguments["<file-or-directory>"])
        if max_workers > 1:
            command.append(
                # Use pytest-xdist to make sure it is possible to run
                # the tests in parallel
                "--numprocesses={}".format(max_workers)
            )
        command.extend(
            [
                # Setup coverage options to only mutate what is tested.
                "--cov=.",
                "--cov-branch",
                "--no-cov-on-fail",
                # Pass random seed
                "--randomly-seed={}".format(seed),
            ]
        )
    with timeit() as alpha:
        out = run(command)
    if out == 0:
        log.info("Tests are green 💚")
        # Scale measured time back to a one-worker estimate.
        alpha = alpha() * max_workers
    else:
        msg = "Tests are not green... return code is {}..."
        log.warning(msg, out)
        log.warning("I tried the following command: `{}`", " ".join(command))
        # Same command without parallelization
        if arguments["TEST-COMMAND"]:
            command = list(arguments["TEST-COMMAND"])
        else:
            command = list(PYTEST)
            command.extend(arguments["<file-or-directory>"])
        command += [
            # Setup coverage options to only mutate what is tested.
            "--cov=.",
            "--cov-branch",
            "--no-cov-on-fail",
            # Pass random seed
            "--randomly-seed={}".format(seed),
        ]
        with timeit() as alpha:
            out = run(command)
        if out != 0:
            msg = "Tests are definitly red! Return code is {}!!"
            log.error(msg, out)
            log.error("I tried the following command: `{}`", " ".join(command))
            sys.exit(2)
        # Otherwise, it is possible to run the tests but without
        # parallelization.
        msg = "Setting max_workers=1 because tests do not pass in parallel"
        log.warning(msg)
        max_workers = 1
        alpha = alpha()
    msg = "Time required to run the tests once: {}..."
    log.info(msg, humanize(alpha))
    return alpha, max_workers
def mutation_only_deadcode(x):
    """Predicate selecting only mutations flagged for dead-code detection."""
    return getattr(x, "deadcode_detection", False)
def mutation_all(x):
    """Predicate accepting every mutation, regardless of *x*."""
    return True
async def play_create_mutations(loop, root, db, max_workers, arguments):
    """Craft mutations for every covered source file and store them in *db*.

    Returns the total number of mutations created.
    """
    # Go through all files, and produce mutations, take into account
    # include pattern, and exclude patterns. Also, exclude what has
    # no coverage.
    include = arguments.get("--include") or "*.py"
    include = include.split(",")
    include = glob2predicate(include)
    exclude = arguments.get("--exclude") or "*test*"
    exclude = exclude.split(",")
    exclude = glob2predicate(exclude)
    filepaths = root.rglob("*.py")
    filepaths = (x for x in filepaths if include(str(x)) and not exclude(str(x)))
    # setup coverage support
    coverage = coverage_read(root)
    only_dead_code = arguments["--only-deadcode-detection"]
    if only_dead_code:
        mutation_predicate = mutation_only_deadcode
    else:
        mutation_predicate = mutation_all
    def make_item(filepath):
        # Bundle everything a worker needs: path, source, covered lines,
        # and the mutation filter.
        with filepath.open() as f:
            content = f.read()
        out = (
            str(filepath),
            content,
            coverage.get(str(filepath), set()),
            mutation_predicate,
        )
        return out
    # Skip files with no coverage at all.
    items = (make_item(x) for x in filepaths if coverage.get(str(x), set()))
    # Start with biggest files first, because that is those that will
    # take most time, that way, it will make most / best use of the
    # workers.
    items = sorted(items, key=lambda x: len(x[1]), reverse=True)
    # prepare to create mutations
    total = 0
    log.info("Crafting mutations from {} files...", len(items))
    with tqdm(total=len(items), desc="Files") as progress:
        def on_mutations_created(items):
            nonlocal total
            progress.update()
            total += len(items)
            for path, delta in items:
                # TODO: replace ULID with a content addressable hash.
                uid = ULID().to_uuid()
                # delta is a compressed unified diff
                db[lexode.pack([1, uid])] = lexode.pack([path, delta])
        with timeit() as delta:
            with futures.ProcessPoolExecutor(max_workers=max_workers) as pool:
                await pool_for_each_par_map(
                    loop, pool, on_mutations_created, mutation_create, items
                )
    log.info("It took {} to compute mutations...", humanize(delta()))
    log.info("The number of mutation is {}!", total)
    return total
async def play_mutations(loop, db, seed, alpha, total, max_workers, arguments):
    """Run the test suite against every stored mutation (after sampling).

    Returns the number of surviving mutations (test-suite failures).
    """
    # prepare to run tests against mutations
    command = list(arguments["TEST-COMMAND"] or PYTEST)
    command.append("--randomly-seed={}".format(seed))
    command.extend(arguments["<file-or-directory>"])
    eta = humanize(alpha * total / max_workers)
    log.success("It will take at most {} to run the mutations", eta)
    timeout = alpha * 2
    # All mutation records live under key prefix 1.
    uids = db[lexode.pack([1]) : lexode.pack([2])]
    uids = ((command, lexode.unpack(key)[1], timeout) for (key, _) in uids)
    # sampling
    sampling = arguments["--sampling"]
    sampler, total = sampling_setup(sampling, total)
    uids = sampler(uids)
    step = 10
    gamma = time.perf_counter()
    remaining = total
    log.info("Testing mutations in progress...")
    with tqdm(total=100) as progress:
        def on_progress(_):
            nonlocal remaining
            nonlocal step
            nonlocal gamma
            remaining -= 1
            if (remaining % step) == 0:
                percent = 100 - ((remaining / total) * 100)
                now = time.perf_counter()
                delta = now - gamma
                eta = (delta / step) * remaining
                # NOTE(review): tqdm.update() takes an increment; passing
                # the cumulative percent looks like it overshoots — confirm.
                progress.update(int(percent))
                progress.set_description("ETA {}".format(humanize(eta)))
                msg = "Mutation tests {:.2f}% done..."
                log.debug(msg, percent)
                log.debug("ETA {}...", humanize(eta))
        # Pick a progress-report granularity proportional to the total.
        for speed in [10_000, 1_000, 100, 10, 1]:
            if total // speed == 0:
                continue
            step = speed
            break
        gamma = time.perf_counter()
        with timeit() as delta:
            with futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
                await pool_for_each_par_map(
                    loop, pool, on_progress, mutation_pass, uids
                )
    # Survivors were recorded under key prefix 2 by mutation_pass.
    errors = len(list(db[lexode.pack([2]) : lexode.pack([3])]))
    if errors > 0:
        msg = "It took {} to compute {} mutation failures!"
        log.error(msg, humanize(delta()), errors)
    else:
        msg = "Checking that the test suite is strong against mutations took:"
        msg += " {}... And it is a success 💚"
        log.info(msg, humanize(delta()))
    return errors
async def play(loop, arguments):
    """Top-level flow: check the suite, create mutations, then test them."""
    root = Path(".")
    seed = arguments["--randomly-seed"] or int(time.time())
    log.info("Using random seed: {}".format(seed))
    random.seed(seed)
    alpha, max_workers = check_tests(root, seed, arguments)
    with database_open(root, recreate=True) as db:
        # store arguments used to execute command
        if arguments["TEST-COMMAND"]:
            command = list(arguments["TEST-COMMAND"])
        else:
            command = list(PYTEST)
            command += arguments["<file-or-directory>"]
        command = dict(
            command=command,
            seed=seed,
        )
        value = list(command.items())
        db[lexode.pack((0, "command"))] = lexode.pack(value)
        # let's create mutations!
        count = await play_create_mutations(loop, root, db, max_workers, arguments)
        # Let's run tests against mutations!
        await play_mutations(loop, db, seed, alpha, count, max_workers, arguments)
def mutation_diff_size(db, uid):
    """Return the decompressed size in bytes of the diff stored for *uid*."""
    record = lexode.unpack(db[lexode.pack([1, uid])])
    compressed_diff = record[1]
    return len(zstd.decompress(compressed_diff))
def replay_mutation(db, uid, alpha, seed, max_workers, command):
log.info("* Use Ctrl+C to exit.")
command = list(command)
command.append("--randomly-seed={}".format(seed))
max_workers = 1
if max_workers > 1:
command.append("--numprocesses={}".format(max_workers))
timeout = alpha * 2
while True:
ok = mutation_pass((command, uid, timeout))
if not ok:
mutation_show(uid.hex)
msg = "* Type 'skip' to go to next mutation or just enter to retry."
log.info(msg)
skip | |
# Copyright (C) 2011-2016, Quentin "mefyl" Hocquet
#
# This software is provided "as is" without warranty of any kind,
# either expressed or implied, including but not limited to the
# implied warranties of fitness for a particular purpose.
#
# See the LICENSE file for more information.
import collections
import greenlet
import os
import sys
import threading
import time
import traceback
import types
import drake.debug
import drake.threadpool
import drake.log
class Indentation:
    """Per-coroutine indentation counter used by the drake logger."""
    def __init__(self):
        # Maps coroutine (or None for the main flow) -> current depth.
        self.__indentation = {}
    def __enter__(self):
        # NOTE(review): assumes the `indentation` property was read at
        # least once for the current coroutine; otherwise this KeyErrors —
        # confirm against the logger's call order.
        self.__indentation[Coroutine.current] += 1
    def __exit__(self, type, value, traceback):
        self.__indentation[Coroutine.current] -= 1
    @property
    def indentation(self):
        coroutine = Coroutine.current
        if coroutine not in self.__indentation:
            # First query for this coroutine: inherit the parent's depth.
            initial = 0
            if coroutine is not None and coroutine.parent is not None and coroutine.parent in self.__indentation:
                initial = self.__indentation[coroutine.parent]
            self.__indentation[coroutine] = initial
            return initial
        else:
            return self.__indentation[Coroutine.current]
# Module-level logger; log level is taken from the environment if set.
conf = None
if 'DRAKE_LOG_LEVEL' in os.environ:
    conf = os.environ['DRAKE_LOG_LEVEL']
logger = drake.log.Logger(configuration_string = conf,
                          indentation = Indentation())
class Frozen:
    """Marker type denoting a frozen state."""
class CoroutineDone(Exception):
    """Raised when stepping a coroutine that has already finished."""
class CoroutineFrozen(Exception):
    """Raised when stepping a coroutine that is currently frozen."""
class Terminate(BaseException):
    """Exception injected into a coroutine to force its termination.

    Derives from BaseException so ordinary ``except Exception`` handlers
    do not swallow it.
    """

    def __init__(self, coroutine):
        self.__coroutine = coroutine

    def __str__(self):
        return 'termination of {}'.format(self.__coroutine)
class NonInterruptible:
    """Context manager making the current coroutine non-interruptible
    for the duration of the ``with`` block."""
    def __enter__(self):
        self.__interruptible = Coroutine.current.interruptible
        Coroutine.current.interruptible = False
    def __exit__(self, t, v, tb):
        # Restoring may immediately deliver a pending Terminate via the
        # `interruptible` property setter.
        Coroutine.current.interruptible = self.__interruptible
class Scope:
    """Structured-concurrency helper: coroutines spawned with ``run`` are
    joined (or terminated on error) when the ``with`` block exits.

    With ``exception_join=True`` the scope keeps waiting for its children
    even after one of them raised; otherwise a child exception terminates
    the remaining children.
    """
    # Stack of active scopes, per coroutine.
    __scopes = {}
    def __init__(self, exception_join = False):
        super().__init__()
        self.__exception_join = exception_join
    def __enter__(self):
        Scope.__scopes.setdefault(Coroutine.current, []).append(self)
        self.__coroutine = Coroutine.current
        self.__scheduler = self.__coroutine.scheduler
        self.__coroutines = []
        return self
    def __exit__(self, type, value, traceback):
        try:
            exception = None
            if value is not None:
                # The body itself raised: kill the children.
                logger.log('drake.scheduler', drake.log.LogLevel.trace,
                           '%s: terminate on exception %s' % (self, value))
                self.terminate()
            else:
                while True:
                    try:
                        self.__coroutine.wait(self.__coroutines)
                        break
                    except Exception as e:
                        # Remember the child's exception; either keep
                        # joining or terminate the rest.
                        exception = e
                        if self.__exception_join:
                            continue
                        else:
                            self.terminate()
                if exception:
                    raise exception
        finally:
            del Scope.__scopes[Coroutine.current][-1]
    def run(self, routine, name):
        # Spawn a child coroutine owned by this scope.
        coro = Coroutine(routine, name, self.__scheduler,
                         parent = self.__coroutine)
        self.__coroutines.append(coro)
    def terminate(self):
        # Terminate all children and wait until they are actually gone,
        # ignoring exceptions raised while joining.
        for coro in self.__coroutines:
            coro.terminate()
        while True:
            try:
                self.__coroutine.wait(self.__coroutines)
            except:
                pass
            else:
                break
from orderedset import OrderedSet
class SchedulingPolicy:
    """Base class for policies deciding which coroutine runs next."""
class RoundRobin(SchedulingPolicy):
    """Fair policy: every runnable coroutine gets one step per round."""

    def __init__(self):
        self.__coroutines = OrderedSet()

    @property
    def busy(self):
        """Whether any coroutine is runnable."""
        return bool(self.__coroutines)

    def add(self, coroutine):
        self.__coroutines.add(coroutine)

    def remove(self, coroutine):
        self.__coroutines.remove(coroutine)

    def freeze(self, coroutine):
        # A frozen coroutine simply leaves the runnable set.
        self.__coroutines.remove(coroutine)

    def unfreeze(self, coroutine):
        self.__coroutines.add(coroutine)

    def round(self):
        # Iterate over a snapshot so the set may change mid-round.
        snapshot = list(self.__coroutines)
        for coroutine in snapshot:
            yield coroutine
class DepthFirst(SchedulingPolicy):
    """Policy that always steps the deepest runnable descendant first,
    preferring coroutines with a pending exception."""
    def __init__(self):
        self.__coroutines = OrderedSet()
        # parent coroutine (or None for roots) -> OrderedSet of children.
        self.__hierarchy = {}
    @property
    def busy(self):
        return bool(self.__coroutines)
    def add(self, coroutine):
        parent = coroutine.parent
        self.__coroutines.add(coroutine)
        self.__hierarchy.setdefault(parent, OrderedSet()).add(coroutine)
    def remove(self, coroutine):
        self.__coroutines.remove(coroutine)
        self.__hierarchy.get(coroutine.parent).remove(coroutine)
    def freeze(self, coroutine):
        # Frozen coroutines stay in the hierarchy but leave the runnable set.
        self.__coroutines.remove(coroutine)
    def unfreeze(self, coroutine):
        self.__coroutines.add(coroutine)
    def round(self):
        # A round is a single coroutine: the chosen one.
        return (self.__round(self.__hierarchy.get(None, ())),)
    def __round(self, coroutines):
        # Depth-first search; exceptions take priority, then the deepest
        # active descendant, then the node itself.
        for coroutine in coroutines:
            active = coroutine in self.__coroutines
            if active and coroutine.exception:
                return coroutine
            sub = self.__round(self.__hierarchy.get(coroutine, ()))
            if sub is not None:
                return sub
            if active:
                return coroutine
class Scheduler:
    """Cooperative coroutine scheduler.

    Steps runnable coroutines according to its policy, parks frozen ones,
    and executes thread-scheduled callbacks (see ``schedule``) between
    steps.  A singleton instance is kept in ``Scheduler.__instance``.
    """
    __instance = None
    # Thread pool used by ThreadedOperation while the scheduler runs.
    __pool = None
    @classmethod
    def scheduler(self):
        return Scheduler.__instance
    def __init__(self, jobs = None, policy = None):
        self.reset()
        Scheduler.__instance = self
        self.__lock = threading.Condition()
        self.__policy = policy or RoundRobin()
        # Callbacks posted from other threads, run on the scheduler thread.
        self.__scheduled = []
    def __str__(self):
        return 'Scheduler'
    def reset(self):
        self.__coroutines_frozen = set()
        self.__running = False
        self.__exception = None
        self.__traceback = None
    def running(self):
        return self.__running
    def debug(self, msg):
        return logger.log('drake.scheduler', drake.log.LogLevel.trace, msg)
    def add(self, coro):
        self.debug('%s: new coroutine: %s' % (self, coro.name))
        self.__policy.add(coro)
    def run(self):
        """Run until no coroutine is left (or a root coroutine raised)."""
        assert not self.__running
        self.__running = True
        self.die = False
        Scheduler.__pool = drake.threadpool.ThreadPool()
        try:
            while True:
                if self.__exception is not None:
                    self.debug('%s: pending exception %s, dying' % \
                               (self, self.__exception))
                    # FIXME: terminate everyone
                    e = self.__exception
                    self.__exception = None
                    raise e
                if not self.__policy.busy:
                    if not self.__coroutines_frozen:
                        self.debug('no more coroutine, dying')
                        break
                    else:
                        # Everything is frozen: sleep until another thread
                        # posts a callback that may unfreeze someone.
                        while not self.__policy.busy:
                            with self.__lock:
                                if not self.__scheduled:
                                    self.__lock.wait()
                                assert self.__scheduled
                                for f in self.__scheduled:
                                    f()
                                self.__scheduled = []
                for coro in self.__policy.round():
                    assert coro is not None
                    # Drain thread-posted callbacks before each step.
                    with self.__lock:
                        for f in self.__scheduled:
                            f()
                        self.__scheduled = []
                    self.__step(coro)
        finally:
            Scheduler.__pool.stop()
            Scheduler.__pool = None
            self.__running = False
    def __step(self, coro):
        if (coro.done):
            return
        assert not coro.frozen
        with self.debug('%s: schedule %s' % (self, coro)):
            try:
                with self.debug('%s: step %s' % (self, coro)):
                    coro.step()
            except Exception as e:
                self.debug('%s: %s threw %s' % (self, coro, e))
                parent = coro._Coroutine__parent
                if parent is None:
                    # Root coroutine failed: surface in the main loop.
                    self.__exception = e#.with_traceback(coro._Coroutine__traceback)
            if coro.done:
                self.debug('%s ended' % coro)
                self.__policy.remove(coro)
            elif coro.frozen:
                self.debug('%s froze' % coro)
                self.__coroutines_frozen.add(coro)
                self.__policy.freeze(coro)
    def unfreeze(self, coro):
        self.debug('%s wakes up' % coro)
        self.__policy.unfreeze(coro)
        self.__coroutines_frozen.remove(coro)
    def schedule(self, f):
        """Thread-safe: run callable *f* on the scheduler thread soon."""
        with self.__lock:
            self.__scheduled.append(f)
            self.__lock.notify()
class Waitable:
    """Base class for things coroutines can block on."""
    def __init__(self):
        # Coroutines currently blocked on this waitable.
        self.__waiting = set()
    def __wait(self, coro):
        # Register *coro* as a waiter; True means the caller must freeze.
        self.__waiting.add(coro)
        return True
    def __unwait(self, coro):
        self.__waiting.remove(coro)
    def __wake_all(self, exception = None):
        for coro in self.__waiting:
            coro._Coroutine__unwait(self, exception)
        self.__waiting.clear()
    def __wake_one(self):
        # Wake an arbitrary waiter; returns whether one was woken.
        if self.__waiting:
            self.__wake(next(iter(self.__waiting)))
            return True
        else:
            return False
    def __wake(self, coro):
        coro._Coroutine__unwait(self)
        self.__waiting.remove(coro)
class Signal(Waitable):
    """Waitable that wakes every waiting coroutine when signalled."""
    def signal(self):
        self._Waitable__wake_all()
class classproperty:
    """Descriptor behaving like @property but also usable on the class.

    When accessed through an instance the wrapped function receives the
    instance; when accessed through the class it receives the class.
    """

    def __init__(self, f):
        self.__f = f

    def __get__(self, instance, owner):
        # Falls back to the owner class when there is no (truthy) instance.
        return self.__f(instance or owner)
class Coroutine(Waitable):
    """A cooperatively scheduled task backed by a greenlet.

    A coroutine is itself a Waitable that fires when it finishes; it can
    freeze while waiting on other Waitables and be terminated by having a
    Terminate exception injected.

    Fix: the ``exception`` property was defined twice (identically); the
    duplicate definition has been removed.
    """
    # The coroutine currently being stepped (class-wide).
    __current = None

    def __init__(self, routine, name, scheduler = None, parent = None):
        Waitable.__init__(self)
        self.__coro = greenlet.greenlet(routine)
        self.__done = False
        self.__done_hooks = []
        self.__exception = None
        self.__frozen = False
        self.__interrupted = False
        self.__interruptible = True
        self.__joined = False
        self.__name = name
        self.__parent = parent
        self.__scheduler = scheduler
        self.__started = False
        self.__traceback = None
        self.__waited = set()
        if scheduler is not None:
            scheduler.add(self)

    def throw(self, exn, tb = None):
        """Inject exception *exn* into this coroutine at its next step."""
        self.__exception = exn
        self.__traceback = tb
        if self.frozen:
            # Stop waiting on everything so the exception can be delivered.
            self.__unfreeze()
            for waited in self.__waited:
                waited._Waitable__unwait(self)
            self.__waited.clear()

    @property
    def parent(self):
        return self.__parent

    @property
    def scheduler(self):
        return self.__scheduler

    @property
    def name(self):
        return self.__name

    @property
    def frozen(self):
        """Whether this coroutine is frozen."""
        return self.__frozen

    @property
    def done(self):
        """Whether this coroutine is done."""
        return self.__done

    @classproperty
    def current(self):
        """If called on a coroutine, whether this coroutine is the current one.
        If called on Coroutine, the current coroutine."""
        if self is Coroutine:
            return Coroutine.__current
        else:
            return self is Coroutine.__current

    @property
    def exception(self):
        """Pending exception to be delivered to this coroutine, if any."""
        return self.__exception

    def _Waitable__wait(self, coro):
        # A finished coroutine never blocks its waiters.
        if self.done:
            return False
        else:
            return Waitable._Waitable__wait(self, coro)

    def __str__(self):
        return 'coroutine %s' % self.name

    def __repr__(self):
        return 'Coroutine(%s)' % self.name

    def __yield(self, *args, **kwargs):
        # Switch back to whoever stepped us (the scheduler's greenlet).
        self.__coro.parent.switch(*args, **kwargs)

    def wait(self, waitables, handle_exceptions = True):
        """Block this coroutine until all *waitables* have fired."""
        if isinstance(waitables, Waitable):
            return self.wait([waitables], handle_exceptions)
        else:
            with logger.log('drake.scheduler',
                            drake.log.LogLevel.trace,
                            '%s: wait %s', self, waitables):
                freeze = False
                for waitable in waitables:
                    if waitable._Waitable__wait(self):
                        self.__waited.add(waitable)
                        freeze = True
                logger.log('drake.scheduler',
                           drake.log.LogLevel.debug,
                           '%s: block on %s', self, self.__waited)
                if freeze:
                    self.__frozen = True
                    if self.current:
                        coro_yield(handle_exceptions = handle_exceptions)

    def __unwait(self, waitable, exception = None):
        # Callback from a Waitable when it fires (possibly exceptionally).
        assert waitable in self.__waited
        if exception is None:
            with logger.log('drake.scheduler',
                            drake.log.LogLevel.debug,
                            '%s: unwait %s', self, waitable):
                self.__waited.remove(waitable)
                if not self.__waited:
                    self.__unfreeze()
        else:
            with logger.log(
                    'drake.scheduler',
                    drake.log.LogLevel.debug,
                    '%s: wait on %s threw %s', self, waitable, exception):
                self.__exception = exception
                # Withdraw from every other waitable, then wake up so the
                # exception can propagate.
                for w in self.__waited:
                    if w is not waitable:
                        assert self in w._Waitable__waiting
                        w._Waitable__waiting.remove(self)
                self.__waited.clear()
                self.__unfreeze()

    def run(self):
        """Step this coroutine to completion, without a scheduler."""
        while not self.done:
            self.step()

    def step(self):
        """Execute one step of the coroutine, until the next yield or freeze."""
        if self.done:
            raise CoroutineDone()
        if self.frozen:
            raise CoroutineFrozen()
        self.__started = True
        self.__coro.parent = greenlet.getcurrent()
        prev = Coroutine.__current
        try:
            Coroutine.__current = self
            self.__coro.switch()
        except Terminate:
            assert not self.__coro
            self.__done_set()
        except Exception as e:
            self.__done_set(e)
            self.__traceback = e.__traceback__.tb_next
            raise
        else:
            if not self.__coro:
                self.__done_set()
        finally:
            Coroutine.__current = prev

    def __done_set(self, e = None):
        self.__done = True
        for c in self.__done_hooks:
            c()
        self._Waitable__wake_all(e)

    def __unfreeze(self):
        assert self.frozen
        self.__frozen = False
        if self.__scheduler:
            self.__scheduler.unfreeze(self)

    def terminate(self):
        """Request termination, injecting Terminate when allowed."""
        if self.done:
            return
        if not self.started:
            self.__done_set()
            self.scheduler._Scheduler__policy.remove(self)
        if self.interruptible:
            if self.current:
                raise Terminate(self)
            else:
                self.throw(Terminate(self))
        else:
            # Remember the request; delivered once interruptible again.
            self.__interrupted = True

    @property
    def status(self):
        if self.done:
            return 'done'
        elif self.frozen:
            return 'frozen'
        else:
            return 'running'

    @property
    def started(self):
        return self.__started

    @property
    def interruptible(self):
        return self.__interruptible

    @interruptible.setter
    def interruptible(self, interruptible):
        previous = self.__interruptible
        self.__interruptible = interruptible
        if not previous and interruptible and self.__interrupted:
            # Deliver the termination requested while non-interruptible.
            if self.current:
                raise Terminate(self)
            else:
                self.throw(Terminate(self))
class ThreadedOperation(Signal):
    """A blocking operation executed on the scheduler's thread pool.

    Subclasses implement ``run``; coroutines wait on this Signal, which
    is emitted (via the scheduler, thread-safely) when run() finishes.
    """
    def start(self):
        Scheduler._Scheduler__pool.run(self.__run)
    def __run(self):
        try:
            self.run()
        finally:
            # Hop back to the scheduler thread before signalling waiters.
            Scheduler.scheduler().schedule(self.signal)
def coro_yield(handle_exceptions = True):
    """Yield control from the current coroutine back to the scheduler.

    When ``handle_exceptions`` is true, any exception injected while the
    coroutine was suspended is raised on resumption.
    """
    Coroutine.current._Coroutine__yield()
    if handle_exceptions:
        exception = Coroutine.current._Coroutine__exception
        if exception is not None:
            Coroutine.current._Coroutine__exception = None
            raise exception#.with_traceback(Coroutine.current._Coroutine__traceback)
def coro_wait(waitable):
    """Wait on *waitable*, retrying until the wait completes cleanly.

    Injected exceptions are collected (and chained via ``__context__``)
    while waiting; the last one is re-raised when the wait is over.
    """
    done = False
    exception = None
    while not done:
        Coroutine.current.wait(waitable, handle_exceptions = False)
        current_exception = Coroutine.current._Coroutine__exception
        Coroutine.current._Coroutine__exception = None
        if current_exception is None:
            done = True
        else:
            # Chain successive exceptions so none is silently lost.
            if exception is not None and exception is not current_exception:
                current_exception.__context__ = exception
            exception = current_exception
    if exception is not None:
        raise exception#.with_traceback(Coroutine.current._Coroutine__traceback)
def wait(waitable):
    """Block the current coroutine until *waitable* fires."""
    return Coroutine.current.wait(waitable)
def background(f):
    """Run callable *f* on the thread pool, blocking only this coroutine.

    Returns f()'s result, or re-raises whatever f raised.
    """
    class Background(ThreadedOperation):
        def run(self):
            try:
                self.result = f()
            except BaseException as e:
                self.exception = e
    op = Background()
    op.start()
    wait(op)
    if hasattr(op, 'exception'):
        raise op.exception
    else:
        return op.result
class Lockable:
def | |
-= 1
if parent.parent is not None:
#print "before, ", parent.parent_id
#self.printTree(parent.segment)
superparent = parent.parent
if left:
superparent.balance -= 1
else:
superparent.balance += 1
if superparent.balance == 0:
superparent.height -= 1
self.propagateArea(superparent, config, -1)
#print "after, ", superparent.id
#self.printTree(parent.segment)
def propagateAreaOnly(self, node, config, diff):
    """Subtract ``diff`` from the area of ``node`` and every ancestor.

    ``config`` is unused; kept for interface parity with propagateArea.
    """
    delta = float(diff)
    current = node
    while current is not None:
        current.area -= delta
        current = current.parent
def propagateArea(self, node, config, inserted=1):
    """
    Propagate the area and other features up the score tree, balancing as needed using an AVL algorithm.
    For balancing, positive numbers are heavy on the left, negative numbers are heavy on the right.
    node - the node to start at (bottom-up traversal)
    config - an evo.config.Config object with property "shiftToDb"
    inserted - how many new levels have been added to the tree, 1 or 0. If 0, no balancing is needed.
    """
    # Determine how much the subtree height changed at `node`.
    if inserted > 0:
        heightChange = inserted
    elif inserted < 0:
        if node.balance == 0:
            heightChange = -1
        else:
            heightChange = 0
    else:
        heightChange = 0
    current = node
    # Children ordered [left, right] (left first).
    children = sorted(current.children, key=lambda c: not c.left)
    while True:
        left = children[0]
        right = children[1]
        # Invariant: scores decrease left to right.
        if left.min_score < right.max_score:
            self.printTree(node.segment)
            raise Exception, "Violated ordered assumption!"
        # is the current the right or the left child?
        onLeft = current.left
        rotated = False
        if inserted != 0:
            # AVL rebalance: single or double rotation depending on the
            # sign of the heavy child's balance.
            if current.balance > 1:
                if left.balance != 0:
                    heightChange -= 1
                if left.balance >= 0:
                    rotated = True
                    self.rotateRight(current, config)
                else:# left.balance < 0:
                    rotated = True
                    self.rotateLeft(left, config)
                    self.rotateRight(current, config)
            elif current.balance < -1:
                if right.balance != 0:
                    heightChange -= 1
                if right.balance <= 0:
                    rotated = True
                    self.rotateLeft(current, config)
                else:# right.balance > 0:
                    rotated = True
                    self.rotateRight(right, config)
                    self.rotateLeft(current, config)
        if not rotated:
            # Recompute aggregates from the (unrotated) children.
            current.area = left.area + right.area
            current.taylor = left.taylor + right.taylor
            current.child_count = left.child_count + right.child_count + 1
            current.min_score = right.min_score
            current.max_score = left.max_score
        next = None
        if current.parent is not None:
            next = current.parent
            children = sorted(next.children, key=lambda c: not c.left)
        if inserted != 0 and next is not None:
            # Update the parent's balance from which side we came up.
            oldbal = next.balance
            if onLeft:
                newbal = next.balance + heightChange
            else:
                newbal = next.balance - heightChange
            next.balance = newbal
            if inserted > 0:
                if newbal == 0:
                    heightChange = 0
            else:
                if newbal != 0:
                    heightChange = 0
            next.height += heightChange
        if next is None:
            break
        current = next
def insert(self, point, config, stats):
    """
    Insert a new point into the score tree, then propagate area, taylor scores, and rebalance.
    Assumes the point has already been inserted into the partition tree
    point - the point to insert
    config - an evo.config.Config object with properties "shiftToDb", "taylorDepth", "taylorCenter", "selection"
    stats - an evo.trainer.RunStats object
    Raises SeparationException when both taylor expansions underflow.
    """
    stats.start("insert.traverse")
    node = self.traverse(point, config)
    stats.stop("insert.traverse")
    stats.start("insert.main")
    lr = config.learningRate
    # ns = exponents 0..depth-1, fs = running factorials (0! treated as 1).
    ns = [i+0. for i in xrange(self.segment.taylorDepth)]
    fs = ns * 1
    if config.taylorDepth > 0:
        fs[0] = 1.0
    for i in xrange(self.segment.taylorDepth - 1):
        fs[i+1] *= fs[i]
    ns = array(ns)
    fs = array(fs)
    center = self.segment.taylorCenter
    if node.point is None:
        # Empty leaf: store the point and its taylor expansion directly.
        node.point = point
        node.area = point.partition_node.area
        node.min_score = point.score
        node.max_score = point.score
        score = (config.minimize and -1 or 1) * point.score * lr
        taylor = nan_to_num(score ** ns) / fs
        taylor *= node.area
        taylor *= nan_to_num(exp(score/center))
        node.taylor = nan_to_num(taylor)
        return
    # Occupied leaf: split it into two children ordered by score.
    other = node.point
    node.point = None
    if other.score > point.score:
        upPoint = other
        downPoint = point
        node.max_score = other.score
        node.min_score = point.score
    else:
        upPoint = point
        downPoint = other
        node.max_score = point.score
        node.min_score = other.score
    node.height = 1
    upArea = upPoint.partition_node.area
    downArea = downPoint.partition_node.area
    score1 = (config.minimize and -1 or 1) * upPoint.score * lr
    score2 = (config.minimize and -1 or 1) * downPoint.score * lr
    taylor1 = (score1 ** ns) / fs
    taylor1 *= upArea
    taylor1 *= (exp(score1/center))
    taylor2 = (score2 ** ns) / fs
    taylor2 *= downArea
    taylor2 *= (exp(score2/center))
    if self.segment.taylorDepth > 0:
        if (abs(taylor1) < 1e-300).all() and (abs(taylor2) < 1e-300).all():
            # Both expansions underflow: restore the leaf and bail out.
            node.point = other
            node.height = 0
            node.max_score = other.score
            node.min_score = other.score
            # node has not been saved to the DB, so we don't need to rollback
            #node.save()
            #transaction.rollback()
            raise SeparationException, "Zero taylor coeffs"
    # Higher score goes left, lower score goes right.
    n1 = ScoreTreeNode(
        segment=node.segment,
        point=upPoint,
        area = upArea,
        parent = node,
        max_score = upPoint.score,
        min_score = upPoint.score,
        left = upPoint.score >= downPoint.score,
        taylor = taylor1
    )
    n2 = ScoreTreeNode(
        segment=node.segment,
        point=downPoint,
        area = downArea,
        parent = node,
        max_score = downPoint.score,
        min_score = downPoint.score,
        left = upPoint.score < downPoint.score,
        taylor = taylor2
    )
    node.children = [n1,n2]
    upPoint.score_node = n1
    downPoint.score_node = n2
    stats.stop("insert.main")
    stats.start("insert.propagate")
    self.propagateArea(node, config)
    stats.stop("insert.propagate")
def resetTaylor(self, segment, temp, config):
    """
    Recompute the taylor coefficients on the Score tree.
    This should be done whenever 1/temp gets more than 0.5 away
    from the inverse of the taylor center for accuracy.
    Requires a loop through all points, but only needs to be done
    at a logarithmically often pace.
    segment - the segment to recompute
    temp - the new temperature center
    config - an evo.config.Config object with "shiftToDb", "taylorCenter", "taylorDepth"
    This method sets config.taylorCenter to temp when complete.
    """
    logg.info("resetTaylor: segment %s, new center %s, old center %s" % (segment, temp, config.taylorCenter))
    # ns = exponents 0..depth-1, fs = running factorials (0! treated as 1).
    ns = [i+0. for i in xrange(segment.taylorDepth)]
    fs = ns * 1
    if len(fs) > 0:
        fs[0] = 1.0
    for i in xrange(segment.taylorDepth - 1):
        fs[i+1] *= fs[i]
    ns = array(ns)
    fs = array(fs)
    lr = config.learningRate
    center = temp
    # Seed the bottom-up sweep with the leaves holding live points.
    next = []
    for point in segment.points:
        if point.alive and point.score_node is not None:
            next.append(point.score_node)
    heights = {0: next}
    height = 0
    mult = config.minimize and -1 or 1
    # Process nodes level by level so children are done before parents.
    while len(heights) > 0:
        nodes = set(heights[height])
        for node in nodes:
            if node.point is not None:
                # Leaf: recompute expansion around the new center.
                score = mult * lr * node.point.score
                taylor = nan_to_num(score ** ns) / fs
                taylor *= node.area
                taylor *= nan_to_num(exp(score/center))
                node.taylor = nan_to_num(taylor)
            else:
                # Internal node: sum the children's expansions.
                node.taylor = zeros(segment.taylorDepth)
                for child in node.children:
                    node.taylor += child.taylor
            if node.parent is not None:
                if not heights.has_key(node.parent.height):
                    heights[node.parent.height] = [node.parent]
                else:
                    if node.parent not in heights[node.parent.height]:
                        heights[node.parent.height].append(node.parent)
        del heights[height]
        height += 1
    segment.taylorCenter = temp
class AreaTreeNode(object):
    """A node of the AreaTree; each instance gets a unique increasing id.

    Fix: ``parent``/``children``/``val``/``low``/``high``/``left`` used to
    be class attributes; ``children = []`` in particular was a shared
    mutable list aliased by every node.  They are now per-instance.
    """
    # Class-wide id generator, incremented once per instantiation.
    idgen = 1
    def __init__(self):
        self.id = self.__class__.idgen
        self.__class__.idgen += 1
        # Per-instance state (same defaults as before).
        self.parent = None
        self.children = []
        self.val = None
        self.low = 0.0
        self.high = 1.0
        self.left = False
class AreaTree(object):
    """Binary search tree over partition nodes keyed by their area."""
    # NOTE(review): `root` and `map` are class attributes, so all AreaTree
    # instances share the same `map` dict — confirm only one instance is
    # ever created, otherwise these should move into __init__.
    root = None
    map = {}
def traverse(self, area):
    """Walk from the root to the leaf whose interval contains *area*.

    Children are ordered [left, right] with the left child covering the
    larger areas (area >= left.low).  Raises AreaException on an empty
    tree.  Fix: renamed the cursor variable, which shadowed the builtin
    ``next``.
    """
    if self.root is None:
        raise AreaException("Cannot traverse empty tree")
    node = self.root
    while len(node.children):
        if area >= node.children[0].low:
            node = node.children[0]
        else:
            node = node.children[1]
    return node
def insert(self, partitionNode):
    """Insert *partitionNode* into the area tree.

    The leaf found by traverse() is split in two: the larger-area value
    goes to the left child, the smaller to the right, and the old leaf
    becomes an internal node.  ``self.map`` indexes leaves by value id.
    """
    if self.root is None:
        # First insertion: the root is the only leaf.
        self.root = AreaTreeNode()
        self.root.val = partitionNode
        assert self.root.val is not None
        return
    parent = self.traverse(partitionNode.area)
    mid = partitionNode.area
    left = AreaTreeNode()
    left.left = True
    left.parent = parent
    left.high = parent.high
    left.low = mid
    right = AreaTreeNode()
    right.left = False
    right.parent = parent
    right.high = mid
    right.low = parent.low
    # Larger area goes left; ties keep the existing value on the left.
    if parent.val.area >= partitionNode.area:
        right.val = partitionNode
        left.val = parent.val
    else:
        left.val = partitionNode
        right.val = parent.val
    assert left.val is not None
    assert right.val is not None
    assert left.low >= right.high
    assert left.val.area >= right.val.area
    # The old leaf becomes an internal node.
    parent.val = None
    parent.children = [left,right]
    self.map[left.val.id] = left
    self.map[right.val.id] = right
def remove(self, partitionNode):
if not self.map.has_key(partitionNode.id):
err | |
data.
Args:
loc: Insertion index.
column: Column labels to insert.
value: Dtype object values to insert.
Returns:
A new PandasQueryCompiler with new data inserted.
"""
if is_list_like(value):
# TODO make work with another querycompiler object as `value`.
# This will require aligning the indices with a `reindex` and ensuring that
# the data is partitioned identically.
if isinstance(value, pandas.Series):
value = value.reindex(self.index)
value = list(value)
def insert(df, internal_indices=[]):
internal_idx = int(internal_indices[0])
old_index = df.index
df.index = pandas.RangeIndex(len(df.index))
df.insert(internal_idx, internal_idx, value, allow_duplicates=True)
df.columns = pandas.RangeIndex(len(df.columns))
df.index = old_index
return df
new_data = self.data.apply_func_to_select_indices_along_full_axis(
0, insert, loc, keep_remaining=True
)
new_columns = self.columns.insert(loc, column)
return self.__constructor__(new_data, self.index, new_columns)
# END Insert
# UDF (apply and agg) methods
# There is a wide range of behaviors that are supported, so a lot of the
# logic can get a bit convoluted.
def apply(self, func, axis, *args, **kwargs):
    """Apply func across given axis.

    Args:
        func: The function to apply.
        axis: Target axis to apply the function along.

    Returns:
        A new PandasQueryCompiler.
    """
    # Dispatch on the kind of ``func``; the order matters because a dict
    # is also list-like.
    if callable(func):
        handler = self._callable_func
    elif isinstance(func, dict):
        handler = self._dict_func
    elif is_list_like(func):
        handler = self._list_like_func
    else:
        # Unsupported func type: return None, matching historical behavior.
        return None
    return handler(func, axis, *args, **kwargs)
def _post_process_apply(self, result_data, axis, try_scale=True):
"""Recompute the index after applying function.
Args:
result_data: a BaseFrameManager object.
axis: Target axis along which function was applied.
Returns:
A new PandasQueryCompiler.
"""
if try_scale:
try:
internal_index = self.compute_index(0, result_data, True)
except IndexError:
internal_index = self.compute_index(0, result_data, False)
try:
internal_columns = self.compute_index(1, result_data, True)
except IndexError:
internal_columns = self.compute_index(1, result_data, False)
else:
internal_index = self.compute_index(0, result_data, False)
internal_columns = self.compute_index(1, result_data, False)
if not axis:
index = internal_index
# We check if the two columns are the same length because if
# they are the same length, `self.columns` is the correct index.
# However, if the operation resulted in a different number of columns,
# we must use the derived columns from `self.compute_index()`.
if len(internal_columns) != len(self.columns):
columns = internal_columns
else:
columns = self.columns
else:
columns = internal_columns
# See above explanation for checking the lengths of columns
if len(internal_index) != len(self.index):
index = internal_index
else:
index = self.index
return self.__constructor__(result_data, index, columns)
def _dict_func(self, func, axis, *args, **kwargs):
"""Apply function to certain indices across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if "axis" not in kwargs:
kwargs["axis"] = axis
if axis == 0:
index = self.columns
else:
index = self.index
func = {idx: func[key] for key in func for idx in index.get_indexer_for([key])}
def dict_apply_builder(df, func_dict={}):
# Sometimes `apply` can return a `Series`, but we require that internally
# all objects are `DataFrame`s.
return pandas.DataFrame(df.apply(func_dict, *args, **kwargs))
result_data = self.data.apply_func_to_select_indices_along_full_axis(
axis, dict_apply_builder, func, keep_remaining=False
)
full_result = self._post_process_apply(result_data, axis)
return full_result
def _list_like_func(self, func, axis, *args, **kwargs):
    """Apply list-like function across given axis.

    Args:
        func: List of functions (callables or their string names).
        axis: Target axis to apply the function along.

    Returns:
        A new PandasQueryCompiler.
    """
    def apply_all(df):
        # Wrap in a DataFrame: `apply` may hand back a Series.
        return pandas.DataFrame(df.apply(func, axis, *args, **kwargs))

    new_data = self._map_across_full_axis(axis, self._prepare_method(apply_all))
    # When the function is list-like, the function names become the labels
    # along the applied axis; the other axis keeps its current labels.
    func_names = [
        f if isinstance(f, string_types) else f.__name__ for f in func
    ]
    new_index = func_names if axis == 0 else self.index
    new_columns = func_names if axis == 1 else self.columns
    return self.__constructor__(new_data, new_index, new_columns)
def _callable_func(self, func, axis, *args, **kwargs):
    """Apply callable functions across given axis.

    Args:
        func: The functions to apply.
        axis: Target axis to apply the function along.

    Returns:
        A new PandasQueryCompiler.
    """
    def callable_apply_builder(df, axis=0):
        # ``index`` is a free variable: it is assigned *below*, after this
        # def, and resolved late when the builder actually runs.
        if not axis:
            # Restore real row labels; give columns positional labels.
            df.index = index
            df.columns = pandas.RangeIndex(len(df.columns))
        else:
            # Restore real column labels; give rows positional labels.
            df.columns = index
            df.index = pandas.RangeIndex(len(df.index))
        result = df.apply(func, axis=axis, *args, **kwargs)
        return result
    # Labels along the applied axis, re-attached to each partition above.
    index = self.index if not axis else self.columns
    func_prepared = self._build_mapreduce_func(callable_apply_builder, axis=axis)
    result_data = self._map_across_full_axis(axis, func_prepared)
    return self._post_process_apply(result_data, axis)
# END UDF
# Manual Partitioning methods (e.g. merge, groupby)
# These methods require some sort of manual partitioning due to their
# nature. They require certain data to exist on the same partition, and
# after the shuffle, there should be only a local map required.
def _manual_repartition(self, axis, repartition_func, **kwargs):
"""This method applies all manual partitioning functions.
Args:
axis: The axis to shuffle data along.
repartition_func: The function used to repartition data.
Returns:
A `BaseFrameManager` object.
"""
func = self._prepare_method(repartition_func, **kwargs)
return self.data.manual_shuffle(axis, func)
def groupby_reduce(
    self,
    by,
    axis,
    groupby_args,
    map_func,
    map_args,
    reduce_func=None,
    reduce_args=None,
    numeric_only=True,
):
    """Groupby implemented as a map-reduce over the partitions.

    Args:
        by: A query compiler whose data holds the group keys (``by.data``
            is aligned with our partitions).
        axis: Axis to group along (0 = rows).
        groupby_args: kwargs forwarded to ``DataFrame.groupby``.
        map_func: Aggregation applied per partition.
        map_args: kwargs for ``map_func``.
        reduce_func: Aggregation combining mapped partitions; when None,
            ``map_func`` is reused for the reduce step.
        reduce_args: kwargs for ``reduce_func``.
        numeric_only: When True (axis 0), keep only numeric columns.

    Returns:
        A new PandasQueryCompiler with the grouped result.
    """
    def _map(df, other):
        # ``other`` carries the group keys for this partition; reset_index
        # keeps the keys as the leading column for the reduce step.
        return map_func(
            df.groupby(by=other.squeeze(), axis=axis, **groupby_args), **map_args
        ).reset_index(drop=False)
    if reduce_func is not None:
        def _reduce(df):
            # Re-group on the key column produced by _map, then combine.
            return reduce_func(
                df.groupby(by=df.columns[0], axis=axis, **groupby_args),
                **reduce_args
            )
    else:
        def _reduce(df):
            return map_func(
                df.groupby(by=df.columns[0], axis=axis, **groupby_args), **map_args
            )
    new_data = self.data.groupby_reduce(axis, by.data, _map, _reduce)
    if axis == 0:
        new_columns = (
            self.columns if not numeric_only else self.numeric_columns(True)
        )
        new_index = self.compute_index(axis, new_data, False)
    else:
        new_columns = self.compute_index(axis, new_data, False)
        new_index = self.index
    return self.__constructor__(new_data, new_index, new_columns)
def groupby_agg(self, by, axis, agg_func, groupby_args, agg_args):
    """Groupby along ``axis`` and aggregate each group with ``agg_func``.

    Args:
        by: Label(s)/values to group by (must resolve inside each partition).
        axis: Axis to group along.
        agg_func: Aggregation applied to the grouped frame.
        groupby_args: kwargs forwarded to ``DataFrame.groupby``.
        agg_args: kwargs forwarded to ``agg_func``.

    Returns:
        A new PandasQueryCompiler.
    """
    # Real labels along the grouped axis, restored inside each partition.
    remote_index = self.index if not axis else self.columns
    def groupby_agg_builder(df):
        if not axis:
            df.index = remote_index
            df.columns = pandas.RangeIndex(len(df.columns))
            # We need to be careful that our internal index doesn't overlap with the
            # groupby values, otherwise we return an incorrect result. We
            # temporarily modify the columns so that we don't run into correctness
            # issues.
            if all(b in df for b in by):
                df = df.add_prefix("_")
        else:
            df.columns = remote_index
            df.index = pandas.RangeIndex(len(df.index))
        def compute_groupby(df):
            grouped_df = df.groupby(by=by, axis=axis, **groupby_args)
            try:
                result = agg_func(grouped_df, **agg_args)
                # This will set things back if we changed them (see above).
                if axis == 0 and not is_numeric_dtype(result.columns.dtype):
                    result.columns = [int(col[1:]) for col in result]
            # This happens when the partition is filled with non-numeric data and a
            # numeric operation is done. We need to build the index here to avoid issues
            # with extracting the index.
            except DataError:
                result = pandas.DataFrame(index=grouped_df.size().index)
            return result
        try:
            return compute_groupby(df)
        # This will happen with Arrow buffer read-only errors. We don't want to copy
        # all the time, so this will try to fast-path the code first.
        except ValueError:
            return compute_groupby(df.copy())
    func_prepared = self._prepare_method(lambda df: groupby_agg_builder(df))
    result_data = self._map_across_full_axis(axis, func_prepared)
    if axis == 0:
        index = self.compute_index(0, result_data, False)
        columns = self.compute_index(1, result_data, True)
    else:
        index = self.compute_index(0, result_data, True)
        columns = self.compute_index(1, result_data, False)
    # If the result is a Series, this is how `compute_index` returns the columns.
    if len(columns) == 0 and len(index) != 0:
        return self._post_process_apply(result_data, axis, try_scale=True)
    else:
        return self.__constructor__(result_data, index, columns)
# END Manual Partitioning methods
# Get_dummies
def get_dummies(self, columns, **kwargs):
"""Convert categorical variables to dummy variables for certain columns.
Args:
columns: The columns to convert.
Returns:
A new QueryCompiler.
"""
cls = type(self)
# `columns` as None does not mean all columns, by default it means only
# non-numeric columns.
if columns is None:
columns = [c for c in self.columns if not is_numeric_dtype(self.dtypes[c])]
# If we aren't computing any dummies, there is no need for any
# remote compute.
if len(columns) == 0:
return self.copy()
elif not is_list_like(columns):
columns = [columns]
# We have to do one of two things in order to ensure the final columns
# are correct. Our first option is to map over the data and assign the
# columns in a separate pass. That is what we have chosen to do here.
# This is not as efficient, but it requires less information from the
# lower layers and does not break any of our internal requirements. The
# second option is that we assign the columns as a part of the
# `get_dummies` call. This requires knowledge of the length of each
# partition, and breaks some of our assumptions and separation of
# concerns.
def set_columns(df, columns):
df.columns = columns
return df
set_cols = self.columns
columns_applied = self._map_across_full_axis(
1, lambda df: set_columns(df, | |
# (C) Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import itertools
import os
from os.path import join, dirname, abspath
import glob
# Import ogr module.
# Locate the bundled GDAL/OGR binaries: walk four directory levels up from
# this file, then into ``../arch/win32_x86``.  This layout (and the ';'
# check below) is Windows-specific.
dll_path = abspath(join(dirname(dirname(dirname(dirname(__file__)))), '..', 'arch', 'win32_x86'))
if os.environ['PATH'].endswith(';'):
    # PATH already ends with a separator; append the DLL dir directly.
    os.environ['PATH'] += dll_path
else:
    os.environ['PATH'] += os.pathsep + dll_path
# Make the bundled GDAL python eggs importable before importing osgeo.
egg_path = join(dll_path, 'py')
sys.path.append(egg_path)
libs = glob.glob(join(egg_path, 'GDAL*.egg'))
for lib in libs:
    sys.path.append(lib)
from osgeo import ogr
import requests
class InvalidToken(Exception):
    """Raised when the ArcGIS REST API reports an invalid or expired token."""
    pass
class ArcGISServiceHelper(object):
    """ArcGIS Server and Portal helper class.

    Wraps the ArcGIS REST API: token generation, locating a service's URL,
    and querying rows/fields from its layers and tables.
    """
    def __init__(self, portal_url, username, password, verify_ssl, referer='', token_expiration=60, instance=''):
        """
        :param portal_url: base Portal/Server URL
        :param username: portal user name
        :param password: portal password
        :param verify_ssl: value passed to ``requests`` as ``verify``
        :param referer: referer used when generating tokens (defaults to
            ``portal_url`` when empty)
        :param token_expiration: requested token lifetime
        :param instance: web adaptor / instance name (e.g. ``'arcgis'``)
        """
        self._username = username
        self._password = password
        self._verify_ssl = verify_ssl
        self._portal_url = portal_url
        self._token_expiration = token_expiration
        if referer:
            self._referer = referer
        else:
            self._referer = self._portal_url
        # Maps the portal item-type label to the REST endpoint suffix.
        self._service_types = {"Map Service": "MapServer", "Feature Service": "FeatureServer"}
        self._instance = instance
        if self._instance == 'arcgis':
            self._http = "{0}/arcgis/rest/services".format(self._portal_url)
        else:
            self._http = "{0}/{1}/rest/services".format(self._portal_url, self._instance)
        self.oid_field_name = 'objectid'
        self.token = self._generate_token()
    def _generate_token(self):
        """Generates a token required for ArcGIS Online authentication.

        Returns the token string, ``None`` when the JSON response carried
        no ``token`` key, or ``''`` when the response was not JSON at all.
        """
        try:
            query_dict = {'username': self._username,
                          'password': self._password,
                          'referer': self._referer,
                          'expiration': str(self._token_expiration)}
            url = "{0}/{1}/tokens/generateToken".format(self._portal_url, self._instance)
            response = requests.post(url + "?f=json", query_dict, verify=self._verify_ssl)
            token = response.json()
            if "token" not in token:
                return None
            return token['token']
        except ValueError:
            # response.json() failed: body was not JSON.
            return ''
    def find_item_url(self, service_name, service_type, folder_name='', token=''):
        """Return the layer or table view url.

        :param service_name: service name
        :param service_type: service type (i.e Feature Server)
        :param folder_name: folder name service is located in
        :param token: token value
        :returns: tuple of (service_url, service JSON description)
        :raises InvalidToken: when the portal rejects the token
        :raises IndexError: when a search succeeds but matches nothing
        """
        service_url = None
        if token:
            self.token = token
        if folder_name:
            search_url = self._http + "/{0}".format(folder_name)
        else:
            search_url = self._http
        query_dict = {'f': 'json',
                      'token': token,
                      'q': '''title:"{0}" AND owner:"{1}" AND type:"{2}"'''.format(service_name, self._username, service_type)}
        # BUG FIX: this request previously used ``service_url``, which is
        # always None at this point, so every call failed before reaching
        # the server.  The search is issued against ``search_url``.
        response = requests.get(search_url, params=query_dict, verify=self._verify_ssl)
        data = response.json()
        if 'error' in data:
            if data['error']['message'] == "Invalid Token":
                raise InvalidToken(data['error']['message'])
            else:
                raise Exception(data['error']['message'])
        if 'results' in data:
            if data['results']:
                result = data['results'][0]
                service_url = result['url']
            else:
                # Search succeeded but returned no matching items.
                raise IndexError
        else:
            # Endpoint does not support search results: build the REST URL
            # by convention instead.
            if folder_name:
                search_url = self._portal_url + '/{3}/rest/services/{0}/{1}/{2}'.format(folder_name, service_name, self._service_types[service_type], self._instance)
            else:
                search_url = self._portal_url + '/{2}/rest/services/{0}/{1}'.format(service_name, self._service_types[service_type], self._instance)
        if not service_url:
            if not self._service_types[service_type] in search_url:
                service_url = search_url + "/{0}/{1}".format(service_name, self._service_types[service_type])
            else:
                service_url = search_url
        # Fetch the service description (optionally authenticated).
        if self.token:
            r = requests.get(service_url, params={'f': 'json', 'token': self.token, 'referer': self._referer}, verify=self._verify_ssl)
        else:
            r = requests.get(service_url, params={'f': 'json'}, verify=self._verify_ssl)
        items = r.json()
        if 'error' in items:
            if items['error']['message'] == "Invalid Token":
                raise InvalidToken(items['error']['message'])
            else:
                raise Exception(items['error']['message'])
        return service_url, items
    def get_item_row_count(self, url, layer_id, token, where):
        """Returns the row count of a service layer or table.

        :param url: Service url
        :param layer_id: service layer/table ID
        :param token: token value
        :param where: where clause limiting the count
        :returns: (range of 1000-row page offsets, total record count)
        """
        # NOTE(review): the query carries the ``token`` argument while the
        # guard checks ``self.token`` — kept as-is; confirm intent.
        if self.token:
            query = {'where': where, 'returnCountOnly': True, 'token': token, 'f': 'json'}
        else:
            query = {'where': where, 'returnCountOnly': True, 'f': 'json'}
        response = requests.get('{0}/{1}/query?'.format(url, layer_id), params=query, verify=self._verify_ssl)
        data = response.json()
        num_records = data['count']
        # One group per page of up to 1000 records (the server page size
        # used by get_item_rows).
        id_groups = range(0, num_records, 1000)
        return id_groups, num_records
    def get_item_fields(self, url, layer_id, token):
        """Return the fields of a service layer or table.

        :param url: service url
        :param layer_id: service layer/table ID
        :param token: token value
        """
        if self.token:
            query = {'where': '1=1', 'outFields': '*', 'returnGeometry': False, 'token': token, 'f': 'json'}
        else:
            query = {'where': '1=1', 'outFields': '*', 'returnGeometry': False, 'f': 'json'}
        response = requests.get('{0}/{1}/query?'.format(url, layer_id), params=query, verify=self._verify_ssl)
        data = response.json()
        fields = data['fields']
        return fields
    def get_item_rows(self, url, layer_id, token, spatial_rel='esriSpatialRelIntersects',
                      where='1=1', out_fields='*', resultOffset=0, resultRecordCount=1000, out_sr=4326, return_geometry=True, response_format='json'):
        """Return the rows for a service layer or table.

        :param url: service url
        :param layer_id: service layer/table ID
        :param token: token value
        :param spatial_rel: spatial relationship (default is esriSpatialRelIntersects)
        :param where: where clause
        :param out_fields: output fields (default is *)
        :param resultOffset: paging offset into the result set
        :param resultRecordCount: page size
        :param out_sr: output spatial reference WKID
        :param return_geometry: boolean to return geometry
        :param response_format: requested ``f`` format; falls back to json
            when the response cannot be parsed
        """
        if self.token:
            query = {'spatialRel': spatial_rel, 'where': where, 'outFields': out_fields, 'resultOffset': resultOffset, 'resultRecordCount': resultRecordCount, 'returnGeometry': return_geometry, 'outSR': out_sr, 'token': token, 'f': response_format}
        else:
            query = {'spatialRel': spatial_rel, 'where': where, 'outFields': out_fields, 'resultOffset': resultOffset, 'resultRecordCount': resultRecordCount, 'returnGeometry': return_geometry, 'outSR': out_sr, 'f': response_format}
        try:
            response = requests.get('{0}/{1}/query?'.format(url, layer_id), params=query, verify=self._verify_ssl)
            data = response.json()
        except ValueError:
            # The requested format did not come back as JSON; retry forcing
            # f=json so the caller always gets a parsed payload.
            if self.token:
                query = {'spatialRel': spatial_rel, 'where': where, 'outFields': out_fields, 'resultOffset': resultOffset, 'resultRecordCount': resultRecordCount,
                         'returnGeometry': return_geometry, 'outSR': out_sr, 'token': token, 'f': 'json'}
            else:
                query = {'spatialRel': spatial_rel, 'where': where, 'outFields': out_fields, 'resultOffset': resultOffset, 'resultRecordCount': resultRecordCount,
                         'returnGeometry': return_geometry, 'outSR': out_sr, 'f': 'json'}
            response = requests.get('{0}/{1}/query?'.format(url, layer_id), params=query, verify=self._verify_ssl)
            data = response.json()
        return data
class GeoJSONConverter(object):
    """
    Class with helper methods to convert GeoJSON to WKT.
    """
    def __str__(self):
        return "GeoJSONConverter"
    def create_polyline(self, coords):
        """Build a WKT LINESTRING from a coordinate list using OGR.

        Falls back to treating each entry as a nested ``[[x, y]]`` pair;
        returns None when the coordinates cannot be interpreted.
        """
        polyline = ogr.Geometry(ogr.wkbLineString)
        for coord in coords:
            try:
                polyline.AddPoint(coord[0], coord[1])
            except TypeError:
                # Some payloads nest each vertex one level deeper.
                try:
                    polyline.AddPoint(coord[0][0], coord[0][1])
                except Exception:
                    return None
        return polyline.ExportToWkt()
    def create_polygon(self, coords):
        """Build a WKT POLYGON (single ring) from a coordinate list using OGR.

        Returns None on any failure (best-effort, like create_polyline).
        """
        try:
            ring = ogr.Geometry(ogr.wkbLinearRing)
            for coord in coords:
                ring.AddPoint(coord[0], coord[1])
            # Create polygon
            poly = ogr.Geometry(ogr.wkbPolygon)
            poly.AddGeometry(ring)
            return poly.ExportToWkt()
        except Exception:
            return None
    def convert_to_wkt(self, geojson, number_of_decimals):
        """Convert a GeoJSON geometry dict to a WKT string.

        :param geojson: dict with 'type' and 'coordinates' (or 'geometries')
        :param number_of_decimals: decimals each coordinate is rounded to
        :raises Exception: for unknown geometry types
        """
        converters = {
            'POINT': self._point_to_wkt,
            'MULTIPOINT': self._multipoint_to_wkt,
            'LINESTRING': self._line_to_wkt,
            'MULTILINESTRING': self._multiline_to_wkt,
            'POLYGON': self._polygon_to_wkt,
            'MULTIPOLYGON': self._multipolygon_to_wkt,
            'GEOMETRYCOLLECTION': self._geometry_collection_to_wkt,
        }
        try:
            converter = converters[geojson['type'].upper()]
        except KeyError:
            raise Exception('Unknown geometry type.')
        return converter(geojson, number_of_decimals)
    def _point_to_wkt(self, point, decimals=3):
        """Converts a GeoJSON POINT to WKT."""
        x_coord = round(point['coordinates'][0], decimals)
        y_coord = round(point['coordinates'][1], decimals)
        wkt_point = 'POINT ({0} {1})'.format(x_coord, y_coord)
        return wkt_point
    def _multipoint_to_wkt(self, multipoint, decimals=3):
        """Converts a GeoJSON MULTIPOINT to WKT."""
        coords = multipoint['coordinates']
        points = (' '.join(str(round(c, decimals)) for c in pt) for pt in coords)
        points = ('({0})'.format(pt) for pt in points)
        wkt_multipoint = 'MULTIPOINT ({0})'.format(', '.join(points))
        return wkt_multipoint
    def _line_to_wkt(self, polyline, decimals=3):
        """Converts a GeoJSON LINESTRING to WKT."""
        coords = polyline['coordinates']
        wkt_line = 'LINESTRING ({0})'.format(', '.join(' '.join(str(round(c, decimals)) for c in pt) for pt in coords))
        return wkt_line
    def _multiline_to_wkt(self, multiline, decimals=3):
        """Converts a GeoJSON MULTILINESTRING to WKT."""
        coords = multiline['coordinates']
        lines = ('({0})'.format(', '.join(' '.join(str(round(c, decimals)) for c in pt) for pt in coord)) for coord in coords)
        wkt_multilines = 'MULTILINESTRING ({0})'.format(', '.join(ls for ls in lines))
        return wkt_multilines
    def _polygon_to_wkt(self, polygon, decimals=3):
        """Converts a GeoJSON POLYGON to WKT."""
        coords = polygon['coordinates']
        parts = (', '.join(' '.join(str(round(c, decimals)) for c in pt) for pt in part) for part in coords)
        parts = ('({0})'.format(r) for r in parts)
        wkt_polygon = 'POLYGON ({0})'.format(', '.join(parts))
        return wkt_polygon
    def _multipolygon_to_wkt(self, multipolygon, decimals=3):
        """Converts a GeoJSON MULTIPOLYGON to WKT."""
        coords = multipolygon['coordinates']
        polys = (', '.join('({0})'.format(', '.join('({0})'.format(', '.join(' '.join(str(round(c, decimals)) for c in pt) for pt in part)) for part in poly)) for poly in coords))
        wkt_multipolygon = 'MULTIPOLYGON ({0})'.format(polys)
        return wkt_multipolygon
    def _geometry_collection_to_wkt(self, geometrycollection, decimals=3):
        """Converts a GeoJSON GEOMETRYCOLLECTION to WKT."""
        # Delegate each member through convert_to_wkt (this also makes
        # nested collections work, and keeps the 'Unknown geometry type.'
        # error for unsupported members).
        wkt_geometries = [
            self.convert_to_wkt(geometry, decimals)
            for geometry in geometrycollection['geometries']
        ]
        # BUG FIX: the list object itself used to be formatted into the
        # string, yielding "GEOMETRYCOLLECTION (['POINT ...', ...])".
        return 'GEOMETRYCOLLECTION ({0})'.format(', '.join(wkt_geometries))
class GeometryOps(object):
"""Geometry operators."""
def __init__(self):
    # Geohash-style lookup tables: entry i holds the latitude/longitude
    # cell size (in degrees) of a hash of length i; index 0 covers the
    # whole globe (360 x 180 degrees).
    self.__max_precision = 25
    self.__hash_len_to_lat_height = []
    self.__hash_len_to_lon_width = []
    self.__hash_len_to_lat_height.append(90.0*2)
    self.__hash_len_to_lon_width.append(180.0*2)
    even = False
    for i in range(1, self.__max_precision + 1):
        # Each extra character alternates the split: one level divides
        # latitude by 4 and longitude by 8, the next by 8 and 4.
        self.__hash_len_to_lat_height.append(self.__hash_len_to_lat_height[-1] / (8 if even else 4))
        self.__hash_len_to_lon_width.append(self.__hash_len_to_lon_width[-1] / (4 if even else 8))
        even = not even
def __str__(self):
    """Return the printable name of this helper."""
    return "GeometryOps"
def __approximate_radius(self, geometry):
    """Return the approximate radius of a polygon geometry.

    Computed as the distance from the centroid to the (min-x, min-y)
    corner of the geometry's envelope.

    :param geometry: an OGR geometry
    """
    # OGR GetEnvelope() returns (minX, maxX, minY, maxY).
    corners = geometry.GetEnvelope()
    centroid = geometry.Centroid()
    corner_point = ogr.CreateGeometryFromWkt('POINT({0} {1})'.format(corners[0], corners[2]))
    return centroid.Distance(corner_point)
def __compute_distance(self, geometry, tolerance):
    """Return a distance (in DD) based on
    :param geometry: an OGR geometry
    :param tolerance: multiplier applied to the hash cell height
    :return: distance in decimal degrees
    """
    # Use 10% of the geometry's approximate radius to pick a hash
    # precision level, then scale that level's cell height (index [1],
    # presumably latitude — TODO confirm) by ``tolerance``.
    radius = self.__approximate_radius(geometry) * 0.1
    level = self.__lookup_hashLen_for_width_height(radius, radius)
    distance = self.__lookup_degrees_size_for_hash_len(level)[1] * tolerance
    return distance
def __lookup_hashLen_for_width_height(self, lonErr, latErr):
for i in range(1, self.__max_precision):
| |
# Enter a parse tree produced by ora2epasParser#elsif_part.
def enterElsif_part(self, ctx:ora2epasParser.Elsif_partContext):
pass
# Exit a parse tree produced by ora2epasParser#elsif_part.
def exitElsif_part(self, ctx:ora2epasParser.Elsif_partContext):
pass
# Enter a parse tree produced by ora2epasParser#else_part.
def enterElse_part(self, ctx:ora2epasParser.Else_partContext):
pass
# Exit a parse tree produced by ora2epasParser#else_part.
def exitElse_part(self, ctx:ora2epasParser.Else_partContext):
pass
# Enter a parse tree produced by ora2epasParser#loop_statement.
def enterLoop_statement(self, ctx:ora2epasParser.Loop_statementContext):
pass
# Exit a parse tree produced by ora2epasParser#loop_statement.
def exitLoop_statement(self, ctx:ora2epasParser.Loop_statementContext):
pass
# Enter a parse tree produced by ora2epasParser#cursor_loop_param.
def enterCursor_loop_param(self, ctx:ora2epasParser.Cursor_loop_paramContext):
pass
# Exit a parse tree produced by ora2epasParser#cursor_loop_param.
def exitCursor_loop_param(self, ctx:ora2epasParser.Cursor_loop_paramContext):
pass
# Enter a parse tree produced by ora2epasParser#forall_statement.
def enterForall_statement(self, ctx:ora2epasParser.Forall_statementContext):
pass
# Exit a parse tree produced by ora2epasParser#forall_statement.
def exitForall_statement(self, ctx:ora2epasParser.Forall_statementContext):
pass
# Enter a parse tree produced by ora2epasParser#bounds_clause.
def enterBounds_clause(self, ctx:ora2epasParser.Bounds_clauseContext):
pass
# Exit a parse tree produced by ora2epasParser#bounds_clause.
def exitBounds_clause(self, ctx:ora2epasParser.Bounds_clauseContext):
pass
# Enter a parse tree produced by ora2epasParser#between_bound.
def enterBetween_bound(self, ctx:ora2epasParser.Between_boundContext):
pass
# Exit a parse tree produced by ora2epasParser#between_bound.
def exitBetween_bound(self, ctx:ora2epasParser.Between_boundContext):
pass
# Enter a parse tree produced by ora2epasParser#lower_bound.
def enterLower_bound(self, ctx:ora2epasParser.Lower_boundContext):
pass
# Exit a parse tree produced by ora2epasParser#lower_bound.
def exitLower_bound(self, ctx:ora2epasParser.Lower_boundContext):
pass
# Enter a parse tree produced by ora2epasParser#upper_bound.
def enterUpper_bound(self, ctx:ora2epasParser.Upper_boundContext):
pass
# Exit a parse tree produced by ora2epasParser#upper_bound.
def exitUpper_bound(self, ctx:ora2epasParser.Upper_boundContext):
pass
# Enter a parse tree produced by ora2epasParser#null_statement.
def enterNull_statement(self, ctx:ora2epasParser.Null_statementContext):
pass
# Exit a parse tree produced by ora2epasParser#null_statement.
def exitNull_statement(self, ctx:ora2epasParser.Null_statementContext):
pass
# Enter a parse tree produced by ora2epasParser#raise_statement.
def enterRaise_statement(self, ctx:ora2epasParser.Raise_statementContext):
pass
# Exit a parse tree produced by ora2epasParser#raise_statement.
def exitRaise_statement(self, ctx:ora2epasParser.Raise_statementContext):
pass
# Enter a parse tree produced by ora2epasParser#return_statement.
def enterReturn_statement(self, ctx:ora2epasParser.Return_statementContext):
pass
# Exit a parse tree produced by ora2epasParser#return_statement.
def exitReturn_statement(self, ctx:ora2epasParser.Return_statementContext):
pass
# Enter a parse tree produced by ora2epasParser#function_call.
def enterFunction_call(self, ctx:ora2epasParser.Function_callContext):
pass
# Exit a parse tree produced by ora2epasParser#function_call.
def exitFunction_call(self, ctx:ora2epasParser.Function_callContext):
pass
# Enter a parse tree produced by ora2epasParser#body.
def enterBody(self, ctx:ora2epasParser.BodyContext):
pass
# Exit a parse tree produced by ora2epasParser#body.
def exitBody(self, ctx:ora2epasParser.BodyContext):
pass
# Enter a parse tree produced by ora2epasParser#exception_handler.
def enterException_handler(self, ctx:ora2epasParser.Exception_handlerContext):
pass
# Exit a parse tree produced by ora2epasParser#exception_handler.
def exitException_handler(self, ctx:ora2epasParser.Exception_handlerContext):
pass
# Enter a parse tree produced by ora2epasParser#trigger_block.
def enterTrigger_block(self, ctx:ora2epasParser.Trigger_blockContext):
pass
# Exit a parse tree produced by ora2epasParser#trigger_block.
def exitTrigger_block(self, ctx:ora2epasParser.Trigger_blockContext):
pass
# Enter a parse tree produced by ora2epasParser#block.
def enterBlock(self, ctx:ora2epasParser.BlockContext):
pass
# Exit a parse tree produced by ora2epasParser#block.
def exitBlock(self, ctx:ora2epasParser.BlockContext):
pass
# Enter a parse tree produced by ora2epasParser#sql_statement.
def enterSql_statement(self, ctx:ora2epasParser.Sql_statementContext):
pass
# Exit a parse tree produced by ora2epasParser#sql_statement.
def exitSql_statement(self, ctx:ora2epasParser.Sql_statementContext):
pass
# Enter a parse tree produced by ora2epasParser#execute_immediate.
def enterExecute_immediate(self, ctx:ora2epasParser.Execute_immediateContext):
pass
# Exit a parse tree produced by ora2epasParser#execute_immediate.
def exitExecute_immediate(self, ctx:ora2epasParser.Execute_immediateContext):
pass
# Enter a parse tree produced by ora2epasParser#dynamic_returning_clause.
def enterDynamic_returning_clause(self, ctx:ora2epasParser.Dynamic_returning_clauseContext):
pass
# Exit a parse tree produced by ora2epasParser#dynamic_returning_clause.
def exitDynamic_returning_clause(self, ctx:ora2epasParser.Dynamic_returning_clauseContext):
pass
# Enter a parse tree produced by ora2epasParser#sql_statements.
def enterSql_statements(self, ctx:ora2epasParser.Sql_statementsContext):
pass
# Exit a parse tree produced by ora2epasParser#sql_statements.
def exitSql_statements(self, ctx:ora2epasParser.Sql_statementsContext):
pass
# Enter a parse tree produced by ora2epasParser#cursor_manipulation_statements.
def enterCursor_manipulation_statements(self, ctx:ora2epasParser.Cursor_manipulation_statementsContext):
pass
# Exit a parse tree produced by ora2epasParser#cursor_manipulation_statements.
def exitCursor_manipulation_statements(self, ctx:ora2epasParser.Cursor_manipulation_statementsContext):
pass
# Enter a parse tree produced by ora2epasParser#close_statement.
def enterClose_statement(self, ctx:ora2epasParser.Close_statementContext):
pass
# Exit a parse tree produced by ora2epasParser#close_statement.
def exitClose_statement(self, ctx:ora2epasParser.Close_statementContext):
pass
# Enter a parse tree produced by ora2epasParser#open_statement.
def enterOpen_statement(self, ctx:ora2epasParser.Open_statementContext):
pass
# Exit a parse tree produced by ora2epasParser#open_statement.
# NOTE(review): machine-generated ANTLR listener stubs for ora2epasParser.
# Every enter*/exit* hook below is deliberately a no-op (`pass`); concrete
# listeners subclass this walker and override only the hooks they need.
# Do not edit by hand — regenerate from the grammar instead.
def exitOpen_statement(self, ctx:ora2epasParser.Open_statementContext):
    pass
# Enter a parse tree produced by ora2epasParser#fetch_statement.
def enterFetch_statement(self, ctx:ora2epasParser.Fetch_statementContext):
    pass
# Exit a parse tree produced by ora2epasParser#fetch_statement.
def exitFetch_statement(self, ctx:ora2epasParser.Fetch_statementContext):
    pass
# Enter a parse tree produced by ora2epasParser#open_for_statement.
def enterOpen_for_statement(self, ctx:ora2epasParser.Open_for_statementContext):
    pass
# Exit a parse tree produced by ora2epasParser#open_for_statement.
def exitOpen_for_statement(self, ctx:ora2epasParser.Open_for_statementContext):
    pass
# Enter a parse tree produced by ora2epasParser#transaction_control_statements.
def enterTransaction_control_statements(self, ctx:ora2epasParser.Transaction_control_statementsContext):
    pass
# Exit a parse tree produced by ora2epasParser#transaction_control_statements.
def exitTransaction_control_statements(self, ctx:ora2epasParser.Transaction_control_statementsContext):
    pass
# Enter a parse tree produced by ora2epasParser#set_transaction_command.
def enterSet_transaction_command(self, ctx:ora2epasParser.Set_transaction_commandContext):
    pass
# Exit a parse tree produced by ora2epasParser#set_transaction_command.
def exitSet_transaction_command(self, ctx:ora2epasParser.Set_transaction_commandContext):
    pass
# Enter a parse tree produced by ora2epasParser#set_constraint_command.
def enterSet_constraint_command(self, ctx:ora2epasParser.Set_constraint_commandContext):
    pass
# Exit a parse tree produced by ora2epasParser#set_constraint_command.
def exitSet_constraint_command(self, ctx:ora2epasParser.Set_constraint_commandContext):
    pass
# Enter a parse tree produced by ora2epasParser#commit_statement.
def enterCommit_statement(self, ctx:ora2epasParser.Commit_statementContext):
    pass
# Exit a parse tree produced by ora2epasParser#commit_statement.
def exitCommit_statement(self, ctx:ora2epasParser.Commit_statementContext):
    pass
# Enter a parse tree produced by ora2epasParser#write_clause.
def enterWrite_clause(self, ctx:ora2epasParser.Write_clauseContext):
    pass
# Exit a parse tree produced by ora2epasParser#write_clause.
def exitWrite_clause(self, ctx:ora2epasParser.Write_clauseContext):
    pass
# Enter a parse tree produced by ora2epasParser#rollback_statement.
def enterRollback_statement(self, ctx:ora2epasParser.Rollback_statementContext):
    pass
# Exit a parse tree produced by ora2epasParser#rollback_statement.
def exitRollback_statement(self, ctx:ora2epasParser.Rollback_statementContext):
    pass
# Enter a parse tree produced by ora2epasParser#savepoint_statement.
def enterSavepoint_statement(self, ctx:ora2epasParser.Savepoint_statementContext):
    pass
# Exit a parse tree produced by ora2epasParser#savepoint_statement.
def exitSavepoint_statement(self, ctx:ora2epasParser.Savepoint_statementContext):
    pass
# Enter a parse tree produced by ora2epasParser#explain_statement.
def enterExplain_statement(self, ctx:ora2epasParser.Explain_statementContext):
    pass
# Exit a parse tree produced by ora2epasParser#explain_statement.
def exitExplain_statement(self, ctx:ora2epasParser.Explain_statementContext):
    pass
# Enter a parse tree produced by ora2epasParser#select_statement.
def enterSelect_statement(self, ctx:ora2epasParser.Select_statementContext):
    pass
# Exit a parse tree produced by ora2epasParser#select_statement.
def exitSelect_statement(self, ctx:ora2epasParser.Select_statementContext):
    pass
# Enter a parse tree produced by ora2epasParser#subquery_factoring_clause.
def enterSubquery_factoring_clause(self, ctx:ora2epasParser.Subquery_factoring_clauseContext):
    pass
# Exit a parse tree produced by ora2epasParser#subquery_factoring_clause.
def exitSubquery_factoring_clause(self, ctx:ora2epasParser.Subquery_factoring_clauseContext):
    pass
# Enter a parse tree produced by ora2epasParser#factoring_element.
def enterFactoring_element(self, ctx:ora2epasParser.Factoring_elementContext):
    pass
# Exit a parse tree produced by ora2epasParser#factoring_element.
def exitFactoring_element(self, ctx:ora2epasParser.Factoring_elementContext):
    pass
# Enter a parse tree produced by ora2epasParser#search_clause.
def enterSearch_clause(self, ctx:ora2epasParser.Search_clauseContext):
    pass
# Exit a parse tree produced by ora2epasParser#search_clause.
def exitSearch_clause(self, ctx:ora2epasParser.Search_clauseContext):
    pass
# Enter a parse tree produced by ora2epasParser#cycle_clause.
def enterCycle_clause(self, ctx:ora2epasParser.Cycle_clauseContext):
    pass
# Exit a parse tree produced by ora2epasParser#cycle_clause.
def exitCycle_clause(self, ctx:ora2epasParser.Cycle_clauseContext):
    pass
# Enter a parse tree produced by ora2epasParser#subquery.
def enterSubquery(self, ctx:ora2epasParser.SubqueryContext):
    pass
# Exit a parse tree produced by ora2epasParser#subquery.
def exitSubquery(self, ctx:ora2epasParser.SubqueryContext):
    pass
# Enter a parse tree produced by ora2epasParser#subquery_operation_part.
def enterSubquery_operation_part(self, ctx:ora2epasParser.Subquery_operation_partContext):
    pass
# Exit a parse tree produced by ora2epasParser#subquery_operation_part.
def exitSubquery_operation_part(self, ctx:ora2epasParser.Subquery_operation_partContext):
    pass
# Enter a parse tree produced by ora2epasParser#subquery_basic_elements.
def enterSubquery_basic_elements(self, ctx:ora2epasParser.Subquery_basic_elementsContext):
    pass
# Exit a parse tree produced by ora2epasParser#subquery_basic_elements.
def exitSubquery_basic_elements(self, ctx:ora2epasParser.Subquery_basic_elementsContext):
    pass
# Enter a parse tree produced by ora2epasParser#query_block.
def enterQuery_block(self, ctx:ora2epasParser.Query_blockContext):
    pass
# Exit a parse tree produced by ora2epasParser#query_block.
def exitQuery_block(self, ctx:ora2epasParser.Query_blockContext):
    pass
# Enter a parse tree produced by ora2epasParser#selected_element.
def enterSelected_element(self, ctx:ora2epasParser.Selected_elementContext):
    pass
# Exit a parse tree produced by ora2epasParser#selected_element.
def exitSelected_element(self, ctx:ora2epasParser.Selected_elementContext):
    pass
# Enter a parse tree produced by ora2epasParser#from_clause.
def enterFrom_clause(self, ctx:ora2epasParser.From_clauseContext):
    pass
# Exit a parse tree produced by ora2epasParser#from_clause.
def exitFrom_clause(self, ctx:ora2epasParser.From_clauseContext):
    pass
# Enter a parse tree produced by ora2epasParser#select_list_elements.
def enterSelect_list_elements(self, ctx:ora2epasParser.Select_list_elementsContext):
    pass
# Exit a parse tree produced by ora2epasParser#select_list_elements.
def exitSelect_list_elements(self, ctx:ora2epasParser.Select_list_elementsContext):
    pass
# Enter a parse tree produced by ora2epasParser#table_ref_list.
def enterTable_ref_list(self, ctx:ora2epasParser.Table_ref_listContext):
    pass
# Exit a parse tree produced by ora2epasParser#table_ref_list.
def exitTable_ref_list(self, ctx:ora2epasParser.Table_ref_listContext):
    pass
# Enter a parse tree produced by ora2epasParser#table_ref.
def enterTable_ref(self, ctx:ora2epasParser.Table_refContext):
    pass
# Exit a parse tree produced by ora2epasParser#table_ref.
def exitTable_ref(self, ctx:ora2epasParser.Table_refContext):
    pass
# Enter a parse tree produced by ora2epasParser#table_ref_aux.
def enterTable_ref_aux(self, ctx:ora2epasParser.Table_ref_auxContext):
    pass
# Exit a parse tree produced by ora2epasParser#table_ref_aux.
def exitTable_ref_aux(self, ctx:ora2epasParser.Table_ref_auxContext):
    pass
# Enter a parse tree produced by ora2epasParser#table_ref_aux_internal_one.
def enterTable_ref_aux_internal_one(self, ctx:ora2epasParser.Table_ref_aux_internal_oneContext):
    pass
# Exit a parse tree produced by ora2epasParser#table_ref_aux_internal_one.
def exitTable_ref_aux_internal_one(self, ctx:ora2epasParser.Table_ref_aux_internal_oneContext):
    pass
# Enter a parse tree produced by ora2epasParser#table_ref_aux_internal_two.
def enterTable_ref_aux_internal_two(self, ctx:ora2epasParser.Table_ref_aux_internal_twoContext):
    pass
# Exit a parse tree produced by ora2epasParser#table_ref_aux_internal_two.
def exitTable_ref_aux_internal_two(self, ctx:ora2epasParser.Table_ref_aux_internal_twoContext):
    pass
# Enter a parse tree produced by ora2epasParser#table_ref_aux_internal_three.
def enterTable_ref_aux_internal_three(self, ctx:ora2epasParser.Table_ref_aux_internal_threeContext):
    pass
# Exit a parse tree produced by ora2epasParser#table_ref_aux_internal_three.
def exitTable_ref_aux_internal_three(self, ctx:ora2epasParser.Table_ref_aux_internal_threeContext):
    pass
# Enter a parse tree produced by ora2epasParser#join_clause.
def enterJoin_clause(self, ctx:ora2epasParser.Join_clauseContext):
    pass
# Exit a parse tree produced by ora2epasParser#join_clause.
def exitJoin_clause(self, ctx:ora2epasParser.Join_clauseContext):
    pass
# Enter a parse tree produced by ora2epasParser#join_on_part.
| |
import base64
import json
from unittest import TestCase, mock
from unittest.mock import Mock, mock_open
import requests_mock
from kallisticore.exceptions import FailedAction, InvalidHttpProbeMethod, \
InvalidCredentialType, InvalidHttpRequestMethod
from kallisticore.lib.credential import \
EnvironmentUserNamePasswordCredential, \
KubernetesServiceAccountTokenCredential
from kallisticore.modules import common
from kallisticore.modules.common import wait, http_probe, http_request
class TestCommonModule(TestCase):
    """Tests for the public surface of kallisticore.modules.common."""

    def test_exported_functions(self):
        """__all__ must advertise exactly the three public actions."""
        expected = ['http_probe', 'http_request', 'wait']
        self.assertListEqual(expected, common.__all__)
class TestHttpProbe(TestCase):
    """Unit tests for kallisticore.modules.common.http_probe.

    Fixes applied in review: three occurrences of
    ``mock_credential.fetch.side_effects = None`` were a silent no-op typo
    (unittest.mock honours ``side_effect``, singular) and are corrected
    below; one mocked response body was double-JSON-encoded.
    """

    # Credentials returned by the mocked credential object in the
    # oauth2 authentication tests.
    test_uname = 'test-username'
    test_pw = '<PASSWORD>'

    def setUp(self):
        # Shared fixtures: probe target and a sample header set.
        self._url = "http://go.test/-/status/health"
        self._headers = {"Content-type": "text/html"}

    def test_exception_for_invalid_method(self):
        """Only GET and POST are valid probe methods."""
        method = "PUT"
        with self.assertRaises(InvalidHttpProbeMethod) as error:
            http_probe(url=self._url, method=method)
        self.assertEqual(
            "Invalid method: {}. HTTP Probe allows only GET and POST methods"
            .format(method),
            error.exception.message)

    @requests_mock.mock()
    def test_empty_response_without_request_headers(self, mock_request):
        """GET probe surfaces text, parsed JSON, status and headers."""
        data = {'status': 'UP'}
        response = json.dumps(data)
        mock_request.get(url=self._url, text=response)
        result = http_probe(url=self._url)
        self.assertEqual(response, result['response_text'])
        self.assertEqual(data, result['response'])
        self.assertEqual(200, result['status_code'])
        self.assertEqual({}, result['response_headers'])

    @requests_mock.mock()
    def test_non_json_response(self, mock_request):
        """A non-JSON body produces no parsed 'response' entry."""
        mock_request.get(self._url, text='non-json response')
        result = http_probe(url=self._url)
        self.assertNotIn('response', result)

    @requests_mock.mock()
    def test_response_headers_with_request_headers(self, mock_request):
        """Server response headers are returned to the caller."""
        response = json.dumps({'status': 'UP'})
        mock_request.get(url=self._url, text=response, headers=self._headers)
        result = http_probe(url=self._url)
        self.assertEqual(self._headers, result['response_headers'])

    def test_exception_for_4xx_or_5xx_status_code_without_headers(self):
        """A 4xx/5xx status fails the probe with a descriptive message."""
        text = 'Not Found'
        status_code = 404
        mock_duration = 1
        with self.assertRaises(FailedAction) as error:
            with mock.patch('requests.get') as mock_get:
                mock_get.return_value.status_code = status_code
                mock_get.return_value.text = text
                mock_get.return_value.elapsed.total_seconds.return_value = \
                    mock_duration
                http_probe(url=self._url)
        self.assertEqual(
            "Http probe failed after {} seconds for url {} with status "
            "code {}. Details: {}".format(mock_duration,
                                          self._url, status_code, text),
            error.exception.message)

    @requests_mock.mock()
    def test_response_with_headers(self, mock_request):
        """GET probe with request headers still surfaces the response."""
        response = json.dumps({'status': 'UP'})
        mock_request.get(url=self._url, text=response, headers=self._headers)
        result = http_probe(url=self._url, headers=self._headers)
        self.assertEqual(response, result['response_text'])
        self.assertEqual(200, result['status_code'])
        self.assertEqual(self._headers, result['response_headers'])

    @requests_mock.mock()
    def test_empty_response_headers_with_request_headers(self, mock_request):
        """Request headers do not leak into the reported response headers."""
        response = json.dumps({'status': 'UP'})
        mock_request.get(url=self._url, text=response)
        result = http_probe(url=self._url, headers=self._headers)
        self.assertEqual({}, result['response_headers'])

    def test_exception_for_4xx_or_5xx_with_headers(self):
        """4xx/5xx fails the probe even when request headers are sent."""
        text = 'Not Found'
        status_code = 404
        mock_duration = 1
        with self.assertRaises(FailedAction) as error:
            with mock.patch('requests.get') as mock_get:
                mock_get.return_value.status_code = status_code
                mock_get.return_value.elapsed.total_seconds.return_value = \
                    mock_duration
                mock_get.return_value.text = text
                http_probe(url=self._url, headers=self._headers)
        self.assertEqual("Http probe failed after {} seconds for url {} with "
                         "status code {}. Details: {}"
                         .format(mock_duration, self._url, status_code, text),
                         error.exception.message)

    @requests_mock.mock()
    def test_post_empty_response_headers(self, mock_request):
        """POST probe: no response headers means an empty header dict."""
        method = "POST"
        response = json.dumps({'status': 'UP'})
        mock_request.post(url=self._url, text=response)
        result = http_probe(url=self._url, method=method)
        self.assertEqual(response, result['response_text'])
        self.assertEqual(200, result['status_code'])
        self.assertEqual({}, result['response_headers'])

    @requests_mock.mock()
    def test_post_request_body_with_response_headers(self, mock_request):
        """POST probe with a request body passes response headers through."""
        method = "POST"
        text = json.dumps({'key': 'value'})
        # Fixed: the mocked body was json.dumps(text) — a double-encoded
        # string. Only response_headers is asserted, so behaviour of the
        # test is unchanged.
        mock_request.post(url=self._url, text=text,
                          headers=self._headers)
        result = http_probe(url=self._url, request_body=text, method=method)
        self.assertEqual(self._headers, result['response_headers'])

    def test_post_exception_for_4xx_or_5xx_(self):
        """POST probe fails with FailedAction on a 4xx/5xx status."""
        text = 'Not Found'
        method = "POST"
        status_code = 404
        mock_duration = 1
        with self.assertRaises(FailedAction) as error:
            with mock.patch('requests.post') as mock_post:
                mock_post.return_value.status_code = status_code
                mock_post.return_value.elapsed.total_seconds.return_value = \
                    mock_duration
                mock_post.return_value.text = text
                http_probe(url=self._url, method=method)
        self.assertEqual("Http probe failed after {} seconds for url {} "
                         "with status code {}. Details: {}"
                         .format(mock_duration, self._url, status_code, text),
                         error.exception.message)

    @requests_mock.mock()
    def test_post_with_headers(self, mock_request):
        """POST probe with request headers surfaces the full response."""
        method = "POST"
        response = json.dumps({'status': 'UP'})
        mock_request.post(url=self._url, text=response, headers=self._headers)
        result = http_probe(url=self._url, method=method,
                            headers=self._headers)
        self.assertEqual(response, result['response_text'])
        self.assertEqual(200, result['status_code'])
        self.assertEqual(self._headers, result['response_headers'])

    @requests_mock.mock()
    def test_post_empty_response_header(self, mock_request):
        """POST probe reports empty response headers when none are set."""
        method = "POST"
        response = json.dumps({'status': 'UP'})
        mock_request.post(url=self._url, text=response)
        result = http_probe(url=self._url, method=method,
                            headers=self._headers)
        self.assertEqual({}, result['response_headers'])

    def test_post_exception_for_4xx_or_5xx_with_headers(self):
        """POST probe with headers still fails on a 4xx/5xx status."""
        text = 'Not Found'
        method = "POST"
        status_code = 404
        mock_duration = 1
        with self.assertRaises(FailedAction) as error:
            with mock.patch('requests.post') as mock_post:
                mock_post.return_value.status_code = status_code
                mock_post.return_value.elapsed.total_seconds.return_value = \
                    mock_duration
                mock_post.return_value.text = text
                http_probe(url=self._url, method=method, headers=self._headers)
        self.assertEqual("Http probe failed after {} seconds for url {} "
                         "with status code {}. Details: {}"
                         .format(mock_duration, self._url, status_code, text),
                         error.exception.message)

    @requests_mock.mock()
    def test_k8s_auth_with_header(self, mock_request):
        """A k8s service-account token is sent as the Authorization header."""
        with mock.patch('kallisticore.modules.common.Credential') \
                as mock_credential_module, \
                mock.patch('builtins.open', mock_open(read_data='test-token')):
            # Credential.build() hands back a real k8s token credential
            # whose token-file read is served by the mock_open above.
            mock_k8s_creds = KubernetesServiceAccountTokenCredential()
            mock_credential_module.build.return_value = mock_k8s_creds
            auth_config = {
                'type': 'oauth2_token',
                'credentials': {}
            }
            expected_headers = {'Authorization': 'test-token', **self._headers}
            response = json.dumps({'status': 'UP'})
            mock_request.get(url=self._url, text=response,
                             headers=expected_headers)
            result = http_probe(url=self._url, headers=self._headers,
                                authentication=auth_config)
            self.assertEqual(response, result['response_text'])
            self.assertEqual(200, result['status_code'])

    @requests_mock.mock()
    def test_env_pw_auth_without_header(self, mock_request):
        """Client id/secret obtain an oauth2 token that is then used as
        the Authorization header for the probe request."""
        with mock.patch('kallisticore.modules.common.Credential') \
                as mock_credential_module:
            mock_credential = Mock(spec=EnvironmentUserNamePasswordCredential)
            mock_credential.username.return_value = TestHttpProbe.test_uname
            mock_credential.password.return_value = TestHttpProbe.test_pw
            # Fixed typo: was "side_effects" (a silent no-op attribute).
            mock_credential.fetch.side_effect = None
            mock_credential_module.build.return_value = mock_credential
            test_auth_url = 'https://test-auth.com'
            auth_config = {
                'type': 'oauth2_token',
                'url': test_auth_url,
                'credentials': {},
                'client': {
                    'id': 'test-client-id',
                    'secret': 'test-client-secret'
                }
            }
            # The token endpoint expects HTTP Basic auth built from the
            # client id and secret.
            expected_base64 = base64.b64encode(
                (auth_config['client']['id'] + ':' +
                 auth_config['client']['secret']).encode()).decode('utf-8')
            auth_expected_headers = {
                'Authorization': 'Basic %s' % expected_base64}
            auth_mock_response = json.dumps({'access_token': 'test-token'})
            mock_request.post(url=test_auth_url, text=auth_mock_response,
                              headers=auth_expected_headers)
            probe_expected_headers = {'Authorization': 'test-token'}
            response = json.dumps({'status': 'UP'})
            mock_request.get(url=self._url, text=response,
                             headers=probe_expected_headers)
            result = http_probe(url=self._url, authentication=auth_config)
            self.assertEqual(response, result['response_text'])
            self.assertEqual(200, result['status_code'])

    @requests_mock.mock()
    def test_env_pw_auth_with_resource(self, mock_request):
        """'resource' is forwarded to the token endpoint and a custom
        token_key selects the field of the token response to use."""
        with mock.patch('kallisticore.modules.common.Credential') \
                as mock_credential_module:
            mock_credential = Mock(spec=EnvironmentUserNamePasswordCredential)
            mock_credential.username.return_value = TestHttpProbe.test_uname
            mock_credential.password.return_value = TestHttpProbe.test_pw
            # Fixed typo: was "side_effects" (a silent no-op attribute).
            mock_credential.fetch.side_effect = None
            mock_credential_module.build.return_value = mock_credential
            test_auth_url = 'https://test-auth.com'
            auth_config = {
                'type': 'oauth2_token',
                'url': test_auth_url,
                'credentials': {},
                'client': {
                    'id': 'test-client-id',
                    'secret': 'test-client-secret'
                },
                'resource': 'test-resource-value',
                'token_key': 'different_token_key'
            }
            expected_base64 = base64.b64encode(
                (auth_config['client']['id'] + ':' +
                 auth_config['client']['secret']).encode()).decode('utf-8')
            auth_expected_headers = {
                'Authorization': 'Basic %s' % expected_base64}
            auth_mock_response = json.dumps(
                {'access_token': 'test-token',
                 'different_token_key': 'different-token'})
            mock_auth_post = mock_request.post(url=test_auth_url,
                                               text=auth_mock_response,
                                               headers=auth_expected_headers)
            # The probe must use the token under 'different_token_key',
            # not the default 'access_token'.
            probe_expected_headers = {'Authorization': 'different-token'}
            response = json.dumps({'status': 'UP'})
            mock_request.get(url=self._url, text=response,
                             headers=probe_expected_headers)
            result = http_probe(url=self._url, authentication=auth_config)
            self.assertEqual(response, result['response_text'])
            self.assertEqual(200, result['status_code'])
            self.assertTrue('resource=test-resource-value' in
                            mock_auth_post.last_request.body)

    def test_env_pw_authentication_fail(self):
        """A non-2xx token response aborts the probe with FailedAction."""
        with mock.patch('kallisticore.modules.common.Credential') \
                as mock_credential_module:
            mock_credential = Mock(spec=EnvironmentUserNamePasswordCredential)
            mock_credential.username.return_value = TestHttpProbe.test_uname
            mock_credential.password.return_value = TestHttpProbe.test_pw
            # Fixed typo: was "side_effects" (a silent no-op attribute).
            mock_credential.fetch.side_effect = None
            mock_credential_module.build.return_value = mock_credential
            test_auth_url = 'https://test-auth.com'
            auth_config = {
                'type': 'oauth2_token',
                'url': test_auth_url,
                'credentials': {},
                'client': {
                    'id': 'test-client-id',
                    'secret': 'test-client-secret'
                }
            }
            mock_response_status_code = 404
            mock_response_text = 'test-error-message'
            with self.assertRaises(FailedAction) as error:
                with mock.patch('requests.post') as mock_post:
                    mock_post.return_value.status_code = \
                        mock_response_status_code
                    mock_post.return_value.text = mock_response_text
                    http_probe(url=self._url, authentication=auth_config)
            self.assertEqual(
                'Authentication for http request failed with status code {}. '
                'Details: {}'.format(mock_response_status_code,
                                     mock_response_text),
                error.exception.message)

    def test_authentication_unknown_credential(self):
        """An unrecognised credential type raises InvalidCredentialType."""
        with mock.patch('kallisticore.modules.common.Credential') \
                as mock_credential_module:
            mock_credential = Mock()
            mock_credential_module.build.return_value = mock_credential
            auth_config = {
                'type': 'oauth2_token',
                'credentials': {}
            }
            with self.assertRaises(InvalidCredentialType) as error:
                http_probe(url=self._url, authentication=auth_config)
            self.assertEqual('Invalid credential type: %s' %
                             mock_credential.__class__.__name__,
                             error.exception.message)
class TestHttpRequest(TestCase):
def setUp(self):
    """Shared fixtures: request target URL and a text/html header set."""
    self._url = "http://test.com/-/status/health"
    self._headers = {"Content-type": "text/html"}
def test_exception_when_invalid_method_is_provided(self):
    """An unsupported HTTP verb raises InvalidHttpRequestMethod."""
    bad_method = "INVALID_METHOD"
    with self.assertRaises(InvalidHttpRequestMethod) as error:
        http_request(url=self._url, method=bad_method)
    expected = ("Invalid method: {}. Please specify a valid HTTP "
                "request method".format(bad_method))
    self.assertEqual(expected, error.exception.message)
@requests_mock.mock()
def test_not_raise_exception_for_4xx_or_5xx_(self, mock_request):
    """http_request must not raise for a 4xx/5xx status (unlike http_probe).

    The previous version registered a second 200 matcher for the same
    URL, which shadowed the 404 one (requests_mock uses the most
    recently registered matcher), so the 4xx path was never exercised.
    """
    text = 'Not Found'
    status_code = 404
    mock_request.get(url=self._url, text=text, status_code=status_code)
    # Must return normally, surfacing the error status instead of raising.
    result = http_request(url=self._url, headers=self._headers)
    self.assertEqual(text, result['response_text'])
    self.assertEqual(status_code, result['status_code'])
    self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_non_json_response(self, mock_request):
    """No parsed 'response' key when the body is not valid JSON."""
    mock_request.get(self._url, text='non-json response')
    self.assertNotIn('response', http_request(url=self._url))
@requests_mock.mock()
def test_get_response(self, mock_request):
    """GET: text, parsed JSON, status and headers are all surfaced."""
    payload = {'status': 'UP'}
    body = json.dumps(payload)
    mock_request.get(url=self._url, text=body, headers=self._headers)
    result = http_request(url=self._url, headers=self._headers)
    self.assertEqual(body, result['response_text'])
    self.assertEqual(payload, result['response'])
    self.assertEqual(200, result['status_code'])
    self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_get_empty_response_headers(self, mock_request):
    """GET with no response headers reports an empty header dict."""
    payload = {'status': 'UP'}
    body = json.dumps(payload)
    mock_request.get(url=self._url, text=body)
    result = http_request(url=self._url)
    self.assertEqual(body, result['response_text'])
    self.assertEqual(payload, result['response'])
    self.assertEqual(200, result['status_code'])
    self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_post_empty_response_headers(self, mock_request):
    """POST with no response headers reports an empty header dict."""
    body = json.dumps({'status': 'UP'})
    mock_request.post(url=self._url, text=body)
    result = http_request(url=self._url, method="POST")
    self.assertEqual(body, result['response_text'])
    self.assertEqual(200, result['status_code'])
    self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_post_request_body_without_request_header(self, mock_request):
    """POST with a request body: response headers pass through."""
    body = json.dumps({'key': 'data'})
    mock_request.post(url=self._url, text=body, headers=self._headers)
    result = http_request(url=self._url, method="POST", request_body=body)
    self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_post_request_with_request_headers(self, mock_request):
    """POST with request headers surfaces text, status and headers."""
    body = json.dumps({'status': 'UP'})
    mock_request.post(url=self._url, text=body, headers=self._headers)
    result = http_request(url=self._url, method="POST",
                          headers=self._headers)
    self.assertEqual(body, result['response_text'])
    self.assertEqual(200, result['status_code'])
    self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_put_with_request_headers(self, mock_request):
    """PUT is accepted; absent response headers yield an empty dict."""
    body = json.dumps({'status': 'UP'})
    mock_request.put(url=self._url, text=body)
    result = http_request(url=self._url, method="PUT",
                          headers=self._headers)
    self.assertEqual(body, result['response_text'])
    self.assertEqual(200, result['status_code'])
    self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_put_request_body_with_response_headers(self, mock_request):
    """PUT with a request body echoes text and response headers."""
    body = json.dumps({'key': 'value'})
    mock_request.put(url=self._url, text=body, headers=self._headers)
    result = http_request(url=self._url, method="PUT", request_body=body)
    self.assertEqual(body, result['response_text'])
    self.assertEqual(200, result['status_code'])
    self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_patch_without_headers(self, mock_request):
    """PATCH without headers: text and status are surfaced."""
    body = json.dumps({'status': 'UP'})
    mock_request.patch(url=self._url, text=body)
    result = http_request(url=self._url, method="PATCH")
    self.assertEqual(body, result['response_text'])
    self.assertEqual(200, result['status_code'])
    self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_patch_request_body_with_headers(self, mock_request):
    """PATCH with a request body echoes text and response headers."""
    body = json.dumps({'key': 'value'})
    mock_request.patch(url=self._url, text=body, headers=self._headers)
    result = http_request(url=self._url, method="PATCH", request_body=body)
    self.assertEqual(body, result['response_text'])
    self.assertEqual(200, result['status_code'])
    self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_delete_with_empty_response_headers(self, mock_request):
    """DELETE: a 204 response without headers is surfaced as-is.

    Removed a duplicate 200 registration for the same URL that was
    always shadowed by the 204 one (requests_mock prefers the most
    recently registered matcher), i.e. dead test code.
    """
    response = json.dumps({'status': 'UP'})
    mock_request.delete(url=self._url, text=response, status_code=204)
    result = http_request(url=self._url, method="DELETE")
    self.assertEqual(response, result['response_text'])
    self.assertEqual(204, result['status_code'])
    self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_delete_request_body_with_headers(self, mock_request):
    """DELETE returning 204 with headers surfaces them unchanged."""
    body = json.dumps({'status': 'UP'})
    mock_request.delete(url=self._url, text=body, status_code=204,
                        headers=self._headers)
    result = http_request(url=self._url, method="DELETE")
    self.assertEqual(body, result['response_text'])
    self.assertEqual(204, result['status_code'])
    self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_authentication_k8s_with_header(self, mock_request):
    """A k8s service-account token must be sent as the Authorization
    header alongside the caller-supplied request headers."""
    with mock.patch('kallisticore.modules.common.Credential') \
            as mock_credential_module, \
            mock.patch('builtins.open', mock_open(read_data='test-token')):
        # Credential.build() is patched to hand back a real k8s token
        # credential; its token-file read is served by mock_open above.
        mock_k8s_creds = KubernetesServiceAccountTokenCredential()
        mock_credential_module.build.return_value = mock_k8s_creds
        auth_config = {
            'type': 'oauth2_token',
            'credentials': {}
        }
        # 'headers' below sets the mocked *response* headers; named
        # "expected" presumably to mirror what the request should carry
        # — TODO confirm intent against http_request's implementation.
        expected_headers = {'Authorization': 'test-token', **self._headers}
        response = json.dumps({'status': 'UP'})
        mock_request.get(url=self._url, text=response,
                         headers=expected_headers)
        result = http_request(url=self._url, headers=self._headers,
                              authentication=auth_config)
        self.assertEqual(response, result['response_text'])
        self.assertEqual(200, result['status_code'])
@requests_mock.mock()
def test_env_pw_auth_without_header(self, mock_request):
with mock.patch('kallisticore.modules.common.Credential') \
as mock_credential_module:
mock_credential = Mock(spec=EnvironmentUserNamePasswordCredential)
mock_credential.username.return_value = TestHttpProbe.test_uname
mock_credential.password.return_value = TestHttpProbe.test_pw
mock_credential.fetch.side_effects = None
mock_credential_module.build.return_value = mock_credential
test_auth_url = 'https://test-auth.com'
auth_config = {
'type': 'oauth2_token',
'url': test_auth_url,
'credentials': {},
'client': {
'id': 'test-client-id',
'secret': 'test-client-secret'
}
}
expected_base64 = base64.b64encode(
(auth_config['client']['id'] + ':' +
auth_config['client']['secret']).encode()).decode('utf-8')
auth_expected_headers = {
'Authorization': 'Basic %s' % expected_base64}
auth_mock_response = json.dumps({'access_token': 'test-token'})
mock_request.post(url=test_auth_url, text=auth_mock_response,
headers=auth_expected_headers)
| |
self.fs2.t]
if reserve_type == "Uber":#2-nautical-mile diversion distance; used by McDonald & German
constraints += [self.fs2.L_D == aircraft.L_D_cruise]
constraints += [self.fs2.V == V_cruise]
R_divert = Variable("R_{divert}",2,"nautical_mile","Diversion distance")
self.R_divert = R_divert
constraints += [R_divert == self.fs2.segment_range]
self.fs3 = Hover(self,aircraft,hoverState)#landing again
self.flight_segments = [self.fs0, self.fs1, self.fs2, self.fs3]
self.levelFlight_segments = [self.fs1, self.fs2]
self.hover_segments = [self.fs0, self.fs3] #not including loiter
#Power and energy consumption by mission segment
with Vectorize(len(self.flight_segments)):
P_battery = Variable("P_{battery}","kW","Segment power draw")
E = Variable("E","kWh","Segment energy use")
#Data from hover segments
with Vectorize(len(self.hover_segments)):
CT = Variable("CT","-","Thrust coefficient")
CP = Variable("CP","-","Power coefficient")
Q_perRotor = Variable("Q_perRotor","lbf*ft","Torque per lifting rotor")
T_perRotor = Variable("T_perRotor","lbf","Thrust per lifting rotor")
P = Variable("P","kW","Total power supplied to all lifting rotors")
P_perRotor = Variable("P_perRotor","kW","Power per lifting rotor")
VT = Variable("VT","ft/s","Propeller tip speed")
omega = Variable("\omega","rpm","Propeller angular velocity")
MT = Variable("MT","-","Propeller tip Mach number")
FOM = Variable("FOM","-","Figure of merit")
p_ratio = Variable("p_{ratio}","-","Sound pressure ratio in hover")
constraints += [self.flight_segments]
constraints += [self.crew, self.passengers]
constraints += [W >= aircraft.W_empty + self.passengers.W \
+ self.crew.W]
constraints += [aircraft.TOGW >= W]
constraints += [mission_range == self.fs1.segment_range]
constraints += [hoverState]
constraints += [V_loiter == ((1/3.)**(1/4.))*V_cruise]
constraints += [E_mission >= sum(c.E for c in self.flight_segments)]
constraints += [C_eff >= E_mission]
constraints += [T_A == segment.rotorPerf.T_A for i,segment in enumerate(self.hover_segments)]
constraints += [aircraft.tailRotor_power_fraction_levelFlight == segment.tailRotor_power_fraction \
for i,segment in enumerate(self.levelFlight_segments)]
constraints += [aircraft.tailRotor_power_fraction_hover == segment.tailRotor_power_fraction \
for i,segment in enumerate(self.hover_segments)]
constraints += [t_hover == segment.t for i,segment in enumerate(self.hover_segments)]
constraints += [P_battery[i] == segment.P_battery for i,segment in enumerate(self.flight_segments)]
constraints += [E[i] == segment.E for i,segment in enumerate(self.flight_segments)]
constraints += [CT[i] == segment.rotorPerf.CT for i,segment in enumerate(self.hover_segments)]
constraints += [CP[i] == segment.rotorPerf.CP for i,segment in enumerate(self.hover_segments)]
constraints += [Q_perRotor[i] == segment.rotorPerf.Q_perRotor for i,segment in enumerate(self.hover_segments)]
constraints += [T_perRotor[i] == segment.rotorPerf.T_perRotor for i,segment in enumerate(self.hover_segments)]
constraints += [P[i] == segment.rotorPerf.P for i,segment in enumerate(self.hover_segments)]
constraints += [P_perRotor[i] == segment.rotorPerf.P_perRotor for i,segment in enumerate(self.hover_segments)]
constraints += [VT[i] == segment.rotorPerf.VT for i,segment in enumerate(self.hover_segments)]
constraints += [omega[i] == segment.rotorPerf.omega for i,segment in enumerate(self.hover_segments)]
constraints += [MT[i] == segment.rotorPerf.MT for i,segment in enumerate(self.hover_segments)]
constraints += [FOM[i] == segment.rotorPerf.FOM for i,segment in enumerate(self.hover_segments)]
constraints += [p_ratio[i] == segment.rotorPerf.p_ratio for i,segment in enumerate(self.hover_segments)]
return constraints
class OnDemandRevenueMission(Model):
    #Revenue-generating mission. Exactly the same code as OnDemandDeadheadMission.
    def setup(self, aircraft, mission_type="piloted"):
        """Set up one revenue-generating sortie: hover takeoff, level-flight
        cruise, hover landing, then time on the ground (turnaround/charging).

        Arguments
        ---------
        aircraft : aircraft model supplying weights, battery, rotor data and
            cruise L/D.
        mission_type : "piloted" (default) or autonomous; any non-piloted
            mission requires ``aircraft.autonomousEnabled``.

        Returns
        -------
        List of GPkit constraints linking mission-level variables to the
        per-segment performance models.
        """
        if not(aircraft.autonomousEnabled) and (mission_type != "piloted"):
            raise ValueError("Autonomy is not enabled for Aircraft() model.")

        # Mission-level variables
        W = Variable("W_{mission}","lbf","Weight of the aircraft during the mission")
        mission_range = Variable("mission_range","nautical_mile","Mission range")
        t_hover = Variable("t_{hover}","s","Time in hover")
        V_cruise = Variable("V_{cruise}","mph","Aircraft cruising speed")
        T_A = Variable("T/A","lbf/ft**2","Disk loading")
        C_eff = aircraft.battery.C_eff #effective battery capacity
        t_mission = Variable("t_{mission}","minutes","Time to complete mission (including charging)")
        t_flight = Variable("t_{flight}","minutes","Time in flight")
        E_mission = Variable("E_{mission}","kWh","Electrical energy used during mission")

        self.W = W
        self.mission_range = mission_range
        self.t_hover = t_hover
        self.V_cruise = V_cruise
        self.T_A = T_A
        self.C_eff = C_eff
        self.t_mission = t_mission
        self.t_flight = t_flight
        self.E_mission = E_mission
        self.mission_type = mission_type

        self.crew = Crew(mission_type=mission_type)
        self.passengers = Passengers()

        # Mission profile: takeoff hover -> cruise -> landing hover -> ground time
        hoverState = FlightState(h=0*ureg.ft)
        self.fs0 = Hover(self,aircraft,hoverState)#takeoff
        self.fs1 = LevelFlight(self,aircraft)#fly to destination
        self.fs2 = Hover(self,aircraft,hoverState)#landing
        self.time_on_ground = TimeOnGround(self)

        self.segments = [self.fs0, self.fs1, self.fs2, self.time_on_ground]
        self.flight_segments = [self.fs0, self.fs1, self.fs2]
        self.levelFlight_segments = [self.fs1]
        self.hover_segments = [self.fs0, self.fs2]

        #Power and energy consumption by mission segment
        with Vectorize(len(self.flight_segments)):
            P_battery = Variable("P_{battery}","kW","Segment power draw")
            E = Variable("E","kWh","Segment energy use")

        #Data from hover segments
        numHoverSegments = len(self.hover_segments)
        with Vectorize(numHoverSegments):
            CT = Variable("CT","-","Thrust coefficient")
            CP = Variable("CP","-","Power coefficient")
            Q_perRotor = Variable("Q_perRotor","lbf*ft","Torque per lifting rotor")
            T_perRotor = Variable("T_perRotor","lbf","Thrust per lifting rotor")
            P = Variable("P","kW","Total power supplied to all lifting rotors")
            P_perRotor = Variable("P_perRotor","kW","Power per lifting rotor")
            VT = Variable("VT","ft/s","Propeller tip speed")
            # BUGFIX: raw string -- "\o" is not a valid escape sequence and
            # triggers a SyntaxWarning on modern Python 3. The value ("\omega")
            # is unchanged.
            omega = Variable(r"\omega","rpm","Propeller angular velocity")
            MT = Variable("MT","-","Propeller tip Mach number")
            FOM = Variable("FOM","-","Figure of merit")
            p_ratio = Variable("p_{ratio}","-","Sound pressure ratio in hover")

        constraints = []
        constraints += [self.fs0.T_A == T_A]
        constraints += [self.fs1.L_D == aircraft.L_D_cruise]
        constraints += [self.fs1.V == V_cruise]
        constraints += [self.segments]
        constraints += [self.crew,self.passengers]

        # Mission weight covers empty weight plus payload (crew + passengers)
        constraints += [W >= aircraft.W_empty + self.passengers.W \
            + self.crew.W]
        constraints += [aircraft.TOGW >= W]
        constraints += [mission_range == self.fs1.segment_range]
        constraints += [p_ratio == self.fs0.rotorPerf.p_ratio]
        constraints += hoverState

        # Battery must supply the energy of every flight segment
        constraints += [E_mission >= sum(c.E for c in self.flight_segments)]
        constraints += [C_eff >= E_mission]

        constraints += [aircraft.tailRotor_power_fraction_levelFlight == segment.tailRotor_power_fraction \
            for i,segment in enumerate(self.levelFlight_segments)]
        constraints += [aircraft.tailRotor_power_fraction_hover == segment.tailRotor_power_fraction \
            for i,segment in enumerate(self.hover_segments)]

        constraints += [t_hover == segment.t for i,segment in enumerate(self.hover_segments)]
        constraints += [t_flight >= sum(c.t for c in self.flight_segments)]
        constraints += [t_mission >= t_flight + self.time_on_ground.t]

        # Mirror per-segment performance quantities into the vectorized variables
        constraints += [P_battery[i] == segment.P_battery for i,segment in enumerate(self.flight_segments)]
        constraints += [E[i] == segment.E for i,segment in enumerate(self.flight_segments)]
        constraints += [CT[i] == segment.rotorPerf.CT for i,segment in enumerate(self.hover_segments)]
        constraints += [CP[i] == segment.rotorPerf.CP for i,segment in enumerate(self.hover_segments)]
        constraints += [Q_perRotor[i] == segment.rotorPerf.Q_perRotor for i,segment in enumerate(self.hover_segments)]
        constraints += [T_perRotor[i] == segment.rotorPerf.T_perRotor for i,segment in enumerate(self.hover_segments)]
        constraints += [P[i] == segment.rotorPerf.P for i,segment in enumerate(self.hover_segments)]
        constraints += [P_perRotor[i] == segment.rotorPerf.P_perRotor for i,segment in enumerate(self.hover_segments)]
        constraints += [VT[i] == segment.rotorPerf.VT for i,segment in enumerate(self.hover_segments)]
        constraints += [omega[i] == segment.rotorPerf.omega for i,segment in enumerate(self.hover_segments)]
        constraints += [MT[i] == segment.rotorPerf.MT for i,segment in enumerate(self.hover_segments)]
        constraints += [FOM[i] == segment.rotorPerf.FOM for i,segment in enumerate(self.hover_segments)]
        constraints += [p_ratio[i] == segment.rotorPerf.p_ratio for i,segment in enumerate(self.hover_segments)]

        return constraints
class OnDemandDeadheadMission(Model):
    #Deadhead mission. Exactly the same code as OnDemandRevenueMission.
    def setup(self, aircraft, mission_type="piloted"):
        """Set up one deadhead (non-revenue repositioning) sortie: hover
        takeoff, level-flight cruise, hover landing, then time on the ground.

        Arguments
        ---------
        aircraft : aircraft model supplying weights, battery, rotor data and
            cruise L/D.
        mission_type : "piloted" (default) or autonomous; any non-piloted
            mission requires ``aircraft.autonomousEnabled``.

        Returns
        -------
        List of GPkit constraints linking mission-level variables to the
        per-segment performance models.
        """
        if not(aircraft.autonomousEnabled) and (mission_type != "piloted"):
            raise ValueError("Autonomy is not enabled for Aircraft() model.")

        # Mission-level variables
        W = Variable("W_{mission}","lbf","Weight of the aircraft during the mission")
        mission_range = Variable("mission_range","nautical_mile","Mission range")
        t_hover = Variable("t_{hover}","s","Time in hover")
        V_cruise = Variable("V_{cruise}","mph","Aircraft cruising speed")
        T_A = Variable("T/A","lbf/ft**2","Disk loading")
        C_eff = aircraft.battery.C_eff #effective battery capacity
        t_mission = Variable("t_{mission}","minutes","Time to complete mission (including charging)")
        t_flight = Variable("t_{flight}","minutes","Time in flight")
        E_mission = Variable("E_{mission}","kWh","Electrical energy used during mission")

        self.W = W
        self.mission_range = mission_range
        self.t_hover = t_hover
        self.V_cruise = V_cruise
        self.T_A = T_A
        self.C_eff = C_eff
        self.t_mission = t_mission
        self.t_flight = t_flight
        self.E_mission = E_mission
        self.mission_type = mission_type

        self.crew = Crew(mission_type=mission_type)
        self.passengers = Passengers()

        # Mission profile: takeoff hover -> cruise -> landing hover -> ground time
        hoverState = FlightState(h=0*ureg.ft)
        self.fs0 = Hover(self,aircraft,hoverState)#takeoff
        self.fs1 = LevelFlight(self,aircraft)#fly to destination
        self.fs2 = Hover(self,aircraft,hoverState)#landing
        self.time_on_ground = TimeOnGround(self)

        self.segments = [self.fs0, self.fs1, self.fs2, self.time_on_ground]
        self.flight_segments = [self.fs0, self.fs1, self.fs2]
        self.levelFlight_segments = [self.fs1]
        self.hover_segments = [self.fs0, self.fs2]

        #Power and energy consumption by mission segment
        with Vectorize(len(self.flight_segments)):
            P_battery = Variable("P_{battery}","kW","Segment power draw")
            E = Variable("E","kWh","Segment energy use")

        #Data from hover segments
        numHoverSegments = len(self.hover_segments)
        with Vectorize(numHoverSegments):
            CT = Variable("CT","-","Thrust coefficient")
            CP = Variable("CP","-","Power coefficient")
            Q_perRotor = Variable("Q_perRotor","lbf*ft","Torque per lifting rotor")
            T_perRotor = Variable("T_perRotor","lbf","Thrust per lifting rotor")
            P = Variable("P","kW","Total power supplied to all lifting rotors")
            P_perRotor = Variable("P_perRotor","kW","Power per lifting rotor")
            VT = Variable("VT","ft/s","Propeller tip speed")
            # BUGFIX: raw string -- "\o" is not a valid escape sequence and
            # triggers a SyntaxWarning on modern Python 3. The value ("\omega")
            # is unchanged.
            omega = Variable(r"\omega","rpm","Propeller angular velocity")
            MT = Variable("MT","-","Propeller tip Mach number")
            FOM = Variable("FOM","-","Figure of merit")
            p_ratio = Variable("p_{ratio}","-","Sound pressure ratio in hover")

        constraints = []
        constraints += [self.fs0.T_A == T_A]
        constraints += [self.fs1.L_D == aircraft.L_D_cruise]
        constraints += [self.fs1.V == V_cruise]
        constraints += [self.segments]
        constraints += [self.crew,self.passengers]

        # Mission weight covers empty weight plus payload (crew + passengers)
        constraints += [W >= aircraft.W_empty + self.passengers.W \
            + self.crew.W]
        constraints += [aircraft.TOGW >= W]
        constraints += [mission_range == self.fs1.segment_range]
        constraints += [p_ratio == self.fs0.rotorPerf.p_ratio]
        constraints += hoverState

        # Battery must supply the energy of every flight segment
        constraints += [E_mission >= sum(c.E for c in self.flight_segments)]
        constraints += [C_eff >= E_mission]

        constraints += [aircraft.tailRotor_power_fraction_levelFlight == segment.tailRotor_power_fraction \
            for i,segment in enumerate(self.levelFlight_segments)]
        constraints += [aircraft.tailRotor_power_fraction_hover == segment.tailRotor_power_fraction \
            for i,segment in enumerate(self.hover_segments)]

        constraints += [t_hover == segment.t for i,segment in enumerate(self.hover_segments)]
        constraints += [t_flight >= sum(c.t for c in self.flight_segments)]
        constraints += [t_mission >= t_flight + self.time_on_ground.t]

        # Mirror per-segment performance quantities into the vectorized variables
        constraints += [P_battery[i] == segment.P_battery for i,segment in enumerate(self.flight_segments)]
        constraints += [E[i] == segment.E for i,segment in enumerate(self.flight_segments)]
        constraints += [CT[i] == segment.rotorPerf.CT for i,segment in enumerate(self.hover_segments)]
        constraints += [CP[i] == segment.rotorPerf.CP for i,segment in enumerate(self.hover_segments)]
        constraints += [Q_perRotor[i] == segment.rotorPerf.Q_perRotor for i,segment in enumerate(self.hover_segments)]
        constraints += [T_perRotor[i] == segment.rotorPerf.T_perRotor for i,segment in enumerate(self.hover_segments)]
        constraints += [P[i] == segment.rotorPerf.P for i,segment in enumerate(self.hover_segments)]
        constraints += [P_perRotor[i] == segment.rotorPerf.P_perRotor for i,segment in enumerate(self.hover_segments)]
        constraints += [VT[i] == segment.rotorPerf.VT for i,segment in enumerate(self.hover_segments)]
        constraints += [omega[i] == segment.rotorPerf.omega for i,segment in enumerate(self.hover_segments)]
        constraints += [MT[i] == segment.rotorPerf.MT for i,segment in enumerate(self.hover_segments)]
        constraints += [FOM[i] == segment.rotorPerf.FOM for i,segment in enumerate(self.hover_segments)]
        constraints += [p_ratio[i] == segment.rotorPerf.p_ratio for i,segment in enumerate(self.hover_segments)]

        return constraints
class OnDemandMissionCost(Model):
#Includes both revenue and deadhead missions
def setup(self,aircraft,revenue_mission,deadhead_mission):
N_passengers = revenue_mission.passengers.N_passengers
trip_distance = revenue_mission.mission_range
cpt = Variable("cost_per_trip","-","Cost (in dollars) for one trip")
cpt_revenue = Variable("revenue_cost_per_trip","-",
"Portion of the cost per trip incurred during the revenue-generating flights")
cpt_deadhead = Variable("deadhead_cost_per_trip","-",
"Portion of the cost per trip incurred during the deadhead flights")
cptpp | |
> 0.1:
# return 0.0351* depth + 0.5538, 0.02, 0.009977*depth + 0.216978, 0.045
# else:
# print "too low depth"
# return 0.529327,0.025785, 0.217839, 0.040334
# if depth > 0.5:
# return 0.06315* (math.log(depth)) + 0.64903, 0.046154, 0.0005007*depth + 0.3311504,0.12216
# else:
# return 0.62036, 0.046154, 0.31785, 0.12216
def getPredefinedModel_F(depth):
    """Look up pretrained classification-model parameters for a given depth.

    Args:
        depth: sequencing depth of the sample pair being classified.

    Returns:
        Tuple of four floats: (matched mean, matched sd,
        unmatched mean, unmatched sd) of the correlation distributions.
    """
    # (minimum depth, parameters) pairs, scanned from the highest
    # threshold down; the first threshold exceeded wins.
    depth_models = (
        (10, (0.874546, 0.022211, 0.620549, 0.060058)),
        (5, (0.785249, 0.021017, 0.609778, 0.054104)),
        (2, (0.650573, 0.018699, 0.548972, 0.047196)),
        (1, (0.578386, 0.018526, 0.502322, 0.041186)),
        (0.5, (0.529327, 0.025785, 0.457839, 0.040334)),
    )
    for threshold, params in depth_models:
        if depth > threshold:
            return params
    # Depth at or below 0.5x: fall back to the lowest-depth model
    # (original code warned "Sample region depth is too low").
    return (0.529327, 0.025785, 0.457839, 0.040334)
# if depth > 30:
# return 0.874546, 0.022211, 0.310549, 0.060058
# elif depth > 10:
# return 0.785249,0.021017, 0.279778, 0.054104
# elif depth > 5:
# return 0.650573, 0.018699,0.238972, 0.047196
# elif depth > 2:
# return 0.578386,0.018526, 0.222322, 0.041186
# elif depth > 1:
# return 0.529327,0.025785, 0.217839, 0.040334
# else:
# print "Warning: Sample region depth is too low < 1"
# return 0.529327,0.025785, 0.217839, 0.040334
# if depth > 0.1:
# return 0.0351* depth + 0.5538, 0.02, 0.009977*depth + 0.216978, 0.045
# else:
# print "too low depth"
# return 0.529327,0.025785, 0.217839, 0.040334
# if depth > 0.5:
# return 0.06315* (math.log(depth)) + 0.64903, 0.046154, 0.0005007*depth + 0.3311504,0.12216
# else:
# return 0.62036, 0.046154, 0.31785, 0.12216
def classifying():
    """Score every sample pair and classify each as matched/unmatched.

    Reads module-level globals: `glob_scores` (per-sample score vectors,
    presumably variant allele fractions -- confirm), `feature_list`
    (per-sample usable feature sets), `features`, `mean_depth`/`real_depth`,
    `Nonzero_flag`, `outdir` and `out_tag`. Writes match calls and a
    sample-by-sample correlation matrix under `outdir`.
    """
    AUCs =[]
    wholeFeatures = 50
    temp =[]
    altFreqList = []
    keyList = []
    # Deterministic sample order: iterate sample names sorted alphabetically.
    for key in sorted(glob_scores):
        altFreqList.append(glob_scores[key])
        keyList.append(key)
    dataSetSize = len(altFreqList)
    filter_list = []
    # Enumerate each unordered pair exactly once: once sample i has been the
    # "left" element, it is filtered out of later right-hand positions.
    for i in range(0, dataSetSize):
        for j in range(0, dataSetSize):
            if i!=j:
                if keyList[j] not in filter_list:
                    temp.append([keyList[i],keyList[j]])
        filter_list.append(keyList[i])
    # Effectively a single pass (range(49, 50)); kept as a loop, presumably
    # from an earlier feature-count sweep -- confirm before simplifying.
    for iterations in range(49,wholeFeatures):
        samples = []
        numFeatures = iterations
        count = 0
        # Pearson correlation of each pair over the features both samples share.
        for i in range(0,len(temp)):
            tempA = set(feature_list[temp[i][0].strip()])
            tempB = set(feature_list[temp[i][1].strip()])
            selected_feature = tempA.intersection(tempB)
            vecA = []
            vecB = []
            idx = 0
            # `idx` tracks the position of feature k in each sample's score vector.
            for k in features:
                if k in selected_feature:
                    vecA.append(glob_scores[temp[i][0].strip()][idx])
                    vecB.append(glob_scores[temp[i][1].strip()][idx])
                idx = idx + 1
            distance = pearson_def(vecA, vecB)
            samples.append(distance)
        predStrength = []
        training_flag =0
        ####0715 Append
        output_matrix_f = open(outdir + "/output_corr_matrix.txt","w")
        output_matrix = dict()
        if out_tag!="stdout":
            out_f = open(outdir + "/" + out_tag + "_all.txt","w")
            out_matched = open(outdir + "/" + out_tag + "_matched.txt","w")
        # Correlation matrix initialised to 0; only matched pairs get filled in.
        for i in range(0, len(keyList)):
            output_matrix[keyList[i]] = dict()
            for j in range(0,len(keyList)):
                output_matrix[keyList[i]][keyList[j]] = 0
        if training_flag == 1:
            # Leave-one-out training path (disabled: training_flag is
            # hard-coded to 0 above).
            #make training set
            for i in range(0,len(samples)):
                trainMatrix= []
                trainCategory = []
                for j in range(0, len(samples)):
                    if i==j:
                        continue
                    else:
                        trainMatrix.append(samples[j])
                        trainCategory.append(classLabel[j])
                #training samples in temp
                #p0V, p1V, pAb = trainNB0(array(trainMatrix),array(trainCategory))
                p1V,p1S, p0V, p0S = trainNV(array(trainMatrix),array(trainCategory))
                result = classifyNV(samples[i],p0V,p0S, p1V, p1S)
                if result[1] == 1:
                    print str(temp[i][0]) + '\tsample is matched to\t',str(temp[i][1]),'\t', samples[i]
                predStrength.append(result[0])
        else :
            # Production path: classify with the pretrained depth-dependent model.
            for i in range(0,len(samples)):
                depth = 0
                # Use the depth over nonzero positions when available.
                if Nonzero_flag:
                    depth = min(real_depth[temp[i][0].strip()],real_depth[temp[i][1].strip()])
                else:
                    depth = min(mean_depth[temp[i][0].strip()],mean_depth[temp[i][1].strip()])
                p1V,p1S, p0V, p0S = getPredefinedModel(depth)
                result = classifyNV(samples[i],p0V,p0S, p1V, p1S)
                if result[1] ==1:
                    # Matched: record the correlation symmetrically.
                    output_matrix[temp[i][0].strip()][temp[i][1].strip()] = samples[i]
                    output_matrix[temp[i][1].strip()][temp[i][0].strip()] = samples[i]
                    if out_tag=="stdout":
                        print str(temp[i][0]) + '\tmatched\t',str(temp[i][1]),'\t', round(samples[i],4),'\t',round(depth,2)
                    else :
                        out_f.write(str(temp[i][0]) + '\tmatched\t' + str(temp[i][1]) + '\t'+ str(round(samples[i],4)) + '\t' + str(round(depth,2)) + '\n')
                        out_matched.write(str(temp[i][0]) + '\tmatched\t' + str(temp[i][1]) + '\t'+ str(round(samples[i],4)) + '\t' + str(round(depth,2)) + '\n')
                else:
                    if out_tag=="stdout":
                        print str(temp[i][0]) + '\tunmatched\t',str(temp[i][1]),'\t', round(samples[i],4),'\t',round(depth,2)
                    else :
                        out_f.write(str(temp[i][0]) + '\tunmatched\t' + str(temp[i][1]) + '\t'+ str(round(samples[i],4)) + '\t' + str(round(depth,2)) + '\n')
                predStrength.append(result[0])
            #testing sample is samples
        # Write the matrix header; sample names are shown without a ".vcf" suffix.
        output_matrix_f.write("sample_ID")
        for key in output_matrix.keys():
            if key.find(".vcf") != -1:
                output_matrix_f.write("\t" + key[0:key.index('.vcf')])
            else:
                output_matrix_f.write("\t" + key)
        output_matrix_f.write("\n")
        # for key in output_matrix.keys():
        #     for otherkey in output_matrix[key].keys():
        #         if output_matrix[key][otherkey] != 0:
        #             output_matrix[otherkey][key] = output_matrix[key][otherkey]
        # One row per sample, in the same key order as the header.
        for key in output_matrix.keys():
            if key.find(".vcf") != -1:
                output_matrix_f.write(key[0:key.index('.vcf')])
            else:
                output_matrix_f.write(key)
            for otherkey in output_matrix.keys():
                output_matrix_f.write("\t" + str(output_matrix[key][otherkey]))
            output_matrix_f.write("\n")
        output_matrix_f.close()
        if out_tag!="stdout":
            out_f.close()
            out_matched.close()
def classifying_test():
AUCs =[]
wholeFeatures = 50
temp = []
keyF = open(testsamplename,'r')
temp =[]
for k in keyF.readlines():
keyfile = k.split(":")
keyfile[0] = keyfile[0].strip() + "_1"
keyfile[1] = keyfile[1].strip() + "_2"
temp.append(keyfile)
keyF.close()
for iterations in range(49,wholeFeatures):
samples = []
numFeatures = iterations
count = 0
for i in range(0,len(temp)):
tempA = set(feature_list[temp[i][0].strip()])
tempB = set(feature_list[temp[i][1].strip()])
selected_feature = tempA.intersection(tempB)
vecA = []
vecB = []
idx = 0
for k in features:
if k in selected_feature:
vecA.append(glob_scores[temp[i][0].strip()][idx])
vecB.append(glob_scores[temp[i][1].strip()][idx])
idx = idx + 1
distance = pearson_def(vecA, vecB)
samples.append(distance)
predStrength = []
training_flag =0
####0715 Append
output_matrix_f = open(outdir + "/output_corr_matrix.txt","w")
output_matrix = dict()
if out_tag!="stdout":
out_f = open(outdir + "/" + out_tag + "_all.txt","w")
out_matched = open(outdir + "/" + out_tag + "_matched.txt","w")
for i in range(0, len(keyList)):
output_matrix[keyList[i]] = dict()
for j in range(0,len(keyList)):
output_matrix[keyList[i]][keyList[j]] = 0
if training_flag == 1:
#make training set
for i in range(0,len(samples)):
trainMatrix= []
trainCategory = []
for j in range(0, len(samples)):
if i==j:
continue
else:
trainMatrix.append(samples[j])
trainCategory.append(classLabel[j])
#training samples in temp
#p0V, p1V, pAb = trainNB0(array(trainMatrix),array(trainCategory))
p1V,p1S, p0V, p0S = trainNV(array(trainMatrix),array(trainCategory))
result = classifyNV(samples[i],p0V,p0S, p1V, p1S)
if result[1] == 1:
print str(temp[i][0]) + '\tsample is matched to\t',str(temp[i][1]),'\t', samples[i]
predStrength.append(result[0])
else :
for i in range(0,len(samples)):
depth = min(mean_depth[temp[i][0].strip()],mean_depth[temp[i][1].strip()])
p1V,p1S, p0V, p0S = getPredefinedModel(depth)
result = classifyNV(samples[i],p0V,p0S, p1V, p1S)
if result[1] ==1:
output_matrix[temp[i][0].strip()][temp[i][1].strip()] = samples[i]
output_matrix[temp[i][1].strip()][temp[i][0].strip()] = samples[i]
if out_tag=="stdout":
print str(temp[i][0]) + '\tmatched\t',str(temp[i][1]),'\t', round(samples[i],4),'\t',round(depth,2)
else :
out_f.write(str(temp[i][0]) + '\tmatched\t' + str(temp[i][1]) + '\t'+ str(round(samples[i],4)) + '\t' + str(round(depth,2)) + '\n')
out_matched.write(str(temp[i][0]) + '\tmatched\t' + str(temp[i][1]) + '\t'+ str(round(samples[i],4)) + '\t' + str(round(depth,2)) + '\n')
else:
if out_tag=="stdout":
print str(temp[i][0]) + '\tunmatched\t',str(temp[i][1]),'\t', round(samples[i],4),'\t',round(depth,2)
else :
out_f.write(str(temp[i][0]) + '\tunmatched\t' + str(temp[i][1]) + '\t'+ str(round(samples[i],4)) + '\t' + str(round(depth,2)) + '\n')
predStrength.append(result[0])
#testing sample is samples
output_matrix_f.write("sample_ID")
for key in output_matrix.keys():
output_matrix_f.write("\t" + key[0:key.index('.')])
output_matrix_f.write("\n")
# for key in output_matrix.keys():
# for otherkey in output_matrix[key].keys():
# if output_matrix[key][otherkey] != 0:
# output_matrix[otherkey][key] = output_matrix[key][otherkey]
for key in output_matrix.keys():
output_matrix_f.write(key[0:key.index('.')])
for otherkey in output_matrix.keys():
output_matrix_f.write("\t" + str(output_matrix[key][otherkey]))
output_matrix_f.write("\n")
output_matrix_f.close()
if out_tag!="stdout":
out_f.close()
out_matched.close()
def generate_R_scripts():
    """Write an R script that hierarchically clusters the correlation matrix.

    Reads globals `outdir`, `feature_list` and `pdf_tag`. The script loads
    output_corr_matrix.txt, clusters on (1 - correlation) distances with
    average linkage, and renders a dendrogram PDF. If no samples were
    processed, an empty script file is left behind.
    """
    script_file = open(outdir + "/r_script.r","w")
    if len(feature_list)==0:
        # Nothing to plot: leave the (empty) script behind and stop.
        script_file.close()
        return
    # Scale the PDF width with the (log of the) number of samples.
    if len(feature_list) < 5:
        width_value = "10"
    else:
        width_value = str(math.log10(len(feature_list))*10)
    script_lines = [
        "output_corr_matrix <- read.delim(\"" + outdir + "/output_corr_matrix.txt\")\n",
        "data = output_corr_matrix\n",
        "d3 <- as.dist((1 - data[,-1]))\n",
        "clust3 <- hclust(d3, method = \"average\")\n",
        "pdf(\"" + outdir + "/" + pdf_tag + ".pdf\", width=" + width_value + ", height=7)\n",
        "op = par(bg = \"gray85\")\n",
        "par(plt=c(0.05, 0.95, 0.2, 0.9))\n",
        "plot(clust3, lwd = 2, lty = 1,cex=0.8, xlab=\"Samples\", sub = \"\", ylab=\"Distance (1-Pearson correlation)\",hang = -1, axes = FALSE)\n",
        "axis(side = 2, at = seq(0, 1, 0.2), labels = FALSE, lwd = 2)\n",
        "mtext(seq(0, 1, 0.2), side = 2, at = seq(0, 1, 0.2), line = 1, las = 2)\n",
        "dev.off()\n",
    ]
    script_file.write("".join(script_lines))
    script_file.close()
def run_R_scripts():
    """Execute the generated clustering script via `R CMD BATCH` and wait."""
    batch_command = "R CMD BATCH " + outdir + "/r_script.r"
    process = subprocess.Popen(batch_command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    process.wait()
def remove_internal_files():
    """Delete the intermediate matrix and R-script files under `outdir`.

    Aborts if `outdir` contains a shell wildcard, because the paths are
    interpolated into an `rm -rf` shell command.
    """
    # BUGFIX: the original guard `if outdir.find("*"):` was inverted --
    # str.find returns -1 (truthy) when '*' is absent and 0 (falsy) when '*'
    # is the first character, so it exited for every safe outdir and
    # proceeded to `rm -rf` in the dangerous leading-'*' case.
    if "*" in outdir:
        sys.exit()
    for leftover in ("output_corr_matrix.txt", "r_script.r", "r_script.r.Rout"):
        command = "rm -rf " + outdir + "/" + leftover
        proc = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        return_code = proc.wait()
def getCallResult(command):
    """Run *command* through the shell and return (stdout, stderr) bytes."""
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out_data, err_data = process.communicate()
    return out_data, err_data
def run_mpileup():
SAMTOOLS=""
BCFTOOLS=""
REF=""
INSTALL_DIR=""
if "NCM_HOME" in os.environ.keys():
INSTALL_DIR=os.environ['NCM_HOME'] + "/"
else:
print "WARNNING : NCM_HOME is not defined yet. Therefore, program will try to search ncm.conf file from the current directory"
INSTALL_DIR=""
with open(INSTALL_DIR + "ncm.conf",'r') as F:
for line in F.readlines():
temp = line.split('=')
if temp[0].startswith("SAMTOOLS"):
SAMTOOLS = temp[1].strip()
elif temp[0].startswith("BCFTOOLS"):
BCFTOOLS = temp[1].strip()
elif temp[0].startswith("REF"):
REF = temp[1].strip()
# REF="/NAS/nas33-2/mpileup/hg19.fasta"
version =""
##version of samtools
samtools_version = getCallResult(SAMTOOLS)
for samtool_line in samtools_version:
if samtool_line.find("Version") != -1:
version_flag = 1
version_line = samtool_line.split("\n")
for version_tag in version_line:
if version_tag.find("Version") != -1:
version_list = version_tag.split(" ")
version = version_list[1]
print version
for sample in bam_list:
filename = sample.split("/")
tag = filename[-1][0:filename[-1].rindex(".")]
if version.startswith("0"):
| |
# Source repository: JohnKurian/TableGPT
import time
import os
import string
import queue
import encoder
from tqdm import tqdm
import sys
# BPE vocabulary: GPT-2 byte-pair encoder for the 117M model
# (loaded via the project-local `encoder` module).
enc = encoder.get_encoder("117M")
# Token id used for "no field" / empty field slots; per the original
# comment this is the BPE id of the '#' character -- TODO confirm
# against the 117M vocabulary.
field_empty = 2
# GPT-2 end-of-text token id (<|endoftext|>).
eos = 50256
def join_box(list_in):
    """
    Filters empty fields, combines multiple values into same field

    Args:
        list_in: list of "field:value" strings; numbered fields look like
            "name_1:john", "name_2:smith" and are merged in order.

    Returns:
        Tuple (out_list, sorted_by_second):
        out_list: list of (field_name, merged_value) tuples in encounter order
        sorted_by_second: the same tuples sorted by value word count,
            longest value first.
    """
    out_list = []
    current_name = ""
    current_value = ""

    for each_item in list_in:
        # BUGFIX: split on the FIRST ':' only, so values that themselves
        # contain colons (e.g. times like "12:30") are not truncated.
        parts = each_item.split(":", 1)
        field_name = parts[0]
        field_value = parts[1]

        if field_name == "":
            continue

        # Un-numbered fields are emitted immediately (unless "<none>").
        if not field_name[-1].isdigit():
            if field_value != "<none>":
                out_list.append((field_name, field_value))
            continue

        # Numbered field "name_3" -> base name "name"; consecutive numbered
        # entries with the same base are merged into one space-joined value.
        field_name = "_".join(field_name.split("_")[:-1])
        if field_name != current_name:
            if current_name != "":
                # Flush the previous run, dropping "<none>" values.
                if current_value.strip() != "<none>":
                    out_list.append((current_name, current_value.strip()))
                current_name = ""
                current_value = ""
            current_name = field_name
        current_value += (field_value + " ")

    # BUGFIX: only flush when a numbered run actually exists; the original
    # appended a spurious ("", "") tuple for inputs without numbered fields.
    if current_name != "" and current_value.strip() != "<none>":
        out_list.append((current_name, current_value.strip()))

    sorted_by_second = sorted(out_list, key=lambda tup: len(tup[1].split(" ")), reverse=True)
    return out_list, sorted_by_second
def load_dem_map(file_in):
    """
    Build a transitive-closure demonym map from a two-column CSV.

    Each line "a,b" links the two (lower-cased) terms in both directions;
    a breadth-first walk then expands every term to all terms reachable
    through any chain of links.

    Args:
        file_in: path to a CSV of "term,demonym" lines.

    Returns:
        dict mapping each term to the list of all connected terms
        (including itself) in BFS discovery order.
    """
    adjacency = {}
    with open(file_in) as handle:
        for raw_line in handle:
            columns = raw_line.strip().lower().split(",")
            left = columns[0]
            right = columns[1]
            # Link both directions, avoiding duplicate neighbours.
            adjacency.setdefault(left, [])
            if right not in adjacency[left]:
                adjacency[left].append(right)
            adjacency.setdefault(right, [])
            if left not in adjacency[right]:
                adjacency[right].append(left)

    closure = {}
    for start in adjacency:
        reachable = []
        pending = queue.Queue()
        pending.put(start)
        # Breadth-first expansion of everything connected to `start`.
        while not pending.empty():
            term = pending.get()
            if term in reachable:
                continue
            reachable.append(term)
            if term in adjacency:
                for neighbour in adjacency[term]:
                    pending.put(neighbour)
        closure[start] = reachable
    return closure
def fuzzy_match_rep(source, substring, field_name):
    """
    Replace an approximate occurrence of *substring* in *source* with
    "<field_name>" tags.

    The tokens of *substring* (minus punctuation and bracket artifacts) are
    located in *source*. If more than 40% of them appear and they span a
    window no wider than twice the token count, the whole window is replaced
    by one "<field_name>" tag per replaced word.

    Args:
        source: space-tokenised summary string.
        substring: field value to search for.
        field_name: tag name used in the replacement.

    Returns:
        The summary string, rewritten if a fuzzy match was found.
    """
    summary_tokens = source.split(" ")
    bracket_artifacts = ("(", ")", "-lsb-", "-rsb-")
    value_tokens = [
        tok for tok in substring.split(" ")
        if tok not in string.punctuation and tok not in bracket_artifacts
    ]
    if len(value_tokens) == 0:
        return source

    # Count how many value tokens appear, and the window they span.
    matched = 0
    lo = len(summary_tokens) + 1
    hi = -1
    for tok in value_tokens:
        if tok in summary_tokens:
            matched += 1
            position = summary_tokens.index(tok)
            if position < lo:
                lo = position
            if position > hi:
                hi = position

    strong_enough = float(matched) / len(value_tokens) > 0.4
    if strong_enough and hi - lo <= 2 * len(value_tokens):
        # Regard the whole spanned window as the match and tag it word-for-word.
        window = " ".join(summary_tokens[lo:hi + 1])
        tag_run = ("<" + field_name + "> ") * len(window.split(" "))
        if source.startswith(window):
            return source.replace(window + " ", tag_run)
        return source.replace(" " + window + " ", " " + tag_run)
    return source
def gen_mask_field_pos(dem_file, in_summary, in_box, out_field, out_pos, out_rpos):
    """
    Mask out the values in the summary by the corresponding fields

    For every (box, summary) line pair, field values found in the summary
    (exactly, via demonym expansion, or fuzzily) are replaced with
    "<field_name>" tags; word-level and GPT-2-BPE-level field labels and
    within-value position indices (forward and reversed) are then written
    out, one space-joined line per summary.

    Args:
        dem_file: demonymns file
        in_summary: str, summary file
        in_box: str, box file
        out_field: masked summary
        out_pos: summary with field position values
        out_rpos: summary with reversed field position values
    Returns:
        None
    """
    ### load nationality demonyms.csv
    dem_map = load_dem_map(dem_file)

    with open(in_box) as f:
        lines_box = f.readlines()
    with open(in_summary) as f:
        lines_summary = f.readlines()

    out_s = open(out_field, "w")
    out_p = open(out_pos, "w")
    out_rp = open(out_rpos, "w")

    for box, summary in tqdm(zip(lines_box, lines_summary)):
        # Normalise PTB-style bracket tokens before any string matching.
        box = box.replace("-lrb-", "(")
        box = box.replace("-rrb-", ")")
        box_list = box.strip().split("\t")
        box_out_list, box_field_list = join_box(box_list)

        summary = summary.replace("-lrb-", "(")
        summary = summary.replace("-rrb-", ")")
        tem_summary = summary.strip()
        out_summary = summary.strip()
        tem_summary_list = tem_summary.split(" ")

        # NOTE(review): these locals shadow the out_pos/out_rpos/out_field
        # path parameters, which were already consumed by open() above.
        # Label buffers default to position 0 and field '#' ("no field").
        out_pos, out_rpos, out_field = [], [], []
        out_pos_bpe, out_rpos_bpe, out_field_bpe = [], [], []

        out_bpe, _ = enc.encode(summary.strip())
        out_bpe_len = len(out_bpe)
        for ind in range(out_bpe_len):
            out_pos_bpe.append(0)
            out_rpos_bpe.append(0)
        for ind in range(out_bpe_len):
            out_field_bpe.append('#')

        for ind in range(len(tem_summary_list)):
            out_pos.append(0)
            out_rpos.append(0)
        for ind in range(len(tem_summary_list)):
            out_field.append('#')

        for (this_name, this_value) in box_field_list:
            # Map each value token to its 1-based word index, and record the
            # (start, length) of that token's BPE expansion.
            this_value_dict = {}
            this_pos_bpe_dict = {}
            prev = 1
            for ind, each_token in enumerate(this_value.split(" ")):
                # if each_token not in this_value_dict:
                this_value_dict[each_token] = ind + 1
                # GPT-2 BPE is whitespace-sensitive: tokens get a leading
                # space except the first token of a "name" field (which is
                # encoded summary-initial).
                if this_name != "name":
                    each_token = " " + each_token
                else:
                    if ind != 0:
                        each_token = " " + each_token
                bpe_tokens, bpe_tokens_original = enc.encode(each_token)
                # (start ind, len)
                this_pos_bpe_dict[ind + 1] = (prev, len(bpe_tokens))
                prev += len(bpe_tokens)

            if this_name == "name":
                bpe_value = this_value
            else:
                bpe_value = " " + this_value
            bpe_tokens, bpe_tokens_original = enc.encode(bpe_value)
            this_value_bpe_len = len(bpe_tokens)

            this_value_list_len = len(this_value.split(" "))
            # Case 1: the full value occurs verbatim inside the summary.
            if " " + this_value + " " in out_summary:
                out_summary = out_summary.replace(" " + this_value + " ", " " + ("<" + this_name + "> ") * this_value_list_len)
            # name
            elif out_summary.startswith(this_value + " "):
                out_summary = out_summary.replace(this_value + " ", ("<" + this_name + "> ") * this_value_list_len)
            # nationality
            elif this_value in dem_map:
                # Case 2: try each demonym variant of the value.
                this_value_list = dem_map[this_value]
                for this_value in this_value_list:
                    this_value_list_len = len(this_value.split(" "))
                    if " " + this_value + " " in out_summary:
                        out_summary = out_summary.replace(" " + this_value + " ", " " + ("<" + this_name + "> ") * this_value_list_len)
            else:
                # seperate nationality
                # Case 3: comma-separated multi-part values -- try each part
                # (and its demonym variants) until one matches.
                is_dem_match = 0
                this_value_list = this_value.split(" , ")
                if len(this_value_list) > 1:
                    for each_con in this_value_list:
                        if " " + each_con + " " in out_summary and each_con in dem_map:
                            each_con_len = len(each_con.split(" "))
                            out_summary = out_summary.replace(" " + each_con + " ", " " + ("<" + this_name + "> ") * each_con_len)
                            is_dem_match = 1
                            break
                        if each_con in dem_map:
                            this_con_list = dem_map[each_con]
                            for this_con in this_con_list:
                                if " " + this_con + " " in out_summary:
                                    this_con_len = len(this_con.split(" "))
                                    this_con_len = len(this_con.split(" "))
                                    out_summary = out_summary.replace(" " + this_con + " ", " " + ("<" + this_name + "> ") * this_con_len)
                                    is_dem_match = 1
                                    break
                if is_dem_match:
                    continue
                # Case 4: fall back to approximate token-overlap matching.
                out_summary = fuzzy_match_rep(out_summary, this_value, this_name)

            # Tag substitution is word-for-word, so the length must not change.
            assert len(out_summary.split(" ")) == len(tem_summary_list)

            # Translate each tagged word into word-level and BPE-level labels.
            for ind, token in enumerate(out_summary.split(" ")):
                if token == "<" + this_name + ">":
                    out_field[ind] = this_name
                    ori_token = tem_summary_list[ind]
                    if ori_token in this_value_dict:
                        out_pos[ind] = this_value_dict[ori_token]
                        out_rpos[ind] = this_value_list_len - (out_pos[ind] - 1)

                    # convert to bpe
                    ori_token_bpe = ori_token
                    if ind != 0:
                        ori_token_bpe = " " + ori_token
                    # BPE offset of this word = length of the encoded prefix.
                    if ind > 0:
                        past = tem_summary_list[:ind]
                        past = " ".join(past)
                        bpe_past, _ = enc.encode(past)
                        past_len = len(bpe_past)
                    else:
                        past_len = 0
                    bpe_tokens, bpe_tokens_original = enc.encode(ori_token_bpe)
                    for it in range(len(bpe_tokens)):
                        out_field_bpe[past_len + it] = this_name

                    if ori_token in this_value_dict:
                        bpe_pos_start, bpe_pos_len = this_pos_bpe_dict[out_pos[ind]]
                        for it in range(bpe_pos_len):
                            start = bpe_pos_start + it
                            end = this_value_bpe_len - (start - 1)
                            # Positions are clipped at 30 -- presumably the
                            # model's position-embedding limit; confirm.
                            if start > 30:
                                start = 30
                            if end > 30:
                                end = 30
                            if past_len + it >= len(out_pos_bpe):
                                this_id = past_len
                            else:
                                this_id = past_len + it
                            out_pos_bpe[this_id] = start
                            out_rpos_bpe[this_id] = end

        bpe_tokens, bpe_tokens_original = enc.encode(summary.strip())
        bpe_test = " ".join(bpe_tokens_original)

        # Sanity checks: every label stream must align with its token stream.
        assert len(out_summary.split(" ")) == len(tem_summary_list)
        assert len(out_field) == len(tem_summary_list)
        assert len(tem_summary_list) == len(out_pos)
        assert len(tem_summary_list) == len(out_rpos)
        assert len(out_field_bpe) == len(bpe_tokens)
        assert len(out_pos_bpe) == len(bpe_tokens)
        assert len(out_rpos_bpe) == len(bpe_tokens)

        out_s.write(" ".join(out_field_bpe) + "\n")
        out_p.write(" ".join([str(tmp) for tmp in out_pos_bpe]) + "\n")
        out_rp.write(" ".join([str(tmp) for tmp in out_rpos_bpe]) + "\n")

    out_s.close()
    out_p.close()
    out_rp.close()
def gen_context(subdir):
"""
Process box data to use as input to GPT
Args:
subdir: str, root path
Returns:
None
"""
boxes = []
context = []
for split in ["train", "valid", "test"]:
boxes.append(os.path.join(subdir, "original_data", split + ".box"))
context.append(os.path.join(subdir, "processed_data", split, split + ".context"))
avg_len = 0
num = 0
for ind, fboxes in enumerate(boxes):
box = open(fboxes, "r").read().strip().split('\n')
context_out = open(context[ind], "w")
for ib in box:
ib = ib.replace("-lrb-", "(")
ib = ib.replace("-rrb-", ")")
item = ib.split('\t')
box_out_list, _ = join_box(item)
write_line = []
for (this_name, this_value) in box_out_list:
if '<none>' in this_value:
continue
to_write = ""
if this_name == "name":
# for humans
if domain == "humans":
to_write = this_value + " ,"
# to_write = "name ,"
# for books
if domain == "books":
to_write = "title : " + this_value + " ,"
# for songs
if domain == "songs":
to_write = "song name : " + this_value + " ,"
else:
write_value = " " + this_value
write_name = " " + this_name.replace("_", " ")
to_write = write_name + " :" + write_value + " ,"
| |
VALUE_TYPE : `type` = `int`
The premium types' values' type.
DEFAULT_NAME : `str` = `'Undefined'`
The default name of the premium types.
Each predefined premium type can also be accessed as class attribute:
+-----------------------+---------------+-------+
| Class attribute name | name | value |
+=======================+===============+=======+
| none | none | 0 |
+-----------------------+---------------+-------+
| nitro_classic | nitro_classic | 1 |
+-----------------------+---------------+-------+
| nitro | nitro | 2 |
+-----------------------+---------------+-------+
"""
INSTANCES = {}
VALUE_TYPE = int
__slots__ = ()
# predefined
none = P(0, 'none')
nitro_classic = P(1, 'nitro_classic')
nitro = P(2, 'nitro')
@export
class RelationshipType(PreinstancedBase):
    """
    Represents a ``Relationship``'s type.

    Attributes
    ----------
    name : `str`
        The relationship type's name.
    value : `int`
        The Discord side identifier value of the relationship type.

    Class Attributes
    ----------------
    INSTANCES : `dict` of (`int`, ``RelationshipType``) items
        The predefined relation types stored in a list, so they can be accessed with their respective value as key.
        This behaviour is used to translate their Discord side value to their representation.
    VALUE_TYPE : `type` = `int`
        The relationship types' values' type.
    DEFAULT_NAME : `str` = `'Undefined'`
        The default name of the relation types.

    Each predefined relationship type can also be accessed as class attribute:

    +-----------------------+-------------------+-------+
    | Class attribute name  | name              | value |
    +=======================+===================+=======+
    | stranger              | stranger          | 0     |
    +-----------------------+-------------------+-------+
    | friend                | friend            | 1     |
    +-----------------------+-------------------+-------+
    | blocked               | blocked           | 2     |
    +-----------------------+-------------------+-------+
    | pending_incoming      | pending_incoming  | 3     |
    +-----------------------+-------------------+-------+
    | pending_outgoing      | pending_outgoing  | 4     |
    +-----------------------+-------------------+-------+
    | implicit              | implicit          | 5     |
    +-----------------------+-------------------+-------+
    """
    INSTANCES = {}
    VALUE_TYPE = int

    __slots__ = ()

    # Predefined instances, registered into INSTANCES on creation. `P` is the
    # preinstance constructor shortcut used by these enums -- presumably an
    # alias of the type's (value, name) factory; confirm in the module header.
    stranger = P(0, 'stranger')
    friend = P(1, 'friend')
    blocked = P(2, 'blocked')
    pending_incoming = P(3, 'pending_incoming')
    pending_outgoing = P(4, 'pending_outgoing')
    implicit = P(5, 'implicit')
class FriendRequestFlag(PreinstancedBase):
    """
    Represents the friend request flags of a user.

    Attributes
    ----------
    name : `str`
        The default name of the friend request flag.
    value : `int`
        Internal identifier value of the friend request flag used for lookup.

    Class Attributes
    ----------------
    INSTANCES : `dict` of (`int`, ``FriendRequestFlag``) items
        A container storing the predefined friend request flags. ``.get`` uses it to translate a friend
        request flag's value to its wrapper side representation.
    VALUE_TYPE : `type` = `int`
        The friend request flags' type.
    DEFAULT_NAME : `str` = `'Undefined'`
        The default name of the friend request flags.

    Every predefined friend request flag can also be accessed as class attribute:
    +---------------------------+---------------------------+-------+
    | Class attribute name      | name                      | value |
    +===========================+===========================+=======+
    | none                      | none                      | 0     |
    +---------------------------+---------------------------+-------+
    | mutual_guilds             | mutual_guilds             | 1     |
    +---------------------------+---------------------------+-------+
    | mutual_friends            | mutual_friends            | 2     |
    +---------------------------+---------------------------+-------+
    | mutual_guilds_and_friends | mutual_guilds_and_friends | 3     |
    +---------------------------+---------------------------+-------+
    | all                       | all                       | 4     |
    +---------------------------+---------------------------+-------+
    """
    # class related
    INSTANCES = {}
    VALUE_TYPE = int
    __slots__ = ()
    @classmethod
    def get(cls, data):
        """
        Converts the friend request flag data sent by Discord to it's wrapper side representation.

        Parameters
        ----------
        data : `dict` of (`str`, `bool`)
            Received friend request flag data.

        Returns
        -------
        friend_request_flag : ``FriendRequestFlag``
        """
        if data is None:
            return cls.none
        # 'all' overrides the individual mutual flags.
        if data.get('all', False):
            return cls.INSTANCES[4]
        # Pack the two mutual flags into a 2-bit lookup key: bit 0 = guilds, bit 1 = friends.
        lookup_key = data.get('mutual_guilds', False) + (data.get('mutual_friends', False) << 1)
        return cls.INSTANCES[lookup_key]
    def __str__(self):
        """Returns the name of the friend request flag."""
        return self.name
    def encode(self):
        """
        Returns the friend request flag's Discord side representation.

        Returns
        -------
        result : `dict` of (`str`, `bool`) items
        """
        value = self.value
        # Bit 2 means "all": the individual flags are not sent in that case.
        if (value >> 2) & 1:
            return {'all': True}
        result = {}
        if (value >> 1) & 1:
            result['mutual_friends'] = True
        if value & 1:
            result['mutual_guilds'] = True
        return result
    # predefined
    none = P(0, 'none')
    mutual_guilds = P(1, 'mutual_guilds')
    mutual_friends = P(2, 'mutual_friends')
    mutual_guilds_and_friends = P(3, 'mutual_guilds_and_friends')
    all = P(4, 'all')
class Theme(PreinstancedBase):
    """
    Represents a user's theme.
    Attributes
    ----------
    value : `str`
        The discord side identifier value of the theme.
    Class Attributes
    ----------------
    INSTANCES : `dict` of (`str`, ``Theme``) items
        Stores the predefined themes in `value` - `theme` relation. This container is accessed when converting a theme
        to it's representation.
    VALUE_TYPE : `type` = `str`
        The themes' values' type.
    DEFAULT_NAME : `str` = `'Undefined'`
        The default name of the themes.
    Each predefined theme instance can also be accessed as class attribute:
    +-----------------------+-----------+
    | Class attribute name  | value     |
    +=======================+===========+
    | dark                  | dark      |
    +-----------------------+-----------+
    | light                 | light     |
    +-----------------------+-----------+
    """
    # Registry of created themes keyed by their string value.
    INSTANCES = {}
    # Theme identifiers arrive from Discord as strings.
    VALUE_TYPE = str
    # No extra per-instance attributes beyond those declared by the base type.
    __slots__ = ()
    # predefined
    dark = P('dark', 'dark')
    light = P('light', 'light')
class TeamMembershipState(PreinstancedBase):
    """
    Represents a ``TeamMember``'s state at a ``Team``.
    Attributes
    ----------
    name : `str`
        The name of state.
    value : `int`
        The Discord side identifier value of the team membership state.
    Class Attributes
    ----------------
    INSTANCES : `dict` of (`int`, ``TeamMembershipState``) items
        Stores the created team membership state instances. This container is accessed when translating a Discord
        team membership state's value to it's representation.
    VALUE_TYPE : `type` = `int`
        The team membership states' values' type.
    DEFAULT_NAME : `str` = `'Undefined'`
        The default name of the team membership states.
    Every predefined team membership state can be accessed as class attribute as well:
    +-----------------------+-----------+-------+
    | Class attribute name  | name      | value |
    +=======================+===========+=======+
    | none                  | none      | 0     |
    +-----------------------+-----------+-------+
    | invited               | invited   | 1     |
    +-----------------------+-----------+-------+
    | accepted              | accepted  | 2     |
    +-----------------------+-----------+-------+
    """
    # Registry of created membership states keyed by their integer value.
    INSTANCES = {}
    # Membership state identifiers arrive from Discord as integers.
    VALUE_TYPE = int
    # No extra per-instance attributes beyond those declared by the base type.
    __slots__ = ()
    # predefined
    none = P(0, 'none')
    invited = P(1, 'invited')
    accepted = P(2, 'accepted')
class GuildFeature(PreinstancedBase):
"""
Represents a ``Guild``'s feature.
Attributes
----------
value : `str`
The Discord side identifier value of the guild feature.
Class Attributes
----------------
INSTANCES : `dict` of (`str`, ``GuildFeature``) items
Stores the predefined ``GuildFeature`` instances.
VALUE_TYPE : `type` = `str`
The guild features' values' type.
DEFAULT_NAME : `str` = `''`
The default name of the guild features. Guild features have the same value as name, so at their case it is not
applicable.
Every predefined guild feature can be accessed as class attribute as well:
+-------------------------------+-----------------------------------+
| Class attribute names | Value |
+===============================+===================================+
| animated_icon | ANIMATED_ICON |
+-------------------------------+-----------------------------------+
| banner | BANNER |
+-------------------------------+-----------------------------------+
| commerce | COMMERCE |
+-------------------------------+-----------------------------------+
| community | COMMUNITY |
+-------------------------------+-----------------------------------+
| discoverable | DISCOVERABLE |
+-------------------------------+-----------------------------------+
| discoverable_disabled | DISCOVERABLE_DISABLED |
+-------------------------------+-----------------------------------+
| discoverable_enabled_before | ENABLED_DISCOVERABLE_BEFORE |
+-------------------------------+-----------------------------------+
| featurable | FEATURABLE |
+-------------------------------+-----------------------------------+
| member_list_disabled | MEMBER_LIST_DISABLED |
+-------------------------------+-----------------------------------+
| more_emoji | MORE_EMOJI |
+-------------------------------+-----------------------------------+
| news | NEWS |
+-------------------------------+-----------------------------------+
| partnered | PARTNERED |
+-------------------------------+-----------------------------------+
| public | PUBLIC |
+-------------------------------+-----------------------------------+
| public_disabled | PUBLIC_DISABLED |
+-------------------------------+-----------------------------------+
| relay_enabled | RELAY_ENABLED |
+-------------------------------+-----------------------------------+
| invite_splash | INVITE_SPLASH |
+-------------------------------+-----------------------------------+
| vanity | VANITY_URL |
+-------------------------------+-----------------------------------+
| verified | VERIFIED |
+-------------------------------+-----------------------------------+
| vip | VIP_REGIONS |
+-------------------------------+-----------------------------------+
| welcome_screen | WELCOME_SCREEN_ENABLED |
+-------------------------------+-----------------------------------+
| verification_screen | MEMBER_VERIFICATION_GATE_ENABLED |
+-------------------------------+-----------------------------------+
| preview_enabled | PREVIEW_ENABLED |
+-------------------------------+-----------------------------------+
"""
INSTANCES = {}
VALUE_TYPE = str
DEFAULT_NAME = ''
__slots__ = ()
    @classmethod
    def _from_value(cls, value):
        """
        Creates a new guild feature with the given value.
        Parameters
        ----------
        value : `str`
            The guild feature's identifier value.
        Returns
        -------
        self : ``GuildFeature``
            The created guild feature.
        """
        # Bypass __init__; guild features carry no state beyond value/name.
        self = object.__new__(cls)
        self.value = value
        # Guild features use their Discord-side identifier as their name as well.
        self.name = value
        # Register the instance so later lookups by value return this same object.
        self.INSTANCES[value] = self
        return self
# predefined
animated_icon = P('ANIMATED_ICON', 'animated_icon')
banner = P('BANNER', 'banner')
commerce = P('COMMERCE', 'commerce')
community = P('COMMUNITY', 'community')
discoverable = P('DISCOVERABLE', 'discoverable')
discoverable_disabled = P('DISCOVERABLE_DISABLED', 'discoverable_disabled')
discoverable_enabled_before = P('ENABLED_DISCOVERABLE_BEFORE', 'discoverable_enabled_before')
featurable = P('FEATURABLE', 'featurable')
member_list_disabled = P('MEMBER_LIST_DISABLED', 'member_list_disabled')
more_emoji = P('MORE_EMOJI', 'more_emoji')
news = P('NEWS', 'news')
partnered = P('PARTNERED', 'partnered')
public = P('PUBLIC', 'public')
public_disabled = P('PUBLIC_DISABLED', 'public_disabled')
relay_enabled = | |
"tags": [
"foo"
],
"documentation": ""
},
{
"test_case_name": "Login with user 'FooBar' and password '<PASSWORD>'",
"arguments": {
"${username}": "FooBar",
"${password}": "<PASSWORD>"
},
"tags": [
"foo",
"2"
],
"documentation": ""
}
]
This can be accessed as usual in Robot Framework®.
``${DataDriver_DATA_LIST}[2][arguments][\${password}]`` would result in ``mode`` .
&{DataDriver_DATA_DICT}
~~~~~~~~~~~~~~~~~~~~~~~
A dictionary as suite variable that contains the same data as the list, with the test names as keys.
.. code :: json
{
"Right user empty pass": {
"test_case_name": "Right user empty pass",
"arguments": {
"${username}": "demo",
"${password}": "${EMPTY}"
},
"tags": [
"1"
],
"documentation": "This is a test case documentation of the first one."
},
"Right user wrong pass": {
"test_case_name": "Right user wrong pass",
"arguments": {
"${username}": "demo",
"${password}": "<PASSWORD>"
},
"tags": [
"2",
"3",
"foo"
],
"documentation": "This test case has the Tags 2,3 and foo"
},
"Login with user '${EMPTY}' and password '<PASSWORD>'": {
"test_case_name": "Login with user '${EMPTY}' and password '<PASSWORD>'",
"arguments": {
"${username}": "${EMPTY}",
"${password}": "<PASSWORD>"
},
"tags": [
"1",
"2",
"3",
"4"
],
"documentation": "This test case has a generated name based on template name."
},
"Login with user '${EMPTY}' and password '${EMPTY}'": {
"test_case_name": "Login with user '${EMPTY}' and password '${EMPTY}'",
"arguments": {
"${username}": "${EMPTY}",
"${password}": "${EMPTY}"
},
"tags": [
""
],
"documentation": ""
},
"Login with user '${EMPTY}' and password '<PASSWORD>'": {
"test_case_name": "Login with user '${EMPTY}' and password '<PASSWORD>'",
"arguments": {
"${username}": "${EMPTY}",
"${password}": "<PASSWORD>"
},
"tags": [
""
],
"documentation": ""
},
"Login with user 'FooBar' and password 'mode'": {
"test_case_name": "Login with user 'FooBar' and password '<PASSWORD>'",
"arguments": {
"${username}": "FooBar",
"${password}": "<PASSWORD>"
},
"tags": [
"foo",
"1"
],
"documentation": ""
},
"Login with user 'FooBar' and password '${EMPTY}'": {
"test_case_name": "Login with user 'FooBar' and password '${EMPTY}'",
"arguments": {
"${username}": "FooBar",
"${password}": "${EMPTY}"
},
"tags": [
"foo"
],
"documentation": ""
},
"Login with user 'FooBar' and password '<PASSWORD>'": {
"test_case_name": "Login with user 'FooBar' and password '<PASSWORD>'",
"arguments": {
"${username}": "FooBar",
"${password}": "<PASSWORD>"
},
"tags": [
"foo",
"2"
],
"documentation": ""
}
}
&{DataDriver_TEST_DATA}
~~~~~~~~~~~~~~~~~~~~~~~
A dictionary as test variable that contains the test data of the current test case.
This dictionary does also contain arguments that are not used in the ``Test Template`` keyword.
This can be used in Test Setup and within a test case.
.. code :: json
{
"test_case_name": "Right user wrong pass",
"arguments": {
"${username}": "demo",
"${password}": "<PASSWORD>"
},
"tags": [
"2",
"3",
"foo"
],
"documentation": "This test case has the Tags 2,3 and foo"
}
Data Sources
------------
CSV / TSV (Character-separated values)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
By default DataDriver reads csv files. With the `Encoding and CSV
Dialect <#file-encoding-and-csv-dialect>`__ settings you may configure which
structure your data source has.
XLS / XLSX Files
~~~~~~~~~~~~~~~~
To use Excel file types, you have to install DataDriver with the Extra XLS.
If you want to use Excel based data sources, you may just set the file
to the extension or you may point to the correct file. If the extension
is ".xls" or ".xlsx" DataDriver will interpret it as an Excel file.
You may select the sheet which will be read by the option ``sheet_name``.
By default it is set to 0 which will be the first table sheet.
You may use sheet index (0 is first sheet) or sheet name(case sensitive).
XLS interpreter will ignore all other options like encoding, delimiters etc.
.. code :: robotframework
*** Settings ***
Library DataDriver .xlsx
or:
.. code :: robotframework
*** Settings ***
Library DataDriver file=my_data_source.xlsx sheet_name=2nd Sheet
MS Excel and typed cells
^^^^^^^^^^^^^^^^^^^^^^^^
Microsoft Excel xls or xlsx files have the possibility to type their data
cells. Numbers are typically of the type float. If these data are not
explicitly defined as text in Excel, pandas will read them as the type
they have in Excel. Because we have to work with strings in Robot
Framework® these data are converted to string. This leads to the
situation that a European date value like "04.01.2019" (4th January
2019) is handed over to Robot Framework® in ISO time "2019-01-04
00:00:00". This may cause unwanted behavior. To mitigate this risk you
should define Excel based files explicitly as text within Excel.
Alternatively you may deactivate that string conversion.
To do so, you have to add the option ``preserve_xls_types`` to ``True``.
In that case, you will get str, float, boolean, int, datetime.time,
datetime.datetime and some others.
.. code :: robotframework
*** Settings ***
Library DataDriver file=my_data_source.xlsx preserve_xls_types=True
PICT (Pairwise Independent Combinatorial Testing)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Pict is able to generate data files based on a model file.
https://github.com/Microsoft/pict
Documentation: https://github.com/Microsoft/pict/blob/master/doc/pict.md
Requirements
^^^^^^^^^^^^
- Path to pict.exe must be set in the %PATH% environment variable.
- Data model file has the file extension ".pict"
- Pict model file must be encoded in UTF-8
How it works
^^^^^^^^^^^^
If the file option is set to a file with the extension pict, DataDriver
will hand over this file to pict.exe and let it automatically generate
a file with the extension ".pictout". This file will then be used as the data
source for the test generation. (It is tab separated and UTF-8 encoded)
Except for the file option, all other options of the library will be ignored.
.. code :: robotframework
*** Settings ***
Library DataDriver my_model_file.pict
Glob File Pattern
~~~~~~~~~~~~~~~~~
This module implements a reader class that creates a test case for each file or folder that matches the given glob pattern.
With an optional argument "arg_name" you can modify the argument that will be set. See folder example.
Example with json files:
.. code :: robotframework
*** Settings ***
Library DataDriver file=${CURDIR}/DataFiles/*_File.json reader_class=glob_reader
Library OperatingSystem
Test Template Test all Files
*** Test Cases ***
Glob_Reader_Test Wrong_File.NoJson
*** Keywords ***
Test all Files
[Arguments] ${file_name}
${file_content}= Get File ${file_name}
${content}= Evaluate json.loads($file_content)["test_case"]
Should Be Equal ${TEST_NAME} ${content}
Example with folders:
.. code :: robotframework
*** Settings ***
Library DataDriver file=${CURDIR}/FoldersToFind/*/ reader_class=glob_reader arg_name=\${folder_name}
Library OperatingSystem
Test Template Test all Files
*** Test Cases ***
Glob_Reader_Test Wrong_File.NoJson
*** Keywords ***
Test all Files
[Arguments] ${folder_name}
${content}= Get File ${folder_name}/verify.txt
Should Be Equal ${TEST_NAME} ${content}
File Encoding and CSV Dialect
-----------------------------
CSV is far away from well designed and has absolutely no "common"
format. Therefore it is possible to define your own dialect or use
predefined. The default is Excel-EU which is a semicolon separated
file.
These Settings are changeable as options of the Data Driver Library.
file=
~~~~~
.. code :: robotframework
*** Settings ***
Library DataDriver file=../data/my_data_source.csv
- None(default): Data Driver will search in the test suites folder if a
\*.csv file with the same name as the test suite \*.robot file exists
- only file extension: if you just set a file extension like ".xls" or
".xlsx" DataDriver will search
- absolute path: If an absolute path to a file is set, DataDriver tries
to find and open the given data file.
- relative path: If the option does not point to a data file as an
absolute path, Data Driver tries to find a data file relative to the
folder where the test suite is located.
encoding=
~~~~~~~~~
``encoding=`` must be set if it shall not be cp1252.
**Examples**:
``cp1252, ascii, iso-8859-1, latin-1, utf_8, utf_16, utf_16_be, utf_16_le``
**cp1252** is:
- Code Page 1252
- Windows-1252
- Windows Western European
Most characters are the same between ISO-8859-1 (Latin-1) and cp1252 except for the code points 128-159 (0x80-0x9F).
These characters are available in cp1252 but are not present in Latin-1.
``€ ‚ ƒ „ … † ‡ ˆ ‰ Š ‹ Œ Ž ‘ ’ “ ” • – — ˜ ™ š › œ ž Ÿ``
See `Python Standard Encoding <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ for more encodings
dialect=
~~~~~~~~
You may change the CSV Dialect here.
The dialect option can be one of the | |
to_replace = '{' + arg.number_str + '}'
replacement = statement.argv[int(arg.number_str)]
parts = resolved.rsplit(to_replace, maxsplit=1)
resolved = parts[0] + replacement + parts[1]
# Append extra arguments and use statement.arg_list since these arguments need their quotes preserved
for arg in statement.arg_list[macro.minimum_arg_count:]:
resolved += ' ' + arg
# Restore any terminator, suffix, redirection, etc.
return resolved + statement.post_command
    def _redirect_output(self, statement: Statement) -> utils.RedirectionSavedState:
        """Set up a command's output redirection for >, >>, and |.
        :param statement: a parsed statement from the user
        :return: a utils.RedirectionSavedState object capturing the previous output state, so that
                 _restore_output() can undo whatever redirection was set up here
        :raises: RedirectionError if an error occurs trying to pipe or redirect
        """
        import io
        import subprocess
        # Initialize the redirection saved state
        redir_saved_state = utils.RedirectionSavedState(self.stdout, sys.stdout,
                                                        self._cur_pipe_proc_reader, self._redirecting)
        # The ProcReader for this command
        cmd_pipe_proc_reader = None  # type: Optional[utils.ProcReader]
        if not self.allow_redirection:
            # Don't return since we set some state variables at the end of the function
            pass
        elif statement.pipe_to:
            # Create a pipe with read and write sides
            read_fd, write_fd = os.pipe()
            # Open each side of the pipe
            subproc_stdin = io.open(read_fd, 'r')
            new_stdout = io.open(write_fd, 'w')
            # Create pipe process in a separate group to isolate our signals from it. If a Ctrl-C event occurs,
            # our sigint handler will forward it only to the most recent pipe process. This makes sure pipe
            # processes close in the right order (most recent first).
            kwargs = dict()
            if sys.platform == 'win32':
                kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
            else:
                kwargs['start_new_session'] = True
            # For any stream that is a StdSim, we will use a pipe so we can capture its output
            proc = subprocess.Popen(statement.pipe_to,
                                    stdin=subproc_stdin,
                                    stdout=subprocess.PIPE if isinstance(self.stdout, utils.StdSim) else self.stdout,
                                    stderr=subprocess.PIPE if isinstance(sys.stderr, utils.StdSim) else sys.stderr,
                                    shell=True,
                                    **kwargs)
            # Popen was called with shell=True so the user can chain pipe commands and redirect their output
            # like: !ls -l | grep user | wc -l > out.txt. But this makes it difficult to know if the pipe process
            # started OK, since the shell itself always starts. Therefore, we will wait a short time and check
            # if the pipe process is still running.
            try:
                proc.wait(0.2)
            except subprocess.TimeoutExpired:
                pass
            # Check if the pipe process already exited
            if proc.returncode is not None:
                # The command can never deliver output, so release both pipe ends before erroring out.
                subproc_stdin.close()
                new_stdout.close()
                raise RedirectionError(
                    'Pipe process exited with code {} before command could run'.format(proc.returncode))
            else:
                redir_saved_state.redirecting = True
                cmd_pipe_proc_reader = utils.ProcReader(proc, self.stdout, sys.stderr)
                sys.stdout = self.stdout = new_stdout
        elif statement.output:
            import tempfile
            if (not statement.output_to) and (not self._can_clip):
                raise RedirectionError(
                    "Cannot redirect to paste buffer; missing 'pyperclip' and/or pyperclip dependencies")
            # Redirecting to a file
            elif statement.output_to:
                # statement.output can only contain REDIRECTION_APPEND or REDIRECTION_OUTPUT
                mode = 'a' if statement.output == constants.REDIRECTION_APPEND else 'w'
                try:
                    # Use line buffering
                    new_stdout = open(utils.strip_quotes(statement.output_to), mode=mode, buffering=1)
                except OSError as ex:
                    raise RedirectionError('Failed to redirect because - {}'.format(ex))
                redir_saved_state.redirecting = True
                sys.stdout = self.stdout = new_stdout
            # Redirecting to a paste buffer
            else:
                # Output collects in a temp file; _restore_output() copies it to the clipboard afterwards.
                new_stdout = tempfile.TemporaryFile(mode="w+")
                redir_saved_state.redirecting = True
                sys.stdout = self.stdout = new_stdout
                if statement.output == constants.REDIRECTION_APPEND:
                    # Appending, so seed the temp file with the current paste buffer contents
                    self.stdout.write(get_paste_buffer())
                    self.stdout.flush()
        # These are updated regardless of whether the command redirected
        self._cur_pipe_proc_reader = cmd_pipe_proc_reader
        self._redirecting = redir_saved_state.redirecting
        return redir_saved_state
    def _restore_output(self, statement: Statement, saved_redir_state: utils.RedirectionSavedState) -> None:
        """Handles restoring state after output redirection
        :param statement: Statement object which contains the parsed input from the user
        :param saved_redir_state: contains information needed to restore state data
        """
        if saved_redir_state.redirecting:
            # If we redirected output to the clipboard
            if statement.output and not statement.output_to:
                # Rewind the temp file and copy everything the command wrote into the paste buffer
                self.stdout.seek(0)
                write_to_paste_buffer(self.stdout.read())
            try:
                # Close the file or pipe that stdout was redirected to
                self.stdout.close()
            except BrokenPipeError:
                pass
            # Restore the stdout values
            self.stdout = saved_redir_state.saved_self_stdout
            sys.stdout = saved_redir_state.saved_sys_stdout
            # Check if we need to wait for the process being piped to
            if self._cur_pipe_proc_reader is not None:
                self._cur_pipe_proc_reader.wait()
        # These are restored regardless of whether the command redirected
        self._cur_pipe_proc_reader = saved_redir_state.saved_pipe_proc_reader
        self._redirecting = saved_redir_state.saved_redirecting
def cmd_func(self, command: str) -> Optional[Callable]:
"""
Get the function for a command
:param command: the name of the command
:Example:
>>> helpfunc = self.cmd_func('help')
helpfunc now contains a reference to the ``do_help`` method
"""
func_name = self._cmd_func_name(command)
if func_name:
return getattr(self, func_name)
def _cmd_func_name(self, command: str) -> str:
"""Get the method name associated with a given command.
:param command: command to look up method name which implements it
:return: method name which implements the given command
"""
target = constants.COMMAND_FUNC_PREFIX + command
return target if callable(getattr(self, target, None)) else ''
# noinspection PyMethodOverriding
def onecmd(self, statement: Union[Statement, str], *, add_to_history: bool = True) -> bool:
""" This executes the actual do_* method for a command.
If the command provided doesn't exist, then it executes default() instead.
:param statement: intended to be a Statement instance parsed command from the input stream, alternative
acceptance of a str is present only for backward compatibility with cmd
:param add_to_history: If True, then add this command to history. Defaults to True.
:return: a flag indicating whether the interpretation of commands should stop
"""
# For backwards compatibility with cmd, allow a str to be passed in
if not isinstance(statement, Statement):
statement = self._input_line_to_statement(statement)
func = self.cmd_func(statement.command)
if func:
# Check to see if this command should be stored in history
if statement.command not in self.exclude_from_history and \
statement.command not in self.disabled_commands and add_to_history:
self.history.append(statement)
stop = func(statement)
else:
stop = self.default(statement)
if stop is None:
stop = False
return stop
def default(self, statement: Statement) -> Optional[bool]:
"""Executed when the command given isn't a recognized command implemented by a do_* method.
:param statement: Statement object with parsed input
"""
if self.default_to_shell:
if 'shell' not in self.exclude_from_history:
self.history.append(statement)
# noinspection PyTypeChecker
return self.do_shell(statement.command_and_args)
else:
err_msg = self.default_error.format(statement.command)
# Set apply_style to False so default_error's style is not overridden
self.perror(err_msg, apply_style=False)
    def read_input(self, prompt: str, *, allow_completion: bool = False) -> str:
        """
        Read input from appropriate stdin value. Also allows you to disable tab completion while input is being read.
        :param prompt: prompt to display to user
        :param allow_completion: if True, then tab completion of commands is enabled. This generally should be
                                 set to False unless reading the command line. Defaults to False.
        :return: the line read from stdin with all trailing new lines removed
        :raises: any exceptions raised by input() and stdin.readline()
        """
        completion_disabled = False
        orig_completer = None
        def disable_completion():
            """Turn off completion while entering input"""
            nonlocal orig_completer
            nonlocal completion_disabled
            if self._completion_supported() and not completion_disabled:
                # Remember the active completer so it can be restored later
                orig_completer = readline.get_completer()
                # Install a completer that never offers suggestions
                readline.set_completer(lambda *args, **kwargs: None)
                completion_disabled = True
        def enable_completion():
            """Restore tab completion when finished entering input"""
            nonlocal completion_disabled
            if self._completion_supported() and completion_disabled:
                readline.set_completer(orig_completer)
                completion_disabled = False
        # Check we are reading from sys.stdin
        if self.use_rawinput:
            if sys.stdin.isatty():
                try:
                    # Deal with the vagaries of readline and ANSI escape codes
                    safe_prompt = rl_make_safe_prompt(prompt)
                    with self.sigint_protection:
                        # Check if tab completion should be disabled
                        if not allow_completion:
                            disable_completion()
                    line = input(safe_prompt)
                finally:
                    with self.sigint_protection:
                        # Check if we need to re-enable tab completion
                        if not allow_completion:
                            enable_completion()
            else:
                line = input()
                if self.echo:
                    sys.stdout.write('{}{}\n'.format(prompt, line))
        # Otherwise read from self.stdin
        else:
            if self.stdin.isatty():
                # on a tty, print the prompt first, then read the line
                self.poutput(prompt, end='')
                self.stdout.flush()
                line = self.stdin.readline()
                # EOF on a tty is reported as the literal command 'eof'
                if len(line) == 0:
                    line = 'eof'
            else:
                # we are reading from a pipe, read the line to see if there is
                # anything there, if so, then decide whether to print the
                # prompt or not
                line = self.stdin.readline()
                if len(line):
                    # we read something, output the prompt and the something
                    if self.echo:
                        self.poutput('{}{}'.format(prompt, line))
                else:
                    # Empty read means the pipe is exhausted: signal EOF
                    line = 'eof'
        return line.rstrip('\r\n')
def _read_command_line(self, prompt: str) -> str:
"""
Read command line from appropriate stdin
:param prompt: prompt to display to user
:return: command line text of 'eof' if an EOFError was caught
:raises: whatever exceptions are raised by input() except for EOFError
"""
try:
# Wrap in try since terminal_lock may not be locked
try:
# Command line is about to | |
import random
import time
from time import sleep
# Decorative separator line printed between scenes.
fancy_line = "~" * 75
# ANSI escape sequences for emphasized (bold) terminal text.
bold = "\033[1m"
reset_bold = "\033[0m"
# Target characters per line for slow_print's word wrapping.
text_length = 80
# Player condition flag -- presumably toggled by later game events; TODO confirm.
good_shape = True
# Descriptions of the other contestants.
# NOTE(review): contains duplicate entries ('young woman', 'middle-aged man') -- presumably
# intentional to weight random draws; confirm.
players = ['young woman', 'middle-aged woman', 'old man', 'middle-aged man',\
'body builder', 'teenager', 'middle-aged man', 'old lady', 'intelligent man', 'young woman']
# Outcome/choice pools used by the mini games.
coin = ['heads', 'tails']
even_marbles = [2, 4, 6, 8, 10]
odd_marbles = [1, 3, 5, 7, 9]
even_or_odd = ['even', 'odd']
player_number = ['player_1', 'player_2']
left_right = ['left', 'right']
def slow_print(s, line_length):
    """Print s one character at a time with a short delay, wrapping lines at word boundaries.

    A line break is inserted at the first space or newline encountered once a
    multiple of line_length characters has been reached.
    """
    at_break = False
    for idx, ch in enumerate(s):
        if idx > 0 and (idx + 1) % line_length == 0:
            at_break = True
        if at_break and ch in (" ", "\n"):
            # Replace the separator with a line break instead of printing it
            print("")
            at_break = False
        else:
            print(ch, end="")
            sleep(1 / 25)
def start():
    """Entry point: display the title, wait for the player, then open the first scene."""
    title = "Welcome to Squid Game\n"
    slow_print(bold + title + reset_bold, text_length)
    input("Press Enter to Continue: ")
    home()
def home():
    """Opening scene: the player chooses between job hunting and gambling."""
    slow_print(bold + "\nYou recently lost your job and have a hard time living with the little money available,\
and also owe people some money.\n" + reset_bold, text_length)
    print("1 - Apply for a job.")
    print("2 - Gamble the rest of the money you have at a casino.")
    choice = input("Enter your choice: ")
    # Keep prompting until a valid option is entered
    while choice not in ('1', '2'):
        choice = input("Invalid Answer. Enter your choice again(1 or 2): ")
    if choice == '1':
        job()
    else:
        casino()
def job():
    """Failed interview scene: the player chooses between the casino and the train station."""
    slow_print(
        bold + "\nYou forgot to wake up early the day of the interview, and arrive an hour late and fail the interview.\n" + reset_bold,
        text_length)
    print("Make a decision")
    print("1- Gamble the rest of the money you have at a casino.")
    print("2- Walk to the train station to go home.")
    choice = input("Enter your choice: ")
    # Keep prompting until a valid option is entered
    while choice not in ('1', '2'):
        choice = input("Invalid Answer. Enter your choice again(1 or 2): ")
    if choice == '1':
        casino()
    else:
        train_station()
def casino():
    """Casino scene: the player loses everything and must walk to the train station."""
    message = "\nYou start making money, however get greedy and lose it all in the end.\n"
    slow_print(bold + message + reset_bold, text_length)
    print("You are forced to walk to the train station to go home.")
    input("Press Enter to Continue: ")
    train_station()
def train_station():
    """Station scene: a stranger's game leads to the recruitment card."""
    slow_print(bold + "\nYou arrive at the train station and someone offers you to play a game to win some money,\
and eventually you make some money from the game. He then passes a card to you with a number\
on it and tells you to call it to make a lot more money.\n" + reset_bold, text_length)
    print("1 - Call the number and accept the offer.")
    print("2 - Ignore the card, and buy lunch with the money. ")
    option = input("Enter your choice: ")
    # Re-prompt until the player picks a listed option.
    while option not in ('1', '2'):
        option = input("Invalid Answer. Enter your choice again(1 or 2): ")
    {'1': van, '2': financial}[option]()
def financial():
    """One week later, broke again — the card is the only way back in."""
    slow_print(bold + "\nOne week has passed, and poor financial decisions have led to you losing all the money.\
However, you still have the card.\n" + reset_bold, text_length)
    print("1 - Throw out the card and find a new job")
    print("2 - Call the number and accept the offer.")
    option = input("Enter your choice: ")
    # Re-prompt until the player picks a listed option.
    while option not in ('1', '2'):
        option = input("Invalid Answer. Enter your choice again(1 or 2): ")
    {'1': homeless, '2': van}[option]()
def homeless():
    """Losing ending: the player never recovers financially. Restarts the game."""
    slow_print(bold + "\nYou Lose\nafter a couple days, you quickly realize that you have no chance of\
getting a job with the poor living conditions you're in, and live the rest of your life homeless.\n" + reset_bold, text_length)
    input("Press Enter to Restart: ")
    # Restart from the opening scene (recursive restart loop used throughout).
    home()
def van():
    """Facility-arrival scene.

    The player decides whether to ally with other contestants; the choice
    is carried forward through the rest of the game as `has_friends`.
    """
    slow_print(bold + "\nYou get picked up in a van and wake up in a facility with hundreds of other people.\
The people running the facility announce that there will be 6 games played over 6 days with a grand prize of 36 million USD.\n" + reset_bold, text_length)
    print("1 - Try to form an alliance ")
    print("2 - Sit alone and ignore others")
    option = input("Enter your choice: ")
    # Re-prompt until the player picks a listed option.
    while option not in ('1', '2'):
        option = input("Invalid Answer. Enter your choice again(1 or 2): ")
    # Previously two independent `if` blocks each duplicated the call (and
    # the second was re-evaluated after returning from the first); derive
    # the flag directly instead.
    first_game(option == '1')
def first_game(has_friends):
    """First game: red light, green light with a 5-minute limit.

    Only the patient-but-steady strategy (option 2) survives.
    """
    slow_print(bold + "\nThe first game is announced and it is red light, green light. However,\
you have only 5 minutes to get across the line.\n" + reset_bold, text_length)
    print("1 - Take your time and watch your step.")
    print("2 - Stand behind people to hide movement and move at a comfortable pace.")
    print("3 - Run and try to get across quickly.")
    option = input("Enter your choice: ")
    # Re-prompt until the player picks a listed option.
    while option not in ('1', '2', '3'):
        option = input("Invalid Answer. Enter your choice again(1, 2, or 3): ")
    # Previously three independent `if` statements; later conditions were
    # re-checked after returning from the chosen branch. An elif chain is
    # consistent with the rest of the file and dispatches exactly once.
    if option == '1':
        lose_slow()
    elif option == '2':
        first_win(has_friends)
    else:
        lose_fast()
def lose_slow():
    """Losing ending for moving too slowly in the first game. Restarts."""
    message = "\nYou Lose\nThe timer runs out you get shot after only making it past halfway.\n"
    slow_print(bold + message + reset_bold, text_length)
    input("Press Enter to Restart: ")
    home()
def lose_fast():
    """Losing ending for rushing the first game. Restarts the game."""
    slow_print(bold + "\nYou Lose\nYou run at a quick pace and almost make it to the finish line,\
however you trip and get shot.\n" + reset_bold, text_length)
    input("Press Enter to Restart: ")
    # Restart from the opening scene.
    home()
def first_win(has_friends):
    """Survived the first game; carry `has_friends` into the second."""
    slow_print(bold + "\nYou barely get across with only 20 seconds left on the timer and\
then head back to the main room\n" + reset_bold, text_length)
    input("Press Enter to Continue: ")
    second_game(has_friends)
def second_game(has_friends):
    """Second game: pick one of four shapes before Dalgona is revealed."""
    slow_print(bold + "\nThe hosts have now brought everyone to play a new game in an indoor playground.\
In front of you, there are four shapes to choose from.\n" + reset_bold, text_length)
    print("1 - \u25B3")
    print("2 - \u2602")
    print("3 - \u2299")
    print("4 - \u2605")
    option = input("Choose a shape: ")
    # Re-prompt until the player picks a listed option.
    while option not in ('1', '2', '3', '4'):
        option = input("Invalid Answer. Enter your choice again(1, 2, 3, or 4): ")
    # Triangle (1) and circle (3) are the easy shapes; umbrella (2) and
    # star (4) are not — the flag feeds dalgona()'s win/lose logic.
    dalgona(option in ('1', '3'), has_friends)
def dalgona(shape, has_friends):
    """Dalgona carving scene.

    Arguments:
        shape: True when the player picked an easy shape in second_game();
            only then can the slow-and-careful technique (option 1) succeed.
        has_friends: Alliance flag threaded through the scenes.
    """
    slow_print(bold + "\nThe game is now revealed as Dalgona. Players must carve out their\
shape using a pin without any cracks within a 10 minute time frame.\n" + reset_bold, text_length)
    print("1 - Carve it out slowly and carefully.")
    print("2 - Use a lighter you found on the floor.")
    print("3 - Use your tongue and lick the cookie to melt the shape out.")
    # BUG FIX: prompt previously said "Choose a shape: " (copy-pasted from
    # second_game); the player is choosing a technique here, so use the
    # standard prompt used by every other menu in the file.
    option = input("Enter your choice: ")
    while option not in ('1', '2', '3'):
        option = input("Invalid Answer. Enter your choice again(1, 2, or 3): ")
    if option == '1':
        # Careful carving only works on an easy shape.
        if shape:
            win_1(has_friends)
        else:
            lose_dalgona()
    elif option == '2':
        win_2(has_friends)
    else:
        win_3(has_friends)
def lose_dalgona():
    """Losing ending for failing the Dalgona carving. Restarts the game."""
    slow_print(bold + "\nYou Lose.\nThe timer runs out and you are not even close to be finished,\
and you must now suffer the consequences.\n" + reset_bold, text_length)
    input("Press Enter to Restart: ")
    # Restart from the opening scene.
    home()
def win_1(has_friends):
    """Dalgona win via careful carving; proceed to the night fight."""
    message = "\nCongratulations, you carved it out with over 3 minutes left.\n"
    slow_print(bold + message + reset_bold, text_length)
    input("Press Enter to Continue: ")
    midnight_fight(has_friends)
def win_2(has_friends):
    """Dalgona win via the lighter trick; proceed to the night fight."""
    message = "\nThe lighter really helped and you finished with over 5 minutes left.\n"
    slow_print(bold + message + reset_bold, text_length)
    input("Press Enter to Continue: ")
    midnight_fight(has_friends)
def win_3(has_friends):
    """Dalgona win via licking the cookie; proceed to the night fight."""
    message = "\nSeems like an odd technique, however it worked in the end and you survive.\n"
    slow_print(bold + message + reset_bold, text_length)
    input("Press Enter to Continue: ")
    midnight_fight(has_friends)
def midnight_fight(has_friends):
slow_print(bold + "\nAfter resting for a little bit, a fight breaks out and the guards are not preventing it.\
You soon realize that people will try to kill each other and you must make a decision.\n" + reset_bold, text_length)
if has_friends == True:
print("1 - Work with your team and look out for each other's backs to increase chances of survival.")
print("2 - Go around and try to kill as many people as possible to increase the | |
# <gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Rank trials module.
On each similarity judgment trial, an agent judges the similarity
between a single query stimulus and multiple reference stimuli.
Classes:
RankObservations: Judged 'Rank' trials.
"""
import copy
import h5py
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import backend as K
from psiz.trials.similarity.rank.rank_trials import RankTrials
class RankObservations(RankTrials):
"""Object that encapsulates seen trials.
The attributes and behavior of RankObservations are largely inherited
from RankTrials.
Attributes:
n_trial: An integer indicating the number of trials.
stimulus_set: An integer matrix containing indices that
indicate the set of stimuli used in each trial. Each row
indicates the stimuli used in one trial. The first column
is the query stimulus. The remaining, columns indicate
reference stimuli. Negative integers are used as
placeholders to indicate non-existent references.
shape = (n_trial, max(n_reference) + 1)
n_reference: An integer array indicating the number of
references in each trial.
shape = (n_trial,)
n_select: An integer array indicating the number of references
selected in each trial.
shape = (n_trial,)
is_ranked: A Boolean array indicating which trials require
reference selections to be ranked.
shape = (n_trial,)
config_idx: An integer array indicating the
configuration of each trial. The integer is an index
referencing the row of config_list and the element of
outcome_idx_list.
shape = (n_trial,)
config_list: A DataFrame object describing the unique trial
configurations.
outcome_idx_list: A list of 2D arrays indicating all possible
outcomes for a trial configuration. Each element in the
list corresponds to a trial configuration in config_list.
Each row of the 2D array indicates one potential outcome.
The values in the rows are the indices of the the reference
stimuli (as specified in the attribute `stimulus_set`.
groups: An integer 2D array indicating the group membership
of each trial. It is assumed that `groups` is composed of
integers from [0, M-1] where M is the total number of
groups for a particular column.
shape = (n_trial, n_col)
agent_id: An integer array indicating the agent ID of a trial.
It is assumed that all IDs are non-negative and that
observations with the same agent ID were judged by a single
agent.
shape = (n_trial,)
session_id: An integer array indicating the session ID of a
trial. It is assumed that all IDs are non-negative. Trials
with different session IDs were obtained during different
sessions.
shape = (n_trial,)
weight: An float array indicating the inference weight of each
trial.
shape = (n_trial,)
rt_ms: An array indicating the response time (in milliseconds)
of the agent for each trial.
Notes:
stimulus_set: The order of the reference stimuli is important.
As usual, the the first column contains indices indicating
query stimulus. The remaining columns contain indices
indicating the reference stimuli. An agent's selected
references are listed first (in order of selection if the
trial is ranked) and remaining unselected references are
listed in any order.
Unique configurations and configuration IDs are determined by
'groups' in addition to the usual 'n_reference',
'n_select', and 'is_ranked' variables.
Methods:
subset: Return a subset of judged trials given an index.
set_groups: Override the group ID of all trials.
set_weight: Override the weight of all trials.
save: Save the observations data structure to disk.
"""
def __init__(self, stimulus_set, n_select=None, is_ranked=None,
             groups=None, agent_id=None, session_id=None, weight=None,
             rt_ms=None):
    """Initialize observed Rank trials.

    Extends initialization of SimilarityTrials.

    Arguments:
        stimulus_set: Stimulus index matrix. The query occupies the
            first column; an agent's selected references are listed
            first (in order of selection if the trial is ranked) and
            the remaining references follow. See SimilarityTrials.
        n_select (optional): See SimilarityTrials.
        is_ranked (optional): See SimilarityTrials.
        groups (optional): 2D integer array of group memberships,
            composed of integers from [0, M-1] where M is the total
            number of groups. shape = (n_trial, n_col)
        agent_id (optional): Non-negative integer array of agent IDs;
            trials sharing an ID were judged by the same agent.
            shape = (n_trial,)
        session_id (optional): Non-negative integer array of session
            IDs; trials with different IDs came from different
            sessions. shape = (n_trial,)
        weight (optional): Float array of inference weights.
            shape = (n_trial,)
        rt_ms (optional): Response times in milliseconds.
            shape = (n_trial,)
    """
    RankTrials.__init__(self, stimulus_set, n_select, is_ranked)
    # For each optional attribute, fall back to a neutral default or
    # validate the caller-supplied value.
    self.groups = (
        np.zeros([self.n_trial, 1], dtype=np.int32)
        if groups is None else self._check_groups(groups)
    )
    self.agent_id = (
        np.zeros((self.n_trial), dtype=np.int32)
        if agent_id is None else self._check_agent_id(agent_id)
    )
    self.session_id = (
        np.zeros((self.n_trial), dtype=np.int32)
        if session_id is None else self._check_session_id(session_id)
    )
    self.weight = (
        np.ones((self.n_trial))
        if weight is None else self._check_weight(weight)
    )
    # -1 marks "no response time recorded".
    self.rt_ms = (
        -np.ones((self.n_trial))
        if rt_ms is None else self._check_rt(rt_ms)
    )
    # Derive the unique trial-configuration bookkeeping last, once all
    # attributes are in place.
    self._set_configuration_data(
        self.n_reference, self.n_select, self.is_ranked, self.groups
    )
def _check_agent_id(self, agent_id):
    """Validate and coerce the `agent_id` argument.

    Returns the array cast to int32. Raises ValueError on a length
    mismatch with `stimulus_set` or on negative IDs.
    """
    agent_id = agent_id.astype(np.int32)
    # One entry per trial is required.
    if agent_id.shape[0] != self.n_trial:
        raise ValueError((
            "The argument 'agent_id' must have the same length as the "
            "number of rows in the argument 'stimulus_set'."))
    # IDs must be non-negative.
    n_bad = np.sum(agent_id < 0)
    if n_bad != 0:
        raise ValueError((
            "The parameter 'agent_id' contains integers less than 0. "
            "Found {0} bad trial(s).").format(n_bad))
    return agent_id
def _check_session_id(self, session_id):
    """Validate and coerce the `session_id` argument.

    Returns the array cast to int32. Raises ValueError on a length
    mismatch with `stimulus_set` or on negative IDs.
    """
    session_id = session_id.astype(np.int32)
    # One entry per trial is required.
    if session_id.shape[0] != self.n_trial:
        raise ValueError((
            "The argument 'session_id' must have the same length as the "
            "number of rows in the argument 'stimulus_set'."))
    # IDs must be non-negative.
    n_bad = np.sum(session_id < 0)
    if n_bad != 0:
        raise ValueError((
            "The parameter 'session_id' contains integers less than 0. "
            "Found {0} bad trial(s).").format(n_bad))
    return session_id
def _check_weight(self, weight):
    """Validate and coerce the `weight` argument.

    Returns the array cast to float64. Raises ValueError on a length
    mismatch with `stimulus_set`.
    """
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin `float` is the documented equivalent (float64).
    weight = weight.astype(float)
    # Check shape agreement.
    if not (weight.shape[0] == self.n_trial):
        raise ValueError((
            "The argument 'weight' must have the same length as the "
            "number of rows in the argument 'stimulus_set'."))
    return weight
def _check_rt(self, rt_ms):
    """Validate and coerce the `rt_ms` argument.

    Returns the array cast to float64. Raises ValueError on a length
    mismatch with `stimulus_set`.
    """
    # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin `float` is the documented equivalent (float64).
    rt_ms = rt_ms.astype(float)
    # Check shape agreement.
    if not (rt_ms.shape[0] == self.n_trial):
        raise ValueError((
            "The argument 'rt_ms' must have the same length as the "
            "number of rows in the argument 'stimulus_set'."))
    return rt_ms
def subset(self, index):
    """Return the indexed subset of trials as a new RankObservations.

    Arguments:
        index: The indices corresponding to the subset.

    Returns:
        A new RankObservations object.
    """
    # Slice every per-trial attribute with the same index.
    selected = dict(
        n_select=self.n_select[index],
        is_ranked=self.is_ranked[index],
        groups=self.groups[index],
        agent_id=self.agent_id[index],
        session_id=self.session_id[index],
        weight=self.weight[index],
        rt_ms=self.rt_ms[index],
    )
    return RankObservations(self.stimulus_set[index, :], **selected)
def _set_configuration_data(
self, n_reference, n_select, is_ranked, groups,
session_id=None):
"""Generate a unique ID for each trial configuration.
Helper function that generates a unique ID for each of the
unique trial configurations in the provided data set.
Arguments:
n_reference: An integer array indicating the number of
references in each trial.
shape = (n_trial,)
n_select: An integer array indicating the number of
references selected in each trial.
shape = (n_trial,)
is_ranked: Boolean array indicating which trials had
selected references that were ordered.
shape = (n_trial,)
groups:
shape = (n_trial,)
session_id: An integer array indicating the session ID of
a trial. It is assumed that observations with the same
session ID were judged by a single agent. A single
agent may have completed multiple sessions.
shape = (n_trial,)
| |
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def default_r(self):
    """Return the reversed NCL 'default' colormap, registering it on first access."""
    cname = "default_r"
    # NOTE(review): matplotlib.cm._cmap_registry is private API — verify the pinned matplotlib version.
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "default.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def detail(self):
    """Return the NCL 'detail' colormap, registering it on first access."""
    cname = "detail"
    # NOTE(review): matplotlib.cm._cmap_registry is private API — verify the pinned matplotlib version.
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "detail.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def detail_r(self):
    """Return the reversed NCL 'detail' colormap, registering it on first access."""
    cname = "detail_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "detail.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def drought_severity(self):
    """Return the NCL 'drought_severity' colormap, registering it on first access."""
    cname = "drought_severity"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "drought_severity.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def drought_severity_r(self):
    """Return the reversed NCL 'drought_severity' colormap, registering it on first access."""
    cname = "drought_severity_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "drought_severity.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def example(self):
    """Return the NCL 'example' colormap, registering it on first access."""
    cname = "example"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "example.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def example_r(self):
    """Return the reversed NCL 'example' colormap, registering it on first access."""
    cname = "example_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "example.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def extrema(self):
    """Return the NCL 'extrema' colormap, registering it on first access."""
    cname = "extrema"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "extrema.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def extrema_r(self):
    """Return the reversed NCL 'extrema' colormap, registering it on first access."""
    cname = "extrema_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "extrema.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def gauss3(self):
    """Return the NCL 'gauss3' colormap, registering it on first access."""
    cname = "gauss3"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "gauss3.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def gauss3_r(self):
    """Return the reversed NCL 'gauss3' colormap, registering it on first access."""
    cname = "gauss3_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "gauss3.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def grads_default(self):
    """Return the NCL 'grads_default' colormap, registering it on first access."""
    cname = "grads_default"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "grads_default.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def grads_default_r(self):
    """Return the reversed NCL 'grads_default' colormap, registering it on first access."""
    cname = "grads_default_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "grads_default.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def grads_rainbow(self):
    """Return the NCL 'grads_rainbow' colormap, registering it on first access."""
    cname = "grads_rainbow"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "grads_rainbow.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def grads_rainbow_r(self):
    """Return the reversed NCL 'grads_rainbow' colormap, registering it on first access."""
    cname = "grads_rainbow_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "grads_rainbow.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def gscyclic(self):
    """Return the NCL 'gscyclic' colormap, registering it on first access."""
    cname = "gscyclic"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "gscyclic.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def gscyclic_r(self):
    """Return the reversed NCL 'gscyclic' colormap, registering it on first access."""
    cname = "gscyclic_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "gscyclic.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def gsdtol(self):
    """Return the NCL 'gsdtol' colormap, registering it on first access."""
    cname = "gsdtol"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "gsdtol.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def gsdtol_r(self):
    """Return the reversed NCL 'gsdtol' colormap, registering it on first access."""
    cname = "gsdtol_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "gsdtol.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def gsltod(self):
    """Return the NCL 'gsltod' colormap, registering it on first access."""
    cname = "gsltod"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "gsltod.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def gsltod_r(self):
    """Return the reversed NCL 'gsltod' colormap, registering it on first access."""
    cname = "gsltod_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "gsltod.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def gui_default(self):
    """Return the NCL 'gui_default' colormap, registering it on first access."""
    cname = "gui_default"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "gui_default.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def gui_default_r(self):
    """Return the reversed NCL 'gui_default' colormap, registering it on first access."""
    cname = "gui_default_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "gui_default.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def helix(self):
    """Return the NCL 'helix' colormap, registering it on first access."""
    cname = "helix"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "helix.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def helix_r(self):
    """Return the reversed NCL 'helix' colormap, registering it on first access."""
    cname = "helix_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "helix.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def helix1(self):
    """Return the NCL 'helix1' colormap, registering it on first access."""
    cname = "helix1"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "helix1.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def helix1_r(self):
    """Return the reversed NCL 'helix1' colormap, registering it on first access."""
    cname = "helix1_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "helix1.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def hlu_default(self):
    """Return the NCL 'hlu_default' colormap, registering it on first access."""
    cname = "hlu_default"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "hlu_default.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def hlu_default_r(self):
    """Return the reversed NCL 'hlu_default' colormap, registering it on first access."""
    cname = "hlu_default_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "hlu_default.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def hotcold_18lev(self):
    """Return the NCL 'hotcold_18lev' colormap, registering it on first access."""
    cname = "hotcold_18lev"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "hotcold_18lev.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def hotcold_18lev_r(self):
    """Return the reversed NCL 'hotcold_18lev' colormap, registering it on first access."""
    cname = "hotcold_18lev_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "hotcold_18lev.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def hotcolr_19lev(self):
    """Return the NCL 'hotcolr_19lev' colormap, registering it on first access."""
    cname = "hotcolr_19lev"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "hotcolr_19lev.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def hotcolr_19lev_r(self):
    """Return the reversed NCL 'hotcolr_19lev' colormap, registering it on first access."""
    cname = "hotcolr_19lev_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "hotcolr_19lev.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def hotres(self):
    """Return the NCL 'hotres' colormap, registering it on first access."""
    cname = "hotres"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "hotres.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def hotres_r(self):
    """Return the reversed NCL 'hotres' colormap, registering it on first access."""
    cname = "hotres_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "hotres.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def lithology(self):
    """Return the NCL 'lithology' colormap, registering it on first access."""
    cname = "lithology"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "lithology.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def lithology_r(self):
    """Return the reversed NCL 'lithology' colormap, registering it on first access."""
    cname = "lithology_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "lithology.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def matlab_hot(self):
    """Return the NCL 'matlab_hot' colormap, registering it on first access."""
    cname = "matlab_hot"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "matlab_hot.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def matlab_hot_r(self):
    """Return the reversed NCL 'matlab_hot' colormap, registering it on first access."""
    cname = "matlab_hot_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "matlab_hot.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def matlab_hsv(self):
    """Return the NCL 'matlab_hsv' colormap, registering it on first access."""
    cname = "matlab_hsv"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "matlab_hsv.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def matlab_hsv_r(self):
    """Return the reversed NCL 'matlab_hsv' colormap, registering it on first access."""
    cname = "matlab_hsv_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "matlab_hsv.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def matlab_jet(self):
    """Return the NCL 'matlab_jet' colormap, registering it on first access."""
    cname = "matlab_jet"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "matlab_jet.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def matlab_jet_r(self):
    """Return the reversed NCL 'matlab_jet' colormap, registering it on first access."""
    cname = "matlab_jet_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "matlab_jet.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def matlab_lines(self):
    """Return the NCL 'matlab_lines' colormap, registering it on first access."""
    cname = "matlab_lines"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "matlab_lines.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def matlab_lines_r(self):
    """Return the reversed NCL 'matlab_lines' colormap, registering it on first access."""
    cname = "matlab_lines_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "matlab_lines.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def mch_default(self):
    """Return the NCL 'mch_default' colormap, registering it on first access."""
    cname = "mch_default"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "mch_default.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def mch_default_r(self):
    """Return the reversed NCL 'mch_default' colormap, registering it on first access."""
    cname = "mch_default_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "mch_default.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def ncl_default(self):
    """Return the NCL 'ncl_default' colormap, registering it on first access."""
    cname = "ncl_default"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "ncl_default.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def ncl_default_r(self):
    """Return the reversed NCL 'ncl_default' colormap, registering it on first access."""
    cname = "ncl_default_r"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "ncl_default.rgb")
    # [::-1] reverses the color table read from the .rgb file.
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def ncview_default(self):
    """Return the NCL 'ncview_default' colormap, registering it on first access."""
    cname = "ncview_default"
    if cname in matplotlib.cm._cmap_registry:
        return matplotlib.cm.get_cmap(cname)
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "ncview_default.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def ncview_default_r(self):
cname = "ncview_default_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "ncview_default.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, | |
1
if output:
return 1
else:
a = 2
return a
return output, backward
''')
cu = torch.jit.CompilationUnit(code)
g = cu.tanh.graph
FileCheck().check_count("prim::Closure_0", 2).check("int = prim::If") \
.run(g)
code = dedent('''
def loop_in_closure(self):
output = torch.tanh(self)
def backward(grad_output):
for i in range(3):
return 1
return 4
return output, backward
''')
cu = torch.jit.CompilationUnit(code)
fc = FileCheck()
fc.check("prim::Closure").check("(Tensor, NoneType) = prim::TupleConstruct")
# Loop then two if's added in exit transform
fc.check("prim::Closure").check("prim::Loop").check_count("prim::If", 2)
fc.run(cu.loop_in_closure.graph)
code = dedent('''
def tanh(self):
output = torch.tanh(self)
def backward(grad_output):
if 1 == 1:
return 1
else:
return 1.
return output, backward
''')
with self.assertRaisesRegex(RuntimeError, "returned a value of type int but"):
cu = torch.jit.CompilationUnit(code)
    @_inline_everything
    def test_early_return_fork_join(self):
        """A scripted fn whose early returns were rewritten into prim::If can
        still be launched via fork/wait; the forked graph keeps the If."""
        @torch.jit.script
        def foo(x):
            if x.dim() == 2:
                return torch.neg(x), x
            else:
                return torch.neg(x), x + 1

        x = torch.rand(3, 4)

        @torch.jit.script
        def wait_script(x):
            fut = torch.jit._fork(foo, x)
            y_hat = foo(x)
            y = torch.jit._wait(fut)
            return y, y_hat

        # The If from foo's early returns must appear inside the fork block.
        FileCheck().check("with prim::fork").check("prim::If").check("return")\
            .run(wait_script.graph)
    def test_early_return_type_refinement(self):
        """An early return on the `x is None` branch refines Optional[int]
        to int on the else branch, so `return x` type-checks."""
        @torch.jit.script
        def test(x):
            # type: (Optional[int]) -> int
            if x is None:
                return 1
            else:
                return x
        self.assertEqual(test(None), 1)
        self.assertEqual(test(2), 2)
    def test_exceptions_with_control_flow(self):
        """Exit-transform inserts guard prim::If nodes only where code can
        actually follow a raise; each fixture pins the exact If count."""
        def test_num_ifs(func, num_ifs):
            # Script `func` and assert its graph holds exactly `num_ifs`
            # prim::If nodes.
            g = torch.jit.script(func).graph
            FileCheck().check_count("prim::If", num_ifs, exactly=True).run(g)

        def no_guard_ifs_added(x):
            # type: (int) -> int
            if x == 1:
                return 1
            else:
                if x == 2:
                    raise RuntimeError("hi")
                else:
                    raise RuntimeError("hi")

        self.checkScript(no_guard_ifs_added, (1,))
        self.checkScriptRaisesRegex(no_guard_ifs_added, (2,), Exception, "")
        # Both source ifs survive; no extra guards since every path exits.
        test_num_ifs(no_guard_ifs_added, 2)

        # FUNCTION LOOKS LIKE:
        # graph(%x.1 : int):
        #   %7 : str = prim::Constant[value="Exception"]()
        #   %2 : int = prim::Constant[value=1]()
        #   %5 : int = prim::Constant[value=2]()
        #   %19 : int = prim::Uninitialized()
        #   %3 : bool = aten::eq(%x.1, %2)
        #   %20 : int = prim::If(%3)
        #     block0():
        #       -> (%2)
        #     block1():
        #       %6 : bool = aten::eq(%x.1, %5)
        #        = prim::If(%6)
        #         block0():
        #            = prim::RaiseException(%7)
        #           -> ()
        #         block1():
        #            = prim::RaiseException(%7)
        #           -> ()
        #       -> (%19)
        #   return (%20)

        def no_ifs_added(x):
            # type: (int) -> int
            if x < 0:
                raise RuntimeError("hi")
            return x

        self.checkScript(no_ifs_added, (1,))
        self.checkScriptRaisesRegex(no_ifs_added, (-2,), Exception, "")
        # Only the source if remains; the trailing return needs no guard.
        test_num_ifs(no_ifs_added, 1)

        def test_if_might(x):
            # type: (int)
            if x > 0:
                if x == 1:
                    return 1
                else:
                    a = 2
            else:
                raise RuntimeError("hi")
            return a + 2

        self.checkScript(test_if_might, (1,))
        self.checkScript(test_if_might, (3,))
        self.checkScriptRaisesRegex(no_ifs_added, (-2,), Exception, "")
        test_num_ifs(test_if_might, 3)  # one if added to guard a + 2

        def test_loop_no_escape(x):
            # type: (int)
            if x >= 0:
                for i in range(x):
                    raise RuntimeError("hi")
            else:
                return 5
            return x + 3

        self.checkScript(test_loop_no_escape, (0,))
        self.checkScript(test_loop_no_escape, (-1,))
        self.checkScriptRaisesRegex(test_loop_no_escape, (1,), Exception, "")
        # if guard gets optimized away
        test_num_ifs(test_loop_no_escape, 1)

        def test_loop_exception_with_continue(x):
            # type: (int)
            i = 0
            for i in range(5):
                if i == x:
                    raise RuntimeError("hi")
                else:
                    continue
                print(i)
            return i + 5

        self.checkScript(test_loop_exception_with_continue, (-1,))
        self.checkScriptRaisesRegex(test_loop_exception_with_continue, (1,), Exception, "")
        test_num_ifs(test_loop_exception_with_continue, 1)  # no ifs added to guard print
    def test_exception_exits_closure(self):
        """A closure that always raises still fails the enclosing fn's
        return-coverage check, and a raise inside a nested def must not
        clobber the outer function's exit bookkeeping."""
        code = dedent('''
            def no_return_func(self):
                # type: (Tensor) -> Tensor
                output = torch.tanh(self)
                def backward(grad_output):
                    raise RuntimeError("Hi")
        ''')
        # Outer fn is annotated -> Tensor but never returns: must error.
        with self.assertRaisesRegex(RuntimeError, "does not return along all"):
            cu = torch.jit.CompilationUnit(code)

        code = dedent('''
            def test_exit_pair_reset(x):
                # type: (int) -> int
                if x > 0:
                    a = 0
                    def backward(grad_output):
                        raise RuntimeError("Hi")
                    a = a + 1
                else:
                    return x
                return a + 1
        ''')
        func = torch.jit.CompilationUnit(code).test_exit_pair_reset
        self.assertEqual(func(1,), 2)
        self.assertEqual(func(-1,), -1)
        # final a + 1 gets inlined into the first branch and optimized away
        FileCheck().check_count("prim::If", 1, exactly=True).run(func.graph)
    def test_non_final_return(self):
        """Returns in various if-nesting shapes (partial, nested, with dead
        code after them) compile and match eager results."""
        def simple(x):
            if bool(x > 3):
                return x + 1
            else:
                return x + 2
            raise RuntimeError("nope")  # unreachable: both branches return

        def nest(x):
            x = x + 1
            if bool(x > 3):
                if bool(x > 4):
                    x += 1
                return x + 1
            else:
                return x + 2

        def early_ret(x):
            x = x + 1
            if bool(x > 3):
                return x + 1
            x = x + 1
            return x + 2

        def nest_early_ret(x):
            x = x + 1
            if bool(x > 3):
                if bool(x > 4):
                    return x + 2
                return x + 1
            x = x + 1
            return x + 2

        def not_early_ret(x):
            # Only the innermost branch returns early; the rest falls
            # through to the common tail.
            s = ""
            if bool(x > 3):
                if bool(x > 4):
                    return 1, s
                s += "foo"
            else:
                s += "5"
            s += "hi"
            return 7, s

        def not_total_ret(x):
            s = ""
            if bool(x > 3):
                if bool(x > 4):
                    return 1, s
                else:
                    return 2, s
            else:
                s += "5"
            return 7, s

        # Drive each fixture across the branch boundaries (x = 2.5, 3.5, 4.5).
        for i in range(3):
            for func in [simple, nest, early_ret, nest_early_ret, not_early_ret,
                         not_total_ret]:
                self.checkScript(func, (torch.tensor(2.5 + i),))

        def vars_used_after_ret(x):
            # type: (int) -> int
            if x == 0:
                return x
            else:
                y = 2
                z = 3
            return x + y * z

        self.checkScript(vars_used_after_ret, (1,))
        self.checkScript(vars_used_after_ret, (0,))

        def complicated(x):
            # type: (int) -> int
            if x:
                if x == 2:
                    return 1
                    assert 1 == 2  # unreachable after return
                else:
                    if x == 3:
                        return 2
                        assert 1 == 2  # unreachable after return
                    else:
                        a = 2
                        b = 3
            else:
                a = 4
                b = 1
            return a + b
            assert 1 == 2  # unreachable after return

        for i in range(4):
            self.checkScript(complicated, (i,))
    def test_partial_returns(self):
        """Functions annotated with a return type must return on all paths;
        un-annotated functions may fall off the end (implicit None)."""
        with self.assertRaisesRegex(RuntimeError, "does not return along all"):
            @torch.jit.script
            def no_ret():
                # type: () -> int
                pass

        with self.assertRaisesRegex(RuntimeError, "does not return along all"):
            @torch.jit.script
            def partial(x):
                # type: (Tensor) -> int
                if x:
                    return 1

        # Even Optional[int] does not excuse a missing return.
        with self.assertRaisesRegex(RuntimeError, "does not return along all"):
            @torch.jit.script
            def typed_none():
                # type: () -> Optional[int]
                pass

        # No annotation: falling off the end returns None and is legal.
        @torch.jit.script
        def none_ret():
            pass

        self.assertIs(none_ret(), None)
        FileCheck().check(": None").run(none_ret.graph)
    def test_early_returns_loops(self):
        """Early returns inside while/for loops (including nested loops and
        code after break/return) compile and match eager execution."""
        def nest_while_ret(x):
            # type: (int) -> int
            y = 4
            while x < 4:
                if x < 3:
                    return y
                else:
                    y = y + 1
                    break
                y = y + 2  # unreachable: both branches exit the loop body
            y = y + 1
            return y

        self.checkScript(nest_while_ret, (2,))
        self.checkScript(nest_while_ret, (3,))
        self.checkScript(nest_while_ret, (4,))

        def loop_ret(x, y):
            # type: (int, int) -> (int)
            i = 0
            for i in range(x):
                if x == y:
                    return x + y
                i = i + y
            i = i - 1
            return i

        self.checkScript(loop_ret, (3, 3))
        self.checkScript(loop_ret, (2, 3))
        self.checkScript(loop_ret, (3, 1))

        def test_will_ret(y):
            # type: (int) -> int
            for i in range(y):
                return 2
            return 1

        self.checkScript(test_will_ret, (0,))
        self.checkScript(test_will_ret, (1,))

        def test_loop_nest_ret(y):
            # type: (int) -> int
            for i in range(y):
                for i in range(y - 2):
                    return 10
                return 5
            return 0

        self.checkScript(test_loop_nest_ret, (0,))
        self.checkScript(test_loop_nest_ret, (1,))
        self.checkScript(test_loop_nest_ret, (2,))
    def test_nn_init(self):
        """torch.nn.init functions compile natively in TorchScript (no
        Python-fallback op) and match eager output under the same RNG state."""
        # (init fn name, zero-arg factory producing its args, type annotation)
        tests = (
            ('constant_', (lambda: (torch.ones(2, 2), 2.5)), "Tensor, float"),
            ('ones_', (lambda: (torch.ones(2, 2),)), "Tensor"),
            ('zeros_', (lambda: (torch.ones(2, 2),)), "Tensor"),
            ('uniform_', (lambda: (torch.ones(2, 2),)), "Tensor"),
            ('normal_', (lambda: (torch.ones(2, 2),)), "Tensor"),
            ('xavier_normal_', (lambda: (torch.ones(2, 2),)), "Tensor"),
            ('xavier_uniform_', (lambda: (torch.ones(2, 2),)), "Tensor"),
        )

        for name, args_fn, type_str in tests:
            # Build test code
            # Parameter names a, b, c, ... — one per argument.
            arg_str = ', '.join([chr(i + ord('a')) for i in range(len(args_fn()))])

            code = dedent('''
                def test({arg_str}):
                    # type: ({type_str})
                    return torch.nn.init.{name}({arg_str})
            ''').format(arg_str=arg_str, type_str=type_str, name=name)
            cu = torch.jit.CompilationUnit(code)

            # Compare functions
            init_fn = getattr(torch.nn.init, name)
            # runAndSaveRNG restores the RNG so both calls see the same seed.
            script_out = self.runAndSaveRNG(cu.test, args_fn())
            eager_out = self.runAndSaveRNG(init_fn, args_fn())
            self.assertEqual(script_out, eager_out)

            # The scripted graph must not have fallen back to a Python op.
            FileCheck().check_not("prim::PythonOp").run(cu.test.graph)
    def test_early_return_rewrite(self):
        """The exit-transform rewrites early returns into a minimal number of
        guarding prim::If nodes; counts are pinned with FileCheck."""
        def test_foo(x: bool):
            if x:
                return 1
            return 2

        self.checkScript(test_foo, (True,))
        self.checkScript(test_foo, (False,))
        # The early return folds into a single If.
        FileCheck().check_count("prim::If", 1, exactly=True).run(torch.jit.script(test_foo).graph)

        def test_multiple(x: int):
            if x == 5:
                return x * x
            else:
                y = 2 * x

            z = y * 2
            if z == 8:
                return 1

            if z != 16:
                z = z - 2
                abc = 4
            else:
                return 3

            z = z * abc
            return z * z * z

        self.checkScript(test_multiple, (5,))
        self.checkScript(test_multiple, (2,))
        self.checkScript(test_multiple, (4,))
        self.checkScript(test_multiple, (3,))
        self.checkScript(test_multiple, (10,))

        # Three source ifs remain; no extra guard ifs were inserted.
        graph = torch.jit.script(test_multiple).graph
        FileCheck().check_count("prim::If", 3, exactly=True).run(graph)
    def test_is_scripting_metacompile(self):
        """When scripted, `torch.jit.is_scripting()` is a compile-time True,
        so the else branch is dropped and its invalid code never compiled."""
        @torch.jit.script
        def foo():
            if torch.jit.is_scripting():
                return 1
            else:
                print("hello") + 2  # will not be compiled

        self.assertEqual(foo(), 1)
    def test_boolean_literal_constant_metacompile(self):
        """Constant-valued conditions (a module __constants__ bool or a bare
        literal) compile only the taken branch, so the two branches may even
        have mismatched return types."""
        class Mod(torch.nn.Module):
            __constants__ = ['val']

            def __init__(self, val):
                super(Mod, self).__init__()
                self.val = val

            def forward(self):
                if self.val:
                    return 1
                else:
                    return "2"

        self.checkModule(Mod(True), ())
        self.checkModule(Mod(False), ())

        @torch.jit.script
        def foo():
            if True:
                return 1
            else:
                return "2"

        self.assertEqual(foo(), 1)
    def test_assert_is_scripting_metacompile(self):
        """`assert not torch.jit.is_scripting()` is known-False under
        scripting: the assert raises at runtime with the given message and
        the code after it is never compiled."""
        def foo():
            assert not torch.jit.is_scripting(), "TestErrorMsg"
            print("hello") + 2  # will not be compiled

        f = torch.jit.script(foo)
        with self.assertRaisesRegex(torch.jit.Error, "TestErrorMsg"):
            f()
def test_isinstance_metacompile(self):
@torch.jit.script
def test_primitive_type(x):
# type: (int) -> int
if isinstance(x, int):
return x + 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.